diff --git a/.github/workflows/auto-assign-pr.yml b/.github/workflows/auto-assign-pr.yml new file mode 100644 index 00000000000..e20bf2e4627 --- /dev/null +++ b/.github/workflows/auto-assign-pr.yml @@ -0,0 +1,15 @@ +name: Auto Assign + +on: + pull_request: + types: + - opened + +jobs: + add_assignees: + runs-on: ubuntu-latest + steps: + - uses: actions-ecosystem/action-add-assignees@v1 + with: + github_token: ${{ secrets.github_token }} + assignees: ${{ github.actor }} diff --git a/.github/workflows/build-docs.yaml b/.github/workflows/build-docs.yaml index b9c076dd98e..916cce63ecf 100644 --- a/.github/workflows/build-docs.yaml +++ b/.github/workflows/build-docs.yaml @@ -9,7 +9,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-node@v1 with: - node-version: 16 + node-version: 22 - name: Install Dependencies run: npm ci - name: Cache Docusaurus diff --git a/.github/workflows/check-format.yaml b/.github/workflows/check-format.yaml index c371d6bdfff..198befa5c77 100644 --- a/.github/workflows/check-format.yaml +++ b/.github/workflows/check-format.yaml @@ -8,7 +8,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-node@v1 with: - node-version: 16 + node-version: 22 - name: Install Dependencies run: npm ci - name: Verify format with Vale diff --git a/.github/workflows/check-versions.yaml b/.github/workflows/check-versions.yaml index 6c6b5cb4a35..e4c56be0b54 100644 --- a/.github/workflows/check-versions.yaml +++ b/.github/workflows/check-versions.yaml @@ -37,7 +37,7 @@ jobs: if: ${{ env.MESSAGES != '' }} with: message: | - :wave: :robot: :thinking: Hello! Did you make your changes in all the right places? + :wave: :robot: :thinking: Hello, @${{ github.actor }}! Did you make your changes in all the right places? ${{ env.MESSAGES }} diff --git a/.github/workflows/preview-env-deploy.yml b/.github/workflows/preview-env-deploy.yml index 7a898c3e0a5..12d9cc4ced8 100644 --- a/.github/workflows/preview-env-deploy.yml +++ b/.github/workflows/preview-env-deploy.yml @@ -15,6 +15,9 @@ jobs: name: deploy-preview-env steps: - uses: actions/checkout@v4 + - uses: actions/setup-node@v1 + with: + node-version: 22 - name: Import secrets from Vault id: secrets diff --git a/.github/workflows/publish-prod.yaml b/.github/workflows/publish-prod.yaml index e4c297b8b03..8093a190cd6 100644 --- a/.github/workflows/publish-prod.yaml +++ b/.github/workflows/publish-prod.yaml @@ -12,6 +12,7 @@ on: - "!1.3.[0-9]+" - "!8.0.[0-9]+" - "!8.1.[0-9]+" + - "!8.2.[0-9]+" permissions: id-token: write @@ -23,7 +24,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-node@v1 with: - node-version: 16 + node-version: 22 - name: Install Dependencies run: npm ci - name: Enable Indexing @@ -32,6 +33,8 @@ jobs: run: npm run build env: NODE_OPTIONS: --max_old_space_size=8192 + DOCS_SITE_URL: https://docs.camunda.io + DOCS_SITE_BASE_URL: / - name: Get Github Actions IP id: ip uses: haythem/public-ip@v1.3 diff --git a/.github/workflows/publish-stage.yaml b/.github/workflows/publish-stage.yaml index b4a3872ca47..751d6b47779 100644 --- a/.github/workflows/publish-stage.yaml +++ b/.github/workflows/publish-stage.yaml @@ -17,19 +17,19 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-node@v1 with: - node-version: 16 + node-version: 22 - name: Install Dependencies run: npm ci - name: Cache Docusaurus uses: ./.github/actions/docusaurus-cache - - name: Update URL - run: 'sed -i ''s!url: "https://docs.camunda.io"!url: "https://stage.docs.camunda.io"!g'' docusaurus.config.js' - name: Update redirects for environment run: 
"sed -i 's!https://unsupported.docs.camunda.io!https://stage.unsupported.docs.camunda.io!g' ./static/.htaccess" - name: Build run: npm run build env: NODE_OPTIONS: --max_old_space_size=8192 + DOCS_SITE_URL: https://stage.docs.camunda.io + DOCS_SITE_BASE_URL: / - name: Get Github Actions IP id: ip uses: haythem/public-ip@v1.3 diff --git a/.github/workflows/sync-rest-api-docs.yaml b/.github/workflows/sync-rest-api-docs.yaml index 15baa342a86..4f6dbc5997b 100644 --- a/.github/workflows/sync-rest-api-docs.yaml +++ b/.github/workflows/sync-rest-api-docs.yaml @@ -35,10 +35,17 @@ jobs: exit 0 fi + - name: Generate token + id: generate_token + uses: tibdex/github-app-token@v2 + with: + app_id: ${{ secrets.GH_APP_API_SYNC_ID }} + private_key: ${{ secrets.GH_APP_API_SYNC_KEY }} + - name: Create Pull Request uses: peter-evans/create-pull-request@v5 with: - token: "${{ secrets.GITHUB_TOKEN }}" + token: "${{ steps.generate_token.outputs.token }}" path: docs title: Update camunda REST API doc body: | diff --git a/.nvmrc b/.nvmrc index d9289897d30..7af24b7ddbd 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -16.15.1 +22.11.0 diff --git a/Dockerfile.build b/Dockerfile.build index 33db677d23d..4b7c61040fe 100644 --- a/Dockerfile.build +++ b/Dockerfile.build @@ -3,7 +3,7 @@ # To run: `npm run build:docker` # Install dependencies -FROM node:16 AS build-setup +FROM node:22 AS build-setup WORKDIR /app COPY package*.json ./ RUN npm install diff --git a/api/camunda/camunda-openapi.yaml b/api/camunda/camunda-openapi.yaml index d9cec7e22c5..8c6e3c1dc14 100644 --- a/api/camunda/camunda-openapi.yaml +++ b/api/camunda/camunda-openapi.yaml @@ -10,7 +10,7 @@ info: url: https://github.com/camunda/camunda/blob/main/licenses/CAMUNDA-LICENSE-1.0.txt externalDocs: description: Find out more - url: https://docs.camunda.io/docs/apis-tools/camunda-api-rest/overview/ + url: https://docs.camunda.io/docs/apis-tools/camunda-api-rest/camunda-api-rest-overview/ servers: - url: "{schema}://{host}:{port}/v2" @@ -25,11 +25,39 @@ servers: default: http description: The schema of the Camunda 8 REST API server. +tags: + - name: Authentication + - name: Authorization + - name: Clock + - name: Cluster + - name: Decision definition + - name: Decision instance + - name: Decision requirements + - name: Document + - name: Element instance + - name: Flow node instance + - name: Group + - name: Incident + - name: Job + - name: License + - name: Mapping rule + - name: Message + - name: Process definition + - name: Process instance + - name: Resource + - name: Role + - name: Signal + - name: Tenant + - name: User + - name: User task + - name: Variable + paths: /topology: get: tags: - Cluster + operationId: getTopology summary: Get cluster topology description: Obtains the current topology of the cluster the gateway is part of. responses: @@ -39,11 +67,14 @@ paths: application/json: schema: $ref: "#/components/schemas/TopologyResponse" + "500": + $ref: "#/components/responses/InternalServerError" /license: get: tags: - License - summary: Get status of Camunda license + operationId: getLicense + summary: Get license status description: Obtains the status of the current Camunda license responses: "200": @@ -52,10 +83,33 @@ paths: application/json: schema: $ref: "#/components/schemas/LicenseResponse" + "500": + $ref: "#/components/responses/InternalServerError" + + /authentication/me: + get: + tags: + - Authentication + operationId: getAuthentication + summary: Get current user + description: Retrieves the current authenticated user. 
+ responses: + "200": + description: The current user is successfully returned. + content: + application/json: + schema: + $ref: "#/components/schemas/CamundaUser" + "401": + $ref: "#/components/responses/Unauthorized" + "500": + $ref: "#/components/responses/InternalServerError" + /jobs/activation: post: tags: - Job + operationId: activateJobs summary: Activate jobs description: | Iterate through all known partitions and activate jobs up to the requested maximum. @@ -80,16 +134,12 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /jobs/{jobKey}/failure: post: tags: - Job + operationId: failJob summary: Fail job description: | Mark the job as failed @@ -133,16 +183,12 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /jobs/{jobKey}/error: post: tags: - Job + operationId: reportJobError summary: Report error for job description: | Reports a business error (i.e. non-technical) that occurs while processing a job. @@ -186,16 +232,12 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /jobs/{jobKey}/completion: post: tags: - Job + operationId: completeJob summary: Complete job description: | Complete a job with the given payload, which allows completing the associated service task. @@ -239,17 +281,13 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /jobs/{jobKey}: patch: tags: - Job - summary: Update a job + operationId: updateJob + summary: Update job description: Update a job with the given key. parameters: - name: jobKey @@ -290,17 +328,13 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /incidents/{incidentKey}/resolution: post: tags: - Incident + operationId: resolveIncident summary: Resolve incident description: > Marks the incident as resolved; most likely a call to Update job will be necessary @@ -329,1413 +363,2386 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" - /user-tasks/{userTaskKey}/completion: + /tenants: post: tags: - - User task - summary: Complete user task - description: Completes a user task with the given key. - parameters: - - name: userTaskKey - in: path - required: true - description: The key of the user task to complete. 
- schema: - type: integer - format: int64 + - Tenant + operationId: createTenant + summary: Create tenant + description: Creates a new tenant. requestBody: - required: false content: application/json: schema: - $ref: "#/components/schemas/UserTaskCompletionRequest" - + $ref: "#/components/schemas/TenantCreateRequest" + required: true responses: - "204": - description: The user task was completed successfully. - "400": - description: > - The user task with the given key cannot be completed. - More details are provided in the response body. + "201": + description: The tenant was created successfully. content: - application/problem+json: + application/json: schema: - $ref: "#/components/schemas/ProblemDetail" - "404": - description: The user task with the given key was not found. + $ref: "#/components/schemas/TenantCreateResponse" + "400": + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: Not found. The resource was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /user-tasks/{userTaskKey}/assignment: - post: + $ref: "#/components/responses/InternalServerError" + + /tenants/{tenantKey}: + patch: tags: - - User task - summary: Assign user task - description: Assigns a user task with the given key to the given assignee. + - Tenant + operationId: updateTenant + summary: Update tenant + description: Updates an existing tenant. parameters: - - name: userTaskKey + - name: tenantKey in: path required: true - description: The key of the user task to assign. + description: The unique identifier of the tenant. schema: type: integer format: int64 requestBody: - required: true content: application/json: schema: - $ref: "#/components/schemas/UserTaskAssignmentRequest" + $ref: "#/components/schemas/TenantUpdateRequest" + required: true responses: - "204": - description: The user task's assignment was adjusted. - "400": - description: > - The assignment of the user task with the given key cannot be completed. - More details are provided in the response body. + "200": + description: The tenant was updated successfully. content: - application/problem+json: + application/json: schema: - $ref: "#/components/schemas/ProblemDetail" - "404": - description: The user task with the given key was not found. + $ref: "#/components/schemas/TenantUpdateResponse" + "400": + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: Not found. The tenant was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. 
- content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /user-tasks/{userTaskKey}: + $ref: "#/components/responses/InternalServerError" + get: tags: - - User task - summary: Return user task by a user task key. - description: | - Get the user task by the user task key. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + - Tenant + operationId: getTenant + summary: Get tenant + description: Retrieves a single tenant by tenant Key. parameters: - - name: userTaskKey + - name: tenantKey in: path required: true - description: The user task key. + description: The unique identifier of the tenant. schema: type: integer format: int64 responses: "200": - description: > - The user task is successfully returned. + description: The tenant was retrieved successfully. content: application/json: schema: - $ref: "#/components/schemas/UserTaskItem" + $ref: "#/components/schemas/TenantItem" "400": - description: "Bad request" + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "401": - description: "Unauthorized" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: "Not found" + description: Tenant not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: "Internal server error" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - patch: + $ref: "#/components/responses/InternalServerError" + + delete: tags: - - User task - summary: Update user task - description: Update a user task with the given key. + - Tenant + operationId: deleteTenant + summary: Delete tenant + description: Deletes an existing tenant. parameters: - - name: userTaskKey + - name: tenantKey in: path required: true - description: The key of the user task to update. + description: The unique identifier of the tenant. schema: type: integer format: int64 - requestBody: - required: false - content: - application/json: - schema: - $ref: "#/components/schemas/UserTaskUpdateRequest" responses: "204": - description: The user task was updated successfully. + description: The tenant was deleted successfully. "400": - description: > - The user task with the given key cannot be updated. - More details are provided in the response body. + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: The user task with the given key was not found. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. + description: Not found. The tenant was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. 
- content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /user-tasks/{userTaskKey}/form: - get: - tags: - - User Task - summary: Return user task form - description: | - Get the form of a user task. - - :::note - This endpoint will only return linked forms. This endpoint does not support embedded forms. - ::: + $ref: "#/components/responses/InternalServerError" - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + /tenants/{tenantKey}/users/{userKey}: + put: + tags: + - Tenant + operationId: assignUserToTenant + summary: Assign a user to a tenant + description: Assign a single user to a specified tenant. parameters: - - name: userTaskKey + - name: tenantKey in: path required: true - description: The user task key. + description: The unique identifier of the tenant. + schema: + type: integer + format: int64 + - name: userKey + in: path + required: true + description: The unique identifier of the user. schema: type: integer format: int64 responses: - "200": - description: > - The form is successfully returned. - content: - application/json: - schema: - $ref: "#/components/schemas/FormItem" - "204": - description: > - The user task was found, but no form is associated with it. + "202": + description: The user was successfully assigned to the tenant. "400": - description: "Bad request" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "401": - description: "Unauthorized" + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: "Not found" + description: Not found. The tenant or user was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: "Internal server error" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /user-tasks/{userTaskKey}/assignee: + $ref: "#/components/responses/InternalServerError" + delete: tags: - - User task - summary: Unassign user task - description: Removes the assignee of a task with the given key. + - Tenant + operationId: removeUserFromTenant + summary: Remove a user from a tenant + description: Removes a single user from a specified tenant without deleting the user. parameters: - - name: userTaskKey + - name: tenantKey in: path required: true - description: The key of the user task. + description: The unique identifier of the tenant. + schema: + type: integer + format: int64 + - name: userKey + in: path + required: true + description: The unique identifier of the user. schema: type: integer format: int64 responses: - "204": - description: The user task was unassigned successfully. + "202": + description: The user was successfully removed from the tenant. "400": - description: > - The user task with the given key cannot be unassigned. - More details are provided in the response body. + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: The user task with the given key was not found. 
- content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. + description: Not found. The tenant or user was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: + $ref: "#/components/responses/InternalServerError" + + /tenants/{tenantKey}/mapping-rules/{mappingKey}: + put: + tags: + - Tenant + operationId: assignMappingRuleToTenant + summary: Assign a mapping rule to a tenant + description: Assign a single mapping rule to a specified tenant. + parameters: + - name: tenantKey + in: path + required: true + description: The unique identifier of the tenant. + schema: + type: integer + format: int64 + - name: mappingKey + in: path + required: true + description: The unique identifier of the mapping rule. + schema: + type: integer + format: int64 + responses: + "202": + description: The mapping rule was successfully assigned to the tenant. + "400": + description: The provided data is not valid. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: Not found. The tenant or mapping rule was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + delete: + tags: + - Tenant + operationId: removeMappingRuleFromTenant + summary: Remove a mapping rule from a tenant + description: Removes a single mapping rule from a specified tenant without deleting the rule. + parameters: + - name: tenantKey + in: path + required: true + description: The unique identifier of the tenant. + schema: + type: integer + format: int64 + - name: mappingKey + in: path + required: true + description: The unique identifier of the mapping rule. + schema: + type: integer + format: int64 + responses: + "202": + description: The mapping rule was successfully removed from the tenant. + "400": + description: The provided data is not valid. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: Not found. The tenant or mapping rule was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /tenants/{tenantKey}/groups/{groupKey}: + put: + tags: + - Tenant + operationId: assignGroupToTenant + summary: Assign a group to a tenant + description: Assign a single group to a specified tenant. + parameters: + - name: tenantKey + in: path + required: true + description: The unique identifier of the tenant. + schema: + type: integer + format: int64 + - name: groupKey + in: path + required: true + description: The unique identifier of the group. + schema: + type: integer + format: int64 + responses: + "202": + description: The group was successfully assigned to the tenant. + "400": + description: The provided data is not valid. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: Not found. The tenant or group was not found. 
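+        # Illustrative note, not part of the spec: tenant membership for users,
+        # groups, and mapping rules is managed with PUT/DELETE calls on the tenant
+        # sub-resources in this section. No request body is defined for them in
+        # this excerpt; both directions return 202 on success.
+        #
+        #   PUT    /v2/tenants/{tenantKey}/users/{userKey} -> 202 (user assigned)
+        #   DELETE /v2/tenants/{tenantKey}/users/{userKey} -> 202 (user removed, not deleted)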
+ content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + delete: + tags: + - Tenant + operationId: removeGroupFromTenant + summary: Remove a group from a tenant + description: Removes a single group from a specified tenant without deleting the group. + parameters: + - name: tenantKey + in: path + required: true + description: The unique identifier of the tenant. + schema: + type: integer + format: int64 + - name: groupKey + in: path + required: true + description: The unique identifier of the group. + schema: + type: integer + format: int64 + responses: + "202": + description: The group was successfully removed from the tenant. + "400": + description: The provided data is not valid. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: Not found. The tenant or group was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /tenants/search: + post: + tags: + - Tenant + operationId: searchTenants + summary: Query tenants + description: Retrieves a filtered and sorted list of tenants. + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/TenantSearchQueryRequest" + responses: + "200": + description: The tenants search result + content: + application/json: + schema: + $ref: "#/components/schemas/TenantSearchQueryResponse" + "400": + description: The provided data is not valid. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: Not found + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /user-tasks/{userTaskKey}/completion: + post: + tags: + - User task + operationId: completeUserTask + summary: Complete user task + description: Completes a user task with the given key. + parameters: + - name: userTaskKey + in: path + required: true + description: The key of the user task to complete. + schema: + type: integer + format: int64 + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/UserTaskCompletionRequest" + + responses: + "204": + description: The user task was completed successfully. + "400": + description: > + The user task with the given key cannot be completed. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "404": + description: The user task with the given key was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "409": + description: > + The user task with the given key is in the wrong state currently. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /user-tasks/{userTaskKey}/assignment: + post: + tags: + - User task + operationId: assignUserTask + summary: Assign user task + description: Assigns a user task with the given key to the given assignee. 
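+        # Illustrative note, not part of the spec: a minimal user task lifecycle using
+        # the completion endpoint above and the assignment endpoint defined here. The
+        # `assignee` field is an assumed property of UserTaskAssignmentRequest, not one
+        # shown in this excerpt; the completion body is optional.
+        #
+        #   POST /v2/user-tasks/{userTaskKey}/assignment  body: { "assignee": "jane.doe" } -> 204
+        #   POST /v2/user-tasks/{userTaskKey}/completion  body: {}                         -> 204
+        #
+        # A 409 means the task is currently in the wrong state for the requested operation.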
+ parameters: + - name: userTaskKey + in: path + required: true + description: The key of the user task to assign. + schema: + type: integer + format: int64 + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UserTaskAssignmentRequest" + responses: + "204": + description: The user task's assignment was adjusted. + "400": + description: > + The assignment of the user task with the given key cannot be completed. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "404": + description: The user task with the given key was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "409": + description: > + The user task with the given key is in the wrong state currently. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /user-tasks/{userTaskKey}: + get: + tags: + - User task + operationId: getUserTask + summary: Get user task + description: | + Get the user task by the user task key. + parameters: + - name: userTaskKey + in: path + required: true + description: The user task key. + schema: + type: integer + format: int64 + responses: + "200": + description: > + The user task is successfully returned. + content: + application/json: + schema: + $ref: "#/components/schemas/UserTaskItem" + "400": + description: > + The provided data is not valid. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: The user task with the given key was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + patch: + tags: + - User task + operationId: updateUserTask + summary: Update user task + description: Update a user task with the given key. + parameters: + - name: userTaskKey + in: path + required: true + description: The key of the user task to update. + schema: + type: integer + format: int64 + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/UserTaskUpdateRequest" + responses: + "204": + description: The user task was updated successfully. + "400": + description: > + The user task with the given key cannot be updated. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "404": + description: The user task with the given key was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "409": + description: > + The user task with the given key is in the wrong state currently. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /user-tasks/{userTaskKey}/form: + get: + tags: + - User task + operationId: getUserTaskForm + summary: Get user task form + description: | + Get the form of a user task. + + Note that this endpoint will only return linked forms. 
This endpoint does not support embedded forms. + parameters: + - name: userTaskKey + in: path + required: true + description: The user task key. + schema: + type: integer + format: int64 + responses: + "200": + description: > + The form is successfully returned. + content: + application/json: + schema: + $ref: "#/components/schemas/FormItem" + "204": + description: > + The user task was found, but no form is associated with it. + "400": + description: "Bad request" + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: "Not found" + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /user-tasks/{userTaskKey}/assignee: + delete: + tags: + - User task + operationId: unassignUserTask + summary: Unassign user task + description: Removes the assignee of a task with the given key. + parameters: + - name: userTaskKey + in: path + required: true + description: The key of the user task. + schema: + type: integer + format: int64 + responses: + "204": + description: The user task was unassigned successfully. + "400": + description: > + The user task with the given key cannot be unassigned. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "404": + description: The user task with the given key was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "409": + description: > + The user task with the given key is in the wrong state currently. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + /user-tasks/search: + post: + tags: + - User task + operationId: findUserTasks + summary: Query user tasks + description: | + Search for user tasks based on given criteria. + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/UserTaskSearchQueryRequest" + responses: + "200": + description: > + The user task search result. + content: + application/json: + schema: + $ref: "#/components/schemas/UserTaskSearchQueryResponse" + "400": + description: > + The user task search query failed. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + + /user-tasks/{userTaskKey}/variables/search: + post: + tags: + - User task + operationId: findUserTaskVariables + summary: Query user task variables + description: | + Search for user task variables based on given criteria. + parameters: + - name: userTaskKey + in: path + required: true + description: The key of the user task. + schema: + type: integer + format: int64 + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/UserTaskVariableSearchQueryRequest" + responses: + "200": + description: > + The user task variables search response. 
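+        # Illustrative note, not part of the spec: both search endpoints above accept an
+        # optional request body, so an empty JSON object is a valid match-everything query.
+        # Filter and sort fields live in the referenced request schemas.
+        #
+        #   POST /v2/user-tasks/search                         body: {} -> 200 UserTaskSearchQueryResponse
+        #   POST /v2/user-tasks/{userTaskKey}/variables/search body: {} -> 200 VariableSearchQueryResponse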
+ content: + application/json: + schema: + $ref: "#/components/schemas/VariableSearchQueryResponse" + "400": + description: > + The user task variables search query failed. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /variables/search: + post: + tags: + - Variable + operationId: findVariables + summary: Query variables + description: | + Search for process and local variables based on given criteria. + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/VariableSearchQueryRequest" + responses: + "200": + description: > + The variable search result. + content: + application/json: + schema: + $ref: "#/components/schemas/VariableSearchQueryResponse" + "400": + description: > + The user task search query failed. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + + /variables/{variableKey}: + get: + tags: + - Variable + operationId: getVariable + summary: Get variable + description: | + Get the variable by the variable key. + parameters: + - name: variableKey + in: path + required: true + description: The variable key. + schema: + type: integer + format: int64 + responses: + "200": + description: > + The variable is successfully returned. + content: + application/json: + schema: + $ref: "#/components/schemas/VariableItem" + "400": + description: "Bad request" + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: "Not found" + content: + application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /user-tasks/search: + "500": + $ref: "#/components/responses/InternalServerError" + + /clock: + put: + tags: + - Clock + operationId: pinClock + summary: Pin internal clock (alpha) + description: | + Set a precise, static time for the Zeebe engine’s internal clock. + When the clock is pinned, it remains at the specified time and does not advance. + To change the time, the clock must be pinned again with a new timestamp. + + :::note + This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change + in future releases. + ::: + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ClockPinRequest" + responses: + "204": + description: > + The clock was successfully pinned to the specified time in epoch milliseconds. + "400": + description: The required timestamp parameter is missing or it is negative. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /clock/reset: + post: + tags: + - Clock + operationId: resetClock + summary: Reset internal clock (alpha) + description: | + Resets the Zeebe engine’s internal clock to the current system time, enabling it to tick in real-time. + This operation is useful for returning the clock to + normal behavior after it has been pinned to a specific time. 
+ + :::note + This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change + in future releases. + ::: + responses: + "204": + description: The clock was successfully reset to the system time. + "500": + $ref: "#/components/responses/InternalServerError" + + /process-definitions/search: + post: + tags: + - Process definition + operationId: findProcessDefinitions + summary: Query process definitions + description: | + Search for process definitions based on given criteria. + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessDefinitionSearchQueryRequest" + responses: + "200": + description: > + The process definition search result. + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessDefinitionSearchQueryResponse" + "400": + description: > + The process definition search query failed. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + + /process-definitions/{processDefinitionKey}: + get: + tags: + - Process definition + operationId: getProcessDefinition + summary: Get process definition + description: | + Returns process definition as JSON. + parameters: + - name: processDefinitionKey + in: path + required: true + description: The assigned key of the process definition, which acts as a unique identifier for this process definition. + schema: + type: integer + format: int64 + responses: + "200": + description: > + The process definition is successfully returned. + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessDefinitionItem" + "400": + description: > + The process definition request failed. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: > + The process definition with the given key was not found. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /process-definitions/{processDefinitionKey}/xml: + get: + tags: + - Process definition + operationId: getProcessDefinitionXML + summary: Get process definition XML + description: | + Returns process definition as XML. + parameters: + - name: processDefinitionKey + in: path + required: true + description: The assigned key of the process definition, which acts as a unique identifier for this process. + schema: + type: integer + format: int64 + responses: + "200": + description: > + The XML of the process definition is successfully returned. + content: + text/xml: + schema: + type: string + "204": + description: > + The process definition was found but does not have XML. + content: + text/plain: + schema: + type: string + "400": + description: > + The process definition request failed. + More details are provided in the response body. 
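+        # Illustrative note, not part of the spec: process definitions can be searched
+        # with an optional-body query, fetched as JSON by key, or fetched as raw XML
+        # via the /xml sub-resource above.
+        #
+        #   POST /v2/process-definitions/search                     body: {} -> 200 ProcessDefinitionSearchQueryResponse
+        #   GET  /v2/process-definitions/{processDefinitionKey}              -> 200 ProcessDefinitionItem
+        #   GET  /v2/process-definitions/{processDefinitionKey}/xml          -> 200 text/xml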
+ content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: > + The decision with the given key was not found. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /process-definitions/{processDefinitionsKey}/form: + get: + tags: + - Process definition + operationId: getStartProcessForm + summary: Get process start form + description: | + Get the start form of a process. + + Note that this endpoint will only return linked forms. This endpoint does not support embedded forms. + parameters: + - name: processDefinitionsKey + in: path + required: true + description: The process key. + schema: + type: integer + format: int64 + responses: + "200": + description: > + The form is successfully returned. + content: + application/json: + schema: + $ref: "#/components/schemas/FormItem" + "204": + description: > + The process was found, but no form is associated with it. + "400": + description: "Bad request" + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: "Not found" + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /process-instances: post: tags: - - User task - summary: Query user tasks (alpha) + - Process instance + operationId: createProcessInstance + summary: Create process instance + description: | + Creates and starts an instance of the specified process. + The process definition to use to create the instance can be specified either using its unique key + (as returned by Deploy resources), or using the BPMN process ID and a version. + + Waits for the completion of the process instance before returning a result + when awaitCompletion is enabled. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateProcessInstanceRequest" + examples: + "By process definition key": + summary: "Create a process instance by processDefinitionKey." + value: + processDefinitionKey: 12345 + variables: {} + "By process definition ID": + summary: "Create a process instance by processDefinitionId and version." + value: + processDefinitionId: "1234-5678" + version: 1 + variables: {} + responses: + "200": + description: The process instance was created. + content: + application/json: + schema: + $ref: "#/components/schemas/CreateProcessInstanceResponse" + "400": + description: The provided data is not valid. + "500": + $ref: "#/components/responses/InternalServerError" + + /process-instances/{processInstanceKey}: + get: + tags: + - Process instance + operationId: getProcessInstance + summary: Get process instance description: | - Search for user tasks based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. 
- ::: - requestBody: - required: false - content: - application/json: - schema: - $ref: "#/components/schemas/UserTaskSearchQueryRequest" + Get the process instance by the process instance key. + parameters: + - name: processInstanceKey + in: path + required: true + description: The process instance key. + schema: + type: integer + format: int64 responses: "200": - description: > - The user task search successful response. + description: The process instance is successfully returned. content: application/json: schema: - $ref: "#/components/schemas/UserTaskSearchQueryResponse" + $ref: "#/components/schemas/ProcessInstanceItem" "400": - description: > - The user task search query failed. - More details are provided in the response body. + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "500": - description: > - An internal error occurred while processing the request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: The process instance with the given key was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" - /variables/search: + /process-instances/search: post: tags: - - Variable - summary: Query process and local variables (alpha) + - Process instance + operationId: findProcessInstances + summary: Query process instances description: | - Search for variables based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Search for process instances based on given criteria. requestBody: required: false content: application/json: schema: - $ref: "#/components/schemas/VariableSearchQueryRequest" + $ref: "#/components/schemas/ProcessInstanceSearchQueryRequest" responses: "200": - description: > - The variable search successful response. + description: The process instance search result. content: application/json: schema: - $ref: "#/components/schemas/VariableSearchQueryResponse" + $ref: "#/components/schemas/ProcessInstanceSearchQueryResponse" "400": description: > - The user task search query failed. + The process instance search query failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" - /clock: - put: + /process-instances/{processInstanceKey}/cancellation: + post: tags: - - Clock - summary: Pin internal clock (alpha) - description: | - Set a precise, static time for the Zeebe engine’s internal clock. - When the clock is pinned, it remains at the specified time and does not advance. - To change the time, the clock must be pinned again with a new timestamp. - - :::note - This endpoint is an [alpha feature](/reference/alpha-features.md) and may be subject to change - in future releases. 
- ::: + - Process instance + operationId: cancelProcessInstance + summary: Cancel process instance + description: Cancels a running process instance. As a cancelation includes more than just the removal of the process instance resource, the cancelation resource must be posted. + parameters: + - name: processInstanceKey + in: path + required: true + description: The key of the process instance to cancel. + schema: + type: integer + format: int64 requestBody: - required: true + required: false content: application/json: schema: - $ref: "#/components/schemas/ClockPinRequest" + $ref: "#/components/schemas/CancelProcessInstanceRequest" responses: "204": - description: > - The clock was successfully pinned to the specified time in epoch milliseconds. + description: The process instance is canceled. "400": - description: The required timestamp parameter is missing or it is negative. + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "500": - description: An internal error occurred while processing the request. + "404": + description: The process instance is not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /clock/reset: - post: - tags: - - Clock - summary: Reset internal clock (alpha) - description: | - Resets the Zeebe engine’s internal clock to the current system time, enabling it to tick in real-time. - This operation is useful for returning the clock to - normal behavior after it has been pinned to a specific time. - - :::note - This endpoint is an [alpha feature](/reference/alpha-features.md) and may be subject to change - in future releases. - responses: - "204": - description: The clock was successfully reset to the system time. "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /process-definitions/search: + $ref: "#/components/responses/InternalServerError" + + /process-instances/{processInstanceKey}/migration: post: tags: - - Process definition - summary: Search process definitions (alpha) + - Process instance + operationId: migrateProcessInstance + summary: Migrate process instance description: | - Search for process definitions based on given criteria. - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Migrates a process instance to a new process definition. + This request can contain multiple mapping instructions to define mapping between the active + process instance's elements and target process definition elements. + + Use this to upgrade a process instance to a new version of a process or to + a different process definition, e.g. to keep your running instances up-to-date with the + latest process improvements. + parameters: + - name: processInstanceKey + in: path + required: true + description: The key of the process instance that should be migrated. + schema: + type: integer + format: int64 requestBody: - required: false + required: true content: application/json: schema: - $ref: "#/components/schemas/ProcessDefinitionSearchQueryRequest" + $ref: "#/components/schemas/MigrateProcessInstanceRequest" responses: - "200": - description: > - The process definition search successful response. 
- content: - application/json: - schema: - $ref: "#/components/schemas/ProcessDefinitionSearchQueryResponse" + "204": + description: The process instance is migrated. "400": - description: > - The process definition search query failed. - More details are provided in the response body. + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "500": - description: > - An internal error occurred while processing the request. + "404": + description: The process instance is not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /process-definitions/{processDefinitionKey}: - get: + "500": + $ref: "#/components/responses/InternalServerError" + + /process-instances/{processInstanceKey}/modification: + post: tags: - - Process definition - summary: Get process definition by key (alpha) + - Process instance + operationId: modifyProcessInstance + summary: Modify process instance description: | - Returns process definition as JSON. - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Modifies a running process instance. + This request can contain multiple instructions to activate an element of the process or + to terminate an active instance of an element. + + Use this to repair a process instance that is stuck on an element or took an unintended path. + For example, because an external system is not available or doesn't respond as expected. parameters: - - name: processDefinitionKey + - name: processInstanceKey in: path required: true - description: The assigned key of the process definition, which acts as a unique identifier for this process definition. + description: The key of the process instance that should be modified. schema: type: integer format: int64 + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyProcessInstanceRequest" responses: - "200": - description: > - The process definition is successfully returned. - content: - application/json: - schema: - $ref: "#/components/schemas/ProcessDefinitionItem" + "204": + description: The process instance is modified. "400": - description: > - The process definition request failed. - More details are provided in the response body. + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "404": - description: > - The process definition with the given key was not found. - More details are provided in the response body. + description: The process instance is not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": + $ref: "#/components/responses/InternalServerError" + + /flownode-instances/search: + post: + tags: + - Flow node instance + operationId: findFlowNodeInstances + summary: Query flow node instances + description: | + Search for flow node instances based on given criteria. + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/FlowNodeInstanceSearchQueryRequest" + responses: + "200": + description: > + The flow node instance search result. 
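+        # Illustrative note, not part of the spec: a typical process instance lifecycle
+        # with the endpoints above. The creation body mirrors the "By process definition
+        # key" example in this file; the cancellation body is optional.
+        #
+        #   POST /v2/process-instances
+        #        body: { "processDefinitionKey": 12345, "variables": {} } -> 200 CreateProcessInstanceResponse
+        #   POST /v2/process-instances/{processInstanceKey}/cancellation
+        #        body: {}                                                 -> 204
+        #
+        # Migration and modification take required instruction payloads described by
+        # MigrateProcessInstanceRequest and ModifyProcessInstanceRequest.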
+ content: + application/json: + schema: + $ref: "#/components/schemas/FlowNodeInstanceSearchQueryResponse" + "400": description: > - An internal error occurred while processing the request. + The Flow node instance search query failed. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /process-definitions/{processDefinitionKey}/xml: + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + + /flownode-instances/{flownodeInstanceKey}: get: tags: - - Process definition - summary: Get process definition XML (alpha) + - Flow node instance + operationId: getFlowNodeInstance + summary: Get flow node instance description: | - Returns process definition as XML. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Returns flow node instance as JSON. parameters: - - name: processDefinitionKey + - name: flownodeInstanceKey in: path required: true - description: The assigned key of the process definition, which acts as a unique identifier for this process. + description: The assigned key of the flow node instance, which acts as a unique identifier for this flow node instance. schema: type: integer format: int64 responses: "200": description: > - The XML of the process definition is successfully returned. - content: - text/xml: - schema: - type: string - "204": - description: > - The process definition was found but does not have XML. + The flow node instance is successfully returned. content: - text/plain: + application/json: schema: - type: string + $ref: "#/components/schemas/FlowNodeInstanceItem" "400": description: > - The process definition request failed. + The flow node instance request failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": description: > - The decision with the given key was not found. + The flow node instance with the given key was not found. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /process-instances: + $ref: "#/components/responses/InternalServerError" + + /decision-definitions/search: post: tags: - - Process instance - summary: Create process instance + - Decision definition + operationId: findDecisionDefinitions + summary: Query decision definitions description: | - Creates and starts an instance of the specified process. - The process definition to use to create the instance can be specified either using its unique key - (as returned by Deploy resources), or using the BPMN process ID and a version. - - Waits for the completion of the process instance before returning a result - when awaitCompletion is enabled. + Search for decision definitions based on given criteria. 
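+        # Illustrative note, not part of the spec: flow node instances are located via
+        # search and then read individually by key. The filter shown is an assumed field
+        # of FlowNodeInstanceSearchQueryRequest, not one defined in this excerpt.
+        #
+        #   POST /v2/flownode-instances/search  body: { "filter": { "processInstanceKey": 22456786 } } -> 200
+        #   GET  /v2/flownode-instances/{flownodeInstanceKey}                                           -> 200 FlowNodeInstanceItem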
requestBody: - required: true + required: false content: application/json: schema: - $ref: "#/components/schemas/CreateProcessInstanceRequest" - examples: - "By process definition key": - summary: "Create a process instance by processDefinitionKey." - value: - processDefinitionKey: 12345 - variables: {} - "By process definition ID": - summary: "Create a process instance by processDefinitionId and version." - value: - processDefinitionId: "1234-5678" - version: 1 - variables: {} + $ref: "#/components/schemas/DecisionDefinitionSearchQueryRequest" responses: "200": - description: The process instance was created. + description: > + The decision definition search result. content: application/json: schema: - $ref: "#/components/schemas/CreateProcessInstanceResponse" + $ref: "#/components/schemas/DecisionDefinitionSearchQueryResponse" "400": - description: The provided data is not valid. - "500": - description: An internal error occurred while processing the request. + description: > + The decision definition search query failed. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /process-instances/{processInstanceKey}: + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + + /decision-definitions/{decisionDefinitionKey}: get: tags: - - Process instance - summary: Get process instance (alpha) + - Decision definition + operationId: getDecisionDefinition + summary: Get decision definition description: | - Get the process instance by the process instance key. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Returns a decision definition by key. parameters: - - name: processInstanceKey + - name: decisionDefinitionKey in: path required: true - description: The process instance key. + description: The assigned key of the decision definition, which acts as a unique identifier for this decision. schema: type: integer format: int64 responses: "200": - description: The process instance is successfully returned. + description: > + The decision definition is successfully returned. content: application/json: schema: - $ref: "#/components/schemas/ProcessInstanceItem" + $ref: "#/components/schemas/DecisionDefinitionItem" "400": - description: The provided data is not valid. + description: > + The decision definition request failed. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "401": - description: The request lacks valid authentication credentials. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: The process instance with the given key was not found. + description: > + The decision with the given key was not found. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: An internal error occurred while processing the request. 
- content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /process-instances/search: - post: + $ref: "#/components/responses/InternalServerError" + + /decision-definitions/{decisionDefinitionKey}/xml: + get: tags: - - Process instance - summary: Query process instances (alpha) + - Decision definition + operationId: getDecisionDefinitionXML + summary: Get decision definition XML description: | - Search for process instances based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: - requestBody: - required: false - content: - application/json: - schema: - $ref: "#/components/schemas/ProcessInstanceSearchQueryRequest" + Returns decision definition as XML. + parameters: + - name: decisionDefinitionKey + in: path + required: true + description: The assigned key of the decision definition, which acts as a unique identifier for this decision. + schema: + type: integer + format: int64 responses: "200": - description: The process instance search successful response. + description: > + The XML of the decision definition is successfully returned. content: - application/json: + text/xml: schema: - $ref: "#/components/schemas/ProcessInstanceSearchQueryResponse" + type: string "400": description: > - The process instance search query failed. + The decision definition request failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "500": - description: An internal error occurred while processing the request. + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: > + The decision with the given key was not found. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /process-instances/{processInstanceKey}/cancellation: + "500": + $ref: "#/components/responses/InternalServerError" + + /decision-requirements/search: post: tags: - - Process instance - summary: Cancel process instance - description: Cancels a running process instance. - parameters: - - name: processInstanceKey - in: path - required: true - description: The key of the process instance to cancel. - schema: - type: integer - format: int64 + - Decision requirements + operationId: findDecisionRequirements + summary: Query decision requirements + description: | + Search for decision requirements based on given criteria. requestBody: required: false content: application/json: schema: - $ref: "#/components/schemas/CancelProcessInstanceRequest" + $ref: "#/components/schemas/DecisionRequirementsSearchQueryRequest" responses: - "204": - description: The process instance is canceled. - "400": - description: The provided data is not valid. + "200": + description: > + The decision requirements search result. content: - application/problem+json: + application/json: schema: - $ref: "#/components/schemas/ProblemDetail" - "404": - description: The process instance is not found. + $ref: "#/components/schemas/DecisionRequirementsSearchQueryResponse" + "400": + description: > + The search query failed. + More details are provided in the response body. 
content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /process-instances/{processInstanceKey}/migration: - post: + $ref: "#/components/responses/InternalServerError" + + /decision-requirements/{decisionRequirementsKey}: + get: tags: - - Process instance - summary: Migrate process instance + - Decision requirements + operationId: getDecisionRequirements + summary: Get decision requirements description: | - Migrates a process instance to a new process definition. - This request can contain multiple mapping instructions to define mapping between the active - process instance's elements and target process definition elements. - - Use this to upgrade a process instance to a new version of a process or to - a different process definition, e.g. to keep your running instances up-to-date with the - latest process improvements. + Returns Decision Requirements as JSON. parameters: - - name: processInstanceKey + - name: decisionRequirementsKey in: path required: true - description: The key of the process instance that should be migrated. + description: The assigned key of the decision requirements, which acts as a unique identifier for this decision requirements. schema: type: integer format: int64 - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/MigrateProcessInstanceRequest" responses: - "204": - description: The process instance is migrated. + "200": + description: > + The decision requirements is successfully returned. + content: + application/json: + schema: + $ref: "#/components/schemas/DecisionRequirementsItem" "400": - description: The provided data is not valid. + description: > + The decision requirements request failed. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: The process instance is not found. + description: > + The decision requirements with the given key was not found. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /process-instances/{processInstanceKey}/modification: - post: + $ref: "#/components/responses/InternalServerError" + + /decision-requirements/{decisionRequirementsKey}/xml: + get: tags: - - Process instance - summary: Modify process instance + - Decision requirements + operationId: getDecisionRequirementsXML + summary: Get decision requirements XML description: | - Modifies a running process instance. - This request can contain multiple instructions to activate an element of the process or - to terminate an active instance of an element. - - Use this to repair a process instance that is stuck on an element or took an unintended path. - For example, because an external system is not available or doesn't respond as expected. + Returns decision requirements as XML. 
parameters: - - name: processInstanceKey + - name: decisionRequirementsKey in: path required: true - description: The key of the process instance that should be modified. + description: The assigned key of the decision requirements, which acts as a unique identifier for this decision. schema: type: integer format: int64 - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ModifyProcessInstanceRequest" responses: - "204": - description: The process instance is modified. + "200": + description: > + The XML of the decision requirements is successfully returned. + content: + text/xml: + schema: + type: string "400": - description: The provided data is not valid. + description: > + The decision requirements request failed. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: The process instance is not found. + description: > + The decision requirements with the given key was not found. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" - /flownode-instances/search: + /decision-instances/search: post: tags: - - Flow node Instance - summary: Query flow node instances (alpha) + - Decision instance + operationId: findDecisionInstances + summary: Query decision instances description: | - Search for flow node instances based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Search for decision instances based on given criteria. requestBody: required: false content: application/json: schema: - $ref: "#/components/schemas/FlowNodeInstanceSearchQueryRequest" + $ref: "#/components/schemas/DecisionInstanceSearchQueryRequest" responses: "200": description: > - The Flow node instance search successful response. + The decision instance search result. content: application/json: schema: - $ref: "#/components/schemas/FlowNodeInstanceSearchQueryResponse" + $ref: "#/components/schemas/DecisionInstanceSearchQueryResponse" "400": description: > - The Flow node instance Search Query failed. + The decision instance search query failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /flownode-instances/{flownodeInstanceKey}: + $ref: "#/components/responses/InternalServerError" + + /decision-instances/{decisionInstanceId}: get: tags: - - Flow node instance - summary: Get flow node instance by key (alpha) + - Decision instance + operationId: getDecisionInstance + summary: Get decision instance description: | - Returns flow node instance as JSON. 
- :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Returns a decision instance. parameters: - - name: flownodeInstanceKey + - name: decisionInstanceId in: path required: true - description: The assigned key of the flow node instance, which acts as a unique identifier for this flow node instance. + description: The assigned ID of the decision instance, which acts as a unique identifier for this decision instance. schema: - type: integer - format: int64 + type: string responses: "200": description: > - The flow node instance is successfully returned. + The decision instance is successfully returned. content: application/json: schema: - $ref: "#/components/schemas/FlowNodeInstanceItem" + $ref: "#/components/schemas/DecisionInstanceGetQueryResponse" "400": description: > - The flow node instance Get failed. + The decision instance request failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": description: > - The flow node instance with the given key was not found. + The decision instance with the given ID was not found. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. + $ref: "#/components/responses/InternalServerError" + + /decision-definitions/evaluation: + post: + tags: + - Decision definition + operationId: evaluateDecision + summary: Evaluate decision + description: | + Evaluates a decision. + You specify the decision to evaluate either by using its unique key (as returned by + DeployResource), or using the decision ID. When using the decision ID, the latest deployed + version of the decision is used. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/EvaluateDecisionRequest" + examples: + "By decision definition key": + summary: "Evaluate the decision by decisionDefinitionKey." + value: + decisionDefinitionKey: 12345 + variables: {} + "By decision definition ID": + summary: "Evaluate the decision by decisionDefinitionId." + value: + decisionDefinitionId: "1234-5678" + variables: {} + responses: + "200": + description: The decision was evaluated. + content: + application/json: + schema: + $ref: "#/components/schemas/EvaluateDecisionResponse" + "400": + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /decision-definitions/search: + "404": + description: The decision is not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + + /authorizations/{ownerKey}: + patch: + tags: + - Authorization + operationId: updateAuthorization + summary: Update authorization + description: Manage the permissions assigned to the authorization. + parameters: + - name: ownerKey + in: path + required: true + description: The key of the owner of the authorization. 
+ schema: + type: integer + format: int64 + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/AuthorizationPatchRequest" + required: true + responses: + "202": + description: | + The authorization was patched successfully. + "400": + description: | + The authorization could not be patched. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: | + The owner was not found. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "409": + description: | + The request to add or remove permissions to an authorization was in conflict. + More details are provided in the response body. + "500": + $ref: "#/components/responses/InternalServerError" + /authorizations/search: post: tags: - - Decision definition - summary: Query decision definitions (alpha) + - Authorization + summary: Query authorizations description: | - Search for decision definitions based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Search for authorizations based on given criteria. + operationId: findAuthorizations requestBody: - required: false content: application/json: schema: - $ref: "#/components/schemas/DecisionDefinitionSearchQueryRequest" + $ref: "#/components/schemas/AuthorizationSearchQueryRequest" + required: true responses: "200": - description: > - The Decision Definition Search successful response. + description: The authorization search result. content: application/json: schema: - $ref: "#/components/schemas/DecisionDefinitionSearchQueryResponse" + $ref: "#/components/schemas/AuthorizationSearchResponse" "400": description: > - The Decision Definition Search Query failed. + The authorization search query failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "500": - description: > - An internal error occurred while processing the request. + $ref: "#/components/responses/InternalServerError" + + /roles: + post: + tags: + - Role + operationId: createRole + summary: Create role + description: | + Create a new role. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/RoleCreateRequest" + responses: + "201": + description: | + The role was created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/RoleCreateResponse" + "400": + description: | + The role could not be created. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /decision-definitions/{decisionDefinitionKey}: + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + /roles/{roleKey}: get: tags: - - Decision definition - summary: Get decision definition by key (alpha) + - Role + operationId: getRole + summary: Get role description: | - Returns a decision definition. 
- - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Get a role by its key. parameters: - - name: decisionDefinitionKey + - name: roleKey in: path required: true - description: The assigned key of the decision definition, which acts as a unique identifier for this decision. + description: The role key. schema: type: integer format: int64 responses: "200": - description: > - The decision definition is successfully returned. + description: The role is successfully returned. content: application/json: schema: - $ref: "#/components/schemas/DecisionDefinitionItem" - "400": - description: > - The decision definition Get by key failed. - More details are provided in the response body. + $ref: "#/components/schemas/RoleItem" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: The role with the given key was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "404": + "500": + $ref: "#/components/responses/InternalServerError" + patch: + tags: + - Role + operationId: updateRole + summary: Update role + description: Update a role with the given key. + parameters: + - name: roleKey + in: path + required: true + description: The key of the role to update. + schema: + type: integer + format: int64 + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/RoleUpdateRequest" + responses: + "204": + description: The role was updated successfully. + "400": description: > - The decision with the given key was not found. - More details are provided in the response body. + The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "500": - description: > - An internal error occurred while processing the request. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: The role with the roleKey is not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /decision-definitions/{decisionDefinitionKey}/xml: - get: + "500": + $ref: "#/components/responses/InternalServerError" + delete: tags: - - Decision definition - summary: Get decision definition XML (alpha) - description: | - Returns decision definition as XML. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + - Role + operationId: deleteRole + summary: Delete role + description: Deletes the role with the given key. parameters: - - name: decisionDefinitionKey + - name: roleKey in: path required: true - description: The assigned key of the decision definition, which acts as a unique identifier for this decision. + description: The key of the role to delete. schema: type: integer format: int64 responses: - "200": - description: > - The XML of the decision definition is successfully returned. + "204": + description: The role was deleted successfully. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: The role with the roleKey was not found. 
content: - text/xml: + application/problem+json: schema: - type: string - "400": - description: > - The Decision Definition Get XML failed. - More details are provided in the response body. + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + /roles/search: + post: + tags: + - Role + operationId: searchRoles + summary: Query roles + description: | + Search for roles based on given criteria. + requestBody: + required: false + content: + application/json: + schema: + $ref: "#/components/schemas/RoleSearchQueryRequest" + responses: + "200": + description: The roles search result. content: - application/problem+json: + application/json: schema: - $ref: "#/components/schemas/ProblemDetail" - "404": + $ref: "#/components/schemas/RoleSearchQueryResponse" + "400": description: > - The decision with the given key was not found. + The role search query failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "500": - description: > - An internal error occurred while processing the request. + description: An internal error occurred while processing the request. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - /decision-requirements/search: + + /groups: post: tags: - - Decision requirements - summary: Query decision requirements (alpha) - description: | - Search for decision requirements based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + - Group + operationId: createGroup + summary: Create group requestBody: - required: false content: application/json: schema: - $ref: "#/components/schemas/DecisionRequirementsSearchQueryRequest" + $ref: "#/components/schemas/GroupCreateRequest" responses: - "200": - description: > - The decision requirements search successful response. + "201": + description: The group was created successfully. content: application/json: schema: - $ref: "#/components/schemas/DecisionRequirementsSearchQueryResponse" + $ref: "#/components/schemas/GroupCreateResponse" "400": - description: > - The decision requirements search query failed. + description: | + The group could not be created. More details are provided in the response body. - /decision-requirements/{decisionRequirementsKey}: + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + /groups/{groupKey}: get: tags: - - Decision requirements - summary: Get decision requirements by key (alpha) + - Group + operationId: getGroup + summary: Get group description: | - Returns Decision Requirements as JSON. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Get a group by its key. 
parameters: - - name: decisionRequirementsKey + - name: groupKey in: path required: true - description: The assigned key of the decision requirements, which acts as a unique identifier for this decision requirements. + description: The group key. schema: type: integer format: int64 responses: "200": - description: > - The decision requirements is successfully returned. + description: The group is successfully returned. content: application/json: schema: - $ref: "#/components/schemas/DecisionRequirementsItem" - "400": - description: > - The decision requirements Get failed. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/schemas/GroupItem" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: > - The decision requirements with the given key was not found. - More details are provided in the response body. + description: The group with the given key was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /decision-requirements/{decisionRequirementsKey}/xml: - get: + $ref: "#/components/responses/InternalServerError" + patch: tags: - - Decision requirements - summary: Get decision requirements XML (alpha). - description: | - Returns decision requirements as XML. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + - Group + operationId: updateGroup + summary: Update group + description: Update a group with the given key. parameters: - - name: decisionRequirementsKey + - name: groupKey in: path required: true - description: The assigned key of the decision requirements, which acts as a unique identifier for this decision. + description: The key of the group to update. schema: type: integer format: int64 + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/GroupUpdateRequest" responses: - "200": - description: > - The XML of the decision requirements is successfully returned. - content: - text/xml: - schema: - type: string + "204": + description: The group was updated successfully. "400": description: > - The decision requirements Get XML failed. - More details are provided in the response body. + The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" "404": - description: > - The decision requirements with the given key was not found. - More details are provided in the response body. + description: The group with the groupKey is not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. + $ref: "#/components/responses/InternalServerError" + delete: + tags: + - Group + operationId: deleteGroup + summary: Delete group + description: Deletes the group with the given key. + parameters: + - name: groupKey + in: path + required: true + description: The key of the group to delete. 
+ schema: + type: integer + format: int64 + responses: + "204": + description: The group was deleted successfully. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + description: The group with the groupKey was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - - /decision-instances/search: + "500": + $ref: "#/components/responses/InternalServerError" + /groups/{groupKey}/users/{userKey}: post: tags: - - Decision instance - summary: Query decision instances (alpha) + - Group + operationId: addUserToGroup + summary: Assign a user to a group description: | - Search for decision instances based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: - requestBody: - required: false - content: - application/json: - schema: - $ref: "#/components/schemas/DecisionInstanceSearchQueryRequest" + Assigns a user to a group. + parameters: + - name: groupKey + in: path + required: true + description: The group key. + schema: + type: integer + format: int64 + - name: userKey + in: path + required: true + description: The user key. + schema: + type: integer + format: int64 responses: - "200": - description: > - The decision instance search successful response. - content: - application/json: - schema: - $ref: "#/components/schemas/DecisionInstanceSearchQueryResponse" + "202": + description: The user was assigned successfully to the group. "400": - description: > - The decision instance search query failed. + description: | + The user could not be assigned. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "500": - description: > - An internal error occurred while processing the request. + "403": + $ref: "#/components/responses/Forbidden" + "404": + description: The group or user with the given key was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - - /decision-instances/{decisionInstanceKey}: - get: + "409": + description: The user with the given key is already assigned to the group. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + delete: tags: - - Decision instance - summary: Get decision instance by key (alpha) + - Group + operationId: unassignUserFromGroup + summary: Unassign a user from a group description: | - Returns a decision instance. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: + Unassigns a user from a group. parameters: - - name: decisionInstanceKey + - name: groupKey + in: path + required: true + description: The group key. + schema: + type: integer + format: int64 + - name: userKey in: path required: true - description: The assigned key of the decision instance, which acts as a unique identifier for this decision instance. + description: The user key. schema: type: integer format: int64 responses: - "200": - description: > - The decision instance is successfully returned. 
- content: - application/json: - schema: - $ref: "#/components/schemas/DecisionInstanceGetQueryResponse" + "202": + description: The user was unassigned successfully from the group. "400": - description: > - The decision instance request failed. + description: | + The user could not be unassigned. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "403": + $ref: "#/components/responses/Forbidden" "404": - description: > - The decision instance with the given key was not found. - More details are provided in the response body. + description: The group or user with the given key was not found, or the user is not assigned to this group. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - - /decision-definitions/evaluation: + $ref: "#/components/responses/InternalServerError" + /groups/search: post: tags: - - Decision definition - summary: Evaluate decision + - Group + operationId: searchGroups + summary: Query groups description: | - Evaluates a decision. - You specify the decision to evaluate either by using its unique key (as returned by - DeployResource), or using the decision ID. When using the decision ID, the latest deployed - version of the decision is used. + Search for groups based on given criteria. requestBody: - required: true + required: false content: application/json: schema: - $ref: "#/components/schemas/EvaluateDecisionRequest" - examples: - "By decision definition key": - summary: "Evaluate the decision by decisionDefinitionKey." - value: - decisionDefinitionKey: 12345 - variables: {} - "By decision definition ID": - summary: "Evaluate the decision by decisionDefinitionId." - value: - decisionDefinitionId: "1234-5678" - variables: {} + $ref: "#/components/schemas/GroupSearchQueryRequest" responses: "200": - description: The decision was evaluated. + description: The groups search result. content: application/json: schema: - $ref: "#/components/schemas/EvaluateDecisionResponse" + $ref: "#/components/schemas/GroupSearchQueryResponse" "400": - description: The provided data is not valid. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "404": - description: The decision is not found. + description: > + The group search query failed. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "500": description: An internal error occurred while processing the request. content: @@ -1743,129 +2750,119 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" - /authorizations/{ownerKey}: - patch: + /mapping-rules: + post: tags: - - Authorization - summary: Patch authorization - description: Manage the permissions assigned to the authorization. - operationId: patchAuthorization - parameters: - - name: ownerKey - in: path - required: true - description: The key of the owner of the authorization. 
- schema: - type: integer - format: int64 + - Mapping rule + operationId: createMappingRule + summary: Create mapping rule + description: | + Create a new mapping rule requestBody: content: application/json: schema: - $ref: "#/components/schemas/AuthorizationPatchRequest" - required: true + $ref: "#/components/schemas/MappingRuleCreateRequest" responses: - "202": - description: | - The Authorization was patched successfully. + "201": + description: The mapping rule was created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/MappingRuleCreateResponse" "400": - description: | - The Authorization could not be patched. - More details are provided in the response body. + description: The mapping rule could not be created. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "401": + "403": description: | - The request to patch the authorization was unauthorized. + The request to create a mapping rule was denied. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "403": - description: | - The request to patch an authorization was denied. - More details are provided in the response body. + "404": + description: The request to create a mapping rule was denied. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + /mapping-rules/{mappingKey}: + delete: + tags: + - Mapping rule + operationId: deleteMappingRule + summary: Delete a mapping rule + description: Deletes the mapping rule with the given key. + parameters: + - name: mappingKey + in: path + required: true + description: The key of the mapping rule to delete. + schema: + type: integer + format: int64 + responses: + "204": + description: The mapping rule was deleted successfully. + "401": + $ref: "#/components/responses/Unauthorized" "404": - description: | - The owner was not found. + description: The mapping rule with the mappingKey was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" - "409": - description: | - The request to add or remove permissions to an authorization was in conflict. - More details are provided in the response body. "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" - /users: + /mapping-rules/search: post: tags: - - User - summary: "Create a user" - operationId: "createUser" + - Mapping rule + operationId: findMappings + summary: Query mappings + description: | + Search for mapping rules based on given criteria. requestBody: content: application/json: schema: - $ref: "#/components/schemas/UserRequest" + $ref: "#/components/schemas/MappingSearchQueryRequest" required: true responses: - "202": - description: | - The user was created successfully. + "200": + description: The mapping rule search result. content: application/json: schema: - $ref: "#/components/schemas/UserCreateResponse" + $ref: "#/components/schemas/MappingSearchResponse" "400": - description: | - The user could not be created. + description: > + The mapping rule search query failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "401": - description: | - The request to create a user was unauthorized. 
- More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/Unauthorized" "403": - description: | - The request to create a user was denied. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/Forbidden" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /messages/publication: post: tags: - Message - summary: Publish a message + operationId: publishMessage + summary: Publish message description: | Publishes a single message. Messages are published to specific partitions computed from their correlation keys. @@ -1891,16 +2888,14 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: Internal server error. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" + /messages/correlation: post: tags: - Message - summary: Correlate a message + operationId: correlateMessage + summary: Correlate message description: | Publishes a message and correlates it to a subscription. If correlation is successful it will return the first process instance key the message correlated with. @@ -1924,11 +2919,7 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "403": - description: Unauthorized - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/Forbidden" "404": description: Not found content: @@ -1936,23 +2927,22 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: Internal server error - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /documents: post: tags: - - Documents + - Document + operationId: createDocument summary: Upload document (alpha) description: | Upload a document to the Camunda 8 cluster. + Note that this currently only supports an in-memory document store, which is not meant for production use. + :::note - This endpoint is an alpha feature. It currently only supports an in-memory document store, - which is not meant for production use. + This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change + in future releases. ::: parameters: - name: storeId @@ -2003,14 +2993,17 @@ paths: /documents/{documentId}: get: tags: - - Documents + - Document + operationId: getDocument summary: Download document (alpha) description: | Download a document from the Camunda 8 cluster. + Note that this currently only supports an in-memory document store, which is not meant for production use. + :::note - This endpoint is an alpha feature. It currently only supports an in-memory document store, - which is not meant for production use. + This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change + in future releases. ::: parameters: - name: documentId @@ -2041,22 +3034,21 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. 
- content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" + delete: tags: - - Documents + - Document + operationId: deleteDocument summary: Delete document (alpha) description: | Delete a document from the Camunda 8 cluster. + Note that this currently only supports an in-memory document store, which is not meant for production use. + :::note - This endpoint is an alpha feature. It currently only supports an in-memory document store, - which is not meant for production use. + This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change + in future releases. ::: parameters: - name: documentId @@ -2082,23 +3074,22 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /document/{documentId}/links: + $ref: "#/components/responses/InternalServerError" + + /documents/{documentId}/links: post: tags: - - Documents + - Document + operationId: createDocumentLink summary: Create document link (alpha) description: | Create a link to a document in the Camunda 8 cluster. + Note that this currently only supports an in-memory document store, which is not meant for production use. + :::note - This endpoint is an alpha feature. It currently only supports an in-memory document store, - which is not meant for production use. + This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change + in future releases. ::: parameters: - name: documentId @@ -2134,20 +3125,57 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" + /users: + post: + tags: + - User + operationId: createUser + summary: Create user + description: Create a new user. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/UserRequest" + required: true + responses: + "201": + description: | + The user was created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UserCreateResponse" + "400": + description: | + Unable to create the user. + More details are provided in the response body. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "409": + description: | + A user with the given username already exists. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + "500": + $ref: "#/components/responses/InternalServerError" + /users/search: post: tags: - User - summary: "Query users (alpha)" + operationId: findUsers + summary: Query users description: | Search for users based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: - operationId: "findAllUsers" requestBody: content: application/json: @@ -2156,55 +3184,112 @@ paths: required: true responses: "200": - description: "OK" + description: The user search result. content: application/json: schema: $ref: "#/components/schemas/UserSearchResponse" "400": - description: "Bad request" + description: > + The user search query failed. 
+ More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "401": - description: "Unauthorized" + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + + /users/{userKey}/authorizations/search: + post: + tags: + - Authorization + summary: "Query user authorizations" + description: | + Search for user authorizations based on given criteria. + operationId: "findUserAuthorizations" + parameters: + - name: userKey + in: path + required: true + description: The key of the user whose authorizations are being searched. + schema: + type: integer + format: int64 + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/AuthorizationSearchQueryRequest" + required: true + responses: + "200": + description: The user authorization search result. + content: + application/json: + schema: + $ref: "#/components/schemas/AuthorizationSearchResponse" + "400": + description: > + The user authorization search query failed. + More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" "403": - description: "Forbidden" + $ref: "#/components/responses/Forbidden" + "500": + $ref: "#/components/responses/InternalServerError" + + /users/{userKey}: + delete: + tags: + - User + operationId: deleteUser + summary: Delete user + description: | + Deletes a user. + parameters: + - name: userKey + in: path + required: true + description: The key of the user to delete. + schema: + type: integer + format: int64 + responses: + "204": + description: The user was deleted successfully. + "400": + description: The provided data is not valid. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "404": - description: "Not found" + description: The user was not found. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: "Internal server error" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /incidents/search: post: tags: - Incident - summary: Query incidents (alpha) + operationId: findIncidents + summary: Query incidents description: | Search for incidents based on given criteria. - - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: requestBody: required: false content: @@ -2214,7 +3299,7 @@ paths: responses: "200": description: > - The incident search successful response. + The incident search result. content: application/json: schema: @@ -2224,41 +3309,20 @@ paths: The incident search query failed. More details are provided in the response body.
"401": - description: "Unauthorized" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/Unauthorized" "403": - description: "Forbidden" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "404": - description: "Not found" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/Forbidden" "500": - description: "Internal server error" - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" + /incidents/{incidentKey}: get: tags: - Incident - summary: Get incident by key (alpha) + operationId: getIncident + summary: Get incident description: | Returns incident as JSON. - :::note - This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. - See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) - for further details. - ::: parameters: - name: incidentKey in: path @@ -2277,12 +3341,16 @@ paths: $ref: "#/components/schemas/IncidentItem" "400": description: > - The incident Get failed. + The incident request failed. More details are provided in the response body. content: application/problem+json: schema: $ref: "#/components/schemas/ProblemDetail" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" "404": description: > The incident with the given key was not found. @@ -2292,16 +3360,13 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: > - An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" + /deployments: post: tags: - Resource + operationId: createDeployment summary: Deploy resources description: | Deploys one or more resources (e.g. processes, decision models, or forms). @@ -2324,7 +3389,7 @@ paths: type: string description: The tenant to deploy the resources to. required: - - resource + - resources responses: "200": description: The resources are deployed. @@ -2343,6 +3408,7 @@ paths: post: tags: - Resource + operationId: deleteResource summary: Delete resource description: | Deletes a deployed resource. @@ -2381,16 +3447,13 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /element-instances/{elementInstanceKey}/variables: post: tags: - Element instance + operationId: createElementInstanceVariables summary: Update element instance variables description: | Updates all the variables of a particular scope (for example, process instance, flow element instance) with the given variable data. @@ -2422,16 +3485,13 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" /signals/broadcast: post: tags: - Signal + operationId: broadcastSignal summary: Broadcast signal description: Broadcasts a signal. 
requestBody: @@ -2460,14 +3520,135 @@ paths: schema: $ref: "#/components/schemas/ProblemDetail" "500": - description: An internal error occurred while processing the request. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" + $ref: "#/components/responses/InternalServerError" + +components: + schemas: + TenantCreateRequest: + type: object + properties: + tenantId: + type: string + description: The unique external tenant ID. + name: + type: string + description: The name of the tenant. + required: + - tenantId + + TenantCreateResponse: + type: "object" + properties: + tenantKey: + description: The unique system-generated key of the created tenant. + type: "integer" + format: "int64" + + TenantUpdateRequest: + type: object + properties: + name: + type: string + description: The new name of the tenant. + required: + - name + + TenantUpdateResponse: + type: object + properties: + tenantKey: + type: integer + description: The unique system-generated internal tenant ID. + format: int64 + tenantId: + type: string + description: The unique external tenant ID. + name: + type: string + description: The name of the tenant. + + TenantItem: + description: Tenant search response item. + type: object + properties: + tenantKey: + type: integer + description: The unique system-generated internal tenant ID. + format: int64 + name: + type: string + description: The tenant name. + tenantId: + type: string + description: The unique external tenant ID. + assignedMemberKeys: + type: array + description: The set of keys of members assigned to the tenant. + items: + type: integer + format: int64 + + TenantSearchQueryRequest: + description: Tenant search request. + type: object + allOf: + - $ref: "#/components/schemas/SearchQueryRequest" + properties: + filter: + description: The tenant search filters. + allOf: + - $ref: "#/components/schemas/TenantFilterRequest" + + TenantFilterRequest: + description: Tenant filter request. + type: object + properties: + tenantId: + type: string + description: The ID of the tenant. + name: + type: string + description: The name of the tenant. + + TenantSearchQueryResponse: + description: Tenant search response. + allOf: + - $ref: "#/components/schemas/SearchQueryResponse" + type: object + properties: + items: + description: The matching tenants. + type: array + items: + $ref: "#/components/schemas/TenantItem" + + MappingItem: + description: Mapping rule search response item. + type: object + properties: + mappingKey: + type: integer + description: The unique system-generated internal mapping ID. + format: int64 + claimName: + type: string + description: The claim name to match against a token. + claimValue: + type: string + description: The value of the claim to match. + + MappingSearchQueryResponse: + description: Mapping rule search response. + allOf: + - $ref: "#/components/schemas/SearchQueryResponse" + type: object + properties: + items: + description: The matching mapping rules. + type: array + items: + $ref: "#/components/schemas/MappingItem" -components: - schemas: UserTaskSearchQueryRequest: allOf: - $ref: "#/components/schemas/SearchQueryRequest" @@ -2475,7 +3656,14 @@ components: type: object properties: filter: - $ref: "#/components/schemas/UserTaskFilterRequest" + description: The user task search filters. + allOf: + - $ref: "#/components/schemas/UserTaskFilterRequest" + UserTaskVariableSearchQueryRequest: + allOf: + - $ref: "#/components/schemas/SearchQueryRequest" + description: User task variable search query request.
+ type: object UserTaskSearchQueryResponse: allOf: - $ref: "#/components/schemas/SearchQueryResponse" @@ -2483,6 +3671,7 @@ components: type: object properties: items: + description: The matching user tasks. type: array items: $ref: "#/components/schemas/UserTaskItem" @@ -2492,29 +3681,61 @@ components: properties: userTaskKey: type: integer + description: The key for this user task. format: int64 state: type: string + description: The state of the user task. + enum: + - CREATED + - COMPLETED + - CANCELED + - FAILED assignee: - type: string + description: The assignee of the user task. + allOf: + - $ref: "#/components/schemas/StringFilterProperty" + priority: + description: The priority of the user task. + allOf: + - $ref: "#/components/schemas/IntegerFilterProperty" elementId: type: string + description: The element ID of the user task. candidateGroup: - type: string + description: The candidate group for this user task. + allOf: + - $ref: "#/components/schemas/StringFilterProperty" candidateUser: - type: string + description: The candidate user for this user task. + allOf: + - $ref: "#/components/schemas/StringFilterProperty" processDefinitionKey: type: integer + description: The key of the process definition. format: int64 processInstanceKey: type: integer + description: The key of the process instance. format: int64 - tenantIds: + tenantId: type: string + description: Tenant ID of this user task. processDefinitionId: type: string - variables: + description: The ID of the process definition. + elementInstanceKey: + type: integer + description: The key of the element instance. + format: int64 + processInstanceVariables: + type: array + description: Process Instance variables associated with the user task. + items: + $ref: "#/components/schemas/UserTaskVariableFilterRequest" + localVariables: type: array + description: Local variables associated with the user task. items: $ref: "#/components/schemas/UserTaskVariableFilterRequest" UserTaskVariableFilterRequest: @@ -2522,63 +3743,92 @@ components: properties: name: type: string + description: Name of the variable. value: type: string + description: The value of the variable. UserTaskItem: type: object properties: userTaskKey: + description: The key of the user task. type: integer format: int64 + name: + type: string + description: The name for this user task. state: type: string + description: The state of the user task. + enum: + - CREATED + - COMPLETED + - CANCELED + - FAILED assignee: + description: The assignee of the user task. type: string elementId: type: string + description: The element ID of the user task. elementInstanceKey: type: integer + description: The key of the element instance. format: int64 - candidateGroup: + candidateGroups: type: array + description: The candidate groups for this user task. items: type: string - candidateUser: + candidateUsers: type: array + description: The candidate users for this user task. items: type: string processDefinitionId: type: string + description: The ID of the process definition. processDefinitionKey: type: integer + description: The key of the process definition. format: int64 processInstanceKey: type: integer + description: The key of the process instance. format: int64 formKey: type: integer + description: The key of the form. format: int64 creationDate: type: string + description: The creation date of a user task. format: date-time completionDate: type: string + description: The completion date of a user task. 
format: date-time followUpDate: type: string + description: The follow date of a user task. format: date-time dueDate: type: string + description: The due date of a user task. format: date-time - tenantIds: + tenantId: type: string + description: Tenant ID of this user task. externalFormReference: type: string + description: The external form reference. processDefinitionVersion: type: integer + description: The version of the process definition. format: int32 customHeaders: type: object + description: Custom headers for the user task. additionalProperties: type: string priority: @@ -2594,31 +3844,35 @@ components: type: object properties: filter: - $ref: "#/components/schemas/VariableFilterRequest" + description: The variable search filters. + allOf: + - $ref: "#/components/schemas/VariableFilterRequest" VariableFilterRequest: description: Variable filter request. type: object properties: variableKey: description: The key for this variable. - type: integer - format: int64 + allOf: + - $ref: "#/components/schemas/LongFilterProperty" name: description: Name of the variable. - type: string + allOf: + - $ref: "#/components/schemas/StringFilterProperty" value: description: The value of the variable. - type: string + allOf: + - $ref: "#/components/schemas/StringFilterProperty" scopeKey: description: The key of the scope of this variable. - type: integer - format: int64 + allOf: + - $ref: "#/components/schemas/LongFilterProperty" processInstanceKey: description: The key of the process instance of this variable. - type: integer - format: int64 + allOf: + - $ref: "#/components/schemas/LongFilterProperty" tenantId: - description: Tenant id of this variable. + description: Tenant ID of this variable. type: string isTruncated: description: Whether the value is truncated or not. @@ -2630,6 +3884,7 @@ components: type: object properties: items: + description: The matching variables. type: array items: $ref: "#/components/schemas/VariableItem" @@ -2659,7 +3914,7 @@ components: type: integer format: int64 tenantId: - description: Tenant id of this variable. + description: Tenant ID of this variable. type: string isTruncated: description: Whether the value is truncated or not. @@ -2670,9 +3925,11 @@ components: type: object properties: filter: + description: The process definition search filters. allOf: - $ref: "#/components/schemas/ProcessDefinitionFilterRequest" ProcessDefinitionFilterRequest: + description: Process definition search filter. type: object properties: processDefinitionKey: @@ -2693,10 +3950,10 @@ components: description: Version tag of this process definition. type: string processDefinitionId: - description: Process defintion id of this process definition. + description: Process definition ID of this process definition. type: string tenantId: - description: Tenant id of this process definition. + description: Tenant ID of this process definition. type: string ProcessDefinitionSearchQueryResponse: allOf: @@ -2704,6 +3961,7 @@ components: type: object properties: items: + description: The matching process definitions. type: array items: $ref: "#/components/schemas/ProcessDefinitionItem" @@ -2728,10 +3986,10 @@ components: description: Version tag of this process definition. type: string processDefinitionId: - description: Process definition id of this process definition. + description: Process definition ID of this process definition. type: string tenantId: - description: Tenant id of this process definition. + description: Tenant ID of this process definition. 
type: string ProcessInstanceSearchQueryRequest: description: Process instance search request. @@ -2740,77 +3998,284 @@ components: type: object properties: filter: + description: The process instance search filters. allOf: - $ref: "#/components/schemas/ProcessInstanceFilterRequest" + AdvancedIntegerFilter: + description: Advanced integer (int32) filter. + type: object + properties: + $eq: + description: Checks for equality with the provided value. + type: integer + format: int32 + $neq: + description: Checks for inequality with the provided value. + type: integer + format: int32 + $exists: + description: Checks if the current property exists. + type: boolean + $gt: + description: Greater than comparison with the provided value. + type: integer + format: int32 + $gte: + description: Greater than or equal comparison with the provided value. + type: integer + format: int32 + $lt: + description: Lower than comparison with the provided value. + type: integer + format: int32 + $lte: + description: Lower than or equal comparison with the provided value. + type: integer + format: int32 + $in: + description: Checks if the property matches any of the provided values. + type: array + items: + type: integer + format: int32 + IntegerFilterProperty: + description: Integer property with advanced search capabilities. + type: object + oneOf: + - type: integer + format: int32 + - $ref: "#/components/schemas/AdvancedIntegerFilter" + BasicLongFilter: + description: Basic advanced long (int64) filter. + type: object + properties: + $eq: + description: Checks for equality with the provided value. + type: integer + format: int64 + $neq: + description: Checks for inequality with the provided value. + type: integer + format: int64 + $exists: + description: Checks if the current property exists. + type: boolean + $in: + description: Checks if the property matches any of the provided values. + type: array + items: + type: integer + format: int64 + AdvancedLongFilter: + description: Advanced long (int64) filter. + allOf: + - $ref: "#/components/schemas/BasicLongFilter" + - type: object + properties: + $gt: + description: Greater than comparison with the provided value. + type: integer + format: int64 + $gte: + description: Greater than or equal comparison with the provided value. + type: integer + format: int64 + $lt: + description: Lower than comparison with the provided value. + type: integer + format: int64 + $lte: + description: Lower than or equal comparison with the provided value. + type: integer + format: int64 + AdvancedStringFilter: + description: Advanced string filter. + type: object + properties: + $eq: + description: Checks for equality with the provided value. + type: string + $neq: + description: Checks for inequality with the provided value. + type: string + $exists: + description: Checks if the current property exists. + type: boolean + $in: + description: Checks if the property matches any of the provided values. + type: array + items: + type: string + $like: + description: | + Checks if the property matches the provided like value. + Supported wildcard characters depend on the configured search client. + type: string + AdvancedProcessInstanceStateFilter: + description: Advanced ProcessInstanceStateEnum filter. + type: object + properties: + $eq: + description: Checks for equality with the provided value. + allOf: + - $ref: "#/components/schemas/ProcessInstanceStateEnum" + $neq: + description: Checks for inequality with the provided value. 
+ allOf: + - $ref: "#/components/schemas/ProcessInstanceStateEnum" + $exists: + description: Checks if the current property exists. + type: boolean + $in: + description: Checks if the property matches any of the provided values. + type: array + items: + $ref: "#/components/schemas/ProcessInstanceStateEnum" + $like: + description: | + Checks if the property matches the provided like value. + Supported wildcard characters depend on the configured search client. + type: string + AdvancedDateTimeFilter: + description: Advanced date-time filter. + type: object + properties: + $eq: + description: Checks for equality with the provided value. + type: string + format: date-time + $neq: + description: Checks for inequality with the provided value. + type: string + format: date-time + $exists: + description: Checks if the current property exists. + type: boolean + $gt: + description: Greater than comparison with the provided value. + type: string + format: date-time + $gte: + description: Greater than or equal comparison with the provided value. + type: string + format: date-time + $lt: + description: Lower than comparison with the provided value. + type: string + format: date-time + $lte: + description: Lower than or equal comparison with the provided value. + type: string + format: date-time + $in: + description: Checks if the property matches any of the provided values. + type: array + items: + type: string + format: date-time + BasicLongFilterProperty: + description: Long property with basic advanced search capabilities. + type: object + oneOf: + - type: integer + format: int64 + - $ref: "#/components/schemas/BasicLongFilter" + LongFilterProperty: + description: Long property with full advanced search capabilities. + type: object + oneOf: + - type: integer + format: int64 + - $ref: "#/components/schemas/AdvancedLongFilter" + StringFilterProperty: + description: String property with full advanced search capabilities. + type: object + oneOf: + - type: string + - $ref: "#/components/schemas/AdvancedStringFilter" + ProcessInstanceStateFilterProperty: + description: ProcessInstanceStateEnum property with full advanced search capabilities. + type: object + oneOf: + - $ref: "#/components/schemas/ProcessInstanceStateEnum" + - $ref: "#/components/schemas/AdvancedProcessInstanceStateFilter" + DateTimeFilterProperty: + description: Date-time property with full advanced search capabilities. + type: object + oneOf: + - type: string + format: date-time + - $ref: "#/components/schemas/AdvancedDateTimeFilter" ProcessInstanceFilterRequest: description: Process instance search filter. type: object properties: processInstanceKey: - type: integer description: The key of this process instance. - format: int64 + allOf: + - $ref: "#/components/schemas/LongFilterProperty" processDefinitionId: - type: string - description: The process definition id. + description: The process definition ID. + allOf: + - $ref: "#/components/schemas/StringFilterProperty" processDefinitionName: - type: string description: The process definition name. + allOf: + - $ref: "#/components/schemas/StringFilterProperty" processDefinitionVersion: - type: integer description: The process definition version. - format: int32 + allOf: + - $ref: "#/components/schemas/IntegerFilterProperty" processDefinitionVersionTag: - type: string description: The process definition version tag. + allOf: + - $ref: "#/components/schemas/StringFilterProperty" processDefinitionKey: - type: integer description: The process definition key. 
- format: int64 - rootProcessInstanceKey: - type: integer - description: The root process instance key. - format: int64 + allOf: + - $ref: "#/components/schemas/LongFilterProperty" parentProcessInstanceKey: - type: integer description: The parent process instance key. - format: int64 + allOf: + - $ref: "#/components/schemas/LongFilterProperty" parentFlowNodeInstanceKey: - type: integer description: The parent flow node instance key. - format: int64 - treePath: - type: string - description: The path of keys to this process instance, separated by '/'. + allOf: + - $ref: "#/components/schemas/LongFilterProperty" startDate: - type: string description: The start date. - format: date-time + allOf: + - $ref: "#/components/schemas/DateTimeFilterProperty" endDate: - type: string description: The end date. - format: date-time + allOf: + - $ref: "#/components/schemas/DateTimeFilterProperty" state: - $ref: "#/components/schemas/ProcessInstanceStateEnum" - description: The state, one of ACTIVE, COMPLETED, CANCELED. + description: The process instance state. + allOf: + - $ref: "#/components/schemas/ProcessInstanceStateFilterProperty" hasIncident: type: boolean description: Whether this process instance has a related incident or not. tenantId: - type: string - description: The tenant id. + description: The tenant ID. + allOf: + - $ref: "#/components/schemas/StringFilterProperty" + variables: + description: The process instance variables. + type: array + items: + $ref: "#/components/schemas/ProcessInstanceVariableFilterRequest" ProcessInstanceVariableFilterRequest: description: Process instance variable filter. type: object properties: name: + description: Name of the variable. + type: string + value: + description: The value of the variable. type: string - description: The variable name. - values: - type: array - description: The variable values. - items: - type: string ProcessInstanceSearchQueryResponse: description: Process instance search response. allOf: @@ -2818,6 +4283,7 @@ components: type: object properties: items: + description: The matching process instances. type: array items: $ref: "#/components/schemas/ProcessInstanceItem" @@ -2831,7 +4297,7 @@ components: format: int64 processDefinitionId: type: string - description: The process definition id. + description: The process definition ID. processDefinitionName: type: string description: The process definition name. @@ -2846,10 +4312,6 @@ components: type: integer description: The process definition key. format: int64 - rootProcessInstanceKey: - type: integer - description: The root process instance key. - format: int64 parentProcessInstanceKey: type: integer description: The parent process instance key. @@ -2858,9 +4320,6 @@ components: type: integer description: The parent flow node instance key. format: int64 - treePath: - type: string - description: The path of keys to this process instance, separated by '/'. startDate: type: string description: The start date. @@ -2871,14 +4330,14 @@ components: format: date-time state: $ref: "#/components/schemas/ProcessInstanceStateEnum" - description: The state, one of ACTIVE, COMPLETED, CANCELED. hasIncident: type: boolean description: Whether this process instance has a related incident or not. tenantId: type: string - description: The tenant id. + description: The tenant ID. ProcessInstanceStateEnum: + description: The state, one of ACTIVE, COMPLETED, CANCELED. 
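Illustrative sketch (not part of this spec change): a process instance filter combining the plain and advanced forms of the filter properties defined above, written as a JavaScript object literal. The property names come from ProcessInstanceFilterRequest and the advanced operators from the Advanced*Filter schemas; all values are invented for the example.

// Hypothetical filter payload; values are made up, shapes follow the schemas above.
const exampleProcessInstanceFilter = {
  processDefinitionId: "order-process",             // plain StringFilterProperty
  processDefinitionVersion: { $gte: 2 },            // AdvancedIntegerFilter
  startDate: { $gte: "2024-01-01T00:00:00.000Z" },  // AdvancedDateTimeFilter
  state: { $in: ["ACTIVE", "COMPLETED"] },          // AdvancedProcessInstanceStateFilter
  hasIncident: false,                               // plain boolean
  tenantId: { $like: "region-*" },                  // AdvancedStringFilter; wildcard support depends on the search client
  variables: [{ name: "orderId", value: "A-123" }], // ProcessInstanceVariableFilterRequest
};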
enum: - ACTIVE - COMPLETED @@ -2902,9 +4361,11 @@ components: type: object properties: filter: + description: The flow node instance search filters. allOf: - $ref: "#/components/schemas/FlowNodeInstanceFilterRequest" FlowNodeInstanceFilterRequest: + description: Flow node instance filter. type: object properties: flowNodeInstanceKey: @@ -2920,7 +4381,7 @@ components: description: The process definition key associated to this flow node instance. format: int64 processDefinitionId: - description: The process definition id associated to this flow node instance. + description: The process definition ID associated to this flow node instance. type: string state: description: State of flow node instance as defined set of values. @@ -2960,13 +4421,10 @@ components: - UNKNOWN flowNodeId: type: string - description: The flow node id for this flow node instance. + description: The flow node ID for this flow node instance. flowNodeName: type: string description: The flow node name. - treePath: - type: string - description: The path of keys from process instance to this flow node instance separated by '/'. hasIncident: type: boolean description: Shows whether this flow node instance has an incident related to. @@ -2975,7 +4433,7 @@ components: description: The key of incident if field incident is true. format: int64 tenantId: - description: The tenant id. + description: The tenant ID. type: string FlowNodeInstanceSearchQueryResponse: allOf: @@ -2983,6 +4441,7 @@ components: type: object properties: items: + description: The matching flow node instances. type: array items: $ref: "#/components/schemas/FlowNodeInstanceItem" @@ -3002,7 +4461,7 @@ components: type: integer format: int64 processDefinitionId: - description: The process definition id associated to this flow node instance. + description: The process definition ID associated to this flow node instance. type: string startDate: description: Date when flow node instance started. @@ -3013,10 +4472,10 @@ components: type: string format: date-time flowNodeId: - description: The flow node id for this flow node instance. + description: The flow node ID for this flow node instance. type: string - treePath: - description: The path from process instance leading to this flow node instance. + flowNodeName: + description: The flow node name for this flow node instance. type: string type: description: Type of flow node as defined set of values. @@ -3062,7 +4521,7 @@ components: type: integer format: int64 tenantId: - description: The tenant id of the incident. + description: The tenant ID of the incident. type: string DecisionDefinitionSearchQueryRequest: allOf: @@ -3070,9 +4529,11 @@ components: type: object properties: filter: + description: The decision definition search filters. allOf: - $ref: "#/components/schemas/DecisionDefinitionFilterRequest" DecisionDefinitionFilterRequest: + description: Decision definition search filter. type: object properties: decisionDefinitionKey: @@ -3081,7 +4542,7 @@ components: description: The assigned key, which acts as a unique identifier for this decision definition. decisionDefinitionId: type: string - description: The DMN id of the decision definition. + description: The DMN ID of the decision definition. name: type: string description: The DMN name of the decision definition. @@ -3091,23 +4552,25 @@ components: description: The assigned version of the decision definition. decisionRequirementsId: type: string - description: the DMN id of the decision requirements graph that the decision definition is part of. 
+ description: the DMN ID of the decision requirements graph that the decision definition is part of. decisionRequirementsKey: type: integer format: int64 description: The assigned key of the decision requirements graph that the decision definition is part of. tenantId: type: string - description: The tenant id of the decision definition. + description: The tenant ID of the decision definition. IncidentSearchQueryRequest: allOf: - $ref: "#/components/schemas/SearchQueryRequest" type: object properties: filter: + description: The incident search filters. allOf: - $ref: "#/components/schemas/IncidentFilterRequest" IncidentFilterRequest: + description: Incident search filter. type: object properties: incidentKey: @@ -3120,7 +4583,7 @@ components: description: The process definition key associated to this incident. processDefinitionId: type: string - description: The process definition id associated to this incident. + description: The process definition ID associated to this incident. processInstanceKey: type: integer format: int64 @@ -3146,7 +4609,7 @@ components: description: Error message which describes the error in more detail. flowNodeId: type: string - description: The flow node id associated to this incident. + description: The flow node ID associated to this incident. flowNodeInstanceKey: type: integer format: int64 @@ -3167,11 +4630,8 @@ components: type: integer format: int64 description: The job key, if exists, associated with this incident. - treePath: - type: string - description: The path from process instance via flow node ids and flow node instance keys leading to this incident. tenantId: - description: The tenant id of the incident. + description: The tenant ID of the incident. type: string IncidentSearchQueryResponse: allOf: @@ -3179,6 +4639,7 @@ components: type: object properties: items: + description: The matching incidents. type: array items: $ref: "#/components/schemas/IncidentItem" @@ -3195,7 +4656,7 @@ components: description: The process definition key associated to this incident. processDefinitionId: type: string - description: The process definition id associated to this incident. + description: The process definition ID associated to this incident. processInstanceKey: type: integer format: int64 @@ -3208,6 +4669,7 @@ components: - UNKNOWN - IO_MAPPING_ERROR - JOB_NO_RETRIES + - EXECUTION_LISTENER_NO_RETRIES - CONDITION_ERROR - EXTRACT_VALUE_ERROR - CALLED_ELEMENT_ERROR @@ -3221,7 +4683,7 @@ components: description: Error message which describes the error in more detail. flowNodeId: type: string - description: The flow node id associated to this incident. + description: The flow node ID associated to this incident. flowNodeInstanceKey: type: integer format: int64 @@ -3240,56 +4702,10 @@ components: - PENDING jobKey: type: integer - description: The job key, if exists, associated with this incident. - format: int64 - treePath: - type: string - description: The path from process instance via flow node ids and flow node instance keys leading to this incident. - tenantId: - description: The tenant id of the incident. 
- type: string - OperationItem: - description: " Operation" - type: object - properties: - id: - type: string - batchOperationId: - type: string - type: - type: string - enum: - - RESOLVE_INCIDENT - - CANCEL_PROCESS_INSTANCE - - DELETE_PROCESS_INSTANCE - - ADD_VARIABLE - - UPDATE_VARIABLE - - MODIFY_PROCESS_INSTANCE - - DELETE_DECISION_DEFINITION - - DELETE_PROCESS_DEFINITION - - MIGRATE_PROCESS_INSTANCE - state: - type: string - enum: - - SCHEDULED - - LOCKED - - SENT - - FAILED - - COMPLETED - errorMessage: - type: string - completedDate: - type: string - format: date-time - ProcessInstanceReferenceItem: - description: "Process instance reference description" - type: object - properties: - instanceId: - type: string - processDefinitionId: - type: string - processDefinitionName: + description: The job key, if exists, associated with this incident. + format: int64 + tenantId: + description: The tenant ID of the incident. type: string DecisionDefinitionSearchQueryResponse: allOf: @@ -3297,6 +4713,7 @@ components: type: object properties: items: + description: The matching decision definitions. type: array items: $ref: "#/components/schemas/DecisionDefinitionItem" @@ -3309,7 +4726,7 @@ components: description: The assigned key, which acts as a unique identifier for this decision definition. decisionDefinitionId: type: string - description: The DMN id of the decision definition. + description: The DMN ID of the decision definition. name: type: string description: The DMN name of the decision definition. @@ -3319,14 +4736,72 @@ components: description: The assigned version of the decision definition. decisionRequirementsId: type: string - description: the DMN id of the decision requirements graph that the decision definition is part of. + description: the DMN ID of the decision requirements graph that the decision definition is part of. decisionRequirementsKey: type: integer format: int64 description: The assigned key of the decision requirements graph that the decision definition is part of. tenantId: type: string - description: The tenant id of the decision definition. + description: The tenant ID of the decision definition. + PermissionTypeEnum: + description: Specifies the type of permissions. + enum: + - ACCESS + - CREATE + - CREATE_PROCESS_INSTANCE + - CREATE_DECISION_INSTANCE + - READ + - READ_PROCESS_INSTANCE + - READ_USER_TASK + - READ_DECISION_INSTANCE + - UPDATE + - UPDATE_PROCESS_INSTANCE + - UPDATE_USER_TASK + - DELETE + - DELETE_PROCESS + - DELETE_DRD + - DELETE_FORM + - DELETE_PROCESS_INSTANCE + ResourceTypeEnum: + description: The type of resource to add/remove permissions to/from. + enum: + - AUTHORIZATION + - MAPPING_RULE + - MESSAGE + - BATCH + - APPLICATION + - SYSTEM + - TENANT + - DEPLOYMENT + - PROCESS_DEFINITION + - DECISION_REQUIREMENTS_DEFINITION + - DECISION_DEFINITION + - GROUP + - USER + - ROLE + OwnerTypeEnum: + description: The type of the owner of permissions. + enum: + - USER + - ROLE + - GROUP + - MAPPING + - UNSPECIFIED + PermissionDTO: + type: object + properties: + permissionType: + description: Specifies the type of permissions. + type: object + allOf: + - $ref: "#/components/schemas/PermissionTypeEnum" + resourceIds: + type: array + description: A list of resource IDs the permission relates to. + uniqueItems: true + items: + type: string AuthorizationPatchRequest: type: object properties: @@ -3337,49 +4812,83 @@ components: - ADD - REMOVE resourceType: - description: The type of resource to add/remove perissions to/from. 
- enum: - - AUTHORIZATION - - MAPPING_RULE - - MESSAGE - - BATCH - - APPLICATION - - SYSTEM - - TENANT - - DEPLOYMENT - - PROCESS_DEFINITION - - DECISION_REQUIREMENTS_DEFINITION - - DECISION_DEFINITION - - GROUP - - USER - - ROLE + description: The type of resource to add/remove permissions to/from. + type: object + allOf: + - $ref: "#/components/schemas/ResourceTypeEnum" permissions: type: array description: The permissions to add/remove. items: - properties: - permissionType: - description: Specifies the type of permissions. - enum: - - CREATE - - READ - - UPDATE - - DELETE - resourceIds: - type: array - description: A list of resource IDs the permission relates to. - items: - type: string + $ref: "#/components/schemas/PermissionDTO" + AuthorizationSearchQueryRequest: + allOf: + - $ref: "#/components/schemas/SearchQueryRequest" + type: object + properties: + filter: + description: The authorization search filters. + allOf: + - $ref: "#/components/schemas/AuthorizationFilterRequest" + AuthorizationFilterRequest: + description: Authorization search filter. + type: object + properties: + ownerType: + description: The type of the owner of permissions. + type: object + allOf: + - $ref: "#/components/schemas/OwnerTypeEnum" + ownerKey: + description: The id of the owner of permissions. + type: integer + format: int64 + AuthorizationResponse: + type: "object" + properties: + ownerKey: + description: The id of the owner of permissions. + type: integer + format: int64 + ownerType: + description: The type of the owner of permissions. + type: object + allOf: + - $ref: "#/components/schemas/OwnerTypeEnum" + resourceType: + description: The type of resource that owner have permissions. + type: object + allOf: + - $ref: "#/components/schemas/ResourceTypeEnum" + permissions: + type: array + description: The permissions. + items: + $ref: "#/components/schemas/PermissionDTO" + AuthorizationSearchResponse: + type: object + allOf: + - $ref: "#/components/schemas/SearchQueryResponse" + properties: + items: + description: The matching authorizations. + type: array + items: + $ref: "#/components/schemas/AuthorizationResponse" UserRequest: type: "object" properties: password: + description: The password of the user. type: "string" username: + description: The username of the user. type: "string" name: + description: The name of the user. type: "string" email: + description: The email of the user. type: "string" UserCreateResponse: type: "object" @@ -3394,31 +4903,121 @@ components: type: object properties: filter: + description: The user search filters. allOf: - $ref: "#/components/schemas/UserFilterRequest" + MappingSearchQueryRequest: + allOf: + - $ref: "#/components/schemas/SearchQueryRequest" + type: object + properties: + filter: + description: The mapping search filters. + allOf: + - $ref: "#/components/schemas/MappingFilterRequest" UserFilterRequest: + description: User search filter. type: object properties: username: + description: The username of the user. type: "string" name: + description: The name of the user. type: "string" email: + description: The email of the user. + type: "string" + MappingFilterRequest: + description: Mapping search filter. + type: object + properties: + claimName: + type: string + description: The claim name to match against a token. + claimValue: + type: string + description: The value of the claim to match. + CamundaUser: + type: "object" + properties: + userId: + description: The ID of the user. 
+ type: "string" + userKey: + description: The system generated key of the user. + type: "integer" + format: "int64" + displayName: + description: The display name of the user. + type: "string" + authorizedApplications: + description: The applications the user is authorized to use. + type: array + items: + type: "string" + tenants: + description: The tenants the user is a member of. + type: array + items: + type: "object" + properties: + tenantId: + type: "string" + description: The ID of the tenant. + name: + type: "string" + description: The name of the tenant. + groups: + description: The groups assigned to the user. + type: array + items: + type: "string" + roles: + description: The roles assigned to the user. + type: array + items: + type: "string" + salesPlanType: + description: The plan of the user. type: "string" + c8Links: + description: The links to the components in the C8 stack. + type: array + items: + type: "object" + properties: + name: + type: "string" + description: The name of the component. + link: + type: "string" + description: A link to the component. + canLogout: + description: Flag for understanding if the user is able to perform logout. + type: boolean + apiUser: + description: Flag for understanding if the user is an API user. + type: boolean UserResponse: type: "object" properties: id: + description: The ID of the user. type: "integer" format: "int64" key: + description: The key of the user. type: "integer" format: "int64" username: + description: The username of the user. type: "string" name: + description: The name of the user. type: "string" email: + description: The email of the user. type: "string" UserSearchResponse: type: object @@ -3426,9 +5025,216 @@ components: - $ref: "#/components/schemas/SearchQueryResponse" properties: items: + description: The matching users. type: array items: $ref: "#/components/schemas/UserResponse" + UserUpdateRequest: + type: object + properties: + changeset: + $ref: "#/components/schemas/UserChangeset" + UserChangeset: + description: | + JSON object with changed user attribute values. + type: object + properties: + name: + type: string + description: The new name of the user. + email: + type: string + description: The new email of the user. + password: + type: string + description: The new password of the user. + RoleCreateRequest: + type: "object" + properties: + name: + type: "string" + description: The display name of the new role. + RoleCreateResponse: + type: "object" + properties: + roleKey: + description: The key of the created role. + type: "integer" + format: "int64" + RoleUpdateRequest: + type: object + properties: + changeset: + description: The set of changed role attributes. + allOf: + - $ref: "#/components/schemas/RoleChangeset" + required: + - changeset + RoleChangeset: + description: A set of changed role attributes. + type: object + properties: + name: + type: string + description: The updated display name of the role. + RoleItem: + description: Role search response item. + type: object + properties: + key: + type: integer + description: The role key. + format: int64 + name: + type: string + description: The role name. + assignedMemberKeys: + type: array + description: The set of keys of members assigned to the role. + items: + type: integer + format: int64 + RoleSearchQueryRequest: + description: Role search request. + allOf: + - $ref: "#/components/schemas/SearchQueryRequest" + type: object + RoleSearchQueryResponse: + description: Role search response. 
+ allOf: + - $ref: "#/components/schemas/SearchQueryResponse" + type: object + properties: + items: + description: The matching roles. + type: array + items: + $ref: "#/components/schemas/RoleItem" + + GroupCreateRequest: + type: "object" + properties: + name: + type: "string" + description: The display name of the new group. + GroupCreateResponse: + type: "object" + properties: + groupKey: + description: The key of the created group. + type: "integer" + format: "int64" + GroupUpdateRequest: + type: object + properties: + changeset: + description: The set of changed group attributes. + allOf: + - $ref: "#/components/schemas/GroupChangeset" + required: + - changeset + GroupChangeset: + description: A set of changed group attributes. + type: object + properties: + name: + type: string + description: The updated display name of the group. + GroupItem: + description: Group search response item. + type: object + properties: + groupKey: + type: integer + description: The group key. + format: int64 + name: + type: string + description: The group name. + assignedMemberKeys: + type: array + description: The set of keys of members assigned to the group. + items: + type: integer + format: int64 + GroupSearchQueryRequest: + description: Group search request. + allOf: + - $ref: "#/components/schemas/SearchQueryRequest" + type: object + properties: + filter: + description: The group search filters. + allOf: + - $ref: "#/components/schemas/GroupFilterRequest" + GroupFilterRequest: + description: Group filter request + type: object + properties: + name: + type: string + description: The name of the group. + GroupSearchQueryResponse: + description: Group search response. + allOf: + - $ref: "#/components/schemas/SearchQueryResponse" + type: object + properties: + items: + description: The matching groups. + type: array + items: + $ref: "#/components/schemas/GroupItem" + + MappingRuleCreateRequest: + type: object + properties: + claimName: + type: string + description: The name of the claim to map. + claimValue: + type: string + description: The value of the claim to map. + required: + - claimName + - claimValue + MappingRuleCreateResponse: + type: object + properties: + mappingKey: + description: The key of the created mapping rule. + type: integer + format: int64 + claimName: + type: string + description: The name of the claim to map. + claimValue: + type: string + description: The value of the claim to map. + MappingSearchResponse: + type: object + allOf: + - $ref: "#/components/schemas/SearchQueryResponse" + properties: + items: + description: The matching mapping rules. + type: array + items: + $ref: "#/components/schemas/MappingResponse" + MappingResponse: + type: "object" + properties: + mappingKey: + description: The key of the created mapping rule. + type: integer + format: int64 + claimName: + type: string + description: The name of the claim to map. + claimValue: + type: string + description: The value of the claim to map. + TopologyResponse: description: The response of a topology request. type: object @@ -3469,6 +5275,14 @@ components: licenseType: description: Will return the license type property of the Camunda license type: string + isCommercial: + description: Will be false when a license contains a non-commerical=true property + type: boolean + expiresAt: + description: The date when the Camunda license expires + type: string + format: date-time + nullable: true BrokerInfo: description: Provides information on a broker node. 
type: object @@ -3676,6 +5490,7 @@ components: type: object properties: jobs: + description: The activated jobs. type: array items: $ref: "#/components/schemas/ActivatedJob" @@ -3792,6 +5607,22 @@ components: description: The variables to complete the job with. type: object nullable: true + result: + $ref: "#/components/schemas/JobResult" + JobResult: + type: object + nullable: true + description: The result of the completed job as determined by the worker. + properties: + denied: + type: boolean + description: > + Indicates whether the worker denies the work, i.e. explicitly doesn't approve it. + For example, a Task Listener can deny the completion of a task by setting this flag to true. + In this example, the completion of a task is represented by a job that the worker can complete as denied. + As a result, the completion request is rejected and the task remains active. + Defaults to false. + nullable: true JobUpdateRequest: type: object properties: @@ -3856,11 +5687,13 @@ components: type: object properties: sort: + description: Sort field criteria. type: array items: allOf: - $ref: "#/components/schemas/SearchQuerySortRequest" page: + description: Pagination criteria. allOf: - $ref: "#/components/schemas/SearchQueryPageRequest" type: object @@ -3868,16 +5701,20 @@ components: type: object properties: from: + description: The index of items to start searching from. type: integer format: int32 limit: + description: The maximum number of items to return in one request. type: integer format: int32 searchAfter: + description: Items to search after. Correlates to the `lastSortValues` property of a previous search response. type: array items: type: object searchBefore: + description: Items to search before. Correlates to the `firstSortValues` property of a previous search response. type: array items: type: object @@ -3885,59 +5722,48 @@ components: type: object properties: field: + description: The field to sort by. type: string order: - type: string - default: asc + $ref: "#/components/schemas/SortOrderEnum" required: - field SearchQueryResponse: type: object properties: page: + description: Pagination information about the search results. allOf: - $ref: "#/components/schemas/SearchQueryPageResponse" - type: object SearchQueryPageResponse: + description: Pagination information about the search results. type: object properties: totalItems: + description: Total items matching the criteria. type: integer format: int64 firstSortValues: + description: The sort values of the first item in the result set. Use this in the `searchBefore` field of an ensuing request. type: array items: type: object lastSortValues: + description: The sort values of the last item in the result set. Use this in the `searchAfter` field of an ensuing request. type: array items: type: object - VariableValueFilterRequest: - type: object - properties: - name: - type: string - eq: - type: object - neq: - type: object - gt: - type: object - gte: - type: object - lt: - type: object - lte: - type: object DecisionRequirementsSearchQueryRequest: allOf: - $ref: "#/components/schemas/SearchQueryRequest" type: object properties: filter: + description: The decision definition search filters. allOf: - $ref: "#/components/schemas/DecisionRequirementsFilterRequest" DecisionRequirementsFilterRequest: + description: Decision requirements search filter. type: object properties: decisionRequirementsKey: @@ -3953,7 +5779,7 @@ components: description: The assigned version of the decision requirements. 
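Illustrative sketch (not part of this spec change): how a client could page through search results using the `searchAfter` / `lastSortValues` correlation described in the SearchQueryPageRequest and SearchQueryPageResponse schemas above. The endpoint path, base URL, and header handling are assumptions for the example; the `sort` and `page` field names come from the spec. Requires Node 18+ for the global fetch.

// Assumed gateway address and search endpoint; adjust to your deployment.
const baseUrl = "http://localhost:8080/v2";
const pageSize = 100;

async function fetchAllProcessInstances(extraHeaders = {}) {
  const all = [];
  let searchAfter;
  for (;;) {
    const response = await fetch(`${baseUrl}/process-instances/search`, {
      method: "POST",
      headers: { "Content-Type": "application/json", ...extraHeaders },
      body: JSON.stringify({
        sort: [{ field: "processInstanceKey", order: "ASC" }],
        page: { limit: pageSize, ...(searchAfter ? { searchAfter } : {}) },
      }),
    });
    const { items = [], page } = await response.json();
    all.push(...items);
    if (items.length < pageSize) return all;
    // The response page carries the sort values of its last item; pass them back
    // verbatim as `searchAfter` to continue where this page ended.
    searchAfter = page.lastSortValues;
  }
}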
decisionRequirementsId: type: string - description: the DMN id of the decision requirements. + description: the DMN ID of the decision requirements. tenantId: type: string description: The tenant ID of the decision requirements. @@ -3963,6 +5789,7 @@ components: type: object properties: items: + description: The matching decision requirements. type: array items: $ref: "#/components/schemas/DecisionRequirementsItem" @@ -3982,7 +5809,7 @@ components: description: The assigned version of the decision requirements. decisionRequirementsId: type: string - description: the DMN id of the decision requirements. + description: The DMN ID of the decision requirements. resourceName: type: string description: The name of the resource from which this decision requirements was parsed. @@ -4068,12 +5895,13 @@ components: type: integer format: int64 evaluatedDecisions: + description: Decisions that were evaluated within the requested decision evaluation. type: array items: $ref: "#/components/schemas/EvaluatedDecisionItem" EvaluatedDecisionItem: type: object - description: List of decisions that were evaluated within the requested decision evaluation. + description: A decision that was evaluated. properties: decisionDefinitionKey: description: The unique key identifying the decision which was evaluate. @@ -4100,16 +5928,18 @@ components: description: The tenant ID of the evaluated decision. type: string matchedRules: + description: The decision rules that matched within this decision evaluation. type: array items: $ref: "#/components/schemas/MatchedDecisionRuleItem" evaluatedInputs: + description: The decision inputs that were evaluated within this decision evaluation. type: array items: $ref: "#/components/schemas/EvaluatedDecisionInputItem" MatchedDecisionRuleItem: type: object - description: The decision rules that matched within this decision evaluation. + description: A decision rule that matched within this decision evaluation. properties: ruleId: description: The ID of the matched rule. @@ -4119,12 +5949,13 @@ components: type: integer format: int32 evaluatedOutputs: + description: The evaluated decision outputs. type: array items: $ref: "#/components/schemas/EvaluatedDecisionOutputItem" EvaluatedDecisionInputItem: type: object - description: The decision inputs that were evaluated within this decision evaluation. + description: A decision input that was evaluated within this decision evaluation. properties: inputId: description: The ID of the evaluated decision input. @@ -4154,21 +5985,30 @@ components: type: object properties: filter: + description: The decision instance search filters. allOf: - $ref: "#/components/schemas/DecisionInstanceFilterRequest" DecisionInstanceFilterRequest: + description: Decision instance search filter. type: object properties: decisionInstanceKey: type: integer format: int64 - description: The key of the decision instance. + description: | + The key of the decision instance. Note that this is not the unique identifier of the entity itself; the `decisionInstanceId` serves as the primary identifier. + decisionInstanceId: + type: string + description: The ID of the decision instance. state: $ref: "#/components/schemas/DecisionInstanceStateEnum" - description: The state of the decision instance. evaluationFailure: type: string description: The evaluation failure of the decision instance. + evaluationDate: + description: The evaluation date of the decision instance. 
+ allOf: + - $ref: "#/components/schemas/DateTimeFilterProperty" processDefinitionKey: type: integer format: int64 @@ -4178,9 +6018,9 @@ components: format: int64 description: The key of the process instance. decisionDefinitionKey: - type: integer - format: int64 description: The key of the decision. + allOf: + - $ref: "#/components/schemas/BasicLongFilterProperty" decisionDefinitionId: type: string description: The ID of the DMN decision. @@ -4193,7 +6033,6 @@ components: description: The version of the decision. decisionDefinitionType: $ref: "#/components/schemas/DecisionDefinitionTypeEnum" - description: The type of the decision. tenantId: type: string description: The tenant ID of the decision instance. @@ -4203,6 +6042,7 @@ components: type: object properties: items: + description: The matching decision instances. type: array items: $ref: "#/components/schemas/DecisionInstanceItem" @@ -4213,10 +6053,12 @@ components: decisionInstanceKey: type: integer format: int64 - description: The key of the decision instance. + description: The key of the decision instance. Note that this is not the unique identifier of the entity itself; the `decisionInstanceId` serves as the primary identifier. + decisionInstanceId: + type: string + description: The ID of the decision instance. state: $ref: "#/components/schemas/DecisionInstanceStateEnum" - description: The state of the decision instance. evaluationDate: type: string format: date-time @@ -4248,7 +6090,6 @@ components: description: The version of the decision. decisionDefinitionType: $ref: "#/components/schemas/DecisionDefinitionTypeEnum" - description: The type of the decision. result: type: string description: The result of the decision instance. @@ -4275,17 +6116,25 @@ components: The matched rules of the decision instance. DecisionDefinitionTypeEnum: + description: The type of the decision. enum: - DECISION_TABLE - LITERAL_EXPRESSION - UNSPECIFIED - UNKNOWN DecisionInstanceStateEnum: + description: The state of the decision instance. enum: - EVALUATED - FAILED - UNSPECIFIED - UNKNOWN + SortOrderEnum: + description: The order in which to sort the related field. + enum: + - ASC + - DESC + default: ASC MessageCorrelationRequest: type: object @@ -4372,7 +6221,7 @@ components: DocumentReference: type: object properties: - documentType: + camunda.document.type: type: string description: Document discriminator. Always set to "camunda". enum: @@ -4386,8 +6235,8 @@ components: metadata: $ref: "#/components/schemas/DocumentMetadata" DocumentMetadata: + description: Information about the document. type: object - additionalProperties: true properties: contentType: type: string @@ -4403,14 +6252,19 @@ components: type: integer format: int64 description: The size of the document in bytes. + customProperties: + type: object + description: Custom properties of the document. + additionalProperties: true + DocumentLinkRequest: type: object properties: - expiresAt: - type: string - format: date-time - description: The date and time when the link expires. - nullable: true + timeToLive: + type: integer + format: int64 + description: The time-to-live of the document link in ms. + default: 3600000 # 1 hour DocumentLink: type: object properties: @@ -4430,10 +6284,12 @@ components: format: int64 description: The unique key identifying the deployment. deployments: + description: Items deployed by the request. type: array items: $ref: "#/components/schemas/DeploymentMetadata" tenantId: + description: The tenant ID associated with the deployment. 
type: string DeploymentMetadata: type: object @@ -4447,6 +6303,7 @@ components: form: $ref: "#/components/schemas/DeploymentForm" DeploymentProcess: + description: A deployed process. type: object properties: processDefinitionId: @@ -4469,6 +6326,7 @@ components: type: string description: The tenant ID of the deployed process. DeploymentDecision: + description: A deployed decision. type: object properties: decisionDefinitionId: @@ -4501,6 +6359,7 @@ components: description: | The assigned key of the decision requirements graph that this decision is part of. DeploymentDecisionRequirements: + description: Deployed decision requirements. type: object properties: decisionRequirementsId: @@ -4526,6 +6385,7 @@ components: type: string description: The resource name from which this decision requirements was parsed. DeploymentForm: + description: A deployed form. type: object properties: formId: @@ -4677,6 +6537,7 @@ components: type: integer format: int64 mappingInstructions: + description: Element mappings from the source process instance to the target process instance. type: array items: $ref: "#/components/schemas/MigrateProcessInstanceMappingInstruction" @@ -4708,10 +6569,12 @@ components: type: object properties: activateInstructions: + description: Instructions describing which elements should be activated in which scopes and which variables should be created. type: array items: $ref: "#/components/schemas/ModifyProcessInstanceActivateInstruction" terminateInstructions: + description: Instructions describing which elements should be terminated. type: array items: $ref: "#/components/schemas/ModifyProcessInstanceTerminateInstruction" @@ -4725,7 +6588,7 @@ components: ModifyProcessInstanceActivateInstruction: type: object description: | - Instructions describing which elements should be activated in which scopes and which variables should be created. + Instructions describing an element that should be activated. properties: elementId: description: The ID of the element that should be activated. @@ -4739,6 +6602,7 @@ components: format: int64 default: -1 variableInstructions: + description: Instructions describing which variables should be created. type: array items: $ref: "#/components/schemas/ModifyProcessInstanceVariableInstruction" @@ -4866,6 +6730,27 @@ components: description: The version of the form. type: integer format: int64 + responses: + InternalServerError: + description: > + An internal error occurred while processing the request. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + Unauthorized: + description: The request lacks valid authentication credentials. + content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + Forbidden: + description: Forbidden. The request is not allowed. 
+ content: + application/problem+json: + schema: + $ref: "#/components/schemas/ProblemDetail" + securitySchemes: bearerAuth: type: http diff --git a/api/camunda/generation-strategy.js b/api/camunda/generation-strategy.js index f72390bf7c1..305cb90d63c 100644 --- a/api/camunda/generation-strategy.js +++ b/api/camunda/generation-strategy.js @@ -10,9 +10,11 @@ function preGenerateDocs() { console.log("adjusting C8 spec file..."); const specUpdates = [ - addDisclaimer(), + ...addDisclaimer(originalSpec), ...redefineCreateProcessInstanceRequest(originalSpec), ...redefineEvaluateDecisionRequest(originalSpec), + ...addAlphaAdmonition(), // needs to go before addFrequentlyLinkedDocs + ...addFrequentlyLinkedDocs(), ]; replace.sync({ @@ -26,14 +28,26 @@ function postGenerateDocs() { removeDuplicateVersionBadge(`${outputDir}/camunda-8-rest-api.info.mdx`); } -function addDisclaimer() { +function addDisclaimer(originalSpec) { + // Make this a repeatable task by checking if it's run already. + if ( + originalSpec.includes( + "Disclaimer: This is a modified version of the Camunda REST API specification, optimized for the documentation." + ) + ) { + console.log("skipping addDisclaimer..."); + return []; + } + // Adds a disclaimer to the very beginning of the file, so that people know this isn't the true spec. - return { - from: /^/, - to: `# Disclaimer: This is a modified version of the Camunda REST API specification, optimized for the documentation. + return [ + { + from: /^/, + to: `# Disclaimer: This is a modified version of the Camunda REST API specification, optimized for the documentation. `, - }; + }, + ]; } function redefineCreateProcessInstanceRequest(originalSpec) { @@ -208,6 +222,39 @@ function redefineEvaluateDecisionRequest(originalSpec) { ]; } +function addAlphaAdmonition() { + // This task is inherently repeatable, because the `match` is replaced by something that won't match again. + + return [ + { + // Matches an empty line, followed by an alpha warning, with these capture groups: + // $1: the blank line before the warning + // $2: the indentation before the warning + // $3: the warning text + from: /^([^\S\n]*\n)([^\S\n]*)(This endpoint is an alpha feature and may be subject to change\n[\s]*in future releases.\n)/gm, + + // Surrounds the warning with `:::note` and `:::`, creating an admonition. + to: "$1$2:::note\n$2$3$2:::\n", + }, + ]; +} + +function addFrequentlyLinkedDocs() { + // This task is inherently repeatable, because the `match` is replaced by something that won't match again. + + // Adds links to the Camunda Alpha REST API documentation, so that they don't have to live in the upstream spec. + return [ + { + from: /The Camunda 8 API \(REST\) Overview page/g, + to: "The [Camunda 8 API (REST) Overview page](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api)", + }, + { + from: /endpoint is an alpha feature/g, + to: "endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md)", + }, + ]; +} + module.exports = { outputDir, preGenerateDocs, diff --git a/api/generate-api-docs.js b/api/generate-api-docs.js index 2a346ac99dd..0268d6b6fac 100644 --- a/api/generate-api-docs.js +++ b/api/generate-api-docs.js @@ -2,13 +2,11 @@ const { execSync } = require("child_process"); // More strategies to come, for other APIs. 
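// Illustrative sketch only (not part of this change): how the two new spec-rewrite
// steps added in api/camunda/generation-strategy.js above compose, and why
// addAlphaAdmonition must run before addFrequentlyLinkedDocs. The sample text below
// is invented; the regexes and replacement strings are copied verbatim from the
// functions above. Runnable with plain Node.
const demoSpecExcerpt =
  "  Returns incident as JSON.\n" +
  "\n" +
  "  This endpoint is an alpha feature and may be subject to change\n" +
  "  in future releases.\n";

// Step 1: wrap the plain alpha warning in a Docusaurus :::note admonition.
const withAdmonition = demoSpecExcerpt.replace(
  /^([^\S\n]*\n)([^\S\n]*)(This endpoint is an alpha feature and may be subject to change\n[\s]*in future releases.\n)/gm,
  "$1$2:::note\n$2$3$2:::\n"
);

// Step 2: link the warning text. Running this first would break step 1, because the
// admonition regex matches the unlinked phrase.
const withLinks = withAdmonition.replace(
  /endpoint is an alpha feature/g,
  "endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md)"
);

console.log(withLinks);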
const operate = require("./operate/generation-strategy"); -const zeebe = require("./zeebe/generation-strategy"); const tasklist = require("./tasklist/generation-strategy"); const consolesm = require("./console-sm/generation-strategy"); const camunda = require("./camunda/generation-strategy"); const apiStrategies = { operate, - zeebe, tasklist, consolesm, camunda, @@ -23,7 +21,7 @@ function runCommand(command) { // API name must be passed in as an arg. const api = process.argv[2]; if (api === undefined) { - const validAPIs = string.join(apiStrategies.join, ", "); + const validAPIs = Object.keys(apiStrategies).join(", "); console.log(`Please specify an API name. Valid names: ${validAPIs}`); process.exit(); } @@ -31,7 +29,7 @@ if (api === undefined) { // The API name must be recognized. const strategy = apiStrategies[api]; if (strategy === undefined) { - const validAPIs = string.join(apiStrategies.join, ", "); + const validAPIs = Object.keys(apiStrategies).join(", "); console.error(`Invalid API name ${api}. Valid names: ${validAPIs}`); process.exit(); } diff --git a/api/operate/operate-openapi.yaml b/api/operate/operate-openapi.yaml index 2ef6348a549..823e7248016 100644 --- a/api/operate/operate-openapi.yaml +++ b/api/operate/operate-openapi.yaml @@ -84,7 +84,6 @@ paths: searchAfter: - small - 9007199254741200 - required: true responses: "200": description: Success @@ -168,7 +167,6 @@ paths: sort: - field: bpmnProcessId order: ASC - required: true responses: "200": description: Success @@ -261,7 +259,6 @@ paths: sort: - field: bpmnProcessId order: ASC - required: true responses: "200": description: Success @@ -362,7 +359,6 @@ paths: searchAfter: - 1646904085499 - 9007199254743288 - required: true responses: "200": description: Success @@ -459,7 +455,6 @@ paths: searchAfter: - 1646904085499 - 9007199254743288 - required: true responses: "200": description: Success @@ -553,7 +548,6 @@ paths: sort: - field: decisionRequirementsId order: ASC - required: true responses: "200": description: Success @@ -645,7 +639,6 @@ paths: sort: - field: decisionId order: ASC - required: true responses: "200": description: Success @@ -737,7 +730,6 @@ paths: sort: - field: decisionId order: ASC - required: true responses: "200": description: Success @@ -1483,6 +1475,8 @@ components: processVersion: type: integer format: int32 + processVersionTag: + type: string bpmnProcessId: type: string parentKey: @@ -1501,6 +1495,8 @@ components: - ACTIVE - COMPLETED - CANCELED + incident: + type: boolean processDefinitionKey: type: integer format: int64 @@ -1549,6 +1545,8 @@ components: version: type: integer format: int32 + versionTag: + type: string bpmnProcessId: type: string tenantId: @@ -1602,6 +1600,7 @@ components: - UNKNOWN - IO_MAPPING_ERROR - JOB_NO_RETRIES + - EXECUTION_LISTENER_NO_RETRIES - CONDITION_ERROR - EXTRACT_VALUE_ERROR - CALLED_ELEMENT_ERROR diff --git a/api/zeebe/generation-strategy.js b/api/zeebe/generation-strategy.js deleted file mode 100644 index 06ebef03f02..00000000000 --- a/api/zeebe/generation-strategy.js +++ /dev/null @@ -1,49 +0,0 @@ -const replace = require("replace-in-file"); -const removeDuplicateVersionBadge = require("../remove-duplicate-version-badge"); - -const outputDir = "docs/apis-tools/zeebe-api-rest/specifications"; -const specFile = "api/zeebe/zeebe-openapi.yaml"; - -function preGenerateDocs() { - hackChangesetDescription(); -} - -function postGenerateDocs() { - removeDuplicateVersionBadge(`${outputDir}/zeebe-rest-api.info.mdx`); -} - -module.exports = { - outputDir, - 
preGenerateDocs, - postGenerateDocs, -}; - -function hackChangesetDescription() { - // This is a temporary hack, until https://github.com/camunda/camunda-docs/issues/3568 is resolved. - // The OpenAPI generator plugin we're using does not use the correct `description` property - // for the `UserTaskUpdateRequest` object. Instead of picking up the actual property description, - // it picks up the description of the first merged schema in the `allOf` property (i.e. from the `Changeset` schema). - // This adjustment replaces the description of the `Changeset` schema with the current description of - // the `UserTaskUpdateRequest.changeset` property. - console.log("hacking changeset description..."); - replace.sync({ - files: `${specFile}`, - from: /^ description: A map of changes.$/m, - to: ` description: | - JSON object with changed task attribute values. - - The following attributes can be adjusted with this endpoint, additional attributes - will be ignored: - - * \`candidateGroups\` - reset by providing an empty list - * \`candidateUsers\` - reset by providing an empty list - * \`dueDate\` - reset by providing an empty String - * \`followUpDate\` - reset by providing an empty String - - Providing any of those attributes with a \`null\` value or omitting it preserves - the persisted attribute's value. - - The assignee cannot be adjusted with this endpoint, use the Assign task endpoint. - This ensures correct event emission for assignee changes.`, - }); -} diff --git a/api/zeebe/zeebe-openapi.yaml b/api/zeebe/zeebe-openapi.yaml deleted file mode 100644 index 7bc6a5925e1..00000000000 --- a/api/zeebe/zeebe-openapi.yaml +++ /dev/null @@ -1,428 +0,0 @@ -openapi: "3.0.3" -info: - title: Zeebe REST API - version: "0.1" - description: API for communicating with the Zeebe cluster. - license: - name: Zeebe Community License Version 1.1 - url: https://github.com/camunda/camunda/blob/main/licenses/ZEEBE-COMMUNITY-LICENSE-1.1.txt -externalDocs: - description: Find out more - url: https://docs.camunda.io/docs/apis-tools/zeebe-api-rest/overview/ - -servers: - - url: "{schema}://{host}:{port}/v1" - variables: - host: - default: localhost - description: The hostname of a Zeebe Gateway. - port: - default: "8080" - description: The port of the Zeebe REST API server. - schema: - default: http - description: The schema of the Zeebe REST API server. - -paths: - /topology: - get: - tags: - - Cluster - summary: Get cluster topology - description: Obtains the current topology of the cluster the gateway is part of. - responses: - "200": - $ref: "#/components/responses/TopologyResponse" - /user-tasks/{userTaskKey}/completion: - post: - tags: - - User task - summary: Complete a user task - description: Completes a user task with the given key. - parameters: - - name: userTaskKey - in: path - required: true - description: The key of the user task to complete. - schema: - type: integer - format: int64 - requestBody: - required: false - content: - application/json: - schema: - $ref: "#/components/schemas/UserTaskCompletionRequest" - - responses: - "204": - description: The user task was completed successfully. - "404": - description: The user task with the given key was not found. - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "400": - description: > - The user task with the given key cannot be completed. 
- More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /user-tasks/{userTaskKey}/assignment: - post: - tags: - - User task - summary: Assign a user task - description: Assigns a user task with the given key to the given assignee. - parameters: - - name: userTaskKey - in: path - required: true - description: The key of the user task to assign. - schema: - type: integer - format: int64 - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/UserTaskAssignmentRequest" - responses: - "204": - description: The user task's assignment was adjusted. - "404": - description: The user task with the given key was not found. - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "400": - description: > - The assignment of the user task with the given key cannot be completed. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /user-tasks/{userTaskKey}: - patch: - tags: - - User task - summary: Update a user task - description: Update a user task with the given key. - parameters: - - name: userTaskKey - in: path - required: true - description: The key of the user task to update. - schema: - type: integer - format: int64 - requestBody: - required: false - content: - application/json: - schema: - $ref: "#/components/schemas/UserTaskUpdateRequest" - responses: - "204": - description: The user task was updated successfully. - "404": - description: The user task with the given key was not found. - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "400": - description: > - The user task with the given key cannot be updated. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - /user-tasks/{userTaskKey}/assignee: - delete: - tags: - - User task - summary: Unassign a user task - description: Removes the assignee of a task with the given key. - parameters: - - name: userTaskKey - in: path - required: true - description: The key of the user task. - schema: - type: integer - format: int64 - responses: - "204": - description: The user task was unassigned successfully. - "404": - description: The user task with the given key was not found. - "409": - description: > - The user task with the given key is in the wrong state currently. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - "400": - description: > - The user task with the given key cannot be unassigned. - More details are provided in the response body. - content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - -components: - responses: - TopologyResponse: - description: Obtains the current topology of the cluster the gateway is part of. - content: - application/json: - schema: - $ref: "#/components/schemas/TopologyResponse" - ProblemResponse: - description: Response for exceptional uses cases, providing more details. 
- content: - application/problem+json: - schema: - $ref: "#/components/schemas/ProblemDetail" - - schemas: - TopologyResponse: - description: The response of a topology request. - type: object - properties: - brokers: - description: A list of brokers that are part of this cluster. - type: array - nullable: true - items: - $ref: "#/components/schemas/BrokerInfo" - clusterSize: - description: The number of brokers in the cluster. - type: integer - format: int32 - nullable: true - partitionsCount: - description: The number of partitions are spread across the cluster. - type: integer - format: int32 - nullable: true - replicationFactor: - description: The configured replication factor for this cluster. - type: integer - format: int32 - nullable: true - gatewayVersion: - description: The version of the Zeebe Gateway. - type: string - nullable: true - BrokerInfo: - description: Provides information on a broker node. - type: object - properties: - nodeId: - description: The unique (within a cluster) node ID for the broker. - type: integer - format: int32 - host: - description: The hostname for reaching the broker. - type: string - port: - description: The port for reaching the broker. - type: integer - format: int32 - partitions: - description: A list of partitions managed or replicated on this broker. - type: array - items: - $ref: "#/components/schemas/Partition" - version: - description: The broker version. - type: string - Partition: - description: Provides information on a partition within a broker node. - type: object - properties: - partitionId: - description: The unique ID of this partition. - type: integer - format: int32 - role: - description: Describes the Raft role of the broker for a given partition. - type: string - enum: - - leader - - follower - - inactive - health: - description: Describes the current health of the partition. - type: string - enum: - - healthy - - unhealthy - - dead - UserTaskCompletionRequest: - type: object - properties: - variables: - additionalProperties: true - description: The variables to complete the user task with. - type: object - nullable: true - action: - description: > - A custom action value that will be accessible from user task events resulting - from this endpoint invocation. If not provided, it will default to "complete". - type: string - nullable: true - UserTaskAssignmentRequest: - type: object - properties: - assignee: - description: The assignee for the user task. The assignee must not be empty or `null`. - type: string - nullable: false - allowOverride: - description: > - By default, the task is reassigned if it was already assigned. Set this to `false` - to return an error in such cases. The task must then first be unassigned to - be assigned again. Use this when you have users picking from group task - queues to prevent race conditions. - type: boolean - nullable: true - action: - description: > - A custom action value that will be accessible from user task events resulting - from this endpoint invocation. If not provided, it will default to "assign". - type: string - nullable: true - UserTaskUpdateRequest: - type: object - properties: - changeset: - allOf: - - $ref: "#/components/schemas/Changeset" - description: | - JSON object with changed task attribute values. 
- - The following attributes can be adjusted with this endpoint, additional attributes - will be ignored: - - * `candidateGroups` - reset by providing an empty list - * `candidateUsers` - reset by providing an empty list - * `dueDate` - reset by providing an empty String - * `followUpDate` - reset by providing an empty String - - Providing any of those attributes with a `null` value or omitting it preserves - the persisted attribute's value. - - The assignee cannot be adjusted with this endpoint, use the Assign task endpoint. - This ensures correct event emission for assignee changes. - type: object - nullable: true - action: - description: > - A custom action value that will be accessible from user task events resulting - from this endpoint invocation. If not provided, it will default to "update". - type: string - nullable: true - Variables: - description: A map of variables. - type: object - additionalProperties: true - Changeset: - description: | - JSON object with changed task attribute values. - - The following attributes can be adjusted with this endpoint, additional attributes - will be ignored: - - * `candidateGroups` - reset by providing an empty list - * `candidateUsers` - reset by providing an empty list - * `dueDate` - reset by providing an empty String - * `followUpDate` - reset by providing an empty String - - Providing any of those attributes with a `null` value or omitting it preserves - the persisted attribute's value. - - The assignee cannot be adjusted with this endpoint, use the Assign task endpoint. - This ensures correct event emission for assignee changes. - type: object - additionalProperties: true - properties: - dueDate: - type: string - format: date-time - description: The due date of the task. Reset by providing an empty String. - nullable: true - followUpDate: - type: string - format: date-time - description: The follow-up date of the task. Reset by providing an empty String. - nullable: true - candidateUsers: - type: array - description: The list of candidate users of the task. Reset by providing an empty list. - items: - type: string - nullable: true - candidateGroups: - type: array - description: The list of candidate groups of the task. Reset by providing an empty list. - items: - type: string - nullable: true - ProblemDetail: - description: > - A Problem detail object as described in [RFC 9457](https://www.rfc-editor.org/rfc/rfc9457). - There may be additional properties specific to the problem type. - type: object - properties: - type: - type: string - format: uri - description: A URI identifying the problem type. - default: about:blank - title: - type: string - description: A summary of the problem type. - status: - type: integer - format: int32 - description: The HTTP status code for this problem. - minimum: 400 - maximum: 600 - detail: - type: string - description: An explanation of the problem in more detail. - instance: - type: string - format: uri - description: A URI identifying the origin of the problem. - securitySchemes: - bearerAuth: - type: http - scheme: bearer - bearerFormat: JWT diff --git a/docs/apis-tools/administration-api/tutorial.md b/docs/apis-tools/administration-api/tutorial.md index f9d02023875..af7c6ccd38c 100644 --- a/docs/apis-tools/administration-api/tutorial.md +++ b/docs/apis-tools/administration-api/tutorial.md @@ -27,12 +27,16 @@ Make sure you keep the generated client credentials in a safe place. 
The **Clien ## Set up authentication -If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client id and client secret. Then, we return the actual token that can be passed as an authorization header in each request. +If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client ID and client secret. Then, we return the actual token that can be passed as an authorization header in each request. To set up your credentials, create an `.env` file which will be protected by the `.gitignore` file. You will need to add your `CLUSTER_ID`, `ADMINISTRATION_CLIENT_ID`, `ADMINISTRATION_CLIENT_SECRET`, `ADMINISTRATION_AUDIENCE`, which is `api.cloud.camunda.io` in a Camunda 8 SaaS environment, and `ADMINISTRATION_API_URL`, which is `https://api.cloud.camunda.io`. These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CONSOLE_CLIENT_ID`, `CAMUNDA_CONSOLE_CLIENT_SECRET`, `CAMUNDA_CONSOLE_OAUTH_AUDIENCE`, and `CAMUNDA_CONSOLE_BASE_URL`. Locate your `CLUSTER_ID` in Console by navigating to **Clusters**. Scroll down and copy your **Cluster Id** under **Cluster Details**. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/docs/apis-tools/camunda-api-rest/camunda-api-rest-authentication.md b/docs/apis-tools/camunda-api-rest/camunda-api-rest-authentication.md index db50db49ed0..7571524637f 100644 --- a/docs/apis-tools/camunda-api-rest/camunda-api-rest-authentication.md +++ b/docs/apis-tools/camunda-api-rest/camunda-api-rest-authentication.md @@ -121,7 +121,7 @@ curl --header "Authorization: Bearer ${TOKEN}" \ ${ZEEBE_REST_ADDRESS}/v2/topology ``` -A successful response includes [information about the cluster](/apis-tools/camunda-api-rest/specifications/get-cluster-topology.api.mdx). For example: +A successful response includes [information about the cluster](/apis-tools/camunda-api-rest/specifications/get-topology.api.mdx). For example: ```json { diff --git a/docs/apis-tools/camunda-api-rest/camunda-api-rest-overview.md b/docs/apis-tools/camunda-api-rest/camunda-api-rest-overview.md index f0190956ca6..820ed96d30e 100644 --- a/docs/apis-tools/camunda-api-rest/camunda-api-rest-overview.md +++ b/docs/apis-tools/camunda-api-rest/camunda-api-rest-overview.md @@ -15,7 +15,7 @@ Ensure you [authenticate](./camunda-api-rest-authentication.md) before accessing For SaaS: `https://${REGION}.zeebe.camunda.io:443/${CLUSTER_ID}/v2/`, and for Self-Managed installations: `http://localhost:8080/v2/`. 
:::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Zeebe component. ::: @@ -28,7 +28,7 @@ See [the interactive Camunda 8 REST API Explorer][camunda-api-explorer] for spec You can change the `maxMessageSize` default value of 4MB in the [Gateway](../../self-managed/zeebe-deployment/configuration/gateway.md#zeebegatewaynetwork) and [Broker](../../self-managed/zeebe-deployment/configuration/broker.md#zeebebrokernetwork) configuration. -If you do change this value, it is recommended that you also configure the [Deploy resources](./specifications/deploy-resources.api.mdx) REST endpoint appropriately. By default, this endpoint allows single file upload and overall data up to 4MB. +If you do change this value, it is recommended that you also configure the [Deploy resources](./specifications/create-deployment.api.mdx) REST endpoint appropriately. By default, this endpoint allows single file upload and overall data up to 4MB. You can adjust this configuration via the following properties: @@ -39,17 +39,4 @@ spring.servlet.multipart.max-request-size=4MB For example, if you increase the `maxMessageSize` to 10MB, increase these property values to 10MB as well. -### Query API - -:::warning -Query API endpoints do not currently support [resource authorizations][], and can be used to expand user access to restricted resources. If you use resource permissions, allowing public access to those endpoints is not recommended. -::: - -All Query API endpoints contain an `(alpha)` declaration. Those endpoints are not accessible by default in Camunda 8 clusters. - -You can enable the [alpha feature][] search endpoints by setting either the configuration property `camunda.rest.query.enabled` to `true`, -or the environment variable `CAMUNDA_REST_QUERY_ENABLED` to `true`. 
- [camunda-api-explorer]: ./specifications/camunda-8-rest-api.info.mdx -[resource authorizations]: /self-managed/concepts/access-control/resource-authorizations.md -[alpha feature]: /reference/alpha-features.md diff --git a/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx index d9ecbdd6d26..888d6b44c49 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx @@ -5,7 +5,7 @@ description: "Iterate through all known partitions and activate jobs up to the r sidebar_label: "Activate jobs" hide_title: true hide_table_of_contents: true -api: eJztWVtv4zYW/isHfNkJVrE93Wl36rYLuMl069k2DRJPu0CSB0o6tphQpIak7LiG//vikLrZljMe7O7bBDBiiYfn+p0L6Q1zfGHZ+I691zF7iFiKNjGicEIrNmZTh4Y7BJcZXS4y4FLCk9IrBQU3ThCVBa5S4IkTS6J81LGFsgCnwWUIBj+WaB2mkPNnkZf54F6xiFWvf9Tpmo03/lEYTNnYmRIjlmjlUDla4kUhRcJJ0vDRklIbZpMMc07f3LpANmY6fsTEsYgVRhdonEDbrm72bCK1HnUMtBoBt5DiXChMQSiv8o/Xv15BYXSC1sIrHCwG8P2fiDGOHbdPl0TsDfcMfrhnBV/nqNy5RbMUCd4zGP7jzFtZaWedEWrBthFbafOEpl8lxXMEPfcqBLraqUItoFLaRpBr6+QaSospzLUBqRcLoihKU2iL9kBuxFQpJY8lBvduI+ZEjrp0h4pw7xmDrjTkET53aMBlwkJCoV8JKUFpBzE2EU8hXgNX2mVoAlWpnJBe40oOvBIKcnsGGbcQIyowyJMM066XhHK4QMMiNtcm5y68+uYN+S3nz+91bGd6Ugntd2EFsYBBp1tQxutgRAW7Twn921ckdI4uyX7nRgTXHbpKCusoYsuKxsv0uwhVNcya1e9AzAHzwq0jn0dLYUUssbOdu8ZrxLcOv1Y+0LRkE100KCHuPiIxNiHrepQbw9cH4Y+YcJh38qPB57bJy9kxfMzalG5EJzovJBIQVhkqMkIiJ8+ooKKwHaxo04Cq4TTbRckApvO+5R9gFAGnbOWldA22hPW5cGzX97RLakoQLSUlirCQCksOSX3p6myitdYckeeYCu5QriPAJapgoNIHZg1OAjIVV687G4/6khIVV26a2pewNr20Pvye1npcrDKRZDtwJ/wfwuBY2A9UqXAQCvJdIG9rRl82PtCyIwbURiYNbm+qfNtuA1NbaGVDbf5qNOqHV21pixkyZ8A+oyucyrT10ZEG4olajx3z5PHd/8J1f6l6QqoCUCrxsUQQKSon5gJNk+mPOj6tNh5vcrTSrRWvbKZLSZ2YStQq4w5W3LYt+qyvZVWNcKqs4yrBowY96vgvtmmboiInO08zo9rZttdp2i8oLnLVyJledgzsyE8bNi/Y1Mr6HY0VAUWHApdh8XRBLzWVA9EvIeS/FRk8ixJpPjnmT26tToTPCZpvoCKH6WWf62pmn4JDBWwyogL3uh5j9gRGNe1KuKwawXba3D6mTh0aktI6nf+MPEXTW1Mt+poQ6CALhM00mJZkM+Q6Rd83vuuMRRZotxFcij8xhfe3v11BqpOSXNNTVHia+nhxed0pEFXZ/8yZMFT7to75uWanWLTBMuhMVYt64p7rUnkHVGQgce7C3B5YNgWDyxVf0+QGhbbCiSWenYb2FHkqheqpT76T1oUp4Wp3ouQLLlQElnDonf3havpvwEJToxM5Wsfz4jQUNNPVoQ4TGsKa4asex6qDi88Fj8PIjwSl18t1hzLS5HOjXTf5/jbVlrNAB46KtF4pe9gTOlNb3XvrfpzSWW7bXdnryqEPV235zbFOXBi9FCmlAnec5h2a/JdcimreOdKPC6NjiflfP9WXJ3AdKCFFx4WE4MFwIiPCOJzJ7m5+uoBv33z994dXmXOFHQ+Hq9VqYObJOabCaTPQZjE084Q+RHc2gFmGhs4Da4+rJiDQNmiwBSZiLpI64JXavmvuzHOfOFzuH7YaCJZGsP3z9AQ+3EwPKuKO6O6YyHisSzeOJVdPrA3nodB9KbbMc27WnRLaEbCNmHXcld0R5mgW7/MmYPw8m11DYAGJTrEaWoStBZERuVB0GGPjN6ORnxnD0zejkS8MFPETLFGAz4XkKmTcnjl0XtAGK/x4w+om8T+KjDZiIfblDnaSrgLxZbAopNTXfSk1UUBeNoRDNEYb0ElSGuMPTkI2ja6WXc1lX3LtS659ybVjuUaXQ+gynbIxK7S/2im4y9iYDenwNmzbNYsY3dD5WfBuw0oj2ZhtQsJsx8PhJtPWbcebQhu3HS4pGjuzAy2HxKoBI3XCZRaEHgaOFrrz2wXPS5VyeAs3725n8E/ucMXX3pEkcpf129HbUS9XIj3CcXI9hWBhgF2nFNRsKad72QbiUxhvtw/kyKQ0wq1vaVtwT4zcoJmU5PsGDpU8z52eAxGLqi8/1SB5/8fMx5nK2E17NfzumdNdTB++6qm5fdPcaY567wtHBxd6d/Xeh8N7r9HOdUxL6VE/116jCpOHziLk1AdKNhq8PsT/9dSncaLzvFS+lquFP/8A7zg/kaV15PSISZEgTWzjDSNMdcT+ElagOsLC6wEBJ6C7LuEL4bIyHiQ6HyZhW/M/ljoe5lyoYSXCDi8mv364upyc/zK9eHd1++789WA0cM/OB4hSLOeqo8dk79Jpx9JN27f+bz8lVMhw+OyGheRCEfi9/ZuqFNyxSrdOMXiIqoS+Y5tNzC1+MHK7pdcfSzRrNr57aPOfnrYRCydEXz3oWmPMLoJx57NwQ7bksvTXRPuXU9uo3jFJEizci7QPnYp2/dvtjLKl+pmEzqJszAxf0U8ofMXG7J7dMxYx7R3uE9G/3zDJ1aLkC6IPfOnvP0L6ChY= +api: 
eJztWVtv4zYW/isHfNkJVrE93Wl36rYLuMl069k2DRJPu0CSB0o6tpihSA1J2XEN//fFIXWzLSce7O7bBAgSiYfn+p0LqQ1zfGHZ+I691zF7iJgu0HAntJqmbMx44sSSO3yvY8silqJNjChomY3Z1BEpgsuMLhcZcCnho9IrBQU3ThCVBa5SqLnAo44tlAU4DS5DMPipROswhZw/ibzMB/eKRax6/aNO12y88Y/CYMrGzpQYsUQrh8rREi8KKRKv7vDRklIbZpMMc07/uXWBbMx0/IiJYxErDBnnBNp2dbNnE6n1qGOg1Qi4hRTnQmEKQnmVf7z+9QoKoxO0Fl7hYDGA7/9EjHHsuP14ScTecM/gh3tW8HWOyp1bNEuR4D2D4T/OvJWVdtYZoRZsG7GVNh/R9KukeI6g516FQFc7VagFVErbCHJtnVxDaTGFuTYg9WJBFEVpCm3RHsiNmCql5LHE4N5txJzIUZfuUBHuPWPQlYY8wucODbhMWEgo9CshJSjtIMYm4inEa+BKuwxNoCqVE9JrXMmBV0JBbs8g4xZiRAUGeZJh2vWSUA4XaFjE5trk3IVX37whv+X8ieA505NKaL8LK4gFDDrdgjJeByMq2L0k9G9fkdA5uiT7nRsRXHfoKimso4gtKxov0+8iVNUwa1a/AzEHzAu3jnweLYUVscTOdu4arxHfOvxa+UDTkk100aCEuPuIxNiErOtRbgxfH4Q/YsJh3smPBp/bJi9nx/Axa1O6EZ3ovJBIQFhlqMgIiZw8o4KKwnawok0DqobTbBclA5jO+5Z/gFEEnLKVl9I12BLW58KxXd/TLqkpQbSUlCjCQiosOST1pauzidZac0SeYyq4Q7mOAJeogoFKH5g1OAnIVFy97mw86ktKVFy5aWqfw9r00vrwe1rrcbHKRJLtwP0xlPI9GBwL+4EqFQ5CQb4L5G3N6MvGB1p2xIB6zKTB7U2Vb9ttYGoLrWyozV+NRv3wqi1tMUPmDNhndIVTmbY+OtJAPFEvw0PtXvD2cQn/wnV/OfuIVCmgVOJTiSBSVE7MBZqmGjzq+LT6ebwR0kq3nryymS4ldWsqY6uMO1hx27bxs762VjXLqbKOqwSPGvSo47/YprWKipzsPM2MamfbgmmA6RMUF7lq5EwvOwZ25KcNm2dsamX9jsaKgLRDgcuweLqg5xrPgejnEPLfigyeRYk0wxzzJ7dWJ8LDnWYgqMhhetnnuprZS3CogE1GVOBe16POnsCopl0Jl1Vj2k4r3MfUqYNFUlqn85+Rp2h6665FXzcCHWSBsJkY05Jshlyn6HvLd53RyQLtNoJL8Sem8P72tytIdVKSa3oKD09THy8urzsFomoNnzk3ho7Qlig/++wUizZYBp2palFP3HNdKu+Aigwkzl2Y7QPLpmBwueJrmu6g0FY4scSz09CeIk+lUD31yXfbujAlXO1OnXzBhYrAEg69sz9cTf8NWGhqhiJH63henIaCZgI71GFCg1ozoNUjW3W48bngcRj5saH0ernu4EaafG6060Ggv/O05SzQgaMirVfKHvaEzmRX9+e6Z6d0GNx2V/Y6d+jVVet+c6xbF0YvRUqpwB2nmYhOB0suRTUTHenZhdGxxPyvL/XuCVwHSkjRcSEheDCc2ogwDue2u5ufLuDbN1///eFV5lxhx8PharUamHlyjqlw2gy0WQzNPKFfojsbwCxDQ2eGtcdVExBoGzTYAhMxF0kd8Ept3zV3Zr4XDqD7B7IGgqURB2fuCXy4mR5UxB3R3VGS8ViXbhxLrj6yNpyHQvel2DLPuVl3SmhHwDZi1nFXdkeYo1m8z5uA8fNsdg2BBSQ6xWpoEbYWREbkQtGBjY3fjEZ+rgxP34xGvjBQxE+wRAE+FZKrkHF75tCZQhus8OMNq5vE/ygy2oiF2Jc72Em6CsSXwaKQUl/3pdREAXnZEA7RGG1AJ0lpjD9cCdk0ulp2NZd9ybUvufYl147lGl0gocs03XYW2l//FNxlbMyGdHgbtu2aRYxu8fwseLdhpZFszDYhYbbj4XCTaeu2402hjdsOlxSNndmBlkNi1YCROuEyC0IPA0cL3fntguelSjm8hZt3tzP4J3e44mvvSBK5y/rt6O2olyuRHuE4uZ5CsDDArlMKaraU071sA/EpjLfbB3JkUhrh1re0LbgnRm7QTEryfQOHSp7nTs+BiEXVPz/VIHn/x8zHmcrYTXt9/O6J031NH77qqbl909x7jnrvFEcHl3539d6Hw7ux0c6VTUvpUT/XXqMKk4fOIuTUB0o2Grw+xP/11KdxovO8VL6Wq4U//wDvOD+RpXXk9IhJkSBNbOMNI0x1xP4SVqA6wsLrAQEnoLsu4QvhsjIeJDofJmFb8zeWOh7mXKhhJcIOLya/fri6nJz/Mr14d3X77vz1YDRwT84HiFIs56qjx2TvYmrH0k3bt/5vnxsqZDh8csNCcqEI/N7+TVUK7lilW6cYPERVQt+xzSbmFj8Yud3S608lmjUb3z20+U9P24iFE6KvHnStMWYXwbjzWbhFW3JZ+mui/QusbVTvmCQJFu5Z2odORbv+7XZG2VJ9SqGzKBszw1f0mYWv2Jjds3vG6LOP95//3kLvN0xytSj5gugDX/r5Dxt+IOU= sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null @@ -37,7 +37,11 @@ Iterate through all known partitions and activate jobs up to the requested maxim The list of activated jobs. -
Schema
    jobs object[]
  • Array [
  • customHeaders object
    +
    Schema
      jobs object[]
      + +The activated jobs. + +
    • Array [
    • customHeaders object
      a set of custom headers defined during modelling; returned as a serialized JSON document diff --git a/docs/apis-tools/camunda-api-rest/specifications/add-user-to-group.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/add-user-to-group.api.mdx new file mode 100644 index 00000000000..cd65f5d47d7 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/add-user-to-group.api.mdx @@ -0,0 +1,64 @@ +--- +id: add-user-to-group +title: "Assign a user to a group" +description: "Assigns a user to a group." +sidebar_label: "Assign a user to a group" +hide_title: true +hide_table_of_contents: true +api: eJztWMFy2zYQ/RUMTsmUJuXESR3eNI6duk1Sjy23B8cHEFiJSECAAUDLHA7/vbMAKSuS7GQ8PeqgoUgC+7DY9wDwddSzhaP5Df1gTVPT24SaGizz0uhzQXPKhLh2YGcmvk+oAMetrLEBzenUObnQjjDSOLDEG8LIAlumXzRNaM0sq8CDRYiOalYBzWlo8Be0NKESg9TMlzShFr430oKgubcNbCLNSoiRyTdoU5pQx0uoGM076tsaw0rtYQGWJnRubMV8fPT2iPZ9ssLGYT4HOqT3DORbDO5qox047PFq8govjwAsmSMsTCkI4hrOwbl5o1SLM+vHGUhpn9CjyeSJQNw0ShBtPClgFTH9oj8ZC0SAZ1I5wiyQ2po7KUAQqQPAOFhSGNHGInKjPWiPaKyuleSBHVltTaGg+u2rQ+hubVY2KEIuYssBl5jiK3BPmCOxYRHRby7PTsi7oze/374ova9dnmXL5TK1c34AQnpjU2MXmZ1z/GG7lymZlWCBVKwNaQohEZMpzKoG6yU44mrgci75OIPDsAmWLuY3FDEOC0m76vxQ4lWpnbdSL9Yr3Vi5LQtyfXlOpADt5byVerENHfrMWaMwBitM4/NCMf0Na+ulVztBN1FcU1XMtsTMdwD0CXWe+cb9lKmvX23FRib9MZtdkBiCcCOAzI0lvpRuBMIkKqll1VQ0P5pMElqx+3j3djLpMSZW/Bcy0QTua8V0oNZmOlKT6oG3ITGpnWea/1+VMVYu5CZuigvHWAs6kPh9zKjv+6DB19t8PzO2kEKADvQkuLSA80S6IEemlFmCSPe62utqr6tHdXW0e2+LBwBjh91S+jLuivIONO7OYQNFmc1No/ci24tsL7InRPbuqZPotrakI0xZYKJ9OKP+eCzdy20vt73cdsvtza7vtakmOMsWeQjWGksM5421IMiylCqEx2/AEXs4TO6/y/Za22vtMa31Ca3Alwbtq9q4QB20eXKahX3KZd3oQPUZbnYu6wZbqEd3B+zdaFg1VtGcdlFCfZ5lXWmc7/OuNtb32R3W545ZyQoVWYmvo9RGCinDmSrjMLZLiS/QmRpTPGFVowUjx+Ty9GpGPjAPS9aGqUXIH0MfT44nO6Ni00ciTi/OScwwEnFtcRjDosp3ho2NfyVw8L0c8MZK315htzg9BTALdtpgNVYEGfBCdLyPjWgy/DkbafPnv7NQeannJnQfGLA9EKwKWBdHPkkPt9l2cR5Ew01VNTqsnHoRzzxsLTGuGucxoYQqyUG7wOvBSRybfYxvyD8RkRymWJTInHHBXEhfNkXKTZXx2G11LZQpsopJnQ0QLjuZfrr+/H568PH85PTz1enBYTpJ/b0PySOhK6bXxhEt2G0HdjPp7mHD+JltOxTGw73PasWkRqqEjLpBSjfRxHU0ofmanRvUhM9Gm/U2GSRxQ7uuYA6urep7fPy9AdvS/Ob2QUFBckI6/C9oPmfKbXqx60m8uBxc25dkyxzemcHwkOk2yFY1eEcT+g3adVca3eJnj2LdJ37GIMZ562/7hJbABNgwK/HtlHOo/Vq/R/d91N9qEbz4+wqV8x9I3zlT +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

      Assign a user to a group

      + + + +Assigns a user to a group. + +## Request + +

      Path Parameters

      + +The user was assigned successfully to the group. + +
      + +The user could not be assigned. +More details are provided in the response body. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +Forbidden. The request is not allowed. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +The group or user with the given key was not found. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +The user with the given key is already assigned to the group. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +An internal error occurred while processing the request. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      diff --git a/docs/apis-tools/camunda-api-rest/specifications/assign-group-to-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/assign-group-to-tenant.api.mdx new file mode 100644 index 00000000000..0fd9e5aadda --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/assign-group-to-tenant.api.mdx @@ -0,0 +1,59 @@ +--- +id: assign-group-to-tenant +title: "Assign a group to a tenant" +description: "Assign a single group to a specified tenant." +sidebar_label: "Assign a group to a tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWFtT4zYU/itn9LQ7NXFg2S31W4aFLe2WYSC0D5QH2T6JtStLRhdCxuP/3jmSHQIJLdvhMQ8Zx9K56nyfZJ2WOT63LLthU1RcOXabMN2g4U5odVayjHFrxVx9Mdo3U90LJaxEWxjRkBTL2CTIAAcr1FwizEkanKaRBgsxE1iCC7ojlrCGG16jQ0OOW6Z4jSxjcf53XLKECbLacFexhBm888JgyTJnPD53Pa0QvBJ3HkGUqBz5MqBn4Cpcc2mLCmvOspa5ZUPehHI4R8MSNtOm5i4OfTpkXZesQgp5vGlEweKPBnRLLm2jlUVLGgfjA3psuo3rvuAWrC8KtHbmpVxCLCGVQD9Zli5hh+PxdlON0feixBJK7jgIC0o7uOdSlBR9oZVD5UiVN40URcBL2hidS6x/+mbJTruW5DO8wEWUhBIdFxJ0/g0LB9xCFMyxBKHg5vL0GH45/Pjz7bvKucZmabpYLEZmVuxhKZw2I23mqZkV9CO59yOYVmgQar6EHIGXpSCfXFJCDRon0A6YLIb16MMGqsTob8WSoSYxLELsSvmxYqvKWWeEmq8XzhuxyRG4vjwbELEUar7pOujMuJdkg+fauyyXXH2nQjnh5Fanz71YX9fcLAfAPXXQJcw67rz9T+B9ONiwTbD4dTq9gGgCCl0izLQBVwk7OKIkaqFE7WuWHY7HCav5Q3z7NB53ZJMq/opMFOBDI7kK0HqejlBQa4M9fkJiQlnHVfFWldFGzMVzvyPaHoZasB7En2NGXdcFQn3YxPupNrkoS1QBnkD7B1o3sIpLqRe449WOVzte/QuvDjfxfq4dzLRXZeRVPNdAm7WDUK1EdvTa0WtHrxfo9XHbd+BEAa2yIRyiMdqALgpvDJawqIQM5ukrc/Ddn2sRizuu7bi249om17qE1egqTXfrxgfk0K0yY2k8vmzarm7CXRoOMpu2w020o6sjmvvh7uyNZBlrI4u6LE3bSlvXZW2jjevSeyrRPTeC5zICk6Yj2wYUSV1wGYa3VZMm6DY8ZHnMa69KDkdweXI1hS/c4YIvw+qSy6emj8ZH461WSfQFi5OLM4gZRiyu7Q+DWSL6VrNR+DWGw6XaYuGNcMsrUovLkyM3aCaeKrLCSO8vWKf3KMSS/s/pgJzf/pqG4gs100G9B8FmIFQVNDZGPh7tbwLu4izwptB17VXYPNUcFsJVwNcSK6S3jhJKmBQFKhug3XcvBrGvcQb+jB5hf0RFicgZ9sy5cJXPR4Wu0yKqrZ651Hlac6HS3oVNjyd/XJ9/nux9PTs+Ob862dsfjUfuwYXkG21dzdVaHKvu0FpbyG1tJLWPp8aP9pT6Ujl8cGkjuVAEnpBj2xPspu8wWZawbL3ZFDlGo6t+z23SE+WGtW3OLV4b2XU0fOfRLFl2c/vIq0DEUlj6X7JsxqV93hRaT+zdZd8+eg+vbF5tza0f5GoZKC49vbGEfcflk2YatbPePrhVH+t/xLZa5e62S1iFvEQT1jBOT4oCG7em+OLnA3F4tZdeXBP5/gFDXylv +sidebar_class_name: "put api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

      Assign a group to a tenant

      + + + +Assign a single group to a specified tenant. + +## Request + +

      Path Parameters

      + +The group was successfully assigned to the tenant. + +
      + +The provided data is not valid. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +Forbidden. The request is not allowed. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +Not found. The tenant or group was not found. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +An internal error occurred while processing the request. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      diff --git a/docs/apis-tools/camunda-api-rest/specifications/assign-mapping-rule-to-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/assign-mapping-rule-to-tenant.api.mdx new file mode 100644 index 00000000000..880eab7995f --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/assign-mapping-rule-to-tenant.api.mdx @@ -0,0 +1,59 @@ +--- +id: assign-mapping-rule-to-tenant +title: "Assign a mapping rule to a tenant" +description: "Assign a single mapping rule to a specified tenant." +sidebar_label: "Assign a mapping rule to a tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/is7OCVTWpQdJ3V50zh26zbxeGy5Pbg+gORKRAICNB6WNRz+984SpEw93LoZH3XQSCIW+8D3fQCxNXN8bllyx6aouHLsPmK6QsOd0OoiZwnj1oq5+sqrSqj5tZc41Z1pxHK0mREV2bKETVpL4GCFmkuEMswB4yWC0zRQYSZmAnNwrYsRi1jFDS/RoaEsaqZ4iSxhYfwPXLKICXJecVewiBl88MJgzhJnPG5mMC0QvBIPHkHkqBzFMqBn4AochLRZgSVnSc3csqJoQjmco2ERm2lTchcefTpmTROtUurKedOchkv0fzO7p8i20sqipRlH4yP62o6+hsOCW7A+y9DamZdyCQFfgkSvLVMTsePxeLfHyuhHkWMOOXcchAWlHTxyKXIqItPKoXI0lVeVFFlLprgyOpVY/vTNkp96UOsGjeAqWEKOjgsJOv2GmQNuIRimmINQcHd9fgq/HH/8+f5d4VxlkzheLBYjM8sOMBdOm5E289jMMvqQ3fsRTAs0tCBLSBF4nguKySUVVKFxAm3P0axfjy5tIEBGfysW9dCEtIjBq8nPwK0AtM4INR/i543Ylg7cXl/0/FgSWFuh2zkz7iX54Kn2LkklV98JKCec3Bl0M4r1ZcnNsqffeoAmYtZx5+1/8u/D0ZZvosVv0+kVBBeQ6Rxhpg24Qtg+EBVRCiVKX7LkeDyOWMmfwr9P43FDPgnxV1SiAJ8qyVVLrc1yhIJSG+z40xYmlHVcZW+FjDZiLjbjjmi76LFgHYk/h4qapmkF9WGb7+fapCLPUbX0BNpN0LpeVVxKvcC9rva62uvqX3R1vM33S+1gpr3Kg67CuQbabJ+HamW5V9leZXuVvaCyj7teBycKaJUN8RCN0QZ0lnljMIdFIWTrnl42+9jd8Ra4uNfaXmt7rW1rrYlYia7QdP+ufMscumomLA6nmI3r1QW5ibvz7IDOMxvXz/fUhm6UaB77u7U3kiWsDnJqkjiuC21dk9SVNq6JHwmrR24ET2VgKA0H2fV0kjrjsn28C1YaoNtyX+4pL73KOZzA9dnNFH7lDhd82S4zhVx3fTI+Ge/0SqYveJxcXUCoMJBysFH0bknxO90G49c4bu/aFjNvhFve0LSwPClyg2biCZoVWbp4rXf6H4xY1P047yn0+1/TlgVCzXQ7vWPDdiKEChobMh+PDreZd3XRCijTZelVu4uqOSyEK4APCsukt44KipgUGSrbcrzrbvRmX8II/BkiwuGIQAnM6TfPuXCFT0eZLuMsTFt9p1KnccmFirsQNj6dfL29/Dw5+HJxenZ5c3ZwOBqP3JNri6+0dSVXgzxWTaTt7pHb2Xaqn0+RH+xAdcA5fHJxJblQRKW24rrT3V3Xj7IsYsmwNbUmPRocNInuo04/d6yuU27x1simoccPHs2SJXf3z3Jr9ZkLS79zlsy4tJudpGGh7667ntN7eGXPa2eR3UOulq3ypad/LGLfcbnWg6Mu2Nsnt9n8+oEUB8vd3DcRK5DnaNrFDAaTLMPKDaa++J5BGl9tule3JM5/AE8DRis= +sidebar_class_name: "put api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

      Assign a mapping rule to a tenant

      + + + +Assign a single mapping rule to a specified tenant. + +## Request + +

      Path Parameters

      + +The mapping rule was successfully assigned to the tenant. + +
      + +The provided data is not valid. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +Forbidden. The request is not allowed. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +Not found. The tenant or mapping rule was not found. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +An internal error occurred while processing the request. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      diff --git a/docs/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx index a5aa2c555c1..77758fd209c 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx @@ -5,7 +5,7 @@ description: "Assigns a user task with the given key to the given assignee." sidebar_label: "Assign user task" hide_title: true hide_table_of_contents: true -api: eJztWFtv2zYU/isHfFmLKba7pV2rNzdtt2y9BImzPSQBQlPHNhuKVHmxIxj678MhJTu+pC22PjqAEUk8PNfvO5TOknk+dSy/YpcOLXju7thNxgp0wsrKS6NZzobOyal2wCF0QrCQfgZ+hjCVc9RwhzV48+ABj3sQeyxjFbe8RI+WDC2Z5iWynJGuEXd3f2HNMibJUMX9jGXM4pcgLRYs9zbgtjejGUZzZhLNrV3yprVKNp2YYclZvmS+rsic1B6naFnGJsaW3KdHL45Z09wkk+j8a1PUtGfbA2G0R+1piVeVkoKTM/3Pjjxa7hoz488oPIVuTYXWS3Rxb5sUut4NqluFibGbofVgY70MzoM2HsYIWFa+BmPhVgelbin01gfnrdRTljFa4GOFLJ9w5bDJGFfKLD7N0VpZ7HHmdQ0FTnhQPot+xOxKBxZbFwqQE5AeFtwBVxZ5UXfeFT24QA9+Jh0V5DaavKVLiz5YDVwDWmssSA0uiBkI7tClCKOhGJ2foYaJtC4GGfTKsDf0YHXLp1zqHlw6TCYXtK82AWZ8nhLooJLiTuopTKwpYWpNqJKhLwEDRi8ri3PUHiwXCMLoQlImXO9ar9M5NkYh1xv5JHBQOkVK3HYehyCC86aEJABzrgL5yT0spFIxECHQOTlWmNxbozl6RDl3QfmV+zFI1EVlpPYg9dwkKPbgdBIhUVkzlwUWWawPWWlLSXFetwi8Zhuh7UFKjKzJmJeebmN3ILKmVlCi9ueJMaxpSM6iq4x2Cea/DI73I3wV3U+uLSFpSjAqPgfnseixJmPHg8HXKBI37dB/T0cSXLc0EaasFJJ++GAsQoGeS+WAW1yljBBJ+7tYYGyKOiXqEf5X1owVlj/v9oFtIJwlydYupP4A3EESHCfrV+fvTuDV8fPfbp7MvK9c3u8vFouenYgjLKQ3tmfstG8ngn4k9zTyxiKUvI5wKhJ0uYJ15wFXoZATKboO3boNVP4NIDzSttLqcgcuq0YarGQ7hwZcnp+CLFB7OakJwDum454ITpYzPjbB52PF9R1bA2/X6LYVF8qS29VxsGmgyZjz3Af3zYPg1192dBPg/hiNziCpAGGKrjdL1xmiIEqpZRlKlh8PBhkr+X26ezEYNKSTKv4dkWjA+0pxHaG1HY7UUK5xGwOT2nmuxY+qjLFyKrft9tjDJtCC+E2KKBH/+Jtc30dMYjwxc2KCLnoHgh0IdiDYowR79R8IJl13mi2s0dOYYQQRrEXtVX04BQ8kPZD0x5H0+b4X1qEGyrIlHKZPHiMiAwtYzKSK6uPbf2u7/QY9cO3AtQPXHuNak7ES/cwUNC4yLkKHpkY569OBeEQHousvHwyXmv76q5EGQ2jn3SQqWMVytkwkavJ+fzkzzjf5sjLWN/05VWjOraRv4lhQWk5k60CkjOBqlhzZLSYt0LirC/KEl0EXHF7C+duLEfzOPS54HZNLJjdVvxy8HOzVSqKPaByenUKKMEHxQXvo1BLP96pNwt+jOI7MHIpgpa8vaFtKzxi5RTsMVI8VRFp7UXscpEQhlrUX7zrg/PnPKNaeWtv5ehz39p7Tp/vm+GyNu61RVhrXdQOZTixid2KiUy2ydsOjWqN1aeOg92wXxWenkYzClGXQsSPraXr54g/SJRQNMizRVEmB2kXf26FnJ/Y+rcDfySI861GpEx67RjyVfhbGPWHKvkjbVv/Hyoz7JZe635pw/ZPhh8uPb4ZH709P3n68eHv0rDfo+XsfU0pEKbl+4Eea46zfIbeDXa4PoP89/m1x4PHe9yvFpaZ6xFCXLXev2Jq7LGP55mj4AX1vspaCV2y5HHOHl1Y1DT3+EtDWLL+6WTM2UryQjq6Ldvj5lTCfnLdD36fwfSPmvXG1D7muY+9Qge5Yxu6w3pp5NzdNxmbIC7TR0yRxkvw5GpGetYadqXOTdTuGQmDlH5HdeF8g1q6a59mnixGRsJ14l4bowyxf0ACeL1jOrtk1eW5iriK/4/MlU1xPA5+SfNJLf/8COgNsig== +api: 
eJztWFtv2zYU/isHfFmLKba7pV2rNzdtt2y9BImzPSQBQlPHNhuKVHmxIxj678MhJTu+pC22PjqAEUk8PNfvO5TOknk+dSy/YpcOLXju7thNxkyFlntp9GnBcsadk1NNAiNaz1iBTlhZkQDL2TAuO+AQOh2wkH4GfoYwlXPUcIc1ePPgQVKJ2GMZq7jlJXq05MeSaV4iy1lozf2FNcuYJEMV9zOWMYtfgrRYsNzbgNvejGYYzZlJNLd2yZvWKtl0YoYlZ/mS+boic1J7nKJlGZsYW3KfHr04Zk1zk0yi869NUdOebQ+E0R61pyVeVUqKmLv+Z0ceLXeNmfFnFJ5Ct5RpL9HFvW1S6Ho3qG4VJsZuhtaDjfUyOA/aeBgjYFn5GoyFWx2UuqXQWx+ct1JPWcZogY8VsnzClcMmY1wps/g0R2tlsceZ1zUUOOFB+Sz6EbMrHVhsXShATkB6WHAHXFnkRd15V/TgAj34mXRUkNto8pYuLfpgNXANaK2xIDW4IGYguEOXIoyGYnR+hhom0roYZNArw97Qg9Utn3Kpe3DpMJlc0L7aBJjxeUqgg0qKO6mnMLGmhKk1oUqGvgQMGL2sLM5Re7BcIAijC0mZcL1rvU7n2BiFXG/kk8BB6RQpcdt5HIIIzpsSkgDMuQrkJ/ewkErFQIRA5+RYYXJvjeboEeXcBeVX7scgUReVkdqD1HOToNiD00mERGXNXBZYZLE+ZKUtJcV53SLwmm2EtgcpMbImY156umVdb0itoETtzxNjWNOQnEVXGe0SzH8ZHO9H+Cq6n1xbQtKUYFR8Ds5j0WNNxo4Hg69RJG7aof+ejiS4bmkiTFkpJP3wwViEAj2XygG3uEoZIZL2d7HA2BR1StQj/K+sGSssf97tA9tAOEuSrV1I/QG4gyQ4Ttavzt+dwKvj57/dPJl5X7m8318sFj07EUdYSG9sz9hp304E/UjuaeSNRSh5HeFUJOhyBevOA65CISdSdB26dRuo/BtAeKRtpdXlDlxWjTRYuXtowOX5KcgCtZeTmgC8YzruieCkM2hsgs/Hius7tgbertFtKy6UJber42DTQJMx57kP7psHwa+/7OgmwP0xGp1BUgHCFF1vlq4zREGUUssylCw/HgwyVvL7dPdiMGhIJ1X8OyLRgPeV4jpCazscqaFc4zYGJrXzXIsfVRlj5VRu2+2xh02gBfGbFFEi/vE3ub6PmMR4YubEBF30DgQ7EOxAsEcJ9uo/EEy67jRbWKOnMcMIIliL2qv6cAoeSHog6Y8j6fN9L6xDDZRlSzhMnzxGRAYWsJhJFdXHt//WdvsNeuDagWsHrj3GtSZjJfqZobFVZVyEDk2NctanA/GIDkTXXz4YLjX99VcjDYbQzrtJVLCK5WyZSNTk/f5yZpxv8mVlrG/6c6rQnFtJ38SxoLScyNaBSBnB1Sw5sltMWqBxVxfkCS+DLji8hPO3FyP4nXtc8Doml0xuqn45eDnYq5VEH9E4PDuFFGGC4oP20Kklnu9Vm4S/R3EcmTkUwUpfX9C2lJ4xcot2GKgeK4i09qL2OEiJQixrL951wPnzn1GsPbW28/U47u09p0/3zfHZGndbo6w0rusGMp1YxO7ERKdaZO2GR7VG69LGQe/ZLorPTiMZhSnLoGNH1tP08sUfpEsoGmRYoqmSArWLvrdDz07sfVqBv5NFeNajUic8do14Kv0sjHvClH2Rtq3+j5UZ90sudb814fonww+XH98Mj96fnrz9ePH26Flv0PP3PqaUiFJy/cCPNMdZv0NuB7tcH0D/e/zb4sDjve9XiktN9YihLlvuXrE1d1nG8s3R8AP63mQtBa/YcjnmDi+tahp6/CWgrVl+dbNmbKR4IR1dF+3w8ythPjlvh75P4ftGzHvjah9yXcfeoQLdsYzdYb01825umozNkBdoo6dJ4iT5czQiPWsNO1PnJut2DIXAyj8iu/G+QKxdNc+zTxcjImE78S4N0YdZvqABPF+wnF2za/LcxFxFfsfnS6a4ngY+Jfmkl/7+BWsSd60= sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/assign-user-to-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/assign-user-to-tenant.api.mdx new file mode 100644 index 00000000000..84d3a2644cb --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/assign-user-to-tenant.api.mdx @@ -0,0 +1,59 @@ +--- +id: assign-user-to-tenant +title: "Assign a user to a tenant" +description: "Assign a single user to a specified tenant." 
+sidebar_label: "Assign a user to a tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWFtT4zYU/itn9LQ7NXFg2S31W4aFlnaXYSC0D5QH2T6JtStLRhdCxuP/3jmSHQIJLdvhMQ8Zx9K56nyfZJ2WOT63LLthU1RcOXabMN2g4U5odVayjHFrxVxdWzRT3cskrERbGNGQEMvYJIgAByvUXCJ4iwacpoEGCzETWIILqiOWsIYbXqNDQ25bpniNLGNx/g9csoQJMtpwV7GEGbzzwmDJMmc8Pvc8rRC8EnceQZSoHPkyoGfgKlxzaYsKa86ylrllQ96EcjhHwxI206bmLg59OmRdl6xCojTeNCAy+KPh3JJH22hl0ZLGwfiAHlu80qIvuAXriwKtnXkplxCrR+uvn6xJl7DD8Xi7pcboe1FiCSV3HIQFpR3ccylKCr7QyqFypMqbRooiQCVtjM4l1j99s2SnXcvxGVbgIkpCiY4LCTr/hoUDbiEK5liCUHBzeXoMvxx+/Pn2XeVcY7M0XSwWIzMr9rAUTpuRNvPUzAr6kdz7EUwrNAg1X0KOwMtSkE8uKaEGjRNoB0AWw3r0YQMVYvS3YslQkhgWwXWl/FiwVeGsM0LN1+vmjdjkB1xfng14WAo133QddGbcS7LBc+1dlkuuvlOhnHByq9PnXqyva26WA9yeOugSZh133v4n7j4cbNgmWPw2nV5ANAGFLhFm2oCrhB0cURK1UKL2NcsOx+OE1fwhvn0ajzuySRV/RSYK8KGRXAVoPU9HKKi1wR4/ITGhrOOqeKvKaCPm4rnfEe0NQy1YD+LPMaOu6wKhPmzi/VSbXJQlqgBPoN0DrRtYxaXUC9zxaserHa/+hVeHm3g/1w5m2qsy8iqea6DN4zmoVhI7du3YtWPXC+z6uO0zcKKAVtkQDtEYbUAXhTcGS1hUQgbz9JE5+O6PtYjFHdd2XNtxbZNrXcJqdJWmW3XjA3LoSpmxNJ5eNm1Xt+AupXPMpm1/Ce3o2ojmfrg1eyNZxtrIoS5L07bS1nVZ22jjuvSeCnTPjeC5jLCk6ci1AUNSF1yG4W21pAm6Bw85HvPaq5LDEVyeXE3hV+5wwZdhbcnlU9NH46PxVqsk+oLFycUZxAwjEtd2h8Es0Xyr2Sj8GsPhQm2x8Ea45RWpxeXJkRs0E0/1WCGk9xes03sUYkn/53TAze9/TUPphZrpoN5DYDMQqgoaGyMfj/Y34XZxFlhT6Lr2Kmydag4L4Srga4kV0lsXmwlSFKhsAHbftxjEvsQZ+DN6hP0RFSUiZ9gx58JVPh8Vuk6LqLZ65lLnac2FSnsXNj2efL0+/zzZ+3J2fHJ+dbK3PxqP3IMLyTfaupqrtThWbaHHfpDb2kBqH4+MH+wl9YVy+ODSRnKhCDohw7Yn103fWbIsYdl6kynwiwaHNs9t0pPkhrVtzi1eG9l1NHzn0SxZdnP7yKlAwlJY+l+ybMalfd4LWs/q3WXfNXoPr2xZbc2sH+RqGegtPb2xhH3H5ZMWGjWx3j64oX31P0Ib1ri77RJWIS/RhBWMs5OiwMat6b342UDsXe2hF9dEu38A4lYjaw== +sidebar_class_name: "put api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

      Assign a user to a tenant

      + + + +Assign a single user to a specified tenant. + +## Request + +

      Path Parameters

      + +The user was successfully assigned to the tenant. + +
      + +The provided data is not valid. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +Forbidden. The request is not allowed. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +Not found. The tenant or user was not found. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      + +An internal error occurred while processing the request. + +
      Schema
        = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
      diff --git a/docs/apis-tools/camunda-api-rest/specifications/broadcast-signal.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/broadcast-signal.api.mdx index 9b411c23d1f..2ccbc676fe3 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/broadcast-signal.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/broadcast-signal.api.mdx @@ -5,7 +5,7 @@ description: "Broadcasts a signal." sidebar_label: "Broadcast signal" hide_title: true hide_table_of_contents: true -api: eJztWN9zm0YQ/ldu9qmdYqGkTurypthO6zRxPLbSPjh6WGAlLoE7fHdY1jD8753lQEK/0nQmj9aMRgL29tv9dr+DpQaHCwvRPdzJhcIcZgGkZBMjSye1ggjeGI1pgtZZgcK2RiMIwNBDRda90ekKoro9lIZSiJypKIBEK0fK8SUsy1wmyO7CL5Z91mCTjArkf25VEkSg4y+UOAigNLok4yTZ1q7Fu8aC+Gg7sGlGQmFBQs+Fy6iLTTgt4j5kDrQDsM5ItYAmgEc0EuPcA2CaSvaH+c0A2OewD9dBrD0IZFLe3X28Fj6BAWCXUROAI4XKXaWHU7i66BPwdsJl6IReKjvI6kAiTTAg/X7I1CwAJ13Oxr6o6xLe+qJB0/jlttTKeiJejseH4+tyXqLd5vWHVvgvWh1Gr5R8qIYk9VVmkvZi6qCkcrQgAwHMtSnQ+VOvT/+7GF0J/h/coCJHefdMd8SfHuO6NPpRppSKFB0KaYXSTjxiLtNvMF4aHedU/LLP/DbARNx4S5GSQ5l3Lcs97A1jSoVU4v727bn4/fTVb7OfMudKG4XhcrkcmXlyQql02oy0WYRmnvCX7X4eiWlGhkSBKxGT2GhKbGotbEmJnMuEBep8rm0wTOLos9oTzm6j+Kv1DumDEldGwq5oJ+LT7ZWQKSkn5yupFvvQ7Zo5Vjn7wFhXLopzVF9hU8590F0UWxUFmlXfNdsATQDWoasGaRxp0V9f7vnmtvhzOr0R3oVIdEpiro1wmbQ9ECdRSCWLqoDodDwOoMAnf/R6PG7YJ1f8OzJRgp7KHFXbWrvpSCUKbajrnzYxqaxDlfyoymgjF3IXd7Qlra6JL3xGvaBOv7l5dUqa60o9K+lZSc9KOqqkV4duTRMlmGXDfUjGaCN0klTGUCqWmcxb9wlZ22N3z4bPSntW2rPSDiutCaAgl+kUIii1bVsHXQYRhP6eZcP1oyYEYMk8kuE5rYbK5BBB7SXTRGFYZ9q6JqpLbVwTPnI9tmYcvuyl1bdMrhPMMw+7Xzq+MByszrGoVIriTNxe3k3FH+hoiauWSobcdn02Phsf9MqmRzxObq6Ez9A33mAz6N2yqg+69cbf47hpZkxkUhnpVne8zNMTExoyk4rZXzdEh9d652NvBEH3523fJu/+mbaV5o3sdjMSXz5hUXoZDkfYTZ8NC7Q1lQwGVanmug2p66L95NgRGevZGI9e7HfszVUrvEQXRaXa3VctxFK6TOCArCSvrGOSAshlQjypRDUoH3Nv9t5fEX97RPFixIX23dhvugvpsioeJboIE79s/RvnOg4LlCrsIGx4Pvnw6fpicvL+6vzy+u7y5MVoPHJPriWURVGgGsSxnqa6p7rdZOvNzebYS4uuuI6eXFjmKBXT3GZQd/LrB2nLpV4LcBZ0IrqHuo7R0ieTNw2ffqjIrCC6n21KykdNABlhSqZV7FeebuHcR3cy5SDYPK84mL3huQn6FZMkodJ903Y22EduPt5NOezulUyhU15jcMmva3AJEXyGzwAB6Jaxtvnb8zXkqBYVLtje++XPvyQVPTQ= +api: 
eJztWFFv2zYQ/ivEPW2YYrld2mV6c9N0S9emQeJsD4kfTtLZYiuRKknFMQT99+FEyZYjp+uAPiaAEcs83nf33X2UTjU4XFmIbuFarhTmsAhAl2TQSa3OU4ggNhrTBK3rDAJIySZGlmwBEbzp161AYVubCQRg6GtF1r3R6Qaiur2UhlKInKkogEQrR8rxEpZlLpMWMPxs2WcNNsmoQP7mNiVBBDr+TImDAErD4TlJtrVr8S6wIL7aD2yekVBYkNBL4TLqYhNOi21KHGgHYJ2RagVNAPdoJMa5B8A0lewP88sBsM9hDNdBbD0IZFLeX3+6ED6BAWCXUROAI4XKMdmHUjh/2yfg7YTL0Am9VnaQ1YFEmmBA+u2QqUUATrqcjX1NtyW88kWDpvHbbamV9US8nE4Px9flvEa7z+sPrfBftDmMXin5tRqS1FeZSRrF1EFJ5WhFBgJYalOg8z+9Pv7vYnQl+H9wg4o8ybtnuiP++CmuS6PvZUqpSNGhkFYo7cQ95jL9BuOl0XFOxS9j5vcBZuLSW4qUHMq8a1nuYW8YUyqkErdX707F78evflv8lDlX2igM1+v1xCyTI0ql02aizSo0y4Q/bPfzRMwzMiQK3IiYxE5TYldrYUtK5FImLFDnc22DYRInd2oknMeN4lfrR6QPSlwZOTq8ZuLm6lzIlJSTy41UqzF0u2eJVc4+MNaVi+Ic1RfYlXMM+hjFVkWBZtN3zT5AE4B16KpBGk+06K8vR765Lf6czy+FdyESnZJYaiNcJm0PxEkUUsmiKiA6nk4DKPDBX72eThv2yRX/jkyUoIcyR9W21uN0pBKFNtT1T5uYVNahSn5UZbSRK/kYd7Inra6J3/qMekEdf/Pw6pS01JV6VtKzkp6V9KSSXh26Nc2UYJYN9yEZo43QSVIZQ6lYZzJv3SdkbY/dPRv6XnzW2rPWnrU21loTQEEu0zyDldq2rYMugwhCf9ey4fZhEwKwZO7J8CRXQ2VyiKD2kmmiMKwzbV0T1aU2rgnvuR57Uw4ve2n1LZPrBPPMw45LxwvD0eoUi0qlKE7E1dn1XPyBjta4aalkyH3XJ9OT6UGvbPqEx9nlufAZ+sYbHAa9W1b1Qbfe+HscN82CiUwqI93mmrd5emJCQ2ZWMfvbhujwWu987Y0g6L6869vk/T/zttJ8kF3thuKzByxKL8PhELvrs2GB9uaSwagq1VK3IXVdNE6OHZGxno3p5MW4Yy/PW+Eluigq1Z6+aiXW0mUCB2QleWUdkxRALhPiWSWqQfmYe7MPfkX87RHFiwkX2ndjf+iupMuqeJLoIkz8tu3/ONdxWKBUYQdhw9PZx5uLt7OjD+enZxfXZ0cvJtOJe3AtoSyKAtUgju081T3XPU623t1snnpt0RXX0YMLyxylYprbDOpOfv0obbnUWwEugk5Et1DXMVq6MXnT8M9fKzIbiG4Xu5LyVRNARpiSaRX7hedbOPXRHc05CDbPKw5mND43Qb9jliRUum/aLgbnyOWn6zmH3b2UKXTKewyu+YUNriGCO7gD4FdA7KFt/vb3GnJUqwpXbO/98t+/TxlJew== sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/cancel-process-instance.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/cancel-process-instance.api.mdx index b0170a83a09..831d4748c8d 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/cancel-process-instance.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/cancel-process-instance.api.mdx @@ -1,11 +1,11 @@ --- id: cancel-process-instance title: "Cancel process instance" -description: "Cancels a running process instance." +description: "Cancels a running process instance. As a cancelation includes more than just the removal of the process instance resource, the cancelation resource must be posted." 
sidebar_label: "Cancel process instance" hide_title: true hide_table_of_contents: true -api: eJztWFtv2zYU/ivEeWoxRVK6tOv0MMBL0y3rZYbjbg9JHmjpyGJDkSpJxTEE/ffhkPLdQVNgjzFgWCIPz/X7aB524PjcQnYNY6NztJYJZR1XOcJtBAXa3IjGCa0gg3MalpZxZlqlhJqzZm9NDBE03PAaHRrS2oHiNUIGg+TlIPgBlxCBIK0NdxVEYPBbKwwWkDnT4r7paYXsDpdMl8xVeGCXOc1y7x15YPMKaw5ZB27ZkHGhHM7RQASlNjV3YejNGfT9bbCM1v2uiyWt2ThScmkxglwrh8rRHG8aKXJOTiVfLXnWHVrTs6+YO4hAtVLymcRVSI3RDRon0JI0PXtNEyzRIGU86/bCHjGzmvTh55W2qNhs6bPQWjTMVdyxhZCSzZA13DjKEZeSGcy1KSwzaFvpqFql0TVzlbBsbTu+UZ9a62jtbyxloqTU3osCi/hGQfTdBEZQCyXqtobstO8jcMJRvANUxrtFn4REQ9+TqEHbaGVDMl6lZ4fRT49VWtih0ljE0EdwlqaPrvSBsII7TsuUduyeS1EQRh4pamP0TGL902Fx9wszDpKsQMeFZKHojFsWBGdYMKHY9eT9Ofv17PUvty8q5xqbJclisYhNmZ9gIZw2sTbzxJQ5fUnuZcymFRpkNV9SVXhRCLLJJdvAh9kGc1GKnHA/8ME7Q+XaKdwai7vYC7NrxFpnhJpvV7c1AvY5OGJfJpdMFKicKJcEqAPTfk3JW0k6+Ey3LptJru5gA41Do/tWbFvX3GxzfctAH4F13LX2u/T++dWBboLFn9PpmAUVLNcFslKbQIrBULwN6rM0jaDmD+HtTZr2pJMq/oRIFMOHRnLlobUfjlCs1gYH/PjA1jvv/1MZbcRc7NuNYZumA4jfhYgCL89+hIrEqVK36plTz5x65tSjnHp97E9qpBhl2RAO0RhtmM7z1hgs2KISck24le3hnPLMtGemPTPtONP6CGp0lS6or9DWQ4faiwySgUwnKzdt0h32JH0SzpbSB02tBJr7VSfTGgkZdIFUfZYkXaWt67Ou0cb1yT1V7J4bQUd+X2CaDuRbgUrqnMsqOHZYXJqgdmkV9DmvW1Vw9pZNLq6m7A/ucMGXPtlkclf12/RtelQriT6icTS+ZCHCAM2t7WKllnh/VG0Qfopi32RZzFsj3PKKloX0zJAbNKOW6rOGzGDPa6f3IATR8PB+BaS//p16LNBWN9k0cBcPvG4CUY/1V6nHaam9wXWrsu861RGNDbGm8ekhYseXnni5rutW+d1XzdlCuIrxrVTksrWOUhCBFDkq6/0aGuKV2Mcww/4JFtlpTGUMWFttunPhqnYW57pO8rBs/TuTepbUXKhkMGGT89GnL5/fjU4+Xp5ffL66ODmN09g9OJ8uIkXN1Y4fBPiDw91+zN3mP+eJ9wBDQR0+uKSRXCiCmI+rG0h5DQekhAiyo1cFO7y8jQZuXUPXzbjFL0b2PQ1/a9EsIbu+3VDRc7cQlp43Tf2jwb2YDP3/S/ZDlw5Hwx0GuVr6vUG29AYR3OHy+J1If9tHUCEv0Hi/g+B58O5kSuo2ig5uI/potWKU59i4R2R3jglEzvWeOf77akpcG65Cal3QWsMXdEHDF5DBDdxQANpnztPYj3cguZq3fE7yQS99/gPloVm8 +api: eJztWN1v2zYQ/1eIe2oxRXK6tOv0MMBL0y1b2wWOuz0keaClk8WWIlWSimMI+t+HIyV/yUFbYC8DEsCIJR7v8/c789iC40sL6Q1cGZ2htUwo67jKEO4i0DUa7oRWlzmkkNFr2ctdDmIR5GgzI2qSgxTOvZRlnJlGKaGWrD7QHLMpLQd1Xj0TKpNNjpZV2iBzJVfsU2MdcyUyg5W+55Lpwj8eamMGrW5MhpFf3tU6rLCKdC2Q1do6zGOIoOaGV+jQUOwtKF4hpFDvx/YnriECQVHV3JUQgcEvjTCYQ+pMg4ehz0tkn3H9qKdO9+6RBzYrseKQtuDWNRkXyuESDURQaFNxF169OoOuuwuW0bpfdb6mPVtHCi4tRpBp5VA5WuN1LUXmU5B8suRZO7amF58wcxCBaqTkC4lDSLWhqjuBlqQ3CJhhgQap4Gl7EPaUmWHRh5+V2qJii7XPQmPRUEUdWwkpfRW4cZQjLiUzmGmTWypVIx2hpTC6Yq4Ulm1sx7fqfV/BX9iEiYJSey9yzONbBdFXExhBJZSomgrS066LwAlH8fZQPQD0LCQauo5EDdpaKxuS8WJyNo5+fqzSwvaVJrR1EZxNJo/u9IGwnDtO25R27J5L4VH6SFFroxcSqx/GxT0szFWQZDk6LiQLRWfcsiC4wJwJxW5mb8/Zz2cvf7p7VjpX2zRJVqtVbIrsBHPhtIm1WSamyOhDcs9jNi/RIKv4mqrC81yQTS7ZFj7M1piJQmSE+54P3hkq117hNljcx15Y3SDWOiPUcre6jRGj9jNlH2eXTOSonCjWBKiRab+n4I0kHXyhG5cuJFefYQuNsdFDK7apKm52ub5joIvAOu4a+1V6//hipJtg8ft8fsWCCpbpHFmhTSBFbyjeBfXZZBJBxR/C06vJpCOdVPFviEQxfKglV6FlHoQjVOjIQZsPbPP78N9URhuxFId2Y9ilaQ/iNyGiwMuz76EicarQjXri1BOnnjj1KKdeHvuRmtLhzKEhHKIx2jCdZY0xmLNVKeSGcIPt/pwSsPjEtSeuPXFtzLUuggpdqWmqopnEDySuhBSSnk4ng5s2acdTSZeE02WYc2iYQHM/zDKNkZBCG0jVpUnSltq6Lm1rbVyX3FPF7rkRdOj3BablQL4BVFJnXJbBsXFxaYEGpiHoc141KufsNZtdXM/Zb9zhiq99ssnkvurXk9eTo1pJ9BGN06tLFiIM0NxpF4Na4v1RtUH4WxT7Mcti1hjh1te0LaRngdygmTZUnw1kenteOz0HIYj6L28HIP3xz9xjgVrdbDvCXTzwqg5EPTZhTTxOC+0NboaVQ9epjmhsiHUSn44Re3XpiZfpqmqU775qyVbClYzvpCKTjXWUggikyFBZ71c/Eg9i78IK+ztYZKcxlTFgbWi6S+HKZhFnukqysG3zfyH1Iqm4UElvwibn0/cfP7yZnry7PL/4cH1xchpPYvfgfLqIFBVXe34Q4EfHu8OY2+1vzv/kJqIHlMMHl9SSC0UQ93lt+6ZwA6OmABGkRy8r9vrCXdRz+wbadsEtfjSy6+j1lwbNGtKbu20r8L0jF5a+b68VHk3us1l/A/Gcfde1x9Fw+5dcrX1vkg09QQSfcX38Vqa76yIokedovN9B8Dx4dzIndVtFo/uQLhp2TLMMa/eI7N4xhZrDpmdf/XU9J673lzGVzmmv4Su6IuIrSOEWbikA7TPn24h/34LkatnwJckHvfT3L0h9xuY= sidebar_class_name: "post api-method" info_path: 
docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null @@ -32,7 +32,7 @@ import TabItem from "@theme/TabItem"; path={"/process-instances/{processInstanceKey}/cancellation"} > -Cancels a running process instance. +Cancels a running process instance. As a cancelation includes more than just the removal of the process instance resource, the cancelation resource must be posted. ## Request diff --git a/docs/apis-tools/camunda-api-rest/specifications/complete-job.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/complete-job.api.mdx index a38ab297b42..aa348692e63 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/complete-job.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/complete-job.api.mdx @@ -5,7 +5,7 @@ description: "Complete a job with the given payload, which allows completing the sidebar_label: "Complete job" hide_title: true hide_table_of_contents: true -api: eJztWE1z2zYQ/SsYnJIpTSqpkya8qY7TOs2Hx1bag+PDElyJsEGAAUDLGg3/e2cBUp92ks6kN2lGI5HA7tvFvgcSu+QeZo7nV/ydKfh1wkt0wsrGS6N5zk9M3Sj0yIDdmILNpa+Yr5DN5B1q1sBCGSgTNq+kqBgoZeaOiWgj9SxMBeeMkOCxZA7tnRTIPLjb9IvmCW/AQo0eLYWw5Bpq5Dm/McVfuOAJlxRDA77iCbf4tZUWS5572+JuoJMK2S0umJkGUArWmyEUTHnCnaiwBp4vuV80hCK1xxlanvCpsTX4eOvlMe+66wiHzv9uygXZrNGnoBwmXBjtUXsag6ZRUgBFkt04Cme5j2aKGxSeUramQeslOhq9AyuhUPECylKSF1DnG5MeS3dlupnpKnsqFaW9C69bpcgq+u26hHvp6ZII0JdbGn0R0+ddR1MsusZoF6N8Pjqmn/2AAiysCEAFb4VA56atUouUdwk/Ho2+YbvNLiqnAK2NZwWunabsg7HISvQglWNgkTXW3MkSSyZ1sB/CZYUpF5Foj5SrsaZQWP+yX7btEMfsPM7scVlcTwaOxYlFRL+6eHvCXh+/+O36SeV94/Ism8/nqZ2KIyylNzY1dpbZqaAvzXuaskmFFlkNC0pzTQG2JgpzDQo5lYIqTQn2YTMqbszv2yyLoysuOm+lnm0Sv7WS73JszD5fnDFZovZyuhjkvAUdbKbQKvIBhWl9XijQt3xNq33QXRTX1jXYlXS3AbqEOw++dd8V7q/P93wTsf6cTM5ZdMGEKZFNjWW+km4AoiRqqWXd1jw/Ho0SXsN9vHo5GnXkkyr+A5lohveNAh2otZuO1Kxe8zYkJrXzoMXPqoyxciZ3cVO+KfGexG9iRlHbx9+U874kSeGkyalpdZkepHWQ1kFaj0rr9X+SlnTDE2xujZ6FtUUmWmtRe7U4PPkO8jzI8+fJ88VDL6NjzWiVLfEQrTWWGREUWNIpRwX39E47YPfnhIPWDlo7aO0xrXUJr9FXpqTjvHGBOnSqz3l2YwqXLeOJv8vE6gBKJ3a0d0NnoLWK53wZldPlWbasjPNdvmyM9V12R2XZOkvTcFTYwBxlBKgqou9XkAao/TBkdgJ1q0tgr9jF6eWE/QEe5xAPsQS57frV6NXoQa809RGP4/MzFjOM/NvYEwa3JO4H3cbJP+I49DIcitZKv7gks7g8BYJFO26pCCte9HjBO13HSTzp/7wd2PLun0koOO1nF+s+yek9UPV22xpdIOHUBKCeIvshU/3QupjjKH22T8fzs6AqYeq61WFr1bP4FgUbSyBU6zylnnAlBWoX4ukbS8O093GE/R0R2bOUyhc5NuyoM+mrtkiFqTMRzVa/hTJFVoPUWQ/hspPxh88f34yP3p+dnH68PD16lo5Sf+/DMhHja9CbcQzdmhtT7Ca6XD9F/vf2W193j/c+axRITUwMy7DsBXpFzTjHE56vmnIbGr1Oep1d8eWyAIefreo6uv21Rbvg+dX1WpZBx6V09H/dR3s09ycXfcvtKft+c+/BRPqboBdhc1AtXfGE3+Ji3WTsrruEVwgl2hBgHDyJYRxNyMXaeK/T1yWDxVgIbPwjc7deAEiRq93w/NPlhATWtxlrU5KthTl1PGHOc/6Ff6GgTViioN1wf8kV6FkLs9AwDX7p8y98k4hh +api: 
eJztWE1z2zYQ/Ss7uDSZ0pSSOmnCm+o4rdMk9dhKe3B8WJIrETYJMABomaPhf+8sQOrTTtKZ9CbPaCwSwH6+t6TeUjicW5FciXc6FdeR0DUZdFKrs1wkItNVXZIjXoxETjYzsuZVkYiTfg0QbnQKC+kKcAXBXN6RghrbUmMewaKQWQFYlnphobcn1dxvRWt1JtFRDpbMncwIHNrb+LMSkajRYEWODIe3FAorEom40emf1IpISI6hRleISBj60khDuUicaWg30GlBcEst6Jl3ysE6PYRCsYiEzQqqUCRL4dqavUjlaE5GRGKmTYUu3Hp5LLruOrgj637Tectn1t5nWFqKRKaVI+V4Deu6lJkv6OjGcjjLfW86vaHMccqGy+8kWV69QyMxLcMF5rlkK1ieb2x6LN3V0c1MV9lzqzjtXfeqKUs+Fex2nKhtSvdQpNtbHwohnB2KPsSQ+wDQQs6draSiHNLWb1loc0sm3itDTkpycVdBpFqXhGoPkGcq51KThUVBriCzYRa8Fbu6E4GMKQa65/ZIV7aQa7LqJwdY10bfEUgXw1ttgO6RQ48AYYr2Ft5L60iRgQwVW20385NaccboYcyJWXI92qWFWYlzbgiXLIYzFe6uHDxqR1owVBuypFyoV6CcK9Bt5sgRrZrta8yVi2FiAft+7HnpsRx8cHMpB1S53+adG6pQKguYOXlHMbyhGTal88jyeA9s3cFOFwknHV/yYLkIONq5e7KK4iIEIbquC6irtbKh+8/HxwEEu/jyQEa7gSzbZBlZO2vKso1FF4nj8fgrZ7fnFQ+IDJXSDtINuMbwQRtitKIsLaAhYHjInHKQyp8fwoVU520oxiMDoDY6Lan6eX8QbIc4gfOws/cLgXahpbwxDd6vLt6ewOvjF79ePymcq20yGi0Wi9jMsiPKpdMm1mY+MrOMP7zvaQzTggxBhS2nuR4qsOYc2JoyOZOZh2rhE/bBMP1Cfl+fW2F1RVfrjFTzzVHaGLlH3gl8ujgDmZNyctYOD4gt1/6MB59IBKa6cUlaoroVa1jtO931YpuqQrN6GGw76CJhHbrGfvNR8MvzPdsMrD+m03MIJiDTOcFMm8Dy3hEnUUklq6YSyfF4HIkK78PVy/G4Y5vc8e/IRPnhhQqHYbGZjlRQrXHrE5PKOlTZj+qMNnIud/3GWxTvQfwmZBS4ffxVOu9TkhnOnJzpRuXxgVoHah2o9Si1Xv8nakk7PMEWRqu5ry1B1hhDypXt4cl3oOeBnj+Oni8eehmdKOAqG8YhGaMN6MwzMOffzaU3z++0g+/+bf3AtQPXDlx7jGtdJCpyhWYBq9bWQ4d1okSMbnRqR8ugIXWj9c9g1oDI3A1aU2NKkYhlYE6XjEbLQlvXJctaG9eN7rgtW+oMLweGDcgpdYZlEbzvd5AXWNAaMjvBqlE5wiu4OL2cwu/oaIHhRyy73Db9avxq/KBV3vqIxcn5GYQMA/42ZsJglsn9oNmw+XsMe3XMUtYY6dpLPhbKkxIaMpOGm7DCRe/PW/eyjt8kov7L2wEt7/6Z+obzPLtYK2+nQTbZFcq2VKtBOhokCalm2kfQY2c/F24sGRuSH8fP9nF6fubplumqapSfuWoeXq9wozZZ2VgX1KxSZqSsD7TXMIdt78MK/B08wrOY+xrAN4zauXRFk8aZrkZZOLb6n5Y6HbE4M+pd2NHJ5MOnj28mR+/PTk4/Xp4ePYvHsbsPygtToUK1GcegFd3sy7vL9ePlf1d6e0A4unejukSpGKK+DMueuVes+1oRiWSl/26Q9zrqCXgllssULX0yZdfx7S8NmVYkV9drvnqC59Ly97Vk+2juTy56dfcpfFtHfjCR/iaq1k+NsuErEYlbatd6dnfdRaIgzMn4AMPiSQjjaMom1of3ROUuGk5Msoxq98jerTcDpupqTJ7/dTll5vWKdqVzPmtwweI6LkQiPovPHLT2JfKk9veXokQ1b3DutXlvl//+BdIObqg= sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null @@ -40,7 +40,11 @@ Complete a job with the given payload, which allows completing the associated se The variables to complete the job with. -
    +
    result objectnullable
    + +The result of the completed job as determined by the worker. + +
The job was completed successfully. diff --git a/docs/apis-tools/camunda-api-rest/specifications/complete-user-task.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/complete-user-task.api.mdx index e8d4ac40c2a..707f67a255b 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/complete-user-task.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/complete-user-task.api.mdx @@ -5,7 +5,7 @@ description: "Completes a user task with the given key." sidebar_label: "Complete user task" hide_title: true hide_table_of_contents: true -api: eJztWE1z2zYQ/Ss7OCVTmlRSJ014Ux2ndZukHltuD7YPILiSEIMAA4CSNRz+984C1Lc1cTs5yjMai8Ri3368B1LbMs8njuW37MahBc/dA7tPWIlOWFl7aTTL2ZmpaoUeHXBolmYwl34KfoowkTPU8ICLlCWs5pZX6NGS05ZpXiHLGe0acffwJy5YwiQ5rbmfsoRZ/NZIiyXLvW1wF3k0RXIMZhyQ1uDegOijIlQnplhxlrfML2oClNrjBC1L2NjYivt46+0p67r7CIrO/2rKBe1ZxzDmymHChNEetac1XtdKCk7xZF8dBdXuo5niKwpP2VtTo/USHa3OuJW8UPGCl6UkL1xdbhgdSnq1dTPTnRpQAyj53SB0oxTtjd67hHERPbc7QEMQjfOmgmgAM64aAuEe5lIpKBC4EOicLBTC2JpqAx1nqL0Di65RXupJXPdT6QB1WRupPUg9M7F2KVyMQRsPtTUzWWKZgOxRShzzRnlK9I4tU71j6Z1e5+a8lXqyn1uXMC89XQb+EsV6skqjr2KXWdeRnUVXG+1iM14PTvfLMdquLnerwpfgmlCIcaPUImVdwk4Hg+962BMICK6pCAWuXafw2ViEEj2XygG3uKoRSB32L0OHwpSLWJcDDK2tKRRWP+0zdbfzl9Gyx4VIHuAOomER0W+vPp7B+9M3v9y/mHpfuzzL5vN5asfiBEvpjU2NnWR2LOhDdi9TGE3RIlR8EfizYj2stQGuRiHHUlDPKcE+bKBub/X9gLDiarvHjpXWGyvZrqyGcHN1AbJE7eV4QYzdgw57AhtZznhhGp8XiusHtubZPuguimuqitvVmbUN0CXMee4b992z6ufXe76JXr+PRpcQXYAwJcLY2Ki6HoiSqKSWVVOx/HQwSFjFH+PV28GgI5/U8WdkogEfa8V1oNZuOlJDteZtSExq57kWP6ozxsqJ3MVN2abmexJ/iBlFnZ8+Q9r7wiS1kzLHptFlehTYUWBHgR0U2Pv/ITDplk+zuTV6EiqMIBprUXu1OD4FjyI9ivTHifTNU6+nQw1UZUs8RGuNBSOCAkuYT6UK7sPrfo/d/0w6au2otaPWDmmtS1iFfmpKmmkYF6hDo42cZfRAPKEHosvajQlIl4nVb1SaXaCdLccljVUsZ20UUZdnWTs1znd5Wxvru2xGHdqaKtByFNuSRMoIrqYxkP1m0gLNZJZJnvGq0SWHd3B1fj2C37jHOY+/cAly2/W7wbvBk17J9IDH4eUFxAwjFTeOh6Vb0vmTbqPxcxyHqY5D0VjpF9e0LZanQG7RDhvqx4oiPV7wTtfRiCX9l49L4vzxzyj0no62q/XE6PyRU/d2Bzwbc5YlCQNDxyZA9/zZT4I6itbFjYP01T5XLy+C5ISpqkaHc1dP4isW3yiKUI3zVIyEKSlQuxBhP39bmn2KK/B3RIRXKTU0sm553E6knzZFKkyVibht9b9QpsgqLnXWQ7jsbPj55suH4cmni7PzL9fnJ6/SQeoffSgcyaHiejOO5SRr9a64m267ftD8p7lj31uPjz6rFZeaqh8Sa3s93rK1HlnC8u2Z5IYk75NeVresbQvu8MaqrqPb3xq0C5bf3q9VGGRbSkff1wPEgym9uOpnjS/hubPNJzPrb3K9CCeCauiKJewBFzvj1u6+S9gUeYk2xBotzmJEJyPys/awN+3skuWOoRBY+wO2W28BpMXVkXj51/WIpNWPWitT0l7L5zT75XOWszt2R5GbUK2g2nC/ZYrrScMnZB/90t+/xBTa1w== +api: 
eJztWE1z2zYQ/SuYPSVTmlRSJ014Ux2ndZukHltuD7YPILmSEIMAA4CSNRz+984CpL5oTdxOjvKMxiKx2Lcf74HUNuD4zEJ6CzcWDXPcPsB9BLpCw53Q6qKAFHJdVhIdksmELCIo0OZGVGQCKZx1BpZxVvd+2FK4OXNzZDOxQMUecBVDBBU3vESHhlAbULxESKHuXP+JK4hAkNOKuzlEYPBbLQwWkDpT4z7yZI7kmOmpR9qAO836sAnV5nMsOaQNuFVFgEI5nKGBCKbalNyFW29PoW3vAyha96suVrRnE8OUS4sR5Fo5VI7WeFVJkftiJV8tBdUM0XT2FXNH2RsqrRNoaXXBjeCZDBe8KAR54fJyy+hQ0uut25nu1YAaQMnvB6FqKWlv8N5GwPPgudkDGrO8tk6XLBiwBZc1gXDHlkJKliHjeY7WikwimxpdbqHjApWzzKCtpRNqFtbdXFiGqqi0UI4JtdChdjG7mDKlHauMXogCi4iJDqXAKa+lo0Tv1ly8g/hObXKzzgg1G+bWRuCEo0vo2duRVWh1FboMbUt2Bm2llQ3NeD06HZZjsltdbteFL5itfSGmtZSrGNoITkej73oYCITlXFERMty4jtlnbZAV6LiQlnGD6xoxofz+PnSW6WIV6nKAoZXRmcTypyFT9zt/GSw7XBbIw7hlwTAL6LdXH8/Y+9M3v9y/mDtX2TRJlstlbKb5CRbCaRNrM0vMNKcP2b2M2WSOBlnJV54/a9azjTaYrTAXU5FTzynBLmxG3d7p+wFhhdVmwI611msjBqfYmN1cXTBRoHJiuiLGDqD9Hs9GSIFnunZpJrl6gA3PhqD7KLYuS27WZ9YuQBuBddzV9rtn1c+vB76JXr9PJpcsuGC5LpBNtQmq64AoiVIoUdYlpKejUQQlfwxXb0ejlnxSx5+RiWL4WEmuPLX20xGKlRve+sSEso6r/Ed1RhsxE/u4MWxrviPxh5BR0PnpM6Q9FCapnZQ51bUq4qPAjgI7CuygwN7/D4EJ2z/Nlkarma8wsrw2BpWTq+NT8CjSo0h/nEjfPPV6OlaMqmyIh2iMNkznXoEFW86F9O79636H3f1MOmrtqLWj1g5prY2gRDfXNEmptPXUodFGCgk9EE/ogWiTZmsC0ib5+jcqzS7QLPpxSW0kpNAEEbVpkjRzbV2bNpU2rk0W1KGdqQItB7H1JJI653IeAhk2kxZoJtMnecbLWhWcvWNX59cT9ht3uOThFy5B7rp+N3o3etIrmR7wOL68YCHDQMWt46F3Szp/0m0wfo5jP9WxmNdGuNU1bQvlyZAbNOOa+rGmSIfnvdN1MIKo+/KxJ84f/0x87+lou9pMjM4fOXVvf8CzNWfpSegZOtUeuuPPMAnqKBobNo7iV0OuXl54yeW6LGvlz101C69YfKsouayto2JEIEWOyvoIu/lbb/YprLC/AyJ7FVNDA+v643Ym3LzO4lyXSR62rf9nUmdJyYVKOgibnI0/33z5MD75dHF2/uX6/ORVPIrdo/OFIzmUXG3H0U+y1u+K++k2mwfNf5o7dr11+OiSSnKhqPo+sabT4y1s9AgRpLszyS1J3kedrG6haTJu8cbItqXb32o0K0hv7zcq9LIthKXvmwHiwZReXHWzxpfsubPNJzPrbnK18ieCrOkKInjA1d64tb1vI5gjL9D4WIPFWYjoZEJ+Nh4G08426neM8xwrd8B25y2AtLg+Ei//up6QtLpRa6kL2mv4kma/fAkp3MEdRa59tbxq/f0GJFezms/IPvilv38BCf/mzg== sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/correlate-a-message.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/correlate-a-message.api.mdx deleted file mode 100644 index bd1ae35bf5d..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/correlate-a-message.api.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: correlate-a-message -title: "Correlate a message" -description: "Publishes a message and correlates it to a subscription." 
-sidebar_label: "Correlate a message" -hide_title: true -hide_table_of_contents: true -api: eJztWE1v20YQ/SuLObUoLSqJm6a8KY7TKk0cwZbbg+3DkhyJmyx3mf2wrAr878VwKZGSLMcFeikgA4ZE7uzMzpv3huKswPG5heQGPqG1fI5wF0GONjOickIrSGDiUylsgZZxVgYjxlXOMm0MSu7QMuGY04wz69PNzsGtGs82RkIrJiyzPsvQ2pmXtGchpGQGnTeKuQLZTBjrWGU02TChrOMqQ/YVl83yOvgmcM4WwhWDWwURGPzm0bq3Ol9CsmouhcEcEmc8RpBp5VA5WuJVJUXWHCn+YinFFdiswJLTN7esEBLQ6RfMHERQGV2hcQItrSpeIn1uIzTtHY4sGLcsx5lQmDMRUns7+XSxzqw5bxvHOiPUHGo64QapP3D5eJA+moSKnvWB2fNKlZxxLx0kQCHuuRE8lSGVwylszCiPD1efL1iuM18SfBHwPBe0h8tJD5kA8i52yktJjsJ6HYFDxZUb5/vxKY2wymbasEUhsmKr6MKyqiVi/kiiO6EolnB0ueb1WQfdZaAK1DXZGbSVVjaA8nI4fBobYfv0c5pphUwbVmqDe8y18C+Idzhmr9K92O1qREVaoJT0+R0RCXerHhXP06RvIx1k5ZPn67wL5XCOBiKYaVNyF269PoUniTHtiDF+95wwnaJaDMYtBM85/wHwnug+z8nvO2wM/GvpeHqIgZXR9yLHnOXcceKh0o7dcynywyyrjE4llj99j20jNgmWLEfHhWSBCaGPkWEaOtnN5fsz9uvpz7/c/VA4V9kkjheLxcDMshPMhdNmoM08NrOM/snuxwGbFmiQlXzJUmRd92Adx5itMBMzkZGeXEi1OQwB+xyChtXVXlPY1MEbAbuPtRG7vhwzkaNyYrYUar4fequB8lR7l6SSq6/QFXQ/6G4U68uSmw3DtgPUEVjHne+lcYBHr17u+SZW/D6dTlhwwTKdY9M9XUHdMgSiJEqhROlLSE6HwwhK/hCuXg+HNfmkij8jE8XwoZJchcfPTjpChRYYvDWJrdXzH1VGGzEXu3EHW+JqSfwuZLTW06t9vl8r7l2hjfgbj+o5queonsPqOd3n+4WmH2leHaVzlM5ROoek8/NjP+TGyqEhElo092gYGqPNUUZHGR1l9KiM6ghKdIXOIYFK24Y63BWQQNy+j9m4NxSBCIKwaKK1Am8kJLAKqqmTOF4V2ro6WVXauDq+p5JszURoOahrzRqpMy6LEHm/erTQDHzarM546VXO2Rt2eX41Zb9xhwu+bNCkkNuu3wzfDB/1SqYHPI4m47Z1BO71+sHaLQn7UbfB+DmO6/qOgMy8EW55RdsCPClyg2bkqQAbTrTxGu90HYwgar+8XzPlw1/TptjUyy67Yd35Ay+roMQwXOtItjsR61b6RduaHvTe/oWa6eaYLbn2EyZHaGxAaDh4sU/kybjRY6bL0qumKat589bPeA/ATHrrCLgIpMiQ3uW7bNZmH8MK+zNEZC8GVPzA0HUvngtX+HSQ6TLOwrbNZyp1GpdcqLgNYeOz0afri3ejk4/js/OLq/OTF4PhwD24BmTSSslV/xzrsUU3xd3Nd9U9hv4nQ9+Wgg4fXFxJLhQVvsF01faJm/X0ym4TisbcQe03sFql3OK1kXVNt795NEtIbu46ntFVHUGBPEfTtJavDSHPAl4nUzoHmUtP59kb9NXRescoy7ByT9re9Xre5PPVlKTUTrVLndMewxc08eYLSOAWbgEi0A38jUqb+yuQXM09VTmB4Jf+/gEz9VEJ -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Correlate a message

- - - -Publishes a message and correlates it to a subscription. -If correlation is successful it will return the first process instance key the message correlated with. - -## Request - -

Body

required
    variables objectnullable
    - -The message variables as JSON document - -
- -The message is correlated to one or more process instances - -
Schema
- -The provided data is not valid - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Unauthorized - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Not found - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Internal server error - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/correlate-message.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/correlate-message.api.mdx new file mode 100644 index 00000000000..7833cf500bc --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/correlate-message.api.mdx @@ -0,0 +1,61 @@ +--- +id: correlate-message +title: "Correlate message" +description: "Publishes a message and correlates it to a subscription." +sidebar_label: "Correlate message" +hide_title: true +hide_table_of_contents: true +api: eJztWVFv2zYQ/ivEPW2YYrlt2nV6c9N0c9ekRuJuD0keKOlssaVIlaTiGIb++3CiZMlWnGbAXgY4QGDLPN7xvvu+k3zegONLC9ENXKC1fIlwF4Au0HAntJqmEEGijUHJHbYWAaRoEyMKMoEIZmUshc3QMs5yb8O4Stl2o2XCMacZZ7aMtztHt2q62BoJrZiwzJZJgtYuSkl7VkJKZtCVRjGXIVsIYx0rjCYbJpR1XCXIvuG6Xm6DbwOnbCVcNrpVEIDB7yVa906na4g29aUwmELkTIkBJFo5VI6WeFFIkdRHCr9aSnEDNskw5/TOrQuECHT8FRMHARSG4HICLa0qniO97iI07x2OLBi3LMWFUJgy4VN7N7u4bDOrz9vEsc4ItYQqgB5Sf+L68SB9NAkVvegDM/BKlVzwUjqIgELccyN4LH0qh1PYmlEeH68/X7JUJ2VO8AXA01TQHi5nPWQ8yPvYqVJKcuTXqwAcKq4c0W4/PqXhV9lCG7bKRJLtFF1YVjRETB9JdC8UxRKOLlvin3XQXXmqQFWRnUFbaGU9KC/H46exEbZPP6eZVsi0Ybk2OGCuhX9BvMMxe5XuxW5WAyrSCqWk1x+ISLhb9ah4niZ9E+kgK588X+ddKIdLNBDAQpucO//Rm1N4khjzjhjT988J0ymqwWDaQPCc8x8A74nu85z8fsBGz7+GjqeHGFgYfS9STFnKHSceKu3YPZciPcyywuhYYv7Lj9g2YTNvyVJ0XEjmmeD7GBnGvpPdXH04Y7+dvv717qfMucJGYbharUZmkZxgKpw2I22WoVkk9E92P4/YPEODLOdrFiPrugfrOMZsgYlYiIT05Hyq9WEI2OcQ1K9uBk1hW4fSiMFtbcK+XE2ZSFE5sVgLtRyG3mmgPNali2LJ1TfoCjoMuh/FlnnOzZZhuwGqAKzjruylcYBHr14OfBMr/pjPZ8y7YIlOse6eLqNu6QNRErlQIi9ziE7H4wBy/uCv3ozHFfmkij8jE8XwoZBc+dvPXjpC+RbovdWJter5jyqjjViK/bijHXE1JH7vM2r19GrI9w/axCJNUdX0ZM2zQysqLqVeYTo66uqoq6OuDurqdMj3S02Pb6U63pKO0jlK55B0Xj/2iDdRjFA2xEM0Rhumk6Q0hh4zMyG3Xy7a2M09y3PxqLWj1o5aG2qtCiBHl2macxXa1tThLoMIwubrnA17MxUIwKK5R0MTsw2URkIEG6+aKgrDTaatq6JNoY2rwnsqyc5IhZa9ulrWSJ1wmfnIw+rRQj0varI643mpUs7esqvz6zn7nTtc8XWNJoXcdf12/Hb8qFcyPeBxMpsyn6HnXq8ftG5J2I+69cbPcVxVdwRkUhrh1te0zcMTIzdoJiUVYMuJJl7tna69EQTNmw8tUz7+Pa+LTb3sqpv1nT/wvPBK9LO5jmT7A7VupV+0neFDb3gg1ELXx2zINUyYHKGxHqHx6MWQyLNprcdE53mp6qaslvXQgPEegIksrSPgApAiQRoFdNm0Zp/8CvvLR2QvRlR8z9C2Fy+Fy8p4lOg8TPy27WssdRzmXKiwCWHDs8nFl8v3k5NP07Pzy+vzkxej8cg9uBpk0krOVf8c7dSjN3DZyXbT3YT+JxPjhoAOH1xYSC4Ulb1GdNN0iZt29GV36URDdK/1G9hsYm7xi5FVRR9/L9GsIbq561hGV1UAGfIUTd1YvtV0PPN4nczpHGQuSzrPYEpYBe2OSZJg4Z60vet1vNnn6zkJqRmJ5zqlPYavaFzOVxDBLdwC0A8C5KHWaP35BiRXy5KqHIH3S3//ADwPcwc= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Correlate message

+ + + +Publishes a message and correlates it to a subscription. +If correlation is successful it will return the first process instance key the message correlated with. + +## Request + +

Body

required
    variables objectnullable
    + +The message variables as JSON document + +
+ +The message is correlated to one or more process instances + +
Schema
+ +The provided data is not valid + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-deployment.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-deployment.api.mdx new file mode 100644 index 00000000000..5b9de891e8d --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-deployment.api.mdx @@ -0,0 +1,65 @@ +--- +id: create-deployment +title: "Deploy resources" +description: "Deploys one or more resources (e.g. processes, decision models, or forms)." +sidebar_label: "Deploy resources" +hide_title: true +hide_table_of_contents: true +api: eJzVWUtz2zYQ/is7ODVTmlJSN03Zk2olrdrY9dhKe3B8AMmViJgEGAC0rdHwv3cWoERKom3FcdzpQSNRWOzj228XDy6Z5XPDogt2hkZVOkF2GTBVouZWKDlJWcQSjdziGMtcLQqUlgUsRZNoUZIIi5gfMqAkgtJQKI2gG3UGvsNwHkKpVYLGoAkgxUQYoSQUKsXcBDRnpnRhXoQf5TQTBoQBLoFbVYgEEp7nAYgQQ0BhM9TA87yjn2uE1HmAKamSzo0Z2AwLGgw/ShYwjZ8rNPZXlS5YtHSPQmPKIqsrDFiipKXQoiUrqtyKkms7IKcOUm45/W2SDAv3yy5KZBFT8SdMCIxSE15WoPGaG8c6olxrvtiBbZohxEJyvQAyAlaBh5pcb0IivNtYQ5hYAqdUxog4R5qS8Wv0kNuMS5cD8hsoArgRNoNUzGaoSVE7IHmBhp6drX4veMcHD6KwWHTjMlYLOWcBI8Xcsoh5TayuA2ZRcmmJQbvyu0B4abLujTq/2iRbFTqlbd4uOkhf1rUfNKWSxkP/ajikr11L/cwJ2QYJeFnmInE1MPhkaO7+DGhR+xMXHXEhLc5Rd+ES0r4+7MWjkuJzhXCFCxApSitmCyHnW8wIWR10zJndgCeUsbY84hWurhgo5i2Gbmf4jhCbah7jTEjhTW1bHrVWG+mOuX3V7sueuCzkygxMxgFwQzw3mEJa0awOaAFYNUfXSFx5ECDXqF1Hcm0I+EfZwN9AL9B1KOBgSkzETCRrY+naVyqROtiN4W+v+0Ei/PCqNzRujJjLFsWVr2Gvsa9g3NrQFS4CuMlEkgFPrCEsOfQDYqlbr9PrS9AV1wkvcL/UrWa4lgQzrYrGdlc33KwT6sw8qrdMxs2y0MNM17CEzXG9nBFVTv2wrzK/aO1J+ZX4w5zfVbxvWGkh27X0WUi/jspT/fqJmL0OokvtXVyegttrU19K8q3Q5d78Hh+feGqvuecV3ZuuJyV5y8UOrGd+IXXLxpcQrqu/gVJ3VMFc85Loxe0mbm7bQjsPNbs/co9vn5tP1Nu+2n/nYm+/GDfCd0Ww2zLG20na8Gb/7vHYdPaD8EAz+eXuZmJW3eSOWnrGZrIRUbezfH3tbuh+/kLe4sg3LJd+MB/bOzf99ml/6g1Dv8cb24d7S3ejXGuP1r0rPQk8XKcktS8B3CntWdZz5/vTlp9zvltt9Md/sSv1efkGDHMRfqP9aON0L0PfERP7h47RcndPsX3uvs82N0YlgltMWwp1G1ewFUm/6bPmzM38EfzwrlN3qpLKXWZUZa54CjMuckxDOFbuEG65yP2BvNTqWqSYgpCrOwBnAGKVLvw1xB3n9FKrOMfi+93z+nbxnnrJxi74miVGecHYW784e3cEPx/++NPld5m1pYkGg5ubm1DPkgNMhVU6VHo+0LOEPiT3IoRphhqh4AuIEXiauo0rz6HtBm31WeUCbNwGQtvHd38r8aP33MFUWuzQbgQfziY7Nwkbpt2cGa9y0sFjVdkozrm8Ym3eHyL3CExVFHSV1BB700AdMGO5rcxju8zv0+kpeBWQqBQ3zqBkiIIohBRFVbDocDgMWMFv/dPr4dAtlJTxPSKRgLdlzqWj1nY4QvobN6/NBSaksVwmT5UZpcVcbNvd7AwNicc+otrXX4E2U3RpWyrjqMNtxiI26F4SBcygphbNooslq3TOIrb0xVJHg8EyU8bW0bJU2taDa8rENdeCx7nnHw37olqRJVcJzzNvcDdpNNDdRB3xopIphzdw9vZ8Cr9xizd84a8zlN5S/Wb4ZtirlUTv0Dg6nYCP0FOu0wZWaqmee9V64X0U1/UlAZlUWtjFOU3z8MTINepRRbivqdDYc9rp2QuxoPnxbkWQP/6ZuhwLOVNuepPrXUdYZ8lmw/DlLq9OJ648ElUUlXQ9Us59p+edwJK8MpYCClguEqROHi2bLfLa7Hs/As1VFrwMKSmeOavWOBc2q+IwUcUg8dPW33Gu4kHBhRw0JszgaHT84WQ8Ong/OXp7cv724GU4DO2tdcETdQsuO3745aa9ud0OdtkuCf+LlxENKyze2kGZc+F2YA7OZVOxFxvXupdBU3UXbLmMucEPOq9r+vtzhXrBoovLtkjpqQ5YhjxF7Ur8irZf7MhDdDAl4ySeV+RE3+uOOlhNGiUJlrYjvnMxTmWw7jqnf51PidXNWxbClEWu/zm9QfuTnKzrfwFFFVRu +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Deploy resources

+ + + +Deploys one or more resources (e.g. processes, decision models, or forms). +This is an atomic call, i.e. either all resources are deployed or none of them are. + +## Request + +

Body

required
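
Deployment is a plain HTTP call; the sketch below shows one way to drive it from Python. The base URL, authentication, and the multipart part name `resources` are assumptions for illustration only — the request schema above and the response schema below remain the authoritative reference.

```python
# Hedged sketch: deploy a BPMN file via the deployment endpoint described above.
# The base URL, bearer token, and the multipart field name "resources" are
# assumptions for illustration; verify them against the generated schema.
import requests

CAMUNDA_REST = "http://localhost:8080/v2"   # assumed local cluster address
TOKEN = "<access token>"                    # placeholder credential

def deploy_process(bpmn_path: str) -> dict:
    """Deploy a single process model; the call is atomic, so a failure deploys nothing."""
    with open(bpmn_path, "rb") as bpmn:
        response = requests.post(
            f"{CAMUNDA_REST}/deployments",
            headers={"Authorization": f"Bearer {TOKEN}"},
            # One part per resource; several parts can be sent in the same request.
            files=[("resources", (bpmn_path, bpmn, "application/octet-stream"))],
        )
    response.raise_for_status()
    return response.json()  # contains the "deployments" array shown in the 200 schema below

if __name__ == "__main__":
    result = deploy_process("order-process.bpmn")
    for item in result.get("deployments", []):
        print(item)
```
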
+ +The resources are deployed. + +
Schema
    deployments object[]
    + +Items deployed by the request. + +
  • Array [
  • processDefinition object
    + +A deployed process. + +
    decisionDefinition object
    + +A deployed decision. + +
    decisionRequirements object
    + +Deployed decision requirements. + +
    form object
    + +A deployed form. + +
  • ]
+ +The document upload failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-document-link-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-document-link-alpha.api.mdx deleted file mode 100644 index f9784f197de..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/create-document-link-alpha.api.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: create-document-link-alpha -title: "Create document link (alpha)" -description: "Create a link to a document in the Camunda 8 cluster." -sidebar_label: "Create document link (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJzdV21v2zYQ/isEP7WYLMlp2nX65iXp5qHtgsTdgMX5QEsniy1fVJKKIxj678ORkl9iJ82GYh8WILBlHu+eu+fetKaOLS3Nbui5zhsJyll6G9ECbG547bhWNKNnBpgDwojg6gtxmjBS9NKEK+IqIGdMNqpg5C3JRWMdmHiu5irLMqUdzNWs4paAKmrN8Y4lTBEm6oqREphrDMRk6kjeGAPKiZZoJVpim7rWxnlhrkYSpDbt1rJ12kA0V6uK5xXqVNoRCUw5UmpDaqOLJkcPSGMh9mDmika0ZoZJcGDQ7TVVTALN6KB2WtCIcvS6Zq6iETXwteEGCpo508DD0MwqINNzoksfhQ02p32sYhpRm1cgGc3W1LU1WrLOcLWkXRdtjHtXtpa/NmDaPdMlE/a5tr2yAcH+UWm0fBrTbbAK1v2sixYlDkDkWjlQDs9YXQueM8STfLYIan2oWy8+Q+4w8EbXYBwHi6dwX3MDduIOYUS01EYyh7QwByPHJdBj3hc+LVVBUIKsKgjJ6B3v9aO7qhGCLQQECrsuoo47fNwk/XuuvlwFv2nXoYQBW2tlA9aTdIwfRwAMkfUmV8yS3NdKQWyT52Bt2QjRIobvFLXGiGPxOkQ2lOou/zHtov8u7o/FuQ/waZo+J6Y+nljEJeMCiph8wOwuwDEuLGEGsNDveAHF0IoG5shCF23sS/6R4NdGLwTIHw5J2Ec1IZdBsrdLAjmEWRIEF8H6zdW7M/LT6esfb19UztU2S5LVahWbMh9BwZ02sTbLxJQ5/qPcy5jMKjBAJGvJAggrCo42mSBb2omtIeclzwc6e9gEyQv+PZ0z4fQJshvDD2iekE9XU8ILUI6XLVfLQ9P+TskagTrYQjcuWwiGBG+I/1amTohtpGSmHbrYvoEuotYx1+y4QblysASzi58r9+rkaKL+OptdkqCC5LoAPxkcDqPeEDohueKykTQ7TdOISnYfnt6kaYc6kfFneKIw8wVTIVsfuMMVkdu89Y5xZR1T+fdiRhu+5A/t7hdhn8TnwaMulKEEV+kCx522PnVw6mU0GaowWW8nY5dgSVocIGDuhgHqWxJdh+LpsiRZV9q6Llvj8O6SO2TmjhmOHdgTicehyIbkETpnogoADknEAxyUg3PbXePq4npGfmEOVqz1QUWT+6rfpm/To1pR9BGNk8spCR6GFNxpC4NarO+jaoPwcxT7YWshbwx37TVeC+FZADNgJg3ysEmN3p7Xjs9BiEb9l3dDwvz258xzji3tajvIL+6ZrENB7vR/epKenI7G6Wiczsavs/RN9uo0Pj0Z/xXys9QeQJ89h64gr2Bs8D2Nx4eZejn1BZdrKRvlu65akhV3FWFHNkYaUcFzUNbj7FejQex9OCF/BItkHCOtIfeGZrvkrmoWca5lkodrm8+F0ItEMq6S3oRNziYfPn08n4zeT88uPl5fjMZxGrt758OHxSCZ2sURduD94fTC77AvHzq+3g6c/9nu3Gejg3uX1IJxhZnS7yShc9xsNmka0Wxvqw7N4zbqG8ANXa8XzMInI7oOfw5rL7aUglvsFo8svrvh/fc78FFXvkC7t47fMdGgFMVaHZrYP4T44qrfoF+Sb78tHEXV/8hUuwtpQLsT4+62i2gFrADjQQaBswBlNEM1WwUHayi+kIQbkzyH2j0pe7szOi5/v55hK+rfGKQu8I5hK3yHYSua0TmdI2Dtw+O7nP99TQVTy4YtUT7oxb+/AQDYBpk= -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Create document link (alpha)

- - - -Create a link to a document in the Camunda 8 cluster. - -:::note -This endpoint is an alpha feature. It currently only supports an in-memory document store, -which is not meant for production use. -::: - -## Request - -

Path Parameters

Query Parameters

Body

- -The document link was created successfully. - -
Schema
- -The document link creation failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-document-link.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-document-link.api.mdx new file mode 100644 index 00000000000..01738dfb698 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-document-link.api.mdx @@ -0,0 +1,54 @@ +--- +id: create-document-link +title: "Create document link (alpha)" +description: "Create a link to a document in the Camunda 8 cluster." +sidebar_label: "Create document link (alpha)" +hide_title: true +hide_table_of_contents: true +api: eJztV0tz2zYQ/is7ONlTipQTN015U22ndSdJPbbSHmwfVuRKREICDABa5mj43zsLkJZkybHb5ljN6EUs9/XtfrtcCYcLK9JrcaqzpiLlxG0kdE0GndTqPBepyAyho+H8vVRfRCRyspmRNQuJVJx4EUAopfoCTgNC3suDVOAKghOsGpUjvIWsbKwjE9+oG/VROwJXoANXSAtZYwwpV7agVdmCbepaG2cBFUg1qqjSpl1rtk4bimBZyKwAaUFpBxWhcjDXBmqj8yZjB6Gx5K2laaq0oxs1ZWOk8lpL9tAbuMayLhDmhK4xdHuQZLqqtSLlbEJoynaEWUbWJl4ufI56aRtX+SGgyqHCFmYEtpl9psxxKrIC1YJulFQwb1gYDJWElmzsPbpRIhI1GqzIkWEsVkJhRSIVQ6TnuYiE5ETX6AoRCUNfG2koF6kzDT1GY1oQnJ+CnvvEP6TLaQ9PLCJhs4IqFOlKuLZmS9YZqRai66IH4z67a8tfGzLtluk5lvaltr2ywYPto7nR1bd9ug1WybpfdN6yxI4TmVaOizddCazrUma+fJPPlp1a7erWHh5OvOFid5KsP5UVTfV7eUcbslI5WpARkZhrU6ELl94c73QBx84aRk6PSnlHO1nwwUsFlY39zXNsSifS12/G/Oq6SDjpSra52W2XIXbRdSxhyNZa2eDvq/ERf+26sW1yiRZCF+dgG1/F86YsW/biO2WuMeUudPsSNDDEZmJi0UWC7mtpyE7cPj0Pmc/R0YiTvFd57llI5R4GWBYUuMfb7PXH4qk89wk+Ho9fklOfTyaXOcqS8hg+cIXn5FCWFtAQE9CdzCkfGHBADmY6b2Pf9k8kvzZ6VlL1wy4I215N4CJI9nYhgANoIQjOgvXry3cn8PPxjz/dHhTO1TZNkuVyGZt5NqJcOm1ibRaJmWf8ZrnDGKYFGRroDPNcsk0sYQ072JoyOZfZAGfvNjB4Ib5nus2ffgPsxsgdmCfw6fIcZE7KyXkr1WLX9GZvCZzpxqWzEhngB+Cfq9QJ2Kaq0LRDD28b6CJhHbrGPksTr1/tLdTfptMLCCog0zn5ieVHYG+Ig6ikklVTifR4PI5Ehffh35vxuGOdjPgLIlFc+SWqUK2PwmEyWtetD0wq61Bl3wsZbeRCPra73YR9EZ+GiLrQhhW5QvP6UWvrS4cnXyqSoQttslqPxy7hnrQ8RcjcDVPUc5JYhe7p0iRZFdq6Ll3xUtEldwzNHRqJszIUJB+HLhuqp9QZlkXwYBdFPuBpOUS33nEuz66m8Cs6WmLrs8omt1W/Hb8d79XKok9onFycQ4gw1OAGLwxqucH3qg3CL1HsJ66lrDHStVd8W0jPjNCQmTQMxENt9Pa8dv4fhETU/3g3VMzvf0096Mxpl+tpfnaPVd135MbwHUYil+Nch9NQLLuOM4pkbIh0HB/tFubFue+vTFdVozzJqgUspSsA9+ylIhKlzEhZ71W/DQ1i78MJ/BkswlHMIIZKG7h1IV3RzOJMV0kWbnv4npV6llQoVdKbsMnJ5MOnj6eT0fvzk7OPV2ejo3gcu3vnk8W1X6Ha9CNs2tuz6MAvo4ePA1+t58v/G/p/3dD7cnd075K6RKm4AfutJ3DT9cO+zkSUbi3vgZ5uo55irsVqNUNLn0zZdXw5bNdMWrm0zEdP7NebkP77VXtvLF+o3dr677BsWEowGww0+Q9dPLjsF/VDeP6hZK9X/UVU7aZLg7cbOe5uu0gUhDkZ72QQOAmujKasZq1gZ9Pl555wxyTLqHbflL3dmE4Xf1xNmez6B5NK53yPwSU/KuFSpOJG3LDD2qfH86i/vhIlqkWDC5YPevn1N6xZZOg= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Create document link (alpha)

+ + + +Create a link to a document in the Camunda 8 cluster. + +Note that this currently only supports an in-memory document store, which is not meant for production use. + +:::note +This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change +in future releases. +::: + +## Request + +

Path Parameters

Query Parameters

Body

+ +The document link was created successfully. + +
Schema
+ +The document link creation failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-document.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-document.api.mdx new file mode 100644 index 00000000000..6bfc6514516 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-document.api.mdx @@ -0,0 +1,67 @@ +--- +id: create-document +title: "Upload document (alpha)" +description: "Upload a document to the Camunda 8 cluster." +sidebar_label: "Upload document (alpha)" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/is7OCVTilTSNE15U52kVadJPbHSHmwfVuRKRAICDABaZjX6750FSD0s23HTHHqIZ2xTwr73248A1sLj0on8XLw0RVuT9uIyEaYhi14aPS1FLgpL6Gm7noiSXGFlwwIiF+8bZbAEhLKXAG/AVwQnWLe6RHgBhWqdJ5te6Av91ngCX6EHX0kHRWstaa86MFp14NqmMdY7QA1Sj2qqje12lp03lhJYVbKoQDrQxkNNqD0sjIXGmrItOCxoHQVveZ5r4+lCz9gZ6bIxUntWRQ3nqJoKYUHoW0uXj7LC1I3RpL3LCK3qRlgU5FwW5OLfUS/t0rp8DKhLqLGDOYFr5x+oCMkXFeolXWipYdGyMFhShI5cGiK60CIRDVqsyZPl6q+FxppELkKC01IkQnJtP7VkO5EIS59aaakU+QKVo5stmFUE05dgFqHuh9XigNrYooNFb1KRCFdUVKPI18J3TQzASr0Um02yjWlQ+VphbQNKYboILWysuZIllQkgaFqx0koqxWVdkmYsUpnCWUOFXHRSL7l5dC2d5+dB2JJrlQepw6q1xoK84RmVJSy7qMvNuLcClzFBcv5nU3YsscvX25YSURjteSTytahb5WWD1mcLY+tRiT7YPLJuAkq4/5anzEtyvLqQio5jSAQbQy9yMZcabSc2iajJ42D+sN5THcV5AHBuWn+QPbf7/iD6dGZB6DiW4+b2CsCSN9uccqyc1tuAooeYY7wNZlgzmKDrRlpyE39veUr0NPKyplsN82qYVZaAVUX6EBe9j+DPyb/3w5Xa05Lsvi+p/fNnt/ph3SO4Sw3zzvfWi9Z5U58elP1mUw7tngQN2HXquNCJwLKULI9q3zSDdLNJhJee0bWl+DcDgnhxh+nziMJLLrouTMklZmTv4e0AIQKbRski4C374IwWG/5hk64x2sXsno6fHCN1tl+gFbqeEKgE1wbKXbRKdZzZ3ogdufsX81XEd1G6LZp/EMiHikEp+etaavTGpjBRK+wcOApsdjGYvxAcMum25mr2X4aCDrz+oDm4i8oDgvbY+AuNpd9Y5BuLfGUWubn4jhZkSRckIiU8G48/wwL9HmWBUvHL/g1vXUryKJUDtLTdI3AlOPCBZGBuyi6+yu/gisaauaL6u2POOAxnAqdRsvcLsZaADqLgPHo/f/f6BH569sOPl48q7xuXZ9lqtUrtohhRKZkfjF1mdlHwL8s9TmFWkaVhq7ir835DXNjeyGLYPvdhh7GI+d0/d3cx2hZxrZVHuJjA+3dTkCVp32+tjlwHnQW2im0EQsjnCvVHsWv758ZyAq6ta7TdgLpDB4Ef0bfusxPz/dNbJ+bX2ewUogkoTEnhNBCOF70jTqKWWtbMzM/G40TUeB0/PR+PmVVDxx+QCW88G4U68uONdKSGeofbkJjUziPPwdfpjLFyKW/6TQ9GsAfxy5hR/0quyVeGD3ONcQE66CuRi2wYP8c7YbJXw3GktUrkYh1HZZNn2boyzm/yNZ/ONtkV9+EKrcS5iujj5ThSA1SUKVBV0d1xy3hhn6l3h8V3r85m8At6WmEXSsguD02/GL8Y32qVRe+wODmdQswwAm6PBAazPM23M3IQfojhcGpwVLRW+u6M1WJ55oSW7KTlqm+B0PsL1vlzFBJJ//B6gMdvf81Ch6VemKDed/o4EO4KWRcjH6dPjlF1Og3DUZi6bnVgSL2ElfQV4C0HdpEIJQvSLuC3Pw8OYr/HFfgzeoQnKTclImcgxqX0VTtPC1Nn/XZo+3+uzDyrUeqsd+Gyk8mb929fTka/T09evT17NXqSjlN/7UPyDNwa9V4c/cXD9g3yKBzQH9/Meb17L3y7q/iyu4oer56ufdYolJonKDR63TPJudgxyWXSs8G5WK/n6Oi9VZsNfx3vDZhfSumYOu64Odhv2Zffbdwa9UfqDq5ZrlC1LCX4wuM/R/X/utq4pwAHdzq7GlzuWF3k53xwqQhLsqFnUfMk1mAU9vc73dvuP7ikUWlSFNT4PfHjo+Pl3kvq9I+zGdNgf+1Sm5J12HCwm+weOcjN5h95Mz/J +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Upload document (alpha)

+ + + +Upload a document to the Camunda 8 cluster. + +Note that this currently only supports an in-memory document store, which is not meant for production use. + +:::note +This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change +in future releases. +::: + +## Request + +

Query Parameters

Body

required
    metadata object
    + +Information about the document. + +
    customProperties object
    + +Custom properties of the document. + +
+ +The document was uploaded successfully. + +
Schema
    metadata object
    + +Information about the document. + +
    customProperties object
    + +Custom properties of the document. + +
+ +The document upload failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-element-instance-variables.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-element-instance-variables.api.mdx new file mode 100644 index 00000000000..5664d9a1006 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-element-instance-variables.api.mdx @@ -0,0 +1,56 @@ +--- +id: create-element-instance-variables +title: "Update element instance variables" +description: "Updates all the variables of a particular scope (for example, process instance, flow element instance) with the given variable data." +sidebar_label: "Update element instance variables" +hide_title: true +hide_table_of_contents: true +api: eJztWN1u4zYWfpUD3iTBKraTzbSzAraAm6a7aaczQeLpXsQBSklHFmcoUiUpO4YhoK+xr7dPsjikJP/IRnvRq90JECQiz//Pd0humOMLy+JndiexROVAKOu4SpG9RExXaLgTWt1nLGapQe6wpbtvyX7mRvBEomURy9CmRlTEwGL2scq4QwtcSnAFwrKjBJ0Dh4obJ9JacgM21RXCea4N4CsvK4kRVEanaG1vTgS51CvAAysvYCVc4eUvxBJVrwUy7vhorp4qTEW+9hSHzCCUX/8F9336Ede/kH28RIdmNFcsYv0nBWvDFC+RxWzIyCImyPuKu4JFzOCvtTCYsdiZGg9DNCsQPuOaAnLUPqeh9lE8CGCuyapZISykXEES9g9D5kWfcws6cVwozCCrjVCLLYHPqNDqIgJtgIcQzlVrRwS2TgvgFjhYNEtBFnH7Gc4t4unA6RDUTzqBEq3lC7wIIbRpgSVn8Ya5dUXRE8rhAg2LWK5NyV1Y+uqGNc1LCB1a963O1sRzGMlUK4fK0RavKilS78n4k6XIbobKdPIJU0eZNFTWTqCl3T6o9LGfnR+ePryHwAYGK4MWlaP47SfDabDoulpqI/Kf3/5tQ12PWMR4lgkSyuXDjvLgx755TcSkTrkcWnOfezVOg+c7MIIbhBLNAjOwzojUyTUIRcQFgpfYdRm3YH1PCMwgWZ/MIyXtgyvQrIQ9qo7iyBfcYRbqtMK2ky1wlXljufN8unZoSm0daIWjuZqrdxgilGplRYbGk+VaSr2iALcoEBPprECDXqFb6U7B2dWZV3J2fUY97j2jNWFDJ3BDfRQ81rknA2o2zHNMnVgeoBHxdKYbUnsFf/8GNjBnudZzBjFcQzNX191qwo1fvaLVuZqqrk3bmg2gNAwq9dLZ9Vm0o31XyRtoIu9WmzB0c9XmGyTyJbYl5V2tVVpwRQknDp59qq3r96/PKCd7pka9ojfQ+CR8u4YMc15LFwWDe7XEnHNJed8qXAkpCWp6i73Bc+WT3avdpdoJU0CAttYTrSVy5SeG189ir63ZmTiPmKNBmkODTpiC6TY9xKWFtqi6Wq6tLyfuelNo0viZIyUYTLXJLBi0tfTNnBtdgiMo7XWP4KfaOmL9BiYgcir1pcgw2/PiJHpFrBRKlHXJ4qum2Z0Bzztw8xIxJ5wkSU/oujH6GAqINU3gtJVWNoDT9eRmGIvZXluuqFVCKWYjwpKbyeQ4T+eRH5PUNko7WHIpMsKrE9haGZ1ILP8yxNjDBD0ESsjQcSE7EOUWAmGCGeHl8+P3t/C3mzdfv5wXzlU2Ho9Xq9XI5OklZsJpM9JmMTZ5Sr9Ed+G7mKCOryk9W1yFLap38JZCC3+t2UB528vgiZEQdvvBQXiqFrtpro0YnHam8PHxHkRGIyJfd1NiT/VuvTOe6NrFieTqMyWqLYWh0kMtti5Lbvojw76CJmLWcVfb3x2yf70eyKay+Ods9gBBBKQ6I0w2oTlaRaPd6r6ZTCJW8tfw9dVk0pBMyvgf8EQBvlaSK19ah+4IBaU22NaPd6w/l/45mdFGLMSh3hFrtrlgbRF/FzwKHfnmWENNFc1aNFSHaIw2oNO0NgYzWBVC9iezTnc7JUItfum1L732pdeGvdZErERXaLp5Vtr60qE7VczG7cHqsjPTjjfDs1YzXu5cTen+0t3daiNZzDaho5p4PN4U2rom3lTauGa8pHTtXQtoO3ReV1H+pFQEq4aZpQ26IHYe3/KyVhmHt/B49zSDf3CHK772kSaV+6LfTt5Ojkol0hMSpw/3EDwMdbmDFZ1YavqjYgPxHxHsb2UW09oIt34ithCeBLlBM60pOX29tPq8dH/m80Qsav/5vquiH/4184VAOPe4vfHdhRvA4Q1tez0Kx9Oj58WJr+Bce2va+hr6RUlGY0MgJqOrYS0/3PuWTHVZ1srjslqEgzLfiVMqa0uvBCxiUqSorDe6fR/oyN6FHfg5aISrEeU4FGIHxwvhijoZpbocp4Gt/5tInYxLLtS4VWHHt9OfPr7/bnr57v727v3T3eXVaDJyr87Hktql5GrHjvAWM3xgWJ56vdls59L/2ENOW58OX924klwo6hifiU0LMM9sADAsYvHRt56983zAiWe22STc4kcjm4aWf63RrFn8/LIl9ziUCUv/Z30xn8zA+WN7hbiA/68no6PJahe5CgmQNX2xiH3G9fEnuealiViBPEPjAx8Ib0N4L2ckbito8JjURB3HNE2xcido9w5shJT99Hr48DQj4GsfskqdEa/hK3of5CsWszmbkwPap95jql/fMMnVouYLog9y6ee/gBeNbg== +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import 
ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Update element instance variables

+ + + +Updates all the variables of a particular scope (for example, process instance, flow element instance) with the given variable data. +Specify the element instance in the `elementInstanceKey` parameter. + +## Request + +

Path Parameters

Body

required
    variables objectrequired
    + +JSON object representing the variables to set in the element’s scope. + +
    { \"foo\" : 2 }\n2 => { \"bar\" : 1 }\n\nAn update request with elementInstanceKey as '2', variables { \"foo\" : 5 }, and local set\nto true leaves scope '1' unchanged and adjusts scope '2' to { \"bar\" : 1, \"foo\" 5 }.\n\nBy default, with local set to false, scope '1' will be { \"foo\": 5 }\nand scope '2' will be { \"bar\" : 1 }.\n","type":"boolean","default":false}}>= 1`"} schema={{"description":"A reference key chosen by the user that will be part of all records resulting from this operation. Must be > 0 if provided.\n","type":"integer","format":"int64","minimum":1}}>
+ +The variables were updated. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-group.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-group.api.mdx new file mode 100644 index 00000000000..630ede47a51 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-group.api.mdx @@ -0,0 +1,57 @@ +--- +id: create-group +title: "Create group" +description: "Create group" +sidebar_label: "Create group" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/is7OLVTWpQTJ3V5UxUndZuHx5bbg+0DCK5ExCDAAKBljYb/vbMAqYdluTnkVmlGI5JY7LeP74MILJnnM8eyG/bBmqZmdwkzNVrupdHnBcuYsMg9xsGEWfzWoPO/m2LBsiUTRnvUni55XSspwrz0qzOanjlRYsXpyi9qZBkz+VcUniWstoTiJToa1bzCDSvnrdQzlrACnbCyJp8sY5MSoZCuVnwBNAPMFHyJoHEOM4pvwNo2YV56RV5CyOMQ/WWMmrUtGVh0tdEuQr8aHtPPLlLwCHPuIFagANcIgc5NG6UWA5b8qOQD0F+4eD6Me1z0efZxdLkmvV+pPc7QsoRNja24j4/enuyvRsy/K8fJcPhSBYRpVAHaeMhXIQxu9SdjEQr0XCoH3CLU1jzIAguQOkTbVxlyUywGt3p/wWprcoXVL7uF245pBBfRssOFWFDgDqJhHtFvLt+P4beTN7/e/VR6X7ssTefz+cBOxREW0hs7MHaW2qmgL9n9PIBJiRah4gvKkheFJEyuYN0qcDUKOZUCvAkJdmEDdSHm93Kf4+guyVdNa6zc4fwIri/PQRaovZwupJ7tQoc5U94o8sFz0/gsV1zfs3X3/0tZI3BNVXG7oto2QJsw57lvNtLYQ7rXr55V7R+TyQVEFyBMgTA1FnwpXQ9ESVRSy6qpWHYyHCas4o/x7u1w2JJP6vh3ZKIBH2vFdaDW03SkhmrN25CY1M5zLX5UZ4yVM/kUd3td6kj8LmbUa3DPKtStt6C4uHfwwJUsgDe+JNQoH9JkCIIr98KqdBDZQWT/e5G93uX7e2NzWRSoAz1XepMu/OVxpcwci4OuDro66Gqfrt489wI50kBVtsRDtNZYMEI01mIB81Kq4J7epnvsTniHF8WD1g5a26e19qnvuKGLOzWqDPrS0Ka9Ni7wivuSZSwN444lzKF9QEvb/SVrrGIZW0YRtVmaLkvjfJsta2N9mz5Qhx64lTxXkZc0HMXWk0gZwVUZsXabSQOb+/QxrxpdcDiFy7OrCXzgHud8EYpLkNuuT4enw2e9kukej6OLc4gZRipuLA+9W9L5s26j8fc4bts7KqRorPSLK5oWy5Mjt2hHDZV8RZEOL3in+2jEku7ifU+cP/+ZhN7T0na5PmM5e+RVHYUZz0h6zgVCTk1A6uiyGzM1EK2LSQ4Hx7vUvDgPChOmqhodllk9g7n0JfCNGgjVOE+5J0xJgXR0sA6oN/sYR+DviAjHA+pfJFm/us6kL5t8IEyVijht9Zsrk6cVlzrtIFw6Hn26/vxudPTxfHz2+ers6HgwHPhHH+pEBK+43oxjWwpbia7a4fHRp7XiUlMFQ3DLTiU3rFPJXdIx/YYtlzl3eG1V29Ljbw3aBctu7tbCoLs2YSXyAm2Q1T2d5LBx/Ac7mhAumauG8HfOidqknzESAmv/ou3dhsIvvlxNiEbdQVxlCppj+ZwO6ficZeyW3TJGp3nkITA0PF8yxfWs4TOyj37p8y+f9OiH +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Create group

+ + + +Create group + +## Request + +

Body

+ +The group was created successfully. + +
Schema
+ +The group could not be created. +More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-mapping-rule.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-mapping-rule.api.mdx new file mode 100644 index 00000000000..f98a7f937e4 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-mapping-rule.api.mdx @@ -0,0 +1,57 @@ +--- +id: create-mapping-rule +title: "Create mapping rule" +description: "Create a new mapping rule" +sidebar_label: "Create mapping rule" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/SuYPbVTWpQTJ3V5UxWndRs7HllOD7YOILgSEYMAA4CWNRr+984SlERZsuOZ5tCDNKMRPxb79d4DqV2C5zMHyS1c8LKUesZspRAmEZgSLffS6PMMEhAWucfWZkQmEWTohJUl2UACw8aCcaZxzoqOszsNEVj8VqHzv5tsAckShNEetadDXpZKiiZS/NWRqyU4kWPB6cgvSoQETPoVhYcISkt5eYmucaO4LC55gR1T563Us530xjkyzQtkZsp8jqxZyryhVHtQR8HXF66qVzp7INNnvNWhYGkxo9Zu0twKM4nAS68oTqexoY+j0C+o6+DMlUa7UPSb/jH97GbUbTqbc8cCaBlzlRDo3LRSatGjHH5M99t4f+Nifz73uFj3p82kmyJl0gaQ2uMMLUQwNbbgPlx6f7LG5f+J8QvoBbxa+E76/VcgJkylMqaNZ+m6YS+gVVqTKix+2UVtO86AXQVLlqHnUrGAJuOOBcMUMyY1ux19HLLfTt79Ovkp9750SRzP5/OenYojzKQ3tmfsLLZTQV+y+7nHxjlaqmJBKfMskxSTK7bhCXMlCjmVghpHXWzTZtT8XrM1vEyycHcXrDVRKit3sBuwm9E5kxlqL6cL6vBO6GbNlFeKfPDUVD5JFdf3sMH1ewwZMFcVBbdrlm8HqCNwnvuqU8YzRH/7Zi/7/hyPr1hwwYTJkE2NZT6XbhWIiiiklkVVQHLS70dQ8Mdw9r7fr8knIf6KSjTDx1Jx3VDraTlSs8JYbPnTFCa181yLH4WMsXImn8bd1lhL4g+hopWu3u7XVfu0Ic6J1WNpZ3fMUEvMenf6YlOcY9w2CTzILMjCN+6CnFlqskUg7UGUB1EeRLlflCf/XZQHhR0UdlDYMwp7t+91cqAZddkSD9FaY5kRorIWMzbPpWrc05+AVexWjYen2UFrB609p7U6ggJ9bmgCUhrXUIf7HBKI2yfXET25HETg0D6gpUnKEiqrIIFlkEudxPEyN87XybI01tfxA2HxwK3kqQoMpNtBViu6KCO4ykPIXdjoRveP7pAXlc44O2Wjs+sx+4N7nPNF00YKue36tH/a3+uVTJ/xOLg6Z6HCQLrORrByS4re6zYYv8ZxXU+okaKy0i+uaVloT4rcoh1U1Pk1Gdp4jXc6D0YQtQcfVxT5659xgzJtYqPNFOrskRdlkGBnwrChWHdUsLrasHJqmiRazuyWQ9iidaH+fu94l59X543MhCmKSjd7rZ6xufQ54532CFU5T22JQEmBNE1IlqBDmiuzT+EO+xIisuMeQRv4t9piZ9LnVdoTpohFWLb+TZVJ44JLHbchXDwcXNxcfhgcfTofnl1enx0d9/o9/+ibFpIECq67eYQXue5r3NN6O8O+78wIW2A9Pvq4VFxqanhTy7KV3S1sy24StdK5heUy5Q5vrKpruvytQruA5HayURqd1RHkyDO0jU7vaXIFw5De0ZjCk3nAfGcyVkerFQMhsPQv2k46O8fV5+sx8bKdfRYmozWWz2kuyueQwB3cAdDIlTw0lG+uL0FxPav4jOyDX/r8Cym2fIY= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Create mapping rule

+ + + +Create a new mapping rule + +## Request + +

Body

+ +The mapping rule was created successfully. + +
Schema
+ +The mapping rule could not be created. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request to create a mapping rule was denied. +More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request to create a mapping rule was denied. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-process-instance.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-process-instance.api.mdx index 905b3c36ffe..b63d15221f2 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/create-process-instance.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/create-process-instance.api.mdx @@ -5,7 +5,7 @@ description: "Creates and starts an instance of the specified process." sidebar_label: "Create process instance" hide_title: true hide_table_of_contents: true -api: eJztWEtz2zYQ/isYnJIpTcmOk7jsyZGdVs3LYyvJwfIBBFciYhJgANCyRsP/3lmApGiKcj1J21M94xEpALvf7n77gDbUsqWh0TW90IqDMURIY5nkQG8CmoDhWhRWKEkjOtHALBjCZEKMZdriY7ufqAWxKRBTABcLAQkpvMRwLmcpNG8kgYWQAkUSq0hpAD+4E+3Ot/I4kyTuygNhU9CkNEIuibCGlFJ8L4HcwnounzFDNNhSS0hIvCZnUGRqTTQYVWoO5nlAVHMW1by5+PCxxTQ9c0YxcgfaCCXDuZzLrwx1LJR2+7nKiwwc7NrQoucwEsNCaahRoB6G6svMzuUqBUnYigk72coRhoBkcQZJOJc0oBq+l2DsG5WsabRxr0JDQiOrSwgoV9KCtLjEiiITnKGU0TeDwdlQw1PIGT7ZdQE0oir+BtzSgCoJnxY0ut5dscJm0Ea2ZsC0tueyhrN+B2taBT96eprQ6uZpm2kVULhn6CCDhrxZD7HmFpx7TJnnTK9bkYQNRKQVcNaefwfrkAb0jmUloJyhDTQ6PHpx/BJ3aYERQjhVVQV7IE3PfgbRNHHsa7j3KLhpQiOK4A5evnp9glv9KRod7qCtHGINplDSeI8ejcf48TCvZ0NkXjFTZ2WCiH6YfIVWBWgrvP5hZw8BuoV1P9E6Dl+lgqcOY2kg6VWQvi0+u2pgQlpYgqYBXSidM+u/enWM3Bv09hC4fvH4OaDhXDZQu0iN1UIuB3F9aYI+BK5mxH/ovRdHHZRNTu+NbF21RQLSYmHXDdKabjsIfkOEMXiwqxQ03IF2xdVVjbmUAIkZyjYk0TMIlyGZ4Hs2XHaeP5kgFiSTdh8r/GqHD/sMCoeC3E1eypLEhYoh5DZ/fB94qPg0y5yu9ji5E0bEGTZSt6CVssRwVXTV1umJ9eFvCrMvHtTXkuNHysedSCAhCbMMG5tUltyxTCQh2vZy6Nwpzg4WtGQZAa2VJorzUmsXZJG1TGw6dh3uR6pRoVWcQf7LblXqaSYXfidJwDKREe8OwjBLcGMMCbrv+vLthPx6/PL1zbPU2sJEo9FqtQr1gh9AIqzSodLLkV5w/Md9z0MyQ36SnK2Rsdswkm0dbCYajrSuM86Bwdg8oOKeIupXNz0KdfhaakF3eEI+X06bpFs3Pn2g2p1ZsDJDGSxWpY3ijMlbumXJrtK+lroJdqpPR0EVUGOZLTtm7C8pfdnIsz9mswviRRCuEqjHM2EaRWhELqTIy5xGx+NxQHN2799ejccVysSIP8ESSeC+yJhkvZnPmSMkyXHW89KcYe3c/M9ERmmxFH294YOMrUl85i2q230ONlU4JBTKOOowm9KIjupkOmhgGhpQAxqbhRsNS53RiG58ylTRaLRJlbFVtCmUttXo7oj2ShQu+9RqKJMpzrLUq90NHS5Ilrf3hAnLS5kwckIuz69m5HdmYcXWzpWo8qHok/HJeFAqbt0j8fRiSryFnnidYtCIxaweFOs3P0VwhcOtAV5qYddXeMy7JwamQZ+W6P2WELU+Jx3f/SYa1A9vG5r8+XXmIo2F7HJ7Kzj3s/H+QWrcmwG7HWvLRqwljtWXsAANjrNjl5naYuHXJcdVzwvIIIeHEtDi3l2GRguWGQjoAixPv2wxXDeHbtr7zUzkoEpLo7HLmoVyDmq60I6ruzMuHYeHu/lzMXVlgKs8L6XrBXJJVsKmhHVCx7PSWAxZQDPBAZtatKHIyI7a936F1AMWOQyRdj43mhawFDYt45CrfMT9sfYzzlQ8ypmQo1qFGU1OP3z+eHZ68H46Of94dX5wGI5De+9bL6ZozmQXh5/D+gND3+bNtgP+fyv/l2/ldepauLejImNCYjFxjNjUxfWa7hbXm6AukNd0s4mZgc86qyr8+nsJeEe8vtnmKr5VAU2BJaBd1rk7Lp34IB/MEEJ7K9y9gOHl3J845RwK++jem06PuPh0NcPqU//ikKsEz2i2wl8j2IpGdE7n1JWMuiJs/PcbmjG5LNkS93u5+PcXnXJx1A== +api: 
eJztWEtz2zYQ/isYnJIpTcmOnbjsSbGdVs3LYyvJwfIBBFciYhJgANCyRsP/3lmApGiKSj1J21M94xEpLHY/7H77gDbUsqWh0Q291IqDMURIY5nkQG8DqgrQzAolpwmNKNfALNRy00YsoAkYrkWBcjSiZ07KECYTYizTFh9brUQtiE2BmAK4WAhISOH1hXM5S6F5IwkshBSoklhFSgP44QG4/a0+ziSJu/pA2BQ0KY2QSyKsIaUU30ogd7Cey2fMEA221BISEq/JORSZWhMNRpWag3keENXsRTOvL99/aDFNz92hGLkHbYSS4VzO5ReGNhZKO3mu8iIDB7s+aNFzK4lhoTTUKNAOQ/NlZudylYIkbMWEPdvqEYaAZHEGSTiXNKAavpVg7GuVrGm0ca9CQ0Ijq0sIKFfSgrS4xIoiE9zFb/TVYHA21PAUcoZPdl0AjaiKvwK3NKBKwscFjW52V6ywGbSR7cX/qoazfgtrWgU/unua0Or2acK0Cig8MHSQwYO8Xg+x5g6ce0yZ50yvW5WEDUSkVXDe7n8L65AG9J5lJaCeIQEaHR69OD5BKS0wQginqqpgD6Tp+c8gmiaOfQ33vgvOpSuCOzh5+eoURf0uGh3uoK0cYg2mUNJ4jx6Nx/jxOK9nQ2ReMVNnZYKIfph8hcZSY4W3P+zsIUB3sO4nWsfhq1Tw1GEsDSS9CtI/i8+uGpiQFpagaUAXSufM+q9eHiP3Br09BK5fPH4OaDiXDdQuUmO1kMtBXJ+boA+BqxnxH3rvxVEHZZPTeyNbV22RgLRY2HWDtKbbDoLfEGEMHuwqBQ33oF1xdVVjLiVAYoayDUn0DMJlSM7wPRsuO8+fTBALkkm7jxV+tcOHfQcKh4LcTV7KksSFiiHkNn98H3hseJJlzla7ndwLI+IMG6lb0EpZYrgqumbr9MT68DeF2RcP6mvJ8XfKx71IICEJswwbm1SW3LNMJCGe7WRo3wRnBwtasoyA1koTxXmptQuyyFomNh27DrcP1p56VGgVZ5D/sluXerbJpZckCVgmMuIdQhjmCQrGkKADb67enJFfj09e3T5LrS1MNBqtVqtQL/gBJMIqHSq9HOkFx3+Uex6SGTKU5GyNnN0GkmwrYTPTcCR2nXMODEbnERn3lFG/uumRqMPYUoud8W1CPl1Nm7RbN159ZNrtWbAyQx0sVqWN4ozJO7rlya7RvpW6DXbqT8dAFVBjmS07x9hfVPq6kWl/zGaXxKsgXCVQD2jCNIbwELmQIi9zGh2PxwHN2YN/ezkeV6gTI/6Ek0gCD0XGJOtNfe44QpIcpz2vzR2sna//mcgoLZaibzd8lLM1ic/9ieqGn4NNFY4JhTKOOsymNKKjOp0OGpiGBtSAxnbhhsNSZzSiG58yVTQabVJlbBVtCqVtNbo/or0ihcs+tRrKZIqzLPVmd0OHC5Ll7U3hjOWlTBg5JVcX1zPyO7OwYmvnSjT5WPXp+HQ8qBVF92icXE6JP6EnXqcYNGoxqwfVeuGnKK5wvDXASy3s+hq3effEwDToSYnebwlR23Pa8d0L0aB+eNPQ5M8vMxdpLGRX23vBhZ+O949S494U2O1ZWza2t78rWIAGx9mxy0xtsfTrkuOq5wVkkMNjDXji3m2GRguWGQjoAixPP28x3DSbbtsbzkzkoEpLo7HLmoVyDmr60I6ru1MuHYeHu/lzOXVlgKs8L6XrBXJJVsKmhHVCx7PSWAxZQDPBAdtatKHIyI7Zd36F1CMWOQyRdj43mhawFDYt45CrfMT9tvYzzlQ8ypmQo9qEGZ1N3n/6cD45eDc9u/hwfXFwGI5D++CbL6ZozmQXh5/E+iND/8ybbQf8/17+L9/L69S18GBHRcaExGLiGLGpi+sN3S2ut0FdIG/oZhMzA590VlX49bcS8JZ4c7vNVXyrApoCS0C7rHO3XHrmg3wwQwjtvXD3CobXc79jwjkU9ruyt50ecfnxeobVp/7NIVcJ7tFshb9HsBWN6JzOqSsZdUXY+O83NGNyWbIlynu9+PcXAB+Akg== sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-role.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-role.api.mdx new file mode 100644 index 00000000000..62319b3c6e3 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-role.api.mdx @@ -0,0 +1,57 @@ +--- +id: create-role +title: "Create role" +description: "Create a new role." 
+sidebar_label: "Create role" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/Ss7OLVTWpQTJ3V5Ux2ndZsPjy23B9uHJbgSEYMAA4CWNRr+984SpCVHlptDjvKMR6Sw2Lcf70HkrkTAuRfZtbiwmsRtImxNDoOy5qwQmZCOMFC3loiCvHSq5kWRiZNuCRAMLcBZTaMbIxLh6GtDPvxui6XIVkJaE8gEvsS61kp2vtMvnn2shJclVchXYVmTyITNv5AMIhG140iCIs+rBivasPLBKTPfCmlaEhTK1xqXwDvAziCUtI5QtG0iggqanXBWMYmLGLNoW1535GtrfAR+NT7kj20c9gcL9BBLVIBvpCTvZ43Wy1iKH5Q7I/1Ny+fDuKPlkOUQSMw0GbwqE2hOTiRiZl2FIX719mhnLWL2fTGOxuMX8pe20QUYGyB/xB/dmI/WERQUUGkP6AhqZ+9VQQUo04U6lBhyW7xcrNrZXFP1y3bRnoY0gfNo2eNCLCagh2iYR/Tri/cn8NvRm19vfypDqH2WpovFYuRm8oAKFawbWTdP3UzyP9v9PIJpSY6gwiVniUWhGBM1rNsEviapZkpCsF2CfdjAPYj5vdzjuLrN78eWNU5t0X0CVxdnoAoyQc2Wysy3obs9M2w0+8DcNiHLNZo7se79/4lqAr6pKnSPPHsK0CbCBwzNRho7KPf61bOC/XM6PYfoAqQtCGbWQSiVH4A4iUoZVTWVyI7G40RU+BDv3o7HLfvkjn9HJgboodZoOmp9m44yUK152yWmjA9o5I/qjHVqrr7FfXom9SR+FzMaJLjrCIrHFmiUdx7uUasCsAklo0b5sCa7IFD70V5ke5HtRbZTZK+3+f7eulwVBZmOno96U777yUOt7YKKva72utrrapeu3jz3/DgxwFV2zENyzjqwUjbOUQGLUunOPT9LD9i98PYPinut7bW2S2ttIioKpeXX9tr6jjoYSpGJlN/VvEiEJ3dPjl/3V6JxWmRiFWXSZmm6Kq0PbbaqrQttes89uEenMNeRebwc5TTQRFuJuoxQ2+3ihc2X8BOsGlMgHMPF6eUU/sBAC1x25WPIp66Px8fjZ72y6Q6Pk/MziBlGsm0cAINbVvKzbqPx9zhu21supGycCstL3hbLkxM6cpOGK/5Igh6v88730Ugk/cX7gRp//TvtusuH18V6gHL6gFUdpRcHIAOrOsrNbIfUE2I7Zm4gOR+THI8Ot8l3ftZpSNqqakx3kJo5LFQoATdqIHXjA+eeCK0k8WxgHdBg9iGuwD8REQ5H3L9IsuH8nKtQNvlI2iqVcdvjZ65tnlaoTNpD+PRk8vHq07vJwYezk9NPl6cHh6PxKDyErk7M7wrNZhxxGuWemVRtDKF2Da36dgV6CGmtURmucBf8qhfRtYgiuk16IVyL1SpHT1dOty1//bUhtxTZ9e1aN3zXJqIkLMh1qrvjOY44ifEcTBmWzXXD8FtDojYZdkykpDq8aHu7of/zz5dTZlk/hKtswXscLnhAhwuRiRtxIwRP+9hDHDPx9yuh0cwbnLN99Mt//wEycfDn +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Create role

+ + + +Create a new role. + +## Request + +

Body

+ +The role was created successfully. + +
Schema
+ +The role could not be created. +More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-tenant.api.mdx new file mode 100644 index 00000000000..20d3f77beb9 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/create-tenant.api.mdx @@ -0,0 +1,56 @@ +--- +id: create-tenant +title: "Create tenant" +description: "Creates a new tenant." +sidebar_label: "Create tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/Ss7OLVTWpQTJ015Ux2ndZumHltpD44PILgSkYAAgw/LGg3/e2cBUpJDOc0hR3nGI5JY7Mfb9yBxN8zzpWPFLZuj5tqzu4yZFi330ujLihVMWOQe+9WMVeiElS0ts4Kdx0UHHDSuwEejCcuYxc8Bnf/VVGtWbJgw2qP2dMnbVkkR3ecfHTnZMCdqbDhd+XWLrGCm/IiCorWWkvESXVyN/imrraXzVurlKK95jRC0/BwQ8MGj1Vz12cHla9ZlTPMGv80NWYJZgK9xW2DXpRKlxYqw2yZ2lzEvvSKXCbEE0HWCg3WPN3obMD5wrdEu1fhsekof4zz69FfcQWpJBS4Igc4tglJrQv27wvwnrg8nsgX0E64HYIaM/ECTPoLUHpdoWcYWxjbcp0cvzyKCh5FKWBBUXcbOptPDSbTW3MsKK6i45yAdaOPhnitZfQWI1ppSYfPTGJDHAWZwlSyhQs+lggQUcAfJsMQKpIbb6zfn8MvZi5/vfqi9b12R56vVamIX4gQr6Y2dGLvM7ULQP9n9OIF5jRah4WsoEXhVSYrJFexaAK5FIRdSgDcR3D5tIEwnH/QO3af6F1fH3N62IFg5ovoM3l9fgqxQe7lYS70ch457Fjwo8sFLE3xRKq4/sV0v/09QM3ChabjdEudxgC5jznMf9sp4gkLPnx0U6+/z+RUkFyBMhbAwFnwt3RCIimiklk1oWHE2nWas4Q/p7uV02pFP6vg3VKIBH1rFdaTWl+VIDY2x2PMnFia181yL79UZY+VSfhl38khXPYlfp4oGQT0f8/2NsaWsKtSRntAf3oOquFJmhUddHXV11NVXdHU25vs742Fhgq4GXTkTrMD4Ha63a0ddHXV11NUTunpx6AfgTAOhHH+EorXGghEiWIsVrGqponv6YTzE7r/QEhePWjtq7ai1sda6jDXoa0Pv/K1xkTrc16xgeXqrcyxjDu09WpoXbFiwihVsk4TSFXm+qY3zXbFpjfVdfk9duOdW8lIl7tFyEtRAFGUEV3UKNm4YLey/fZ/zJuiKwyu4vriZw2/c44qvI4AU8rHrV9NX04NeyfQJj7OrS0gVJrrtHQGDW9LyQbfJ+Fscd90dASmClX59Q9sSPCVyi3YWCPMtDfp40TvdJyOW9RdvBnL88e889peOr+vd8OXigTdtL77t8GTHrTQFGe4jERcmGvc0GddBTUXrUuHTyemYkleXUVnCNE3Q8XjVS1hJXwPfw0Wo4DzhkTElBdIrf7EZEhrM3qYV+CdFhNMJ9TQRbzhVl9LXoZwI0+Qibdt+lsqUecOlzvsQLj+f/fX+3evZydvL84t3Nxcnp5PpxD/4iB2xvuF6P484k9hNNR5VujfWenIO1nfR44PPW8WlJpBj/pteXcPwyNHsKCnklm02JXf43qquo8efA9o1K27vdoKiuy5jNfIKbZTjJ5rXsPOU0smcApO5CpTAaBzUZcOOmRDY+q/a3u0dDVd/38yJfv1krzEV7bF8RVM/vmIF+8A+MEZTRPIQmR2fb5jiehn4kuyTX/r7D0boEAc= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Create tenant

+ + + +Creates a new tenant. + +## Request + +

Body

required
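
A minimal client-side sketch of the call follows. The path, base URL, and the body fields (`tenantId`, `name`) are assumptions for illustration only; the request schema above is authoritative.

```python
# Hedged sketch of the tenant creation call. The path, base URL, and the body
# fields "tenantId" and "name" are assumptions for illustration; consult the
# generated request schema above for the authoritative field names.
import requests

CAMUNDA_REST = "http://localhost:8080/v2"   # assumed local cluster address

response = requests.post(
    f"{CAMUNDA_REST}/tenants",
    json={"tenantId": "tenant-blue", "name": "Blue tenant"},  # assumed fields
)
response.raise_for_status()
print(response.json())
```
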
+ +The tenant was created successfully. + +
Schema
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found. The resource was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/create-user.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/create-user.api.mdx index 29d10a7197a..c76c57f4bce 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/create-user.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/create-user.api.mdx @@ -1,11 +1,11 @@ --- id: create-user -title: "Create a user" -description: "Create a user" -sidebar_label: "Create a user" +title: "Create user" +description: "Create a new user." +sidebar_label: "Create user" hide_title: true hide_table_of_contents: true -api: eJztWE1z2zYQ/SuYPbVTWpQdJ3V5UxWndZukHlluD7IPILgSkYAAgw/Liob/vbME9WFJ9nQ6PsozHpHYxS7e7nuQZpfg+cxBNoFbhxbuEzA1Wu6l0VcFZCAsco+tLQGL3wI6/6spFpAtQRjtUXt65HWtpGi3pV+c0bTmRIkVpye/qBEyMPkXFB4SqC0l8RIdWWvu3NzYYsvTeSv1DJoEgkOreYUHjc8asOJSHbA0CXjpFS0RpFHEAw0ZCJy0WEDmbcB2wdVGu3jIs/4ZfRTohJU14YQMxiUyOiCbc8dipQrmghDo3DQotejdaUheq06U6U9cHD7GV1wwM2W+xPVBQmxaF1Rqj7N2YWpsxX1cene+V5Rhu33UgafaNAmc9/svwBcmqIJp41m+Tt+705+MRVag51I5xi2y2poHWWDBpG5Puqowy03xcq1qa3KF1U/7NXt6pAG7jp5dXhZrybhj0TGP2SejD0P2y/nbn+9/KL2vXZam8/m8Z6fiBAvpje0ZO0vtVNA/+f3YY+MSLbKKLwglLwpJOblimy4xV6OQUymYNy3A7tiMehDxvdziaN2l7VbLgpWQ7GG+HV0xWaD2crqQerafut0z5UFRDJ6b4LNccf0VNr3fT7qbxYWq4nZNs6cJmgSc5z5swXiGcm/O9mITkX4fj69ZDMGEKZBNjWW+lG6ViEBUUssqVJCd9/sJVPwxvr3r9xuK6Q+qfg+JZvhYK65bau3CkZpVG962wKR2nmvxWp0xVs7kbt7eEx12JH4fEa0keHpYgt2tTJyL2mN8cysFzYMvjZXfj5I8SvIoyVeW5Jv/IckCtTyK8SjGoxhfVYxvD/1EHWhGVbbEQ7TWWGaECNZiwealVG14+rW+yt0Jt3dU2lFpR6UdVFqzG3u4/Q1HpUFfGpod1Ma1xOK+hAxSMjtIwKF9oKdssoRgFWSwjCJqsjRdlsb5JlvWxvomfaAOPXArea4iL8kcxbYikTKCqzKm2m8mGWhMsAI55FXQBWcXbHR5M2a/cY9zvmiLSymfhr7oX/QPRiXXZyIOrq9YRBipuHU9rMKSzg+Gjc7/JXDT3FMhRbDSL25oWyxPjtyiHQSq+JoiXb42Or1HJ0i6hw8r4vzxz7jtPV1to82o5/KRV3UU5mZUs+HdZkKzWdt97+YxWxMaqaemPWLHs32w1Hm0Llan3zvd5/T1VStNYaoq6PZ+1jM2l75kfKt4QgXnqWgJKCmQZhrZcnXCldvHaGF/x4zstEeNj+xcXcsz6cuQ94SpUhG3rT9zZfK04lKnXQqXDgefbj+/H5x8vBpefr65PDnt9Xv+0bcFJmFUXG+fY0dDT5CuG+nx0ae14lK3ozGrYk+o2ROI8rpPOolMYLnMucNbq5qGlr8FtAvIJvcbRdFbk0CJvEDb6vErzZdgGL/6TsaUltxVoPR7w6smWe0YCIG1f9H3futmuP7rZkz86waJlSloj+VzGjLyOWRwB3cANIykCC212/UlKK5ngc/IP8alv38BGs8qbg== +api: 
eJztWUtT4zgQ/isqnXZrTRxmmFnGtywDu+zOg4KwewAObbkTa5AljyQnpFL+71st2SSQBOYwx1CVwla3+vl9stNZcg9Tx7Mbfu3Q8ruEmxoteGn0ecEzLiyCxyBLeIFOWFmTkGf8JIgYMI1z1ji0A55wi98bdP4PUyx4tuTCaI/a0yXUtZIiWE6/ObKw5E6UWAFd+UWNPOMm/4bC84TXluLwEh1Ja3BubmxB10+DGJfIeikzE+ZLfIyls+m8lXrK24STQEOF28300lfN7DbxQ9uxAqm27w+iVwy0CffSK1qitlzGgvOWBFR9abHgmbcNhgVXG+1iFd8MD3dnzubgWOx2wVwjBDo3aZRaDG41T35WI8nTP7jYHsY9Lvrc+0CaCLzOqNQep2FhYmwFPi69P9ooSoTmZZc81aZN+NFwuOn3WkOukHnTuVxV/lZ/NhZZgR6kcgwsstqamSywYFIHvb66LDfFy3WqrckVVr9t1utpOCN2ETU7vyzWkYFjUTGP3m8uz07Yh6N3v9/9UnpfuyxN5/P5wE7EARbSGzswdpraiaAP6f06YOMSLbIKFixHBkUhyScotuoQczUKOZGC6kEJdmEzqn/M7+X2RunyGWTX2tVYuXGOjNj15TmTBWovJwupp5uuw54JNIpsQG4an+UK9D1f9X3T6XMvrqkqsI8Qe+qgTbjz4Ju1NHbA7e2bDdsE3r/G4wsWTTBhCmQTY5kvpesdURKV1LJqKp4dDYcJr+Ah3r0fDluy6buD4ZVMNMOHWoEO0HqejtSsWuE2JCa186DFz+qMsXIqn/sdPOFgB+KPMaOefjtOn+6RwRSIe8dmoGTBoPEleY30IW6GIEC5wZ5ke5LtSbaTZG838X5mbC6LAnWA5yPfpGPaeAZKmTkWe17tebXn1W5efdiG9/jiLH0ZrE3lDPXqawQoi1AsGD5I593+9XDPsD3DdjPs3bZvZyPNqMqWcIjWGsuMEI21WLB5KVUwT19Ue9/do23PtT3X9lzbxbU24RX60tB4rzYuQAd8yTOe0rPL8YQ7tDO6ym6WvLGKZ3wZadJmabosjfNttqyN9W06ox7MwEqao4SWkTjSqYeJMgJUGV1ttosE64OzE6gaXQA7ZpenV2P2J3icwyKUj1w+NX08PB5utUqqOyyOLs5ZzDCCbe0A6M0Sk7eajco/Yrht76iQorHSL65oWyxPjmDRjhqq+CMIOn/BOt1HJZ50F2c9NP7+bxy6S4fX5WrUevoAVR2ptxqVrpC1mnuu1p7fd1PJtTml1BMTQuyQtJksdR6ti9UZDg43UXtxHsgnTFU1OpzAehpfmGCteEI1zsdhp5ICaWSXLfsIe7VPUcL+jR7Z4YAaH9HZH7xT6csmHwhTpSJue/yfK5OnFUiddi5cejL6fP3l4+jg0/nJ6Zer04PDwXDgH3woMBGjAr0eR5wPNltG4Wtz7u1T8a7LHh98WiuQOkyjrYoNIyTc8Mi9u6Tjzw1fLnNweG1V29Ly9wbtgmc3dyu60V2b8BKhQBvIek+zVX4SozkYk1tSVw253xjctkm/YyQE1v5F3bu1Y+Pi69WYwNlN+StT0B4Lc/oFAOY847f8lnP6MYEsBNyH9SVXoKcNTEk/2qW//wHn3XQR sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null @@ -25,32 +25,34 @@ import Markdown from "@theme/Markdown"; import OperationTabs from "@theme/OperationTabs"; import TabItem from "@theme/TabItem"; -

Create a user

+

Create user

-Create a user +Create a new user. ## Request -

Body

required
+

Body

required
The user was created successfully.
Schema
-The user could not be created. +Unable to create the user. More details are provided in the response body.
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
-The request to create a user was unauthorized. -More details are provided in the response body. +The request lacks valid authentication credentials.
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
-The request to create a user was denied. -More details are provided in the response body. +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +A user with the given username already exists.
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
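For the reworded create-user responses above (400 invalid data, 401 missing credentials, 403 forbidden, 409 duplicate username), a hedged client sketch follows; the path and the body field names are assumptions rather than values taken from this excerpt.

```typescript
// Illustrative sketch: create a user. The /v2/users path and the field names
// (username, name, email, password) are assumptions; check the generated spec.
async function createUser(baseUrl: string, token: string): Promise<void> {
  const res = await fetch(`${baseUrl}/v2/users`, {
    method: "POST",
    headers: { "Content-Type": "application/json", Authorization: `Bearer ${token}` },
    body: JSON.stringify({
      username: "jane.doe",
      name: "Jane Doe",
      email: "jane.doe@example.com",
      password: "a-strong-password",
    }),
  });
  // Documented outcomes: 400 invalid data, 401 missing credentials,
  // 403 forbidden, 409 username already exists.
  if (res.status === 409) throw new Error("A user with this username already exists");
  if (!res.ok) throw new Error(`Create user failed: ${res.status}`);
}
```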
diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-document-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-document-alpha.api.mdx deleted file mode 100644 index 96313bd0955..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/delete-document-alpha.api.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: delete-document-alpha -title: "Delete document (alpha)" -description: "Delete a document from the Camunda 8 cluster." -sidebar_label: "Delete document (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztV01z2zYQ/SsYnJIpTSqp06a8aWynVSfNeBylPdg+QOBSRIIPBlhY1mj43zsLkpIsy6077amTg0YSsXjvLfaDiw1HsQy8vObnTkYDFgO/zXgFQXrVonKWl/wcNCAwwarBhtXeGYYNsDNhoq0Ee8ukjgHB5zf2xpZlaR3CjZ03KjCwVeuURaYCE5YJ3TaC1SAwesjZDJmM3oNFvWbO6jULsW2dx2Ss7IkB4/x6xx3Qechu7KpRsiFM65AZECTLedZ6V0VJylkMkCcxN5ZnvBVeGEDw5O6GW2GAl3yEnVU844q8bQU2POMevkbloeIl+giHRzJvgM3OmavTKWy1oWNVOqycZzzIBozg5YbjuiWugF7ZJe+6bEufnNlxf43g1w/Ia6HDc9kT2E7Dw0UK2V+ruiXe0DobIND668mEvh5TbzFXIgxcFQtRSgihjlqvc95l/HRy+nfbFTZJ5FLdgSWPCJDCWbtoqzyFTTqLYJGgRNtqJQVBFa13Cw3mu8+BcDd7bj0knLLL3pJVgEJp5hafQSJLyslwARVTll1fvTtjP52++fH2RYPYhrIoVqtV7mt5ApVC53Pnl4WvJX3I7mXO5g14YEas2QKYqCpFnEJTCrbgUUFgoQWpaiUpKuTpIJvR2ff+DVHoZVGabjfvYnQYq4zXzhuBvOTRK36YHlP26WrGVAUWVb1WdvmYOu2pRdSEIRYuYrnQwn6hwKFCfZT0kCVEY4Rfj3n4kKDLeECBcc8NrizCEvy+fmXx+9ePsClNfpnPL1kPwaSrIFU3UkMZiMgJo6wy0fDydDLJuBH3/b8fJpOOMCniz/DEMrhvtbAptQ7dUZYZqqseLTmmbEBh5X8VGefVUh3y5tQnxljwIYnPe4+6jtbeHKvPKfVMBE95CN47z5xM/bViq0bpBE91OnJTp4GA32rtW619q7Wnaq3LuAFsXEUDQ3rfpXkCG17yYnydhWKzGyY6etOCvxunjeg1L/mmr5yuLIpN4wJ25YYmna64o7DcCa/EQvfJSMt9hY2Zo50UOj0+FkFaoJli9Gw3mF1dfJyznwXCSvQvZqJ8CP128nZyFJVMn0CcXs5Y72Gff3s9YYSl4j4K2xs/BzhNJQFk9ArXH2lbfzwLEB78NFIItnkx8CV0+t8b8Wz48W7Mll//mKeAK1u7tH0I/GMhFBXwoVc+yV89TrLLWaoV6YyJNjVMu+wHG3FkOOYZ10qCDSmdhxlwNHvfr7Dfe0b2Kqeg9Jkz9smlwiYuculMIftt2++FdovCCGWLgSIUZ9PfPn04n568n51dfPh4cfIqn+R4j8n51gU0wu7pGMb87Xj2Ik3qLw993uxeE/+Tm8GQPgj3WLRaKEsJnc59M1T59faeEHjGy71Lw202lOo132wWIsAnr7uOHvezPBV/pQLV9RPT/P55/pvB/qgXX2D94JZxJ3QkK051NTacfyjyxdVwO3nJnnMNOqpreCjsel/UqHfvfLvbLuMNiAp8ktkbTKWEFve2PjmukJ/b3n1+8f5ifsG77k8MLyok -sidebar_class_name: "delete api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Delete document (alpha)

- - - -Delete a document from the Camunda 8 cluster. - -:::note -This endpoint is an alpha feature. It currently only supports an in-memory document store, -which is not meant for production use. -::: - -## Request - -

Path Parameters

Query Parameters

- -The document was deleted successfully. - -
- -The document with the given ID was not found. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-document.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-document.api.mdx new file mode 100644 index 00000000000..6d0c334ab44 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/delete-document.api.mdx @@ -0,0 +1,58 @@ +--- +id: delete-document +title: "Delete document (alpha)" +description: "Delete a document from the Camunda 8 cluster." +sidebar_label: "Delete document (alpha)" +hide_title: true +hide_table_of_contents: true +api: eJztV0tz4zYM/iscnpKpInm32XarmyfJtu5sM5mstz0kOdAUZHGXDy0JJvF49N87oCTbSZw2fRxz8EMiiA/gB4DAmqNYBl5e8VMnowGL/CbjrgUvUDk7q3jJK9CAsFnPeAVBetWSAC/5aVpmglWDBKu9MwwbYCfCRFsJ9p5JHQOCz6/ttT13CAwbgQwbFZiM3oNFvWLO6hULsW2dx8CEZcoeGTDOr7a6AzoPGbtrlGyYCsw6ZAYEoTrPWu+qKMkwFgMktLIsrUO4tnMCA1u1TlmkrcKyK6HbRrAaBEYPNweFdKZ1FiyGAoTXqyMhJYRQJLn++2iQDrmpDpmwFTNixRbAQlx8AYkMHZONsEu4tsqyOpIw86BBBAh5suja8oy3wgsDCJ7Of82tMECnPXg6q3jGFR1wK7DhGffwLSoPFS/RR3jMwrwBNjtlrk4HvzkudKynL+cZD7IBI3i55rhqCSugV3bJuy7bwKfz3WJ/i+BXD8BrocNL0ZOyrQ0PFylK/tqqG8INrbMBAq2/nUzo5yn0RuedCANWxUJM1NVR61XOu4wfT47/brvCJhm5VLdgySNSSCFWu2irPNEmnUXKg3LNRdtqJVOmFK13Cw3muy+B9K533HoIOGUXvSSrAIXSzPVRkywnwQVUTFl2dfnhhP10/O7Hm4MGsQ1lUdzd3eW+lkdQKXQ+d35Z+FrSh+QOczZvwMMYjqKqFGEKTWnRgkcFgYUWpKqVJFbI08FsRmff+zew0JtFYbrZvOXoMVcZr503AnnJo1dPSsSUfb6cMVWBRVWvlF0+hU57ahE16RALF7FcaGG/EnGoUO8FfYwSojHCr8Y4fAjQZTygwLjjBlcWYQl+135l8fu3T3RTmPwyn1+wXgWTroJUcVIJG4DICaOsMtHw8ngyybgR9/3TD5NJRzqJ8Rd4Yhnct1rYFFqP3VGWGcqrXltyTNmAwsr/ixnn1VI9xs2pToxc8CGIT3uPuo7W3u3LzymVcQRPcQjeO8+cTCW/oiKuk3rK0xGbKg0EfM2111x7zbXncq3LuAFs3LY9S/0ENrzkxXidhWK9bSY6umnB347dRvSal3zdZ05XFsW6cQG7ck3NV1fcEi23wiux0H0w0nKfYWPkaCeFTq/3MUgL1FOMnm17wcuzT3P2s0C4E/3FTJAPVb+fvJ/s1Uqiz2icXsxY72Effzs1YVRLyb1XbS/8EsWpKwkgo1e4+kTb+uNZgPDgp5Eo2MTFgJe003MvxLPhz4cxWn79Y54IV7Z2aftA/FNDiBXwobd8kr95GmQXs5Qr0hkTbSqYdtk3NmJPP84zrpUEG1I4Dz3gKPaxX2G/94jsTU6k9JEz1smlwiYuculMIfttm9+FdovCCGWLASIUJ9PfPp+fTo8+zk7Ozj+dHb3JJzneY3K+dQGNsDt2DJPFpj07SP334WOf19tr4nUY+bfDyBCxCPdYtFooSzmUqF4PheVqM5oEnvFyZ065yYbqcMXX64UI8NnrrqPX/fhA9aZSgUrJMwPELoX/ZZbY68VXWD0YbG6FjiTFKZXHGvcPjTy4HAaiQ/aSyWuvXcNLYVe7Ro327pxvd9NlvAFRgU9m9gJTKaHFna3Pdkjk5+a6OD37eDY/4133J+uLijc= +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Delete document (alpha)

+ + + +Delete a document from the Camunda 8 cluster. + +Note that this currently only supports an in-memory document store, which is not meant for production use. + +:::note +This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change +in future releases. +::: + +## Request + +

Path Parameters

Query Parameters

+ +The document was deleted successfully. + +
+ +The document with the given ID was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
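A sketch of calling the alpha delete-document endpoint described above; the `/v2/documents/{documentId}` path and the `storeId` query parameter are assumptions here and should be checked against the generated spec, and the endpoint is only backed by the in-memory store as noted.

```typescript
// Illustrative sketch: delete a stored document (alpha endpoint).
// Path and storeId query parameter are assumptions, not from the spec excerpt.
async function deleteDocument(baseUrl: string, token: string, documentId: string, storeId?: string): Promise<void> {
  const url = new URL(`${baseUrl}/v2/documents/${encodeURIComponent(documentId)}`);
  if (storeId) url.searchParams.set("storeId", storeId);
  const res = await fetch(url, {
    method: "DELETE",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (res.status === 404) throw new Error(`Document ${documentId} was not found`);
  if (!res.ok) throw new Error(`Delete document failed with status ${res.status}`);
}
```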
diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-group.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-group.api.mdx new file mode 100644 index 00000000000..ba907e25239 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/delete-group.api.mdx @@ -0,0 +1,52 @@ +--- +id: delete-group +title: "Delete group" +description: "Deletes the group with the given key." +sidebar_label: "Delete group" +hide_title: true +hide_table_of_contents: true +api: eJztV01z2zYQ/SuYPSVTmpRTJ3V509hK6tbNeGy5Pbg6gOBKRAwCDABa1nD43zsLULIsyY0PPfqgofCx+3ax7+GjA88XDvI7+GJN28AsAdOg5V4afVFCDiUq9BgHEyjRCSsbGoUczsOYY75CtqAZbCl9FZvyATW7x1UKCTTc8ho9WgLqQPMaIYdg8QeuIAFJ3hruK0jA4vdWWiwh97bFXchpheSVmfkWqjcshklgTlRYc8g78KuGcKT2uEALCcyNrbmPXZ9OoO9nBOcaox06svgwOqHPPuSQHXcDUMlcKwQ6N2+VWqXQJ3AyOj5sSwmh80xxce/YA1eyZLz1FWovRVhoJiyW1OTKUQrCaI/akzveNGqYlTXWFArrn7458t1tZfocdMyu4kxWoudSMVN8Q+FZiJ4mFlgyqdnd9ecz9uvJx19m7yrvG5dn2XK5TO1cHGEpvbGpsYvMzgX9aN77lE0rtMhqvmIFMl6WkjC5Yo0l2niJjrkGhZxLQWWhGg1hMypH+o+GZF2YGBbxY2P8VLZN+Zy3Ui+2q9dauUfFMbu9vmAyrOJ8JfViHzrYzHmryAcvTOvzQnF9T8Xz0quDoLsorq1rbjf8ew7QJ+A89637Ift+/rDnm6jy23R6xaILJkyJbG4s85V0ayBKopZa1m0N+clolEDNH2Pr02jUk0+q+Csy0QwfG8V1JOBOOlKz2lgc+BMSk9p5rsX/VRlj5ULu4qbQP9UCBhKfx4z6vg8i+2+BbrafYW8JktXGs7lpdfmmrTdtvWnrJW19HI0O8F0zWmVLPERrjWVGiNZaLNmykiq4p3NwjT0cdpGLb1p709qb1va11idQo6/M0/06XJJ9BTlk4ehyWbc+wnq606J9WN+eW6sghy6Kps+zrKuM833eNcb6PnugijxwK3mhIg9pOIprTRplBFeh+1DxaICu6Oukznjd6pKzU3Y9uZmyL9zjksc7L0E+d306Oh0d9EpTX/A4vrpgMcNIva3tYO2WdH3QbZz8Gsfhtu9QtFb61Q2ZxeUpkFu045ZWf0OJAS94p3acBMnw5/OaKL//PQ21lnpugvlQ8/1AqCpoXYx8lB7v8+vqIshEmLpuddgr9SLeaPhWYkK1zlNCCSgpULvA5OFJtZ52GUfYXxGRHadUlMic9Ra5kL5qi1SYOhPRbPMtlCmymkudDRAuOxv/efv1fHx0eXE2+XozOTpOR6l/9CH5xjhfc70VR3wYxlvYbqLd07Hw6gfkUBWPjz5rFJeaeBLS6Qbd3MXnpIME8s3DcpYM5L+Driu4w1ur+p66v7doV5DfzZ60EsRVSkf/S8jnXLnd1+d26O+uh3fqe/bjN+nB+IdOrldBsaqlFiRwj6vt13E/6xOokJdoQ4hxeCwENn7L8MUDlmi/2W3OJ5eT6QT6/l+/9H04 +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Delete group

+ + + +Deletes the group with the given key. + +## Request + +

Path Parameters

+ +The group was deleted successfully. + +
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The group with the groupKey was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
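To make the documented delete-group outcomes concrete (success, 401 missing credentials, 404 unknown groupKey, 500 internal error), a minimal illustrative helper, with the `/v2/groups/{groupKey}` path assumed:

```typescript
// Illustrative sketch: delete a group by key. The path is an assumption;
// the handled status codes mirror the responses documented above.
async function deleteGroup(baseUrl: string, token: string, groupKey: string): Promise<boolean> {
  const res = await fetch(`${baseUrl}/v2/groups/${groupKey}`, {
    method: "DELETE",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (res.status === 401) throw new Error("The request lacks valid authentication credentials");
  if (res.status === 404) return false; // no group with this groupKey
  if (!res.ok) throw new Error(`Unexpected status ${res.status}`);
  return true; // deleted successfully
}
```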
diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-mapping-rule.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-mapping-rule.api.mdx new file mode 100644 index 00000000000..37eb4e5c5fc --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/delete-mapping-rule.api.mdx @@ -0,0 +1,55 @@ +--- +id: delete-mapping-rule +title: "Delete a mapping rule" +description: "Deletes the mapping rule with the given key." +sidebar_label: "Delete a mapping rule" +hide_title: true +hide_table_of_contents: true +api: eJztV01v4zYQ/SvEnHZRRXK22W2qm5F427TZRZA47SH1gaJGFjcUqSWpOIag/74YUf520BToMQdDJjWcx9F7jx8teD53kD7AF17XUs+ZbRTCLAJTo+VeGn2VQwo5KvQ4xNxSSAQ5OmFlTTGQwmUf4ZgvkVVbudhC+rLvncsn1OwRlzFEUHPLK/RoCbwFzSuEFIaBf+ISIpCUtua+hAgsfm+kxRxSbxvcx56WSHmZKQ7hvWFh8oTqRIkVh7QFv6wJUGqPc7QQQWFsxX3o+nQGXTcjVFcb7dDRiA+jM3ocIu9Wy92AlzPXCIHOFY1Syxi6CM5Gp8dTUHnoPFNcPDr2xJXMGW98idpL0bPAhMWcmlw5qkQY7VF7SsfrWg1RSW1NprD66Zuj3O1WwbugY3YTIlmOnkvFTPYNhWf97Ckww5xJzR5uP1+wX88+/jJ7V3pfuzRJFotFbAtxgrn0xsbGzhNbCPpR3PuYTUu09FWWLEPG81wSJlestqQpL9ExV6OQhRTEDjE2TJsRK/E/GqIVP2FapJf14A17axadt1LPt0lsrDxQ6Jjd314x2X/FYkmMHUD3YwreKMrBM9P4NFNcPxJ5Xnp1FHQfxTVVxe1ajbsAXQTOc9+4fxXhzx8OcpNUfp9Ob1hIwYTJkRXGMl9KtwKiIiqpZdVUkJ6NRhFU/Dm0Po1GHeUkxl9RiWb4XCuugwD3ypGaVcbioJ++MKmd51r8X8wYK+dyHzeGbsMFDCK+DBV1Xdeb7FU+Xa1KmyWn9642nhWm0fmbyd5M9mayl0z2cTQ6onfN6Ctb0iFaaywzQjTWYs4WpVR9etoQV9jDrhe0+Oa1N6+9ee3Qa10EFfrSbE7h/enZl5BCMmxeJ7SluaTd7GUdHXbRPq3O141VkEIb3NOlSdKWxvkubWtjfZc8ETVP3EqeqSBIeh1ctlKPMoKrvvsYi/SCDvGr6i541eics3N2O7mbst+4xwUPp2CC3E19PjofHc1KoS9kHN9csVBh0ODWurBKSwY/mjYEvyZxfw1wKBor/fKOhoXPkyG3aMcN0bDWxoDXZ6d2CIJo+PN5pZg//p72pEtdmH74QP7hRIgVtC7MfBSfHgrt5qr3izBV1eh+0dTzcLjhW4UJ1ThPBUWgpEDtekkPl65V2HV4w/4KiOw0JlKCclZr5Vz6ssliYapEhGHrZ6ZMllRc6mSAcMnF+Mv918vxyfXVxeTr3eTkNB7F/tn3xdfG+YrrrXmEGyTjOwe1/YrbzUbxX6+cA0sen31SKy416aYvrx0M9QA7hoII0q0b6SwaXPEAbZtxh/dWdR11f2/QLiF9mG1M1Lsul47+55AWXLn9a+t2Ke9uhwvue/bqy+zReoZOrpe9o1VDLYjgEZe79+tu1kVQIs/R9nMNAWMhsPZbQ1/ci8kY64XpcnI9mU6g634AaJKdyw== +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Delete a mapping rule

+ + + +Deletes the mapping rule with the given key. + +## Request + +

Path Parameters

+ +The mapping rule was deleted successfully. + +
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The mapping rule with the mappingKey was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
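An illustrative call for the delete-mapping-rule page above; the path is an assumption, and the error handling simply surfaces the documented 401/404/500 responses.

```typescript
// Illustrative sketch: delete a mapping rule by key.
// The /v2/mapping-rules/{mappingKey} path is an assumption; verify against the spec.
async function deleteMappingRule(baseUrl: string, token: string, mappingKey: string): Promise<void> {
  const res = await fetch(`${baseUrl}/v2/mapping-rules/${mappingKey}`, {
    method: "DELETE",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!res.ok) {
    // Documented failures: 401 unauthenticated, 404 unknown mappingKey, 500 internal error.
    throw new Error(`Delete mapping rule failed: ${res.status} ${await res.text()}`);
  }
}
```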
diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-resource.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-resource.api.mdx index 5f845193cd1..41f29794c3f 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/delete-resource.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/delete-resource.api.mdx @@ -5,7 +5,7 @@ description: "Deletes a deployed resource." sidebar_label: "Delete resource" hide_title: true hide_table_of_contents: true -api: eJztWE1v3DYQ/SvEnBJUltapm6Y6FNg6Tus2SY31pj3YBsKVRismFKmQlNfCQv+9GFLSfhrJoZeiNrCwJA5nOG/eG4lcg+NLC+kNzNDqxmQIdxHkaDMjaie0ghReo0SHlnGWYy11izkzvXF8q+alsCzjii2QcVYbnaG1LMdCKEEOIpZjJqzQihn80giDFSq3a6ENK7Sptp7dqjFWY4VaMldiH34MbhmqvNZCuZhd15iJovVmwzhrdcNWXDnmNMt9Ekwob/JxsPkD24+s5oZX6NDEtwoiGG8JlzUoXiGksDUDIhAETM1dCRH0aeWQOtPgPnrzEtlnbJkudhc3rmkPQ7exPw7njsFRbAm8wZ6w3ZlxADREYLMSKw7pGlxbU7JCOVyigQjInLvw6OUZdN1dSBit+0XnLc3Z5F9waTGCTCuHytEYr2spMk6Rkk+WAFkfRtOLT5g5iEA1UvKFxAHJ2ugajRNoyZquvacZFmhQZUhPd9GeMjMM+pSzUltUbBGY0VgkNLhjKyEloV1z4zwuUjKDmTa5pRI10hHpCqMr5qg4Y+z4Vr1rrKO5P7MJEwXV6F7kmAfyfA3ACCqhRNVUkJ52XQROOMq3F9kgwllAGLqObAzaWisbUHgxmRymPd9mlrA9s/IYugjOHpswLJzl3HGapbRj91yKPIZHi1gbvZBYfXdYzP1CXAVLlqPjQrJQZMZpcWS4wJzUeDN7c85+Ovvhx7tnpXO1TZNktVrFpshOMBdOm1ibZWKKjH5k9zxm8xINsoq3vufkuScyl2xDF2Z9PxAZycyFXP1iqDw7hRq5t8u1MDoy1Doj1HK7mo0RsC/1Kfswu2QiR+VE0Q5daye0n1PwRpIPvtCNSxeSq8+wocJh0P0otqkqbsaWshugi8A67hr7VTl//+LAN9Hit/n8igUXLNM5UscIIugDxdskPptMIqj4Q7h7OZl05JMq/g2ZKIYPteSKh061m45QrNIGe/74xISyjve6/xcqo41Yiv24MWzLsifx65BRkOPZ5OzrCiQtFbpRT1p60tKTlh7V0g/HXk5TxQhlQzxEY7RhOssaYzBnq1JIHD6Lhtj998iT0p6U9qS040rrIqjQlTqnbYu2njq0e0khGbdTyXprj9Ml/huSFhKBRXM/bIcaIyGFdVBQlybJutTWdem61sZ1yT2V554bQd/xvpo0HJQ2MEjqjMsyrOKwkjRAe64hw3NeNSrn7BWbXVzP2a/c4Yq3HlkKuev61eTV5KhXMn3E4/TqkoUMAw+3esPglkR+1G0w/hbHfudkMWuMcO01TQvwLJAbNNOGijHyo4/nvdN9MIKov3gzsOb3v+e+8NTXZptd2cUDr+qgymObpoknZaF9wJ4yh0unOqKxIddJfHpIz6tLr7JMV1WjfKtVS7YSrmR8C4pMNpb21hCBFBkq69fV76oHs7dhhP0VIrLTmMoYuDZ02KVwZbOIM10lWZg2/l9IvUgqLlTSh7DJ+fTdh/evpydvL88v3l9fnJzGk9g9OA8XKaDiamsdYfs1fsHt57revFj+d8chPSUdPrikllwoEomvzLrvITfj4YiFCNLdk5KxjdxFfSu4gfV6wS1+MLLr6PGXBk0L6c3dpnP4VpMLS9ebg4VHa/Js1p9BPGf/hfOWo5D2D7lqfQeVDd1BBJ+x3Tt+6u66CErkORqPU7A4D2iczMnPxsPBCUwXDTOmWYa1e8R255OJetf4/rj683pOrag//ql07s/H+IrOwvgKUriFW1q59pXyXc4/X4PkatnwJdkHv/T3D2QeJJs= +api: 
eJztWN9v2zYQ/leIe2oxRXK6rOv0MMBL0y1b2wWOuz0kAUpLJ4stRaokFUcw9L8PR0ryz6B92MuwBDBiicc73nffdya5BseXFtIbmKHVjckQ7iLQNRruhFaXOaSQo0SH43gEOdrMiJoMIIXXftgyznKspW4xZ6a3jW/VvBSWZVyxBTLOaqMztJblWAglyEHEcsyEFVoxg18aYbBC5XYttGGFNtXWu1s1xmqsUEvmSuzDj8EtQ5XXWigXs+saM1G03mwYZ61u2Iorx5xmIUcmlDf5ONj8ge1HVnPDK3Ro4lsFEYyPBNsaFK8QUtiaAREIAqbmroQI+rRySJ1pcB+9eYnsM7ZMF7uLG9e0h6Hb2B+Hc8fgKLYE3mBP2O7MOAAaIrBZiRWHdA2urSlZoRwu0UAEZM5dePXyDLruLiSM1v2i85bmbPIvuLQYQaaVQ+VojNe1FJmnWvLJEiDrw2h68QkzBxGoRkq+kDggWRsiqhNoyXok7QwLNKgypLe7aE+ZGQZ9ylmpLSq2CMxoLBIa3LGVkJLQrrlxHhcpmcFMm9xSiRrpiHSF0RVzVJwxdnyr3jXW0dyf2YSJgmp0L3LMA3m+BmAElVCiaipIT7suAicc5duLbNDgLCAMXUc2Bm2tlQ0ovJhMDtOebzNL2J5ZeQxdBGePTRgWznLuOM1S2rF7LkUew6NFrI1eSKy+OyzmfiGugiXL0XEhWSgy47Q4MlxgTmq8mb05Zz+d/fDj3bPSudqmSbJarWJTZCeYC6dNrM0yMUVGH7J7HrN5iQZZxVvfc/LcE5lLtqELs74fiIxk5kKufjFUnp1Cjdzb5VoYHRlqnRFquV3NxoiDRjllH2aXTOSonCjaoWvthPZzCt5I8sEXunHpQnL1GTZUOAy6H8U2VcXN2FJ2A3QRWMddY78q5+9fHPgmWvw2n1+x4IJlOkfqGEEEfaB4m8Rnk0kEFX8ITy8nk458UsW/IRPF8KGWXPHQqXbTEYpV2mDPH5+YUNbxXvf/QmW0EUuxHzeGbVn2JH4dMgpyPJucfV2BpKVCN+pJS09aetLSo1r64diP01QxQtkQD9EYbZjOssYYzNmqFBKHbdEQu9+PBC4+ae1Ja09aO9RaF0GFrtR04Ku19dSh80sKyXigStZbp5wu8btIWkgEFs39cCBqjIQU1kFBXZok61Jb16XrWhvXJfdUnntuBO3kfTVpOChtYJDUGZdlWMVhJWmATl1Dhue8alTO2Ss2u7ies1+5wxVvPbIUctf1q8mryVGvZPqIx+nVJQsZBh5u9YbBLYn8qNtg/C2O/dnJYtYY4dprmhbgWSA3aKYNFWPkRx/Pe6fnYARR/+XNwJrf/577wlNfm23OZRcPvKqDKo8dmyaelIX2AXvKHC6d6ojGhlwn8ekhPa8uvcoyXVWN8q1WLdlKuJLxLSgy2Vg6XUMEUmSorF9Xf64ezN6GEfZXiMhOYypj4NrQYZfClc0iznSVZGHa+H8h9SKpuFBJH8Im59N3H96/np68vTy/eH99cXIaT2L34DxcpICKq611hAPYuIfbz3W9+WH5312I9JR0+OCSWnKhSCS+Muu+h9yM1yMWIkh370rGNnIX9a3gBtbrBbf4wciuo9dfGjQtpDd3m87hW00uLH3fXC08WpNns/4W4jn7L9y4HIW0f8lV6zuobOgJIviM7d4FVHfXRVAiz9F4nILFeUDjZE5+Nh4O7mC6aJgxzTKs3SO2O1sm6l3j78fVn9dzakX9BVClc39Dxld0G8ZXkMIt3NLKta+U73L+/RokV8uGL8k++KW/fwA1zTCM sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-role.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-role.api.mdx new file mode 100644 index 00000000000..0364f27370c --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/delete-role.api.mdx @@ -0,0 +1,52 @@ +--- +id: delete-role +title: "Delete role" +description: "Deletes the role with the given key." 
+sidebar_label: "Delete role" +hide_title: true +hide_table_of_contents: true +api: eJztV0tz2zYQ/iuYPSVTWpRTJ3V509hK6zbJeGy5Pbg6gOBKRAwCNABa1nD43zsLUA9LcuNDjzpoKDx2v13s9+HRgudzB9k93BiFME3A1Gi5l0ZfFZBBgQo9hrEECnTCypoGIYPLMOSYL5FZo5AtpC9Day6fULMHXA4ggZpbXqFHSygtaF4hZEAGf+ISEpDkq+a+hAQsPjbSYgGZtw3uAk5KJKfMzDaY3rAYIkE5UWLFIWvBL2tCkdrjHC0kMDO24j52fTqDrpsSmquNdujI4sPwjD77iDE17nqcgrlGCHRu1ii1HECXwNnw9BVTfGzQeaa4eHDsiStZMN74ErWXIqwxExYLanLlKANhtEftyR2va9XPSmtrcoXVT98d+W63En0JOmLXcSYr0HOpmMm/o/AsRE8TcyyY1Oz+5vMF+/Xs4y/Td6X3tcvSdLFYDOxMnGAhvbEDY+epnQn60bz3AzYp0SKr+JLlyHhRSMLkitWWGOMlOuZqFHImBVWFKtSHzagag380JKu6xLCIHGvjTdXW1XPeSj3fLl5j5R4NR+zu5orJsIqzpdTzfehgM+ONIh88N43PcsX1AxXPS68Ogu6iuKaquF2z7yVAl4Dz3Dfuh+T7+cOeb6LK75PJNYsumDAFspmxzJfSrYAoiUpqWTUVZGfDYQIVf46tT8NhRz6p4m/IRDN8rhXXkYA76UjNKmOx509ITGrnuRb/V2WMlXO5izuAblML6El8GTPqui6I7D/1udp6+n0lCFYbz2am0cVRWUdlHZX1mrI+DocH+K4ZrbIlHqK1xjIjRGMtFmxRShXc0ym4wu6PusjFo9aOWjtqbV9rXQIV+tJsLtbhfuxLyCClk8ulbX+AdXSdRfu0ujY3VkEGbZRMl6VpWxrnu6ytjfVd+kT1eOJW8lxFFtJwlNaKMsoIrkL3odLRAN3NVyld8KrRBWfn7GZ8O2G/cY8LHu+7BPnS9fnwfHjQK019xePo+orFDCPxtjaDlVtS9UG3cfJbHIeLvkPRWOmXt2QWlydHbtGOGlr7NSF6vOCd2nESJP2fzyua/PH3JFRa6pkJ5n3F9wOhqqB1MfLh4HSfXddXQSTCVFWjw06p5/E6w7cSE6pxnhJKQEmB2gUe92+p1bQvcYT9FRHZ6YCKEpmz2iDn0pdNPhCmSkU0W39zZfK04lKnPYRLL0Zf775djk6+XF2Mv92OT04Hw4F/9iH52jhfcb0VR3wQhivYbp7t5kx467uxr4nHZ5/WiktNLAnJtL1m7sMr0kEC2eo5OU164t9D2+bc4Z1VXUfdjw3aJWT3041OgrAK6eh/AdmMK7f75tyO+91N/zp9z374Ej0YfN/J9TKIVTXUggQecLn1Iu6mXQIl8gJtCDCOjoTA2m/ZvXqwEuHXu8zl+Mt4Moau+xc67XWI +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Delete role

+ + + +Deletes the role with the given key. + +## Request + +

Path Parameters

+ +The role was deleted successfully. + +
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The role with the roleKey was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
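A small sketch for the delete-role endpoint documented above; the `/v2/roles/{roleKey}` path and the use of a bearer token are assumptions.

```typescript
// Illustrative sketch: delete a role by key.
async function deleteRole(baseUrl: string, token: string, roleKey: string): Promise<void> {
  const res = await fetch(`${baseUrl}/v2/roles/${roleKey}`, {
    method: "DELETE",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (res.ok) return; // role deleted successfully
  if (res.status === 401) throw new Error("The request lacks valid authentication credentials");
  if (res.status === 404) throw new Error(`Role ${roleKey} was not found`);
  throw new Error(`Delete role failed with status ${res.status}`);
}
```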
diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-tenant.api.mdx new file mode 100644 index 00000000000..7a30c6f4db3 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/delete-tenant.api.mdx @@ -0,0 +1,59 @@ +--- +id: delete-tenant +title: "Delete tenant" +description: "Deletes an existing tenant." +sidebar_label: "Delete tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/SuYPSVTmpQTJ3V50/ijdet6PLbcHlwdQHIlIgEBGlha1nD43zsASEmW5MaHHnXQUMQC+7DY9wBiWyA+t5A+wgQVVwTTCHSNhpPQ6qqAFAqUSNhbIyjQ5kbUzgwpnHujZVwxfBGWhJoz8l1jiKDmhldIaBxAC4pXCCkE+x+4hAiEc1JzKiECg0+NMFhASqbBbaRJiaxR4qlBJgpUJGYCDdMzRiVuQNq8xIpD2gIta4cmFOEcDUQw06biFJq+nkDXTR2mrbWyaN2IT6MT99jFDe7ZglsWVqNgtslztHbWSLmMoYvgZDTaP7g2+lkUWLCCE2fCMqWJPXMpCjffXCtCRW4or2spcr/wSW10JrH66Zt1ftqNsF4DjNlt6MkKJC4k09k3zIn5mbqOGRZMKPZ4d3nGfjn58vP0Q0lU2zRJFotFbGb5ERaCtIm1mSdmlruf6/cxZpMSDbKKL1mGjBeFcJhcuoBqNCTQMltjLmYiZ6R9HvppM7f28T8KoiELYVqOEqvB6xytcmXJCDXfTFVjxA7nxuzh7mogwdIzbhvaj5nxRjofPNMNpZnk6rtLFAmSe0G3UWxTVdwsB469BugisMSpsT+k2udPO74dLX6bTG5ZcMFyXSCbacOoFHYAckFUQomqqSA9GY0iqPhLePs6GnXOp8v4OyJx2qwlV55a2+EIxSptsOePD0woS1zl/1dmtBFzsY0bQ7fOBfQkPg8RdV3nBfV5l++X2mSiKFB5ejK3ZaClQVVcSr3Ag64Oujro6j90teeUu9HEZrpRRdDVxoGnVpaDqg6qOqjqDVV92ff5N1bMrbJxPERjtGE6zxtjsGCLUkjv3n1FDtj9cRa4eNDaQWsHre1qrYugQir1+m7qb5pUQgpJOLhs0q4umZ27FKJ5Hi6hjZGQQhtk06VJ0pbaUpe2tTbUJc8uJ8/cCJ7JwERnDvIaaCN1zqVv3pc+Z3A33SGsM141quDslN1d3E/Yr5xwwcOd0UG+dn06Oh3t9eq6vuFxfHvFQoSBfBsbwuDWKXuv29D5PY79ddli3hhBy3s3LCxPhtygGTdu/Vek6PG8d/ceOkHU/7kcqPL73xOfbaFm2g/vs747EZcVNDbMfBQf7zLs9soLJddV1Si/W6o5WwgqGd8ILJeNJRdQBFLkqKzncl+ZGLpdBwv7KyCy49glJTBn2CTngsomi3NdJXkYtnpmUmdJxYVKegibnI3/fLg5Hx9dX51d3NxfHB3Ho5heyAdfa0sVVxvzCFWV/itsO9J2fTL8oPzSJ4PwhZJacqEcPXwUbS+Yx74YYyGCdF2XmUY96x+hbTNu8cHIrnPNTw2aJaSP07VIvKoKYd3/AtIZl3a7eLM55Q93fZnnI3tnSWdvGH0jV0uvV9m4N4jgOy5flZi6aRdBibxA4yca7OM8x5o2Rr55wjrWr7ab84vri8kFdN2/cEKbPQ== +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Delete tenant

+ + + +Deletes an existing tenant. + +## Request + +

Path Parameters

+ +The tenant was deleted successfully. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found. The tenant was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
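A hedged example for the delete-tenant page above; whether the path parameter is a tenant key or a tenant ID is not visible in this excerpt, so the sketch treats it as an opaque identifier.

```typescript
// Illustrative sketch: delete an existing tenant. Path shape is an assumption.
async function deleteTenant(baseUrl: string, token: string, tenant: string): Promise<void> {
  const res = await fetch(`${baseUrl}/v2/tenants/${encodeURIComponent(tenant)}`, {
    method: "DELETE",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (res.ok) return;
  // Documented failures: 400 invalid data, 403 forbidden, 404 tenant not found, 500 internal error.
  throw new Error(`Delete tenant failed: ${res.status} ${await res.text()}`);
}
```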
diff --git a/docs/apis-tools/camunda-api-rest/specifications/delete-user.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/delete-user.api.mdx new file mode 100644 index 00000000000..18745fd4d8a --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/delete-user.api.mdx @@ -0,0 +1,52 @@ +--- +id: delete-user +title: "Delete user" +description: "Deletes a user." +sidebar_label: "Delete user" +hide_title: true +hide_table_of_contents: true +api: eJztV8Fy2zYQ/RXMnpIpTcqpk7q8aWyldZtkPLbcHlwdQHApIgYBBgAtazT8984ClCxLcpNDjzpoKBLYfbvY94DFCjyfO8jv4c6hhVkCpkXLvTT6qoQcSlToMYwlUKITVrY0CDlchiHHOOsc2vQfDQm03PIGPVpyuQLNG4QcaPxPXEICkgxb7mtIwOK3TlosIfe2w13v0xrZAy6ZqZivMUAwb1iMJ4UEnKix4ZCvwC9bQpHa4zzEWRnbcB8/fTiDvp8RmmuNdujI4t3ojB77iAFmwd2AUzLXCYHOVZ1SyxT6BM5Go8OmrTWPssSSldxzJh3TxrNHrmRJ0QqjPWpPprxtlRRhibPWmkJh89NXR35WW0m9BBiz6ziTlei5VMwUX1F4FiKliQWWTGp2f/Pxgv169v6X2Zva+9blWbZYLFJbiRMspTc2NXae2UrQj+a9Tdm0Rous4UtWIONlKQmTK0qoReslOuZaFLKSgirgY64hGFr5WPihBjEsIsLG+LlCm0o5b6Webxeqs3KPX2N2d3PFZInay2op9XwfOthUvFPkgxem83mhuH6gQnnp1UHQXRTXNQ23G6a9BOgTcJ77zn2XaD+/2/NNtPh9Or1m0QUTpkRWGct8Ld0aiJJopJZN10B+Nhol0PCn+PZhNOrJJ1X8BzLRDJ9axXWg1m46UrPGWBz4ExKT2nmuxf9VGWPlXO7iptA/1wIGEl/GjPq+D4L6Ly0OOqpMp486OuroqKNXdfT+0ME01oxW2RIP0VpjmRGisxZLtqilCu7pfFtj05mMzkcuHrV21NpRa/ta6xNo0NfmuT8Ona+vIYeMTi2XrYaWt6dGFe3juiHurIIcVlEyfZ5lq9o43+er1ljfZ49Uj0duJS9UZCENR2mtKaOM4Cp8PlQ6GqCue53SBW86XXJ2zm4mt1P2G/e44LGTJciXrs9H56ODXmnqKx7H11csZhiJt7UZrN2Sqg+6jZN/xHFo4R2Kzkq/vCWzuDwFcot23NHabwgx4AXv9B4nQTL8+bimyR9/T0Olpa5MMB8qvh8IVQWti5GP0tN9dl1fBZEI0zSdDjulnrOF9DXjW4kJ1TlPCSWgpEDtAo+HW9J62qc4wv6KiOw0paJE5qw3yLn0dVekwjSZiGabZ6FMkTVc6myAcNnF+PPdl8vxyaeri8mX28nJaTpK/ZMPybfG+YbrrTjivS60X7t5rp7PhIPXv6EAHp981iouNVEiRL4aBHIfLoMOEsjXt8JZMrD8Hlargju8s6rv6fO3Du0S8vvZsyiCikrp6H8JecWV2706bgf55ma4ZL5l371QHgx++Mj1MihTdfQGCTzgcuti28/6BGrkJdoQYBwdC4Gt37J79RQldm+2lMvJp8l0An3/L4P8WVg= +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Delete user

+ + + +Deletes a user. + +## Request + +

Path Parameters

+ +The user was deleted successfully. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The user was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
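The same pattern applies to the delete-user endpoint; the `/v2/users/{userKey}` path is assumed, and the 404 branch matches the "user was not found" response above.

```typescript
// Illustrative sketch: delete a user by key. Path is an assumption.
async function deleteUser(baseUrl: string, token: string, userKey: string): Promise<void> {
  const res = await fetch(`${baseUrl}/v2/users/${userKey}`, {
    method: "DELETE",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (res.status === 404) throw new Error(`User ${userKey} was not found`);
  if (!res.ok) throw new Error(`Delete user failed: ${res.status}`);
}
```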
diff --git a/docs/apis-tools/camunda-api-rest/specifications/deploy-resources.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/deploy-resources.api.mdx deleted file mode 100644 index 19d360be134..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/deploy-resources.api.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: deploy-resources -title: "Deploy resources" -description: "Deploys one or more resources (e.g. processes, decision models, or forms)." -sidebar_label: "Deploy resources" -hide_title: true -hide_table_of_contents: true -api: eJzVWd9z2zYM/ldwfFpviux2XddpT17cbt6aLJe420OaB0qCLTYSqZJUEp9P//sOpGzLtpw4qZvdHnKWRBD4AHwAf2TOLJ8aFl2yczSq0gmyq4ClaBItSiuUZBEbYpmrmQElEZSGQmkE3Ugb+A7DaQilVgkagyaAFBNhhJJQqBRzE9CcidKFeRF+kuNMGBAGuARuVSESSHieByBCDAGFzVADz/OWfq4RUocAU1IlHYwJ2AwLGgw/SRYwjV8qNPZXlc5YNHevQmPKIqsrDFiipEVpaaiocitKrm2PQB2l3HL6bJIMC/dkZyWyiKn4MyaWBazUqkRtBRqvuQHWEuVa8xnbDNs4Q4iF5HoGZASsgkQjt0jQG5cKlHblawgjS8EplTEizpGmZPwGfchtxqXLAeEG8gBuhc0gFZMJalK0GpC8QEPvzlY3Ct7C4IMoLBZtv4zVQk5ZwEgxtyxiXhOr64BZlFzaUdolvx0IL03WvVGHa5Vkq0KndJW3y2Wk2VVd+zFTKml85F/1+/SzbaibOCFb4wAvy1wknKb1Phuauz8BVkH7E2ctcSEtTlG3oyWkffO6MxyVFF8qhGucgUhRWjGZCTndIEbI6qBlrotxmxnbgbmpziFOhBQexqOn7JvpuCzkoh3AaBgAN8RJgymkFc1qeRiAVVN0Re+oTP7foHbdw7UM4J9kE6smTgJdNwEOpsRETESyNJYusRKd62Dbh7+97gez9sOrTte4MWIqMV1abLCGnca+gh5LQ9c4C+A2E0kGPLGGYsmhOyCWOmsDwgFaVMIpL3C/1C1muPYBE62KxnZbN9wuE+rMPKkPjIZNC1+19hV0Uipsjsulh6hy5od9SfgF5jF03p6zL+K0kKsl7Vn4vLDWsPjmQKRdOtFm7XZcDkHbpanH8nfDdbk3dYcnp561S1p5Rfem66D8XUJvh/Xcr2eufT+GcG39TSh1SxVMNS+JXtyux83tHmgDoCb3e+7j2wXzQG3rq/E7iJ2tYNgI7/Jg/27w1PR0O/VAc/hld3Mwi+6wozaesTmsedTuFF9fi2u6n78w18zvrNKDtr+1YD61F67j9mk/9NrejXhtpb+3FNfKr/bRergMSWrf/LqzzbMsvyR54Opy4NvFRB/+i/2h8+1bEMh5+I12hg3oTgK+J6J1D52g5e50f/9ptXvyeXPYZP7s+XrXcTNVSeUO8VWZK57ChIsc0xBOlDt9Wi5yfxIttboRKaYg5OLs6wxArNKZP37vOKCWWsU5Ft9vH1TX4QzgzEs2dsFXHXHCC8be+uX5+2P4+fWPP119l1lbmqjXu729DfUkOcJUWKVDpac9PUnoj+RehDDOUCMUfAYxAk9Tt1PkOazqeVU/VjkHG9hA0fb+3d8M/Og9dw+VFlvEGcDH89HWEXrNtJsz4VVOOnisKhvFOZfXbJX3h+g5AFMVBV2hNNRcN1AHzFhuK/PUPvH7eHwGXgUkKsW18xwZIicKIUVRFSx63e8HrOB3/u1Nv+9WMsr4Hp5IwLsy59JRa9MdIf1Nk9fmHBPSWC6TQ2VGaTEVm3bXa7sh8dB7VPv6K9BmKmURK5Vx1OE2YxHrtW9HAmZQU5Nl0eWcVTpnEZv7YqmjXm+eKWPraF4qbeveDWXihmvB49zzj4Z9US3IkquE55k3uJ00Gmjvco55UcmUw1s4f3cxht+4xVs+81cDSm+oftt/2+/USqI7NA7ORuA99JRrtYGFWqrnTrVeeB/FdX1FgUwqLezsgqb58MTINepBRXFfUqGx57TTuxdiQfPwfkGQP/4ZuxwLOVFuepPrbSCsteiyfvhym1dnI1ceiSqKSroeKad+uectx5K8MpYcClguEqROHs2bPezS7Ac/As21ELwMKSmeOYvWOBU2q+IwUUUv8dOWv3Gu4l7Bhew1JkzveHDy8XQ4OPowOn53evHu6GXYD+2ddc4TdQsuWzj8crO6stx0dr5aEv4Xl/ANKyze2V6Zc+H2UC6c86ZiL9fuM6+Cpuou2Xwec4MfdV7X9PlLhXrGosurVZHSWx2wDHmK2pX4NW2g2LEP0dGYjJN4XhGIrmv+OlhMGiQJlrYlvnUjTGWw7Dpnf12MidXNfxcopixy/c/pDVaPBLKu/wW62+qq -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Deploy resources

- - - -Deploys one or more resources (e.g. processes, decision models, or forms). -This is an atomic call, i.e. either all resources are deployed or none of them are. - -## Request - -

Body

required
- -The resources are deployed. - -
Schema
    deployments object[]
  • Array [
  • processDefinition object
    decisionDefinition object
    decisionRequirements object
    form object
  • ]
- -The document upload failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/download-document-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/download-document-alpha.api.mdx deleted file mode 100644 index ec758a86aca..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/download-document-alpha.api.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: download-document-alpha -title: "Download document (alpha)" -description: "Download a document from the Camunda 8 cluster." -sidebar_label: "Download document (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztV0tv20YQ/iuLPcUoTcqp06a8CbaTqkgDw1bag+3DcjkUN9kHszu0TAj878UsSVm25dRFeypyECRxZ+f75smZDUexCjy/4qdOtgYsBn6T8BKC9KpB5SzP+albW+1EyQQrRylWeWcY1sBOhGltKdhbJnUbEHx6ba9tnufWIVzbZa0CA1s2TllkKjBhmdBNLVgFAlsPKVsgk633YFF3zFndsdA2jfMYhZU9NGCc7+6xAzoPybVd10rWpNM6ZAYE0XKeNd6VrSTurA2QRjLXlie8EV4YQPBk8IZbYYDnfFK7KHnCFdnbCKx5wj18bZWHkufoW3jslGUNbHHKXBW9sOWGjpWju1Ke8CBrMILnG45dQ2gBvbIr3vfJlkA05x79awu+ewBfCR1eih+V7bJ4eExh+zavG0IOjbMBAp2/ns3o6yn4VudahC0alCy0UkIIVat1R1DSWQSLpEQ0jVZSkJLMSQQ8DOhBGDp7jlHCK+eNQJ7zQlnhO973fZ/w49nx3/FSWEfrV+oWLDmLmFKuVK61ZRpz4hl2jXeFBvPD50B6d9k9BJyz80GSlYBCaeaKzyCRkUuiYAElU5ZdXbw7Yb8cv/n55lWN2IQ8y9brdeoreQilQudT51eZryR9SO4gZcsaPDAjOlYAE2WpCFNoyu8GPCoILDQgVaUkBZwsHWkzcuFg3+jMgRbVwPbyvau/4fLWK/448+bs08WCqRIsqqpTdvUUOt6pRKtJhyhci3mhhf3C+4SjQr0X9DFKaI0RvptS/CFAn/CAAtsdM7iyCCvwu/yVxR9fP9FNafLrcnnOBhVMuhJi60DqViMQGWGUVaY1PD+ezRJuxN3w76fZrCedFPEXWGIZ3DVa2Jhaj81Rlhkq2UFbNEzZgMLK/yoyzquVeoybUguaYsHHJD4dLBoK7M2+wp9TQ0bwlIfgvfPMydi8S7aulY7qqfonbGpiEPB7rX2vte+19lyt9Qk3gLUrec5XEDOHJpCcZ9O7LGSb+zGlp/c3+Ntpjmm95jnfDGXT51m2qV3APt/QDNVntxSTW+GVKPSQiXQ8lNeUNtpJoePjfeGjA5pVJrPuR76Ls8sley8Q1qKL7iTIh6rfzt7O9mol0Wc0zs8XbLBwSL6dhjCppcreq3YQfoniOOsEkK1X2F3StcE9BQgPft5SCLZJMeJF7fR/EOLJ+OPdlCq//bmM0Va2cvH6GPWnRCgq4MPAfJYePc2w80UsFOmMaW3slnY1TDViz9jNE66VBBtiLo+z5ST2YThhfwyI7CiloAyZMzXJlcK6LVLpTCaHa9vvQrsiM0LZbIQI2cn8908fT+eHHxYnZx8vzw6P0lmKdxiNb1xAI+wOj+0KsZ3OXsUt4OCx1Zv7t8T/Zu8YUwjhDrNGC2UpqaPvN2OlX223kMATnu+sJDfJWK5XfLMpRIBPXvc9PR72BGoApQpU289sCrse/XdLw147vkD3YIe5FbolKU7VNbWdf0jz1cW4+xywl61Ze5mND4XtdmlNjHd83N/0Ca9BlOAj0UFgLiU0uHP12e2FLN028Pdn1AD+AimWTlk= -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Download document (alpha)

- - - -Download a document from the Camunda 8 cluster. - -:::note -This endpoint is an alpha feature. It currently only supports an in-memory document store, -which is not meant for production use. -::: - -## Request - -

Path Parameters

Query Parameters

- -The document was downloaded successfully. - -
Schema
    - -string - -
- -The document with the given ID was not found. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/evaluate-decision.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/evaluate-decision.api.mdx index 4d8d08b9f8f..4d4319dccbe 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/evaluate-decision.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/evaluate-decision.api.mdx @@ -5,7 +5,7 @@ description: "Evaluates a decision." sidebar_label: "Evaluate decision" hide_title: true hide_table_of_contents: true -api: eJztWtty2zYQ/RUMnpopLSmJcynfHMtp1ebisZ1kOrYfIHIlIiEBBpfIHA3/vbMAKVImJTly2jzUnvHYJIDdxe45i8tySQ2baxpe0jFEXHMpSAwzLrjhUtDrgMagI8Vz9xjSk28stcyAJozE1YDBlfhbWqJziPisICaBVRMxkkA1hAA3CSgyLYjVXMwJN5pYwb9aIF+gIL8wTRQYqwTEZFpciTHkqSzOQEurIngUEKmqkWsqJuMB+ZSA6G8L3IsUTTYkdhIhvhLfQLkOcrY+gGtiNcSDK0EDquCrBW1eybig4dI9cgUxDY2yENBICgPCYBPL85RHDJ00/KzRU0uqowQyhv+ZIgcaUjn9DJGhAZUC3s9oeNltMdyk0HJzHZOzypLiLyhoGewxcBLT8npnP1oGFG5Ylqeg0fJXReOaBhYYLjdDm2VMFS1x686cNqPHq8F/QTGgAcUBgEJ6e9Dw8ZOnh8+wm+Js6q0pyzLYZNFkvLdBk3iHPZOYhhTtOXj2/MVL2jGqdIYp0LkU2vvtyWjkZbW5c9E2ZcH0ihnOgL3RlCuZgzLca97gzj5bWtzjMQjDZ0WHQIuER0nX2MoGLgzMQdGAzqTKmPGvnh8ijPr92GfHZNyh4Xa12igu5v1K3rEM+tUIlsEPVPTRZ5B+XZvSyz7ufPqkbcCZT0IZCKO/w6GqNYzMFcsTYhJmdlqHCTFnyhA52+qXtln3AtyPNXQbQKU1uTVdS/88f/+OxDKyaINXvuBpSrjQhgnD66yiQNvU3CnEbjXp+G7GeArx+P488YJIbFFyrRYX5k1arYK3oDWb91ClaiD+7RQlLpJit/+9Eb0qDQgmzKZp+dbW7BqZqx3GNuhNXFwi+H7Ycd1Mqddr2+CzsrIOoG4lZ6YUK2hAuYFM9yXtdSvfcO2AVFujK9iBantjwU3CRYU9t1y3XHRrAv/xovCwJvzMNaEx4MIN6mVZke8/05+eKv+dDJIxEyUQn9lqt7sfede2dQpleU9U0hvabk4362xFGbt5UYvH3r2zc2JEDDf9kjg27RC2DXwrH7934LinA7sRIx50uusf37DbQxtlbsH43VLFPpI/NgeMTmbApn1kl90D3Wo58mGZGMho2e741od6dfBDlPhOrZhOxP1D2pypnbDtS9pdueFk7RV6N7I3Pq5l78DvkHuPsG+SvC3qLnDdoHf7bezS3Aj48yz1x9vDTSfaXMlvPEa7mWG4CxfS4NT4tlNtruQ0hezX7ul2XcEROfU9SQyG8ZR4vBGm6/0p7vwFuTx7fUx+O3z24vqXxJhch8PhYrEYqFl0ADE3Ug2kmg/VLMJf7PdoQC4SxGLGCjIFwuLYLaAsJQ3gqistHuE1lvFzdcZgQNZWqg2HcVMtx+sBbGVTq3iHO0fkw9mks/laU+3GzJhNUQabSmvCacrEF9rEs6v0tpbqsqQG4LqCMqDaMGPbXN+4GvSx/4+Li1PiRZBIxkBmUnmeV4pwEhkXPLMZDQ9HI1yOb/zT89HI7W4w4neYiSBwk6dMOGjdng4XJJMKKvwMPC/9geEHRUYqPue39Q7WaFqBeOxnVBPqcMcVUcWlmbTigUsPXHrg0kYuPetbnI5wBTWgEIeglFRERpFVCrcdCU+d+Aj0qmhQnakfmPbAtAem9TMNz8xgEokFkVxqBx1mEhrSYb1qHTRFGT1sdvI0oBoUXoG4updVKQ3p0hOoDIfDZSK1KcNlLpUph9+e3KqyUGz2RKsBlMqIpYk3ohtIbGhv3I9ZZkXMyEtydnJ+QX5nBhascI5FleuiX45ejnqlYtcNEo9OJ8TP0MOwlRpqscjxXrG+810El1jD0xBZxU1xjsO8e6bAFKgji7FYwaPS56Tjs+9Eg+qf1zVo/vx04eKOae2sKXme+DrglvvD0a1SWPueZu0cNJPOrApX3QlitOurMzoaPO5i+HTiqBjJLLPC5WO8l+YmIazlsCi12qCjApryCPD0Ei6pcEe7ldo3voVUl3Xk8QCD7RFZp+E5N4mdDiKZDSM/bPV3msrpMGNcDCsVenh89PbDu/HRwZvJ8cm785ODx4PRwNwY51SkScZEy45VabL26u3ZLpv15/9Vb69wa+DGDPOUcYHocYFZVnnmkvblGbq6uai+WfDZ4pIul1Om4YNKyxJff7WAxeHL6wa3+FQGNAEWg3KpydW26bGPwIG7UF1Vh7uVWazF+xFHUQS52dr3upU+T9+fXyAVq28LMhnjGMUW+N0BW9CQXtErih8L5KYuLrj3S5oyMbeudkO9XPz5BxLR3Gg= +api: 
eJztWttu2zgQ/RWCTy1Wsd02vaze0jjdda9BkrZYJHmgpbHFViJVXuoIhv59MaRkyZFst267fdgECBKJw5nhzDnDm5bUsLmm4SUdQ8Q1l4LEMOOCGy4FvQ6ozEExfJjENKTwlaWWGaiFaUBj0JHiuZMP6UkloAkjcSU0uBL/SEt0DhGfFcQksGoiRpJaJwFuElBkWhCruZgTbjSxgn+xQD5DQe4xTRQYqwTEZFpciTHkqSzOQEurIrgfEKmqnmsmJuMB+ZiA6G8L3IsUXTYkdhohvhJfQTkBOVvvwDWxGuLBFQ5dwRcL2jyXcUHDpXvkCmIaGmUhoJEUBoTBJpbnKY9cHIefNEZqSXWUQMbwP1PkQEMqp58gMjSgUsC7GQ0vuy2GmxRaYa7zcFZ5UryCgpbBHh0nMS2vd8rRMqBww7I8BY2ePy+a0DS4wXS5EdosY6poqVsP5rTpPV51fgXFgAYUOwAq6ZWg4YOHjw4fo5jibOq9Kcsy2OTRZLy3Q5N4hz+OGejPweMnT5/RjlOlc0yBzqXQPm4PRyOvq82di7YrC6ZXzHAO7I2mXCGHDfeWN4Szz5cW93gMwvBZ0SHQIuFR0nW28oELA3NQNKAzqTJm/Ksnhwij/jj2+TEZd2i43aw2iot5v5G3LIN+M4Jl8BMNffAVpN/WpvKyTzgfPWw7cOaLUAbC6O8IqGp1I3PF8oSYhJmd3mFBzJkyRM62xqXt1g8B7uc6ug2g0prcmq6nL8/fvSWxjCz64I0veJoSLrRhwvC6qijQNjXflGI3m3RiN2M8hXj84zzxikhsUXNtFifmTVatgjegNZv3UKVqIP7tFDUukmJ3/L0TvSYNCCbMpmH51tboGp2rFcY26E1cXiL4fthx3QypN2rb4LPysk6g7lpfNVUwAtUe3YKbhIsKS276bQ253yGmFCtoQLmBTPdNBuv2j1oLMefA7cLzH88dd1PH75w6GgcuXKdeMhb5/iP97RX11xSajJkogfjMptDD8bVVnUIZP8KqV8PyndVmf3Kj2e+2us59VLGbZbV6lO6NlVMjYrjp18SxaYeybVBeZeydg9qGdHTzSjw09f7R3q13PZ6+YXdEN+rcwrBvK1T7aP7Q7II6dQmb9tFddnedqznTp3FiIKNlW/CNh8Zqd4qo8kItDEzEZgg0G3ontH3+/YXMdOZ7Jt/vJ6jTtBeeXM/epLuWvdG0Q+8PYGmT5m1QcmjoIqkrt1GkOQvxO3nqN/aHm/byuZJfeYx+M8Nw/yGkwaHxbfv5XMlpCtkf3X39bQydekkSg2E8JR5rhOl6ZY57HkEuz14ckz8PHz+9vpcYk+twOFwsFgM1iw4g5kaqgVTzoZpF+Ity9wfkIkEiZKwgUyAsjt2agKWkAVx1mMcjPMAzfqzOGUzI2uS74RjCVCuM9QS2SrpVvIc3788mnfXkmmnXZ8ZsijrYVFoTTlMmPtMmn12jt61Ux0Q1ANcNlAHVhhnb5vnGKalvlvj74uKUeBUkkjGQmVSe55UhHETGBc9sRsPD0QhXGDf+6clo5BZsmPFvGIkgcJOnTDho3R4OFySTCir8DDwv/VbpJ2VGKj7nt+0O1mhagXjsR1QT6nBX0fZcmkkr7rh0x6U7Lm3k0uO+yekIZ1ADCnEISklFZBRZpXDZkfDUqY9Ar65LqtMHj8U7rt1x7Y5rXa7hQQCYROJlUC61gw4zCQ3psJ63DpoLKT1s1vI0oBoUnuu4Oz+rUhrSpSdQGQ6Hy0RqU4bLXCpTDr8+vHXDRLHZE60GUCojlibeiW4isaG9dD9mmRUxI8/I2cn5BfmLGViwwgUWTa6rfjZ6NurViqIbNB6dTogfoYdhqzTUapHjvWq98LcoLvH+UkNkFTfFOXbz4ZkCU6COLOZiBY/KntOOz16IBtU/L2rQvPx44fKOZe2sue498XegWw5FR7euAduHT2s7oZl0blW46g4Qs12fB9LR4EEXw6cTR8VIZpkVrh7jmTw3CWGtgEWp1QYDFdCUR4D7l3BJhdvcrcy+9i2kOoEkDwaYbI/IugzPuUnsdBDJbBj5bqu/01ROhxnjYliZ0MPjozfv346PDl5Pjk/enp8cPBiMBubGuKAiTTImWn6srmXrqN4e7bKZf/5f3xpUuDVwY4Z5yrhA9LjELKs6c0n76gxdHYhUH3T4anFJl8sp0/BepWWJr79YwIvxy+sGt/hUBjQBFoNypcnd69Njn4EDd0q8uhnv3krjdwi+x1EUQW62yl63yufpu/MLpGL1XUUmY+yj2AK/uWALGtIrekXxQ4nc1Bcr7v2SpkzMrbu3ol4v/vwLl+ZD2w== sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null @@ -51,7 +51,23 @@ The message variables as JSON document. The decision was evaluated. -
Schema
    evaluatedDecisions object[]
  • Array [
  • matchedRules object[]
  • Array [
  • evaluatedOutputs object[]
  • Array [
  • ]
  • ]
  • evaluatedInputs object[]
  • Array [
  • ]
  • ]
+
Schema
    evaluatedDecisions object[]
    + +Decisions that were evaluated within the requested decision evaluation. + +
  • Array [
  • matchedRules object[]
    + +The decision rules that matched within this decision evaluation. + +
  • Array [
  • evaluatedOutputs object[]
    + +The evaluated decision outputs. + +
  • Array [
  • ]
  • ]
  • evaluatedInputs object[]
    + +The decision inputs that were evaluated within this decision evaluation. + +
  • Array [
  • ]
  • ]
The provided data is not valid. diff --git a/docs/apis-tools/camunda-api-rest/specifications/fail-job.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/fail-job.api.mdx index 5c05c19cc0f..41a216423a2 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/fail-job.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/fail-job.api.mdx @@ -5,7 +5,7 @@ description: "Mark the job as failed" sidebar_label: "Fail job" hide_title: true hide_table_of_contents: true -api: eJztWEtz2zYQ/is7uDSZ0pKSOmmim+MkbdI8PLbTHhwfluRShA0CLABa5mj43zsLkHrbyaFHeUZjEsS+v29J7EJ4nDkxvRIfTSquE5GTy6ysvTRaTMVntLfgS4IbkwI6KFAqyr9rkYgaLVbkybL0QmisSEzFjUn/olYkQrJ4jb4UibD0byMt5WLqbUPbNi5LgltqwRRLS94ESyORCJeVVKGYLoRva7YgtacZWZGIwtgKfVx6eSy67jqaIuffmLxlmZXlApWjRGRGe9Ken2FdK5khezG+cezKYteaSW8o8xyuNTVZL8lFvd72l7vBYGUa7TmeftsyLleaRuVQ4h2BosKHRD4a12/PBeerwEZ5MZ10iSBrjf1MzuGMdu2faDDhGhVUcRPEHanUM5iX7dKZWMsRXJbSgXRQo/UyaxRa1ULjqGgUyAIwbLaNdmCajahQ54AapM5kTtqzDovSUZ4wVDyrHVzIUENJqga6rxVKHRxZl53jIDxaT4rzVuqZSIRulMJUUYRQl4QKtG8wu/1aFPvLkGJ2a4oCvKyIPX8iNVTuKRTGhhxouvchlnb04zq8PN6uwx1ayR4FEGCey5j1szWc7EP7x4uvXyCiCnyJHuZSKZDaedReoqfg21I5oA8LymSowGWmpjWi/OIAnTMZy+Xg0d1uhLIE71b2ukR46fmWWf8epTqPtBFd14XcutpoF0N7Pjnen1+GhRxawkh0iTieTPZvra25kznlkKNHFtLGwx0q2Vf7AVbW1qSKql932bkFejiLOyEnj1IN+UU3YJ9ykBquzt+fwuvjF79fPym9r910PJ7P5yNbZEeUS2/syNjZ2BYZ/3jfU2YHWYIKW0gJVmWGVT8AV1MmC5lx2/Ix2uAMV2F/PTabSXy62MH8En+NlWIbRyfw7fwDBPLIomVu75heR6zA1DR+mirUt2JV/12j21ZcU1Vol91500CXCOfRN+6H/bnvY9vA+PPy8gyiCshMTj05uRtFQxxEJbWsmkpMjyeTRFR4H+9eTpiGseI/EYmOvUcHaG2Hw63BWOrxEwKLlMz+r8oYK2dy2+5IrHOxB/HbGFGk4vFj7JtLXwZ1M3lHGuK7d6BXYRp9oNeBXgd6PUqv1z9Nr9vILanD2twaPQu5Jcgaa0l71Y7g8ypMB2jXXn293PBuhdTk7YGeB3oe6PkwPV/s+6A84WODJ8s4DIchMFlgYA7zUqqgPiPnBtv9kfDAtQPXDlx7iGtdIirypcl5amNcgA4Pb6ZifGNSN17Ej8tuzMe9xhJPZsjeDdOfxioxFYtIm246Hi9K43w3XdTG+m58xzXZODDz40ivATbhhFtG07vl4wc8YhrCOsWq0TnCKzh/d3EJf6CnObYhnWxyU/WryavJXq289QGNJ2cfIEYYwbfWEAa1zOy9auPmn1EcZlaOssZK316wWExPSmjJnjRcgSUoentBO9/HTSLpL94PUPn4z2WoNjez89U87N09VnWk4nJ8NdmeJ62QtzlgmWzWrwuwLUzwrgfVbpxcdLIuJmYyerYL4LMPgYeZqapGh2bMQyr+7sK1vGWqcZ7zlQglM9IuBNFPHIdtn+IT+DtahGcjrnkE5tCDZ9KXTTrKTDXOotjyf6pMOq5Q6nFvwo1PTz5/+/L25OjTh9N3Xy7eHT0bTUb+3ofcMkcq1Gt+8BiFPxu3g1ys3jmPjVP7Gnu69+MwImPUBe8XPROveLjqRCKmyyHrQMbrpCfUlVgsUnT0zaqu4+V/G7KtmF5dr+oXCJtLx9erweiDPj8572eoT+HxSe3eCPpF1G3oAKrhO5GIW2pX0+LuuktESZiTDc7Fh6fRhaNLVrES3hnbdskgcZJlVPsH9m684pl2y3539vXiklnUz4wrk7OsxTmzAOdiKr6L7+x0nKzGATCvL4RCPWsicaJe/vsPr+YI7Q== +api: 
eJztWEtz2zgS/itduGxSy0jKrJPN8OZ4khlnJonLdnYPjg9NsinCBgEOAFpmqfjftxog9baTwx7lKpVJotHP72sSvRQe506kN+KTycRtIkxDFr00+rwQqShRKl5IREEut7LhFZGKz2jvwVcEdyYDdMByVHzXIhENWqzJk2WtS6GxJpGKO5P9SZ1IhOTtDfpKJMLS3620VIjU25Z2bVxXBPfUgSlXlrwJliYiES6vqEaRLoXvGrYgtac5WZGI0tgafXz09kT0/W00Rc6/N0XHe9aWS1SOEpEb7Ul7XsOmUTIPOZjeOXZluW/NZHeUew7Xcsa8JBf1ejtc7geDtWm153gGsVVcrjKtKqDCBwJFpQ+JfDauf/0SalJiq7xIZ30iyFpjP5NzOKd9+6caTLhGBXUUgiiRST2HRdWtnIm1nMB1JR1IBw1aL/NWoVUdtI7KVoEsAYOwbbUD025FhboA1CB1LgvSnnVYlI6KhKHiWe3oQo4aKlIN0GOjUOrgyObeBY6bJ5tJcd5KPReJ0K1SmCmKEOqTUIHuPeb3X8vycBkyzO9NWYKXNbHnL6SG2r2E0tiQA02PPsTSTX5ch7cnu3V4QCvZowACLAoZs36xgZNDaP909fULRFSBr9DDQioFUjuP2kv0FHxbKQf04YEyOSpwuWlogyj/cIDOmZz3FeDR3W+FsgLvTvb6RHjp+Za7wUeU6jLSRvR9H3LrGqNdDO2X2cnh/DIs5NgSJqJPxMlsdli0seZBFlRAgR55kzYeHlDJodpPsLKxJlNU/3OfnTugh4soCQV5lGrML7oR+1SA1HBz+fEMfj158+/bF5X3jUun08ViMbFl/ooK6Y2dGDuf2jLnH8u9ZHaQJaixg4xgXWZY9wNwDeWylDm3LR+jDc5wFQ7XY7uZxNXlHuZX+Gut3OvMp/Dt8hwCeWTZMbf3TG8iVmBmWp9mCvW9WNd/3+iuFdfWNdpVd9420CfCefSt+2F/HvrYLjD+uL6+gKgCclPQQE7uRtEQB1FLLeu2FunJbJaIGh/j3dsZ0zBW/Cci0bH36ACt3XC4NRhLA35CYJGS+f+rMsbKudy1OxGbXBxA/FuMKFLx5Dn2LaSvgrq5fCAN8d070qs0rT7S60ivI72epdevP02v+8gtqcOzhTV6HnJLkLfWkvaqm8DndZgO0G68+oZ947sVMlN0R3oe6Xmk59P0fHPog/KUjw2eLOMwHIbA5IGBBSwqqYL6nJwbbQ9HwiPXjlw7cu0prvWJqMlXhodBjXEBOjy8ScX0zmRuuowfl/2Uj3utJZ7MkH0Ypz+tVSIVy0ibPp1Ol5Vxvk+XjbG+nz5wTbYOzLwc6TXCJpxwq2h6v3y8wCOmMawzrFtdILyDyw9X1/A7elpgF9LJJrdVv5u9mx3UyqJPaDy9OIcYYQTfRkMY1TKzD6qNwj+jOMysHOWtlb674m0xPRmhJXvacgVWoBjsBe18H4VEMlx8HKHy6b/XodrczC7X87APj1g3kYqr8dVsd560Rt72gGW2Xb8+wLY0wbsBVPtxctHJupiY2eT1PoAvzgMPc1PXrQ7NmIdU/N2FG3nLVes85ysRSuakXQhimDiOYn/FFfhPtAivJ1zzCMyxB8+lr9pskpt6msdtq/+ZMtm0Rqmngwk3PTv9/O3Lb6ev/jo/+/Dl6sOr15PZxD/6kFvmSI16ww8eo/Bn426Qy/U757lx6lBjT49+GkZkjLrg/XJg4g0PV51IRLoaso5kvE0GQt2I5TJDR9+s6nt+/HdLthPpze26foGwhXR8vR6MPunzi8thhvoSnp/UHoxgeIi6Cx1AtXwnEnFP3Xpa3N/2iagIC7LBubh4Fl14dc0q1pv3xrZ9Mu44zXNq/BOyW694pt2q3118vbpmFg0z49oUvNfiglmAC5GK7+I7Ox0nq3EAzM+XQqGet5E4US///Q/qaREQ sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-all-users.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-all-users.api.mdx deleted file mode 100644 index da5d5df32ba..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/find-all-users.api.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -id: find-all-users -title: "Query users (alpha)" -description: "Search for users based on given criteria." 
-sidebar_label: "Query users (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWUtz2zYQ/isY9JJMqYcTJ015Uxy7dZsmri2nB1kHiFyKSECAAZaWVQ3/e2cBynqQTuxOe5NnPBKBfWAX3wdQuyuOYu54POHXDiyfRjwFl1hZojSax/wKhE1ylhnLKgfWsZlwkDKj2VzegmaJlQhWiv6NvtFxHGuDcKPHuXQMdFoaqZFJx4RmQpW5YBkIrCwwoVOmDTLQYqaCwRNRVDoVLFGVQ3JlKmQmY5gDm5m7/o2+AvBPk7XoG3Z5ejVmo4tzZm7B3kpYTJ8NRCldD41RbpAEwZ4oZc+Cw9ZAb63XL9IfvlZglzT1/EZTxFllMQfLUkAhlev7CG80j7gpwQpK0XnKY55JnY6Uogw6HnELXytw+NakSx6veGI0gkb6KspSycQrDj47SvCKuySHQvhZpT5mPJ6sOC5L4DE3s8+QII94ackhSnBew1hvrZES1oolj7hEKNxT7GQSVLplyKGVes7riBubgm3PEDgyUSkkry7hdR2ClRZSglAwOI04SlRwj54/Ka1XxuJlSAyvp6RZijk8abnWFFtrkhphDpZHPDO2EBiGXr6g9StZSHykrPNrHGW4E3Erqbtrq+8V30JmLDxFs+7Mz4WYwyY/UYe/DqWHFdp7rZoAH5tuorsWBXQC5MEJKIRUHTNb6yeanPnl7OJhe74rxl20oa3AD7jSaBeW/GI4pI/dE+zj7zx6PAf3c/LYbD0VzGhQqPM9iHTD9PUxJTaT1iGR6JNQFbinQVWJf6v7LbCGzD8FrVsau+nY9/+ddbXU00dm8QssHyn5v8J/k4lu4G/P1xE/7sL1W5Gy5qZ5GOClNTMFxY9toO8aG7GLINncdSwkmgnHguAMUiY1m1yenbCfj1/9NH2WI5YuHgwWi0XfZkkPUonG9o2dD2yW0D/JPe+zcQ4WWCGWbAZMpKkkn0KxzRYyV0IiM5kwNP6Cb5bNKIt9f+F+h01+tn1d3W9rZSXff7UZsevLcyZT0CizpdTztuvdG29mKoxnSugvfLNrXXfkrhdXFYWwy/WrzK4DukdQYPX9Y+Dli5btcQ7s1/H4ggUTLDEp+Fc1pPevxhEFUUgti6rg8fFwGPFC3IWn18NhTTaxE7StSDSDu1IJ7aG1H47UrDAWGvz4wKR2KHTyX+2MsXIu9/32d7jVgPhdiGhNnqM23q+1qDA3Vv4N6YE9B/Yc2PMge1628X5m7EymKegDdQ7UOVDnQeoct/H+wSDLTKUPt86BOgfqPESdV10/eM410k8yxRzYW7AMrDX2QKMDjQ406qRRHfECMDdUoS6NrxKUAnMe84Ev5Q9CAZNTJZMI5XzZqrKKx3wV2FLHg8EqNw7reFUai/XglrbiVlhJZXu/czQdWLVGizKJUHnw2N41mqD6yTqavVr+LwJhIZY+i2VT6N6YfjN8M+y0SqIPWKTuQIgwYG7rHFibJUJ3mg3CjzFcU1HLQVJZicsrUgvpmYGwYEcVJf4eC40/b52egxCPmi9na4T89tfYbzKdYZebrsLpnSjKwMBNXXdTsNpAbf+5KU9tFaxCL2Fy3wzYyDY9gFDon27K9aEMP7wvsg/3SuiTVcjEdnGcxmpPi8z4NDSgbSeU0AXWhR0Y9o/aBLk49zxPTFFU2h/2es4WEnMmtjaoaSLRCaBkAlRLi1frfKzF3ocZ9il4ZEd9AldgwPqMn0vMq1k/McW6d3T/OVNmNiiE1IPGhRucjP64/vBu1Ht/fnL64eq0d9Qf9vEuVESJg4XQW+vwVdGmr/bM98ee78e71T469OK6e3ENpxDucFAqIbUv31oVyvLEu4knh+NrWFKLKhxbE75aURavraprGvZueTyZbk45eqojnoNIA75DFZmfhI3pjck/iavKl673+wt1tNYYJQmU+E3Z6dahffHxakxnQtNHLExKOlYsqMcoFjzmN/yG+24kWfDHjR9fcSX0vPKE5cEu/f0D9ExSDw== -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query users (alpha)

- - - -Search for users based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

required
    sort object[]
  • Array [
  • ]
  • page object
    filter object
- -OK - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -Bad request - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Unauthorized - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Forbidden - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Not found - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Internal server error - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-authorizations.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-authorizations.api.mdx new file mode 100644 index 00000000000..26777316286 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-authorizations.api.mdx @@ -0,0 +1,83 @@ +--- +id: find-authorizations +title: "Query authorizations" +description: "Search for authorizations based on given criteria." +sidebar_label: "Query authorizations" +hide_title: true +hide_table_of_contents: true +api: eJztWm2T2jgS/isqfbqrc4DsZvdyfCPgSXw7Aywve7U3mZoIuwFtbMmR5GE4iv9+1ZKNbTAzJJX9tJOq1Bip1S/qp1vtlnfUsJWm3Vvay8xaKv4/ZrgU9M6jEehQ8dT+7NIpMBWuyVIqwqqUmiyYhohIQVb8AQQJFTegOGt9FNSjMgVl6YKIdumSi6gmR1OPKviSgTbvZLSl3R0NpTAgDD6yNI15aAnbf2hUY0d1uIaE2dk4Hi1p93ZHzTYF2qVy8QeEhno0VSjWcNB2hVSW25E9Uhmy5BBHpcbUK1gxpdiWepQbSPTXCLMcT6XN1pALM5KgQmSxrYjTRnGxonuPShWBal5vpwgXZLPm4frAyKyBKIiZgciJQL4gssQ6ddqnHh34077z6JJlsaHdfNxwE0O+FyPk7uOy/d75hCuIkIcz6a5CbqHwawZqiysnzn90f4crU7aCU/3HbMWFdWRtty/eVSWT5k3hIoJHIpfEespuimHKEG115GJFcG1lq7kwsAJFPbqUKmHGDf34A25+zBPeABWUk7BHnmQJEVmyAFUTqMBkSqBjpEBX2N24UKJTs7c0TU4PDia52GNI1iJ9qZy/7Ry6/1PMtEFX/MbiDPQnku/eFvVkJFXwwGWmCz4KdCqFhqfwXvfH/qDqO1hK1eDgY10Xlq5R2SVXf7a2+0awjtkKSrB6DTY2LDq/4DT040Y3InxqKbOwzC3Q9UCor+2dX1fZjzMKyY0ANbMkTTrhYtxydIolxR8pqIRrjam5ScA5PS/lV+Sl+dSfUI9ORtc+9ej7yWg+ph696Y3HwfA99eh8OB37/eAq8AfVzDMqLHKJ6g4TJg79Atsz6SG60MLmIP35TQ0WNW9cWS/Uk18jYROY6jnWqAzsgEO6dd4Pnc5XQEmBzmKbdC4+Ps+79mlUPZvguXBbiM9sITN3RNUUfRLz38DqaY2NNCwOilxxtKM4l+fyhBl3ZKCQhqLgPEY8epTUml1nz+sHS1DA0q6z8vEAcWc5mkU0mBaZayBmzXUx96mahD/lFQVmTUFA6Ax1Pz1/Lsjt9fPjYuVx2dfqbs+676b6U4neBdNRZNaC8UBxjBh+Biy2EMhBUq+CL1e6MUv/mQnM+2scBAq0zFQIz9tZUBKzZibXcM0e4HvYXPKWhEVRW0Ei65yJke2iJj2U6fPZh9Ek+G9vFoyGpf33k7ndlht/Ou29x6d3vVn/A/Vobzy+DvoF+fT36cy/oR6d+cPecGYr/vH16Pcb3/4YT0Z9fzq9H/hXwTDI1wz8fjANRsP7if/rPJj4SHuOpDZa+KjquYpnJhU3lM6p2F+JiCJSTrfzyBMXhlK5qhkE0xRCvuRYi1Zc9o1ev4jZwcF99AD1aH/i92b+4eG+8E0wnM56w35l5rD7lamJ3xvkf5pW2nH0y/2sN/2lGGhiNB8PnBruoYlZPlNlN/CvfbvKPRSryoHBZFD+uBpNbk5oSwkVzIxrfjsN6SB6HjU9EnNtaiEYDJxvSp+Q8l0EvZMJ/iWDvDLAIuwUacWbefUMKdUdzEb1qVrhVx4wz9SGVcK9R998Ven3Bc8ysmQ8hqhFbqQCEoFhPNaEKcCXqwceQVQ5oq0sspDR1vVozpSMqZKLGJJ/nJaOxzs/dpS5XOJChzBNHOHCSb+dXPXJv9789M+7v62NSXW33d5sNi21DF9BxI1ULalWbbUM8T/S/b1FZmtQeOZuyQIwn3KUyWJSxj3RLg7D4v0yV9tGpLPvmeowTxV1h1eO0UzxBrTNJwHhEQjDl9uiZqyJptV+iy1eu4uYic+0xMKp0GMpOksSprbFaVoXgC/lhpmsitezzYamLPthNhsTx4KEMgLb3bM1Wy4IjUi4wM4H7b7pdDya90Fo9+dOZ4880eMXWCIIPKYxy+v6I3O4IEmJW2sYF9owEX4vz0jFV/xYbqv2cpeDeOAsKiLxdXMk5qUqiVn4WWNVzCMbnSg1LPpcYJVgsX7ivewlyF6C7C8fZD+e4v1KqgWPIhAWnod445oIaQiLY7mB6CWuXuLqJa7OxdVPTWVkD5tqBhTiEJSSisgwzJSCCK+VYss+BK0L2UVP5qVQfIm1l1g7E2t7jyZg1hKvuFOpLXSYWdMubde7hG33zkbxNk09gNK2r5CpmHbpzoXNvttu79ZSm313l0pl9u0H9MkDU5wtYodEnHbhVcAmliGL1070qftwQrDk0JTrsyQTESNvycSfzsh7ZmDDtnY708NlecH6bedtp7lHgw3hZo69cUCchQ58lYRQsMXIbmTriC9hvMfGrYYwU9xsp7jMbc8CmAKF79kVUOTyLHf87Yiolz9cFVD5939m1tuYzCbllwn+I0tSF4rlPV+1r1q7ikLEu88Obg+fBBxaCXflJbm71+4cbp47RzfCtztnYvXmFcf2FvhLae3LYXm6UwgbUNptbaf1+jQExoGN5FAmSSZsOhcrsuFmTVhl58M40/mFY8xDwEZFd0cRUBWx126G/OYkktctRI2DdpHFV9yss0UrlEk7dMsOfxexXLQTxkU7F6Hb/d7NfDjovboO+v5w6r963eq0zKO7sMAoS5io6GFb+kdN+WODKx+XfOv3LDmaDDyadhozLhDf1spdHvO39ESJPOrvvDxyb+luh1LmKt7vcdj2cGj39q4MdPy19+gaWOSQQD8jsmjfWfDKwg7J48w2xY7v+vZesaIXhpCaJ2nvKglsPJpi03iRf46TyAjXKLbBT3XYhnbpR/qR2k97TNHPteM7GjOxyiy0qeOL//4PH+mCiA== +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import 
DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query authorizations
+
+Search for authorizations based on given criteria.
+
+## Request
+
+Body (required):
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Authorization search filter.
+
+## Responses
+
+- The authorization search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching authorizations.
+    - permissions object[]: The permissions.
+- The authorization search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
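As a rough orientation for reviewers, the following is a minimal sketch of how such a search endpoint could be called. It is not generated documentation: the base URL, the `/v2/authorizations/search` path, the bearer-token header, and the property names inside the `sort` and `page` objects are assumptions for illustration; only the top-level body shape (`sort`, `page`, `filter`) and the `items`/`page` response shape come from the schema outlined above.

```typescript
// Minimal sketch of a POST search request. Path, auth header, and the
// property names inside the sort/page objects are assumptions; the
// top-level sort/page/filter shape mirrors the request body above.
async function searchAuthorizations(baseUrl: string, token: string) {
  const response = await fetch(`${baseUrl}/v2/authorizations/search`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      sort: [{ field: "ownerId", order: "asc" }], // Sort field criteria (names assumed)
      page: { limit: 20 },                        // Pagination criteria (names assumed)
      filter: {},                                 // Authorization search filter
    }),
  });
  if (!response.ok) {
    // Error responses return a problem document whose status code is >= 400 and <= 600.
    throw new Error(`Authorization search failed with HTTP ${response.status}`);
  }
  return response.json(); // { items: [...matching authorizations], page: {...} }
}
```

The other search endpoints added in this change follow the same request shape, differing only in the path and in the fields accepted by `filter`.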
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-decision-definitions.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-decision-definitions.api.mdx new file mode 100644 index 00000000000..ee029efec67 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-decision-definitions.api.mdx @@ -0,0 +1,79 @@ +--- +id: find-decision-definitions +title: "Query decision definitions" +description: "Search for decision definitions based on given criteria." +sidebar_label: "Query decision definitions" +hide_title: true +hide_table_of_contents: true +api: eJztGttu2zb0Vwg+bZhiu1u3dXrzknTz1kuWpNuDG6CUeGRxpUiVpOIYhv59OKRky7Ycu12GvThAkEg89yt1yCV1bGZpPKUXkAortCIcMqGEE1rRu4jqEgzDhwmnMc2E4i3gxQrO0ohysKkRpUeL6Q0wk+Yk04bwXbqWJMwCJ1qRmbgHRVIjHBjBBu8VjaiBTxVY97PmCxov/aMwwGmcMWkhoqlWDpTDNVaWUqRevuHfFlkvqU1zKJhflfJtRuPpkrpFCTSmOvkbUkcjWhrUywmwHkMbT21LB20cyQRIvpaPRi0pZgxb0IgKB4X9HGae4i632xwaZk4TFIgkiw4764xQM1pHVBsOph/fLxGhyDwXab4i5HIgBiRzwAMLpAuqKtDt45tzGtGLy5tz9DaHjFXS0bh574ST0NjiLVK/RLS6jjpemTYq3XXAvfv/qMAsEPM6OJTWd4hZshnsyn/FZkJ5R25Y+2irGl30G0UoDg9EZ8R7yhvFMeOI9TIKNSOI2zG1UA5mYGhEM20K5sKr775F40tRiJ5QQT4FexBFVRBVFQmYDYYGXGUUOkYrdIW3xpEcg5jjzPU5fbJSKeQbQ7ABOdcm+Nuvofs/SGYduuJPJiuwH0hjvQXKyUhp4F7oyrZ0DNhSKwuPxfumP+qVqD9Dpk2Pg7dlTTxcr7CZMP+1tHVvsF6xGayDNerRsQdpP8Ju6steN2L49JTJVr+AZjfTYZNCT/HexO7YZo9wfKes/w6Ljun6g/SH5zu1H7Vh1oqZAk4+wiJqyhFLnSXMEkYqJT5VQAQH5UQmwPhG4XJh+8wwwDTYlW7CO8I15bFPlIvXb8jkAiPH9ZvZ01esgOPpIfQhivdgrAgd6VCaP2rBhs4hdu3761CXC1DOHmMit99EpkOKzAwrc+Jy5vaJQYQlJVZWne2V6IlC6qlFdaCYcsdGVIA+GFSdarG7ZXrps3KzMe6H7qs2dejCofT5DP52NPrs2mLAVtKFfdeT76sOdnqhguPxf5boKjhsQ7RHy94XkHpcYqcdk5O2aWxZEteapl4wF/YOyKRnd7g3suuIbnW3fpf5jdu9B2ijzON5/riTCJs6VItYcAPyzkKooM3ah243/tBsLbF9KgLKVij77kbkiCa/uZE4WnhE+1zZ/abnyUR/rOOHJNpKw42kW0EciiCxJ3j8DrEJmr5PouNVOTXuU+M+Ne7/q3Fjc9isJgfadVs5PM7zL2jRn5AOyZiQwAfktTYI7ZiQljAD+G10LzjwTmH1HEmi+eLRxl4anUgovtlt8JvyjclVgGz4klCIsCQEwCRwn16/PCc/Pf/+x7uvcudKGw+H8/l8YLL0DLhw2gy0mQ1NluIvwn09ILc5GKyMC5IAYZx7zZkk6xJHbAmpyETafh42YhP0d9DvQE/3q7vxsQrdyoidcBmTd9eTttAt2k6/wZp2xyV+yxEnkqmPdB0bh4JyTGxVFMys8mKTAX5TO+Yq+6W16Nfb2ysSSJBUc1iX64YRKlEIhYMLGj8fjSLajDFo/MNo5GsBevwITRSBh1KyZje2pY5QpFjHrVdMKOuYSp/KM9qImdjmu5nLTRBfBI3afHzWn4/NBoNIln60uJcRnLDK5cg1bcdU4IVg0jfvU5KdkuyUZP1J9t1uvL/UJhGcg/Lhuco3YYnSjjAp9Rz4Ka9OeXXKq3159X3fZnKMoxAHBuMQjNGG6DStjAGOX3PSk0/B2pZ3+yV92iiecu2Ua3tyrY5oAS7XeAReautDh7mcxnTYfriddWY5w/DlRvFIzOAYwI8uKyNpTJcheep4OFzm2ro6XpbauHp4j565Z0awRIZ4xOWQZG3wSJ0ymQcBdp2IC90BxzkrKsUZeUGuL29uyS/MwZwtvFHL1Yl3S/rF6MWolyqC7qE4vpqQoGEIwU5ZaMlifveSDcDHEK5x6GYhrYxwixtEC+ZJgBkw4wr9sAqNhp+njs8BiEbNPy/bgPntr1vvcyxp1+v7BpcPrChDQnYP63oHbKN9w611fIbh1Pp5NVrq4G7PfbqJs2cOM+oOPjpXA8I9hunqjsFq6W596h4Oykero+zR1hHzdBnM3T3KxXe1T8VMe1s3ibLrta6OdDR4tpuUVxNfW1JdFJXyDUbNyFy4nLBOFKSyss2ppRQp4AAlXrbmbMFehRXyZzNoezbACA5p1vaVmXB5lQxSXQzTgLb6m0idDAsm1LBhYYfn49fv3lyMz15Nzi/f3FyePRuMBu4hDL4x7wumOnL4AU/vMHdb7eW6s/67qzFNlDt4cMNSMqHQ7V7jZVORprSvItHWpXhLI9SVKV0ukdc7I+saX/s5E42nd+syhE91RHNgPMQG/YjxR8+DNme3KA6Cy8oPrrdPjeqoxRinKZTuUdi7TpG9entzi0nbXAEqNEccw+Z4PYjNaUzf0/eU4vWkoB/eE8L3SyqZmlU+2Gmgiz//AEMA4x4= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import 
ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query decision definitions
+
+Search for decision definitions based on given criteria.
+
+## Request
+
+Body:
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Decision definition search filter.
+
+## Responses
+
+- The decision definition search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching decision definitions.
+- The decision definition search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-decision-instances.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-decision-instances.api.mdx new file mode 100644 index 00000000000..dd40cd83d25 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-decision-instances.api.mdx @@ -0,0 +1,95 @@ +--- +id: find-decision-instances +title: "Query decision instances" +description: "Search for decision instances based on given criteria." +sidebar_label: "Query decision instances" +hide_title: true +hide_table_of_contents: true +api: eJztW21v2zgS/isE0Q+7OMV20myv6/vkOs6eb9MkFzvdw6XBhpbGNrcUqZBUEsPwfz8MKdmSJb/kNu3h0BQIaosczgznlY/oObVsYmj7hp5AyA1XknBpLJMh0NuAqgQ0s1zJfkTbdMxllE/rZ7MMDWgEJtQ8wXm0TQfAdDglY6VJtL6mISNmICJKkgl/AElCzS1ozhqfJQ2ohvsUjP2gohltz91XriGi7TETBgIaKmlBWhxjSSJ46GRr/mGQ8ZyacAoxc6NCXIxp+2ZO7SwB2qZq9AeElgY00aiT5WAchdJutTUNlLZkzEFEK/lokC/FtGYzGlBuITbPYeZWrHIbTiFjZhVBgchoVmBnrOZyQhcBVToCXU/vhgiX5HHKw+lyITsFokEwC5FngeuCTGM0eGfQpQE96Q26aOkIxiwVlraz55ZbAdleXODqPSRbLIKCVW4ylW4L053x/5mCniHllTcoXdwiZcImUJX/kk24dIYs7fbeu6pVXL8pXEbwRNSYOEu5TbFMW2KcjFxOCNIWtppLCxPQNKBjpWNm/aO3R7j5gse8xlWQT8yeeJzGRKbxCHSJoQabanR/oiSawu3Gnhy9mJ2xrTN6f6mSjzaG0xqkq7S3txtD898JZiya4hMTKZg7ku3eDOVkJNHwwFVq8nU0mERJA9v8vWyPxVLUDzBWusbA67KO3LxaYcdcf21pF7XOeskmsHLWoEbHGqLNBNXQF7VmRPepJMlcO09kysFQpq8k7TJtYV82CBatpfNfYVbYtHr3fHdcyfmoxxdwVrJ1KjXIubJA7JRhUuKGcEOk8gkqlfw+BcIjkJaPuY8gHMDvdka4NSDGf/MOsi5vP7ojBvQDGMKMm5JoHjM9K6yHtWURVFTtRwVNsyxbp1f/ZLNaLk4tszVej6RuaAv1Khn3PnXOrjvD3gkN6Gmnf+Y+XJ8PLnvd/mk/+/br+cVv58Vsu16MB8jPJ+qAwgMTqcurp4yL1EfmbnVXZGTs6barv5p/snEfCmtGu3Zko6szCweWx7DKCY/cTsk4FYKw6AHpo9z9Q5awERcc3bwuCJSEcnFZbsjS0aOcH10E67J0cnbLSXsH3Bu4r+5RdwrhF+NaJrhPmeC5ct6h1QOPICK4iVDtDDbITN/IHay4fEFm8MSNNRv5cW/xMNUapF2Z0JMV+IyUEsBcwL6Z1BTcXzQwCxpTiSShihOmuVHyBTSY1HlviV1unpflK2q0PFOPX0VHUadjgdnX0ZDLXX6x9IeY2XCKyVwui0mZrdmj1m+Vp1jL8zjG3DLkMZz6Mr0optjS0GUmJza0LrJDMOYExlxy1Otlqme2LImW67pEmz1+2Uqd8yql9DwvVxTbq+hvS+JnSk7W8veIGR7++QS+cQOq2ftDmaNAmX5wk3/8X6TxLZK/cA7fyulrJPBvHfibFSxGvbM/emJNuK+NleO9GhfP7yFPPp4XIqV20XMW79mrSRbDMxb+BNpwD5XsOn/WcXvw5NV4r2U2dBzqcgbyrksaeS980uv2B/2L89+HnQ9nPRrQs/6wd9U5+733r8ur3gCHntMcl0XKu2MLkkm7r/387B1HgcXmxtx7UxkN2djE1xwwFx548add5/FHrdYzj5MaTCqsB9peHEjbCe1w6T0MP7ORSv3ZryTa1pPuf7HUdomtskz08wSyto84lqE4LjEhWOTSXxUO3JZU1+CMDYdEROp8isu9y9E5/ggdeRQP1SIGbINcG8gO0X7srgi/3GVYIuIlkoA0KcpeRZ72QHXKyNHewiPZc2V3KNeLib4N4vEhtBaCpZBbztjlQXyD8zhIMHOaKgK+vyLfJVbzfSM1OYSyx0FmB3azA2f5dvjQd3dIeqF43MDkteP8f+04fRncb5uzkrk1sL5tB4udUrm0bu1b8yLqKI6f3ave4youv0DUIB+VxrmWcWEI04WT4arDcPzISEWzrR1uotVIQPyXaqe7hvCSSz8z40t8TcbC5SeOPPebq9Mu+fn4p7/e/jC1NjHtZvPx8bGhx+EBRNwq3VB60tTjEP9w3o8NMpyCxhZhRkZAWBQ5T2GCrKo9MQmEfMzD/MVYJrZzZK/fjuY2C4WNdSTVvOImHXJ91c9r8SxveUusafFFseu92yPB5Be68otdztghJo1d1V9l1wKDrEynu4/1G3LG34fDS+KXIKGKwMEkro/JGKESMZf4ypa2j1utgGYvcGn7XavlMgtafA9NJIGnRLDsWLKmDpckXvmtU2x5s+JlLKM0n/B1vuUozpz4xGuUR+NhfTRmnTYRDAGaByZ4RFhqp8g1zF/QgxOCCdfHvgbZa5C9Bll9kL2t+vup0iMeRSCdey7jLTtfMSHUI0SvcfUaV69xtSmufqprJTvYQFrQ6IegtdJEhe5FRYT34cTyAJfzziGl10bxNdZeY21DrC0CGoOdKrz4myjjXIfZKW3TZn5sO1iCmk1/bqN4FVDjqd0h+KkWtE3nPnQW7WZzPlXGLtrzRGm7aD6gXR6Y5mwkvDfisA+x3HWECpmYevZVE+JAEZHosjiVESPvyVVvMCS/MAuPbOaBoeVN33zp9633rdpVceqGFTuXfQ9heqCymBTyZTG6a5f1k/dZeIHYs4Ew1dzOBkjmt2cETIPupGiFpWNk/Nzq7v2rm0SD7MNp7i7/+G3oLI4J7Wp1z7r3xOLEh2PxkmIN0tyqB2ZXnlkDKNYNeoyTHrWOjg8Ojw4O3w4P37YPf24fHzWO3x39m26CDFv16F5BrCpBPX5WjNN6KGzbjCWm1SpiMIV72v5S+c3ywvdy6HZ1BdrfWm4t7xW31u773sy9DxTv1eKzhcsOY+UcIIvdqithXOVS0lbjsJonLvsu3YUqjlPpap6c+BsErOCaoUhNdh1C8BAQ0WnPqfRblE878yMk2xdy2MCw8rGfl7oJt9N01AhV3Aw92fL/kVCjZsy4bGYsTLPb+Xh9ftI5OOt3e+eD3sFho9
WwT/6lFKaimMmCHA5xqnnRsq70fFXq/8xvFLKws/Bkm4lg3N10cNrOswR5Q6sJkubGRLzQp7kbOp8jp2stFgt87EAv2r65XWVF/LYI6BRY5L2CfkHPpl2vyYGDPHG6SN0LpfV3uXj5xVN0whASu3XubSHjX14MhphDsl9ixCpCGs0e8Vca7JG26Wf6meJFHLe7Lj2553MqmJykzs2pXxf//QeYuYZJ +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query decision instances
+
+Search for decision instances based on given criteria.
+
+## Request
+
+Body:
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Decision instance search filter.
+  - evaluationDate object: The evaluation date of the decision instance (oneOf: string).
+  - decisionDefinitionKey object: The key of the decision (oneOf: integer).
+
+## Responses
+
+- The decision instance search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching decision instances.
+- The decision instance search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-decision-requirements.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-decision-requirements.api.mdx new file mode 100644 index 00000000000..5e7e149ef01 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-decision-requirements.api.mdx @@ -0,0 +1,79 @@ +--- +id: find-decision-requirements +title: "Query decision requirements" +description: "Search for decision requirements based on given criteria." +sidebar_label: "Query decision requirements" +hide_title: true +hide_table_of_contents: true +api: eJztWm1v27YW/isEP91hiu1u3dbpm5ekd97tS5a42wc3QCnxyOJKkSpJxTEM/ffhkJIt23Lsbh2GC7hA0MQ87+c85/DFK+rY3NJ4Rq8gFVZoRQx8qoSBApSz9D6iugTDnNBqwmlMM6F4S3rbpYwoB5saUSIpjekdMJPmJNOG8D7RJGEWONGKzMUDKJIa4cAINnivaESREqz7SfMljVe0YeQ0zpi0ENFUKwfK4RorSylSb+HwD4u6V9SmORTMr0r5NqPxbEXdsgQaU538AamjES0NeuYEWM+hjZe244Q2jmQCJN/YR6NWFDOGLWlEhYPCfo4yL3Ff2zSHRpnTBA0iybKjzjoj1JzWEdWGg+nn90tEKLLIRZqvBbkciAHJHPCgAuWCqgpM/fjukkb06vruEvPNIWOVdDRuPnfCSWhi8RalXyNbXUedrMwal+475D7/v1Zglsh5GxJK63vkLNkc9u2/YXOhfCK3on1yVI0u+oMiFIdHojPiM+WD4phxxHobhZoT5O2EWigHczA0opk2BXPho2+/weBLUYieUkE9BXsURVUQVRUJmC2FBlxlFCZGKyBNeZ+oMZg5zlxf0idrlwLgGJINyKU2Id9+DdP/QTLrMBW/MVmB/UCa6C3RTkZKAw9CV7aVY8CWWll4qt6381GvTf0JMm16Erxra+Lpeo3NhPmnra17i/WGzWFTrFGPjz1Mhxn2oS9704jls+6THDKhhAdC419gs9tw2JbQ28C3+TvROWAe72nt/4NlJ3z9hfr9870BgB4xa8VcAScfYRk1LYmlzhJmCSOVEp8qIIKDciITYPy0cLmw/SNjgGBQrICOOU1T7FN+9foNQWqsGNcN757MBzBWhMlxDI5PetnIOa6wL8wTftwt17g1uTquw4Fiyp0iFX0I1CcI7gCgbx/w0hfadrd/ir4PRHUYLgHRviy/GY2OQKav6A3YSrqwofjiG4ajI0yoUDr4O0t0FYbwlmlP4vkviHraYqcdk5O2G+7EEteaaVUwF4YiKunZ9hzsAHVEd9p2f9L8juTBE7TV5vm8fhyRYbeCbhELbkDeWQhtoVn70B0zH5o9E84FRUDZCm3fn7AnTK/tCXmy8cj2ubb7af7FTH9qlAUY7UBxC3ZrimMVJA4Uj9/6NEVzoG+c6st5IP1fDaTpZwwkA1ZXJoU3JwesG6yW22/V29PNwcyQBbOkZMYC/xeHITbbbXQeHYEtFj3X80Njr2n9n5CHZExI4APyWhu01DEhLWEGcMv8IDjwTlvy0kmi+fLJsVganUgovt4fj9u2jMlNoGz0koBiRFMgTIL22e3LS/Lj8+9+uP9P7lxp4+FwsVgMTJZeABdOm4E286HJUvxBuq8GZJqDwb6yJAkQxrnfDzNJNv2B2BJSkYm0PTU0ZhPMcPDvyET0q/sVsYZYZcRegYzJu9tJ2yOW7ZzcUk27p2g/sONEMvWRbirhWBmOia2KgpllW4LbCvCo5Zir7F9tET9PpzckiCCp5rDpdI0idKIQCs+zNH4+GkW0Od3S+PvRyLcMzPgJnigCj6VkzV5mxx2hSLGpW++YUNYxlX6pzGgj5mJX7zZ2myK+Ch612HvWj71mPBPJ0o8WdwKCE1a5HLWm7e0FeCOY9JPvDLIzyM4g6wfZt/v1/lKbRHAOypfnGm/CEqUdYVLqBfAzrs64OuPqEK6+69s4jvEiwYHBOgRjtCE6TStjgON2WnrxKVjb6m7PoeeN4hlrZ6wdwFod0QJcrvFttNTWlw5zOY3psD00XnQPjcNwdKP4VGLweO5v/iojaUxXAT11PByucm1dHa9KbVw9fMDUPDAjWCJDQeJyQFlbPVKnTObBgv0s4kL3NH3JikpxRl6Q2+u7Kfkvc7BgSx/Vcv0S2op+MXox6pWKpAckjm8mJHgYarDTF1qxCPBesYH4FME13llZSCsj3PIO2UJ4EmAGzLjCRKxro9HnpePfgYhGzS8v24r55fepTzr2tNvNO/T1IyvKgMjuI86B66lRezG0Kcn1pc7o8H3LhnpzY9F5+A2v1LP1C/J66X7zphqeQUfrh8rRzgPibBWC1n2ow89qj6hM+4g19b4f+64fdDR4to+tm4lvEakuikr5OaHmZCFcTlgnl6msbPMiJUUKeOcRr9qQtWSvwgr5rbnGejbAOgxgacfDXLi8SgapLoZpYFv/n0idDAsm1LBRYYeX49fv3lyNL15NLq/f3F1fPBuMBu4x3P4ifAumOnb4O5n+y59dv1ebCfk3v/rQVKuDRzcsJRMKE+99XjWtZUZ7Wwtts4rP8KFBzOhqhcreGVnX+LG/MaLx7H7TT/CvOqI5MB7Kg37EAqaXwZ+LKdqD5LLy97e7ryd11HKM0xRK9yTtfadd3ry9myL6mu94FJojj2EL/P4HW9CYvqfvKcVvoKAED2z/+YpKpuaVr3ca5OK/PwFbwShQ +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import 
ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query decision requirements
+
+Search for decision requirements based on given criteria.
+
+## Request
+
+Body:
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Decision requirements search filter.
+
+## Responses
+
+- The decision requirements search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching decision requirements.
+- The search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-flow-node-instances.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-flow-node-instances.api.mdx new file mode 100644 index 00000000000..b0b45006c16 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-flow-node-instances.api.mdx @@ -0,0 +1,79 @@ +--- +id: find-flow-node-instances +title: "Query flow node instances" +description: "Search for flow node instances based on given criteria." +sidebar_label: "Query flow node instances" +hide_title: true +hide_table_of_contents: true +api: eJztWu1z2jga/1c0+nQ35wDd7e72+OaAs/WVEIohvZ00kwpbYG1tyZXkEIbhf795JNsYMAlpczd3c+lMJ1gvz4ueF/30SGusyULh7g2+SMQScRFRxLjShIcU3zpYZFQSzQT3I9zFc8YjGDcUEfWLUQo7OKIqlCyDcbiLA0pkGKO5kGh+QFShGVE0QoKjBbunHIWSaSoZaX3m2MGSfsup0uciWuHu2nwySSPcnZNEUQeHgmvKNfSRLEtYaIRr/6mA8xqrMKYpgV+7Ih0qh5SVsmCIHUyS5GqOuzdrrFcZxV0sZn/SEHoyCcugGVWGh5D6kEMgpEZzRpNoqxF2SlJESrLCDmaapobIqcwMxUNuk5gWzLRAIBCarWrslJaML/DGwUJGVDbPN12IcbSMWRhXhHRMkaQJ0TSyLIAu5XkKTuIGPezgvhf0wDkiOid5onG3aNdMJ7RYiyug7sG0zcap2fGmUOm2NtxY4mNO5QpmjguLbG5hZkYW9FD+EVkwbky/s9onr6oUafOiMB7RByTmyFjKLIomUhfewvgCwdzaUjOu6YJK7OC5kCnRtunnn2DxE5ayBlcBPil5YGmeIp6nMyp3GEqqc8nBMILT0j9P5GjFdOe6yeh+pZJ1fQLDWqgnpLW36QPzf0mI0mCKa5LkVH1BxeqtQE6CMknvmcjVNoRUJriij/n7rj02lajndC5kg4H3ZZ2ZcY3Czpn8d0u7aXTWEVnQrbM6DTo2TDo+4TD0k0Yzmtg/ms/sLLUbDU8mQzurtiTHZNpL/h/oqrZeW888lJgoxRacRugrXTlFyiGhVogoRFDO2becIhZRrtmcUWl2Dx0z1aBqa8/1f30Lrp9JEVKlni9ZMXG7GF/pCsQVITNJ0PjZ8wXp0znjDPg8X5SomvviwvhHdpMG1n7/dM4HG4/SRDfEdQDNEJgNHkwK7jRCimoYdG8i+pB+bUPqTfxrDzu4d3U5GngTr48dPPHGl/7QhY/bTTn3QOlVtifI9/CfDoOR1/MvfMN4NL7qeUGAHRxMz++2X961N5zc7bYFE3c8uTM92MH+EIT2+r478e567qT3vrlr8n589anqOr+aDvvu+I+qwRv2q9+BN772e97dxA0+YAePvZ7nX1ef08Abl78v3eHUHZRfxR/vn73BNIAZv7sT75P7hxHlsG3kjt3BwBvUmqy6527g9Wutgfdx6g173t3F4OoTcJ0OJv6dPwwmLrSeX/VhVM8dDO6MVf0JfJ9PA3/oBcHdeDqohA96Y380qb5A61Kt4Yfh1aehsXuVrKJa+FUWfCyp+v3H80+N+JCk9LnkOUktkZgon4cm69VozIRIKOGHuDoWS4WWMdUxPSociiGhAnywdCssp4VhWbafnpQgAwE+KQmyeQE9ty0KaZkfyUCacsL1sbRje5Hfb0gitQ10/9BxYfarXaR4bGzT5ruxoNQiAbOz/dTpPHerlVTlibbnlpPPJadC1CdxL+N2reE3mYncIvcd0R5FAd9B6nGJtdAk8UsItbeQ0FdA3JRoi6SBScNZqRnfWmfaw3rNFjPHGJu6wXG1OStJpQ1/wNX2iANqQaJvoamiNpyKvi91bPql8HYAkxxRrnKQ/RCWnwB5d2H1ycLDtOfKbo4ALyb6Y/jXxtBeDO7EXDXiKQ9iR5zHnJcKp2moKJyuyf88nH1p9HpCyB2Dsy+PXr9HmP8gnJW63whpoRV2Zd64VcA8GjVByEq7iGh6pllKgRHl0XewASVV/Bw+u+DoR8BQw2rtg6OnwNCzGbxC+v9PSP9fdKrcw+4vANVbyJ8bII1IokS95wNdAcjOpLhn0U6Ul2eFQ2S/V08rsfpeRl4yHf9gTj4R4JdoptL2WYAfcOUuEHkU5peQw8x4ewzaH78V+AZk0JywhEYtdCkkRRHVhCUKEUkrQ9QAmWGIZiJaPXogyKSYJTT921MXFi4a2ZEFX2QRjPVmGDiz3G/GFz3097e//Hb7l1jrTHXb7eVy2ZLz8IxGTAvZEnLRlvMQ/sO4v7bQJKYSENUKzSgiUWS2R5KgLTZCKqMhm7OwLLIWYiMwmNXvibNAkZ+P7kW5ZAe4ykXTsV+CplV5QthhjeuXDuao0p0lhH/FW8d46ijuIpWnKZGr0iF3GRRJJleNQHCv4t6EDN9PJiNkSaAQfKva2wpGoETKOJT/cfdtp+Pg4jIAd3/tdDZAEyx+giYc0YcsIcUpbk8dxlG69dvi7F9c7L2MZYRkC7bPt7UTxoUT961GZTi+aQ7H4mCCEhJ+VZClWYRIrmPgGpaXPdQIQRKTv1+D7DXIXoOsOch+brgFF3LGoohy455VvDGFuNCIJIlYWnDxGlevcfUaV01x9UsTlnQBqWsqwQ+plEIiEYa5lICvY5ZU1YiSd1mBewWKr7H2GmtHYm3j4JTqWMC7s0yYF1oZ0THu4jYcVeHYdlaVgNv23IbhWYm8p1KZC49cJriL1zZ0Nt12ex0LpTfddSak3rTvwS73RDIyS6w3QrcNsdJ1EhGSJLbsD00IHaZ+VajWI2nOI4LeobEXTNDvRNMlWZklzapXYyXpd513neZXAHAF0EzRHfnIamgdsJYUSrIQ3Y1k7eBTCG+gVK9omEumVwFMs8szo0RS6eZghcoxCn6GuqlImEHYKX5clO7yj08TY3FIaOPtKz/vgaSZDcftg5fGunynuSjeOVaibuowrxgrVy6qSdtyT6HSbgWwXimthcFOiXPbvlMWgmLOXmGmUy+Y1AvMxj9uqhd/Vdft9g2cfbbWqR6WdfYefN2sreHqD6ugbWNCei6M1YqAO7Q/BAOVyjpMp/XmMLhHvslRoUjTnJuNii9sBYnU/ClMclU8JkpYSKEO011jbtepHDawPejackRvWhALNmDL/WnBdJzPWqFI26GdVv2dJWLWTgnj7YKFavfcS6icng38njcMvLM3rU5LP9iLN8gfKeE1OT7aAs/hZdK+1uv
tBv1DD1sL19L0QbezhDBTsjP6rou8doMP8xouzQnvJW12usHrNXCaymSzgWZTq8Ldm9ttMoMvKFFSElm/wF/B93DPqnIGhXKT+5Lc3Jrt31hvnHKGG4Y004+Ova0l6tFVAHXqWfF8NxURzJFkCU97yRJ38Wf8GWN4VwwUTFYx7WucEL7IjaNjSxf+/Qtj90nf +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query flow node instances
+
+Search for flow node instances based on given criteria.
+
+## Request
+
+Body:
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Flow node instance filter.
+
+## Responses
+
+- The flow node instance search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching flow node instances.
+- The flow node instance search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-incidents.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-incidents.api.mdx new file mode 100644 index 00000000000..b5bdd44a810 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-incidents.api.mdx @@ -0,0 +1,76 @@ +--- +id: find-incidents +title: "Query incidents" +description: "Search for incidents based on given criteria." +sidebar_label: "Query incidents" +hide_title: true +hide_table_of_contents: true +api: eJztWm1z2rgW/isafbp3rgMkze52/Y2C0/VuAiyQbGfTDCvsA6i1JSrJSRiG/37nSLYx4CR02/1w56YzmYJezjk6r48OWlPD5pr6tzQUEY9BGHrnUbkExQyXIoypT2dcxMWsph6NQUeKL3Ge+nQETEULMpOK8GIRmTINMZGCzPk9CBIpbkBx1vgoqEcVfMlAm3cyXlF/bb9yBTH1ZyzR4NFICoOC+GvKlsuER1aU5ieN/NZURwtImZ1Nkv6M+rdralZLoD6V008QGerRpcIjGA7a7pDKUtsTXCpDZhySeCsf9QpSTCm2oh7lBlL9NcwsxUNu4wXkzIwkKBCZrirstFFczOnGo1LFoOr32ynCBXlY8GhREjILIAoSZiB2LJAuiCxFs7ZHHerRbjDqoGFjmLEsMdTPxw03CeS66CP1ALdtNl7FKrf5ke4qy63Nf89ArXDn0BmUbu5w55LN4VD+AZtzYQ25o+2jtapkWq8ULmJ4JHJGrKWsUgxThmgrIxdzgnsrqubCwBwU9ehMqpQZN/TmDJWf8JTXuArySdkjT7OUiCydgtphqMBkSqBhpEBTWG0cydGJ2Z6ZOqOH5ZFckDFc1iAdqZy97Rya/6+EaYOmuGFJBvovkmtvhXIyslRwz2WmCzoK9FIKDc/5+649NqWo72AmVY2B92Wd2nW1ws64+qel3dQ664DNYeusXs0ZazY9veEw9JNaMzo3dbmxOJRbq3djYE+jtVsqWnhCjILVb7CqqKbeCX88P0joKC3Tms8FxOQzrLw83bDIaMI0YSQT/EsGxDLhMw7KZn+z4Lo8ZgN9e6lkBFp3YcYFR+rfIFFOi8QlMZQNBZURt6nPeteLIoRxRYI86x7JL+weyy4U2jARwXc4L89JHXVaUEqqseX20hlL57J7CC4mD9wsCHNHhphoMBiQ9zZIqzXlujcaBJ3wIgy61KPXvd96/T961KNhf3LVHgzC3vtJMBz2h9Sjv/bfTXr9yTAYD8NgRD3a6fe64Tjs98olwYfxsN0ZT27al9dBOdppX14G3UlwGVwFvXE5fN37pd3r2hkcmQQ3QW9MPXoVjEbt98FkFP4ZTIIPnSDoWuFyMt2gE46qTLcDyLa9I9BFf3g16fXHk4v+da9L7wrFXoHWeW17XreBVWnqlufR41ZMMQsuIFc6FySVCkgMhvHEWnCWyIeejOFYN8X1RMgYjvHOkvi3u+eW71c5aKTAYoAxT4/QY5cZsIW28NVie6MqZMwMnBgkiDXKMHME5ZHJSe+IeHQAtDvj8CZAtwvfD9tj62nDYNS/vLEfB0GvG/beW8/5JKffoOZPcupSMJ8ReOTaaK+qYyvvoZYNCCZM+AQKdbPoL/b8UNl7AEcrFbFIGBe2Cu1Cvv01ddVz41ClK+W2Tp21WsfVSgU6S4y7PHz3y8GLcJULZyj8zKYyc4B7R7Rny/jfIPW8xEYaloQF8tlTH87lyDRlxgFgZFJzxXnSEzFV7EK0ejvZ24eLjsKX7D7LH7Obu5ngsTCWGuRaQ+GuDglWIeVf+f0IMaAgIHSGsh+i6SOQ6i4aPlp43Pa1slvk/t1Efw62usjZi7idSCtXvIgQn3Aee83Jnaa8zB8v/ysQfQWi/zNANPgQdK4t7rsMR+OgFwxfgeorUH0FqjVq/i64tA5l/KNAFRHSbkmthadF0bQrz4+FpF9wM5kxnkDcIFfb4NCEKZs073kMcQVIWDZkKuMVAlnL67SeV44bSMKizxqNymPCMrPAIhcVLVSwNY8l1txPgOKlktME0v8cguNdpm0ycCvzMxBXz7HGFtnAnuR2eNEhP5//8NPdvxbGLLXfbD48PDTULDqBmBupGlLNm2oW4R+u+3eDjBegEFWsyBQIi2NbwVhCtkiB6CVEfMajoj+Yi21LgQP9L+DhJ+pM6W+Z4gde3CbXw7BADqsCJe+wptV+uYXr/jRh4jPdutRL4dsmOktTplaFC+8yyPNApl/Mbm/OauPwl/F4QBwJEmGSK/FPzggPkXKBnWvqn7daHs372NT/sdXaIE20+BEnEQQelwnLbzJ7x6kpEEW6/U6WkYrP+T7fxk7g507cdScqAvrNob9fSDXlcQzCumcZb1wTIQ1hSSIfIH6Nq9e4eo2rp+Lqh7pC2cYOhwGFfuiAo4yiTClEBAuelHeJgndxQX6usfMaa6+x9n8daxuPpmAWEl9iLKW2rsPMgvq0WbZomg6VUvyNVt2D0rYNmamE+nTtImbjN5vrhdRm46+XUplN8x7Ncc8UZ9PEOSFOu8gqPCaREUsWjuuh5XBCsBSKE3VYmomYkbdkGIzG5D0z8MBWrh9RPsEoSL9tvW3VNxywMVdPsT0IiTuh87tKLijIYlDXknWLjyG8wQaahihT3KxGuM2pZwpMgWpnqPzSH3J+ljp+d4uol3+4KLzk1z/G1tCYx4bbBzDBI0uXLgq3vx7v9M1aT7Wv6ibse53SReuaQK2d5sxeB2W3e1Dx9cplv2Z0j/7u3Zmetc7OT07PTk7fjE/f+Kc/++dnjfOfzv6k5SV4e00tLqGt6t2w8jjGveS5LV/ZlFN323cn7qlIq3zM0dp7ZHG7dvatPmbAsY0N+Jm0xs3D8dBNMGZAaedXrcbpYegPQpvBIpmmmbBlTMyLC/uWXpRkOv81P+ER4BXUX1OMpgrbSzdDbhxHctrAkHFxXVSvOTeLbNqIZNqM3Lby/2kip82UcdHMWehmp3113eu2Ty7DTtAbBSenjVbDPLquOWaXlImKHPaKvO0E7591vS3af+MZWB5ABh5Nc5kwbi/F9mzrPMPd0irrPMfdeXmeuqXrNTK4Vslmg8P2Tk7927ttWsNvG48ugMXO9PQzuhftOLlPbBTg8iSz/e39X5Q2XrGjHUWwNM+uvatk6kF/hN2+af7GLZUx7lHsAd+/sQfq04/0I6X41A4p2Pxix9c0YWKeuQB0dPHffwGu8aAk +sidebar_class_name: "post api-method" +info_path: 
docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query incidents
+
+Search for incidents based on given criteria.
+
+## Request
+
+Body:
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Incident search filter.
+
+## Responses
+
+- The incident search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching incidents.
+- The incident search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-mappings.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-mappings.api.mdx new file mode 100644 index 00000000000..20b97713c77 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-mappings.api.mdx @@ -0,0 +1,76 @@ +--- +id: find-mappings +title: "Query mappings" +description: "Search for mapping rules based on given criteria." +sidebar_label: "Query mappings" +hide_title: true +hide_table_of_contents: true +api: eJztWVFv2zYQ/isEnzbMsZ027Tq9uW6yZWvaLHG6BzdAKOlksaFIlaTiGIb++3CkZEu2nDpF9zQXCJqIx7vj3fd9osgltWxmaDClFyzPuZwRXQigtz2qctDMciXPYxrQhMu4sjC0R2MwkeY5DtOAXgPTUUoSpUnW8GJIyAzEREky4w8gSaS5Bc1Z/7OkParhawHGvlXxggZLGilpQVr8leW54JELPvhiMMSSmiiFjLlRIT4mNJguqV3kQAOqwi8QWdqjucakLQfjZijtvG3kqrQlCQcRr9OhvdoV05otaI9yC5l5TjDncTvaJIUqmFUEEyLhohHOWM3ljJY9qnQMunu+GyJcknnKo3TlyKZANAhmIfYh0C/IIsNmjq7HtEffnV6PsZUxJKwQlgbVc8utgKoWH9H7KU4rS98TriFGH35Jtw1z1+a/C9ALnHnl+0fLW5yZsxls53/JZly6RraqvXdVtcq6i8JlDI9EJcR1yhXFMm2JcTkiAnFuo9RcWpiBpj2aKJ0x6x+9fIHFFzzjHVDBOBl75FmREVlkIehWQA220BIboyS2wlVjz4g+zVFiu5p+vlqS5xVDsz4ZK+377caw/XeCGYut+MREAeaOVNVbYJ6M5BoeuCpM7UeDyZU08BTe2/0oV6m+hUTpjgZv5ho6u85kE67/62zLTrBeshmswdrrWGPHpN0TtqkvOtvo4ePlsFqTNzVtCrRnXXTNaNRgRxKRYDz7wDJolKVSl021xrycOZEsA2xPxizCbMa4NJYwYtU9yD7i1Nm5fu3n9wFNsZ92FaT2329VulrmmVteW0k2TLp60pYqqwtwDzxgXD1eDIdPdwRfUA2oFcJRd++X0GY39lW0b8okl14t8HcWqsILfSvRJ/HzHa6eztgqy8R5zbiNguJYpYiuyVhZ1/rtV2u3HL4+QZhtSEN359xbzwHM1Ahz81x8lGH/RsRlEQO2T24MEJtyU4/dNaXsrnovo/ZIAtIUDhVbKr6HQrZVeO/kcdpzc3dvjB+W+lNy6bm0QckWF1cWm4jhO8Di2FeBpLVP3D/nzVCVm79g0R3vHhYrNdLg9krNyHvC85ni6mR1WwPzH6qoeaeerruyU0mbJmWPnjxHKL9i50nCuIC4Ty6UBhKDZVwYwjTgC/2BxxA3AO1CkVDFC7/p3yGwuVahgOyXbaFtJzYil96yiks8LggzxBuGPvr06mxMfjt59evtT6m1uQkGg/l83tdJdAQxt0r3lZ4NdBLhD9r93CeTFDQue0FCICyOOcZkgqwRR0wOEU94VO9pqrQJttGv7xta6ka3276CXKH5FgpG5ObqnPAYpOXJolbYVmja3OM7qQ9CweQ9XYPgW1gbEVNkGdMrvrQD4EbQMls0Oblzg9uF4z8mk0viXZBIxeC+Fp3CVYFwERmXuNumwclw2KPV3psGr4fDEn1ix/dYiSTwmAtWvQU3lsMlyda4dQvDXQ+T0Y/qjNJ8xjfjttlagfidX1FNxONuIlbCTgSL7g3qAY8JK2yKUaP62wpcEkyYJ3YxB5IdSPa/J9nLbbyfKR3yOAbp4LniGzdEKkuYEGoO8YFXB14deLWLV6+6dpEj/AS1oBGHoLXSREVRoTXEeJQpnPsIjKlj118wh43igWsHru3gWtmjGdhU4aVMroyDDrMpDeig+mI7ct/UA//JRvEAVz+ANu6sqNCCBnTpWVMGg8EyVcaWwTJX2paDB2zJA9OchcIDEYc9u2rUCBUxkfrI293Dgeb375hlhYwZeUOuTq8n5HdmYc4Wrpr56n6mdv1m+GbY6RVNd3gcXZ4Tv0KPvYYe1G6R2J1uvfE+jks85TAQFZrbxTVO8+UJgWnQowIbsMJEFc95x7+9Ee1Vv5zVSPnzn4lrNmrZ1foy7PSRZbln4vpouXEEscZc8yyhcZfkL76mq0up1dDt+prG36wMV3cfw407ienSr7h59o/PSkeDRLnlViDdLhyiCLTxlR72j7cJcXnueB2pLCukE3c5I3NuU8IajYhEYarjb8EjwFOLYEmlL0Nt9t6PkE8+IjnuI4g80mtNn3GbFmE/Utkg8tNW/4dChYOMcTmoQpjBeHRx8+Hd6Oj9+fj0w/Xp0XF/2LeP/rAPOZcx2cjDHYfVxyVbl6ONi83vvCetUGXh0Q5ywbjEHrvlLSvqT2mL+rRuHF7eeQJP6XKJQW60KEt87E5yaDC9XfMd/yp7NAUWewTQezxWo2Of/9EE80Bzj7et8/GyV88YRRHk9knb24aMXX68niA7qovgTMU4R7M5XhKzOQ3oZ/qZUrySRg+OeO75kgomZ4WDNPV+8d+/CT6yoQ== +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from 
"@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query mappings
+
+Search for mapping rules based on given criteria.
+
+## Request
+
+Body (required):
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Mapping search filter.
+
+## Responses
+
+- The mapping rule search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching mapping rules.
+- The mapping rule search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-process-definitions.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-process-definitions.api.mdx new file mode 100644 index 00000000000..579c38161f7 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-process-definitions.api.mdx @@ -0,0 +1,79 @@ +--- +id: find-process-definitions +title: "Query process definitions" +description: "Search for process definitions based on given criteria." +sidebar_label: "Query process definitions" +hide_title: true +hide_table_of_contents: true +api: eJztWm1v2zYQ/isEP22YYrsv2zp9c5N0817SLHG3D26AUtLJYkuRKknFMQz99+FIyZJt2XG2DhgwBwgSi/fKu+eOOnpFLZsbGs7otVYxGEMSSLnklitJ7wKqCtAMP0wSGtKUy6Smu1iTGRrQBEyseeG4QnoLTMcZSZUmxY5UQyJmICFKkjm/B0lizS1ozgbvJQ2ohs8lGPtaJUsartxHriGhYcqEgYDGSlqQFtdYUQgeO+uGHw1qXlETZ5AztyrE25SGsxW1ywJoSFX0EWJLA1po9MpyMI5DaSdtywWlLUk5iKS1jwaNKKY1W9KAcgu5eYoyJ3FX2zSDWplVBA0i0bKjzljN5ZxWAVU6Ad3P75YIl2SR8ThbC7IZEA2CWUi8CpQLsswx5uPbcxrQi8vbc4x1AikrhaVh/dxyK6Dei7co/RLZqiroRGVWu3TXIXfR/70EvUTOGx9QWt0hZ8HmsGv/NZtz6QK5sdtH76pWef+mcJnAA1EpcZFym2KZtsQ4G7mcE+TtbDWXFuagaUBTpXNm/aMXz3HzBc95T6qgnpw98LzMiSzzCPSGQg221BIDoySGwu3GkRq9mePU9gV9snbJw40h2YCcK+3j7dYw/B8EMxZD8QcTJZgPpN69JdrJSKHhnqvSNHI0mEJJA4fyfTMe1drU15Aq3RPgbVsjR9drbMr1v21t1Zus12wObbIGPT72MO1n2IW+6A0jps9ulWzc81xmEw1b4HmEubMze0wrtkv6L7DsN/QTLF1Ztxk3PVY/mtbfvcS0lizvyZErlgPG+HHZbT3UYFSpY7jqlXhTrxL5N0TfgzbcN5VNqX/4hePkHUJ3rWHK5vuVWDZ/quE70Zz0dJyepJlcPFWTBcmk7RM/dStPFtnB2M4x441L5s1uspe4D6GV71y+XLi8fz4aPRWPGkwprD+qfPGjyKPNkUufRfg/i1Tp2/uGaYdLxdNFHbbYKsvEpKmzWxuJa3UfzJn17RaV9ByoDlWLrYbQHzF31rl3BD7l8ECljXX6sfn6cxC6RQzYAXlnwCdmvfah28A+1Kcx7DiSgDQl2r7bu4/oi5u992jjke2ptrtzwhcz/VCT9BjaAuEG5tYUj2UQ35M87lBVJ03PS8Txnvw/m91xdp+63X+122FN3QTh4R7X4M2xvHx6X/uMYkjKuIBkQH5TGkgClnFhCNOO8Z4nkHSqkVNIIpUsD3bDQqtIQP7NblfcNG9Mrj1lrZd4+BKG1iJh5LXPbt6ckx9efvv93VeZtYUJh8PFYjHQaXwGCbdKD5SeD3Ua4y/SfT0g0ww0lpMliYCwJHGeM0HawkBMATFPedy8htRmEwyZ9++RRuhWV1sh7uCg1HxnTjIm724mhCcgLU+XTXvcUE27r+WuT4eRYPITbTNjV+m2FlPmOdPLprdsKsB3N8ts2S2de3G8LRuT6qfp9Jp4ESRWyWbtQUXoRM4lviDT8OVoFND6dZmG341GFcrEiB/hiSTwUAhWH2G23OGS5G3eOse4NJbJ+EtFRmk+59t6B9tAxocX3qMGjs/64Vh3ZSJY/MngAYAnhJU2Q61xMw4BZwQTruWdQHYC2Qlk/SB7sZvvb5SOeJKAdOm5xhs3RCpLmBBqAckJVydcnXC1D1ff9p0lxzg/sKAxD0FrpYmK41JrSPD2QawPmo3u5vXzdFA8Ye2EtT1YqwKag80UXrQWyrjUYTajIR3WcDrrDECG/sWN4s2LxndrN+4rtaAhXXnsVOFwuMqUsVW4KpS21fAeA3PPNGeR8OmIyx5jTe4IFTORef27McSFdpoO5JzlpUwYeUVuLm+n5EdmYcGWbk+L9cVqI/rV6NWoVyqS7pE4vp4Q76HPwE5VaMQivHvFeuJjBFc4qDIQl5rb5S2y+e2JgGnQ4xLDsM6MWp+Tjp89EQ3qf940+fLzn1MXcqxoN+219uUDywuPx/ZOqH8qNWrGR202bg6B2ufrAc5oc9TSUvROStrldrzRmXn4y/HZ+uJ6vXTXXuX629fR+n50tHVvOVv5ze3eD+KzyuEuVW5na1TsxqjrGh0Nnu0i8HriCkms8ryUrpvIOVlwmxHWiXksSlNfhgkeAw5LwlWzuw3Zr36FNPOoZwPMVw+qponMuc3KaBCrfBh7tvXfSKhomDMuh7UKMzwf//bu6mJ89uvk/PLq9vLs2WA0sA9+NIwgz5ns2OGGOX3jzm2vV20X/Ufftqgz2sKDHRaCcYlBd/6u6uIzoz3FhzbxxHt/X0JmdLVCVe+0qCp87CZKNJzdtRUHP1UBzYAlPjHoJ8xyeu59OZuiNUguSjfY3b5UqYKGYxzHUNiDtHedcnr99naK+Ky/VJKrBHk0WyCg2IKG9D19Tyl+3cX7h988wecrKpicly7TqZeLP38Bm1tLlg== +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem 
from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query process definitions
+
+Search for process definitions based on given criteria.
+
+## Request
+
+Body:
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Process definition search filter.
+
+## Responses
+
+- The process definition search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching process definitions.
+- The process definition search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-process-instances.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-process-instances.api.mdx new file mode 100644 index 00000000000..2f1f4e5114f --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-process-instances.api.mdx @@ -0,0 +1,175 @@ +--- +id: find-process-instances +title: "Query process instances" +description: "Search for process instances based on given criteria." +sidebar_label: "Query process instances" +hide_title: true +hide_table_of_contents: true +api: eJztXG1v2zgS/isEsR92cY7tpGmv62+uk+75Lm1zjdsFLg1QWqIsbilSpag4huH/fhhSkiWLkt02ado7FSjqii8zw5l5NBpyuMaaLBI8usaXSno0SRATiSbCo/imh2VMFdFMiqmPRzhgws96TbNOCe5hnyaeYjF0wyN8RYnyQhRIheKdGRM0Jwn1kRRowW6pQJ5imipG+h8E7mFFP6c00S+kv8KjtfkvU9THo4DwhPawJ4WmQkMbiWPOPMPZ4K8E6K5x4oU0IvCrytCuXCixHGbk+riHCedvAjy6XmO9iikeYTn/i3oa93CsYAk0o4khIZWuE7iSSqOAUe5vBcK9fCqiFFnhHmaaRmaSQ4mZGevUZiHNiGmJgCE0X5XIJVoxscCbHpbKp8o93jQhJtAyZF5YTKRDihTlRFPfkoB5qUgjMI/x1QT38Nn51QQMw6cBSbnGo+y5ZprTbC3ewOznMGyz6ZXUeJ2JdFPqblTx75SqFYx8a1WCNzcwMiYL6lAnWTBhNF9Z7YNXVcnIvShM+PQOyQAZTZlF0UTpzFyYWCAYW1pqJjRdUIV7OJAqIto+enICi89ZxBymAnQicseiNEIijeZUVQgqqlMlQDFS0LKBHkDRsjkOtEvp00Ika/sEuvXRRCqrb9MG6v/ISaJBFe8JT2nyEWWrtwI+CYoVvWUyTbY+lMRSJLTN3qv62BSsvqCBVA4F7/I6N/2czAZMPTS3G6exXpIF3RprzyGjY1DzgLrrc6cawXx2QTUXzo5Jqr5wGBLaoaVVaWArrmL/v+jKzeInahSgQ5bU2G3j70KKxVaFS6ZDFKScI+LfwlA/59cjMZkzzoAvF9dS0CoWuP3m2Sne9HZ5GOe0ODDzq+n2W2mFmnh/QRLmbTltGN2+vr/Qz/UFnYTU+5SY9yn9nBLO8qXR1hhumU99dAsOsBcpjMT4F7GHDhP3RYnesUQnjcRYYOb2UqWo0FvV22ElInMpOSXCzMnEvvmKeSKivZAmiIjMIHflSA6AgmYBy35u9A8G/NL6LljWHm0vHC+IPxQlmiqkQyKQJ6OYKJZI8a16WGgH0FZo5eZ1j0S5Q74Lubx/6bhLuhKl+5ZtsymFMDlglHVfat4+vsysEqKbXo6lZzRgguURdhve+0VPND1rQ6IrEwE+CI4WwWUjatoej4F428D3fvGtNO//Aprl4hi/YZ8cjrOHeoUszJCt2AdxlcaxVPDxsGTc94jykRcSRTyIS5BPYyrMt59ZJCkCtkhVyRg5o0LbL8E6r7veZo3c4W/lhj0e95pEDvEbfE6QqDV26byu87rO6w7wuvdUJUw61r/B8W5t/zbfm9o39Y7zPdBXg/nabvTEbIwJ/Z+c/EChv00SfI/QP6P0EG77PSLmjP3vGzFnRB8+Yi4Ifb+IOSP5Y3y2ATMuZMsgxAFtlZbDsG1GFl8Kb0iTRRdedOFFF158Y3jRmJR0+N4nuurSkV068ufy9S4d2aUjHyAdScDkLw/c4LG961tRHaB2gNoBageoHaBmgPqSy+Vr6dNDETXgcomE9GmHqR2mdpjaYWqHqSVMNefxzoiLc8BQe1zPJ7p1ow7GH2kW0QdBzSy7UZLfz+m1AWfR6RGTbA08P0zmrZHYj79tsE+Ch8KNfXTvEzr203oI9NhH9ZHSqm5+XAlMwJYZi6gD06pNVVyjwm9GNcivdpjWYVqHaR2m/USYluhGRKufYNd78G0nL3kF/aHA5H7hzhlRatozxRgyQOPJbPr+vIcmb15dXpzPzs96aDJ+PTm/OD+rFMqYbriHi37wO+tYLn5pkqoNVBtX4ntibJOaHnPJbu4P1X9Y8X7qzMCPs5Y/+S65SzAHODd3qwJ1SJKp8JifVVbumMhudeefIdWheSM6antQSBJEihJClk0Lb04hdR/sTVNBhG46625bu/Pt3VGY7ijMFx+FuSWKkTmnDWhbc9ai/+GoXSsgzOc42CGE88w9nMTPVZ5P6TRRowq3fKbpgEk2jRD5PhtkF7eo1mwZsdPzpqWrqw50Y+ujbVGqWZ6T4fDQmLkoaU25KRG+9+r4ba3sobXVewu2mbBfHfCbzGVqS84rorQXsH75VO0ca6kJnzaEKNCW1WYbcADgN75dL/JvS7XvFCk3xEJQf29BLrdhM87Qh4JwW5sPYqGE6j56l1D7Es7aPpaLqj9mNwRAFbRAVCQp8F6vJz+gVrtaD34w8zDsS3k3tev3xnpb4bY17R2HrXho0WOfBbXEt4XR1O7B+AbE3XFPs8b77dxduV0z3roM+0q56+beUNi4m344uNCxrXLrqya1lVx7SlMOWJr2WpX6Cfg9p8W/SpbK6fGW47BfJ0529MCh45bTYoeQaj8+1kSv4TDF4QSbT1fUSVb2Gvfrprr36M71ldL8+ycsp/3d07Vk2B4tb/WIX5P7l7T0ddkSqEE00Br0Od8TZsDpl0Zvn2ESFBDGqd9Hr6SiyKeaMJ4gokqfIdt3qAX9ufRX9kuiIeqLlZxzGv1tX/Q3hoQi9MzoIvv+QARwADrOLfXrty8n6PfTp3+/+TXUOk5Gg8FyueyrwDuiPtNS9aVaDFTgwV/o91sfzUKq4CW4QnOKiO8bTCEcbd9MKImpxwLm5Re6ZGwj0GXlS6kpfDOtLbntVLGaKYzRu7dTZGyJBas8qKuQxuULjkx0OZpzIj7hrVXsM7gxStIoIqr8KVsikPlvelDFksuY/zGbXSI7BfIA0uDDP/cjIARCREzAVUN4dDoc9nB28RAePRsONzAnaPwASQSidzEnWeC9Iw4TKNrarRGsuEDsfjQjFVuwXbo1F4aHZ1ai3BmP3c6YxZKIE0gD3BLOfERSHQJVL79YihomCE9aPq06J+uc7P/eyZ7U7f2lVHPm+1QY8yz8jSXw3kaEc7mkfudXnV91ftXkV09dkeRYmOsFFNghVUoqJD2TjPfhHkdehJk57Txp0gWKna91vtbga5sejqgOJdxv
G8vEmA7RIR7hQeZOR0XabmA/2zDcYKkg9WJS1KnieITX1nM2o8FgHcpEb0Zr2CzaDG5BLZW9GWi2HpZbDpce4aGlXtcgNIjSDsmERKnwCXqO3p5fzdAfRNMlWdn8T3FBbT718+HzoTsnAklb94zjyymyElr7K2FCPi04tzsRYjofMrE5EZ5QL1VMr65gmF2eOSWKqnEKSijsIqNnZjcZBdMJ97IfL3Nr+eefM6NwwLO32/uEz+9IFFtv3N6t6UqKDhsSmFvDbEhGtnQo0oquyctJwJY5CuYaM2/D1jTZsJLRwifDk9Oj45Oj4yez4yej499Hpyf906fP/oNLWaq2TpU0j1YpLeditmKUzP463/srt5rNvGJnzhwbMwZ8Xdx+vG3c3gdsr/AdFpfsDncuv71eW8sqXzILzzYGcgJpzCoDhLqBAmO5wvCwf1wHn8upwVBPRlEqzItULLIrh0oG7/E0ybZEOfMoZImK/c+C7IVtQZkVoOM+OKtFlPz9uWA6TOd9T0YDzw4r/p1zOR9EhIlBRiIZTMav3r0+Gx9dTCfnr6/Oj477w76+s3s5gG8RESU+TBarvj+xK/N6Gz58w/3emStreqcHMSfMHBAwsq4zzL3GNczFuSYhA2mR8xqv10DoneKbDTw2aTQ8ur7ZWhz8D9KRlPjWJPAncAM8sXIczYCXrQnWdkzhCIgdMfY8GuvWvjeld8jlm6sZwFJ2iXkkfRijyBIuOCdLPMIf8AcMh0/M2hrEM8/XmBOxSI2NYzsv/PkvsYCdlw== +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query process instances
+
+Search for process instances based on given criteria.
+
+## Request
+
+Body:
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Process instance search filter.
+  - processInstanceKey object: The key of this process instance (oneOf: integer).
+  - processDefinitionId object: The process definition ID (oneOf: string).
+  - processDefinitionName object: The process definition name (oneOf: string).
+  - processDefinitionVersion object: The process definition version (oneOf: integer).
+  - processDefinitionVersionTag object: The process definition version tag (oneOf: string).
+  - processDefinitionKey object: The process definition key (oneOf: integer).
+  - parentProcessInstanceKey object: The parent process instance key (oneOf: integer).
+  - parentFlowNodeInstanceKey object: The parent flow node instance key (oneOf: integer).
+  - startDate object: The start date (oneOf: string).
+  - endDate object: The end date (oneOf: string).
+  - state object: The process instance state (oneOf).
+  - tenantId object: The tenant ID (oneOf: string).
+  - variables object[]: The process instance variables.
+
+## Responses
+
+- The process instance search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching process instances.
+- The process instance search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
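As a companion to the filter fields above, here is a hedged sketch of a process instance search. The `/v2/process-instances/search` path, the auth header, and the property names inside the `sort`, `page`, and `variables` entries are assumptions; the filter field names (`processDefinitionId`, `state`, `variables`) are taken from the list above, where each oneOf field also accepts a plain value.

```typescript
// Sketch of a process instance search using a richer filter.
// Assumed for illustration: the endpoint path, the auth header, and the
// property names inside sort/page/variables. The filter field names come
// from the request schema outlined above.
async function searchProcessInstances(baseUrl: string, token: string) {
  const response = await fetch(`${baseUrl}/v2/process-instances/search`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      filter: {
        processDefinitionId: "order-process",            // oneOf: plain string form
        state: "ACTIVE",                                  // process instance state (value assumed)
        variables: [{ name: "orderId", value: "A-42" }],  // property names assumed
      },
      sort: [{ field: "startDate", order: "desc" }],      // sort field criteria (names assumed)
      page: { limit: 10 },                                // pagination criteria (names assumed)
    }),
  });
  return response.json(); // { items: [...matching process instances], page: {...} }
}
```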
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-user-authorizations.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-user-authorizations.api.mdx new file mode 100644 index 00000000000..f41e04c7e2f --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-user-authorizations.api.mdx @@ -0,0 +1,83 @@ +--- +id: find-user-authorizations +title: "Query user authorizations" +description: "Search for user authorizations based on given criteria." +sidebar_label: "Query user authorizations" +hide_title: true +hide_table_of_contents: true +api: eJztGmuT2rb2r2j0qZ3rAGnTNuUbAW/i212gPNrp3ewkwj6AGltyJHlZLsN/v3MkG9tgdklv+qmbmcwa67xfOj7Sjhq20rR7S3uZWUvF/8sMl4LeeTQCHSqe2p9dOgWmwjVZSkUyDYqwKrgmC6YhIlKQFb8HQULFDSjOWu8F9ahMQVm4IKJduuQimmtQNYaaejRliiVgQKE8OypYArRLkdsvsKUe5ShHysyaelTB54wriGjXqAyOhZ2tgXyCLZFLYtZAchLFz5roLepRHa4hYbS7o2abIk8uDKxAUY8upUqYca9+fEX3+zvHG7R5I6Mt4oRSGBAGH1maxjy0dNt/ahRlVyHO4ni0tKrlbOTiTwgNaq7QRIaDthhSWWpHDpDKkCWHOCqtS72CFFOKWRsZSPSXMLMUT7mhBR0zIwkKRBbbCjttFBcruveoVBGoZny7RLggmzUP1wdC6AIFMTMQORZIF0SW2Cic9qlHB/6070JwybIYze/eG25iyG0xQuo+ou331Xi4zVW6q4Db2P01A7VFzInzH93fIWbKVnAq/5ituLCOrFn7YqsqmTQbhYsIHjASraesUQxThmgrIxcrgrgVUzfH4vffofFjnvCGUEE+CXvgSZYQkSULUDWGCkymBDpGCnSFtcaFHJ2YvaVpcnpwUMkVC4ZgLdKXyvnbrqH7P8ZMG3TFbyzOQH8kufVsijKSKrjnMtMFHQU6lULDY/Fe98f+IOobWErV4OBjWRcWrlHYJVd/t7T7xmAdsxWUweo16NiAdB7hNPXjRjfOjmtkoZlD0PVEqOP2zuNV7HFGILkRoGYWpEkmRC5quAXFHymohGuNW0gTg3NyXkqvqEvzqT+hHp2Mrn3q0beT0XxMPXrTG4+D4Vvq0flwOvb7wVXgD6qVZ1Ro5ArVHRZMfIUbWnN5iC7U8JEtqmRf88aV9UK9+DUCNgVTvcbinmtfuEi3zvuu02lW6bRdqGRKFtvKc/Eeet6/j4fWk1WeC2dHfGYLmbl9qiboo4H/F0g9LrGRhsVBUTCOzIpreUFPmHH7BjJp6AzOB4pHjypbs//spn1vAYrYtHiWP+4ibkNHtYgG0yJzDcSsuS7WPlYr8ce8rcDSKQgInaHsp5vQBQW+volcLDyifansdsP7aqI/Vu1dRh2lZy0jDxDHEcPPBIvtBvIgqbftlwvdWKr/zirm/TN2AwVaZiqEp/UsIIlZM5NLuGb38DV0LmlLwqKorSCRdcrEyHbRmB569fns3WgS/Kc3C0bDUv8Pk7k1y40/nfbe4tOb3qz/jnq0Nx5fB/0CfPrHdObfUI/O/GFvOLNt//h69MeNb3+MJ6O+P51+GPhXwTDIcQZ+P5gGo+GHif/rPJj4CHsOpPa28FHVcxXPTCpuKJ1T0b+SEUWmnJrzyBMXplKJ1RwE0xRCvuTYkFZc9he9fhGxg4P76AHq0f7E7838w8OHwjfBcDrrDfuVlYP1K0sTvzfI/zRh2vfolw+z3vSX4kUTofl44MRwD03E8pUquYF/7Vss91BglS8Gk0H542o0uTmBLTlUYmZc89tpSgfR01HTIzHXppaCwcD5pvQJKT9I0DuZ4J8zyDsDN/04jrTi87y6h5TiDmaj+lKt+ys3mCcaxCrg3qOvvrz/+4wbGlkyHkPUIjdSAYnAMB5rwhTgZ9Y9jyCq7NOWIVnIaOsmS2f6xlTJRQzJv077x2Pzjx1kzpe4/CFMEwe4cNxvJ1d98vOrH366+2ZtTKq77fZms2mpZfgCIm6kakm1aqtliP8R7tsWma1B4ca7JQvAosqRJ4tJmfxEu2QMiy/NXGyblk6/J1rEvF7UvV7ZSzPFG0JuPgkIj0AYvtwWjWONNa1OXmwH213ETHyiZUCcMj3morMkYeowdKszwM9zw0xWDdqzY4emUvtuNhsTR4KEMgI7mLSNW84IlUi4wBkI7b7qdDyaT0Ro98dOZ4800eMXaCIIPKQxy5v7I3W4IEkZt1YxLrRhIvxanpGKr/gx31btMy8P4oHTqEjHl83pmPerJGbhJ42tMY9sdiLXsJh4gRWCxfqRj7PnJHtOsn98kn1/Gu9XUi14FIGw4XnIN66JkIawOJYbiJ7z6jmvnvPqXF790NRL9nCyZkBhHIJSUhEZhplSEOEBU2zJh6B1wbsYzDw3is+59pxrZ3Jt79EEzFriwXwqtQ0dPGHv0jZ+t+n2Lj8837fro8O2+4bD03NQ98WZfaZi2qU7l0b7bru9W0tt9t1dKpXZt+/RR/dMcbaIXWTisku3IoxiGbJ47UQ5dScu4MWAQs0+SzIRMfKaTPzpjLxlBjZsa82bHo7RC9KvO687zYMbnBI3U+yNA+I0dMFYKRAFWcz0RrIO+BLC9l6BhjBT3GyniObMswCm3E2JSpDk/Cx1/O2AqJc/XBWh8+/fZ9b7WNwm5Z0F/4ElqUvN8gSwOmytHVJhBrgLCbeHywKH+cJdeXzuTrw7hzPpztFZ8e3OqVg9k8V3e5sIS2n1y8P01FIYNqC0M22n9fI0JcaBzexQJkkmbHkXK7LhZk1YxfJhnOn8KDLmIeD0onu4aVKAXbsV8pvjSF62MGpcaBdVfcXNOlu0Qpm0Q4d2+LuI5aKdMC7aOQvd7vdu5sNB78V10PeHU//Fy1anZR7cKQZmXcJERQ4752+6ZXOsdeXuyf91PyePKwMPpp3GjAuMdKvvLq8Gt/YaDgrQLe/jnAiXl4Q7L0/rW7rbIeO5ivd7fG0HPrR7e1dWATej5BqfI9pdslgfX+epqvnNJD+E/JZ84SWfRiWL4ZzY2sIUZ/iLevQThv7h6pEd662BRS6Q89W+E+qFzZoS++T8cu8VGL0whNQ8CntXqcfj0RQH4Yv8nlEiI8RRbIP3n9iGdul7+p7a+1WmmFHb9zsaM7HKbGZSRxf//Q+UIQsQ +sidebar_class_name: "post api-method" 
+info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

+# Query user authorizations
+
+Search for user authorizations based on given criteria.
+
+## Request
+
+Path Parameters
+
+Body (required):
+
+- sort object[]: Sort field criteria.
+- page object: Pagination criteria.
+- filter object: Authorization search filter.
+
+## Responses
+
+- The user authorization search result.
+  - page object: Pagination information about the search results.
+  - items object[]: The matching authorizations.
+    - permissions object[]: The permissions.
+- The user authorization search query failed. More details are provided in the response body.
+- The request lacks valid authentication credentials.
+- Forbidden. The request is not allowed.
+- An internal error occurred while processing the request.
+
+The error responses carry a problem schema with the HTTP status code for this problem (integer, int32, >= 400 and <= 600).
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-user-task-variables.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-user-task-variables.api.mdx new file mode 100644 index 00000000000..d34fbf8cbee --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-user-task-variables.api.mdx @@ -0,0 +1,67 @@ +--- +id: find-user-task-variables +title: "Query user task variables" +description: "Search for user task variables based on given criteria." +sidebar_label: "Query user task variables" +hide_title: true +hide_table_of_contents: true +api: eJztWVtv2zYU/isEn1ZMsd0u7Tq9uUm6eeslS5z2ITVQSjqy2FCkSlJJDEP/fTikbrYV173srQWCGiJ5rt93DnW0ppYtDQ2v6ZUBTSwzN3QRUFWAZpYrOUtoSFMuE1yeM3PzjmnOIgGGBjQBE2te4D4a0ktgOs5IqjQpG1nkttlOImYgIUqSJb8FSWLNLWjORh8kDWjBNMvBgkZT1lSyHGhIy1rnP7CiAeWopGA2owHV8LnkGhIaWl3CtiXzDMgNrIhKic2gs2ZEA2riDHJGwzW1qwKVcGlhCZoGNFU6Z9Y/enZMq2rhFYGxL1SywjOd3pQJAwGNlbQgLa6xohA8dlEbfzJoyLqnjQnxNnXO1XpV9Alii75rjLblYNwJpZ20rdgqbUnKQSRd4GjQiGJaMxchC7n5GmVO4q42jJ9XZhVBg0i06qkzVnO5pFVAlU5AD593S4RLcpfxOGsFYT40CGYh8SpQLsgyRwhOL09oQE/PLk8QgwmkrBSYD//cciugjsVblH6Gx6qqj4br2qVFb7uD5b8l6BWevPAJpdUCTxZsCbv2n7Mlly6RG9E+OKpa5cNB4TKBe4Sly5QLimXaEuNs5HJJ8Gwv1MPg/O0JBl/wnA9ABfXk7J7nZU5kmUegNxRqsKWWmBglMRUuGgdq9GZOUzuU9Fnrkq8DDLeNyInSPt9uDdP/UTBjMRXvmCjBfCR19BxfGSk03HJVmkaOBlMoaWAf3jfzUbWmvoBU6YEEb9sauX2DxqZc/9/WVoNgPWdL6MAaDPg4cKh/YNPltrw3ln7G/QMAaEHdiN+u/EPqKk9D77tz8slkMozNod6wHTzXE354af0i2bn0mMffLFKlL1edcaWwZrMQfLeo/RZbZZmYNbjZiiWu1bzOmfXlA5UMNIiHe1xAtwA+nDRXu2/dhqarunNOPxYTX9fRLWLAjsiVAWIzbpq1j31Cfqy7CzJIEpCmRNt3oXgAzzdrycHG47Gvtd3VvR9m+j7SexbULWqQ5e2ObZo3JN1m+U5VegBx/AGwuaZSg6zl7T5/95vVGORy8GWrGo14ERy0DW97ePF0aWt2H0gAf9nclvqG5eDRMiyxuwQ5ZA25LMoDJaSlEO+GpbwshfDYPUyUiVWxP0w1A9zGfTL3hazQKgZjZtJYJuOD9NVHCK/PfKtqC5JJOxu6tboVMjs9LFLczHUpY7yJ7sp6n4HNsE9lUEefG2Kb/URpIlWf7pFSApjcaMsN5rF+bxJ+sJM2lHY7j7+he3qqp4wLSEbktdJAErCMC0OYdgm45QkkvYrnKRipZLW34xZaRQLyX3c776Z9U3Lud9Z6iWczYYb4jZHXfn3x8oT8cfz098UvmbWFCcfju7u7kU7jI0i4VXqk9HKs0xj/cN+jEZlnoLEErUgEhCUJR51MkK5OEFNAzFMeN1e32myCKfL+faHZutX1FlJ6GCw133npnZKrixnhCUjL01XTgjdU0/6rjLsLhJFg8oZ2iNhVuq3FlHnOdJ9NPQXIe8ts2W80D97jh16W/5rPz4kXQWKVQFdLa0XoRM4lvlTQ8HgyCWj9ikHDZ5NJhTIx4wd4IgncF4LV16Qtd7gkeYdb51hTL35QZpTmS76td7TB2xrEp94jz8enQ3yc4jXPgkYcgtZKExXHpdaQ4EuvaGteo7u5Jfzk2k+u/eTaA1yrApqDzRTOHQtlHHRw5hfSMfa+I+x9ZrzujQarcdsKx74V4pgP9G0zTSy1oCFdeypV4Xi8zpSxVbgulLbV+Bbz1IpA33HZU66BklAxE5k3ZzeluCDb+yKQE5aXMmHkObk4u5yTP5mFO7ZyIS7a8V4j+vnk+WRQKm59QOL0fEa8hx6QvSLRiEW2D4r1mw8R7AagBuJSc7u6xGM+PBEwDXpaYlZaoNT6nHR3I3KbaFD/eNnA5+/3c4cALHAX3XD17J7lhaenn4Bet9PJ9s626OZ1fsQ2aYdgk63h1PXa294fAuGzyqE8Vc7wGoO7IUA8gDY+ZpPR4128n88cbWOV56V0tVsuyR23GWG9kMaiNDgEo2hnDHi9C9vhdrPtlV8h77xG8niEcPCYbUr2ktusjEaxysexP9b+HwkVjXPG5bhWYcYn09dXb06nR69mJ2dvLs+OHo8mI3vv35eRUjmTPTvc9XPoXrnt9brrWd81668BY+HejgvBuEQIO3/XNdWvaUd1GtBw8ztA38Ca74ug5uw1Xa9R+ZUWVYWP3aWYhteL7pwf3HCDv7tB/oOu/nJRT5cfkX3fFQa9al6OpbfbveFRGtAbfGHa+LxRLaqAZsASj956x4m34miOcjoJO+OwKmhOTOMYCrt376JXYc/fXs6Ro/XnjVwleEazO/zGwu5oSD/QD2ixcpFx9HfP11QwuSwdHamXi//+AwJhI5s= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem 
from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Query user task variables

+ + + +Search for user task variables based on given criteria. + +## Request + +

Path Parameters

Body

    sort object[]
    + +Sort field criteria. + +
  • Array [
  • ]
  • page object
    + +Pagination criteria. + +
+ +The user task variables search response. + +
Schema
    page object
    + +Pagination information about the search results. + +
    items object[]
    + +The matching variables. + +
  • Array [
  • ]
+ +The user task variables search query failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
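For orientation, a minimal TypeScript sketch of calling this endpoint follows (Node 18+ with global `fetch`). The base URL, the path `/v2/user-tasks/{userTaskKey}/variables/search`, the bearer token, and the sort criteria are assumptions for illustration, not values taken from this spec.

```typescript
// Hedged sketch: search the variables of a single user task.
// Base URL, endpoint path, token handling, and the sort field/order spelling are assumed.
async function findUserTaskVariables(userTaskKey: string, token: string) {
  const res = await fetch(
    `http://localhost:8080/v2/user-tasks/${userTaskKey}/variables/search`,
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${token}`,
      },
      // sort and page mirror the request body documented above; both are optional.
      body: JSON.stringify({
        sort: [{ field: "name", order: "asc" }],
        page: { limit: 50 },
      }),
    }
  );
  if (!res.ok) throw new Error(`Search failed: HTTP ${res.status}`);
  return res.json(); // { items: [...matching variables], page: {...} }
}
```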
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-user-tasks.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-user-tasks.api.mdx new file mode 100644 index 00000000000..e906343ea7d --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-user-tasks.api.mdx @@ -0,0 +1,120 @@ +--- +id: find-user-tasks +title: "Query user tasks" +description: "Search for user tasks based on given criteria." +sidebar_label: "Query user tasks" +hide_title: true +hide_table_of_contents: true +api: eJztG2tvG7nxrxDEfbhDZUm5S66pvulkO6fWSVw/0g+OgVC7I4lnLrkhubIFQf+9GHKfEiWvHeeKtgoQRFkOZ4bz4nCGXFHLZoYObui1AU0sM3f0tkNVCppZruQ4pgM65TLG4Stm7gzt0BhMpHmK43RAL4HpaE6mSpOswGHIhBmIiZJkxhcgSaS5Bc1Z97OkHarhawbG/qbiJR2s3H+5hpgOpkwY6NBISQvS4hhLU8Ejx0vvD4MEV9REc0iYGxXi45QOblbULlOgA6omf0BkaYemGtdgORg3Q2mHbYNzpS2ZchBxxR/tFKiY1mxJO5RbSMxTiDmM29Su5pATs4ogQ2SyrJEzVnM5o+sOVToGHZ7vhgiX5H7Oo3mJyM6BaBDMQuxJIF6QWYKKHV6OaIcen1yOULMxTFkmLB3k3y23AnJZfETsJzhtve7UtHKTL+m2Bu6U/s8M9BJnXniF0vUtzkzZDLb5P2czLp0iG9JuLVWtkrBQuIzhgagpcZpyQrFMW2Icj1zOCM6tiZpLCzPQtEOnSifM+k+//IzCFzzhAVNBOgl74EmWEJklE9ANghpspiUqRklUhZNGS4qezeHUhpQ+LpfkvYwhWJeMlPb6dmOo/i+CGYuq+MREBuYLyaW3RD4ZSTUsuMpMgUeDSZU0sM/em/pYl6z+BlOlAwre5HXi4ILMTrn+3tyug8Z6zmZQGWsnsMbApPqE5pLLoFlw+hXhAwawM1SIoNrR3LJN3B7YNJ1mFz8euD0nWR7f/wHLmjQru93m7w6WLujbOTcVs90NG//1tbNxyyzU8ObBLoTWgaIZ2LoM6gFtdHEyvDo5ph06+vj+/Owk/z38MDo5cz9Ph2P8cbvuUGYMn0kIWCvSKkaD5HbJ+NLxXlnsPbdzMs2EICxeMBlBXCgsYimbcMFRxiENKAnN0FfuAZs0hyVmT9xr93Gt/gBft1c+mkN0Z5zu4GvGBC8WgSJItVrwGGKyQM8M7k8/yEewcvk8vPDAjTU7UXOvpCjTGqStFOCn1VBOlBLApMPJ5WP4SjwJs9EcDGFyWRhEk2vTIv4Uy0Hagt8FDO8R6g2yiCGX2Gd5maWp0rjH33MRR0zHJJozzSKMCiSGFKTLuJyQlJzyWaZrxig4SOvzr21ei6hXGJo38lMfntb1jb82cJ7zjts+Wh5Xmttl2NWK0Se52tiHnw1f+2Y327Mb73S8fA750UH+9J/wwH1JxMv65F5K38NLZ4GU650GhluYnTNJIpWkTHOj5LeyP7MBn2zQKrTygkRFYH1n6v7lVydCq6tR+g5r+7Mj7G5mQoEsDyGBSNYYaYYyEJCAtOO4XcqSg5PxcSC6rTs0YjLmMbPwTqssDcfHEobMEGhHZnXISQ45ySEnaZ+TlF6FB6PHHM952sHvDn538LtvPwuoCIw5himXHNf6tLJCJXHEQuISTbi6kMONpbHI8DfR4jmSMCULkrXMCxxkmRI0A0pIQm2TjSrJCImnlrx8izSKjGa/NDbk/olpziYC6i5QuMZGKTjnvJhJFsVULMmoiDNv2Xm8aITiHQW/zWgnWdKi4PSBJWXxp+DBCdH5WTuFONAAkponFc2TQkLeZcqyIpadVcREewGeIfj/u9QCoBsgtyGYUGF37RsevsrsJPFzv9+yLKvBZCIPpi/euHq0lcKl90v8zSYq882gBmt7K8bPQLWfY6ssE+PC3Dbkh2N518TtcZi/uC1qu/0WPub5wLPRPggrynXG/F5d2Jmb5+hjq8Z3zXBZxIDtkmsDPlLnY1/q7Y4vee8O+xOSgDQZ8r5dXm/RRWl2alozj9OeyrvrKr0Y6/taKt51Nlyu4WolROsmylbTZ4fJ8R3W5np2uZVVren2K36kR7J3B21E3xYm3S7yIhGEDB5T/kv7LFu5/wtXX/68fKhZ53l8Fw/UfcyOA+ieo0XjjPtUokjkGTS/X/b6v3pywC/PQo8Td5gbFq25ksetfb6YQeLc99mOxi2OH1megKOjklTAEymVc55Ga6qEUPfXaXtKfsbTqMQZtCcQZ/A07C93SIQHC1oycap0cgFT0CCjllwXU539EF1MDvvYJ9CG+xS1hXEuPHR7X/ONgigzViW/A4ubYarcbDdKIw6czD18HqI2+4Zx7Agycd7MQQMBq2pOtlhivVtZ1znBwTmfzUHnBxc8xdjmx2IycphwiXeG6KDfofn9ITp41e/XLmK96YdOKZhBh884waTKQb5ufWbxGdaUcQFxl7xXGkgMlnFhCNO1mlOVaTo6ZKLi5d6TTqrVREDyl+0Tz0YRk5x7yJwu8VZAGNoRAk489ZuL0xH52+s3f739cW5taga93v39fVdPoyOIuVW6q/Ssp6cR/kW4n5yONGZ+SzIBUpkIqZI5YlKI+JRHxYWknG2CptEoi+065LjRbT8srT7TfMuwhuT6Ykx4DNLy6bI4+jRI0/oFPXcGG0wEk3e0soPHnH9ITJYkTNf3pxqBPEnMWnXUQq7x+9XVOfEoSKTiWiqaE2qY/et+3fB/7fcx+jqNt1iJJPCQCpYfTzeWwyVJKrt1Cyt24BfSjNJ8xjfpNksUuREf+xUVXvgq7IX5iYsIhjXfBRM8Jiyzc6QaFRcjwTHBhDumHJzs4GQHJws72S/b9n6q9ITHMUi/Txf+xg2RyhKGeSLEB786+NXBr3b51ZtQCjmU7vqbO06A1koTFbnOa4zvEER5BChoF6XFQ6J48LWDr+3wtXWHJmDnCh9Ypco402F2Tge0h8e1I1er7vnzGsWnF3rhzs03K5ppQQd05V1mPej1VnNl7HqwwisB694C9bGo9xJx2LtWYTKu3zj3ZLdVhwOy1uQbsSSTMSNvycXJ5RV5xyzcs6WvJZQvqwrUb/tv++FDNfY0whiH52PiV+gNrxYMCrTo1eFCtgNug3iNvQcDUYan80uc5sUzAaZBDzOUfmkQOT2H3V0kcUC0k/84Lczk7/+6cprGQHZRvWs7eWBYAaONRx6NBkK/LNfXKu1VHb2yyKp20W8UxyuIzQuGgRF/A
aqONFRn7YdLov16UWsPjuZwqPC+TaDW9L4pGtAVjryjXJZybrdb5W1moU05Q70pn+fVURZdVv/GrF++AutvvM66WXkLqr+Cwm9rF1OmyplP7vHbhoiMFYU22u++2o4u52MXJCOVJJl0O6Wc5Xfea4YdiczkN74EjwDrP2XrviR75kdIXtojr7rolD5yFBvkjNt5NulGKulFflr570SoSS9hXPZyEqY3Gr6//nA8PDobj04+XJ4cver2u/bBtzQxgCVM1vhw9ala121zsasqMXjOC9LcSS082F4qGHcXvNzqVnkYvaFVGKWF0vAJow+GN3S1QgrXWqzX+NmVxOjg5raKnfi/dYf6EqSznTu0YTrynB9dIROVtW11/PHunp8xjCJI7V7Y29p+cP7x8gojTf4+NlExztHsHt/Osns6oJ/pZ4q3Bp00XRBz31dUMDnLnDlTjxf//BtMi9l5 +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Query user tasks

+ + + +Search for user tasks based on given criteria. + +## Request + +

Body

    sort object[]
    + +Sort field criteria. + +
  • Array [
  • ]
  • page object
    + +Pagination criteria. + +
    filter object
    + +User task filter request. + +
    assignee object
    + +The assignee of the user task. + +
    oneOf
    + +string + +
    priority object
    + +The priority of the user task. + +
    oneOf
    + +integer + +
    candidateGroup object
    + +The candidate group for this user task. + +
    oneOf
    + +string + +
    candidateUser object
    + +The candidate user for this user task. + +
    oneOf
    + +string + +
    processInstanceVariables object[]
    + +Process instance variables associated with the user task. + +
  • Array [
  • ]
  • localVariables object[]
    + +Local variables associated with the user task. + +
  • Array [
  • ]
+ +The user task search result. + +
Schema
    page object
    + +Pagination information about the search results. + +
    items object[]
    + +The matching user tasks. + +
  • Array [
  • customHeaders object
    + +Custom headers for the user task. + +
  • ]
+ +The user task search query failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
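To make the filter, sort, and page structure above concrete, here is a hedged TypeScript sketch (Node 18+). The path `/v2/user-tasks/search`, the local base URL, the token, and the example field names (`assignee`, `creationDate`) are illustrative assumptions rather than values confirmed by this spec.

```typescript
// Hedged sketch: find user tasks assigned to a given user, newest first.
// URL, token, and field/order names are assumptions for illustration only.
async function findUserTasks(token: string, assignee: string) {
  const res = await fetch("http://localhost:8080/v2/user-tasks/search", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      filter: { assignee },                             // plain value, per the oneOf above
      sort: [{ field: "creationDate", order: "desc" }], // assumed field/order spelling
      page: { limit: 20 },
    }),
  });
  if (!res.ok) throw new Error(`User task search failed: HTTP ${res.status}`);
  const { items, page } = await res.json();             // matching tasks plus pagination info
  return { items, page };
}
```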
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-users.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-users.api.mdx new file mode 100644 index 00000000000..98c0340fb41 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-users.api.mdx @@ -0,0 +1,76 @@ +--- +id: find-users +title: "Query users" +description: "Search for users based on given criteria." +sidebar_label: "Query users" +hide_title: true +hide_table_of_contents: true +api: eJztWd9v2zYQ/lcIPm2YYjtt2nV6c51k89AfWeJ0D66BUNLZYkORKknFMQz978ORki3bcuIW3cvmAkEt8Xh3vPu+I3VcUstmhoZjemtA00lAVQ6aWa7kMKEhnXKZ4IihAU3AxJrnOEZDegNMxymZKk0KFCARM5AQJcmMP4AkseYWNGedz5IGVMPXAox9q5IFDZc0VtKCtPiT5bngsbPY/WJQ9ZKaOIWMuVEhPk5pOF5Su8iBhlRFXyC2NKC5Rk8tB+NmKO20bfmotCVTDiJZu0ODWhXTmi1oQLmFzHyLMadx19oohcqYVQQdItGiYc5YzeWMlgFVOgHdPt8NES7JPOVxulJkUyAaBLOQeBOoF2SRYeb6NwMa0POLmwHmL4EpK4SlYfXeciugisVH1H6B08rS54RrSFCHX9KkIe7S+1cBeoEzr33+aDnBmTmbwa7/V2zGpUvkRrQPjqpWWXtQuEzgkagpcZlyQbFMW2Kcj1zOCM5thJpLCzPQNKBTpTNm/auXLzD4gme8BSpoJ2OPPCsyIossAr1hUIMttMTEKImpcNE40KJ3sz+1bUkfrpbk+cRQrEMGSvt8uzFM/51gxmIqPjFRgLkjVfQW6CcjuYYHrgpT69FgciUNPIX3zXyUK1ffwlTplgRv+xo5uVZnp1z/296WrWC9YjNYgzVoWWPLpP0TdqkvWtOI8MEyWC/Iy5lN/G9Oud0Rb6x+j3k0IVnWkpvaARzFGNvqubUA7Vdx0HTIGBft893QMwoaOcAYXLq1bxaY5nhbnjbLl9UFuBceRC5SL3q957OkwRTC0fjgDWk7P4dWt2dLJpe+cuBvFqnCF/0NR5+E03eoetpjqywTw5p9W4HEsao6Zsz6IoxGWrbZ9tL4+gyRtFUm2jPmdsAHJ1Ajy81z9rEk+90Rl0UM2A65NUBsyk09dtcsa3fVHo11SBKQpkDfdyv6AdVysyIf7DxO+1bf3e7xw1x/qnR6Dm3xcIODK4ltxPA9YHFbawUSd1Y83NcdE3vOXcPzPUXnKfTdw6Jd2z0svkPdf7E4r7PdXpab42VAzw6pul8RRmTKuICkQ94rDSQBy7gwhGnAk8IDTyBpsMOZIJFKFv5rYk+1zrWKBGS/7FbtTYf65MpLVnaJBxthhnjByFsfX18OyG9nr36d/JRam5uw253P5x09jU8g4VbpjtKzrp7G+IdyP3fIKAWNcF+QCAhLEo42mSBrGBOTQ8ynPK4PS5XbBDPh1/dMYXajy63MNRBZaL7zudYnt9dDwhOQlk8XdbneME2bHw9u3wgjweQ9XWd+1+i2FVNkGdMr+mwawBOmZbZoEn3vyXlbN8Loj9HoingVJFYJuM9PVy4rQ7iIjEs8xtPwrNcLaHWop+HrXq9EnbbixzMrkQQec8GqLXVrOVySbI1btzAujWUy/lGZUZrP+LbdzgY/KxCf+xXVBDxtJ2C1SxDB4nuDGxJPCCtsilbj+qMNnBNMmCeOREeSHUn2vyfZy128Xyod8SQB6eC54hs3RCpLmBBqDsmRV0deHXm1j1ev2k6PffyetXh2FgS0VpqoOC60hgR7pMKpj8GY2nb9OXQ8KB65duTaHq6VAc3ApgqveHJlHHSYTWlIu+4Dves/1Sh2hPUDXv9gw6nQgoZ06dlSht3uMlXGluEyV9qW3QdMxQPTnEXCAxCHPatqtAgVM5F6i7tZw4HmR+6AZYVMGHlDri9uRuR3ZmHOFi6K+erCp1b9pvem16oVRfdo7F8NiV+hx1yjDtRqkdCtar3wIYpLbJUYiAvN7eIGp/nwRMA06H6BgV9hobLntOOzF6JB9eOyRsiff49ckrGGXa9v1y4eWZZ7Bq571evOxBpq289V36DRSfB3auPVfddqaLK+AfKXNr3VtUpv67pjvPRrb14r4LvSEWGq3MIrmO6GEPEE2viY9zqnu5S4GjpmxyrLCunKu5yRObcpYY2UxKIwVX9d8BiwXxEu6wjUYu/8CPnkLZLTDsLJY76u6jNu0yLqxCrrxn7a6v9IqKibMS67lQnTHfTf334475+8Gw4uPtxcnJx2eh376HuHyLqMyYYfrrvm+2Pb62xcmH7jvWsFKguPtpsLxqVrVGnhW9IIvDGtTVacnwQVb8d0uUTlt1qUJb52jRsajidrmuNTGdAUWOLT7ZtqdOD9PRmhfRQXhev2bffWy6Ce0Y9jyO2TspNG1br6eDNCUlQXyplKcI5mc7xsZnMa0s/0M6V4n40aHN/c+yUVTM4Kh1/q9eK/fwB+d73L +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Query users

+ + + +Search for users based on given criteria. + +## Request + +

Body

required
    sort object[]
    + +Sort field criteria. + +
  • Array [
  • ]
  • page object
    + +Pagination criteria. + +
    filter object
    + +User search filter. + +
+ +The user search result. + +
Schema
    page object
    + +Pagination information about the search results. + +
    items object[]
    + +The matching users. + +
  • Array [
  • ]
+ +The user search query failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
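As a small usage illustration for the body above, the TypeScript sketch below filters users by username. The endpoint path `/v2/users/search`, base URL, token, and the `username` filter field are assumptions for illustration.

```typescript
// Hedged sketch: look up users matching a username.
async function findUsers(token: string, username: string) {
  const res = await fetch("http://localhost:8080/v2/users/search", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    // sort and page are omitted here; per the schema above they are optional.
    body: JSON.stringify({ filter: { username } }),
  });
  if (!res.ok) throw new Error(`User search failed: HTTP ${res.status}`);
  return res.json(); // { items: [...matching users], page: {...} }
}
```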
diff --git a/docs/apis-tools/camunda-api-rest/specifications/find-variables.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/find-variables.api.mdx new file mode 100644 index 00000000000..e3f219e6eca --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/find-variables.api.mdx @@ -0,0 +1,116 @@ +--- +id: find-variables +title: "Query variables" +description: "Search for process and local variables based on given criteria." +sidebar_label: "Query variables" +hide_title: true +hide_table_of_contents: true +api: eJztW1tv2zgW/isEMQ8zWNV2LzPb9ZubprPe6bTZJM08pAFKS0cWJxSpkpQdw9B/XxxSsiRbvsyOi8UsVCBoIpLn/h2S50hratnc0PE9vWOas5kA+hBQlYFmlis5jeiYxlxG1aihAY3AhJpnOE7H9AaYDhMSK00yrUIwhjAZEaFCJsiiWkZmzEBElCRzvgBJQs0taM4GnyUNqIavORj7RkUrOl67P7mGiI5jJgwENFTSgrQ4xrJM8NAJN/zdoARrasIEUuZGhfgY0/H9mtpVBnRM1ex3CC0NaKZRKcvBuBVKO2pbqihtScxBRLV8NKhIMa3ZigaUW0jNH2HmKO5yu02gZGYVQYHIbNVgZ6zmck6LgCodge5e74YIl2SZ8DDZELIJEA2CWYg8C6QLMk/R0ZObCxrQt5c3F+jqCGKWC0vH5XPLrYDSFh+R+iUuK4qg4ZX7UqWHxnQXBf/OQa9w5bV3KC0ecGXG5rAr/xWbc+kc2bL2yVbVKu02CpcRPBEVE+cpZxTLtCXGycjlnODahqm5tDAHTQMaK50y6x+9fIHGFzzlHaGCfFL2xNM8JTJPZ6BbDDXYXEt0jJLoCmeNEzl6MSex7XL6dKOShx3DaQNyobT3txtD938RzFh0xR0TOZgvpLTeCuVkJNOw4Co3FR0NJlPSwKF4b/uj2Ij6BmKlOxy8LevMzesUNub6W0tbdAbrFZtDHaxBh44di5oL2ipXabIS9CtO7/D/3kwhOr2O0bbYIu3nmjZk9kjj554uR8XrF1h1C/MIK5fxbcLNRrJDorxXcl47dcltQuJcCMKiBZMhRJVWIcvYjAuOknTJqSS0s0M3kn56RYtgW4ZJxUugMN+7aT+Utjkk+xtmeFhLumf1YYt+B193LXmRQPhonCHha84Er0yDmMi0WvAIIrJASBzNHU5j+p08wofLc3GCJ26s2cuMx452mGsN0tau98saTGZKCWDS0eTyGL0NnZTZMAE8a7gcsauHOSE57FewiXznfwzgdx6eGFlHvD3v2DJ+1sAQhTZhkoQqzZjmRsk/64e57Ui9LV5VeJ2RqejQ771anl870aVdg9O5dSuKxqGmShhN3zeG68dXZVTieSegkqUdQn9gKVSRekrGvHHnv2+SMzdHy70Z0s/4X2S3+th73lzWoPv/kLkqdRxG+GNHvB3h3mKLFEqLfZY3eZYpjVeHJRdRyHREwoRpFuJxg0SQgXQXOWckJWM+z3UjGAUHaf21blfWbWT5IO/AVnOgjS4n5r4jksh7jPUY6zH2JzFmQpUdPvyXRnUT/R/9RaC/CPxlod5fBPqLwPkvAmU9fCqNxeWnJNSqhM7LNX1u7XNrn1v73Nrn1q3cakEyaaddfTU3QqZvO3PnzlWFm1udyxB7Zbu0fkvAJs4M1e2SG2Kr+WgYqWwXzhqRXVXhvRbtztj2nK4mQ+Gbb77j4SL/xWh0Wo9Ag8lFeVE4ew/1aFePS+94/J3NVO77ki3RDrYv/gtShyW2yjIxrdLTlvlwrGzgubSHd3OXXHc7wYfAtNXJ6vaTa9L6BFrlVbfO8ceuoW/golrEgB2QTwZ8KJdjX5qdty9lGxlbZZKANDnKvtvsOaGh124aniw8LvujsrsG59lEP9Td88jZQlwLaZsZp/bzdtqPeyKO7wk21z0ug2zzjsQhfQ+LVQnkfPAt23snAOBY9ftYPt5T37ura3vHKOCZ966byjs8Di9OJ3WGQsgJJvtW94QTWP/1tlHM323Ad26eFaTdzFf7NszcoFTMPLYBHjMuIBqQX5UGEoFlXBjCdOOwU+c5D7yZilYH99lMq5mA9G+7++3W/Ylc+ZklX+IxTBiWBXHizHO/v353Qf7x6se/P3yfWJuZ8XC4XC4HOg6fQcSt0gOl50Mdh/iD834YkNsENCaeFZkBYVHEkScTpM4OxGQQ8piH1ZsZpdgEHdMqOO7bYt3odgm1EXm55juvkE3Ip+sp4RFIy+NVtfG2WNPmm0ruBDCeCSYfaR0Hu0y3uZg8TZluYqjBANFumc2PX11evtihjbH0z9vbK+JJkFBFUGfQkhEqkXKJ7wzR8avRKKDlG0R0/NNoVCBN9PgJmkgCT5lg5eFoSx0uSVrHrVOsyhJn8ozSfM63+Q5aaC2D+K3XqELh824Ulvs9EQwvoQsmeERYbhPkGlZviIETggm3TfYg60HWg6wbZC87jj1Kz3gUgXThucEbN7jvEiaEWkLU46rHVY+rfbj6sesIOcHKhAWNcQhaK01U6IqxEb6QLTbH9Ip3dbHtD4o91nqs7cFaEdAUbKLw05NMGRc6zCZ0TIebUsnQX9covoKuF6CNKx/mWtAxXXvEFOPhcJ0oY4vxGt+1KIYLdMeGBKqIwx5ZVcS4z1YSz3XXczggG+/xXbA0lxEjr8n15c0t+ZlZWLKVs2S2+cKkIv169HrUSRWn7qE4uZoSr6GPu0YuqMgiqDvJ+smnEHb1eANhrrld3eAyb54ZMA16kqPxN/FQ8nPU3V3dTaJB+cu7Kkr+9dutczTmsev6+57LJ5ZmHoX12+6tgtSoqiHVoVfWhOoHdV1m1F0+GTVLG/W6VpXC6hzQqM5T95vvdDZFjYf6yxX/sclo8znIaOszjfu1N2Hzcwh8VjhMxcrZr4z4XU+ghqCNd91o8HwXXVdTlyRClaa5dDuFnPsWDGt4NhS5KRulgoeA9Y/xujJmNe29HyF3niN5PsCo9NCpNog5t0k+G4QqHYZ+2eb/mVCzYcq4HJYszPBi8uunD28nz95PLy4/3Fw+ez4YDeyTLygjgFMmG3K4+kxd9NzWdV3vi2f5tKyMWgtPdpgJxl0P1Gm7LtPKPW0KUyaWh6BMDvd0vUYGn7QoCnzsKkR0fP9Q5xL8qwhoAizywUAfMQbphdfk2S3KUEfxTvsFe51+xSQMIbMH5z400uPVx5tbRF753VyqIlyj2RK/qWNLOqaf6WeKHX1nXQdq93xNBZPz3EU39XTx338AIIl/bw== 
+sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Query variables

+ + + +Search for process and local variables based on given criteria. + +## Request + +

Body

    sort object[]
    + +Sort field criteria. + +
  • Array [
  • ]
  • page object
    + +Pagination criteria. + +
    filter object
    + +Variable filter request. + +
    variableKey object
    + +The key for this variable. + +
    oneOf
    + +integer + +
    name object
    + +Name of the variable. + +
    oneOf
    + +string + +
    value object
    + +The value of the variable. + +
    oneOf
    + +string + +
    scopeKey object
    + +The key of the scope of this variable. + +
    oneOf
    + +integer + +
    processInstanceKey object
    + +The key of the process instance of this variable. + +
    oneOf
    + +integer + +
+ +The variable search result. + +
Schema
    page object
    + +Pagination information about the search results. + +
    items object[]
    + +The matching variables. + +
  • Array [
  • ]
+ +The variable search query failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
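The sketch below shows one way the variable filter above could be used to list the variables of a single process instance. The path `/v2/variables/search`, base URL, token, and the plain-value filter are assumptions, not values taken from this spec.

```typescript
// Hedged sketch: list variables that belong to one process instance.
async function findVariables(token: string, processInstanceKey: number) {
  const res = await fetch("http://localhost:8080/v2/variables/search", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      filter: { processInstanceKey },          // filter by the owning process instance
      sort: [{ field: "name", order: "asc" }], // assumed sort field/order spelling
    }),
  });
  if (!res.ok) throw new Error(`Variable search failed: HTTP ${res.status}`);
  return res.json(); // { items: [...matching variables], page: {...} }
}
```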
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-authentication.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-authentication.api.mdx new file mode 100644 index 00000000000..1a376170168 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-authentication.api.mdx @@ -0,0 +1,56 @@ +--- +id: get-authentication +title: "Get current user" +description: "Retrieves the current authenticated user." +sidebar_label: "Get current user" +hide_title: true +hide_table_of_contents: true +api: eJztWMty2zYU/ZU7WLVTRpRTN3W50zhO6tbNeBy5XbhegOAViRgPBgBtqxr9e+cClETJUu1mssxCQxGPcx/nXDy4YIHXnhU3bNKFBk2QggdpDbvNmG3RxZfzihWsxrAzJGMVeuFkG98KdoXBSbxHD6FBEJ1zaALwzSSsoPPoRixjDn1rjUfPigV7PR7TYxttOsCgWSA9+E4I9H7WKTUHh6FzBiuCE9YENIFQeNuq3sP8kyeoBfOiQc3pX5i3yApmy08oAstY6yjKIJMjZIeC3efL+VuwsxjZKoYeywcnTc2WWZz+O873z/dzH1BDjYayihXc4fwAojQBa3QsYzPrNA+p6c0x2aikbxWff+Aa99vpB4DhGp/1mMixTv6D1WSTNr8feJBYv0YlVjYgECy1Dkxx5/icZUwG1H5AwMqFZcYCGm7CAaN957Y90KhLdGBnL7B0gOoEnMje9mlX1tvkp3kjSp7pOXh++pCLNcCSgq+d7doDsac+4N7L2qTk7iHzBRl2VuEBG7HrK5jwXKG/VNxMY9c+U63i5llBipMLae4OOKuoa+WjsLq1Bkkc0sSW0xPwgYu7LxfFlzG69iSqgpx8HmMSg3kSy0oWgpsLW9suPE3EO8VrmFkHnanQ+cBNJU0NcrZdI6VCQm/R0RoCKqINMlNaq5CbuAi08tqj+2JTBiaX57uUrvGpwmVQ1HbKdWcqHo2lOI/HRwd0iZ879AEUF3ce7rmS1XAnkdaAcFjRK1f+P7aA1tlSof7h6VawS8hlGgkVBi4VJIkA95AGlliR0G6u3p3CL8c//Xz7XRNC64s8f3h4GLmZeIWVDNaNrKtzNxP0o3Hfj2DaoEPQfA4lAq8qSTa5go34wLco5EyKlSJ6t4GyOfrbbPJ6aDnry25XdesNpHNyjwivr85BxizO5kTtE9Nxzox3ijB4abtQlIqbO7ah9Xmp+05r7ta73bYBWjwCD92wPPdvgD++3luKv06nl5AgQNgKo2JDI/3KEAWhpZG606w4Ho8zpvljenszHtOeGhl/QSQG8JGWsSTAnXCkAW0d9vqJgUlDRSO+FjPWyVru2h1tlVgv4rcpolRkP+07Xk0MUJYd6RCdsw6siMetCh4aqSI8nbVWtvuCTFr8Vmvfau1brT2ttWXGNIbG9vclUg4PDStYvr135RpZxjy6e3R0+VqwzilWsEWqmGWR54vG+rAsFq11YZnfEx333Ena1yN71J0qa6UYZQVXsXkfc9QxPLT0ezGcwNXZxym85wEf+DxmkkxuQ5+MT8Z7UWnoAUQ6E6QIk+4Ga8EKlop6L2wa/BLg5fKWEik6J8P8I01L6SmRO3R0YR3oobcX0eMZJQ5iWf/n3Uolv/01jURLM7Nx+vb5ZeAIsYLOJ8/Ho6On4ro8jzUirNadifSbGh5kaIAPAhOq8yGdn5QUaDxujqNrsxepB/5MFuFoRKQk5azWx1qGpitHwupcpGnrZ6lsmWsuTd6b8Pnp5I/rD28nry7OT88+fDx7dTQaj8JjiMG31gfNzcCP9xi2buS7wS42+8L/+hbQsxPwMeSt4jIeSmNYi75+bhjf/fSgkT5RpCq4YYtFyT1eO7VcUvPnDt2cFTe3m6Kht2XGGuQVHXVvFuyObupsIgS2IVaX6uKVYff7ASlsXdXvz0ga/wIiCPHv +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get current user

+ + + +Retrieves the current authenticated user. + +## Request + +
+ +The current user is successfully returned. + +
Schema
    tenants object[]
    + +The tenants the user is a member of. + +
  • Array [
  • ]
  • c8Links object[]
    + +The links to the components in the C8 stack. + +
  • Array [
  • ]
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
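A one-call TypeScript sketch of this endpoint is below; the base URL, the path `/v2/authentication/me`, and bearer-token authentication are assumptions for illustration.

```typescript
// Hedged sketch: retrieve the currently authenticated user.
async function getCurrentUser(token: string) {
  const res = await fetch("http://localhost:8080/v2/authentication/me", {
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!res.ok) throw new Error(`Request failed: HTTP ${res.status}`);
  return res.json(); // includes the tenants[] and c8Links[] arrays described above
}
```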
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-cluster-topology.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-cluster-topology.api.mdx deleted file mode 100644 index 600acc0930b..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-cluster-topology.api.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: get-cluster-topology -title: "Get cluster topology" -description: "Obtains the current topology of the cluster the gateway is part of." -sidebar_label: "Get cluster topology" -hide_title: true -hide_table_of_contents: true -api: eJy1Vt9v2zYQ/leIe9oA1XK6l0BvXpIGHrouSNINWOAHSjpLbCVSISmnnqD/vTiSshVbTRyg84slivd9d/zuBzuwvDCQPMBF1RqLGlYR5GgyLRorlIQE/kotF9IwWyLLWq1RWmZVoypVbJla+3Vv7J4LbvGJb5kwrOHaMrWeQQQaTaOkQQNJB+/nc/r7P3gyJS1KS/C8aSqRcYKPvxji6MBkJdb8mPy+RDa4SGR8T63xsUVjCdxuG4QEVPoFMwsRNFo1qK3wUaVafUVtjsEXrBKGHGRhC7Mlt4xrHDxnthRmiG7ExLXmW4hAtlXF0wohsbrFCITFeoLoRquNyNEwIddK1y50piTjgZhJlePrgdCuZT59SK0Ujy2yX56ELQUhB6d/ddhsecnWyuvjKUdsQlosUEME3jm/9Nt76CMolbHThPRF8hodrkaelUIW0wTGaiELgmuU/gEcfTkB6iVfSTNBkC9Kvd/Fai55gTlzrD4n6U161Y+4B9HfLPKOku3UeZPuO/tXxF9e7lJ2Z3Li2WlFSXyIfeneUvTFf8vXltHGoepDEKQaZ4XYoJzkDfJHgLKtqaVVyPPgR1WpJ/coJM+s2CCsKOuQV7Z8zZ+hGfndg1MneeBNSMxW7p9z5Dms+j4CKyydB9wMYECrG9RG+H51rEE4jLBnIvtHsL+7vUu5Vm45lOqd+G9CA8KWbZ2iHjcqIcd99xSRD5vVs4K5UK38QWHuuUeVQy3SNBp5znimlTE/wZuhBIWSH3hmlZ72J1NyLYpWY85GFmztTEKTm+zZb/ElzLC/XxI8KD3k3b+IKbJrbziVewcko3S4DyPtNgw66OkXQY22VDkkUKBrCpxqAuJhAkIEBvXGjbaHDlpdQQKdn6R9Escdteg+6ai19vGGAt1wLcgH11b2vX3N24rOo1IZr9zy4WXjWccPEV/wupU5Z+fs9urufh/6sz4/QJ/Pz+eTqK7xTyMubpbMR+hAx3eEAba0tpmE9ZtPAe77FR1k1mpht3dkFq4NyDXqResbUVAz8Dl0evebIAoPH4bM+uOfe1faNAuceZD62BEY9RWYz86OoiFnKa0zVdetdPkuCzdJGB8FNkr4SmRIaURXBl6PaT/6LywkNjubkSg+cyg0k8RxIWzZprNM1XHmzXb/aaXSuOZCxoHCxBeLPz9/uly8+7i8uPp0d/XubDaf2W/WBd8oY2suR35co93fFfdp/Czgbn9X/El3z6CdxW82biouJGWTC7oLRfUAO29Ww6XnAbou5QY/66rvafmxRb2F5GG1ryN68/OKBhpV4VfckmZZho11BVe17u5weOelpNvV9/UVZct3D9Ul1A== -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get cluster topology

- - - -Obtains the current topology of the cluster the gateway is part of. - -## Request - -
- -Obtains the current topology of the cluster the gateway is part of. - -
Schema
    brokers object[]nullable
    - -A list of brokers that are part of this cluster. - -
  • Array [
  • partitions object[]
    - -A list of partitions managed or replicated on this broker. - -
  • Array [
  • ]
  • ]
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-by-key-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-by-key-alpha.api.mdx deleted file mode 100644 index 50d9fa48161..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-by-key-alpha.api.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: get-decision-definition-by-key-alpha -title: "Get decision definition by key (alpha)" -description: "Returns a decision definition." -sidebar_label: "Get decision definition by key (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWEtz2zYQ/is76MWeUqSSOmnKm8Z2UrdJxmMr7UHWASSWIhISYIClZI2G/z0DkLRli3404+lJB49McLHPbz8Cu2HEF5bFM3aCqbRSKxCYSSVJasXmARNoUyMr/xizC6TaKAscxK54eKWuVBzHShNeqWkuLaASlZaKQFrgCnhR5Rwy5FQbBK4EKE2AiicFCtAKjnlZK8EhLWpLaCzomkBnQDlCoq/DK3WJ6J9mveg7uDi9nMLk/Az0Es1S4mp+EPFK2hFpXdgobQVHvJIjg5Z2Fkb9vrAUv3yv0azdq8MrlWkDWW0oRwMCicvChj7CK8UCVnHDS3Resni2YYqXyGLW5+XkJi1/45oFTLr0VZxyFjCD32tpULCYTI33czzNEbi1cqFQwDdc9/EPZDyAVS7THHhKFrgrS63k9xpBClQkM4kGXBDkatHvD1nAbJpjyVm8YbSunNtSES7QsIBl2pSc2qW3R6xp5s5hW2ll0bodr8dj97Pr9ICDru62TlO0NquLYg3GAwhF6FOYakWoyKnjVVXIlLtN0VfrdG52vdTJV0zJ5d7oCg3J1qPhnD8juKdS/1P53e4I1gQD3p2JLecsGakWg66cfPoMUjxSf6+/Bd5z9TnppzQu0VjZluDRDP72+vEMdnqeMtevX7RtUaIi+5wU0cMpMluqYGF4lQPlnB5yw8G04sZRzYMevRCkXtpVQsUVPRdRrfSToHJ6JRVO2ckueglL1jRO6Oi/UMEHJEjWPgcZlwWKED5pgz2zAjcIldFLKVCAVN69nngg0WL9KGdURicFlr/ucsdd7yZw3kp2dqHlFNfdrWDSWp9dvD+GP47e/D4/yIkqG0fRarUKTZaOUEjSJtRmEZksdX9O7jCEaY4GoeRrSBC4ED5uXsAtW4GtMJWZTIG0D7BzG1zp2vgep7r27W6pb1BYG7lT+Ql8uTjrOWst1WLXtN+T8bpwOniia4qTgqtv7BYKT+FrArYuS25uIH7XQBMwS5xq+7O08ud0eg6tCki1wFvm7Qy5IEqpZFmXLD4ajwNW8uv26e147NvaVfwZkSjA66rgykPrfjhSQXmLWx+YVJa4Sl+qMtrIhbxv925bdiA+aSPqu/HoiW5cScq9zoVcovKtuOLWH8IyXat9R+47ct+RL9iRb4a+jxMFLsvG4RCN0QZ0mtbGoHCnzcKrdwfm3rY7IKClfa/te23faw/1WhOwEinXgsVsgR457q4ds6j/+I1uj6I22gxeGRt3M0az7K/0tSlYzDZtMzVxFG1ybamJN5U21ERLV6klN9KNL3xh3eu26XowFTrlhV8eKqp7sX0huzfT+MAJV3ztk+xM3lX9bvxuPKjViT6g0U1J2ghbSG7RRK/W9fug2lb4OYr9zMBiWhtJ60u3rU1PgtygmdSuMDdQ6ex57e65FWJB98/7HkB//Tv1GJAq0357h4VdR9jWDZaNw1e7uDs/8+2T6rKsledQtWgPR3wrsG4I5RqrkCkq6xHezXl6sY/tG/inu+u+Cl1RWuT01LmQlNdJmOqynz3d/CaFTqKSSxV1Jmx0PPn05fPJZPTx7Pj08+Xp6FU4DumafPCVtlRyteWHu1UN3ba6m9aBH7kd3k/B5vZDsp/pbc30OkwSXlNUFVwq1yW+mJuOUGZsiFBYwOLhKdQ86FhhxjabhFv8YoqmccveKxbP5rck4llHSOtzx+KMF/b+eHC7cgfdfEIcwv8xNBxMTrfI1dqTYVG7Jxawb25c8sBkrpk3AcuRCzQ+4lZ2kqZY0ZaWnZGgo5Ublv9w6vjgB7LY7oE= -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get decision definition by key (alpha)

- - - -Returns a decision definition. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The decision definition is successfully returned. - -
Schema
- -The decision definition Get by key failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The decision with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-xml-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-xml-alpha.api.mdx deleted file mode 100644 index 22ed43648fa..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-xml-alpha.api.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: get-decision-definition-xml-alpha -title: "Get decision definition XML (alpha)" -description: "Returns decision definition as XML." -sidebar_label: "Get decision definition XML (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWN9v20YM/leI20uCyZLTpV2nNyNJs2xJESTOVsDxw0mirGulO/V+2DEM/e8DT5Lt2s7Wh2JPfjBkSTySH/mROt6KWT4zLJ6wS0yFEUpChrmQwgol2TRgGZpUi9rfxuwBrdPSQLYvDNzAp7vb8Fk+yziOpbL4LMeFMIAyq5WQFoQBLoGXdcEhR26dRuAyA6ksoORJiRkoCRe8cjLjkJbOWNQGlLOgcrAFQqJewmf5iOjvJr3oe3i4ehzD6P4G1Bz1XOBiehLxWpiBVao0UdoKDngtBhqN3Xsw6NeFVfbTV4d6Sa9On2WuNORO2wI1ZGi5KE3oET5LFrCaa14hecniyYpJXiGLWR+ey3V0/sQlC5igGNbcFixgGr86oTFjsdUOdwM9LhC4MWImMYMvuOzxHwh8AItCpAXw1BpKAgcnxVeHIDKUVuQCNRAIS7no14csYCYtsOIsXjG7rMltIS3OULOA5UpX3LaP3p2zppmSw6ZW0qChFW+GQ7rsO/3p7vZffCUKGJemaEzuynIJ2hMKs9BHM1XSorTeJXyx0UtV0v89R43VQs5Y0zRNwM5fc2XN6E0a4BqtdzHnosQshDulsU8rcI1QazUXGWYgpAfRo4ZEZcs9L3ldlyLlpDqqtUpKrH7+bMiDba+/dW0E961kZxdU8hlTS6lrBZPW+uThwwX8dv721+lJYW1t4ihaLBahztMBZsIqHSo9i3Se0o/kTkMYF6gRKr6EBIFnmQfNS0JVo7YCDZgaU5GLFKzyADu3gULb4uuC3LpFHF8v3qRgNxVblHFasF06j+Dp4aYn5FLI2b5pvybnriQdPFHOxknJ5RfWBMwKWx40umvFuKriel0t3xpoAmYst878J+V/ebOnmwj1+3h8D60KSFWGm7LqDBGISkhRuYrF58NhwCr+0t69Gw4b0kkZ/w4kEvClLrn01NqFIyRUG956YEIay2X6ozKjtJiJXbshaza5YB2JL1tEfSmeHy7FdStYCFt4nTMxR+k724Ib/wXIlZPHijxW5LEif2BFvj30cRxJoChr4iFqrTSoNHVaY0ZbidKrp090b5u2KmjssdaOtXastddqrQlYhbZQGYvZDD1zaKMfs6j/+A02+2ATrQ7OCI3f8gbMoJ73M4XTJYvZqi2oJo6iVaGMbeJVrbRtojlla861oPnJJ5det4XXE6pUKS/940OJpRc0uPSAd4aqa25xwZc+0GTyW9Xvh++HB7WS6CsaaUxrEba03GoVvVqq+YNqW+HvUeyHFoOp08IuH2lZG54EuUY9cpScNV06e1473bdCLOj+fOhJ9MffY88DIXPll3d82HeEsoLatJ4Pw7N97t3f+BJKVVU56fuonLUbJL4FrJuCqbhKkaI0nuXdoNmL3bZv4K/WIpyFlJSWOX37nAlbuCRMVdUPv+trUqokqriQUWfCRBeju6ePl6PB7c3F1cfHq8FZOAzti/Xga2VsxeWWHzRWHRr3aNQ68QP/6S7+1eZLcjxW2D1W6Fjph+C65EJSnfh0rrq2MmGH2goLWPza4QN1lmnQdYcJW60SbvBJl01Dj71vLJ5MN83Ed59MGB9BFue8NLvnFNtJPHnoTjRO4f84vTgYou4hl0vfFEtHdyxgX3D56rFMM20CViDPUHvErewoTbG2W1rWBxLUVtad/vqK+sE/A/ewdw== -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get decision definition XML (alpha)

- - - -Returns decision definition as XML. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The XML of the decision definition is successfully returned. - -
Schema
    - -string - -
- -The Decision Definition Get XML failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The decision with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-xml.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-xml.api.mdx new file mode 100644 index 00000000000..7c059fc358e --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition-xml.api.mdx @@ -0,0 +1,67 @@ +--- +id: get-decision-definition-xml +title: "Get decision definition XML" +description: "Returns decision definition as XML." +sidebar_label: "Get decision definition XML" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/SuYPSVTmpQTJ01509hO6tbOeGyn9YzrAwgsRcQgwACgZY2G/70DgJRki2pyyPSkg0b8WOzu230PJHcJjs4s5HdwgkxYoRXhWAolnNAK7hPQDRrqT8445DBDN9idrMxuL84hAY6WGdGEdTlcoWuNsoRveyXUktuL8/QfBQk01NAaHRqfwxIUrRFy4Fsx/sQFJCC864a6ChIw+K0VBjnkzrT4Mv5NhYRaK2YKOXnABdElcRWO5ZOQeSVYRShz1udGSavEtxaJ4KicKAUaUmpDXCXWeFJIwLIKawr5Etyi8WkL5XCGBhIotampi5feH0HX3fuEbaOVRetXvJlM/N920rcX5/+RKxGW2JYxtLZspVwQE+qMPFaTaeVQuZASPrnsqZb+eCtR64xQM+i6rkvgaFcqY/F91dE6UlIhkafkQhtv6KiQllCDpDH6UXDkRKgAYkBNCs0XW1nSppGCBX5ljdGFxPqXr9ZnsJn189Sm5DJa9nGJLr4ic7510bCI0e+uPh6T347e/Xr/qnKusXmWzefz1JTsALlw2qTazDJTMv/zdq9TclOhQVLTBSmQUM4DaCo9qgaNE2iJbZCJUjDidADYp018aSO+vsgxLc/x1eJ1C162YoMyrRFbcpqSL1dnAyEXQs22Q4c1JW2l90EL3bq8kFQ9QJeAE06OBn0ZxbZ1Tc1KLc8DdAlYR11rv0v5t2+2fHtC/X5zc0miC8I0x7Ws+kAeRC2UqNsa8qPJJIGaPsWz95NJ5336jv8AEkXwqZFUBWq9hCMUqde8DcCEso4q9rM6o42YiZdxU+jWvYCexCcR0SDFw3EpDrKTlD1Y8kil4IS2rvJRo3wIMxiSoNKme5HtRbYX2U6Rvd3m+0dtCsE5qkDPld6EJUo7QqXUc+R7Xe11tdfVbl0dfec9ci5cFXzOxCOq8Fo+p1FhpW7V/nVyr8i9In+iIt+NfdlNFfFVNp6HaIw2RDPWGoPcfwfL4N5/Xw6x+0fhXmt7re21tktrXQI1ukr3M7Iw2HIV5JAND7+D9RDFZsvRAVcX5jUJWDSPw0CsNRJyWEZBdXmWLSttXZcvG21clz36bj1SI2ghI0f97Si8gVBSMyrD5bHG+ht+6jYAPqZ1qzglH8jV6fUN+UQdzukiFNqHfO76w+TDZNSrN93hcXp5RiLCSMuNrWJw6zU/6jYa/4jjMHGzyFoj3OLaL4vlKZAaNNPWN2dFlz5e8O7PoxEk/cHHgUR//H0TeCBUqcPyng/bifiuoLEx80l6uM29y7MgIabrulVhH1Wz+IJEN4Ax2VrnASUgBUNlA8v7Kelgdh7vkL9iRHKY+qZE5gzb50y4qi1SpuuMxWWr/0LqIqupUFkfwmbH04svn0+mB+dnx6efr08PDtNJ6p5cAN9o62qqNvL4hG50VjgyF16unyA/PCLumxQGmo2kQnnaBHTLXmV3MKYySCDfNUj2QrtPerHcwXJZUItfjOw6f/lbi2YB+d39WltBjFxYf8whL6m0L2fOm9heXfXT6dfk/5hEj5aov0jVIuwRsvVnkMADLnaO2Lv7LoEKKUcTEEfbKWPYuA0vq+GyV9lq4/t06uXxL9vxan4= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get decision definition XML

+ + + +Returns decision definition as XML. + +## Request + +

Path Parameters

+ +The XML of the decision definition is successfully returned. + +
Schema
    + +string + +
+ +The decision definition request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The decision with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
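For illustration, a short TypeScript sketch that downloads the DMN XML follows; the base URL, the path `/v2/decision-definitions/{decisionDefinitionKey}/xml`, and the token are assumptions.

```typescript
// Hedged sketch: fetch the raw DMN XML of a deployed decision definition.
async function getDecisionDefinitionXml(decisionDefinitionKey: string, token: string) {
  const res = await fetch(
    `http://localhost:8080/v2/decision-definitions/${decisionDefinitionKey}/xml`,
    { headers: { Authorization: `Bearer ${token}` } }
  );
  if (!res.ok) throw new Error(`Request failed: HTTP ${res.status}`);
  return res.text(); // the response body is the XML document as plain text
}
```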
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition.api.mdx new file mode 100644 index 00000000000..041e88e477c --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-decision-definition.api.mdx @@ -0,0 +1,63 @@ +--- +id: get-decision-definition +title: "Get decision definition" +description: "Returns a decision definition by key." +sidebar_label: "Get decision definition" +hide_title: true +hide_table_of_contents: true +api: eJztWU1v2zgQ/SsDnlqsYjlt2u3qZiRpN7ttEaTp7iGbA0WNLDYSqZIjO4ah/16QlGwnlpO0CPbkg2FTGs7neyN5uGTEp5YlV+wEhbRSK8gwl0qS1IpdR0zXaLhbnGUsYVOkXu5kLRaxDK0wsvarhF0gNUZZ4JBtK4V0ATe4GP3n9tXc8AoJjXNhyRSvkCUs2zLxNy5YxKRTXnMqWMQMfm+kwYwlZBq878FlgcCtlVOFmbMGOgcqcMifCOaFFAVwQRa4c7pR8nuDIDNUJHOJBnJtgAppV/tHLGJWFFhxliwZLWrntlSEUzQsYrk2Fadw6e0Ra9tr57CttbJo3Y5X47H72nZ6KGHSgm2EQGvzpiwXYHx6MQspFFoRKnLqeF2XUvhqxd+s07nc9lKn31CQy71xtSUZPBrO+ROCeyz1v5TfjfBHrI0GvHNwXDlnyUg1HXTl5NNnODt5oP5efwDeU/U56cc0ztBYGUrwYAZfv3o4g52ex8z11y8CLSpUZJ+SItqdIrOhCqaG1wVQwWmXGw6mNTcEOt/p0TNB6rldJVRc0VMRFaQfBZXTK6l0yrZb5hlhxdrWCR39TCtwgaIlyLksMRvBJ22cIHFZWuAGoTZ6JjPMQCrvW991INXZ4sGGURudllj9tt047ro2gfMg2dmF0FActYNgGqxfXbw/hj+O3vx+/aIgqm0Sx/P5fGRycYCZJG1G2kxjkwv3cXIvR3BZoEGo+AJSBJ5lPmhewrpVga1RyFwKIO0D7NwGV7cQ38N9LtzdrvMKgo2RW2WfwNeLs75hLaSabpv2e3LelE4HT3VDSVpydcPWOHgMXBOwTVVxs8L3XQNtxCxxauyv9pQ/Ly/PIagAoTNct93OkAuikkpWTcWSo/E4YhW/Dau347HntKv4EyJRgLd1yZWH1v1wpIJqjVsfmFSWuBLPVRlt5FTet3uXkx2IT0JEPRUPh6nY067k4sbCjJcyA95Q4awG+oAw6J3gpR3tSbYn2Z5kO0n2ehvv77VJZZah8vBc8U1aUJqAl6WeY7bn1Z5Xe17t5tXRI++Rc0mF1zmVM1T+RXrOA8Ny3aj96+SekXtGPiMj3wz9s5socFk2DodojDaghWiMwczNSUqv3o16etvdo3DPtT3X9lzbxbU2YhVSobsRtR8sU8ESFvcPv4P1EMXGy8FhZ+tmumhm/TC6MSVL2DKQqU3ieFloS22yrLWhNp65Ss24kTwtAz7d7UC6HkylFrz0l4eK6m5sjhKPedWojMM7uDj9cgkfOOGcL3ySncm7qt+N340HtTrRHRon52cQIgyQ3GgTvVrH90G1Qfgpiv2026JojKTFF7ctpCdFbtBMGleYFVQ6e167WwchFnU/3vcA+uvfS48BqXLtt3dY2HaEbcxe2Xh0uI278zNPH6GrqlG+h6ppeDniG4GJsrHkAopYKQUq6xHenVD0Yh/DHfinm9IejlxRAnL61jmVVDTpSOgqFmHb6jstdRpXXKq4M2Hj48mnr59PJgcfz45PP385PTgcjUd0Sz74WluquNrw4wPS0JzwfszL9ZPjJw5nuhIR3lJcl1wqBxof27Lj1xUb4heLWDJ8nHAddSS5Ystlyi1+NWXbusvfGzQLllxdrznlSZhJ635nLMl5ae+f82zG9aIbNGcv4f84/RlMTneRq4XvDWXjVixiN27uveOIpb1uI1Ygz9D4iIPsRAisaUPL1tmOY9mq6X04dfT4Abf9p6g= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get decision definition

+ + + +Returns a decision definition by key. + +## Request + +

Path Parameters

+ +The decision definition is successfully returned. + +
Schema
+ +The decision definition request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The decision with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
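A matching TypeScript sketch for the JSON variant is below; the base URL, path, and token handling are again assumptions for illustration.

```typescript
// Hedged sketch: fetch decision definition metadata by key, treating 404 as "not found".
async function getDecisionDefinition(decisionDefinitionKey: string, token: string) {
  const res = await fetch(
    `http://localhost:8080/v2/decision-definitions/${decisionDefinitionKey}`,
    { headers: { Authorization: `Bearer ${token}` } }
  );
  if (res.status === 404) return null; // unknown key, as documented above
  if (!res.ok) throw new Error(`Request failed: HTTP ${res.status}`);
  return res.json();
}
```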
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-instance-by-key-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-instance-by-key-alpha.api.mdx deleted file mode 100644 index ab996c389c6..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-decision-instance-by-key-alpha.api.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: get-decision-instance-by-key-alpha -title: "Get decision instance by key (alpha)" -description: "Returns a decision instance." -sidebar_label: "Get decision instance by key (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWd9z4jgS/ldUupeZWmOYvezunN+4QOa4zWRyhMxeVZLaEnaDtWNLHqkNoSj+962WbCBgCDOb2qc8pIht6eufX6slLTmKqeXRHe9BLK3UikllUagY+EPAE7CxkQVKrXjEh4ClUZYJluwODu/VvYqiSGmEezVKpWWgkkJLhUxaJhQTWZEKNgGBpQEmVMKURgZKjDNImFbsXOSlSgSLs9IiGMt0iUxPGKbAxvoxvFc3AO7prh76ng37NyPWvR4wPQMzkzB/eNMWhbQt1Dqz7dgPbIlCtgxY3HvRqueFefKPryWYBX16e68m2rBJaTAFwxJAITMbOgvvFQ94IYzIgbTk0d2SK5EDj3jtlUHllF9hwQMuyXWFwJQH3MDXUhpIeISmhF3/jlJgwlo5VZCwL7Cord/zdsDmqYxTJmK0TFBASiW/lsBkAgrlRIJhZABSHPZjxQNu4xRywaMlx0VBukuFMAXDAz7RJhfoX/18xlerB9LbFlpZsDTjx06HfvZ135NEobdlHIO1kzLLFsy4DIIkdF6MtUJQSGCiKDIZCwJr/2EJcbmlo8iyTxPn6UpbPf4DYqRAGF2AQek1awrACSY2xeGY+0O+CrhFgdDsB/fpyOyAgypzIl3/c/fytjvq93jAL7qDS/fP7dXNdf98cDGonn69+vTbFbERJWZkSG/HzBuS1yfIVcBhJrLSObJXKViZb9FINd22PhEILZQ5NHpgA8SS4/Y8kXohZFaaRsFHZUz8vONiCqMpmXowkUrStJeJcAXLkjXutrSXzaZa1hO7amtf2rAa94CQQXJamAa9GrD38eo50CtXDE+BpbL5DcCfwVjpS8NRt/zzx0ZpMz/9RNeMnIQmdpPsfZANp3v988HN4NPV76Puvy/7POCXg1F/2L38vf//62H/hj59C8mfqlSz3IAtMzzNzX7scWIhKKHw1Hzwo7fSogl0dbhgDRDIiufKeVUdIBmookS7pZswRri1FSHffr8GOro4ERjDVCCbg1kXIUjYXGIq1c6yualR4Z6GDst77RhrNhKeakGAT729CjxmTaLjpPl23M8iKw8AE9b3IG/FuV/P2gSc/OOjfbj8Q1IH5WA63StCyAXGKSTDMoOXygZDWD4ZKvRvzwLCeD4Janga3RggB6MSeGxGkvTpGbCDBXGzREPyqcS/TqeGBNEed98//sN30cRPbXSX//TdRHkW+S9Q5SD2Ma74sHiybA/86ENdD6PkP8yo7bx4hk6usT9UoT8A/o92Q8Oq8afhq4Cfnd7601YHLLq+DpKQfdQG6p0UE8Y1QjOZOO47Les9BhvrZHF0g1AYPc4g/2F/o/BUsS679iMrucznM+2Y/MCxl343vDhn/zr76ZeHNyliYaN2ez6fh2YStyCRqE2ozbRtJjH90bi3IRultHTkYsHGwESSuBVaZGyT+cwWEMuJjBnquvNzylBOePuOL39YtSAH+/fSyD1udtntcFDvAxdSTfdFuzkT4foHLsa6xGicCfWFb5LuuSagy2yZ58Jsd7VbAqrtUWm/t1f7z2h0zTwEi3UCm91sJYiMyKWSOfVcZ50OrQ2P/unnTsdRgyJ+giWKwWORCeV3ITvmSMXyTd6GfhmtjkdeJjLayKnclfu0e6qSuOctqol4dioRaTlz4FM5A+V2B3Nh3enLRJfqlZqv1Hyl5gtS86emNbJLhEQwlIdgjDZMx3FpDLWbqczWxwK17Gr1fOXaK9deuXaIa7QjBEx1wiM+BZc5dMwe8Xa9CrZqNW172XA4vKKTcDCz+hy/NBmP+NITaRW128tUW1xFy0IbXLVnFKWZMJLuLFxQ6bMnXJ1ImY5F5l43BZQ+bO9Ldi4yPgiEuVj4w0dtdqDfd953GlFp6AFEuhrxFvp03CoRNSxxvRHWDz4F2G0lLMSlkbi4oWnePWMQBky3pKCs06SS59Dp2Q/iQfXPRZ08//1t5OIv1US76VUe7CtCUanPBnknfLefc9cDR51Y53mpXP1UU98YiS3DqpsnIlUmY6BtT7S+3KmHXfovrDqNZO9CCorPnLpsTiWm5TiMdV5fOK1/x5ket3MhVbsSYdvn3Y+3V71u63Jw3r+66bfehZ0QH9EZX2iLuVBbenwAbGjyxgvX2L1xt2xvdx2w3Cwhr5d460u8Kh8RHrFdZEK6IyYXyGVVSO74fiHhAY+a7pkegqoa3PHlciws3JpstaLXTiMe3T1sioerNom0zm88mojM7t4GbsfszbC6N3zL/sY7wkb/1CdGauFqoTsi4TzgX+i+ovECbvWwCngKIgHjzPYju3EMBW5h7N0AUk1Zl/cPfSoGfwIPW7FT -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import 
SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get decision instance by key (alpha)

- - - -Returns a decision instance. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The decision instance is successfully returned. - -
Schema
    evaluatedInputs object[]
    - -The evaluated inputs of the decision instance. - -
  • Array [
  • ]
  • matchedRules object[]
    - -The matched rules of the decision instance. - -
  • Array [
  • evaluatedOutputs object[]
  • Array [
  • ]
  • ]
- -The decision instance request failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The decision instance with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-instance.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-instance.api.mdx new file mode 100644 index 00000000000..3cc41b3ca67 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-decision-instance.api.mdx @@ -0,0 +1,75 @@ +--- +id: get-decision-instance +title: "Get decision instance" +description: "Returns a decision instance." +sidebar_label: "Get decision instance" +hide_title: true +hide_table_of_contents: true +api: eJztWltz4jYU/isaPbVTB9ht2m7pEw1kS5tlU0K2nclmWmEfsLq25JWOkzAM/71zJBscbCDZZvrEQyaAj75z/Y5uXnIUc8u7N7wPobRSKyaVRaFC4LcB1xkYgVKrYcS7fA5YSg1LoYBHYEMjM5LiXT4GzI2yTLBoG7D1UfGAZ8KIFBAMaV1yJVLgXR5t4Q4jHnBJgJnAmAfcwOdcGoh4F00O21onMTBhrZwriNiwz/SMYQx1EwJ2H8swZiJEywRZmSv5OQcmI1AoZxIMm2nDMJa2wQEecBvGkAreXXJcZGS5RSPVnK9Wt2SkzbSyYOn5606H/tUNreEyaZnNwxCsneVJsmDGBREiH7FQKwSFBCayLJGhy0j7H0uIy4pFIknez1xUC9v09B8IkYJuKJMovWXbwf4NFhWHpEKYg+EBn2mTCvQ/fX9aSzX58gkWO4PdYiONwDAW6AMqLVManXA97AUKfccFk2ghmf3kfvq7Xhx/MwvmDlwOSSQzMhVmUcFr8VXQVFW1xDV5taeCHK5FgdCcXPdoz+iAg8pTItzgQ+/iujcZ9HnAz3vDC/fhenR1OTgbng+Lb7+N3v8xIiaixISs3ibgFekbEOQq4HAnktxVR78wcNvXdUojgXCCMq0zmLzYALFovz+PtJ4LmeSmUfFeHTM/br+azGhiSB9mUkka9jJlW8CyaI1b1fayFCl1PfKr9PalHStxdyh5Phf670aHQEeumz8Flvr+M4A/gLHS97u9Yfn2daO2Oz/8iaGZOA1N7CbddZANp/uDs+HV8P3or0nv54sBD/jFcDIY9y7+Gvx5OR5c0aPnkPyxSSXLDdg8waeF2cvuJxaCEgqfWg9e+kCLXO1uWEME8uLQHFV0B4iGKsvRVmwTxogFrQ4Q0urva6DHNveqBmY5+vnoXli2VsHuJcZSbc36m/7UqlnnkHzE9jFmo+GxDQS4tXwIPGZJoP2EeT7uB5HkO4AJ60uQKzkelKM2yab4+Ezvbv0QeXC7u5Q+KkJIBYYxROM8gZepBJMnxcKkgH5+CRDE4Qoo4Um6MTsORkXw0Iwk6dEBsJ2dcDM3Q/Q+x5JH+/Kxdl17+YqaZ0b7MO7jePoHX8QpP7QxvP7RF7PqIPJ/4NVO7H3E8mn0zKoKvvOlUYoRU3bTr1pHB7jntjW7WvlbwN9zMItxse0h8VXAT5++8aFdHVh0C0CIWuydNiSGQiaWCeNWTHcyco3CWVnusNhUR4u926PM6GkC6Tf1bdJ2V7j0koVe5uuZdhZecOq134zPz9iPp9/9cPtVjJjZbrt9f3/fMrPwBCKJ2rS0mbfNLKQ/kvu6xSYxGAr3gk2BiShyU7lI2Kbymc0glDMZMtTlEtEZQzXh/ds/T2KxVtm50M+NbOiE1+NhuVlaSDWvq3ZjZsItNLiY6hy700SoT3xTdIdWCz1m89RtyzbL34qCYh+V2y9d1P0ymVwyD8FCHcFm514oIidSqWRKi7PTTocmkgf/7ftOx1GDMv4ETxSDhywRym9XttyRiqWbum35Obc4HnmZzGgj53Jb7+NlVlHEfe9RScRXzUQsaZeI8JOlRiUjJnKMSaunDwsNOCNE4nr1kWRHkh1J1kyyb+v1fq7NVEYRKFeea74VB2AiSfQ9REdeHXl15NVuXp0+dRVJezcHPpd3oGivQNt7YtpM5+q4rDwy88jMF2Tmd037ux7xEcFQHYIx2jAdhrkxdLQSy2R99l3qLqbEI9eOXDtybRfX6OgTMNbF5bu7PceYd3m7nARPSjNte1m/7VzRhTVdlBaX7blJeJcvPY9W3XZ7GWuLq+4y0wZX7TtK0p0wUkyLg1Z67PlW1lGiQ5G4n5vySQ+qR2pnIs1VJNgbNh5cTdhbgXAvFv6CTZst6DedN51GVBLdgdi7HPqrYH/hW+0QJSxRvRHWCz8F2J2CWQhzI3FxRcN8eKYgDJheTjlZV0mhz6HTdy/Eg+LDeVk7v/4xcemXaqbd8KIM6oZQVsr7L95pvaqX3OXQMSfUaZor1z7V3C+LRMWxMMktkkMBT2QIdGLXXb+BUYpd+CesuHFjr1qUFF85ZdecS4zzaSvUaTv0w9b/p4metlMhVbtQYdtnvXfXo37v5GJ4NhhdDU5etTotfEDnfKYtpkJV7HgLWF/ibXu83EwZh984KfKC8IDtLBHSXSk4h5YFn254nU884N2G9wdug4IUN3y5nAoL1yZZrejnz3QUyrs3txsOOdJF0tLniHdnIrHbr65UPflqXLzk8jX7/15oaYxOeeSvFq4juDNuzgP+iW6mm16rWN2uAh6DiMA4p71gLwwhwwpE7f0VIta6xb0dECP+Bad8t7U= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from 
"@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get decision instance

+ + + +Returns a decision instance. + +## Request + +

Path Parameters

+ +The decision instance is successfully returned. + +
Schema
    evaluatedInputs object[]
    + +The evaluated inputs of the decision instance. + +
  • Array [
  • ]
  • matchedRules object[]
    + +The matched rules of the decision instance. + +
  • Array [
  • evaluatedOutputs object[]
    + +The evaluated decision outputs. + +
  • Array [
  • ]
  • ]
+ +The decision instance request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The decision instance with the given ID was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
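A hedged TypeScript sketch for reading one decision instance, including its evaluated inputs and matched rules, follows; the base URL, the path `/v2/decision-instances/{decisionInstanceId}`, and the token are assumptions for illustration.

```typescript
// Hedged sketch: fetch a single decision instance and inspect its evaluation details.
async function getDecisionInstance(decisionInstanceId: string, token: string) {
  const res = await fetch(
    `http://localhost:8080/v2/decision-instances/${decisionInstanceId}`,
    { headers: { Authorization: `Bearer ${token}` } }
  );
  if (!res.ok) throw new Error(`Request failed: HTTP ${res.status}`);
  const instance = await res.json();
  // evaluatedInputs and matchedRules (each with evaluatedOutputs) mirror the schema above.
  return instance;
}
```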
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-by-key-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-by-key-alpha.api.mdx deleted file mode 100644 index 0833c5f5475..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-by-key-alpha.api.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: get-decision-requirements-by-key-alpha -title: "Get decision requirements by key (alpha)" -description: "Returns Decision Requirements as JSON." -sidebar_label: "Get decision requirements by key (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWE1z2zYQ/Ss76MWeUqSSOmnKm8Z2UreJ67GV9iDrAAJLEQkJMPiQrNHwv3cAkrJsSbbTyUwvOnhkAotd7O57C2BXxNKZIemEnCETRigJGr85obFCaQ2ZRoSjYVrUVihJUnKN1mlpYC1+vSEO1MAfN39dxrfyVqZpKpXFWzkuhAGUvFZCWhAGqARa1gWFHKl1GoFKDlJZQEmzEjkoCae0cpJTYKUzFrUB5SyoHGyBkKm7+FbeIIavSS/6Dq7Pb8YwuroANUc9F7iYHiW0FmZglSpNwlrBAa3FQKOxWwODfl1c8Z++OdRLP3V8K3OlIXfaFqiBo6WiNHHw8FaSiNRU0wr9Lkk6WRFJKyQp4V2ENgP0Jy5JRIQPZE1tQSLSRZuT1GqHj6M9LhCoMWImkcNXXPYR4LuSFcGiEKwAytpMUHBSfHMIgqO0IheowTtifT52aohJRAwrsKIkXRG7rL0fQlqcoSYRyZWuqG2H3p6Qppn6/ZtaSYPGr3g9HPqfbR92WvNQMI4xNCZ3ZbkEHbCFPA5RZUpalNYrpHVdCka9wuSL8VpX2/tU2Rdk1qdDqxq1Fe2e9qXhBQ4+l40fEfAm6vCy3o6xWsjZTuNnny7BSz8Jg6BzjtqINlBPevnL66e97PQ8b3BXmC/4827Zzi3Bn7eh0SinGV6+OGCbwepXQ65V1WVuf2ZgQQ3UVBvkwbZFSaV9iUfebisNF2fPOeU1C1t6dWe7ImixIk3jxU6+j1sf0EJORYk8hk9KY1+2gGqEWqu54MhByD40gcSQKb58kn21VlmJ1c/bLHy4sRFctZKdXWjZ6VnSCmat9cn1+1P47eTNr9OjwtrapEmyWCxinbMBcmGVjpWeJTpn/s/LHccwLlAjVHQJGQLlXHibtIR73oOpkYlcMLAqONhtG3zmWv+eLhrt7Ham19RxWmwlfgSfry967i+FnG2bDmty6kqvg2bK2TQrqfxK7nHwHLxGYFxVUb0+Cx4aaCJiLLXO/Ffq/z4eX0GrApjieF/BOkPeiUpIUbmKpCfDYUQqetd+vR0OQynwGX+BJxLwri6pDNB67I6QUN3jNjgmpLFUsh+VGaXFTDy2+5CTHYjPWo96Ip58DxEXwhbBwEzMUYZD3JcWf93JlZMHeh7oeaDnD6Tnm13n5EiCj7L2OEStlQbFmNMaub8IlEG9v4f2tj2D0dgD1w5cO3BtH9eaiFRoC8VJSmYYkOPftClJ+pNwsHkSJqs9T7HGvzpRz/v3s9MlScmq5VOTJsmqUMY26apW2jbJ3CdrTrXwvYKQWz/d8q7HU6kYLcPwrrz6ic2XwaMGwgdqcUGXIc7e5EPV74bvhju1etE9Gn1LovWwReVGpejVesrvVNsKv0RxeI8bZE4Lu7zxy9rwZEg16pHzuVmjpbMXtPvvVohE3T/vewz98c84wEDIXIXlHRy2N0I2Xp1kGL/aht7VRWAQU1XlZCijctbej+iGY13Hx3OrFAylCSDvmiq92Md2Bv7u3qevYp+UFjl99ZwJW7gsZqrqGz3r36xUWVJRIZPOhElOR58+X56NBh8vTs8vb84Hr+JhbO9scL5WxlZUbuzDP6923/eyZbjjHYUO1/HjIKzuT5NDK21HK61Dp8U7m9QlFdLzJaR11VWXCdlZXUhE0n2tnmnUlYgJWa0yavCzLpvGD4eNkXQyva8ooQRxYUL4SJrT0jxuy20m8agzxo/hf2jW7QxXN0jlMhTK0vkvEpGvvuu1tx/WTJuIFEg56hCCVnrEGNZ2Q89WK84XnfUx8OHcV4t/AVkozl4= -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get decision requirements by key (alpha)

- - - -Returns Decision Requirements as JSON. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The decision requirements is successfully returned. - -
Schema
- -The decision requirements Get failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The decision requirements with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml-alpha.api.mdx deleted file mode 100644 index 20db0594502..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml-alpha.api.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: get-decision-requirements-xml-alpha -title: "Get decision requirements XML (alpha)." -description: "Returns decision requirements as XML." -sidebar_label: "Get decision requirements XML (alpha)." -hide_title: true -hide_table_of_contents: true -api: eJztWN9v20YM/leI20uCyZLbpV2nNyNNu2xJESTOVsDxw+lEWddKd+r9sGMY+t8HnqTEtZ2uA4o95cGQJfHIj+RHnngb5vjCsnTG3qKQVmoFBr94abBG5SybRyxHK4xsnNSKpewanTfKQn5IHLiFj5cX8Z26U2maKu3wTk1LaQFV3mipHEgLXAGvmpJDgdx5g8BVDko7QMWzCnPQCk557VXOQVTeOjQWtHegC3AlQqbv4zt1gxjuZoPoG7g+u5nC5Ooc9BLNUuJqfpTwRtqR07qyiegER7yRI4PW7T0YDeviOv/pi0ezplfHd6rQBgpvXIkGcnRcVjYOHt4pFrGGG14joWTpbMMUr5GlbAjQ9VZ8/sQ1i5ikODbclSxiffRyljrjcTfY0xKBWysXCnP4jOshAgeDH8GqlKIELrpEcPBKfvEIMkflZCHRADniKB+DhphFzIoSa87SDXPrhqBL5XCBhkWs0Kbmrnv0+oS17Zwg20Yri5ZWvByP6bIP++PlxTfREhGsFwKtLXxVrcEEYmEeh5gKrRwqF0DhvUvu64r+70G1zki1YG3bthE7eQrMYQTv0QWYBZcV5jFcaoNDeoEbhMbopcwxB6mCI4PnkOl8vYeTN00lBSezSWN0VmH98ydLGLZxfw1uAledZG8XdPYJhaP0dYJZZ312/e4Ufjt59ev8qHSusWmSrFar2BRihLl02sTaLBJTCPqR3HEM0xINQs3XkCHwPJdkk1fkVYPGSbRgGxSykAKcDg72sIGC2/nXh7mDRVx/WPyYhN1kbNHGG8l2ST2B2+vzgZRrqRb7psOagvuKdPBMe5dmFVefWRsxJ1110OiuFevrmpuHmvnaQBsx67jz9l9p/8vLPd1Eqd+n0yvoVIDQOT6WVm+InKilkrWvWXoyHkes5vfd3evxuCWdlPHv8EQB3jcVV4Fau+5IBfUjb4NjUlnHlfhRmdFGLuSu3Zi1j7lgPYnfdh4NxXjyX4pxJV0ZDCzkElVodituw7ZQaK+ey/O5PJ/L8weW56tDe+VEAUXZEA/RGG1AC+GNwZy+LaqgnnbswTZVMFr3XGvPtfZca0/VWhuxGl2pc5ayBQbm0Ld/ypJhJxxt74TJ5onJoQ0fwRGzaJbDrOFNxVK26WqqTZNkU2rr2nTTaOPaZEkJW3Ijaa4K+aXXXe0NnKq04FV4fCi39IIGmsHnnWHrPXe44usQazL5teo34zfjg1pJ9AmNNL51HnbM3OoWg1oq+4NqO+HvURwGGYvCG+nWN7SsC0+G3KCZeMrPA2N6e0E73XdCLOr/vBt49Mff00AFqQodlveU2AdCWUFjO+Tj+MU+/a7OQxUJXddehVaqFt03Et9yrJ+Oqb4qKVDZQPR+AB3ELro38FdnEV7ElJSOOUMHXUhX+iwWuh6G4odrVuksqblUSW/CJqeTy9sPbyeji/PTsw83Z6MX8Th29y4432jraq62cNCYdfibj4avo3AUcBzvhmDzuJ88nznsnzn01AyzcVNxqahYQk43fXuZsYPthUUsffpogjrMPOq7xIxtNhm3eGuqtqXHAR5LZ/PHphK6UC5tCCJLC17Z3VOM7Uwe9SbzY/h/zjYOxql/yNU6tMfK0x2L2Gdcf+Pgpp23ESuR52iC1530RAhs3Jaeh+MKajEPjf/9GfWGfwC4/8L9 -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get decision requirements XML (alpha).

- - - -Returns decision requirements as XML. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The XML of the decision requirements is successfully returned. - -
Schema
    - -string - -
- -The decision requirements Get XML failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The decision requirements with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml.api.mdx new file mode 100644 index 00000000000..9dc3ff49753 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml.api.mdx @@ -0,0 +1,67 @@ +--- +id: get-decision-requirements-xml +title: "Get decision requirements XML" +description: "Returns decision requirements as XML." +sidebar_label: "Get decision requirements XML" +hide_title: true +hide_table_of_contents: true +api: eJztWN9v2zYQ/leIe2oxRXLatOv0ZqRply0pgiTdAmR5oMiTxYYiVZKKYxj63weSku3EctcCxZ78YFg/jnff8b6PpG4Jjs4s5LfwHpmwQiti8GsrDNaonIW7BHSDhjqh1SmHHGboBsvLDcOb8zNIgKNlRjTeGHK4RNcaZQkf80yoJTfnZ+k/ChJoqKE1OjQeyRIUrRFy4CNx/sQFJCC8+4a6ChLonXLInWnxOYbrCgm1VswUcnKPC6JL4iocx5SQeSVYRSiL+ChplfjaIhEclROlQENKbYirxDqrFBKwrMKaQr4Et2g8dKEcztBAAqU2NXXx0dsj6Lo7D9k2Wlm0fsSrycT/bcO+OT/7JloiLLEtY2ht2Uq5ICbMN/I4p0wrh8oFUPjossda+ustqNYZoWbQdV2XwNEuMOMI/A1aR0oqJPKUnGvjTR0V0hJqkDRGPwiOnAgVEhkyJ4Xmiy2ctGmkYIFrWWN0IbH+5Yv1GDZxPwU3JRfRso9LdPEFmfPli4ZFjH57+eGY/Hb05te7F5Vzjc2zbD6fp6ZkB8iF0ybVZpaZkvmft3uZkusKDZKaLkiBhHIufEwqfVYNGifQEtsgE6VgxOmQYA+b+MmN+fXTHGF5rq8Gr4vwvBgbtGmN2BLWlHy+PB1IuRBqth06jClpK70PWujW5YWk6h66BJxwcjTo8yi2rWtqVpp5GqBLwDrqWvuftH/9asu3p9Tv19cXJLogTHNcS6sP5JOohRJ1W0N+NJkkUNPHePd2Mum8T1/x78hEEXxsJFWBWs/TEYrUa96GxISyjir2syqjjZiJ53FT6Na1gJ7E72NGgxgPx8U4yE5Sdm/JA5WCE9q6ykeN8iHMYABBpU33ItuLbC+ynSJ7vc33D9oUgnNUgZ4rvQlLlHaESqnnyPe62utqr6vdujr6kZPkXLgqBJiJB1ThpD6nUW6lbtX+bLmX516eP1Geb8Y+9KaK+Fk2nodojDZEM9Yag9x/GMvg3n9uDrH7fXGvtb3W9lrbpbUugRpdpfvmWeh2uQpyyIad8GBzJ8yWO9peXejgJGDRPAyNstZIyGEZNdXlWbastHVdvmy0cV324Av2QI2ghYw09a+j9gZOSc2oDI/Hautf+G7ckPMxrVvFKXlHLk+urslH6nBOF2Gufcinrt9N3k1GvXrTHR6nF6ckZhiZubFaDG697EfdRuPvcRy6cBZZa4RbXPlhcXoKpAbNtPX1WTGmjxe8+/toBEl/8WHg0R9/XwcqCFXqMLynxDYQXxU0NiKfpIfb9Ls4DSpiuq5bFZZSNYtnJLqRGJOtdT6hBKRgqGwget89HczO4hvyV4xIDlNflMicYQWdCVe1Rcp0nbE4bPVfSF1kNRUq60PY7Hh6/vnT++nB2enxyaerk4PDdJK6RxeSb7R1NVUbOD6i23HmG+kZL9fbyA+0j/tChTZnI6lQnjohw2UvtlsYFRskkO/uMnu93SW9Zm5huSyoxc9Gdp1//LVFs4D89m4tsaBJLqy/5pCXVNrnDenNBF/0IflL8v+0qUfnqX9I1SIsFrL1d5DAPS6+0YPv7roEKqQcTcg6Wk8Zw8Zt+Fl1nr3gVsvgxxOvlH8BuLx9kQ== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get decision requirements XML

+ + + +Returns decision requirements as XML. + +## Request + +

Path Parameters

+ +The XML of the decision requirements is successfully returned. + +
Schema
    + +string + +
+ +The decision requirements request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The decision requirements with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
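For illustration only, here is a minimal Python sketch of calling this endpoint, assuming the `requests` package is available; the base URL, bearer token, decision requirements key, and the `/v2/decision-requirements/{key}/xml` path are placeholder assumptions and should be verified against the endpoint definition above.

```python
import requests

# Assumptions: adjust the base URL, token, and key for your cluster.
BASE_URL = "http://localhost:8080/v2"           # placeholder Camunda 8 REST address
TOKEN = "<your-access-token>"                   # placeholder credential
DECISION_REQUIREMENTS_KEY = "2251799813685249"  # placeholder key

resp = requests.get(
    f"{BASE_URL}/decision-requirements/{DECISION_REQUIREMENTS_KEY}/xml",
    headers={"Authorization": f"Bearer {TOKEN}", "Accept": "text/xml"},
)
resp.raise_for_status()   # raises on the 400/401/403/404/500 responses listed above
print(resp.text)          # the DMN XML as a plain string
```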
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements.api.mdx new file mode 100644 index 00000000000..34d380d6fbc --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-decision-requirements.api.mdx @@ -0,0 +1,63 @@ +--- +id: get-decision-requirements +title: "Get decision requirements" +description: "Returns Decision Requirements as JSON." +sidebar_label: "Get decision requirements" +hide_title: true +hide_table_of_contents: true +api: eJztWU1z2zYQ/Ss7OCVTWpQTJ3V509hOqjR2Pf5oD64OILgUEYMAA4CSNRr+9w4AUpYtylY6melFBw1FcrG7b3cfAC6WxNKpIckdOUXGDVcSNH6vucYSpTVkEhFVoaaWKznOSEKmaDvJq3XBiGRomOaVkyQJuUJba2lgpXZdGqiBL9d/Xgz+kSQiFdW0RIva+bEkkpZIEpL1WPkDFyQi3OmvqC1IRFpnM5JYXeNzJ24KBGoMn0rM4B4XoHKwBULWhzWCecFZAZQFBynUkn+vEXiG0vKco4ZcabAFN/0aBiQihhVYUpIsiV1UDgeXFqeoSURypUtqw6OPR6RpJs5/Uylp0LgR74ZDd9nE0GsNuAFTM4bG5LUQC9A+5JiFqDIlLUrrFNKqEpz5HMbfjNO63PRTpd+QWZcO7TJuefBpWxp2APhaNn5GwJuorZeVO8ZqLqe9xk/PL8BJv1gGXucMteEhUC+ifP/uZZStntcN9oXZ8W1XWOPT121oNKrWDC92Dth6sLrRkGtVtpnbnhmYUwMV1QYzb9uipNLuiihI7wDKaeZWOHV9s9LYYkmaxokd/Ri33A0aCznlArMBnCvtRC3lwgDVCJVWM55hBlx24fFEhlRlixcZWGmVCix/2WTiU+dGcBkkW7sQGOqYEgTTYP3u6tMJ/Hb04dfJm8LayiRxPJ/PBzpnB5hxq/RA6Wmsc+Z+Tu7tAG4K1AglXUCKQLOMO5tUwCP3wVTIeM4ZWOUBtm6Dy17A9/LEEd5uZntFn1rzjeSP4PZq3PF/weV007Qfk9NaOB00VbVNUkHlPXmshddKbASmLkuqV+vBUwNNRIyltjb/lf6/39xcQlABTGX4OIu1hhyIkkte1iVJjobDiJT0Idx9HA79dOAyvgMSCfhQCSp9aT2HwyWUj3XrgXFpLJXsZ2VGaT7lz+0+5WVbxKcBUUfGw34ydrQTlN0bmFHBM6C1LZzVQB9gGr0TVPj1dk+yPcn2JOsn2fvNev+kdMqzDKUvzxXfuAGpLFAh1Nyt2Hte7Xm159U2Xh39yE5yzm3hDUz5DKX/CnV7Y0e3XNVyv7fc03NPz59Izw99H3ojCS7K2tUhaq00KMZqrTFzX7LCq3eNlM52uy7uubbn2p5r27jWRKREW6i2Lew7ubYgCYm7lfBgfSWMl1t6iY1rm6KedQ3gWguSkGXgU5PE8bJQxjbJslLaNvHMJWtGNaepCCXqXgfedfUkFKPCP+7Lq3ux3to6oWUtMwrHcHV2fQOfqcU5Xfg4O5NPVR8Pj4e9Wp3oFo2jyzEEhKEq12aKTq2jfK/aILyLYt9QNshqze3i2g0L4UmRatSj2uVmVS2tPa/d3QchErV/PnU19OXvG18GXObKD2/LYdMRstY2JcPB4WbpXY49g5gqy1r6aVROw/6IrgFjojbWAYqI4Ayl8UXengp0Yl/DG/irbbAeDlxSQuV0s+eU26JOB0yVMQvDVtdUqDQuKZdxa8LEJ6Pz24vT0cHX8cnZxfXZweFgOLAP1oOvlLEllWt+fEbbv997jnr5uHz8yJlImyWLDzauBOXS1Y2Ht2xZdkd6WUYikmzr2U+ilip3ZLlMqcFbLZrGPf5eo16Q5G7yyCxPxYwb9z8jSU6FeX6+so7tTWssewv/w6lLb7jah1Qu/IQhandHInLvji+2Hmw0kyYiBdIMtQ9BkB4xhpVd07NxpuLIt5oOP5851vwLGRKGfA== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get decision requirements

+ + + +Returns Decision Requirements as JSON. + +## Request + +

Path Parameters

+ +The decision requirements is successfully returned. + +
Schema
+ +The decision requirements request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The decision requirements with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
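As a rough usage sketch (not part of the generated specification), the Python snippet below fetches the decision requirements as JSON and handles the 404 case separately; the base URL, token, path, and key are placeholder assumptions.

```python
import requests

# Placeholders -- substitute values for your cluster and resource.
base_url = "http://localhost:8080/v2"
key = "2251799813685249"

resp = requests.get(
    f"{base_url}/decision-requirements/{key}",
    headers={"Authorization": "Bearer <token>", "Accept": "application/json"},
)
if resp.status_code == 200:
    print(resp.json())   # decision requirements metadata as a dict
elif resp.status_code == 404:
    print("No decision requirements found for key", key)
else:
    resp.raise_for_status()   # surfaces 400/401/403/500 responses
```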
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-document.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-document.api.mdx new file mode 100644 index 00000000000..3b84aed358c --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-document.api.mdx @@ -0,0 +1,62 @@ +--- +id: get-document +title: "Download document (alpha)" +description: "Download a document from the Camunda 8 cluster." +sidebar_label: "Download document (alpha)" +hide_title: true +hide_table_of_contents: true +api: eJztV0tz2zYQ/isYnOwpTSqp06a8aWwnVSfNeBylPdg+QOBKRIIHAywsczT8750FSVm25dR9HH3wQ8Jiv318u9jdcBSrwMtLfupkNGCRX2fcNeAFKmdnFS/5CnB7mPEKgvSqoVNe8lO3ttqJiglWDTJs6Z1hWAM7ESbaSrC3TOoYEHx+Za/sR4fAsBbIsFaByeg9WNQtc1a3LMSmcR4DE5Ype2TAON/e6Q7oPGRsXStZMxWYdcgMCEJ1njXeVVGSaSwGSGhlWVqHcGXnBAa2apyySFeFZZdCN7VgSxAYPVwfFNKZxlmwGAoQXrdHQkoIoUhy/e+jQTrkpjpkwlbMiJYtgIW4+AISGToma2FXcGWVZctIwsyDBhEg5MmiK8sz3ggvDCB4Cv+GW2GAl3z0dFbxjCsKcSOw5hn38C0qDxUv0Ud4mId5DWx2ytwyBX4bLnSsGjKU84wHWYMRvNxwbBtCC+iVXfGuy7YGpAjfoX+L4Nt78Euhw3Pxk7JdK+4fE1O+b9c1IYfG2QCBzl9PJvTnMfhW51qELRpULMSUwWXUuiUo6SwSkcsNF02jlUw8L5xEwKOAHoShs6csyvjSeSOQl3yhrPAt77quy/jx5Pjv7FJYJ+9X6gYsBYssJf4uXbRVnjjxhHWNdwsN5ocvgfTuWncfcMrOe0lWAQqlmespSSFJgguomLLs8uLdCfvl+M3P1wc1YhPKoliv17lfyiOoFDqfO78q/FLSD8kd5mxeg4eR66KqFGEKTTXXgEcFgYUGpFoqSQknTwezGYWw928IZm8W1cD28l2ovxPy6NWjDjRlny9mTFVgUS1bZVePodOdpYiadIiFi1gutLBfeZdxVKj3gj5ECdEY4duR4vcBuowHFBh33ODKIqzA79qvLP74+pFuosmv8/k561Uw6SpI7Sz1xwGInDDKKhMNL48nk4wbcdt/+mky6UgnZfwZnlgGt40WNlHroTvKMkMl22tLjikbUFj5f2XGebVSD3FzakFjLvhA4tPeo77A3uwr/Cm9EQieeAjeO8+cTO9JRS+ETuqp+kdsamIQ8KXWXmrtpdaeqrUu4wawdsPolyYVrHnJi/EtC8Xmbkzp6P0GfzPOMdFrXvJNXzZdWRSb2gXsyg2NdV1xQzm5EV6Jhe6ZSMd9eY200U4Knb7elz46oFlldOtuyrw4+zRn7wXCWrQpnAR5X/XbydvJXq0k+oTG6fmM9R725NtpCKNaquy9anvh5yhOs04AGb3C9hNd68OzAOHBTyOlYEuKAS9pp8+9EM+Gf96NVPntz3nKtrJLl64PWX9sCGUFfOgtn+SvHjPsfJYKRTpjok3d0q76qUbsmfR5xrWSYEPi8jBbjmIf+hP2R4/IXuWUlJ45Y5NcKazjIpfOFLK/tv270G5RGKFsMUCE4mT6++ePp9OjD7OTs4+fzo5e5ZMcbzE537iARtgdO7Zby3Y6O0iz/eFDrzd3r8TLqvPvV52BtQi3WDRaKEt1lNK9GZrL5XbxCTzj5c4WdJ0NHeKSbzYLEeCz111HX/erCfWcSgVqJ08sJ7tJ/G97yl4/vkJ7b226ETqSFKeCHjvdPzTz4GJYtw7Z8za7vZYNXwrb7po1WrwT4+66y3gNogKfDO0FplJCgztXn1yYyNPtm/H+jHrOX7yFrTk= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Download document (alpha)

+ + + +Download a document from the Camunda 8 cluster. + +Note that this currently only supports an in-memory document store, which is not meant for production use. + +:::note +This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change +in future releases. +::: + +## Request + +

Path Parameters

Query Parameters

+ +The document was downloaded successfully. + +
Schema
    + +string + +
+ +The document with the given ID was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
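A hedged example of downloading the document body with Python follows; the `/v2/documents/{documentId}` path, base URL, token, and document ID are assumptions for illustration only, and the response is streamed so larger files are not buffered in memory.

```python
import requests

# Placeholders: base URL, token, and document ID depend on your cluster.
base_url = "http://localhost:8080/v2"
document_id = "<document-id>"

with requests.get(
    f"{base_url}/documents/{document_id}",
    headers={"Authorization": "Bearer <token>"},
    stream=True,                     # stream the body instead of buffering it
) as resp:
    resp.raise_for_status()          # raises on the 404/500 responses listed above
    with open("downloaded-document.bin", "wb") as f:
        for chunk in resp.iter_content(chunk_size=8192):
            f.write(chunk)
```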
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-flow-node-instance-by-key-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-flow-node-instance-by-key-alpha.api.mdx deleted file mode 100644 index 5c5d725101e..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-flow-node-instance-by-key-alpha.api.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: get-flow-node-instance-by-key-alpha -title: "Get flow node instance by key (alpha)" -description: "Returns flow node instance as JSON." -sidebar_label: "Get flow node instance by key (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztGV1z2zbyr2DQl2Sqr7RuL8c3WqYTNrKsEyn7OrZHA5FLEQ0JMABoWaPRf79ZkJRlUU7iu1yf/OCRsVjs9y6Wiw01bKmpc0PPM7kiQsZAuNCGiQjoXYfGoCPFC8OloA6dgimV0CRp4RKmyR/B5bh3KxzHEdLArQhTrgmIuJBcGMI1YYKwrEgZSYCZUgFhIiZCGgKCLTKIiRRkyPJSxIxEWakNKE1kaYhMiEmBLORD71YEAHZ106C+J1MvCIk78Ym8B3XPYXX3ps8KrrtGykz3owqxywreVaBNC9BtzvXy+KcvJag1br29FYlUJCmVSUGRGAzjma40vBW0QwumWA4oJXVuNlSwHKhD0ThoG782zSdY0w7laL+CmZR2qIIvJVcQU8eoEg6NHKZoTc2XAmLyGdaN9m2jd8gq5VFKWGQ0OoCRUvAvJRAegzA84aAIamDQEe3jPdqhOkohZ9TZULMuUHouDCxB0Q5NpMqZqUC/n9Dt9g4l14UUGjSe+GUwwJ+29EfCg2uiyygCrZMyy9ZE2UiCuGcNGUlhQBikxooi4xFDav2/NJLctIWUi78gMugBJQtQhlcCId/xgeWPafZ1e/8Aqx6azgqKyh9I1hakxns0HAYA01pGnBmIiZFf4/tNH+4EOYOEC46MvylKvEP9/wvjx98tC49fLIo2ioslMtaGKXPGDLTZIZSsUhDHwtieg7hNc0+7mBnoGp4DMgIR/xdsUEmdvoTPLvSfMeAek/hb0duyllEAEyxdx33DTEoSJfN28GbAYi6WL/VOBWjxWheAtfCRCKsDAmKiwd4S9ywrQR8zG4gyx2tuNg4m3tA/970z2qGT6eXQCwLaocHsdP648q68cTh/CgtCdxrO7Q7tUH8cetML78x3Q28+dMPhx+Nb4cfp5fVu6/RyNj5zp3/uAN74bPd/4E2v/KE3D93gE+3QqTf0/KvdchZ40+b/C3c8c0fNqv7x/j0czQI88cENvWv3TytKGzZxp+5o5I32QJW6p27gne1BA+9fM2889Obno8tr5Dobhf7cHwehi9DTyzPEGrqj0dwdhv6VH+L6dBb4Yy8I5tPZaCd8MJz6k3C3Qq0btcafxpfXY3pXZeWxVAkQ/NTz+23Hi0PACuuh6JcXk5EX2khAl/ljFxcoSsq0LyJb8Y8IlMqVxuS1fcEzkU1SZjseXpPpET8heN8Tlmm5v/MJ1ng/Fkre8/hJ0i+kzIAJzIk95LZAjaiHBXrFTfo/lmgDggnzXFmpdrGm1E3KTtt2XiMxbjIEnR9c1L6BnG63iHLygqbiAxiSMJ5B3CMXUkHTohGmYGdOwoWVrGldyELG66+2HYWSiwzyn9vtx1OpXDKpMGu+pGpLqphExEXF/WZ6PiT/PPntH3dvUmMK7fT7q9Wqp5KoCzE3UvWkWvZVEuEf4r3tkTAFBSRna7IAwuLY3nksI48ND9EFRDzhUVVdrcJWGDR7pd/Xu6Wmyj57wZSKt5oll8ymftMJrW1tP2RtzySszJAGW8jSOIuMic/0MQDaTA+56DLPmdr1vk8Z1KWi1N/sW3/95Wi79zEMJ6QiQSIMqd2NWDNCJXIueI4V42Qw6NCcPVSr3weDLdJEj3+HJoLAQ5ExYUPrUB0uSP4Yt70qz+uPrx/jGan4kh/y7T1JxjqIzyqNmiw8+e4srIsMkCW/h6pHXDFtv+sSWYrX3HzNzdfc/IG5+duxG9LFdsKAwjgEpaQiMopKpbAJSHm2+4JqeOMIArR5zbXXXHvNtedybduhOZhUxtShS7CRY7+Cab+Z8XUbMXV/c2Tut8UJG6j7ZkJYqow6dFMl0tbp9zep1GbrbAqpzLZ/j166Z4rjNNQ6FberhGsCKZMRyyz4mENxA8eQjaIHI9IPzMCKra2BkeVT0u8H7wdHqSLqMxRx6FppWIXjXoloyGKuHyVbIX8PYTt71BCVipt1gMcq8yyAKVBuWY0m6jCp+Vnq9iPKItFO/c95Ezx/XIfW/1wk0h6v46AtCHoFlK4kH/TetWNu4tvUiWSel8LWT7GsGiO2p1g908akyngEQtvorsfGDdqo2iFXFUfyrodOqSKnKZtLbtJy0Ytk3oyyd7+LTC76OeOiX7PQ/aF7gVOH7sgfeuPA677rDXrmwVjlC6lNzsSeHPajqt3lLda2s3tjB/hvDy2webxDXh8Jnj4S1FFp4MH0i4xx+zVv3bmpy8kNbZcT2qHOsYeEu05dE27oZrNgGmYq224RbCWizs3dYwmxNSfm2tqNOgnL9OFrw77j3kzrd4m35G99gzhqoRrIxNrWxKzEFe3Qzzj/OPrGsrVzG2AxKKt4helGERRmj0brgQFry67Mf/CwKPwHTBtN3A== -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import 
SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get flow node instance by key (alpha)

- - - -Returns flow node instance as JSON. -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The flow node instance is successfully returned. - -
Schema
- -The flow node instance Get failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The flow node instance with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-flow-node-instance.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-flow-node-instance.api.mdx new file mode 100644 index 00000000000..2a840442a7f --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-flow-node-instance.api.mdx @@ -0,0 +1,63 @@ +--- +id: get-flow-node-instance +title: "Get flow node instance" +description: "Returns flow node instance as JSON." +sidebar_label: "Get flow node instance" +hide_title: true +hide_table_of_contents: true +api: eJztWV9z4jgS/yoqPe3WOcDsZndneXPAmfEOIRw2yW7lUilhN1g7tuSR5DAUxXe/atkGgp1Mcjd3TzxQWFKr//9abWtDDVtq2r+jl6lcESFjIFxow0QE9N6hMgfFDJfCj2mfLsEg2VjG4NdEDo1BR4rnSEX7dAqmUEKTRYMfYZr8EVyPO/8S1KE5UywDAwqlb6hgGdA+xV3igP0nWFOHcmScM5NQhyr4UnAFMe0bVcCx9DBBMZovBcTkM6yJXBCTQIs2DlklPEoIi4xGzRgpBP9SAOExCMMXHBRZSEVMwtuM6VCH6iiBjNH+hpp1jtpzYWAJijp0IVXGTDn16zndbu9Rc51LoUHjjp96Pfxrat/iN66JLqIItF4UabomyroY4tKRkRQGhEFuLM9THtl4df/WyHLTVFLO/4bIYAQURtfwUqHFUWDR822Wvezv7+DVY9dZRdH4I82ailR0e8dhAjCtZcSZgZgY+ZLcb8Zwp8gQFlxwFPxNVeId6f9eGYToK3Xxh29WRRvFxRIFa8OUGTIDTXE4S1YJiLY0tvsgbvI8sC5mBs4MzwAFgYj/AzFopE7eImeX+s84cC/EH34rexveqpmPbYF7mT0WwTcLKCcajNc5YPHbM2FVBkBMNBhce2RpAbrNTyCKDA+F2TiYeAP/0veG1KGT6fXACwLq0GB28bAfeTfeOHx4OheE7jR8sCvUof449KZX3tB3Q+9h4IaDj+1L4cfp9e1u6eJ6Nh660792E954uHsOvOmNP/AeQjf4RB069Qaef7MbzgJvWj9fueOZO6pH1Z/352A0C3DHBzf0bt2/rCrNuYk7dUcjb3QwVZp74Qbe8GA28P4588YD7+FydH2LUmej0H/wx0Ho4uzF9RCpBu5o9OAOQv/GD3F8MQv8sRcED9PZaKd8MJj6k3A3Qqtrs8afxte3Y3pfwrANGwFOP4384QH85hSwynqo+vXVZOSFNhMwZP7YxQGqkjDti8iW+BaFErnSiFaTwLOZTRI8LAThFZsO8RcED3jCUi0PVz7BGg/EXMlHHj9B+VzKFJhATBwQNxWqVT2uyCtukv+yJhsQTJjn6ki5ikWk6kp21jZxjcy4SXHquOXyDWR0u0WS8zd0Edg4gTZkwXgKcYdcSQUkBsN4qglTsHMp4cJqV/crZC7j9Yu9Rq7kPIXsH82e46lmLpmUlJVcUvYiZV4i4byUfje9HJDfz3/57f6HxJhc97vd1WrVUYvoDGJupOpIteyqRYQ/pPuxQ8IEFJCMrckcCItje9CxlOy7HKJziPiCR+WBZw22yqDrS/tebpHqSvvsqVIo3uiQXDKb+nX7s+Zi2RRt9yxYkSIPNpeF6c9TJj7TfRI0hR5L0UWWMbVreJ8KqMpFob/ZrP78U2uP9zEMJ6RkQSJMq90pVQlCIzIueIZV47zXc2jGvpajX3u9LfLEiL/CEkHga54yYVPr2BwuSLbP206J9epN5PtERiq+5MdyO08AWSXxsLSoRuK7diTWsEtZ9FljveUxYYVJUGoJHxIpsEqw1FbiE8hOIDuBrB1kPzfz/VKqOY9jEDY9d3jjmghpCEtTuSrbhBOuTrg64aodV+evbiOrLhnIkj9C+VVjxUqsLWQhTo3lCZsnbH5HbP7S9orn4vuwAYV5CEpJRWQUFUrhW2zC0903v1p2dSiesHbC2glrz2Ft69AMTCKrmy57R2US2qfd+lbqrFZTdzctN1VbvBMC9VjfaRUqpX26KYG07Xe7m0Rqs+1vcqnMtvuIUXpkirN5WuYmLpeAqxMplRFL7XRbQHHBfjOuDB2wrBAxI+/J1AtC8oEZWLG1dTCKfMr6fe99r5Urkj7D0Z34pLSwTMeDElGzRay3si2JX8PY3pZpiArFzTrAbaV75sAUKLfAoOzSpJJnuduvgJaIOtXDZZ08f9yGNv5cLKTdXuVBUxGMCihdat7rvGvm3MS30IlklhXC1k+xLBsjdmBYlBbaoEEOTXkEQtvsri46a7JRuUJuSonkXQeDUmZOXTaX3CTFvBPJrBuV23b/81TOuxnjoluJ0N2Be4Wfzc9G/sAbB97Zu06vY74aa3wutcmYONDjA5iWLu/Y5M3+0Hj1BW8VHwNfTTdPGbcfZq1hmwpYd7QJLOrQftsl8L1ToeOObjZzpmGm0u0Wp78UoNa0f3e/B5NFX8w1Pse0v2CpPr4pPrToh2l1p/wj+b/eH7d6qJpkYm2rQ1rgiDr0M37Kbr0f39pP8MBiUNbwktKNIsjNAY/G5TCibFfwPngIj38D1yj/XA== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from 
"@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get flow node instance

+ + + +Returns flow node instance as JSON. + +## Request + +

Path Parameters

+ +The flow node instance is successfully returned. + +
Schema
+ +The flow node instance request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The flow node instance with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
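For orientation, here is a small Python sketch, assuming `requests` is installed; the `/v2/flownode-instances/{key}` path and all values are illustrative assumptions to be checked against the endpoint definition above.

```python
import requests

# Placeholder URL and token; the path segment is an assumption.
url = "http://localhost:8080/v2/flownode-instances/2251799813685254"
resp = requests.get(url, headers={"Authorization": "Bearer <token>"})

if resp.status_code == 404:
    print("flow node instance not found")
else:
    resp.raise_for_status()   # surfaces 400/401/403/500 responses
    print(resp.json())        # flow node instance details as JSON
```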
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-group.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-group.api.mdx new file mode 100644 index 00000000000..0f2a5dd8cd5 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-group.api.mdx @@ -0,0 +1,56 @@ +--- +id: get-group +title: "Get group" +description: "Get a group by its key." +sidebar_label: "Get group" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/Ss7OCVTRpQTJ0150ziO6+ZjPI7dHlwfQHAlIgYBBlha5mj43ztLkLJqyXYOvVUHDUUCePv1HkDuSpBcBJFdiRPvmlpcJ8LV6CVpZ08LkYkFUhxJRIFBeV3zkMjECRJIWPAY5C1oCnCD7eRvKxJRSy8rJPSMvBJWVshQPPcTtiIRmhFqSaVIhMcfjfZYiIx8gw/NXJQ4GGF0kYigSqykyFaC2pphtSVcoBeJmDtfSYqP3h2Krrtm9FA7GzDwitfTKV8es6ADhEYpDGHeGNOCR2q8xYLNKmcJLfFyWddGqz5F6ffAGKsNrx5kqQcOKL0qYfQFNGHFoEMELv+OijhvnpNPOnq7zteuWJ/J0sNUJEMR1kiBvLaLJ4B4/oQXyhD0wmLxBasc/SdswwaM9F62O1ECErg5+xP4WvWrA4xoQA5oNMYec07Cz5W1SwRpMjjm95Sw4sddIg6nB7srzCzDQGCkuglwK40uQDZUoqWhlqA8FnwrTXii4rV3ucHql+cqP4OzOBMKJKkNxCqDDBAn5liAtnB1/vEIfjt8++v1i5KoDlmaLpfLiZ+rV1hocn7i/CL1c8U/nvdyAhcleoRKtpAjyKLQbFMauOcPhBqVnms15nlwGzi7UaRPky+ObtNlXYzG6626z+Dy/BR0n8V5q+1i23S/Zi4bwxgydw1luZH2RtwX9TmOziA0VSV9y7zaNtAlIpCk5nkyvXm9k7m/X1ycQYQA5QqEufNApQ6jIQ6i0lZXTSWyw+k0EZW8i3fvptOOMbniPxGJBbyrjbSRgA/C0RYq53HgTx+YtoGkVf9VZZzXC/3Q7kRsCmwg8YcY0SiyN9t8/+h8rosCbU/Ptd50AOsIpDFu+eROutfVXlf/e10dPvV6stRUxjNT36LlkxWWMspr7hq7F9deXHtxPSqut7ve/WcWOMueeYjeOw9OqcZ7LGBZatPD8/fAaHs41SIX91rba22vtW2tdYmokEo3tA/6fgCVIhNpf46FdDV+2Xb8PY/+dmwUNN6ITKyiYrosTVelC9Rlq9p56tJbLset9FrmJpKQh6OyRsYYp6TpH++qHA/wh+0Y0ZGsGltIeA/nx98u4EQSLmXbZ5JN/hv6/fT9dCcqT30EcXZ2CjHCyLuNvWCEZVHv/oLuJ/8McN/pCKgar6n9xstienKUHv2s4eyv+TDY69H5Pk4SyfDn48iSP/666Aut7dz1y4eCbzvCVUEfoufTycE2uc5Oe40oV1WN7TdKu4jvM3IjMGWaQBxQIoxWaENP46F7NE77HEfgz2gRDiZclMiccX9caCqbfKJclaq4bH3NjcvTSmqbDiZCejT7cvn1w+zV59Oj46/fjl8dTKYTuqM++NoFqqTd8IP7XotdHbHV/YHwZHNsKAPhHaW1kdoyMXr/V4NQrmLrJ4hEZOsm0HUysP1KrFa5DHjpTdfx4x8N+lZkV9f34ujVVOjA/wuRzaUJDztrm+6+OB96cC9hq5O0092x+2PbXpGm4TuRiBvuVt03rrrrLhElygJ971EcnimFNW0s3GqoMZ3XW8jJMfPwH24DGmg= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get group

+ + + +Get a group by its key. + +## Request + +

Path Parameters

+ +The group is successfully returned. + +
Schema
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The group with the given key was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
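A minimal helper in Python might look like the sketch below; the `/v2/groups/{groupKey}` path, base URL, and token are placeholder assumptions, not part of the generated specification.

```python
import requests

def get_group(base_url: str, group_key: str, token: str) -> dict:
    """Fetch a group by its key; raises for the 401/403/404/500 responses above."""
    resp = requests.get(
        f"{base_url}/groups/{group_key}",
        headers={"Authorization": f"Bearer {token}"},
    )
    resp.raise_for_status()
    return resp.json()

# Placeholder usage:
# group = get_group("http://localhost:8080/v2", "2251799813685250", "<token>")
```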
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-incident-by-key-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-incident-by-key-alpha.api.mdx deleted file mode 100644 index 08b53456ffd..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-incident-by-key-alpha.api.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: get-incident-by-key-alpha -title: "Get incident by key (alpha)" -description: "Returns incident as JSON." -sidebar_label: "Get incident by key (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWN1v2zYQ/1cI7qXFFNvt0q3Tm2YrmdpENmwlG5YEBiWdLLYSqZKUHcPw/z6QlBx/dfHW7i0PgSPyeJ+/uyNvhRWZSeze4YAlNAWm8IODU5CJoJWinGEXj0HVgklEGwpEJPowGYade+a6LuMK7lmUU4mApRWnTCEqEWGIFFVOUAZE1QIQYSliXCFgJC4gRZyhPilrlhKUFLVUICTitUI8QyoHFPPHzj2bAJivu5b0PRr7kwh5owDxOYg5hcXDqy6pqDxTnBeym1jCM1LRMwFSHSyctec6ZfrDlxrEUm+9vmcZFyirhcpBoBQUoYW0Ft4z7OCKCFKC1hK7dyvMSAnYxa1LPsISO5hqb1VE5djBAr7UVECKXSVq2HdplAMiUtIZgxR9hmVrdcvPQYucJjkiiZLa3QTVjH6pAZldmlEQSOurtNvbQx3sYJnkUBLsrrBaVlZDBTMQ2MEZFyVRdunnc7xeP2gtZcWZBKlPvO319M+hppvAU4lknSQgZVYXxRIJgwxIO8ZFCWdKA8hdYVJVBU2I5tH9JDWj1aFqPP4EidK+FbwCoahVY9unJ9jxnGv/oyvXRi1t6gAyyqjm/g0aNbxQumFmwk6k5AklClKk+CkqBOmWBlIJymanyqPpqeICJhVhCXwHe2nD6iRrQQguIiPtORvbaoXMGaSJ0YKqHBFrMqRIgikmc1LUIHVuAKtLXeluwsnI7wcXgT/ADr4JP4bDP0Ls4GA4vfZGoyC8nPrj8XCMHfxh+Ns0HE7HfjQO/Al2cH8YDoIoGIYbEv/PaOz1o+mtd3Xjb1b73tWVP5j6V/61H0ab5Zvwdy8cmB29MvVv/TDCDr72JxPv0p9Ogr/8qf9n3/cHRrmGzcDvB5NtoU8LWqy3o9DFcHw9DYfR9GJ4Ew7wQ+vYa5CSzE7wrW9cWlryJnssRQzSlCnrdMpQyQU0xdJEMCv4IuQpnApTTY8YT+EUdG6Yfzs8t+T+G4AmAkxNi2h5gh8HRIFG4KZ8tsc720qmRMGZ0gzXDpaKqBM4T1TDekfFkxPA60fBra9hF1yOvcggbexPhle35t+RHw6C8NIg5xOPv+bmQ69+4rGtuDRD8Eilks62S416Bx1rvy05WAmAke6ip1U6onKUCV4e1pw5JTsIk+YScjz2EhVAUspmxyOvgBGmLKwPdbC7GsN7fVwbuGvCWjOjqoCtIhYoKPF6rbfOn+3Bl6BQRmgBaQddP6WfRESYsjunKaQ6N7UebX9HMU+X/9ilK8HjAsofD7v1ri4eGlnKRi6yXVx31rZGGOl344s++vX83S8Pr3KlKul2u4vFoiOy5AxSqrjocDHriizRf5rudQdFOQhAJVmiGBBJU9O3SIGe7gdIVpDQjCY2SsZgo4x2srXvny8X6ivdZYPCWtADmHnoZhy094Wlwci+aHMmI3WheZCY18qNC8I+46dwPwdmD8m6LInYXAZ3BTTVoZbP1ryf3h5NlN+jaIQsC5Ro+G9uPY0gbURJGS11lTjv9Rxckkf79XOvt9Y8dcRPsIQheKwKwgy09s050jbaRPxOkeGCzui+3M5O6jUgHliL2tw7fyb3mgoGaEbnYG9wCyLNsybjNXvJyJeMfMnI75iR7451Q48h7WWhcWgvojxJaiH0DSOnxebt0crWL3GQ6iXXXnLtJde+lmtrB5egcp5iF8/AIMfcwHG3bX6yu9qazKz1rAnEvJ2H1aLALl7ZvFm73e4q51Kt3VXFhVp35zoocyKonv2ZGOptm18tbgqekMIsH4uf3tBDt9auvYHgJVGwIEs7xeBij/X73vve8ccDF+orHPWI0Vpo0bdVEVq2OrWPsrXEpzA2UzgJSS2oWk70MeueGIgA4dU7r6BGnuGuvy0Rdpp/LlqsfPgjMuGmLOPmeBP2Q0V0VEBIq3mv8+YQYqPAZErCy7JmplyyWfvQfOLXTHB1DhU0ASYNmJshaUt2ZXfQrZWI3nR0UCxy2io5oyqv407Cy3Zwu/mNCx53S0JZtxEhu33v+iYceGdXQd8PJ/7Zm06vox6VMb7iUpWEbemhX06bq1y8NNe3V2ZI/Xrf7tVTo3gZf2/1FQWPqlsVhDKdEyZ0q6ZS3G0GtxI72N2e4j44Tbrf4dUqJhJuRLFe62WjAHbvHp6qgyknKZXGTdjNSCH3x+fb0Xk1bgbtr9H/NlQ/anyzSNjSlLai1l/YwZ/1uGRniL3Wg5QcSArCGGcpvCSBSm2dPZiZ69KwKcqXvs7pvwGDE9hq -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; 
-import TabItem from "@theme/TabItem"; - -

Get incident by key (alpha)

- - - -Returns incident as JSON. -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The incident is successfully returned. - -
Schema
- -The incident Get failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The incident with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-incident.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-incident.api.mdx new file mode 100644 index 00000000000..2c867668475 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-incident.api.mdx @@ -0,0 +1,63 @@ +--- +id: get-incident +title: "Get incident" +description: "Returns incident as JSON." +sidebar_label: "Get incident" +hide_title: true +hide_table_of_contents: true +api: eJztWd9z4rYW/lc0emqnXmDbtHcvb77gpN4mJgMm3blphhH2AWvXlrySHMIw/O93jmQbEkjDbbdvPGQClnR+ft85RmdDDVtq2r+noUh4CsLQB4/KEhQzXIowpX26BNMuejQFnShe4irt0zGYSglNeL2BME0+TkZR5w9BPVoyxQowoFDDhgpWAO3TZu9vsKYe5SimZCajHlXwteIKUto3qoKXuuIMCNOaLwWk5AusiVwQk0Gr2yOrjCcZYYnRaAcjleBfKyB2lS84KLKQipiM7wzuUI/qJIOC0f6GmnXpLDSwBEU9upCqYMY9+uWCbrcPaKUupdCg8cSPvR7+O7S0jQjXRFdJAlovqjxfE2VDBqkLUSKFwcD2N5SVZc4TG/fuZ42CNoemyflnSDAPpcIsGe7M2I/pCX68Fdq/GMqtNQtdHcKCC47S/4ZFtSyStsJs2pnWMuHMQEqMPMUERHFrgTaKi+Wp+sLhqepCoQ0TCXwDf3kt6iRvQSmpYqvtLR8bFhN7huBmsuImI8y5DCnRYJBWjyyvQCM3QFQFlodpNLkNBuFlGAypR6fRb9Ho94h6NBzNbvzb2zC6mgXj8WhMPfpx9J9ZNJqNg3gcBhPq0eBTMJjG4SiaXYeTOIiC8fP1wSgahna9ERF8isf+IJ7d+dfToH068K+vg+EsuA5ugihuH0+jX/1oaFfwySy4C6KYevQmmEz8q2A2Cf8bzIJPgyAYWuNrMcNgEE72le4eoFr/mUGXo/HNLBrFs8vRNBrShybwN6A1W54Q+8CGvHDba3a5HXPQtoy5pHBBCqmApGAYz22GF7lcRTKFU2GM+4mQKZyC3lb434fvTu//BeBEga15MS9OiOOQGUCEtuW1Od7ZNzJlBt4ZFLj1qDbMnCB5YmrRz0w8mSD+IA7vAoRdeDX2Y4u0cTAZXd/Zj7dBNAyjK4ucz3L+WpgPo/pZzl1F5gsCT1wb7e2H1Jp30NFeti2PGhBMGAehQyVuFfHyoqeisOdh26IwbnLYKyihgYJut7h08WY/xCYP2pAF4zmkHXKzg7smTNky+MhTSJELaEvTb8lcpus/7ZqlkvMcih8Ou+dze3xy63bWeonrqtjpGk5a7ffjywH598XP/3r4LjOm1P1ud7VaddQieQcpN1J1pFp21SLBP9z3fYfEGSggBVuTORCWpraPsJzs+jXRJSR8wRPHB+uwNQYD7fz782ZvXqn2bdYrxQ+w5JPpOGz695qL5aFqe2bBqhxlsLmsTH+eM/GF7lL+Fol8oquiYKp9OXuuoGZjpd+sMT/9eJQNv8bxLXEiSIKlpn0LqRWhEwUXvEBWXvR6Hi3Yk/v2S6+3RZmY8RM8EQSeypwJC62X7hwp003R+0aZkYov+Uu9nWf0q0E8dB41/Ht/nH8N7XKWfNFYv3hKWGUy1Orog6XUGsFyW9nOJDuT7Eyy4yT76RDvl1LNeZqCsPBs+cY1EdIQludyBemZV2denXn1Oq8u3nh5rF93gSz5I7jrgBVzDFvISpxfJ8+MPDPyGzLy52M/53xBMMoKcehuLWSSVErhz9GM5+1FVqO7boVnrp25dubaa1zberQAk8l64mKnJyajfdptmp/ubvau+bc4uAD12AxXKpXTPt043mz73e4mk9ps+5tSKrPtPmJSHpnibJ47KOKy41eDm1wmLLePj+UPF3CC0/g1YEUlUkY+kHEwickVM7Bia3clLtUL0R96H3rH77ylMq9I9G9D4jx06NurCI1YpPZRsW7zKYLtSEdDUilu1hM85sIzB6ZA+RXmoEVFrc9Kx+9uE/XqD5cNVj7+Htt0c7GQ9nid9kNDMCugtLO813l/CLHb0DIlkUVRCVsuxbK5ldzJS/JKG3TIozlPQGgL5nri1my7divkzmkk7zuYFIecpkouucmqeSeRRTdxx9r/81zOuwXjolur0N2BfzONhv6763AQRJPg3ftOr2OejHW+lNoUTOzZcQWmfZV76ehm1xnemCrWuTDwZLplzrhAdFgnNjVn7tt5mKYe7e8Pxx68Gvj3dLOZMw1TlW+3+PhrBWpN+/cPO55YYqVc4+eU9hcs1y+nkvtmfzeu55ffk39sVnnU+fohE2tL8rzCb9SjX/CW+dlscIv3zxmwFJR1zu3wkwRKs3f2YBSJJGnL01WA6P4fvBNhLQ== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get incident

+ + + +Returns incident as JSON. + +## Request + +

Path Parameters

+ +The incident is successfully returned. + +
Schema
+ +The incident request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The incident with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
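The Python sketch below shows how a caller could branch on the response statuses documented above; the endpoint path and all values are assumptions for illustration.

```python
import requests

# Placeholder values -- adjust for your cluster; the path is an assumption.
resp = requests.get(
    "http://localhost:8080/v2/incidents/2251799813685260",
    headers={"Authorization": "Bearer <token>"},
)

# The statuses documented above map to these branches.
if resp.status_code == 200:
    print(resp.json())                       # the incident as JSON
elif resp.status_code in (401, 403):
    print("check your credentials / permissions")
elif resp.status_code == 404:
    print("incident not found")
else:
    print("request failed:", resp.status_code, resp.text)
```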
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-license.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-license.api.mdx new file mode 100644 index 00000000000..202e1a3e160 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-license.api.mdx @@ -0,0 +1,44 @@ +--- +id: get-license +title: "Get license status" +description: "Obtains the status of the current Camunda license" +sidebar_label: "Get license status" +hide_title: true +hide_table_of_contents: true +api: eJytVt9v2zYQ/lcIPm2YYjldu2UC9mCkaZah7YLEWR+yPFDU2bqWIlXyFMcQ9L8PR0mO4x9bMezBsGWS33ff3XdHtZLUMsjsXr5HDTaAfEikq8ErQmevCpnJJdC4lsgCgvZY86LM5B85KbRBUAkikKImCLeIT7rxHiyJc1U1tlDCbBA8hNrZAEFmrXw1nfLXcdQR5yX6Pqp2lsASg6m6Nqhj/OnnwIitDLqESu1TzUsQY0AMvoEUHr42EGgiE0nrGmQmXf4ZNMlE1p4TRNhLeFQGizFB+wS+AYEHoxYYRDyciIUyIW5zVIJfYZQ00ObOGVBWJtI2xqjcgMzi/i6RA9I87tyl/oTGCA/UeBvpR1rGFYOG9fGMDvSBPNql7BKJ4dxVFXiNyhxhy2GQsirBbiWTqxNLqoR19kRHHNTK/EqcnzGYfdFdIuGpRg9hRoerVyga6A6leDi8JyeRC+crRTKTDHBCWMGLDHNcXZdIQuLHsTtuBq/IruPVN4fsO7MCLYG3ygjw3nnhdLRxIVYlmihXQwholzHm0Wl/2eM+rr3LDVQ//JufZ+K63ykKIIVG9KYVKoh+Yw6FQCvub96di19ev/n54buSqA5Zmq5Wq4lf6BMokJyfOL9M/ULzh/d9PxHzEjyISq25zKookDmVEc/tIEINGheoBbkobQg7Wq7X98+9RIORjxar8bg3hGbi7uZKYAGWcLEes/qCOp5ZqMYwhspdQ1lulP0inyu8T7rLEpqqUn7TMS8JukT2I2oLiV2wBL8dP1r68dUeNvv4t/n8epxy2hUgFs4LKjGMRCyiQotVU8ns9XSayEo99U8/TacdY3LFv0GJ5bYwykZr7cpBKyrnYfBPFIY2kLL6/6qM87jEXd6J3O62wcRve0Vd32wVUOmGC4mdo6iUmUyfJ1YA/wieL7NWNt7ITLZ9n3RZmralC9Rlbe08dekjF+FReeRujzXj5b6fRp8Yp5WJfx+qFy9YVcHuAD0TNxe3c3GpCFZqHfPHlC+hz6Zn04OovPUI4uz6SvQKe7dtTYARllv5IGy/+VuAu+6BE6kbj7S+5WN9enJQHvys4ZxvXDDwRfQ4teMmmQw/3o3e+P3TPJYX7cLF40OZ9wPhqoAPfeTTyem+pa6vYmfwFdLYOB7tUqyQSqG2hGnTBGJBmzuSebleW7TDTBd/9ozidMJF6Z0zTsUlUtnkE+2qVPfHNt+5cXlaKbSjBUN6Pvtw9/Ht7OT91fnFx9uLk9PJdEJPFMXXLlCl7FYcl0Cbi2oYHjty2+f74D+9aw11IniitDYK44UaBbZD/9xv8vOQDD1wL9s2VwHuvOk6/vtrA34ts/uH55bhpy6RJagCfGy4L7Dm8mgNNcXeMg1T772Msb82nXx5wcb4G6S+o1I= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get license status

+ + + +Obtains the status of the current Camunda license. + +## Request + +
+ +Obtains the current status of the Camunda license. + +
Schema
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
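As a quick illustration, the Python snippet below queries the license status; the base URL and token are placeholders, and the `/v2/license` path is assumed from this page's context.

```python
import requests

# Placeholder base URL and token; no path or query parameters are required.
resp = requests.get(
    "http://localhost:8080/v2/license",
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()   # raises on the 500 response listed above
print(resp.json())        # license status of the cluster
```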
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-by-key-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-by-key-alpha.api.mdx deleted file mode 100644 index 3dccb8f653e..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-by-key-alpha.api.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: get-process-definition-by-key-alpha -title: "Get process definition by key (alpha)" -description: "Returns process definition as JSON." -sidebar_label: "Get process definition by key (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWEtz2zYQ/is76MWeUqSSOGnKm8ZxUreJ67GV9mD7AJJLEQkJMHhI1mj43zsLkpYsMrE9k+nJB49MYPHt81sSu2GWLwyLr9i5VikaAxnmQgorlGQ3AcvQpFrU/jFmF2idlgbqgSxwA39e/n0WXss4jqWyeC3nhTCAMquVkBaEAS6Bl3XBIUdunUbgMgOpLKDkSYkZKAnHvHIy45CWzljUBpSzoHKwBUKibsNreYnon6560bdwcXI5h9n5Kagl6qXA1c1BxGthJlap0kRpKzjhtZhoNHawMOnPhVX2yzeHek1bh9cyVxpyp22BGjK0XJSm9fBasoDVXPMKyUoWX22Y5BWymHXBeXcXm79wzQImKIA1twULmMZvTmjMWGy1w/0ozwsEboxYSMzgK65794dRD2BViLQAnlpDGeDgpPjmEESG0opcoAZywVImhsdDFjCTFlhxFm+YXddkvpAWF6hZwHKlK27bpTdHrGluyHJTK2nQ0ImX0yn9DK0fqQ9hwLiUVnNXlmvQvpQwC30kUyUtSktovK5LkXI6FH0xBLkZGqmSL5haSoFWNWorWoNGQz9qIYX1gcg8GI+gy/g+/hmvsM3ZQ9jGaiEXhKTRKKdTPBtFvOh2gRQ+0u4t9hK1EW0g78P+0248ztbxOLx6uaNhzhffV2L54qlBGaTzNBviD/sWiOypmixKLu0Y/NzvPBmSMIUtcWvgjhcWK9Y0JHP0BAZR00BjIeeixCyET0pj35SAa39mKTLMQEjfL3quQqKy9Q95VmuVlFj9OuTbfctmcN5Kdnqh5SG1nlYwabVfXbw/ht+PXv92c1BYW5s4ilarVajzdIKZsEqHSi8inaf0R3KHIcwL1AgVX0OCwLPMO81L2DIcTI2pyEUKVvUN0RtDsW/9+3F7aHc3e7naKWinBdvvxjP4fHHaN9S1kIuhan8m564kDJ4oZ+Ok5PIr21bBUOm+FuOqiuvdZr+joAmYsdw682CjfvVygE319Md8fg4tBKQqu99ESBE5UQkpKlex+Gg6DVjFb9unN9NpQ5iU8Ud4IgFv65JLX1r77ggJ1bZuvWNCGstl+rMyo7RYiH294T4jafFd61HPxKNHM3ElbOHRF2KJ0r9LVtz4L5lcOfnMzWduPnPzJ3Lz9dhbciaBoqypDlFrpUGlqdMaM/ooLu+I2+vuXp/PXHvm2jPXvse1JmAV2kJlLGYL9JVDN9aYRR2bJtvXoIk2Y/ethi6VqJf9rdjpksVs01KpiaNoUyhjm3hTK22baEl5WnItaALg00rbLeX6UipVyku/PJZS2pB3Vy6EvbHAB25xxdc+xKTyPvTb6dvpKCqJfgeRBg2th21B7jSJHpbYPgrbCj8G2F+3DaZOC7u+pGNteBLkGvXMUVruCqXT59HpuRViQffP+758/vx37itAyFz5410lDA1hO/dGNg1fDKvu/NSTJ1VV5aTvoHLRfhrxHce6OQ7RqhQpSuPruxuV9GIf2x3o74ovQkpKWzl941wIW7gkTFXVj2/ufpNSJVHFhYw6FSY6nn36fPZuNvl4enxydnkyeRFOQ3trvfO1MrbicseOD2jHvvOStf+2O/BDq8P9CGy2b5Hnwdj9wVhXlRZvbVSXXEjiiU/npmsoV2ykobCAxaMjnJug6wpXbLNJuMHPumwaWvY2sfjqZttEfNfJhPGRY3HOS7M/YttN3cFFN4w7hP918DYao26Ry7XviqWjJxawrzTHGh9vNTdNwArkGWrveSs6S1Os7Q7IYKxG7eWu1384ob7wH6hhsbc= -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get process definition by key (alpha)

- - - -Returns process definition as JSON. -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The process definition is successfully returned. - -
Schema
- -The process definition request failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The process definition with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-xml-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-xml-alpha.api.mdx deleted file mode 100644 index ace97d3f840..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-xml-alpha.api.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: get-process-definition-xml-alpha -title: "Get process definition XML (alpha)" -description: "Returns process definition as XML." -sidebar_label: "Get process definition XML (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWEtv20YQ/iuD7cVGKVJJnTTlTXCc1K0dGLbSBpB1WJJDcRNyl9mHZEHgfy9mScqKRCE5pDcdBIrL2Xl+35CzG2b5wrB4xu60StEYyDAXUlihJJsHLEOTalH725jdo3VaGqgPZIEb+HR7Ez7KRxnHsVQWH+W0EAZQZrUS0oIwwCXwsi445Mit0whcZiCVBZQ8KTEDJeGSV05mHNLSGYvagHIWVA62QEjUU/goHxD93awXfQP3Vw9TmNxdg1qiXgpczc8iXgszskqVJkpbwRGvxUijsQcLo35fWGW/fHWo1/To/FHmSkPutC1QQ4aWi9KEPsJHyQJWc80rJC9ZPNswyStkMeuy83abnL9xzQImKIM1twULmMavTmjMWGy1w/00TwsEboxYSMzgC6778A/THsCqEGkBPLWGSsDBSfHVIYgMpRW5QA0UgqVKdNtDFjCTFlhxFm+YXdfks5AWF6hZwHKlK27bpdcXrGnm5K6plTRoaMfL8Zguhy5/ur057imV37iUVnNXlmvQHkuYhT6TqZIWpfUe4ZONnqqS/h/4aawWcsGapmkC9nJ8MezJgPkVN5ArJzNInIVMofHAK/gSO+AOeFGXnKr2PT8ujmVkwA+qPBoLORclZiHcKo09soBrv2cpMsxASJ/KPveQqGx94Cav61KknFRHtVZJidWvn43ac/pbzyZw10p2dkElnzG1hJ9WMGmtz+7fXcIfF69+n58V1tYmjqLVahXqPB1hJqzSodKLSOcp/UjuPIRpgRqh4mtIEHiW+aB5SVHVqK1AA6bGVOQiBat6rHhnKLNtfF2OW7eIZtvNzxXYr8QOcJ0WbJ9SE/h4f92zYi3k4tC035NzV5IOnihn46Tk8gtrAmaFLQeN7lsxrqq43mXsjoEmYMZy68x3iffbywPdhKc/p9M7aFVAqjL8httkiIKohBSVq1h8MR4HrOJP7d3r8bghnVTxH4hEAj7VJZceWvvhCAnVM259YEIay2X6syqjtFiIfbsha55rwToQv20j6pl4pCNkmArj+4Cwhde5EEuUvrtSa6Be4NvDiZEnRp4Y+fMY+Wro3TiRQFnWhEPUWmlQaeq0xoy+Z8rti7O33b00T1w7ce3EtWNcawJWoS1UxmK2QI8cGjZiFnVsGj1/hppoMzSlNP67O2AG9bIfapwuWcw2LZ2aOIo2hTK2iTe10raJllSrJdeCBjhfWnrc0q6HU6lSXvrlobLSA5qc+nD3prr33OKKr32ayeS3qt+M34wHtZLoEY00J7YRtqDcaRS9WmL8oNpW+EcU+8HJYOq0sOsH2tamJ0GuUU8clWYLls6e1073rRALuj/vegj99e/Uo0DIXPntHRoOHaGqoDat5+PwxSHy7q49gVJVVU76LioX7ecR3wmsG8OJWqVIURqP8W7S7cVu2ifwT2sRXoRUlBY5ffNcCFu4JExV1U/f22tSqiSquJBRZ8JEl5Pbjx/eTkY315dXHx6uRi/CcWifrA++VsZWXO748R7t0KxFA+mZP3A43w9/8/waOZ1q7J1qdJjcmYCbrpibrqXM2EBLYQGLjxx9UFeZB11nmLHNJuEGP+qyaWjZe8bi2fy5kfjOkwnj88finJdm/5Rkt4Jn9915yjn8/2cng+npFrlc+3ZYOrpjAfuC62MnQs28CViBPEPtw21FJ2mKtd1Rsj0OoX6ybfDvr6gR/Af/Pdv+ -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get process definition XML (alpha)

- - - -Returns process definition as XML. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The XML of the process definition is successfully returned. - -
Schema
    - -string - -
- -The process definition was found but does not have XML. - -
Schema
    - -string - -
- -The process definition request failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The decision with the given key was not found. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-xml.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-xml.api.mdx new file mode 100644 index 00000000000..cdec028f056 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-process-definition-xml.api.mdx @@ -0,0 +1,75 @@ +--- +id: get-process-definition-xml +title: "Get process definition XML" +description: "Returns process definition as XML." +sidebar_label: "Get process definition XML" +hide_title: true +hide_table_of_contents: true +api: eJztWEtv2zgQ/isDnlqsYjlt+ljdjDTtZjcpgjTdLZDNgSJHFhuKVPmwYxj67wuS8qO2jPbQvflgWKLImW9mvo+iZkkcnVpS3JMboxlaCxwroYQTWpGHjOgWDQ03l5wUZIqun/ZuPevL9RXJCEfLjGjjsoLcovNGWWj3bAK18OX6avSvIhlpqaENOjQBwJIo2iApSLvr4S9ckIyIYLilriYZMfjNC4OcFM543PV+VyNQa8VUIYdHXICuwNU4gCaDeS1YDZQ5G5BR8Ep88wiCo3KiEmig0gZcLdbBjEhGLKuxoaRYErdoA2ahHE7RkIxU2jTUpaHXZ6TrHgJc22pl0YYVL8bj8LcP+cv11WGkICxYz8Jo5aVcgIkpRp4yybRyqFxEhE8uf2pkuN7DaZ0Rakq6rusy8mJ8NoxkwP2cWqi0VxxK74BrtKC0g5rOcFPPXRStpKFqP8JxdigjAzhC5dE6qKiQyEdwrQ0CR0eFtEBNXDMTHDkIFVO5yj2Umi/2YNK2lYJFguet0aXE5revVu+A/h7ZBG7SzN4v6PIrMhf4kyaWyfv97ftz+P3s1ZuHZ7VzrS3yfD6fj0zFTpALp81Im2luKhZ+Yd7zEdzVaBAauoASgXIeg6YyRNWicQIt2BaZqAQDp1dciWBCZlN8fY4TrCCz9eJNBXYrsUVcb8SeoCfw+fZypYqFUNN913FNRb0MNmipvStKSdUj6TLihJODTne9WN801GwrdstBlxHrqPP2h8J7+WLPduDTH3d3N5BMANMcv9N2cBSCaIQSjW9IcTYeZ6ShT+nu9XjcBZuh4j8RiQJ8aiVVkVq74QgFzYa3MTChrKOK/arKaCOmYtfviHSbWpCexO9SRCslng4rcSU7SdmjhRmVggP1rg5ek3yAGYwgqIxb5FFkR5EdRTYsspf7fH+vTSk4RxXpudabSK9ZKqWeIz/q6qiro64O6+rAcZYjEzYeYoWro82pmKGKnwbhXBsUFs+2x+PkUZFHRf46Rb4a+rCbKAhZNoGHaIw2oBnzxiAPH+Ny/dW38t2/Co9aO2rtqLVDWusy0qCrdd+ki701V5OC5L2aTjY9FJsvh1psXWwaZcSima06ct5IUpBlklNX5Pmy1tZ1xbLVxnX5LNRqRo2gpUwMDY+T7FZ0kppRGYeHyhoehLbfKtxz2njFKbyF24tPd/CBOpzTRUxzcPm96bfjt+NBq2HqAYuTm0tIESZSbm0UK7NB8YNm0+SfMRy7fhaZN8ItPoVlKT0lUoNm4kNp1mTp/UXr4T5NIll/8X5FoT//uYssEKrScXnPhn0goSpobEI+Hp3uM+/mMgqI6abxKu6iapqOR3QrMCa9dSGgjEjBUNnI8b5Nu5p2lZ7A38kjnI5CURJzVpvnVLjalyOmm5ylZev/Uuoyb6hQee/C5ueT688f301Ori7PLz5+ujg5HY1H7snF4FttXUPVFo4P6IYahQNt6eXm9fGzHeq+RFvdzK6Pbdkr7J4MKIxkpDjQxg4ie8h6odyT5bKkFj8b2XVh+JtHsyDF/cNGV1GIXNhwzUlRUWl3O97bgT277Xvjz+H/74MPpqcfpGoRdwfpwx3JyCMuDnX3u4cuIzVSjiaGm6ZOGMPWbRlZt7aDvNb73YeLoIv/AMMKlJ4= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get process definition XML

+ + + +Returns process definition as XML. + +## Request + +

Path Parameters

+ +The XML of the process definition is successfully returned. + +
Schema
    + +string + +
+ +The process definition was found but does not have XML. + +
Schema
    + +string + +
+ +The process definition request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The process definition with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-process-definition.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-process-definition.api.mdx new file mode 100644 index 00000000000..62125e25c28 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-process-definition.api.mdx @@ -0,0 +1,63 @@ +--- +id: get-process-definition +title: "Get process definition" +description: "Returns process definition as JSON." +sidebar_label: "Get process definition" +hide_title: true +hide_table_of_contents: true +api: eJztWUtz2zYQ/isYnJopTcqJk6a8aWwndZq4HltpD64PILgkEYMAA4CSNRr+984ClCWLdGzPZHrSQSPh9e3zW1KLFXWstDS9phdGc7CW5FAIJZzQit5EVDdgGA7OcprSEly/7WSzK6I5WG5E40cpvQTXGmVJMwAkzJJPV3+dx//iqYYZVoMDg+JXVLEaaEqbXfw/YUkjKhC5Ya6iETXwvRUGcpo608Ku+FkFhFkrSgU5uYUl0QVxFYyoE5FFJXhFGHcWVWOkVeJ7C0TkoJwoBBhSaENcJcasiWlELa+gZjRdUbdsUH2hHJRgaEQLbWrmwtS7I9p1N6i5bbSyYPHE68kEv4bajzhOWGJbjrNFK+WSGO9jyIMnuVYOlEM01jRScB+x5JtFyNVQSZ19A+4wBAbj60RQaNT1oxqiW5/wzJP+iPqI7+KfsxpCzJ7Cts4IVSKSAatbw+F8FPGyXyUo8Jl6b7DnYKwIjnwI+3dYeJ6u435483pLwoyVjwtxrHypUwbhRArv4g9ZT85OXirJgWLKjcHP/MqLIRFTOAkbBbescFDTrsM9Ry9gEBYNsI4UTEjIY/JFGyA5OCakJcz4M3ORQ06E8vVizVWS6Xz5Q541RmcS6l+HfHuo2ZRchJ29XBJ4iKUnbMyC9OvLD8fk96O3v938UjnX2DRJFotFbAp+ALlw2sTalIkpOH5w36uYzCowQGq2JBkQlufeaCbJhuHENsBFIThxel0QvTLo+2Dfj8tDWF3txGoroVsjBg+DKfl6ebYuqEuhyqFof6ZgrUQMlunWpZlk6pZusmAodFeKbeuame1ivyWgi6h1zLX2yUL95vUAG/Ppj9nsggQIwnX+sIigIDSiFkrUbU3To8kkojW7C6N3k0mHmBjxZ1iiCNw1kimfWrvmCEXqTd56w4Syjin+syKjjSjFrtx4l5E4eRIsWjPxcJyJa9pJxm8tmTMpcsJaV6HUQB/CDXglmLTxnmR7ku1J9ijJ3gzz/YM2mchzUD497/kmLFHaESalXkC+59WeV3tePc6ro2e/Ri6Eqzx6Keag/B+hBQtcK3Sr9i+We27uufkTufl27C/eVBH0ssE8BGO0IZrz1hjIsaMj74m7lt0/FPdc23Ntz7XHuNZFtAZX6b7P6xu0rqIpTXo2HWwegzZZjTULO+yIgpmvW7qtkTSlq0ClLk2SVaWt69JVo43rkjnGac6MYJkM2YnLgXLrVJKaM+mnx0KKC+q+XwjkmNWtyhl5Ty5Pr2bkI3OwYEvvYhT5EPr95P1kFBW3PoI4vTgjwcKQkFtFYg2LbB+FDZufA+x7xRZ4a4RbXuGx4J4MmAEzbTEs94nSy/PoOA6baNT/+LBOn0//zHwGCFVof7zPhKEidKvpSSfx4TDrLs48ebiu61b5CqrK8GrEtgzjsrUODYqoFByU9fnd9/nX2z6HFbJudB7GGJSQOevCWQpXtVnMdZ3wcOz+O5M6S2omVNKLsMnx9MvX85Ppweez49Pzq9ODw3gSuzvnjW+0dTVTW3p8BDfynrdr8mrz2Hj2/UYfHwd3LmkkEwozxhu26ql1TUeoRSOajnbib6KeH9d0tcqYha9Gdh1Of2/BLGl6fbOhk+dfLiz+zmlaMGl3b0q2bfrlsr9TeUX+1/uTUR/1k0wtfX2QLY5oRG/xOmL8lqK76SJaAcvBeMvD1inn0LgtkMHtCBLtvup9PEWG/AddXGgW +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get process definition

+ + + +Returns process definition as JSON. + +## Request + +

Path Parameters

+ +The process definition is successfully returned. + +
Schema
+ +The process definition request failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The process definition with the given key was not found. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-process-instance-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-process-instance-alpha.api.mdx deleted file mode 100644 index 4fa0c258110..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-process-instance-alpha.api.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: get-process-instance-alpha -title: "Get process instance (alpha)" -description: "Get the process instance by the process instance key." -sidebar_label: "Get process instance (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWUtz2zYQ/is76CHOlHo4ddKUN42tpGodV2MrycH2ASKXImISYIClZI1G/72zIOmHRNuqp0cdPJLA5bfYx7cLL1aC5MyJ8FKMrYnQOVDakdQRiutAxOgiqwpSRotQfEYCShGKDUmYLtvXb3DZvdJXOgxDbQiv9CRVDlDHhVGaQDmQGmRWpBISlFRaBKlj0IYAtZxmGIPRcCzzUscSoqx0hNaBKQlM4lVOzW33Sl8g+l+XjehHOB9eTGAwHoGZo50rXFwf9GShXIeMyVwvqgQ7slAdi462FjrNe908/uVniXbJj95e6cRYSEpLKVqIkaTKXNdbeKVFIAppZY68SxFeroSWOYpQ1I4Z1X75G5ciEIpdWkhKRSAs/iyVxViEZEvc9PvkKd+KQLgoxVyKcCVoWbAupQlnaEUgEmNzSdXShyOxXl+zIlcY7dDxG+/6ff7YQZly4MqI15Iyy5ZgkUqrMeYdREYTamIkWRSZiiQj9X44hls92OBjPZvpBg6ljVJodgiKMGf82i4z/YERsYutKdCSqmxocW2bL7ZtvMFllUTKbdnbbfFe0Kg6wURpxUij+IEuR1bpWauqBj6+exNU3G2FPPMJ80pQzrZ22G9onarisYNrWqDnFcCmY35795y6iZy92pZaIZCctZu0e6Rb0GvybMfYGkPj16YUv/wkUVvySVrUr9dWvf6f9X3KzOLMxPhqhUlmFqBNjDuoJIs45hK3WxpISpmTN7h0QKadmwE45CpLGHPfedN74/PDkbR0ImlH9nhxiCU95jovdEjlyJCo490BUccvwDmqwVp3QxiA0cjmD44no2/DAI7/+TI+HU6GJwEcD86Oh6fDE0ZHXebcrysxEYg7Of5eC3LzJkUZ3lfaJuAXrGzIIOtApNKNdKTiuoDXdk6NyVDqLUO/p+j7XmtgIJUOJFjMfGxUDQvGckP3QSLUUtOuZbOSrkrl+klzRoS5WK9Z4uiZfjZXMfoISW5mfMKYy0w918AKa6YZ5r++1MgGMK4k6+MAVJ0KJFccFpx6b8Dl+adj+OPo/e/XBylR4cJeb7FYdG0SdTBWZGzX2FnPJhH/sdzbLkxStAi5XMIUQcaxr14yg/seCK7ASCUqqgjjbfWbYf92/Znk+QZaPd2Ox10Sl1ZthWcAX89H4OOrkqXSs23V/p1ElhljyKkpKZxmUt+I+1C+lAQDcGWeS7tsTnuPFdScKt2Lp5/f3rUm2J+TyRgqCIi4oPHZrsltVsRG5EqrnBl31O8HIpe31a8P/f6aMTniO1iiAW+LTGqfWpvmKA25sVjnjzfs7hD+/0TGWDVTm3q3aMWLJ5VFDaEO2wnF51V0BJmMblzFJJAlpay1og9EFv0mZOb2JNuTbE+yp0l2tON/YQtFqceeqTn6QywsZNXMElPqfTPb82zPsyd59r7tdDjQwF62nIdorbFgoqi0FmNYpCq7I2Gju258e6btmbZnWjvT1oHIkVITi1DM0GeOnwCIXs2lTrNL11ttj+/WPNREO29GqKXNRChWFY3WYa+3So2jdbgqjKV1b84xmkureFzsQ8qPK7o1aZSZSGZ+uS2c/IAnZ42ZGzPkz5JwIZfVEMrYDeiP/Y/99lmGsfQEIk+lKwurZHxQIBpYZnr73MIL7wLsx70Oo9IqWl7wa5V7pigt2kH5aChT6/PofgDghURQf/nUpM5f3yc++konxr9eZ8H2RjgqzdBR9LuH2xk3HnniRCbPS+2rp55VRxz5wLB66M+UylSE2vncrufqjdhp9QTquSMcdjkoVeY0RXOmKC2n3cjkzaz/7nOamWkvl0r3ahWudzz48vXsZNA5HR0Pzy6GncNuv0u35I0vjKNc6gf74IuRrdPagb/YeLtp+Oq+cewvVF6+UKkTlPCWekUmlWbK+Miu6rpyKbbqighE2HIxcB3UxeFSrFZT6fCrzdZrXvb7EeHl9X0t8cUnVs57TYSJzNzmxczDUB6c11c4b+G565pWc+pFqZe+lmUl/xKBuOHRbNsNx/qaR3coY7R+m5XgIIqwoAcQW9cxXBLuavPnIXP5X1+1t5k= -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get process instance (alpha)

- - - -Get the process instance by the process instance key. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The process instance is successfully returned. - -
Schema
- -The provided data is not valid. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The request lacks valid authentication credentials. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The process instance with the given key was not found. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-process-instance.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-process-instance.api.mdx new file mode 100644 index 00000000000..77f040db8d1 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-process-instance.api.mdx @@ -0,0 +1,63 @@ +--- +id: get-process-instance +title: "Get process instance" +description: "Get the process instance by the process instance key." +sidebar_label: "Get process instance" +hide_title: true +hide_table_of_contents: true +api: eJztWcFy2zYQ/RUMTsmUluTETVPeNLKcqnVcj60kB9cHkFyJiEGAAZaWNRr+e2cBUnYkylbT3qqDxxK4eIvFvregFiuOYu54fMMvrUnBOSa1Q6FT4LcRNyVYgdLoScZjPgdsjCatTcQzcKmVJRnxmH8AZJgDKzfAWLLsHr+DZe8vzSNeCisKQLC0mBXXogAe8/J7f3/AkkdckqdSYM4jbuFbJS1kPEZbweZyprtc8oi7NIdC8HjFcVmSL6kR5mB5xGfGFgLD0LsTXte35MiVRjtwNOPNYED/9nAmHXNVSmOzSqkls4CV1ZDRClKjETQSkihLJVO/1f2vjuBWTxb4vZ/NRDEHwqY5a1fIJEJB+E1cJvkKKdIWW8onyhBDx9Z27cV2jHewZGbGMJduK95ex+5FratTmEktWzqtfTm0Us87XbXw2Xomm5z2OiEvPGF+EJTY1g37GayTIR97bE0H9H0A2NyYt2+eczcV8x+OpXHIUMy7Q9o/0x3ojXg6ciws6M368A9c+ek7xbrL35kyiwuTwQ87nCmzYNpksIdLh8LiqcA9eebNWSbwe1XQwBHKAggSdLY/IOjsBTiHDVjnahAiZjSQeIej6eTzOGKjPz9eno+n49OIjYYXo/H5+JTQQVcFnQnBjEd8bUefG0M6IFCigsea1KbhmpyNCaSOeC7cRKcya0pdE2dijAKhtwL9kgPmYLvLC8uFY4JZUAIhY7KBZcYybdDTHUELjfsWmGAdikq9M5wJQsHrmixOnqn89zIDnyFBZV8bZPdCyedKfWlNoqD46aWSP2SXwZJlgEIqFmo6E6RNMkz8brCbq7MR+/Xk519uX+WIpYv7/cVi0bOz9Agyicb2jJ337SylP7J73WPTHCywQixZAkxkmde5UOzxtGCuhFTOZMrQtKe4Xwztbzi9nz9qwtPtfKxJXFm5lZ4h+3Q1YT6/craUer7t2s+ZiUoRhkhMhXGihL7jj6l8iQRD5qqiELY50jYdNJqq3IvvCW/fdBLst+n0kgUIllKZmZlHbpMjCqKQWhakuJPBIOKFeAjf3g0GNWFSxveIRDN4KJXQnlqb4UjNCmOh4Y8PbP2i999kxlg5l5t+t2RFg6cholZQx92Cojc7cMiUSO9cUBITFebkNciHpRb8IoRyB5EdRHYQ2W6Rvd3m+5mxicwy0J6ea701R5dQyiye/Z1y0NVBV/97XZ3s2QdYSMw99lzeg/8ZxRYiKG1mKn3Q2UFnB53t1NnPXb+6hprRLlviIVhrLDNpWlkLGVvkUq1F2PpuDrjAxYPWDlo7aG1ba3XEC8DcNP1+35jHnMe836jpqF2l66+2W8g1NdbB3rdt/MoqHvNVkFEd9/ur3Dis41VpLNb9e8rRvbBSJCowkx4HubU0UiYVyg93pZMeUPe2DXMkikpngr1nV+PrKfsgEBZiGRqhxm5Avx+8H3Q3B43FHYjDywkLEQYyPikQLSwpvbsj6I33AfZXDg7SykpcXtO0sD0JCAt2WFFK1iRp/Hl031rzRjxqPpy11Pn9y9RnX+qZ8dMbFmwvhLLSNr75oHe8zbjLiRdOaoqi0r566nl4yRFPAktV5ZACiriSKWjnud3c7bRm5+EJa3rf7LhHSQnMaYvmXGJeJb3UFP00TFv/T5RJ+oWQut+4cP3R8OOni9Ph0flkNL64Hh8d9wY9fEAffGkcFkI/WQfdWW2+r20GvHo8MP7FHVeTL4QH7JdKSE0M8oGuGpnd8C2Z8YjHHXc1t1GjlRu+WiXCwSer6pqGv1Vglzy+uX2UltdiJh19zng8E8pt3pU9jfDVVXOr9po9d4PWGU4zKPTSS1tV9I1H/I46812XTvUt9YhBZGD9MoPhME2hxCcQWzdkpJB1qfowJmr/DVXf9us= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get process instance

+ + + +Get the process instance by the process instance key. + +## Request + +

Path Parameters

+ +The process instance is successfully returned. + +
Schema
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The process instance with the given key was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-role.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-role.api.mdx new file mode 100644 index 00000000000..7e5d0769e97 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-role.api.mdx @@ -0,0 +1,56 @@ +--- +id: get-role +title: "Get role" +description: "Get a role by its key." +sidebar_label: "Get role" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/Ss7OCVTRpQTJ0150zh26uZjPI7THlwfQHAlIgYBBlha5mj43zNLkLJiyY4PvVUHDUUCePv1HkjsSpBcBJFdinNnUFwlwtXoJWlnTwuRiQVSP5CIAoPyuuYRkYn3SCDBO4OQt6ApwDW2k3+tSEQtvayQ0DPsSlhZocgET/2ArUiE5vW1pFIkwuP3RnssREa+wftGLkqMJhhbJCKoEispspWgtmZQbQkX6EUi5s5XkuKjN4ei664YPNTOBgy84uV0ypcHDOgAoVEKQ5g3xrTgkRpvsWCryllCS7xa1rXRqs9O+i0wxGrDqZ+xOW0QUHpVwugJaMKKMQf/Xf4NFXHOPKeddPT1GtudQT6enfspSIbUr4ECeW0XD+Pw9AmvkyHohcXiE1Y5+g/Yhg0U6b1sd4IEJHBzdifwtepXBxjRgBzQYIv95VyEpxWzSwRpMjik9ZSw4qddIg6nBw+UFb83GAiMVNcBbqTRBciGSrQ0VBCUx4JvpQmP1Ln2LjdY/fares/gLM6EAklqA7G4IAPEiTkWoC1cnp8cwR+Hr3+/elYS1SFL0+VyOfFz9QILTc5PnF+kfq74x/OeT+CiRI9QyRZyBFkUmm1KA3e0gVCj0nOtxiwPbgMnN+rycc7F0W2urGvReL1V9Rl8PT8F3Wdx3mq72Dbdr5nLxjCGzF1DWW6kvRZ3Nf0VQWcQmqqSvmVWbRvoEhFIUvNrLr16uZO3f15cnEGEAOUKhLnzQKUOoyEOotJWV00lssPpNBGVvI13b6bTjjG54k+IxALe1kbaSMB74WgLlfM48KcPTNtA0qr/qjLO64W+b3ciNvU1kPhdjGgU2attvp84n+uiQNvTc603HcA6AmmMWz66f+51tdfV/15Xh498kyw1lT3eQt+g5dcqLGVU19w1dq+tvbb22npQW693fe/PLHCWPfMQvXcenFKN91jAstSmh+dDwGh7eKlFLu61ttfaXmvbWusSUSGVbmgX9B0AKkUmUn6NhXQ1HP07PsGjvxkbA403IhOrqJcuS9NV6QJ12ap2nrr0hotxI72WuYkU5OGoq5Evxilp+se76sYDfKYd4zmSVWMLCW/h/PjLBbyXhEvZ9nlkkz9Dv52+ne5E5akPIM7OTiFGGFm3sROMsCzp3YfnfvJTgPveRkDVeE3tF14W05Oj9OhnDed+zYbBXo/O93GSSIY/JyNH/vrnoi+ztnPXLx/Kve0IVwV9iJ5PJwfb1Do77RWiXFU1tt8m7SJ+zciNwJRpAnFAiTBaoQ09iYdu0TjtYxyBv6NFOJhwUSJzxt1xoals8olyVarisvU1Ny5PK6ltOpgI6dHs09fP72YvPp4eHX/+cvziYDKd0C31wdcuUCXthh/c5vI7+l+ru7fBY62woQiEt5TWRmrLtOi9Xw0iuewbY0EkIhs7ZFfJwPRLsVrlMuBXb7qOH39v0Lciu7y6E0avpEIH/l+IbC5NuN9G2/T12fnQcHsO99tHO30dez627cVoGr4TSexQrXt63VWXiBJlgb73J47OlMKaNtZtdc+YyOut4/0xM/AHOuUOvw== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get role

+ + + +Get a role by its key. + +## Request + +

Path Parameters

+ +The role is successfully returned. + +
Schema
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The role with the given key was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-start-process-form.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-start-process-form.api.mdx new file mode 100644 index 00000000000..31abfadc796 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-start-process-form.api.mdx @@ -0,0 +1,69 @@ +--- +id: get-start-process-form +title: "Get process start form" +description: "Get the start form of a process." +sidebar_label: "Get process start form" +hide_title: true +hide_table_of_contents: true +api: eJztWU1z2zYQ/Ss7ODVTWpQTJ015U/yRuk08HttpD44PILmSEIMAAywtazT8750FKMmWqCSd6aEz1cEjkQDeYnffg8jnhSA58SK7FZfOFug9lDhWRpGyRtwlwtboJF+clyITE6Rrko66uWfWVSIRJfrCqTosycR7JKApgueJMLauAjsGCXVcM/hsPpsLSwg0lTxTeUBT1lYZgpnSGqzRc3BIjTOglbnHMqD4Adw8m1xa9GAsgW/q2joCrHIsy9X0z0YkopZOVkjoOMeFMLJCkYluLyerVP0fOBeJUJxALWkqEuHwa6McliIj1+BmljdTXGYE9zgfiET4YoqVFNlC0LzmKMoQTtCJRPCGJMVbb45E294xvq+t8eh5xcvhkD+2Y4T6KQ++KTjWuNGr4mAZUyysITTE62Vda1WEfqVfPIMstrdl8y9YENfGcXdJxS1wJK5C7zbucc5dpG5HnO53k0wEoZGGmDl9mHEUzk92IHtyykwYJ68rswvl3eXHix/BWJdhGyOO7YDoytUm4gGdV7Go2xjd4D+uEpdJkeZJLKdzwkq0Ld9+OTzqD7Xk3Ux6GNvGlAnkDYGxK7ZI722hJGEJM0VTUMRUaRNx1Mezd7IEZjt62k2n2tlcY/XzNq2eg43gMs6EEkkqDbF+IPlk4Yk5lqAM3F6dHcOvR69/uftpSlT7LE1ns9nAjYsDLBVZN7BukrpxwX887wXrHx1CJeeQI8iyDNKVGtZEBl9jocaqALKhD922gfsQ5fJtFcTRxQZ/nrStcWrryBvBp6tzUCUaUuO5MpPt0GHNWDaaMWRuG8pyLc29WPd/O+hmFN9UlXQrJT4PwBwnSY3/7gn06uUWNtPqt5ubS4gQUNgycDiez10gTqJSRlVNJbKj4TARlXyMV2+Gw5YxueM/kIkBfKy1NIFam+koA5V12PEnJKaMJ2mKf6sz1qmJ2ow7eKbFjsQnMaMoyKPhYb8gO/GAlsW9hwepVQmyoSlHjfKBwmHYhNR+sBfZXmR7ke0U2attvp9Zl6uyRBPoudKbis9/Ums7w3Kvq72u9rrarauep8kLS/EJci+dvXT20tkhndd9L00jA1xlxzxE56wDWxSNc/zKNVV69Za2jN39Zn3zlX2vtb3W/tdaaxNRIU1t5zUG/46mIhNpp6aDtTvp00Wvjdem42hLenQPS9+vcVpkYhEF1WZpuphaT222YOOwTR+4Ww/SKZnryFEejsJbEkrbQupwu6+xPMDm4jLhY1k1ppTwFq5Or2/gvSScyXkoNId8Dv12+HbYixpszX7E0eU5xAwHm/bWEpY13wv73O/6FnBwKj0WjVM0v+ZlsTw5Sodu1HBzVnTp4gV0vo6TRNJ9OVuS6Pe/bgIPlBnbsLzjw/ZGxBPTTQwHh9vcuzwPEipsVTUmnKNmEj0v+SSxQjeeOKFEaFWg8YHlnRm8nPYhjsCfnZN3OOCmROYsj8+JommTDwpbpUVctvrMtc3TSiqTdiF8ejz6+OniZHTw4fz49OL69OBwMBzQI4Xka+upkubJPtg4X1p7a/N8M+XF+sfjP2a1dywgfKS01lIFwzGUb9HJ+Fb0yFgkItvlx4cC3CWdGm/FYpFLj5+cblu+/bVBNxfZ7d1avEHtpfL8vRTZWGq/6d0/reBPV53L/wJ6HP3ejLqb0szDmaEbvhKJuGfvfMd/Ftq7NhFTlCW6sME4d1QUWNMTlC37ntW3OhDfn7Js/gbGd9/2 +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get process start form

+ + + +Get the start form of a process. + +Note that this endpoint returns only linked forms; embedded forms are not supported. + +## Request + +

Path Parameters

+ +The form is successfully returned. + +
Schema
+ +The process was found, but no form is associated with it. + +
+ +Bad request + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-status-of-camunda-license.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-status-of-camunda-license.api.mdx deleted file mode 100644 index 6b1955b761c..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/get-status-of-camunda-license.api.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: get-status-of-camunda-license -title: "Get status of Camunda license" -description: "Obtains the status of the current Camunda license" -sidebar_label: "Get status of Camunda license" -hide_title: true -hide_table_of_contents: true -api: eJydVMFO4zAQ/RVrzqEpnFBuFdtFu2LZFZTlgHpw3Glj1rGDPS5UUf59NU5SCgVptbk4icfvzZt54xZIbgIUD3ClFdqAsMxghUF53ZB2Fgr4WZLUNgiqUASSFINw6/SlovdoSVzIOtqVFGaAyMBjaJwNGKBo4Ww65eVz1BHnLfoxqnKW0BKDyaYxWkkGyx8DI7YQVIW1PKZaVCjGhBh8Dyk8PkUMNIEMaNcgFODKR1QEGTTeNehJ9xK20ujVWKFjAh9R6A+zFjqIdDgTa2lCCnNUoX/WSdJAWzpnUFrIwEZjZGkQihTfZTAgLVLke+p7bYzwSNHbRD/SMq4YNOw+r+hAH8hru4Guy4A0Mfloh5uhbtDxk0GNVLkVFLDBVCVJFRSQvyIG9Fv07KgWojdQQNv3pSvyvK1coK5oG+epy7dnkMFWes16U5l5u9e4ltEQFGCckib9fm9L7ipvWFnje4Hn4mZ+uxCXkvBZ7ibQZcCUb6HPp+fTD1E59BPE2a9voleYQA8dN8JWRM2HsH3wvwB33ZILqaLXtLvlY315SpQe/SxyzduxdwNfQk9WSkGQDS9fna8l5/X9fpEarO3apeNDo48T4a6gD33m08npkRpOdu28UK6uo01jaDfiWVMl5IEwZWIgFrT3MPNyvw5oB5+J3z2jOJ1wU3rnsLRQ5PlGUxXLiXJ1rvpj+7U0rsxrqe1owZBfzH7cXX+ZnVx9u5hf385PTifTCb1QEt+4QLW0B3lc4uG9czwhb5S3r1fQf12LQ8sIXyhvjNSWTZS0tsMoPexLtcyGcXiAti1lwDtvuo5/P0X0Oygelq/Tw19dBhXKFfo0e39wx51SChtKY2YiUx/dm2y1/VBfztkjfwFIJxdy -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Get status of Camunda license

- - - -Obtains the status of the current Camunda license - -## Request - -
- -Obtains the current status of the Camunda license - -
Schema
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-tenant.api.mdx new file mode 100644 index 00000000000..5edc09947f6 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-tenant.api.mdx @@ -0,0 +1,60 @@ +--- +id: get-tenant +title: "Get tenant" +description: "Retrieves a single tenant by tenant Key." +sidebar_label: "Get tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWMtu2zgU/ZULrlqMYjlt2uloZ6Rpx9MHgtSdWWSyoKhriw1FqiQVRzD078UlJdu1nSYDzNILQ5bI+z6Hgs6Keb5wLLtmM9Rce3aTMFOj5V4aPS1Yxhbo+6WEFeiElTWtsYxdobcS79ABByf1QiH4sBPydvj3AdsRS1jNLa/Qo6VQK6Z5hSxjcc8HbFnCJHmsuS9Zwix+b6TFgmXeNrgbdlYiNFp+bxBkgdrLuUQLZg6+HBKgkE6UWHGWrZhva4omtccFWpawubEV9/HR6zPWdTcU09VGO3Rk8WI8pst+3L6qJXdg++oLcI0Q6Ny8USoUK4z2qD154HWtpAjNTL85crPaSmzHf/TtkFtRwpAPSI8Vee2rMPk3FDSL2tKcvIwZb1p5qOAHG+ha57E6WaCmkWMBZGM1V0Ol07ejAw1L+gmuQzlvpV4cjNQ7IoMRWcZ7gtZTrPs88X4/rS5h3Dm50Fh8wipH+wFbt+WVW8vbg04desLLLbaOrlWwdjB4A292sEQzcE+DElUovcL1QKceK3reJezsIVjV1tzJAgsouOcgHWjj4Y4rWfwCT7U1ucLqt8dwNYHLuBMK9FwqiBAC7iBuzMPc4frq3Tn8cfbq95tnpfe1y9J0uVyO7FycYCG9sSNjF6mdC/rRvucjmJVoESreQo7Ai0JSTK5gA05wNQo5l2Loap82UCtH/+rHkR1W97Gy7nxj5d6UJ/D1ajocD63Ui/3QwWbOG0U+eG4an+WK61u2meBjAJ2Aa6qK23Y4fX4O0CXMee6bx5Hz8sVBnP45m11CdAHCFAhzY8GX0g2BqIhKalk1FcvOxuOEVfw+3r0ejzvySRN/QiUa8L5WXAdo7ZYjNVTGYo+fUJjUznMt/q/JGCsXcjfuiG2zqQfx21jRQKjTw4SiNwg6D4qLWxeZBLzxJUWN9AFhMSTBlTuS7EiyI8keJtnLfby/MzaXRYE6wHPNt/7VxZUySzy+vI68OvLqF7w6e/AjgEg0N40+UuhIoSOFHqTQq0MfVBO9+YpFa40FI0RjLRawLKUK7ul7fYjdv7siFo9cO3LtyLV9rnUJq9CXplcEg6TnS5axNKokLl2tJaiO1De0d4Pa11jFMraKnOmyNF2VxvkuW9XG+i69o4HccSt5riIMaTlya8CMMoKr8PjQ7GiB9KWhpnNeNbrg8AauLr7M4D33uORt6CWF/Nn1m/Gb8UGvtPUBj5PLKcQKI/K2ToPBLdH6sPgUNj/FcdAlHYrGSt9+IbPYnhy5RTtpqP9rRPTxgne6j5tY0v95N+Dkr39mYdRSz00w70e+nwhNBa2LmY9Hp/vwupwGlghTVY0OR6VewFL6EvhWYUI1zlNBCVNSoHYByL0EPGz7GFfg7xgRTkc0lIic4YRcSF82+UiYKhXRbH3NlcnTikud9iFcej759PXz28nJx+n5xecvFyeno/HI3/tQfG2cr7jeyuM9+l7v2y1ztXkn/BfFux+Lx3uf1opLTUAJ9ax66lz3UqhjCcs2+u1N0uP/mq1WOXf41aquo8ffG7Qty65vNnQJ/Cqko/8Fy+ZcuV29fDv/Z1e9sv4cnqiiHyxjEFh1G5irGrpjCbsl+XlLiu5uuoSVyAu0IdG4PhECa79luSeSE+7Xp837CwLsD/VPf2s= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get tenant

+ + + +Retrieves a single tenant by tenant key. + +## Request + +

Path Parameters

+ +The tenant was retrieved successfully. + +
Schema
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Tenant not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-topology.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-topology.api.mdx new file mode 100644 index 00000000000..7a0a1749a58 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-topology.api.mdx @@ -0,0 +1,52 @@ +--- +id: get-topology +title: "Get cluster topology" +description: "Obtains the current topology of the cluster the gateway is part of." +sidebar_label: "Get cluster topology" +hide_title: true +hide_table_of_contents: true +api: eJy1V9ty2zYQ/ZUdPCVTRVLSpE31ptpOqk6bemylnanrBxBckUhAgAFAyaqG/95ZAJRoi75kJn3QiBdgzy7O2Qt3zPPCsdkVO1GN82jZ9YiZGi330uhFzmasQL80tVGm2LIRy9EJK2t6y2bsj8xzqR34EkE01qL24NNiMKv4PBoO1wX3uOFbkA5qbj2Y1ZiNmEVXG+3QsdmOvZpO6e//wBFGe9SezPO6VlKEICefHGHsmBMlVvwYfFkidC4SGD9AW/zSoPNk3G9rZDNmsk8oPBux2tIxehmjyqz5jNYdG5+Dko4chLQEfMk9cIud5+BL6broekjcWk6M6EYpnilkM28bHDHpsRoAOrdmLXN0IPXK2CqEDkYDT8CgTY6PB0KrSBZDh9Ro+aVBeLaRvpRkOTn9PNiGxSmsTOQnQvbQpPZYoGUjFp2Lj75/xdoRK43zw4D0RvMKg12LXJRSF8MAzlupCzJXG3uPOXrzBFMP+UqcSTL5INWHVVBxzQvMIaBGTdKdjqwfYXekfzXJe0jYs/NVvO/3P0L+4nQv2f2WJ56dNSTiu7ZPw12GMfkv+MoDLeyyPgVBrHEo5Br1IG6if8RQNxWVO4U8T34oZTbhUmouvFwjuybVIVe+fMyfrhjF1Z1TT/IgbiEyG324zpHn7LptR8xLT+fBzjtjjJ6u0ToZ69UxB+kw0poB9ffM/hzWLvTKhMcpVS/lvwMckG3dVBnafqGSul93n0Ly3WJ1K2FOTKPvScwDdi9zqES62iLPgQtrnPsG3nQpKI1+x4U3dtgfYfRKFo3FHHo7YBW2pCI3WLO/xpfUw/58iPDEdKe7vxEzhPdx45D27oD05NC1+IvU6Fjb0us3Q914roECsZorQGuNBSNCIuSwKaVCqK0R6FxXQbsu+Y++vwnX1mQKq+8ea8ZzOI8rIUfPpYJYr4A7yFNm5qTMq4t3J/DT6zc/Xj8rva/dbDLZbDZjuxIvMJfe2LGxxcSuBP1o3fMxLEu0CBXfQobA8zwIjSs4VEJwNQq5kgK8iZmenKGDjvE9XEbj290RMXs5NFYeTVlz+HixAJmj9nK17U71FnTYs+KNIhs8M42fZYrrz+xA8THoXRTXVBW3+4HqNkA7Ys5z3/TCeEjOx1r9Zbk8h2gCBM0D+zxJQBREJbWsqD6+nk5HrOI38e6H6bQlm8T4EyLRgDe14jr1v9vhSA2VsZj0EwKT2nmuxbdixlhZyLu441vVN4n4NEbUxmSr0JcmjdykHE79h038Yfp2aNdhjLzascYqNmO7mCjtbDLZ0TjUznY0xrSTNbGw5lZSvgfSDnNUJxRlBFfh8RBh++kqBXLCq0bnHN7Cxdnl8lBmbs1Unem307fTQathyBq2OD9fQIwwyq1XAjqzlMuDZuPipxhu22s6SNFY6beXtC2N6Mgt2nkTm36SQcIL1uk+LmKjdPGuE8evfy0DvzR3he2J52NHWK+Hs+n45bGmzhchNYSpqkaH+qiLMLUB7wXWay5KCqSSTeM5r/qwv8U3kJoIvBwTKVE5XVkspC+bbCxMNRFx2/4/UyabVFzqSYJwk5P57x8/nM5f/LY4Oftwefbi5Xg69jc+BF8b5yuue368R3/4LrvnI3J3aAnf6Dsvcefxxk9qxaUmNYWgdymprtjem+vuA+OK7XYZd/jRqralx18atFs2u7o+5BHdxdmQhkfKws+4Jc6EwNqHhFNNmNPvfl+S6Pb5/f6M1PIf4YNfmg== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get cluster topology

+ + + +Obtains the current topology of the cluster the gateway is part of. + +## Request + +
+ +Obtains the current topology of the cluster the gateway is part of. + +
Schema
    brokers object[]nullable
    + +A list of brokers that are part of this cluster. + +
  • Array [
  • partitions object[]
    + +A list of partitions managed or replicated on this broker. + +
  • Array [
  • ]
  • ]
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-user-task-form.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-user-task-form.api.mdx new file mode 100644 index 00000000000..215c90b8658 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-user-task-form.api.mdx @@ -0,0 +1,69 @@ +--- +id: get-user-task-form +title: "Get user task form" +description: "Get the form of a user task." +sidebar_label: "Get user task form" +hide_title: true +hide_table_of_contents: true +api: eJztWU1z2zYQ/Ss7OCVTWpQT56O8KY6dqk08HsduD44PILmSEIEAAywtazT8750FKMn6cJLO9NCZ6uCRSABvsbvvQeTzQpAce5HdihuPDkj6qbhLhK3RSVLWDEuRiTESj15LPz23rhKJKNEXTtU8Q2TiAxLQBGFkXQV2BBKaJVjvi/liLiwh0ETyLOUBTVlbZQhmSmuwRs/BITXOgFZmimXA8T243phcWvRgLIFv6to6AqxyLMvV9C9GJKKWTlZI6DilhTCyQpGJptv8HzgXiVC85VrSRCTC4bdGOSxFRq7B7byuJ7jOBKY474lE+GKClRTZQtC8ZnRlCMfoRCJ4I5Lirdcnom3vOIKvrfHoecWLfp8/dqOEyikPvikK9H7U6FVRsIypFdYQGuL1sq61KkJ/0q+eQRa727L5VyyIa+K4m6TiFjgS12HvNqY45/4te8np/jDJRBAaaYiZsg8zjsLw/RPInpwyY8bJ68o8hfLu8tPFz2Csy7CLEceegOjK1SbiHp1Xsai7GN3gP64Sl0mR5kksoSFhJdqWb7/on+wPtWbeTHoY2caUCeQNgbErvkjvbaEkYQkzRRNQxGRpE3Gyj2nvZAnMePT0NKFqZ3ON1S+7xNoEG8BlnAklklQaYgVBeogTcyxBGbi9Oj+FX09evbl7NiGqfZams9ms50bFEZaKrOtZN07dqOA/nveclY8OoZJzyBFkWSqOKTWsqQy+xkKNVAFkQye6bQN3Igrm+zqIo4stBj1qXOPUzkE3gJurIagSDanRXJnxbuiwZiQbzRgytw1luZZmKtYM2A26HcU3VSXdSoubAZjlJKnxPzyDXr7YwWZi/XZ9fQkRAgpbBhbHk7kLxElUyqiqqUR20u8nopIP8ep1v98yJnf8JzIxgA+1liZQazsdZaCyDjv+hMSU8SRN8W91xjo1Vttxextq7Ej8PmYUJXnSP94vyU48oGUx9XAvtSpBNjThqFE+UDgMm5Da9w4iO4jsILInRfZyl+/n1uWqLNEEeq70puKTn9TazrA86Oqgq4OuntbVnufJC0vxCfIgnYN0DtJ5Qjqv9r00DQxwlR3zEJ2zDmxRNM7xK9dE6QDP7+zL2N1v1ndf2g9aO2jtf621NhEV0sR27mJw7mgiMpGy63HErodPF498uzYdRefRo7tf2nuN0yITi6ieNkvTxcR6arMF+4Ntes+tuZdOyVxHQvJwVNmSPdoWUofb+7rIA+whLrM7lVVjSglv4ers8zV8kIQzOQ9V5ZCb0G/7b/t7UYN7uR9xcDmEmGFv281awrLA98Ju2lvfAw7GpMeicYrmn3lZLE+O0qEbNNyJFTe6eAGdr+MkkXRfzpeM+f2v69B0ZUY2LO+av7sR8chjE/3e8S7RLodBL4WtqsaEQ9OMo8ElHyVW6MYTJ5QIrQo0PlC683yX0z7GEfizM+6Oe9yUyJzlWTlWNGnyXmGrtIjLVp+5tnlaSWXSLoRPTwefbi7eD44+Dk/PLj6fHR33+j16oJB8bT1V0jzaB3vjaydvtMc+X6x/Jf5DTnrXfcIHSmstVXAVQ9kWnVZvxVqrIhHZpsseMr1LOsndisUilx5vnG5bvv2tQTcX2e3dWqFB0qXy/L0U2Uhqv+3IPy7Vs6vOu38Oe336vQl0N6WZh6NBN3wlEjFlR3zj/wTtXZuICcoSXdhYnDEoCqzp0dodK56ltTraPpyxJv4GOW2+/Q== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get user task form

+ + + +Get the form of a user task. + +Note that this endpoint returns only linked forms; embedded forms are not supported. + +## Request + +

Path Parameters

+ +The form is successfully returned. + +
Schema
+ +The user task was found, but no form is associated with it. + +
+ +Bad request + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-user-task.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-user-task.api.mdx new file mode 100644 index 00000000000..9011260b49a --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-user-task.api.mdx @@ -0,0 +1,67 @@ +--- +id: get-user-task +title: "Get user task" +description: "Get the user task by the user task key." +sidebar_label: "Get user task" +hide_title: true +hide_table_of_contents: true +api: eJztWU1z2zYQ/SsYnJIpLcmJk6a8aWTZUWunHsduD64PILkUEZMAgw/LGg3/e2cBUqJE2pHTHnXwmCJ29+1i9wEgdkUNm2sa3tFbDYoYph/ofUBlCYoZLsUsoSGdg8HRGxwMaAI6VrzEURrSczDEZEBso06i5c6LB1gO/hE0oCVTrAADCgFXVLACaEhtbfoPWNKAcjRaMpPRgCr4brmChIZGWdhFvumA0IDqOIOC0XBFzbJE61wYmIOiAU2lKpjxrz6e0Kq6RwRdSqFBo8a70Qj/vYTCNdE2jkHr1Ob5kigwVglIfHyxFAaEQSOsLHMeuykcftNoadX1TUbfIDY4MQon3HDvR3tCev15gCWR6fYsY/A/DDmo53ztgTaKi3knqQiCkiSVipiM6zZOFVBtmNnTjBPt9RaELbDwJtfT8c30lAZ08ufl1cW0fh5/mUwv3OPZeIYP91VAmdZ8LgD656UZfWlyalergEIOBQiDFb5PILU4mZ32mG+ZE9owEUOdvE5KXsxmg8FrI4PeHMZMJDxhBs6VtKVuwTCl2LIXZK1D5k6pN7MB5QYK3Z2Pqo2KS8FrQRHkJzBLJZFrp5BywZvlaJ9kbZJUmyDJ2obLVsfyT+Wrz3hfxmq5/1QbDdbLtYFvfso8Kj5TbgrcOna6N+cbDZLU3GfbGV9j4PiR4QU4HFmUObwSaa3zOqxU5rlc3Jb7I3mN16EkFvYHSCy8zroBwfZcvpzkmhSd5RyeDCjB8jOpimtIQYGI9/S6UXX1Q1Sj3M+xv0Bp7vfCPYrz0Uvvz7X371wZWW1k8RlYsr1MrTfbbaSJEyeZl6+XqJ2tgyWJA2T51dZG3bdgcam42Zd/jfhuzgkOZnyegffmkeUW3FPrZaOMHhZc8AK301FAC/bkn49HIwRNmc0NDT+M0EHDTY5eNee5mYGCVhUOnTx3/CmVfOQJJFieDI9AQhp0iScDcikVkAQM47kmTLWEuXBeNicsEslk+eIxqVQyyqH4pXtc2nZpTK68ZI1LfGYJw9pAwcij312fTchvJx9+vX+TGVPqcDhcLBYDlcZHkHAj1UCq+VClMf6h3Fs37wpIwZYkArJJO9kc0IguIeYpj4mRTV06ZzDdPr6XT3d+tMutdSVbxTvFMia31zPCExCGp0su5l1o2so1ZZG0JoxyJh7oJus/IvSYaFsUTLX3nBZAffCzure6d5jYV+6fb26uiDdBYpm0jpc10FYpn4zaxfxxNMIV1WV8j0gEgacyZ8LvQzvhcEGKTd26wJpd9X/KjFR8zndxB7TNwLqIT31EDQWP+ymIX0KgDclZ/KA99wizJkNUTx/cdJ0TLNeDA8kOJDuQ7FmSve/W+5lUEU8SEH7vbfhWb3YMz36QHHh14NWBV8/z6uRH12cLbjJndM4fQbjPzwXzFEulFQeCHQh2INizBPvQ94E2FgRn2X2Dg1JSERnHVilIyCLj+fq7ucGud7bDl9iBaweuPce1KqAFmEzWzS/XujIZDekQ97Ij3Mv0cNXq01TYegL12LS2rMppSFeeOFU4HK4yqU0VrkqpTDV8xKw8MsVZlPtaxGFPsKZwchmz3L3uSyAOuA5NHdiEFVYkjHwi19OvN+ScGViwpb+Gk2rH9KfRp1H/fZRU5hmL46sZ8RH68mstCY1Z5HZ/D8gJ72PYNeU0xBYvtr6imp+eCJgCNbaYhHVZ1HjOOv72QjSoH86aYvn97xuXby5S6dTrvHcdwaw0d5R0NDju1tjVzFEllkVhhVsvxdwfa1grsDi32mBAAc15DEK7aq77nY3YhR8h9a0oOR5gUnzlNMvknJvMRoNYFsPYq63/R7mMhgXjYlhD6OFkfHn75XR8dDGbTL98nR4dD0YD82T8zaTUpmCi5Qd2btdHs91IV5u94VUt3jo1Bp7MsMwZF1gsLqZVzaE7uuEQDWjY7nbeBzUR7uhqFTENtyqvKnz93YJa0vDufsMbR7SEa3xOaJiyXO/2iNtRvLmuu8lvSW/nuNfzpsUllo6wucVfNKAP2GLZatRW2J70t8jOMS8xjmMoTUu30xfGgl+vNedTrNR/ASt74Mg= +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get user task

+ + + +Get the user task by the user task key. + +## Request + +

Path Parameters

+ +The user task is successfully returned. + +
Schema
    customHeaders object
    + +Custom headers for the user task. + +
+ +The provided data is not valid. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The user task with the given key was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/get-variable.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/get-variable.api.mdx new file mode 100644 index 00000000000..562ed0ee3d7 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/get-variable.api.mdx @@ -0,0 +1,63 @@ +--- +id: get-variable +title: "Get variable" +description: "Get the variable by the variable key." +sidebar_label: "Get variable" +hide_title: true +hide_table_of_contents: true +api: eJztWUtz2zYQ/is7OCVTRpQTJ015Ux0nVZtmPI7jHFwfQHAlIgYBBlha1mj43zvLh0zr4bid3qqDRxKx+L59faC5XAmS8yCSK3EpvZapQXEdCVeil6SdnWYiEXOk9WIkMgzK65JXRSI+IAHlCLedAaTLh79vcDn6y4pIlNLLAgk9s62ElQWKRPR2f+BSREIzZCkpF5Hw+L3SHjORkK9wk/dik0NEIqgcCymSlaBlyeDaEs7Ri0jMnC8ktZfeHIu6vmaCUDobMPCOl+MxfzxCogOESikMYVYZswSPVHmLWRudcpbQEmPIsjRaNemLvwUGWg1ce8jQ5xUCSq9y6H0CTVhwTF0kLv2GijiLnmtDuvV6mL2d3t/gEmbOA+U6rEMZ4O7PUNRVaBP1kywQ3GwvYiCv7Zz330pT7QC45MtPQ+BEX+5GeV8ZA7dPhwrKlY+nqYFBaAwfw3wsZaV33CBTG0ha9SS+bgvobs+/pSa00hLrdYuwWYHpu6dlSocLX1klCXdgfc2RcvSdxDn7OgD19uA8WEcD2NQ5g9KKmj3UZHDQ81PCQtQ1Lx3vUt+vMgM+BDDQfoGV3qUGi59+JLQJnLWWkCFJbaBVFMgArWGKGWgLV+fvT+CX49c/Xz/LicqQxPFisRj5mXqBmSbnR87PYz9T/Md2z0dwkaNHKOQSUgSZZZo5pYF7rUIoUemZVkCur3rjDKepPUAeF3q7utqo1qAPKq+3zuYJfDmfgs7Qkp4ttZ1vUzd7ZrIyjCFTV1GSGmlvxH29tkk3WUJVFNIPO3pAwNojSVX44bn86uUWNmvlt4uLM2ghQLkM78+zjoiDKLTVRVWI5Hg8jkQh79pfb8bjmjG54k+IxALelUbaprU2w9EWCuex658msF6z/1FlnNdzvck7eqCdronftRH14jnafcp04gEj1Q2L3ugMZEU5s7byAeWxcUKaMDqI7CCyg8j2iuzVjn9CnE91lqFt2nOtNx34LgjSGLfA7KCrg64Outqvq+MdzxiOYOYqmx2kc5DOQTp7pPN610PTxAJn2XMfovfOg1Oq8h4zWOTarJ83e+7unvXoEOOgtYPW/tdaqyNRIOWuG4Y2w0zKRSLifpIS4tVgFlfzMBL9bT/rrLwRiVi1uqmTOF7lLlCdrErnqY5vuShrJA6Ul1t99X1jnJKmubyrfrxg14M5hBNZVDaT8BbOTz9fwAdJuJDLJp9M+RD67fjteCcqm+5BnJxNoY2w7b7BidDDsrR3wrbGTwFuxrQBVeU1LT/ztjY9KUqPflJxDdZd0fE16M3oqTESUfflfd8rv3+9aMqt7cw127uybzvCVUEfWs/Ho6PtFjubNkpRrigq2xyXdg4LTTnIQWDKVIE4oEgYrdCGppm7AXhv9rFdgcuWEY5GXJS2c/pTcq4pr9KRckWs2m3rz9S4NC6ktnFHEeKTyZ9fPr2bvPg4PTn99Pn0xdFoPKI7aoIvXaBC2oEfPMi/3TPlX93fGf7BwL+rC+EdxaWR2nKnNAGtOv1cDbo+Eslwmn0ddSK4EqtVKgN+8aau+fL3Cv1SJFfX95ppRJbpwN8zkcykCZsvDIYhPDvvXi08h12vEXb63V2UdinWk20hInHDI94Hc/j6uo5EjjJD3/jVWkyUwpIGe7feEnCvr0+ZD6fcpH8D1zXSZg== +sidebar_class_name: "get api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Get variable

+ + + +Get the variable by the variable key. + +## Request + +

Path Parameters

+ +The variable is successfully returned. + +
Schema
+ +Bad request + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/migrate-process-instance.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/migrate-process-instance.api.mdx index 71484fe2255..62f988d0fc9 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/migrate-process-instance.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/migrate-process-instance.api.mdx @@ -5,7 +5,7 @@ description: "Migrates a process instance to a new process definition." sidebar_label: "Migrate process instance" hide_title: true hide_table_of_contents: true -api: eJztWN1v2zYQ/1cOfFmLKZLTpV2nhwFekm7emi5InO0h9gMtnS22EqmSlB3D0P8+HCnZjiU33cfLgAQIEpvH+/z9juRtmOULw+J7dq1VgsaAkMZymSCbBixFk2hRWqEki9mVWGhu0QCH8kAYrAIOElfblRTnQgraGU7kOBMGNH6u0FhIuIREScuFhKLKrShzhIKXpZALp1BXCe0zpNSp2S3P0K4QJdgMgSdWLHEiD335xgDmWKC0BrhMwXK9QNvj2FYsnMiJvDMIlvy0CqpyoXmKXwx0idqQEjXfE1MarJpIDqmYz1Gj7LMbAIaLkDR9QixhrSoNupKyjZ/sGKjKE6tOUm4RVsJmFPJE5pT/nU5RlFott0GwgJVc8wItaqrphkleIItZIz9qlP+GaxYwQTUtuc1YwKg2QmPKYqsrPCz8OEP4hGsKlRLfzUnGLZhMVXkKM4TC4yQNWcBMkmHBWbxhdl2SK0JaXKBmAZsrXXDrv3pzxup66v1AY39S6Zr2HLpFsEFpaYmXZS4STh5GHw25uekaU7OPmFhKi1YlaivQuFWHiAbwF9u6UFrizfHYexBkVRvtkcwoSsKToQesAfhoD/57cXCtuSuZxcL0xdf1uZdQXmyGkKmV852XO67MtSpcFEZVOsEj4ZLAUUY1GHyUbK/t0lsZpf0ZbpyA0cV+SsmjvfwZq4VcULa8A/9Ip5CPatLqrPc5cN9xumtyGjArbI67vnj9mGRXnYo6K5Qah9obdA0iwa73Q9DtosNekimDEmZrl/3KoPaUW4k8J8KVXFvXh/IcNCZKp9RtDfVWuWjrKgxsbYdwVRlLW3+EAQgH7aVIMfUFfAqvASuEFEVVsPj0MHNf4FY/yp9M5I3vCayuvS1TKmk8tl4Nzvpr3yGiMHttqQ7Y2WBwdKfLBKTcctomlYUlz4VrZ0caUKnVLMfi224jOizstZeEFC0XOXgCA99RMwUh4f7m3Tn8cPb6++mLzNrSxFG0Wq1CPU9OMBVW6VDpRaTnCf2S3MsQxhlqov2aysrT1KWd57BjI5gSEzEXScvjxm2gej+q/LG+6VY3B9zZg0elRacZDeHuZgQiRWnFfE2A7Jh2e+a8ykkHn6nKxrOcy0+O6h4bXaOHVkxVFFzvH1J7BuqAGcttZZ48ib571dtOfxmPr8GrgESlCHM66olUjaFwnxVngwGB/cF/ejMY1KSTKv4VkUjAhzLn0kHrMBy6MymNDX5cYNsL239TGaXFQhzaDV3vannagPjCR+R5efZ3qEicmqtKPnPqmVPPnDrKqdd9h9RQ0h0GNeEQtVYaVJJUWmMKq0zkW8K1tpsr9TPTnpn2zLR+ptH7C22mUnoQK+OgQ+/imEUNmU627/Jo031M15G/W5JjATOol+37u9I5i9nGM6qOo2iTKWPreFMqbetoSeVaci34LPcgpWXPvBZRuUp4nnmvupWlBXrktxGf86KSKYe3cHN5O4afucUVX7tMk8nHqt8O3g56tZLoEY3D6xH4CD0u93pFq5ZI36vWC3+NYjcMMJhUWtj1LW3z6Zkh16iHFRVni5fGntNOn70QC5p/3rUo+vXPsQMC9bmb3aDh8oEXZcPSLwwGBkce6fc9T9wdhDsv1e2Lc9r/Fhw4TsyVc6dBbDdTBBs/f2IxG4SnXXZcjxzJE1UUlXSdXi78IInvZT7JK2Mp4wHLRYLSuDQ0U6NW7L1fgT+aiddpSKjx0G4b/ELYrJqFiSqixG/b/p3lahYVXMioMWGi8+HV3YeL4cn70fnlh9vLk9NwENoH66pDBCy43POjeRZ2bpKHQW92B9zzrPJ/MKts6GvxwUZlzoWkhuJgtWn67z3r9F8WsLh3nLlrwdOgaaP3bLOZcYN3Oq9r+vpzhXrN4vvprus6AqfC0P8pi+c8N4fzz31cvbhpZh0v4V9MRXsjb2d9cu0OhbyiTyxgn6j79I1w62kdsAx5itpF4QXPva8nY1K3U9SZl9ZBu2OYJFjaI7KPLofUtLYn5fXvt2Nqss2stlAp7dV8RfNkvmIxm7AJBaDK7TzTfb9hOZeLii9I3uuln78AXVihRg== +api: 
eJztWF1v2zYU/SsXfFmLKZLTpV2nhwFekm7emi5InO0h9gMtXVtsJVIlKTuGof8+XFKyHUtuuo+XAQkQJDbJ+3nOkXg3zPKFYfE9u9YqQWNASGO5TJBNA6ZK1NwKJUcpi1khFppbbDaO2n0BS9EkWpS0kcXsym8zwKE8sAlWAQeJq+1KinMhBZ0MJ3KcCQMaP1doLCRcQqKk5UJCUeVWlDlCwctSyIUzqKuEzhky6szslmdoV4gSbIbAEyuWOJGHsXxjAHMsUFoDXKZguV6g7Qlsuy2cyIm8MwiW4rQKqnKheYpfTHSJ2pARNd/bpjRYNZEcUjGfo0bZ5zcADBchWfqEWMJaVRp0JWWbP/kxUJUnVp2k3CKshM0o5YnMqf47m6IotVpuk2ABK7nmBVrU1PoNk7xAFrPycWd/wzULmKCeltxmLGDUG6ExZbHVFR42fpwhfMI1pUqF79Yk4xZMpqo8hRlCA6c0ZAEzSYYFZ/GG2XVJoQhpcYGaBWyudMGt/+rNGavrqY8Djf1JpWs6cxgWwQalpSVelrlIHIajj4bC3HSdqdlHTCyVRRPirUDjVh0iGrhfbPtCZYk3x3PvQZBVbbZHKqOoCE+mHrAG4KM9+HdjufR4bdlgYK5V4RwbVemk179bPqBAu7wXG9eaO1BYLExfBbtV6aWs3zZDyNTKVYeXOzYeC/dxQXsCfiQmnXZ6a01xSND6etgEAaOL/aZRRHtVMFYLuaB++AD+kU0hH3W9tVnvs+y+E3TX5TRgVtgcd8p7INBXHcw4L1ttv0EnQQl2ox+CbhcdupNMGZQwW7vqVwa1J/VK5DlRuuTaOqXLc9CYKJ2SnhtSb7lo+yoMbH2HcFUZS0d/hAEIR56lSDH1DXyKEQErhBRFVbD49LByX2BvP4+eLOSNVx1W196XKZU0HluvBmf9ve9QTZg94asDdjYYHD3pKgEpt5yOSWVhyXPhBPOIxJVazXIsvu1K3WFjr/1OSNFykYMnMPAdNVMQEu5v3p3DD2evv5++yKwtTRxFq9Uq1PPkBFNhlQ6VXkR6ntAv7XsZwjhDTbRfU1t5mrqy8xx2bARTYiLmIml53IQN1O9HnT+mzG51c8CdPXhUWnTEaAh3NyMQKUor5msCZMe1OzPnVU42+ExVNp7lXH5yVPfY6Do99GKqouB6/zG456AOmLHcVubJZ913r3rl9Jfx+Bq8CUhUijCnlwkiVeMo3GfF2WBAYH/wn94MBjXZpI5/RSYS8KHMuXTQOkyH3sqUxgY/LrHtm+N/0xmlxUIc+g2ddrU8bUB84TPyvDz7O1QkTs1VJZ859cypZ04d5dTrvofUUNI7DGrCIWqtNKgkqbTGFFaZyLeEa303L+0ei89ce+baM9e6XKM7HtpM0bylVMZBh+7eMYsaOp1s7/7RpnthryP/dkmBBcygXrZ3/ErnLGYbz6g6jqJNpoyt402ptK2jJbVrybXgs9yDlJY981pE5Srheeaj6naWFmiQ0GZ8zotKphzews3l7Rh+5hZXfO0qTS4fm347eDvotUpbj1gcXo/AZ+hxuacVrVkifa9Zv/lrDLuBg8Gk0sKub+mYL88MuUY9rKg5W7w0/px1+uw3saD5512Lol//HDsgkM7d7IYZlw+8KBuWfmH4MDgyCLjvueTuINy5q27vnNP+2+DAcWKuXDgNYruVItj4GReL2SA87bLjeuRInqiiqKRTernwwyq+V/kkr4yligcsFwlK48rQTKbabe/9CvzRTNVOQ0KNh3Yr8Aths2oWJqqIEn9s+3eWq1lUcCGjxoWJzodXdx8uhifvR+eXH24vT07DQWgfrOsOEbDgci+O5mLYeZc8THqze8A9z0P/B/PQhr4WH2xU5lxIEhQHq02jv/eso78sYHHvyHQnwdOgkdF7ttnMuME7ndc1ff25Qr1m8f10p7qOwKkw9H/K4jnPzeGMdR9XL26aacdL+BeT197M22mfXLuHQl7RJxawT6Q+fWPieloHLEOeonZZ+I3nPtaTMZnbGerMZOugPTFMEiztkb2PXg5JtLZPyuvfb8ckss08uFApndV8RTNrvmIxm7AJJaDK7czUfb9hOZeLii9ov7dLP38BfdTUmw== sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null @@ -42,7 +42,11 @@ latest process improvements. ## Request -

Path Parameters

Body

required
    mappingInstructions object[]required
  • Array [
  • ]
  • = 1`"} schema={{"description":"A reference key chosen by the user that will be part of all records resulting from this operation. Must be > 0 if provided.\n","type":"integer","format":"int64","minimum":1}}>
+

Path Parameters

Body

required
    mappingInstructions object[]required
    + +Element mappings from the source process instance to the target process instance. + +
  • Array [
  • ]
  • = 1`"} schema={{"description":"A reference key chosen by the user that will be part of all records resulting from this operation. Must be > 0 if provided.\n","type":"integer","format":"int64","minimum":1}}>
The process instance is migrated. diff --git a/docs/apis-tools/camunda-api-rest/specifications/modify-process-instance.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/modify-process-instance.api.mdx index bc8eba6ccf8..0828b8177da 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/modify-process-instance.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/modify-process-instance.api.mdx @@ -5,7 +5,7 @@ description: "Modifies a running process instance." sidebar_label: "Modify process instance" hide_title: true hide_table_of_contents: true -api: eJztWd9v4zYS/lcGfLkWJ8tOL223fijgy2bv3NvdBom3fbDzQEtjiw1FqiRlxzD0vx+GlGTZsjdb9NB7SYAktkTOz+8bksM9c3xt2XjO7oxO0FoQyjquEmSPEUvRJkYUTmjFxuyDTsVKoAUOplRKqDUUJ5PihZplwoLB30u0DhKuINHKcaEgL6UThUQ/2JQJSbXgNPDEiQ13CFwBSsxROdArcBm28rVZKKfBocmFqof6adiqpikHAfFCLdQni+DIHKfBYMGFAd4zGVzGHQgL1pXJE2h1ZIYBp/UTPSqVUA5ViikU3GXxQr3TBvCZ54XECJaY8NIGH54dGsUl2J11mJNspR3wDReSLyWS1FSjVX9zYNAWWqXALeBzgYnDNF4oFrGCG56jQ0PJ2TPFc2RjVhs/rW3/D+5YxAQlh2xiEaPAC4MpGztT4mkGZxnCE+5Oo3scCpvpUqawRMhDwtOYRcwmGeacjffM7QoyhaKxRsMittIm5y48+u6aVdVjsAOt+6dOdzTn1CzCBCpHr3hRSJFwsnD4myUz931levkbJo7CYnSBxgm0fm6NnGkHUZ1Z3BjuA+Qwt+ekHUenKwXCqyWBfJuJJGsgYTvxadSnIFQ9yia6IIaotH6w4UZQ0rvzEoP8kOgjj2ot05S+9JM3fdvkrkHoScpakyhntbvWGaHWrIoYJdk6bW5rLR0YnVXXwUozNXh4ZEILn56HIFS8UA/oiIGDK/ob3vj5Crd9GVvhMhE4+Cyso/j3xgSTFmol9TbYE0L5EjIp4SteSsfGg6sqYk1u/gr4fA4HPRS0g/tp+enh54+Q6qQ8ZH8rpKyD40QT3IO+FRWxDMFo7drHdRovFIJ4oaYO8tI6jynwSoPbEZWqg3Cvm0oFL4rAA06oGWy4LBFW3GZCq5AdnqaCnODyruNsqAfHka2o3OgCv5wFx/w7CcA54r1HvkHAvHC7E1Qe5gnlH6ylXlIx/1zAuvCr6dYBG2NV1S3N806CHyPmhJPYrK+7u+MS/0sfo6w6FXeoGi+Jm/QrpjeuXVn/T6W01X+GDfiF1erF4nis5OV17HyQO2a8FO3Zmah6ueSeX/LucYUGabvVc2kCpnnpC3GSaYsKljvvYmnRdOi/RCi48fsmLiUYTLRJaR9madel1rAyOg+boVZ3DB9qhv8IIxArQvVGpM3C9HI1zYUSeZmz8ZWH0OcicR/2AwRdH1ba9diQ3m9G1+fz2dueCNvZklQRux6NLs70jkDKHW/2XxsuhU/8hc1HYfRSYv73/ibkNC93YSSk6LiQdWGkulgjPRTC+f27G/jh+tvvH7/KnCvseDjcbrexWSUDTIXTJtZmPTSrhH5p3NcxzDI0VEx3vu62BRMOhABbYCJWIqGyVZcibwyl6yhxF/ZM4e2+V63a7JZG9Pg8gU/3UxApKidWO8JTT/VRweNLXbrxUnL1xA7Q6Cs91WLLPOemu0HtKKBlwXFX2hd3of/4piebYPHv2ewOgghIdIr10ihsoyjugvp6NIpYzp/Dt+9Go4pkUsa/wBPavhSSKw+tU3foMKQN1vjxjrWnrv9NZrQRa3GqN2ZdmtYgfhs8Cry8/iNUJE6tdKleOfXKqVdOXeTUt+cWqYkCirLvEqAx2oBOktIY9MdG2RKu0V0fp1+Z9sq0V6adZ1oVsRxdplNqhmnroUM9sTEb1mQaNGba4b7fSKuGYW8Z+EQdLzSbpv1WGsnGbB9IVY2Hw32mravG+0IbVw03lLGjQzu9DuRrQCV1wmUWDOsnl15Qj69x+obnpUo5vIH724cZ/Is73PKdDzapPBb9ZvRmdFYqDb0gcXI3heBhgGanXDRiifdnxYbBXyLY9wItJqURbvdA00J4lsgNmklJ+WkhU+vz0ul7GMSi+sO7Bkg//TrzWKBSd3/oM96GVuzlvuD8qL12QOjnumKDq0ttovlxm6bbs2g7bo/k/oWj9fz8wXZEU84dD0eeZyvtA1azoB96wiEaG3I1iq/6jLub+sKR6DwvlUc7ncuFy4B3UpnI0jpKYcSkSFBZH9e6C90Mex/ewC9BI1zFBMPAlWbRWAuXlcs40fkwCdPa/0upl8OcCzWsVdjhzeTDp49vJ4P305vbjw+3g6t4FLtn59NNpM656tgRTpq9zempz/vDmvl6h/FX3mHUvHb47IaF5EJRpfHw2Ne1ec56tZlFbHz2muOoPD9GdYmds/1+yS1+MrKq6PHvJZodG88fD8T1ZEuFpc8pG6+4tKdXI12MfHVfd32+hj9xYXLW+aabpnZ+wZAlfWMReyLqn7vdqR6riGXIUzTeizDwJtg6mJG4g6DeVUoVNTMmSYKFuzD2aO9I9addSO9+fphRAa6vcXKd0lzDt3TVxLdszBZsQQ7oou0Y+ud7Jrlal3xN44Nc+vkvSlgCIw== +api: 
eJztWd9z47YR/lcweGkypSg5ddIrHzqj+nyt0ruLx9alD5IeIHAlIgYBBgAlazT83zsLkBQlSid3kubJnrEtkcD+/L7Fcrmnjq0tTWb0wWgO1hKhrGOKA11EVBdgmBNaTVKa0FynYrWr102aZRFNwXIjClxHE/oJVwmwhBFTKiXUmhQnouO5mmbCEgO/lmAd4UwRrpVjQpG8lE4UEvxiU3KUaonThHEnNswBYYqAhByUI3pFXAatfG3mymniwORC1Uv9NmhV45aDgHiu5uqLBeLQHKeJgYIJQ1jPZOIy5oiwxLqSPxOtjswwxGn9jJdKJZQDlUJKCuayeK4+aEPgheWFhIgsgbPSBh9eHBjFJLE76yBH2Uo7wjZMSLaUgFJTDVb9yREDttAqJcwSeCmAO0jjuaIRLZhhOTgwmMI9VSwHmtDiOEX/hh2NqMDkoE00ohh4YSCliTMlnGZwmgF5ht1pdI9DYTNdypQsgeQh4WlMI2p5BjmjyZ66XYGmYDTWYGhEV9rkzIVLP9zSqloEO8C6f+h0h3tOzUJMgHJ4ixWFFNxjcfiLRTP3fWV6+Qtwh2ExiFwnwPq9NXImHUTh9WOvu3dJuLVE8G4zwbMm1bbjdyM2JULVqyzXBSJfpfWFDTMCk9ndxw3gLgxXbTczhvkUOcjtOX9eZ2kHkScpak2tUXMUnnoPUvw0JoiEyfsGCFeFHzyyzgi1plVEETHWaXNfa+lg8qy6DvCarSGsRya0WOyFlQgVz9UTOKTz4Ab/hjt+v4JtX8ZWuEwEQr8I6zCUvTXBpLlaSb0N9oRQXoM55m7FSuloMripItoA4rdg8Q8F1atUHgOqXdx37Mennz6TVPPyAKStkLKOsxNNng76VlhcMyBGa9derhFxoUDFczVxJC+t8/AkXmlwO8ISehDudWMJY0UReMwQgIMNkyWQFbOZ0CokmqWpQCeYfOg4G+rUcWQrLIO6gNcT6rh+nASgH/C5+ghsAwTywu1OAH7YJ5S/sJZ6iYfM1wLWRXLN3A5uKa2q7pEx6yR4EVEnnITm3D/tDn7uw51Wp+IOBeiauHG/knvj2hP/dy7xrdz/G7GuKj1fq69V0atF+6xnXzmsz2esY8a11E3PpMjLbXvMR1iBAWwpey6NiWlu+gOCZ9qCIsudd7G0YDq1ZAmkYMY3h0xKYoBrk2KzabG1VGuyMjoPHV+rOyaf6nLxdzIiYoUU2Yi0OTCvV/lcKJGXOU1uPB6/FonH0PQgD3xYsbWzIb3fjW7P57PXgwnb6buqiN6ORhd3ekdIyhxrmswNk8In/kKHVRi9lJD/ud9pneblIawkKTgmZF1lscjWSA9Vdfb44Y787fb7vy6+yZwrbDIcbrfb2Kz4AFLhtIm1WQ/NiuMvrvs2JtMMDFbmnS/ibfUlB0IQWwAXK8GxBtZ1zRuD6TpK3IXGMNzd90pfm93SiB6fx+TL44SIFJQTqx3iqaf6qHqypS5dspRMPdMDNPpKT7XYMs+Z6XbhHQV4xjjmSnu11f7Ldz3ZCIt/TacPJIggXKdQn7PCNoriLqhvR6OI5uwlfPthNKpQJmb8FZ5gW1VIpjy0Tt3BJz5toMaPd6x9AP19MqONWItTvTHt0rQG8fvgUeDl7f9CReTUSpfqjVNvnHrj1EVOfX/ukBorglH2oxAwRhuiOS+NAf8MLVvCNbrrmUHA4hvX3rj2xrU+16qI5uAyjWPbQlsPHRz9JXRY02nQmGmH+/68sBqG7jLwCQd7YDbNlLE0kiZ0H0hVJcPhPtPWVcm+0MZVww1m7GgGgLcD+RpQSc2ZzIJh/eTiDRxlNk7fsbxUKSPvyOP905T8kznYsp0PNqo8Fv1u9G50ViouvSBx/DAhwcMAzU65aMQi78+KDYtfI9iPPC3w0gi3e8JtITxLYAbMuMT8tJCp9Xnp+D0solH94UMDpB//M/VYwFL3eBin3oeJ8+Xx5+xo8HdA6NfmdYObSwOs2fHUpzsCaWeBC3T/wpP67Pyj7ahanH9AHHmerbQPWM2CfugRh2BsyNUovukz7mHiCwfXeV4qj3Z8MhcuI6yTSi5L6zCFEZWCg7I+rvWwvVn2MdwhPweN5CZGGAauNIfGWrisXMZc50MetrX/l1IvhzkTalirsMO78acvn9+PBx8nd/efn+4HN/Eodi/OpxtJnTPVsSM8a/ba01Of94cz8+1VzR/5qqbmtYMXNywkEworjYfHvq7NM9qrzTSiydm3OUfleRHVJXZG9/sls/DFyKrCy7+WYHY0mS0OxPVkS4XFzylNVkza0zdAXYx881jPfb4lv+G90Fnnm3ma2vkDQ5b4jUb0Gal/7iVWtagimgFLwXgvwsK7YOtgiuIOgnpvjKqo2THmHAp3Ye1R74j1pz1IH356mmIBrt9W5TrFvYZt8Y0a29KEzukcHdBFO4D01/dUMrUu2RrXB7n4818P6WWE sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null @@ -41,12 +41,24 @@ For example, because an external system is not available or doesn't respond as e ## Request -

Path Parameters

Body

required
    activateInstructions object[]
  • Array [
  • variableInstructions object[]
  • Array [
  • variables objectrequired
    +

    Path Parameters

    Body

    required
      activateInstructions object[]
      + +Instructions describing which elements should be activated in which scopes and which variables should be created. + +
    • Array [
    • variableInstructions object[]
      + +Instructions describing which variables should be created. + +
    • Array [
    • variables objectrequired
      JSON document that will instantiate the variables for the root variable scope of the process instance. It must be a JSON object, as variables will be mapped in a key-value fashion. -
    • ]
    • ]
    • terminateInstructions object[]
    • Array [
    • ]
    • = 1`"} schema={{"description":"A reference key chosen by the user that will be part of all records resulting from this operation. Must be > 0 if provided.\n","type":"integer","format":"int64","minimum":1}}>
    +
  • ]
  • ]
  • terminateInstructions object[]
    + +Instructions describing which elements should be terminated. + +
  • Array [
  • ]
  • = 1`"} schema={{"description":"A reference key chosen by the user that will be part of all records resulting from this operation. Must be > 0 if provided.\n","type":"integer","format":"int64","minimum":1}}>
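For orientation, a minimal modification request body might look like the sketch below. Only `activateInstructions`, `variableInstructions`, `variables`, and `terminateInstructions` are taken from the schema above; the `elementId` and `elementInstanceKey` field names and all values are illustrative assumptions and should be checked against the generated schema.

```json
{
  "activateInstructions": [
    {
      "elementId": "shipping_task",
      "variableInstructions": [
        {
          "variables": {
            "orderId": "order-4711",
            "priority": "high"
          }
        }
      ]
    }
  ],
  "terminateInstructions": [
    {
      "elementInstanceKey": 2251799813685251
    }
  ]
}
```

Combining both instruction types in one request effectively moves the instance: the listed element instances are terminated while the new elements are activated with the given variables.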
The process instance is modified. diff --git a/docs/apis-tools/camunda-api-rest/specifications/patch-authorization.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/patch-authorization.api.mdx deleted file mode 100644 index 9e39abb1ef6..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/patch-authorization.api.mdx +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: patch-authorization -title: "Patch authorization" -description: "Manage the permissions assigned to the authorization." -sidebar_label: "Patch authorization" -hide_title: true -hide_table_of_contents: true -api: eJztWVtz2jgU/iua89TOOkDatNv6jYKbejcBlkt3ukmmI+wDVmtLriSHsIz/+44kO8FA2mynjzCTCZKlc/2+I3y0AU2XCvwr6BY6EZL9SzUTHG48iFFFkuV26MMl5XSJRCdIcpQZU4oJrghVii05xkQL+4xuS2mBByJHaQdhDD7kVEdJU5MHOZU0Q43S2LEBTjMEH8SKo/wT1+AB425rAh5I/FYwiTH4Wha4a+U0QfIV10QsrDVWRj3YM01FCWYU/A3odW5UMq5xiRI8WAiZUe2mXp9BWd44zaj0OxGvzZ5IcI1cm680z1MWWbntL8oYstkXLuZfMNLGXWliohkquzdypm92PAl5bESiImzRiLhKRJHGZI6ExjHGREgiMRO3GBufKmVKS8aX4AHyIrPZ7ffBg3FwOfwYwE1pvFGikBFO7YZd7SaORpSJXb3SpJjGcdtpMzbVJmnRXkiRtbb1zaYfhuPwn+40HA7Ag8vuaBQOzj+PZxeBGQaTSffcfHvXnfY+gAfd0egi7NXLJ58m0+ASPJgGg+5gCh70g9HF8NNlYAej8bAXTCaf+8H7cBBWe/pBL5yEw8HncfDXLBwHZu1jSxqz5+PhbAQezCbB2ARpeOFCtBX1rTxSKakB5X7AtrPUiJUJDNOYWTHN9D/sOZyISY4RWzBUFsJ1SrY0bQe9Nw6608DmuWvSPRv13bgfXATTZt7D+MdOdUnKlG5gIOyrnRpAJKYWp1o0/GwisbQfDzTTqZlu1ICRqQpjRy+wy5osd2bngisXtBedF4ch25BKVlQRW3AwJqqIIlRqUaTpunXNofTgrNN5ipTI0o0LbShXiWtd80shkcSoKUsVoRJJLsUtM4Rk3EaotpjMRWxVeo+VjFyKeYrZb/ulYzcdI7ey0ktcSSFUEbdw7rRfjd/3yNuzV7/fPEu0zpXfbq9Wq5ZcRCcYMy1kS8hlWy4i82fWPW+RaYISSUbXVWFhRidNyQNaiXJQjOpSX5ltQen8+36l0xXAd0vUfa0tJDsAwdk4JCxGrtlizfhyX7Xds6BFamTQuSi0P08p/woPcNtXuqtFFVlG5f3B0VRQeqA01YX64Vnx8sXByvBhOh0RJ4JEIkayEJLohKlakXEiY5xlhshnnY4HGb1zo9edTmlkmow/wRNO8C5PKXfg3XGHcZI94NY6xrjSlEe/KjNCsiXb1duCbepXIO47j1xZOOucHuZidegazFnu7Z/kluYFr+eO5DyS80jOX0zOl08kJ+UHuBkjZ0dWHll5ZOUvZuXZYVa6l17DPPOjdSEKHh+5dOTSkUvf49LbH55wNN7qtey96x86+Jh5eeSLlEX6p06/0oNXh95Ru5yY9EtDEJRSSCKiqJASY7JKWGolm5fdOiiVE61jCTiWgGMJOFgCSg8y1Im471HbtrROwId2g9iqvakb06VpIaO8rfvWhUzBh43jTum325tEKF36m1xIXbZvTWJuqWR0njo4mseOYzV2UhHR1E4fyqF5YJrjtW89mhU8puQNGQeTKTmnGld0bWNqVDZFv+m86RzuWgqpH5HYHYXEeegQuFUVarGG3gfFusVPEWyb6wqjQjK9nphtLjxzpBKlacZtIaPSZ6WbsVsEXvXlfY2XP/6e2pSbijZ+aNwHdzTLHR/rxnvVGm80e682zT7pVY3Im/LGInYhrE0Vnva9M6lGqZyCTut0H7uj0FIwEllWcFuH+ZKsmE4I3YpWlBZKmyh5kLIIubKmVzck9bIL94R8dBrJactk2sGxLr9LppNi3opE1o7ctvv/81TM2xllvF2pUO1e93I26HdPLsJeMJgEJ6etTkvfaRvRXCidUb5lx8i9++3c6jT83bou+embpAoBGu90O08pswek9XJTcfUKmlwFD/z7a6QbryLcFWw2c6pwJtOyNNPfCpRr8K9uHvhpIRAzZb7H4C9oqnbvmrZdejau+tXPyf+6gTroUt2S52tbMNLCjMCDr7jevhazSEyQxiitte5xz9l0Ym8THrbv3VCVXr2jG0WY60fWNn4YGJ7eV8lRdW8zr67DMhGbzZKuzCUdXYEP13AN9gZQ11codn4DKeXLgi7NeifYfP4Dl+XV8Q== -sidebar_class_name: "patch api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Patch authorization

- - - -Manage the permissions assigned to the authorization. - -## Request - -

Path Parameters

Body

required
    permissions undefined[]
    - -The permissions to add/remove. - -
  • Array [
  • ]
- -The Authorization was patched successfully. - -
- -The Authorization could not be patched. -More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The request to patch the authorization was unauthorized. -More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The request to patch an authorization was denied. -More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The owner was not found. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The request to add or remove permissions to an authorization was in conflict. -More details are provided in the response body. - -
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/pin-clock.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/pin-clock.api.mdx new file mode 100644 index 00000000000..ff19081ac91 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/pin-clock.api.mdx @@ -0,0 +1,55 @@ +--- +id: pin-clock +title: "Pin internal clock (alpha)" +description: "Set a precise, static time for the Zeebe engine’s internal clock." +sidebar_label: "Pin internal clock (alpha)" +hide_title: true +hide_table_of_contents: true +api: eJztV19v2zYQ/yoHPrWYIrld2mV689J0y9AVQeKswOw8nKmzxVYiVZKKYxgC9jX29fZJhiOl2IkdrA8D9pIASUzyeP9+vzsfN8Lj0ol8Kk4rI7+Im0SYhix6ZfR5IXLRKB1PElGQk1Y1fCRycUUeEBpLUjlKwHn0SoJXNcHCWPAlwR9EcwLSS6Xp7z//cqC0J6uxAskq05n+VJIOomEDlINGaU1FAsqDpRqVdoA+iLiGpFooKqIR1AUUhhxo4wGLW9SS0pmeGJAl6iWFOyyZ7BioW+dhTr0VwCUqDSvlS0DQtAryzmPdpDM903mea+NppielckC6aIzSnr1EDVOsmhJhQehbSzcvMmnqxmjS3mWEtlofoZTkXBbk4t+jXtqldfEyRFDjmv1x7fwzSQ9+8H6mlYZFy8JgqSJ05NLg0UyLRFj62pLzP5liLfJNWCpLhci9bSkR0mhP2vMRNk2lZMAz++wYuY1wsqQa+ZNfNyRyYYJ1kYjGMvpekQunQzZ48RD9SUlAd8guMxhKAzVGllCrqlKOpNGF42hWpZLlDgCuNG1VbCFIRTI4weRYkhWJWBhbo49bb49F1yU7EU533LpJhFe+4tuBpBdKX8bMiK6L11xjtIvhvB4dHw4kurZCB64NmC3aqloPJPHmEP0ORpzOtOgScTwaHTY0RLHlGTRosSZPlmlVK+eUXoKxzH/lQNMSvbolTtMToDbWzCuqv9sH96H9MVxESSjIo6oggg7oIArOqeCoppfvT+HH4zc/3LwovW9cnmWr1Sq1C3lEhfLGpsYuM7uQ/MtyL1OYlGRp4DIWhWKbWMGWTkP65JDO3m1g8NPA6X/hYji9Z6zzVunlLldaq/Z61BiuL89BFaS9Wqw5s3umw50FthXrwLlpfT6vUH9hHHtu7Rt9bMW1dY12DWZxwECXCG6O7U4YT3D9+9d7upk1v0wmFxBVgDTF0F+5WUZDHESttKrbWuTHo1EiaryLq7ejUcc6GfFviEQD3TUV6kCtx+EoDbWx1PMnBKa089x4/yNkjFVL9dhuGhrAUOc9id/FiGKVvzlUb2O9/boha40FI2VrufpWpaqCeq71wXbfUiMXn2vtudaea22/1rpE1ORLEybDNjAHfSlykcl+SHRkb8nyRLkRra1ELjaxSro8yzalcb7LN42xvstuGYJbtArnVSQeH8dqGlhSGYlV2D6EFh9orGmI4hTrVhcIJ3B5djWBn9HTCtche2zyoeqT0cnooFYWfULj+OIcYoSRazv1P6jlQj6oNgp/i+Kuu+FEytYqv77iazE9c0JLdtxyxu850NsL2nkdhUTSf3g/MOPXT5MALveuy+34eHaHddNX3nbcGwW+LUzc7iesPY8ZPrIuhjhKX+0z7+I8FJA0dd3q0EX1cpi3t/pk1TrPkSeiUpK0C+4wsDtmP8QT+D1ahFcpoxcpNjTPpfJlO0+lqTMZr93/n1dmnvF7IutNuOx0/Nv1x3fjow/np2cfr86OXqWj1N/5kKXGOF+j3vHjQulH7xd4EYb6l4/D3my/Pp5fSf/nK6mvEE93PmsqVGE4D4zZ9H1rKuTw7I29Zyo2mzk6urZV1/H215bsWuTTm22r4lWXiJKwIBsa3RdaM1Mj6kcTNsviVcvm915gXTLcGEtJjX9C9sGwwR3hvvNeXE+4vvvHX20KvmpxxQ9DXIlczMRMCH7Js6LQOsL+RlSoly0uWT6q5Z9/AGWmkiU= +sidebar_class_name: "put api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Pin internal clock (alpha)

+ + + +Set a precise, static time for the Zeebe engine’s internal clock. +When the clock is pinned, it remains at the specified time and does not advance. +To change the time, the clock must be pinned again with a new timestamp. + +:::note +This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change +in future releases. +::: + +## Request + +

Body

required
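As a minimal sketch, the request body carries the time to pin as epoch milliseconds. The `timestamp` field name is inferred from the response descriptions below and should be verified against the generated schema:

```json
{
  "timestamp": 1718967600000
}
```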
+ +The clock was successfully pinned to the specified time in epoch milliseconds. + +
+
+The required timestamp parameter is missing or negative.
+
+
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/pin-internal-clock-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/pin-internal-clock-alpha.api.mdx deleted file mode 100644 index 9b9de17b8a2..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/pin-internal-clock-alpha.api.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: pin-internal-clock-alpha -title: "Pin internal clock (alpha)" -description: "Set a precise, static time for the Zeebe engine’s internal clock." -sidebar_label: "Pin internal clock (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztV19v2zYQ/yoHPrWYIrld2mV689J0y9AVQeKswOw8nKmzxVYiVZKKYxgC9jX29fZJhiOl2IkdrA/DnmJAsEge73d/fnciN8Lj0ol8Kk4rI7+Im0QU5KRVjVdGi1xckQeExpJUjhJwHr2S4FVNsDAWfEnwB9GcgPRSafr7z78cKO3JaqxAss50pj+VpINomADloFFaU5GA8mCpRqUdoA8iriGpFoqKCIK6gMKQA208YHGLWlI60xMDskS9pLCHJZMdgLp1HubUowAuUWlYKV8CgqZVkHce6yad6ZnO81wbTzM9KZUD0kVjlPZsJWqYYtWUCAtC31q6eZFZWpAlLSkLK0f9ikvr4mWwtsY1Y7t2/pmkBz9YOtNKw6JlYbBUETpyaUCfaZEIS19bcv4nU6xFvglDZakQubctJUIa7Ul7XsKmqZRETlD22XGWNsLJkmrkN79uSOTCBHSRiMaahqxX5MLq4DkPHmZ6UhLQHbLJHHilgRojS6hVVSlH0ujCsTerUslyJ9iuNG1VbMOdimQwgomwJCsSsTC2Rh+n3h6Lrkt2PJzumHWTCK98xbsDIy+UvoyREV0Xt7nGaBfdeT06PuxING2FDlwrJTm3aKtqPRDCm0NUO+hxOtOiS8TxaHQYaPBiyylo0GJNnixTqFbOKb0EY5nryoGmJXp1SxymJ5LaWDOvqP5uP7kP8cdwESWhII+qgph0QAdRcE4FezW9fH8KPx6/+eHmRel94/IsW61WqV3IIyqUNzY1dpnZheSH5V6mMCnJ0sBlLArFmFjBlk5D+OQQzt5s4OSngdP/wsWwes9Y563Sy12utFaJx/1oDNeX56AK0l4t1hzZPeiwZ4FtxTpwblqfzyvUXziPPbf2QR+juLau0a7BLA4AdIngRtjuuPEE179/vaebWfPLZHIBUQVIUwy9lBtjBGInaqVV3dYiPx6NElHjXRy9HY061skZ/wZPNNBdU6EO1HrsjtJQG0s9f4JjSjvPTfY/yoyxaqke46ahAQx13pP4XfQoVvmbQ/U21ttPC1lrLBgpW8vVtypVFdRzrQ/YfUt9rrTnSnuutMOV1iWiJl+aQuSiaQNz0JciF1n4gopEOLK3ZPmIuBGtrUQuNrFKujzLNqVxvss3jbG+y245BbdoFc6rSDxejtU0sKQyEqswfShbvKCxpsGLU6xbXSCcwOXZ1QR+Rk8rXIfoMeRD1Sejk9FBrSz6hMbxxTlEDyPXdup/UMuFfFBtFP4WxV13w4GUrVV+fcXbYnjmhJbsuOWI33OgxwvaeRyFRNK/vB+Y8eunSUgu967L7eHx7A7rpq+87WFvFPi2MHG6P1/tWczpI+uii6P01T7zLs5DAUlT160OXVQvh5P1Vp+sWufZ80RUSpJ2wRxO7A7sh7gCv0dEeJVy9iLFhua5VL5s56k0dSbjtvv/eWXmGd8csh7CZafj364/vhsffTg/Pft4dXb0Kh2l/s6HKDXG+Rr1jh0XSj+6qcCLcKR/+djtzfbz8Xwf+r/uQ301eLrzWVOhCsfwwI5N36OmQg531thnpmKzmaOja1t1HU9/bcmuRT692bYlHnWJKAkLsqGpfaE1szJm+GjCsCxetQy/d9fqkmHHWEpq/BOyDw4WXP33XfbiesK13F/zalPwVosrvgLiSuRiJmZCJMIE8oU2EeY3okK9bHHJ8lEt//4Bv315Tg== -sidebar_class_name: "put api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Pin internal clock (alpha)

- - - -Set a precise, static time for the Zeebe engine’s internal clock. -When the clock is pinned, it remains at the specified time and does not advance. -To change the time, the clock must be pinned again with a new timestamp. - -:::note -This endpoint is an [alpha feature](/reference/alpha-features.md) and may be subject to change -in future releases. -::: - -## Request - -

Body

required
- -The clock was successfully pinned to the specified time in epoch milliseconds. - -
- -The required timestamp parameter is missing or it is negative. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/publish-a-message.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/publish-a-message.api.mdx deleted file mode 100644 index 19adcc96426..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/publish-a-message.api.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: publish-a-message -title: "Publish a message" -description: "Publishes a single message." -sidebar_label: "Publish a message" -hide_title: true -hide_table_of_contents: true -api: eJztWEtz2zYQ/is7OCVTWlJSJ015U22nVZqHxpbbg60DSK5ExCDA4GFZo+F/7yxBmZQoOe5Mb41mNBKIxT6/bwFwwxxfWhbfsE9oLV8im0csQ5saUTqhFYvZ1CdS2BwtcLBCLSVCEWQHt6pZZYEbhLKRzMBpsCWmYiFSKLlxgnRZSHVReocZLIwuwOUoDKTaGJScBOAO13Zwq2Y5Aqqs1EI5yDRaUNrBigsHC22A76wxaL10g1t1bZFUbp3bEXrURuutT3PwFiHlFskei5jBbx6t+01naxZv6qEwmLHYGY8RS7VyqBxN8bKUIq3VDr9aytCG2TTHgtM/ty6RxUwnXzF1LGKl0SUaJ9DSrOIF0u9ugilcmgG96AYwYNFWnXVGqCWryJHHoP7E9WFdexn9rloq+IJ76VjMyIYTBc70R3F/yFdRoC25ghdCQWFfUqUTv1ig2cm9VvUwMfoOTceiUA6XaFjEFtoU3IVHb0+7PoyqiDV6JtnhCL0S3zzC5Hw/Nvii5Jpqu/CSXENlvSF35Bq0av1bCZfXC5fiHhVMzm/VSkgJeI8Gki6SX2SekgTCWZBigZSclwEz+1lUXkqeSAygqSJ2z42gB/ZwFFtnHsWAW/hw9eUzZDr1BSpHmeNZVrOHy2kHSwGW+2jreeBQceWOpTHM7uUQLKpsp2jHAqyiDk9uArh7CJ0Tnhyt2XaYup8EAl0G1rGqCspsqZUN6Xo9Gj2dtRW3bZkG7F9w9LjODltaBPR5c4TdjeBRXva5+AxiPLeIfS7020f1dC1C9ptinB7Lf2n0vcgwg4w7DiL05nsuxVNFKI1OJBY/fa8YY5gGScjQcSEhpJqIEQQTzEAouLl8fwa/nr75Zf4id6608XC4Wq0GZpGeYCacNgNtlkOzSOlLci8HMMvRIBR8TQRvWQVtEds9y+kAgsYZyuMO548gIMxuesR5rKk3gu3vr2O4vpyAyFA5sVhTr+mZ3mnRPNHexYnk6o61Fe0b3bdifVFw0wJ8x0AVMeu4850wjmDy59c93QSLP2azKQQVkOoM653W5cJuDVEQhVCi8AWLT0ejiBX8IYzejqjnh4o/IxIF+FBKrsIOtxcObUvaYIOfOjChrOMq/a8qo41Yin27gx12NSA+DxEFQr05RKiJcmgIhBYNbT1ojDY/ePSDRz94dJhH9dnQ5TpjMSu1raHDXc5iNmz2PTss202NRSwwi644G+aNZDHbBNZU8XC4ybV1VbwptXHV8J5KsnNoo+nAri1qpE65zIPlfvVoonuUP+OFVxmHd3B5cTWD37nDFV/X2SSTu6rfjd6NDmol0SMax9NJ0zsC9jr9YKuWiH1QbRB+juKqmlMiU2+EW1/RspCeBLlBM/ZUgEdMNPZq7TQOQixq/rzfIuXD37O62NTLLtvr18UDL8rAxHBdakG2f/lpZ7pXltHO9aGV6RZ251TVuV4JtdB1KA0A+0khRWhsyOJo8KoP9umk5myqi8KrGodqGS4cvJPkVHrrwjFbihTp4NVGvBX7GGbgr2ARXg0IIAHF2369FC73ySDVxTANyx5/E6mTYcGFGjYm7PBs/On68/n45OPk7OLz1cXJq8Fo4B5cXQjiU8FVx4/m6g+8c2DdiXbTblT/j/cEDcYdPrhhKblQhJq6IJumEd1s0WepN3Va0Txq2skN22wSbvHayKqix988mjWLb+YtSGlURSxHnqGpe9ddjfizkO6TGflB4tKTP70LTxVtV4zTFEv3pOy801SnX65mxNXmRUihM1pj+IpekvAVi9ktu2UsYrqGQN0G6ucbJrlaegJJzIJe+vwDhDJj0A== -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Publish a message

- - - -Publishes a single message. -Messages are published to specific partitions computed from their correlation keys. -The endpoint does not wait for a correlation result. -Use the message correlation endpoint for such use cases. - -## Request - -

Body

required
    variables objectnullable
    - -The message variables as JSON document. - -
- -The message was published. - -
Schema
- -The provided data is not valid. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Internal server error. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/publish-message.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/publish-message.api.mdx new file mode 100644 index 00000000000..f57ef229677 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/publish-message.api.mdx @@ -0,0 +1,55 @@ +--- +id: publish-message +title: "Publish message" +description: "Publishes a single message." +sidebar_label: "Publish message" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/is7OCVTWlJSJ015U22ndZqHxpbbg60DSK5ExCDA4CFZo+F/7yxBmZQoOe5Mb41mNBKJxT6/b/HYMMcXlsW37BNayxfIZhHTJRruhFaXGYtZ6RMpbL4dj1iGNjWiJAEWs0kYRgscrFALiVAE0cGdaiZZ4AahUYQZOA22xFTMRQolN06QLgupLkrvMIO50QW4HIWBVBuDsvYG7nFtB3dqmiOgykotlINMowWlHay4cDDXBvjOHIPWSze4UzcWSeXWuR2hR2003/o0B28RUm6R7LGIGfzm0brfdLZm8aZ+FAYzFjvjMWKpVg6VoyFellKktdrhV0sZ2jCb5lhw+ufWJbKY6eQrpo5FrDSUayfQ0qjiBdLvboIpXBoBPe8GMGDRVp11RqgFq8iRx6D+xPVhXXsZ/a5aKvice+lYzMiGEwVO9UexPOSrKNCWXMELoaCwL6nSiZ/P0ezkXqv6MTH6Hk3HolAOF2hYxObaFNyFV29Puz6Mqog1egiehyL0SnzzCJfn+7HBFyXXVNu5l+QaKusNuSPXoFXr30q4vJ64EEtUcHl+p1ZCSsAlGki6SH6ReUoSCGdBijlScl4GzOxnUXkpeSIxgKaK2JIbQS/s4Si2zjyKAbfw4frLZ8h06gtUjjLHs6xmD5eTDpYCLPfR1vPAoeLKHUtjGN3LIVhU2U7RjgVYRR2e3AZw9xA6Izw5mrNtQHU/CQS6CqxjVRWU2VIrG9L1ejR6OmsrbtsyDdi/4OhxnR22tAjo8+YIuxvBo7zsc/EZxHhuEftc6LeP6ulahOw3xTg9lv/S6KXIMIOMOw4i9OYll+KpIpRGJxKLn75XjDFMgiRk6LiQEFJNxAiCCWYgFNxevT+DX0/f/DJ7kTtX2ng4XK1WAzNPTzATTpuBNouhmaf0JbmXA5jmaBAKviaCt6yCtojtmuV0AEHjDOVxh/NHEBBGNz3iPNbUG9FbX8dwc3UJIkPlxHxNvaZneqdF80R7FyeSq3vWVrRvdN+K9UXBTQvwHQNVxKzjznfCOILJn1/3dBMs/phOJxBUQKozrFdalwu7NURBFEKJwhcsPh2NIlbwh/D0dkQ9P1T8GZEowIdSchVWuL1waFnSBhv81IEJZR1X6X9VGW3EQuzbHeywqwHxeYgoEOrNIUKNFVCWDeEQjdEGdJp6YzCDVS5krT5Fa7e2m21KwOIPrv3g2g+u9blW7x9druuzjbY1dLjLWcyGzdpoh2W78LGIWTRLNHRK2jBvJIvZJrCmiofDTa6tq+JNqY2rhksqyc7GjoYDu7aokTrlMg+W+9Wjge52/4wXXmUc3sHVxfUUfucOV3xdZ5NM7qp+N3o3OqiVRI9oHE8uIUQYsNfpB1u1ROyDaoPwcxRX1YwSmXoj3PqapoX0JMgNmrGnAjxiorFXa6fnIMSi5s/7LVI+/D2ti0297Ko9ol088KIMTAxHqhZk+wekdqR7rBntHDFamW5hd3ZenSOYUHNdh9IAsJ8UUoTGhiyOBq/6YJ9c1pxNdVF4VeNQLcKhhHeSnEpvXdiKS5Eibc7aiLdiH8MI/BUswqsBASSgeNuvF8LlPhmkuhimYdrjbyJ1Miy4UMPGhB2ejT/dfD4fn3y8PLv4fH1x8mowGrgHVxeC+FRw1fGjuR7obGl3Yt20y9T/4yahQbjDBzcsJReKMFOXY9O0odst9ix1pk4jmkVNM7llm03CLd4YWVX0+ptHs2bx7ayFKD1VEcuRZ2jqznVf4/0spPtkSn6QuPTkT+9IVEXbGeM0xdI9KTvrtNTJl+spMbW5Kil0RnMMX9E1Cl+xmN2xO8bolqmuVX2fQu83THK18ASSmAW99PkH3OV8EA== +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Publish message

+ + + +Publishes a single message. +Messages are published to specific partitions computed from their correlation keys. +The endpoint does not wait for a correlation result. +Use the message correlation endpoint for such use cases. + +## Request + +

Body

required
    variables objectnullable
    + +The message variables as JSON document. + +
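For illustration, a publish request could combine a message name, a correlation key, and the variables document described above. Only `variables` is documented here; the `name` and `correlationKey` field names and all values are assumptions to be checked against the full schema.

```json
{
  "name": "payment-received",
  "correlationKey": "order-4711",
  "variables": {
    "amount": 99.95,
    "currency": "EUR"
  }
}
```

Because the target partition is computed from the correlation key, messages that share a key are always published to the same partition.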
+ +The message was published. + +
Schema
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-decision-definitions-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-decision-definitions-alpha.api.mdx deleted file mode 100644 index 41d5c79fd99..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-decision-definitions-alpha.api.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: query-decision-definitions-alpha -title: "Query decision definitions (alpha)" -description: "Search for decision definitions based on given criteria." -sidebar_label: "Query decision definitions (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWUtz2zYQ/is76CWZUo+kSZrypthO6zYP11bSg+zDilyKaECAAUDLGg3/ewcAqbcsOeOZXuwZjw1isdhd7PcBWMyZxYlh8YidUsINVxJSyrjklivJbiKWkkk0L30zZleEOskhUxrSbXkDYzSUgpIw4bckIdHckubYvZbXMo5jqSxdy2HODZBMS8WlBW4AJaAoc4SM0FaaAGUKUlkgiWMRFJ5gUckUIRGVsaQNqMqCysDmBGN1172WV0S+NWpF38Ll2dUQBhfnoG5J33Ka3jzrYclNxyolTC8Jgh0seUeTsVsfOu24bpH+9L0iPXNdz6+lC0BWaZuTC4RFLkzXe3gtWcQ0fa/I2HcqnbF47ptcU8riDIWhiCVKWpLW9WFZCp6gi17vX+NiPGcmyalA3yvE54zFozmzs5JYzNT4X0osi1ipVUnacjJ+hNJeWyOFWuOMRYxbKsxD9GScRLqiyFjN5YTVEVM6Jb3d4/Ijw0pYN6tJWF1HK+6OGoU3EbPcClok0N8ulFdK28sQKVbfuJElTuhB5mpVrNjEpaUJaRaxTOkCbfj0y0tnv+AFt0fKGm/jILNrHm8Fdd22ejHwHWVK00NG1jvjc4ETWsYn2jHfjkH7B2yvtWgcPDbcLeJPF4D/i2YHY/rmFdtkkWFOgMbwiaQUvtEsgmnOkxwwsQbQAEIl+feKgKckLc84aU851hHHDt7pulXbtu58RyrvMuX04yfgaUsm+/RLLOh4fU76kMZb0oYHyB/Kynsj2Og5NF37/TLgsyBpzTEhsvtDpFdUwURjmYPN0e4zw3F9idrx9l6LHimlHttUSxKlPTajgvTBpFqB8OlW9r73+FwnyP3SuyigDmxsSiVNQPDLfj8AeStlW/uWGqHZ602VJGRMVglodXX9Lvfou9hD6d8qi+J8g1T3ZkwdsYxrY9228xVFReZh5C7wR8feR+8hoA/h970jNsOzac8BO5/Y/ontn9j+/2J7R2PrVHGA41sa8GNe/QCvez2QIReUduGj0tTeZAA1QanVLU8pBS69Ty33w1ils3s3gFKrsaDi5+2NYN2+AVwEyWZeCETkKCEIjsPso8v3J/Dbq9e/3jzLrS1N3OtNp9OuzpIOpdwq3VV60tNZ4n6d3PMuDHPSBAXOYEyAaeo9RwFLigNTUsIznoBV3sHGbHDrHfw7sPv43u38WKRupflWugzgy+V5S3QzLifbU6/fqcaqsvFYoPzGlrlxKCkHYKqiQL3AxfoE7qZi0VaHt809XPTHcHgBQQUkKqUlXTcTOScKLnlRFSx+1e9HrMC70HrT73sucCt+hCcS6K4UKH1qbbrDJRTLvPWOcWksyuSxVkZpPuGb865juUni0+BRwOPrXXgcSHBR1i4PSWulQSVJpTWlbkMUXr07abVzN1WEJ6w9Ye0Ja/uwVkesIJurlMWsVManDtqcxazX7sKdlQJlL5RomKvVaHeS8teMSgsWs3kATx33evNcGVvH81JpW/du3crcouauGOkX0nUHkLXJI1SCIg8GbC+i61g9I25UKH9HS1Oc+aCWTSlvqfpt/21/p1Ynukejq3kGD0MKrtBCq9bhe6faIHyM4tpdQgwlleZ2duWGhfCMCTXpQeXWYZEazXxeu2sHIRY1/7xvE+bPf4Z+zR2lXS4LqWd3WJQBkMvK1Z47Sn/f/WCZn+F8v2wvTuf9/UfnVeDsOcr2V8+OK+XTUKAdLSqsS1VNYTVUT2+WNdBQ2+wvKpf9jbrkaB6Cv1pxdN9qD8xM+cg3sNlew1WPWb/7YhuiF+eeaRJVFJX0242cwJTbHHAlJ5pqvOMgwRNyJ9J43ga3FfsQeuBrc3N50XX5HEDX7jITbvNq3E1U0RbhF3/HQo17BXLZa6YwvZPBxy+fTgedD+cnZ5+uzjovuv2uvQuXZscCBcoVO8JJd+d7xTP/7vB80/35cr99evI46smjAbmlO9srBXLpst4v8bwh5BHbRciszWH3SBBodcTmcxfUL1rUtfvsrWDx6GbJwq5VRywnTAMY2DcHP3YSlq0zdOY4cVH50sdmfaqO2hGDJKHS3it7s7LHXHy+GjrOap52CpW6MRqn7tkHpyxm1+yaOVyXwT/3/uO+z5lAOak8ulnQ637+A/PjmqM= -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query decision definitions (alpha)

- - - -Search for decision definitions based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
- -The Decision Definition Search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The Decision Definition Search Query failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-decision-instances-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-decision-instances-alpha.api.mdx deleted file mode 100644 index dcd42465390..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-decision-instances-alpha.api.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: query-decision-instances-alpha -title: "Query decision instances (alpha)" -description: "Search for decision instances based on given criteria." -sidebar_label: "Query decision instances (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWW1v2zYQ/isE96XF5Jd2bdfpmxs7m9c0yWKnHWAHBS2dLLYUqZKUHcPQfx9ISrZsy29D0E8JECSSeHe8l+fuyFtiTaYK+yPchYAqKjiiXGnCA8APHg5BBZKmmgqOfTwAIoMYRUKicHu1QhOiIESCoymdAUeBpBokJc0xH3Pf97nQMObDmCoEPEwF5RpRhQhHhKUxQREQnUlAhIeIC42AkwlzDC9IkvGQoIBlSoNUSGQaiQjpGNBEPDbHfABgn0bl0vforjcYos5tH4kZyBmF+cOLFkmpamghmGoFbmGDpLQhQemdF42SrpmEv/zIQC7Mp5djbtSPMqljMGbQhDLVtBqOOfawhB8ZKP1BhAvsL+0jlRBiPyJMgYcDwTVwbb6RNGU0IMa4rW/KWHiJVRBDQuxXxm4i7I+WWC9SwD4Wk28QaOzhVIoUpKagLIWQlluxikhJFtjDVEOizuETUWBhhZHSkvIpzj0sZAhy94uJjohkTBupKsB57lXUHRUMHzysqWawCp9/jCkHQuo7ZymcPxjKlEzhrO1KkVT2RLmGKUjs4UjIhGj36rfXZv+MJlSfuFbZPXYivaHxjlE395avCD9AJCScQ5nX2ueWTGFtH69GXg3RfoJdX7NCwVPNXeK9X8D9IyyOWvTdG7ydQYYxoO+wKMG7k0Wa1geaaHBCt2ntpwPUHgaeJSb6ep87V/edYa+LPXzZ6V/Zf+6vB7e9i/5lv3j6eH3z5boaot0tNQdGXs+wzD0MM8IyC9dLQlm24ecKJrb3vCZDkaM7rH4qRQBKdSGinBqyp7F1wRaFK75VaU/r11LWhl6ltk+tWMl3j5B+TVKr49jvlgy7n66PMb0myYne5ySBMxh/BqmoKwXHslWdtJkjP9E0QyuhDmdG9i6TNbq6vYv+oH9z/XXY+XDVwx6+6g97d52rr71/b+96A/PpHLhtbqnEmwZOuD7Vf251xY116Mr3Q/3S5sTNorQ3LdQk3dzVP5UKrlzOfN1u11t3Z2fIVQ+kssDgJsoYKjk1bVfx5F3DueVWC01Yf6uI7UVs7uGISqVNmf9MWAbqvGLKyP+lPVROnUHPqad7KbbNs72fI/t8rq6Hqmu32OA25lfah0RDQ9MEjtXb8LA+zzX9uaY/1/SfWtMlKHtiPMXMbu1hYP3cHsEUwM0ic7AzKMuHpXhzdjdgLx1sfoGwiT4JCeV9AyLSAnJGQwgR5VaXsmNAExEuDrYNqRQTBsmvu+3D5u466NatLOQiV74QMRnHLJw46aO7ywv0x5u3vz+8iLVOld9qzefzpoyCBoRUC9kUctqSUWB+zbqXTTSMQQJKyAJNAJEwtJFCGFoXRqRSCGhEA6RFmYHsZoyfnX5HepYCCnvrSCbpTph00P1dH9EQuKbRgvLprujNm4+JyLQ/YYR/x+u4OBaMHaSyJCGyml0rAooynR1vtvbkjL+Gw1vkWKBAhGDv7LS5eysEGSUSymlisP+m3fZwQh7d07t222YW4/ETNOEIHlNGuKuGW+pQjpJ13FrFVteLT+MZIemUbsvdRHERxF2nkUPj2zo0dgwGNUgThyClkEgEQSYlhGgeU7aqgaXs4q7vGWvPWHvG2j6s5R5OQMcixD5OhbKhQ3SMfdwqK19jNURoudKHzX2qNI2PPZpmkmEfLx10cr/VWsZC6dxfpkLqvDUzfpkRSc3AwLrRfHYQK0OHiYCw2InfdaH5UG3qtqYIfxINc7JwvXVx3b5m/b79vl3L1Szdw9HMJZyGLgArSaFka9Bdy9YtPoVxbg6uCoJMUr0YGDJnngkQCbKTGS+sAqOQZ7mbZ7cIe8U/l2W4/P1laD1uEtrdetjReyRJ6uC4vl2uPde2a09d68CsPyu164817b1nkNovplGsoqv+DHBoxaqZb1ebz8q0xM1jRquByppZMUdxw5KH9cjDjTLaq0FFe2sMMVo6P1YHDOZdbhEeCevEAn+74WCwUe4Zt5uvdrF+27cpKxBJknFbt/gUzamOEamEVzF8M8mM0QBMY+svMXcGK5dduS+osBJ61TTQcPgty9WU6jibNAORlDO31d8JE5NWQihvFSJU66Lz6f6622lc9S9614Ne41Wz3dSP7s7GpJOE8Mo+bONdN5x8YYeML7eVX67L9vN084TpZpErNDzqVsoI5SbirXuXRVYf4d2sjsvoNedEl5tHeLk0Jr2XLM/Na7sH7I8e1qncPOUejoGEDgb4uwE2vnAua9ijrlnOMnvntn0xmnslRScIINUH1z5UytTtzWBoEl8xw01EaGgkmZv5LpljH4/xGBtE2zCyOdW+X2JG+DSzuMaOr/n5DwV54SE= -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; 
-import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query decision instances (alpha)

- - - -Search for decision instances based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
- -The decision instance search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The decision instance search query failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-decision-requirements-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-decision-requirements-alpha.api.mdx deleted file mode 100644 index d109b49eaa4..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-decision-requirements-alpha.api.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: query-decision-requirements-alpha -title: "Query decision requirements (alpha)" -description: "Search for decision requirements based on given criteria." -sidebar_label: "Query decision requirements (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWN9v2zYQ/lcI7qUFZMntiqHQW5qkQ7Y2zeK0e7D9QEsniy1FqiRlxxD0vw9HSrZiy3FSDBgwNC+OxPvF7+4+8lRTy5aGxlN6AQk3XEmi4XvFNRQgraHzgKZgEs1Ly5WkMZ0A00lOMqVJOqRBFsxASpQkS74CSRLNLWjOwpmcyTiOpbIwk3c5NwRkWiouLeGGMEmYKHNGMmC20kCYTIlUloBkC+ENnrOikikjiaiMBW2IqixRGbE5kIW6D2dyAuCepp3oW3J7ObkjZzdXRK1Arzis5y8iVnIzskoJEyVecMRKPtJg7MGLUacXFukv3yvQG1x6OZOIQFZpmwMiYRkXJnQ7nEkaUIQEjH2n0g2Na9oilNI4Y8JAQBMlLUiLa6wsBU8Y4ht9NQhyTU2SQ8HcqhCfMhpPa2o3JdCYqsVXSCwNaKlVCdpyME5DaWetlWJasw0NKLdQmOfYyTiItGfIWM3lkjYBVToFfbiCBZKxSlj0ahLaNEFvu9PW4DygllsB2wr6C6GcKG1vPVK0maNmyZbwrHC1KnoxcWlhCZoGNFO6YNa/+vU1xi94we0TZY2L8SyzD3Z8AOrD2Jqt4jvIlIbnaDaD+NywJezwCQb8DSgdVzjMtWg3+FS4u5a/7XX8n7A5iepvb+g+kdzlQJgxfCkhJd9gE5B1zpOcsMQawgxhpJL8ewWEpyAtzzhoxzoWqWOQekLMnGQFDBfpvvOLj9cEpTsKOW5zBdpw35inaufRXbZ2TjscgvlqoCv3vdl2Wzw97cOCZNI+xSruwUuTq4tThns1eTGwi/eu5B72/GPyQ3XdeIoxpZLGl+Xr8dhX537Yw0eUb1JiqiQBY7JKkM5a6Mj7Xyfn57KaVZaJqz2uONpZTUAzro1FNv3CRAXmeZwl2I/qPsZaHtDn0NZRjX149uM5EedPEvvfkpgGoyqdwPWTAeuD1WkTvEW0mTueGbJmhpRMG0j/QwJFVnjYeSdps+srp/Xmh6jSXXxJxriANCQflYbuzkuYBlJqteIppITLDljnkyxUukFORc8F2FylNKalMq4pmc1pTKPO8ajvOPKOKd6oNJacY81KCxrT2hNwE0dRnStjm7gulbZNtMLSXTHNcWRwrY7LfrvdFVWohIncR3AIAi70K2RvjvidWVizjUt/2V64d6bfjt+OB62i6BGLOJn4HTqju6NlZza3thw064WfYrhBTjWQVJrbzQTVPDwLYBr0WYWJ2BZx689Zx2cvRIP2n/cdV/zx953LKx6Lt7tx5/KeFaWAh/fLo5Q77shu1ztbohof55Cd9K4Le6OKH4am22lmJ94OMX5Sme/mDT9HjLdTwnhvBpjWHsL+7R7fIQBcZsrh1zbkYSb6u6Lj8NVBOjFbeC4kqigq6S4ccknW3OaE9TLbTr4hxTgTwK6O6w7ATuyDXyFfWqJ+FWJV+tbB3Jo4ipbc5tUiTFTRDbzb34VQi6hgXEatCxOdn338fH1xNvpwdX55PbkcvQrHob33Jzk2c8FkLw7HOkfo5IUb8l/u77/eXbl+fmB42geGtlkt3NuoFIxLrHuX5Lpl1ikdZFbalTHO5J4fp7SuEdXPWjQNvnZh0Hg639EpPjUBzYGlvh/oN+xfeu4TN7rDeFBcVO5Ktn9vboJO4yxJoLSPys57p8XNp8kdkk/7JaVQKepotsavLGxNYzqjM4qt7YrJ8Zp7X1PB5LJyDU69Xfz7B/wspJU= -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query decision requirements (alpha)

- - - -Search for decision requirements based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
- -The decision requirements search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The decision requirements search query failed. More details are provided in the response body. - -
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-flow-node-instances-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-flow-node-instances-alpha.api.mdx deleted file mode 100644 index 341e4fc1455..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-flow-node-instances-alpha.api.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: query-flow-node-instances-alpha -title: "Query flow node instances (alpha)" -description: "Search for flow node instances based on given criteria." -sidebar_label: "Query flow node instances (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWu2P2jga/1cs34dtdRmg3e5ej28ZJrObLWVYEqa3mhmNTPKEeJvYqe0MgxD/+8l2AgFCC93qdKdrpWrw2/Pi5+1nOyusyFzi/h2+zvgCMR4D8plUhEWAHxwcg4wELRTlDPdxAEREKUq4QMlmOq2mSzQjEmLEGZrTJ2AoElSBoKRzz+5Zv99nXME9C1MqEbC44JQpRCUiDJGsSAlKgKhSACIsRowrBIzMMktwQPKSxQRFWSkVCIl4qRBPkEoBzfhz554FAKZ1V099iyZeECJ37CP+BOKJwuLhRZcUVF4ozjPZjezEC1LQCwFSHXRc1Os6efy3TyWIpR56ec+M/qVQKQgUgyI0kx2j4T3DDhbwqQSpLnm8xP2VaVIBMe4nJJPg4IgzBUzpMVIUGY2I3t3un1Jv8QrLKIWc6F+7e399sOFIWnNUDLGDSZbdJLh/t8JqWQDuYz77EyI9UghegFAUpOHBheFfzSJCkCV2MFWQm/FT6SQUsrhBSCpB2RyvHcxFDOJwRDtUQspMaa4ywuu109igu4rgg4MVVRlsPO53vfkBF2pSqbp+0CsLMoezxBU8b8hEmYI5COzghIucKNv142stf0Zzqk6ca83gJmpH44NN3ZVtvVl4CQkXcM7Kdev+jMkctvvjtPBrWXR8waGts0rBk7c744sRj6HOJ+9g2bqju34epoCIlHTOIEYfYemgRUqjFJFISUQkIqhk9FMJiMbAFE0oCJOQlM4rh1mps2eyn99okxWCRyDl+ZJVC7cx+BGWWlweUaIgRop/lSBXkFBGNZ/zRYk3a7+5MH58mIeOsKbx6ZwPsoVURMEhq0B36zR/SEZ7guEOMZJgasETyUqQh/QdDKzMdXZxB6F/62EHD27ej4de6F1hB4fe5L0/cnXjYV2vPVB6WewJ8jX8p6Ng7A38a98wHk9uBl4QYAcH08vHbcu79Ubh425fELqT8NGMYAf7Iy20d+W7ofc4cMPBr+1D4a+Tmw+bocub6ejKnfyx6fBGV5vfgTe59QfeY+gG77CDJ97A8283zWngTerf793R1B3WreqP96/BcBroFb+4offB/cOIctg3difucOgNG11W3Us38K4avYH3+9QbDbzH6+HNB811Ogz9R38UhK7uvby50rMG7nD4aKzqh7p9OQ38kRcEj5PpcCN8MJj443DT0lrXao3ejW4+jIzdN8mqpZ61RV/DJePP558G8RHJ4VzyjOSWiBIAY6LS0wgURKXaKz/CUiJd+A5T1/EgRRIKIkwkz5boh+4PRoCUSJ9FJu02ZJhxngFhB0IEKV9ItEjBwKRjjFJiACCt6CIBWZVADMu6//SsqFMgT7YEaYIMqGj0SKREeSQFKmCEqWN5z44iGrdksUZ9vd4rfNemdO5il2Nz22rz2sIkWXAmbWl93eu1S3gcJcoy0uZPygzVpDoGr56MR0+t++fCMsUVyfw9sNMOuKyREiqk0nDw1qTc80BXRr527edgl93Qc3DX0RX727Mvzxfk/J9HYd8adJ3gUMdQ2LcHXV8jzH8QhQl11YrEdK/O5ay1WOh10JITG9rFRMGFojloRsDir2CjlZTpOXx2a/pfqeEtu9Usx0fKb3vZzYDElM3Ptc53YPr/CUz/i85GewDwG+C9DvITg8YQySRvjryDpUZqheBPNN4J+hpwHsLDXYFqUfcT9IKq9C+m6BNRYn1DudH2LNSoEdEu6vgsVqzxhFnx5gx8WF3qGjIoITSDuIPecwH13SYiAjaGQJQZnWoMiWY8Xn4WSBaCzzLI//6lC04Xje3Mii+ygMZ6s544s9zvJtcD9M83P/3j4UWqVCH73e5iseiIJLqAmCouOlzMuyKJ9H8972UHhSkIQDlZohkgEsemWpIMbaESkgVENKGRzctGYSOMNpjV7wsotsrPR0tTKegBzHLRdOLXGGppqsI+69070xkvVX+WEfYRbx3jS+dBF8kyz4lY1g65y6BKMuWX4fePr1uB4q9hOEaWBIq0b21qacVIK5FTRnOda970eg7OybNt/dzrrTVNbfETNGEInouMMONa++pQhvKt31YHyOot49tYhgs6p/t8OzthXDnxldXIhuNPbeHo6mSnQGg/BCG4QDyKSiF0ikpptsF3Ne/qmv97rH2Pte+xdizW1g7OQaU8xn1ccPMoVhiQjru62uvKd7F5sOzaqxGsX2LEEwhpLitKkeE+XtnQWfe73VXKpVr3VwUXat190nZ5IoLqx0ljRj1sQ6x2nYxHJEst+0MT6gF9q1ertvdi+QtRsCBLs6VF9VC3Jf2297bXfuHHhTpCUb+BWg2tAzaSQk1WR3crWTv5FMJrfZUhISoFVctAL7PbMwMiQLjlzs1lxc9QN6DOTMJO9eO6dpffPoTG4jqhTbYPq94zyQsbjtt3qdabjl77NUPv2KG/bUADva0rV4B8i5grlXYPUc2zZyMMdi6Ct/3bw+S2bwdta4y8h3d7TRzaPMYbn7nbvM5uKVaPsvbl9WH7fmrfRXubV8/e3pvm3cqatvlaqfvWJugTbuxaheShh+hwASGtS/U6rw7Df+ybLBbxPC+ZKWVsbmE6aXhc9fav81tGI9Bgt7/CzO5kPW1oR9Ct5YhedXS02JCuK9icqrScdSKe10/+m7+zjM+6OaGsW7GQ3YH7Xh9PL4b+wBsF3sWrTq+jnu3Fns4wOWENOSoU3fJxxAvzkcPLfe1X21L+/fOKUz6vqKJNwbPqFhmh5iBoDLyqUv0dPkz1uPZf/XGBTdh3eLXSWzoV2Xqtu40MuH/3sM3vuqUPvkBiGwj4ow49PLA2u9DXL6YcZKW5mt2/P1879Qo3iqBQn5370Khd45tA337Mqo9Ich7rNYIs9AcmZIH7+B7fYx3Txo9MojX9K5wRNi9NZGNLV//7NwE5QRA= 
-sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query flow node instances (alpha)

- - - -Search for flow node instances based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
- -The Flow node instance search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The Flow node instance Search Query failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-incidents-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-incidents-alpha.api.mdx deleted file mode 100644 index b1dfdf09ad1..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-incidents-alpha.api.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -id: query-incidents-alpha -title: "Query incidents (alpha)" -description: "Search for incidents based on given criteria." -sidebar_label: "Query incidents (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWllz2zgS/iso7EtSSx0+ksnyTZFoD2dsSivJnqmRXSqIbIpISIABQMtalf77FgBSt2N5k5etkatcNnF0NxpfH2z2AisyldgdYZ+FNAKm8KODI5ChoLminGEXD4CIMEExF4iWiySaEAkR4gxN6RMwFAqqQFBSf2APzHVdxhU8sGFCJQIW5ZwyhahEhCGS5glBMRBVCECERYhxhYCRSWoJtklWsIigMC2kAiERLxTiMVIJoAl/rj+wAYB5GlVLP6G+NxiiVs9H/AnEE4XZ47sGyamsKc5T2QjtwhrJaU2AVHsDtWpfPYv+8a0AMddT7x+YPnVcCJWAQBEoQlNZNyd8YNjBAr4VINVnHs2xuzCPVECE3ZikEhwccqa0Tt0FJnme0pBonTa+SK3YBZZhAhkxs2najbE7WmA1zwG7mE++QKiwg3PBcxCKgjQ7uDDUylVECDLHDqYKMvkWOjGFNNogJJWgbIqXDuYiArE/o0ERkyJVmqsM8XLpbBx3VBJ8dLCiKoUVav6tVTngQvWtpvDyUe/MyRTeJK7g2YZMlCmYgsAOjrnIiLJDF+da/pRmVB25VhoZW7HaOvGeUrdlW642foaYC3jLzuVB/fTIFNb6cQ7wO7Dp5Q37d52WBzxW3ZWZ/w7zVzX58RLvOoxhAohISacMIvQV5g6aJTRMEAmVREQiggpGvxWADBMaUxDGuyjtLiredX1BueAhSNmBmDKqqf+ARCUtFK2Iadm0oDykREGEFD9GBP+A3RzJj0bHsvOZVISF8BPOS0tSR50WhOBiaLi9dsYqYCCzB+nFaEZVgog9MkRIgvHcTyQtQNaxg4EVmfYWd8Gg57X9K9/rYAffBb8H3T8C7GC/O75t9Xp+cD32+v1uHzv4t+7ncdAd971h3/cG2MHtbtDxh343WC3x/hz2W+3h+L51c+etRtutmxuvM/ZuvFsvGK6G74JfW0HHzOiRsXfvBUPs4FtvMGhde+OB/5c39v5se17HCFeS6Xhtf7DJdD2g2ba2BLrq9m/HQXc4vureBR38WCn2FqQsPd/3desZlWZ2eWk9dsUEpIl9VumUoYwLKCOTucE45bOAR3AsTPV6xHgEx6BzRfzH4bnB9y0ADQWYGDqk2RF67BAFGoEVCVRtr28KGREFNaUJateuiDqC8kCVpLdEPNoAWu2hf+9p2PnX/dbQIK3vDbo39+bfnhd0/ODaIOcLn/yAmr/wiXXBNEbwTKWSzqaOjbz7WlYCoEdUcqSnIypBOkTv+5wnSrYQJk3Gd/juJUqBRJRND9+8AkaYsrDel8HOagyXqeJ6r7Ob5GwE1MqJXZkYuZ2k7K45FHyXNg+SOWfSxs7zZvOwhCuM2OQBySLUuoqLFFUE6iap/OlJ41uzLcUVSf2dHOZF1Gm3QIVUOsu7N0B/Wy6Vkv917/eyKavQt6RTL+7Yy4525HlFzlNydUquTsnVKbk6JVf/X8nVT8mlDkXLv11ypVOJ7XB9MKWqArBZeXlsGmWKdSgmNIWojm7XBisREcZRP9EIIm3NWvIq1UITHs11vmV4ne3zumOkUAkX9D8QvZyU5YJPUsj+uZ+cbVNroZ5dWQqHbHKgA3bleoyIo/5VG/3r8sMvj+8SpXLpNhqz2awu4rAGEVVc1LmYNkQc6l+97n0dDRMQgDIyRxNAJIpMOCQpWqcdSOYQ0piG9vKNVoww+u5s0vlKRvhC0FqBuxB0D70tdNf3qzRkbqC3y3q7rDjhhXInKWFf8Rorr9lIC8kiy4iYV9jcZlA6neL1VPbi/KD9/Toc9pAlgUJtVatkqmSkD5FRRjPtfC6bTQdn5Nk+fWw2l5qmvvEjTsIQPOcpYQZau8c5EI0q+/5JN8MFndJdvvUtiy5B3LEnqiz1Yh/vV1xMaBTBd95nTqZzMp2/velc7uM94ArFvGCnqHMynZPpvGQ6Hw7lhz5TIDQIJYgnEPY97mRGJzM6mdFBM1o6OAOV8Ai7OOfSQMe8mOLGqtOjYd+zsP7orY1KmgJyIVLs4oW1mKXbaCwSLtXSXeRcqGXjSV/HExFUd3WY29PT1rIqxKQ8JGliue7fnJ5gJIPqRDutHtdEwYzMbVWv7IlYk/7U/NQ8/DLNhXqBom4esSe0uNvwBRVZbdQHydrFxxBe6vKyhLAQVM0HeptVzwSIANEqtqoCJT9DXT/bRdgp/7mqUPLbH0Nz0dqP9dcdKd4zyXJrhesWgK3qc/OlIvChCV0cWEP0UCm1uVXi3KlDbtfoNrC+UVI7MLpDf7tChc+b55e1s2btrDk8++A2P7oXl/XL84u/8KrUtC4GVaWe5mYFZs1xXQHZaIax7TajVb/MennZJmN7YR7XHS22U6W56kNp7nSZjBYWAZv9I3psaVxCzM31lwa7DyRtVSCkRV6zfrbvHHq+8XEhz7KCmUDHplXhbE2v7K3S3i+lIeiyi7vA2t422N7YGXRvOaKzujYqa/lVfJtSlRSTesizqqVq9XeS8kkjI5Q1Shay0W7d3gWdVu3Gb3vBwKud1Zt19Wy/yWj/kxG2IYcpC220nL0zrWPvd8+8WIf3U6vay61qpU9R8KwaeUqoqXyZy1yUTn+0cg4SVxDVHV3WdY/wYqE1eSfS5VIPG9bYHT2uPb1+Wjo4ARJZrOOv2uJw215QzTgGvTwtzIez3a+bS6fa0QpDyNV31z5uBK9ed6A/M0zKPryMR3qPIDPdo0dm2MUP+AFrszWgMS7XjC9wSti0sD7J0tU//wVavBwO -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import 
DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query incidents (alpha)

- - - -Search for incidents based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
- -The incident search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The incident search query failed. More details are provided in the response body. - -
- -Unauthorized - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Forbidden - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Not found - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Internal server error - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-process-and-local-variables-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-process-and-local-variables-alpha.api.mdx deleted file mode 100644 index d19cf90fd42..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-process-and-local-variables-alpha.api.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: query-process-and-local-variables-alpha -title: "Query process and local variables (alpha)" -description: "Search for variables based on given criteria." -sidebar_label: "Query process and local variables (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWUtz2zYQ/isY9JJMJUpJnDTlTXHs1m0erq04B0kHiFyKiEGAAUDJGg3/e2cBUqQk+pXJIQd7JuOQ2Af28X0A1xtq2cLQcEKvmOZsLoDOejQGE2meW64kDeklMB2lJFGaLCshQ+bMQEyUJAu+BEkizS1ozoKpnMowDKWyMJXjlBsCMs4Vl5ZwQ5gkTOQpIwkwW2ggTMZEKktAolln8JhlhYwZiURhLGhDVGGJSohNgczVTTCVlwDuaVKLviUXJ5djMjo/I2oJeslhNXs2YDk3fauUMIPIC/ZZzvsajD140a/1giz+7XsBeo1Lz6cSo04KbVPQJAbLuDCBi3AqaY9q+F6Ase9UvKbhxj1yDTENEyYM9GikpAVpcY3lueARw5wOvhlM7IaaKIWMuVUhPic0nGyoXedAQ6rm3yCytEdzrXLQloNxGko7a5UU05qtaY9yC5l5jJ2Eg4hbhozVXC5o2aNKx6APV7ApElYIi15NRMuy1wp3Uhmc9ajlVsC2a/7DVF4qbS98pmg5Q82cLeBR29Uqa+2JSwsL0LRHE6UzZv2rVy9x/4Jn3D5Q1rg9jhK7E/FBUnf3Vm4V30GiNDxGs+zMzzlbQJOfXoe/DqW2wi5eayQTv0vi2plUrRrQffuHrSGqfNxi1gs83GBNGv/C+tDqOAVyDWvHLhbpopZu2e0u4ZsjLKFkGRxa/cQyqDmjw2DT7Usmig79sdMTxcOMmEjld0dXWXGC/uHxkeZaRWDMmTSWyehB/ioVwiudH3VtQTJpz+IOh26F8Pgu002muBnrQkbMQoetryk4mrXb5HNDbC1PlMaTomV2rpQAJncAUjfpqevRLUY6JLqgVHpWM7mSxvfuy+Hwtu7YBZkpIkx1UghSGwjcEfHTj4DHcqdVlomzPUa6q9oJ18YiZ19hEczjmFGwH9W9ixt9Qh9Dji2Nh7FjVbN72Wx//wdx3e2udkRQ/hfnzvvQfAt5XjXEeZ+FpBDiqtvKaSFEm4PvM/VEwr8cCSPp7AK7k3xrqDrJo9sItzC4K2aud4GbMC4gDshHpaG+oBOmXdqXPIaYcOmC2QJvruL1ndScazUXkP1+SNG7uxqRcy9Z+SUew4QZ4gXn3vvk4vSY/Hn0+o/Zs9Ta3ISDwWq1CnQS9SHmVulA6cVAJxH+Q7nnARmnoIFkbE3mQFgcc/TJBGnYgZgcIp7wiFhV95nbDBbGx3fPueBWD+/5284rNKf77DkiXy7OCI9BWp6suVwcut79VJirwoZzweQ1bfqg6+Ni14spsozpNoZaDhDtltni/gPt1csD29hLf4/H58SbIJGKoWHQyhEGkXHJsyKj4dFw2KMZu/FPb4bDEm1ixR8QiSRwkwsmXWvth8MlyZq+dYHVLPGTKqM0X/B9v8EOWqsmfu8j8ih83YXCkSSYZY19CForTVQUFVpDTFYpF1umq33XHwhPWHvC2hPWbsFa2aMZ2FTFNKS5Mq51mE1pSAfbYdvAn3gU5w56Cdq4W3+hBQ3pxiOmDAeDTaqMLcNNrrQtB0ssx9YEhojLHll1xwgVMZF6r4eVwwXZ+pDem7b9xSys2NplMq/GUo3pt8O3w06rKHqLRZzf+Qh937W4oDaLoO4064UfYrjEbwIDUaG5XV+imk/PHJgGPSow+dt+qPw56+6644Ror/rPad0l/3wdu0Ijj100Q8GTG5blHoXNWGXnTj+sr+FN61XX6uZFc7Uddt9Ah+3bYaO3c9GzugBMqqvUZDsBbKSrwZ+f7s2aGZ2fvQ23k7Xh3txssvEJbU/E8F3pEJYol82q/w/rgvGCNr6Qw+DFIdbOzxxlRCrLCunODbkgK25Twlp1rqbFSCaCR4AXynBTp7YW++BXyJX3SF4E2KMeSPVxseA2LeZBpLJ6SLz9PRdqPsgYl4PKhRkcjz5++fR+1P9wdnzy6fKk/yIYBvbGf5cinDMmW/twF97tBwHOvh0CW2P1Z248/nw/C5vm/Hwax98+jq9Aa+HGDnLBuEQaceXdVKw6aVFi3bQ4tfbcOKGbDWbyixZlia+daxpOZg2V4lPZoymw2Hc/vUYI0mNfoP4Y99CA+GDmU/ZqjVEUQW7vlJ21Tofzz5djJJ7qbw2ZilFHsxX+HYKtaEindEoRyK5pHKe59xsqmFwUDs7U28Wf/wG7EQEd -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from 
"@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

- Query process and local variables (alpha) -

- - - -Search for variables based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
    - -Variable filter request. - -
- -The variable search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The user task search query failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-process-instances-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-process-instances-alpha.api.mdx deleted file mode 100644 index 5e6cfd15811..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-process-instances-alpha.api.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: query-process-instances-alpha -title: "Query process instances (alpha)" -description: "Search for process instances based on given criteria." -sidebar_label: "Query process instances (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWltv2zgW/isE92FarGwradrt6s2TuLuZbVNv4naAdfJAS0cWJxSpIak4hqH/vjik5KvsOME8OkAQWzoXHp7bR54sqGVTQ6MxHWoVgzGES2OZjIE+BDQBE2teWK4kjegdMB1nJFWaFFvEhkyYgYQoSab8CSSJNbegOevey3sZRZFUFu7lKOOGgEwKxaUl3BAmCRNFxkgKzJYaCJMJkcoSkGwivMBLlpcyYSQWpbGgDVGlJSolNgMyUc/de3kH4L6NG9LP5HZwNyL94TVRT6CfOMwe3vVYwU3HKiVML/aEHVbwjgZjdx50Gr5unvztzxL0HF+9v5dofVpqm4EmCVjGhek6C+8lDaiGP0sw9leVzGm0cF+5hoRGKRMGAhoraUFafMeKQvCY4d72/jC4wQtq4gxyhp82d37bN8R4V9TqujSgTIjvKY3GC2rnBdCIqskfEFsa0EKrArTlYJwKpZ36moppzeY0oNxC7t4fKyflIJI1QcZqLqe0CqjSCejdNxhNKSuFRa0mplUVrO3PuBb4EFDLrYBluP0X9/5OaXvrbaXVA3IWbAqvWq5W+dqauLQwBU0DmiqdM+sffTjH9Quec3skrfdDP7UbFu9s6ubaqiXjr5AqDa/hrFr3Z8imsNqfoEVfC9N+hl1fi9rA1XYfF6CeE+PzsIa6nlzX7P+BeasDNpWOMiCPMPe1gJudotTdctmnC3RZTXUFKZccJV23hHGbqkZ8suQkPOm2irxhObxZqGQ5tIv9CdpwXymO2JoW0U9ewPbG+Fjep27Epm+2pVZILJu2m3S8p1ukP8K83cdaKTt8a0gh804s7ddVMA3y7do8+6v1fRFqdqMSeLPCVKgZkSqBI1RaDTBkNjsyDJjNMCcfYW6IVe25GRADBdPMQkImc/JL7xcXH8Yyba+YPTJ7HDlJmN3MdXzQsTwHFAkyOV4gyOQFccbWwlpXYyEgSgKa378cXf8cBOTy+7fh18FocBWQy/7N5eDr4AqlgyxzbHuejAZ0SYefa8L1drgVYXeobIBCqoBmzFzLmCc1tKjtnCglgMkdQ3/PwMGXVseQjBnCiAbhfMNrsURpxGXOSRYkk/bYsumpfams9przxTWKzR6/z/KWFlZ5NGEKJY3vKOdh2O6lHYPrVmXKGJ+npSCNIPTTX47YVqKPxS6vxTpWWSautxBEO4rx+Z1ybSxirJ9MlGBeh2QEeyvvISzjd+k1YGYvx/b2bK9nZ52v8yRBvhPCOSGcE8I5IZwTwjkhnD3mYEPebHqHcE3TzRzDxWuxjLuwIinjApIu+aY0NHdVhGnH9sQTZ7u7N1u2solK5l13kbUH9hRaTQTkf38J/vTJ0FPWeonvioRhdUPCidc+vv1ySf558fEfD+8yawsT9Xqz2ayr07gDCbdKd5We9nQa4y/Sve+SUQYaSM7mZAKEJYmrlEyQVb8lpoCYpzz2yekMdotBX3r7XkBQ7u2u75cJU2q+Ewp98uP2mrhY4umcy+mu6s1LsIkqbTQRTD7SVVS8FHB9Yso8Z3reXIFuKqjzt3wZ+n04bw3mf49GQ+JFkBiLJ154NnmEitCInEueY3ZfhGFAc/bsv30KwwplosePsEQSeC4Eky60ts3hkuSruHWGLW+m/xrPKM2nfFvvTgrjwytvkU/Gj23J2JcEd1ljHILWShMVx6XWkJBZxsUyUxvdaxe3p0w7Zdop03YzrQpoDjZTCY1ooYwLHYfQaK9Ops5y+tTzfY/ivbpGRO5OyaUWNKILnzlV1OstMmVsFS0KpW3Ve0K3PDHNcdLkvIivfYY1kSNUzETmte96EF/gWaaxbGv89C9mYcbm/lhQj11Woj+Hn8N2dKm03SMRB1reQh9/azWhEYvJ3Y4kHfExgis8QxuIS83t/A7Z/PZMgGnQ/XIDJtf6nHQHyRwRDeoPX5po+e33kXM41rPb1ZRs8MzywmfjasrQdlYO95xrV4G554x6gGB52mwTvn42PCCjWdy+A1l46PwUHjzshOvnktUS1k4Q9Dw8v+ichZ2zcHT2MQo/RR8uuhdnH/9H104Fh4g2YLXVJaxj37WZnp8ajpdjv9Vq6mmfH+k9rAZzfuAWLsdp4dawbLzwUbY+BsNnlSs/qXIhVheH3WDFzG2cR8Pu2W4hGl67ehqrPC+la6pySmbcZoStBX89U8ZKK3gMCLmjBZU+cBqyr/4NqSOCnHUxcX11aXrplNusnHRjlTej5OXfiVCTXs647NUqTO+y/+3HzVW/8/X6cnBzN+icdcOuffaXW1jrcibX1uGOBC0j93dudP5+2/bFClKcRvYvj+zrOmbh2fYKwbjEeHfOXdQNZ0x3Gg5tQhfPr75tjOligTv6Q4uqwsduCTQaP6y6DH7DwyywxOcAfcREp5feYZ0RrgXJReluJrcvnKug4ejHMRT2IO3DWgMdfr8bYU2u/y8hVwnyaDbD/1lgMxrRe3pPMZ1dELly754vqGByWrqkpl4u/vwfiB/isw== -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from 
"@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query process instances (alpha)

- - - -Search for process instances based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
    - -Process instance search filter. - -
- -The process instance search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The process instance search query failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/query-user-tasks-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/query-user-tasks-alpha.api.mdx deleted file mode 100644 index 834df9cb457..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/query-user-tasks-alpha.api.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -id: query-user-tasks-alpha -title: "Query user tasks (alpha)" -description: "Search for user tasks based on given criteria." -sidebar_label: "Query user tasks (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWVlz2zYQ/isY9CWZ6qBzNeWbYudwm8O15fRB1gNELkUkIMAAoGWNhv+9swApUhItS53MtA/OjCcisbvAXt9ilytq2dzQcEKvDWhimflOpz0ag4k0zy1Xkob0CpiOUpIoTYqaypAZMxATJcmc34IkkeYWNGeDG3kjwzCUysKNHKfcEJBxrri0hBvCJGEiTxlJgNlCA2EyJlJZApLNhBd4yrJCxoxEojAWtCGqsEQlxKZAZupucCOvANzTpCZ9TS7fXo3J6OKcqFvQtxwW0ydDlnPTt0oJM4w8YZ/lvK/B2J0X/ZpvkMW//ChAL3Hp6Y1EtZNC2xQ0icEyLszAaXgjaY9q+FGAsW9UvKThyj1yDTENEyYM9GikpAVpcY3lueARQ6MOvxm07IqaKIWMuVUhviQ0nKyoXeZAQ6pm3yCytEdzrXLQloNxHEo7aRUV05otaY9yC5k5Rk7CQcQtQcZqLue07FGlY9C7KxgVCSuExV1NRMuy11J3Ugmc9qjlVsA6bP5CU14pbS+9pWg5Rc6czeGo42qVtc7EpYU5aNqjidIZs/7V82d4fsEzbg+kNe6Mo8RuaLxj1M2zlWvGN5AoDcdwlp32uWBzaOzT69ivg6nNsJmw61wm/pjExTOpYnVAtzfYjQ1RGeQ+uZ7icImIG2Nmvv8Jywc98+qF84xlFjrjkxnD5xK6F0FABtKed4d2xGTMY2bhvVZFvp8Ele2kyLWKwJgzSLjkaJnDlapYz6WxTEZwOKMFyVArc9iJ7lH/lmmOKGsOj9htR0qWdRv+lomia6UVu9dVEHytjvHOBdE6jLtIt0l2KboSovTYZHIljT/2syDYDedxCk1Bq1PFFBHaMikEqSUMHNL/dCQ/FgKtskycb7lpX9AkXBuL0PsVfXOE09F8gv1b3n0Q5w16DMa1OA4EucppD2LStgJHZsL/BNLq1aMh5V4svM8OrYy+DyQPYD0Uq/4LlMU3RxhQg4OAs06vtjjQUH3LM3BcKssFHM+XKCHU4jo/jisu4DiG/YUG7ixoycQ7pbNLSECDjLqDdsd9X0Eb7qHygHtZVBirsg/AYtCducji2Ill4mITJDsCjivNbbdXdwtCTY4NB2vKw4DgYsrn2AZg6+HqnfvVelkzI/hkXPKsyGgY9GjG7vzvkyBo3aNfBl0VDSG+uxp2AqOjfHFwffMomTAuIB6QT0pD3dUQplEDdctjiAmXTqMaTslMxcu9hTDXaiYg+3W3IG6eakQuPGW1L/E+JcwQTzjzu08u352S31+8/G36JLU2N+FwuFgsBjqJ+hBzq/RA6flQJxH+Id1T5yMNJGNLMgPShAhpkJuYHCKe8IhYVbnMHwZDw+v3QBV2q3vSqdB8J7BG5PrynPAYpOXJksv57tab/dVMFTacCSa/0yYOujqyzV1MkWVML+tmeXODqgIVD18fnj/rTI0P4/EF8SJIpGJwEwGLHX610UbYvwjagf8qCBCOnMcP0EQSuMsFky60ttXhkmRN3DrFeIX1P8kzSvM53953sHFJqYL4zGvks/BlVxaOJEErI2wS0FppoqKo0Bpiski5cOIRKuu9657qMdcec+0x1+7JtbJHM7CpimlIc2Vc6DCb0pAOseL13Yhy6EsexWmNvnUXicmKFlrQkK58ypThcLhKlbFluMqVtuXwFv2x0Sjjsk+tOmSEiphI/ba7rsMFbJNrlbZmlO+ZhQVbOlPm1TCvEf06eB1030uUtvdIxKmn19AHXgsMarGY1Z1iPfEhgktswQxEBV5wrpDNm2cGTIMeFWj9dUBU+znp+OyJaK/68a4Okz/+HjtPI5BdNqPUt3cMb8mbs6iNhitYt1NN8DVdVPOu1Tw1L7dbn44V39g0C93tSNDdbAQb9+g9QjaP1Yq6ST1paa+6Acv6ajtFV7v4maynuQ11NcT1k9ppM2/1c9RgPSUNtmagk5V3c3u6ie9Kl/iJcj6u0nI3WvCY9T2fBoOTXQi4OHdIFqksK6QrZ3JOFtymhLWir5r8I8YJHgHec9ezp/W2H/0KqToLcjLAzPHpXVexObdpMRtEKqsH/uv/Z0LNhhnjclhtYYano0/Xn89G/Y/np28/X73tnwyCgb3zwwlEmYzJ1jncPbz9ReSJ+7LxdFvpVVPFHz+l7PmUUkGHhTs7zAXjEsHMuXNVgfuENuBO6yjFTw4eoid0tUJTXmtRlvja7U3DybTJLXwqezR1naVLne+YsfTUu6g/xkM0ybYz6St7NccoiiC3e2mnrSp18eVqjPhXfSjKVIw8mi3wIxJb0JDe0BuKmevCxkGre7+igsl54fKXern47x+WW52c -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown 
from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Query user tasks (alpha)

- - - -Search for user tasks based on given criteria. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
    - -User task filter request. - -
    variables object[]
  • Array [
  • ]
- -The user task search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • customHeaders object
  • ]
- -The user task search query failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/remove-group-from-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/remove-group-from-tenant.api.mdx new file mode 100644 index 00000000000..73e4cd9c8ec --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/remove-group-from-tenant.api.mdx @@ -0,0 +1,59 @@ +--- +id: remove-group-from-tenant +title: "Remove a group from a tenant" +description: "Removes a single group from a specified tenant without deleting the group." +sidebar_label: "Remove a group from a tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/Ss7OCVTRlQSJ0150zh26jbxeBy5Pbg+gORKRAICDLC0rOHwv3cWIGXZUtJ0xkcdNBTB/d73QGI7QXLpRXYt5mikIXGTCNugk6SsOStFJhzW9hY/ONs2p87Wg1giSvSFUw3LiUxcBikPErwyS42wZAVYOFvzWoOFWigsgYI6rBRVtiUoUSMpswSqBpWJSEQjnayR0HFgnTCyRpGJqPonrkUiFPtsJFUiEQ6/tcphKTJyLT4ObF4htEZ9axFUiYY4DAd2ETxGk+zSFxXWUmSdoHXD3pQhXKITiVhYV0uKS2+PRN8nm5BCxE8a0aYG/yegG3bpG2s8etZ4NX3Fl123sSkr6cG3RYHeL1qt1xBbXMZubdelT8TRdLrfVuPsrSqxhFKSBOXBWIJbqVXJ4RfWEBpiVdk0WhUBUGnjbK6x/uWLZzvdVpYPHczgIkpCiSSVBpt/wYJAeoiCOZagDFxfnh7Db0dvfr15VhE1PkvT1Wo1cYviBZaKrJtYt0zdouAfyz2fwLxCh1DLNeQIsiwV+5SaE2rQkUI/4rUAsqEeQ9jArZj8Y0QyNiWGxZDdKN+3bNM6T06Z5XbnWqd2KDSDq8uzERLrkRQPXAedhWw125C5bSnLtTRfuVGkSO91+tiLb+tauvWIuIcO+kR4ktT6/0Te61c7thkWv8/nFxBNQGFLhIV1QJXyoyNOolZG1W0tsqPpNBG1vIt3b6fTnm1yx38iEwN412hpArQep6MM1NbhgJ+QmDKepCmeqjPWqaV67HfC+8PYCzGA+H3MqO/7QKjXu3g/tS5XZYkmwBN4A0FPI6uk1naFB14deHXg1Q94dbSL93NLsLCtKSOvhq8P67behGYjcqDXgV4Hen2HXm/2fQfODHCVHeMQnbMObFG0zmEJq0rpYJ4/M0ffw3stYvHAtQPXDlzb5VqfiBqpsnz4DudjDEdiqkQm0vgG82m3OQ33aXiX+bQbT6M9Hx/R3Y7n59ZpkYkuEqnP0rSrrKc+6xrrqE9vuUu30imZ64hNfhwJNwJJ20LqsLyvofyAT8Rjoseybk0p4R1cnnyewwdJuJLrUGB2+dD0u+m76V6rLPodi7OLM4gZRjhubRGjWeb6XrNR+GcMh4O1x6J1itafWS2WJ0fp0M1a7sgGJoO/YJ3vo5BIhj+nI3j++Hse+q/Mwgb1AQe7gXBX0PkY+XTychdzF2eBOoWt69aE/dMsw2gF5FZihW49cUKJ0KpA4wO6hwnGKPYxPoG/okd4OeGmROSM2+ZSUdXmk8LWaRHVNtdc2zytpTLp4MKnx7NPV+fvZy8+nh2fnH8+efFyMp3QHYXkG+uplmYrjjg/AvlwbkR7h03d/avjaedOQysJ7yhttFSGwRVq0A0EvB6mUF4kItseSEUO8upmJnSTDES6Fl2XS49XTvc9L39r0a1Fdn1zz7tA1FJ5/l+KbCG1fzw42k772eUwYnoOPzng2pvbsCjNOmwBuuU7kYivuH4wcOOR19MH9+O6/zi2TZX7mz4RFcoSXahhfDwrCmxoS/G7XxjM8c12+/7k48n8RPT9v0pURKI= +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Remove a group from a tenant

+ + + +Removes a single group from a specified tenant without deleting the group. + +## Request + +

Path Parameters

+ +The group was successfully removed from the tenant. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found. The tenant or group was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/remove-mapping-rule-from-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/remove-mapping-rule-from-tenant.api.mdx new file mode 100644 index 00000000000..c800f404151 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/remove-mapping-rule-from-tenant.api.mdx @@ -0,0 +1,59 @@ +--- +id: remove-mapping-rule-from-tenant +title: "Remove a mapping rule from a tenant" +description: "Removes a single mapping rule from a specified tenant without deleting the rule." +sidebar_label: "Remove a mapping rule from a tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/is7OCVTWlQSJ0150zh26zbJeBylPbg+gMRKRAICDB6WNRz+984CpCxZSpt20psOGorkvvf7AGI75vnSseKGzVFz7dltxkyLlntp9KVgBbPYmDt8x9tW6uV1UHhhTTMIZ0ygq6xsSZoV7DrKOuDgpF4qhCapgQ0KYWFNQ69arORCogAfrcBK+toEDwIVepL2NUaNCctYyy1v0KOlIDumeYOsYEnzN1yzjEny3HJfs4xZ/BKkRcEKbwM+Dm9eIwQtvwQEKVB7isKCWUSHySS5dFWNDWdFx/y6JW9Se1yiZRlbGNtwnx69OmV9n21CGlL9rjFtl+/fRnZLnl1rtENHGs+nz+my732nRyvuwIWqQucWQak1pPaL1LztOvUZO51OD5tsrbmTAgUI7jlIB9p4uONKCsqiMtqj9qTK21bJKoItb60pFTY/fHJkp9tKdtfBDK6SJAj0XCow5SesPHAHSbBEAVLDzfXFGfx0+vLH2ye1960r8ny1Wk3sojpBIb2xE2OXuV1U9CO5pxOY12ipImsoEbgQknxyRQm1aL1EN8K3Am9iPYawgToy+VOzbOxNCosgvFF+6Nymg85bqZfbDQxW7hFrBh+vL0eArEeO7LiOOgseFNngpQm+KBXXn6lRXnp10OljLy40DbfrEX+7DvqMOc99cP8IwBfP92wTLH6Zz68gmYDKCISFseBr6UZHlEQjtWxCw4rT6TRjDb9Pd6+m055sUse/IRMNeN8qriO0HqcjNTTG4oCfmJjUznNdfa/OGCuX8rHfCa0XYy/YAOI3KaO+7yOhXuzj/cLYUgqBOsITaDlB50dWcaXMCo+8OvLqyKu/4dXpPt7fGw8LE7RIvBo+Rozd3xD1RvLIsiPLjiz7CsteHvocnGmgKlvCIVprLJiqCtaigFUtVTRPX5ub7/60vSUsHrl25NqRa/tc6zPWoK8Nnc/jqRnjSdnXrGB52shc3m0OyX0+bGkntKW5vHs4q/Z0qkR7N56vg1WsYF1iVF/keVcb5/uia431fX5H7brjVvJSJZDS68S8EVHKVFzFx4c6Sy/oxDxmfMaboAWH13B9/mEOP3OPK76OlSaXu6ZfT19PD1ol0a9YnF1dQsow4XJrrRjNEukPmk3C32I4nrcdVsFKv/5Aaqk8JXKLdhaoNRu8DP6idbpPQiwb/lyMKPr1j3kEgtQLE9UHQOwHQl1B61Lk08mzffBdXUYOVaZpgo4LqV7GyQvwrcQqFZynhDKmZIXaRZgPE45R7G16A78nj/BsQk1JyBnXz6X0dSgnlWnyKqltrqUyZd5wqfPBhcvPZu8+vn8zO3l7eXb+/sP5ybPJdOLvfUy+Nc43XG/FkaZMwA9Ol/zByVT3sJX8H0Oqoa8e733eKi41IS0WpBtoeTOMrBzLWLE9vdphJr3cmiPdZgO9bljXldzhR6v6nh5/CWjXrLi5fWBjpK+Qjv4LViy4co+HTdtVeHI9jKWewjeOxQ4mOTzkeh0XBhXojmXsM653xnQ0KPv+wT2ej/2HELfK3d/2GauRC7SxmElgVlXY+i3Vr36J0BKwWZbfnL89n5+zvv8LKRtgdA== +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Remove a mapping rule from a tenant

+ + + +Removes a single mapping rule from a specified tenant without deleting the rule. + +## Request + +

Path Parameters

+ +The mapping rule was successfully removed from the tenant. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found. The tenant or mapping rule was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/remove-user-from-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/remove-user-from-tenant.api.mdx new file mode 100644 index 00000000000..a03636c960b --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/remove-user-from-tenant.api.mdx @@ -0,0 +1,59 @@ +--- +id: remove-user-from-tenant +title: "Remove a user from a tenant" +description: "Removes a single user from a specified tenant without deleting the user." +sidebar_label: "Remove a user from a tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/Ss7OCVTRlQSJ0150zh26zbNeBy5Pbg+gORKRAICDLC0rOHwv3cWIGXZUlx3xkcdNBTB/d73QGI7QXLpRXYl5mikIXGdCNugk6SsOStFJhzW9gYvPbpTZ+tBKhEl+sKphsVEJi6CkAcJXpmlRmg9Olg4W/NSg4VaKCyBgjasFFW2JShRIymzBKqixkQkopFO1kjoOKpOGFmjyETU/APXIhGKPTaSKpEIh99b5bAUGbkWH4Y1Z7tGfW8RVImGOAoHdhEcRpPs0hcV1lJknaB1w96UIVyiE4lYWFdLikvvj0TfJ5uQOOBnDWiswP8J55o9+sYaj5413kzf8GWPV27ISnrwbVGg94tW6zXE5paxU9tF6RNxNJ3uN9U4e6NKLKGUJEF5MJbgRmpVcvSFNYSGWFU2jVZFgFLaOJtrrH/66tlOt5XkfQczOI+SUCJJpcHmX7EgkB6iYI4lKANXF6fH8MvRu5+vX1REjc/SdLVaTdyieIWlIusm1i1Ttyj4x3IvJzCv0CHUcg05gixLxT6l5oQadKTQj1gtgGyoxxA2cCcm/xiRjD2JYTFeN8p3Hdt0zpNTZrnduNapHfbM4PLibATEeiTEPddBZyFbzTZkblvKci3NN24UKdJ7nT704tu6lm494u2+gz4RniS1/j+B9/bNjm2GxW/z+TlEE1DYEmFhHVCl/OiIk6iVUXVbi+xoOk1ELW/j3fvptGeb3PEnZGIAbxstTYDWw3SUgdo6HPATElPGkzTFc3XGOrVUD/1OeHMYeyEGEH+MGfV9Hwj1dhfvp9blqizRBHgCbx/oaWSV1Nqu8MCrA68OvHqEV0e7eP9sCRa2NWXk1fDlYd3di9BsJA7sOrDrwK4fsOvdvs/AmQGusmMconPWgS2K1jksYVUpHczzV+boe3itRSweuHbg2oFru1zrE1EjVZZP3eFojOE4TJXIRBpfYD7tNifhPuVXmU+74SDa89ER3c14cm6dFpnoIo36LE27ynrqs66xjvr0hnt0I52SuY7I5MeRbiOMtC2kDsv72skP+Cw8pnks69aUEj7AxcmXOfwqCVdyHcrLLu+b/jD9MN1rlUV/YHF2fgYxwwjGrQ1iNMtM32s2Cj/FcDhUeyxap2j9hdVieXKUDt2s5X5sQDL4C9b5PgqJZPhzOkLn97/nofvKLGxQH1CwGwh3BZ2PkU8nr3cRd34WiFPYum5N2D3NMsxUQG4lVujWUxwoaFWg8QHbw+xiFPsUn8Bf0SO8nnBTInLGTXOpqGrzSWHrtIhqm2uubZ7WUpl0cOHT49mfl58/zl59Ojs++fzl5NXryXRCtxSSb6ynWpqtOOLcCOS9eRHtnTF1d++N5xw3DX0kvKW00VIZRlYoQDdw72oYPnmRiGx7DhXox4vjJOg6GTh0Jboulx4vne57Xv7eoluL7Or6jnKBo6Xy/L8U2UJq/3BctJ3yi4thsPQSnjjV2pvZsCjNOrBft3wnEvEN1/embDznev7gHi3646GNNe6v+0RUKEt0oYLx6awosKEtvR9+WDC5N7vsx5NPJ/MT0ff/AuVcPcI= +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Remove a user from a tenant

+ + + +Removes a single user from a specified tenant without deleting the user. + +## Request + +

Path Parameters

+ +The user was successfully removed from the tenant. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found. The tenant or user was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
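The three `remove-*-from-tenant` specifications added above all describe the same operation shape: a `DELETE` request that detaches a group, mapping rule, or user from a tenant without deleting the entity itself, with a problem-details body returned for the 400/403/404/500 failures listed. As a rough sketch only — the base URL, path segments, and auth header below are assumptions, not read from the generated spec blobs — such a call could look like:

```js
// Illustrative sketch (Node 18+ / browser fetch). The gateway address,
// path shape, and bearer-token auth are assumptions, not taken from the
// generated specification above.
const BASE_URL = "http://localhost:8080/v2";

async function removeUserFromTenant(tenantId, username, token) {
  const res = await fetch(`${BASE_URL}/tenants/${tenantId}/users/${username}`, {
    method: "DELETE",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!res.ok) {
    // 400/403/404/500 responses carry a problem-details body per the spec above.
    throw new Error(`Remove from tenant failed: ${res.status} ${await res.text()}`);
  }
}
```

The group and mapping-rule variants differ only in the final path segments.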
diff --git a/docs/apis-tools/camunda-api-rest/specifications/report-error-for-job.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/report-error-for-job.api.mdx deleted file mode 100644 index 81fb73addfc..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/report-error-for-job.api.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: report-error-for-job -title: "Report error for job" -description: "Reports a business error (i.e. non-technical) that occurs while processing a job." -sidebar_label: "Report error for job" -hide_title: true -hide_table_of_contents: true -api: eJztWE1z2zYQ/Ss7OCVTWlRS54s31XFap3HikZX24OgAgisRNgkwAChZw+F/7yxA6ttpDu1NmtFIBBa72N33QPI1zPG5Zckd+6hTNo1YhlYYWTmpFUvYGCttnAUOaW2lQmsBjdEGnskBDkBpdeZQ5EoKXjwHl3MHWojaWFjmskCojBZorVRz4HCv08E3xSJWccNLdGgocMMUL5El7F6nf+KKRUxS5Iq7nEXM4PdaGsxY4kyN+9ub5AgPuAI9A5ejD8AiZkWOJWdJw9yqIs9SOZyjYRGbaVNyF4Zen7O2nYYQaN1vOlvRmv2IQiuHytEUr6pCCk7B43tLO2gOg+n0HoWjLI2u0DiJlmZ92S50hnRxmEWoqtAZhiouZVFAilByJ3LMYCldDlz1djQKuEDlQkW72NYZqeasjUK4a7SWz49EHPWOymARYlZGL2SGFniWSbLkBfjsH49GiZiqi4KnBYZStRFbcCNpwGe88XKzVYljbfx4++UzhLptZS+VdVw5yR367q6dA3d+oNCCF2CFrrBHwEF5gj8/gNabuNzoZZf/Tlrrxu2l1W6j8G6rkdOIOenIjshzSePjgCXWtmGZrbSyoRwvh+c/aIS0/cZm2mzA3EbsfDg8DpmuXRlk3HFyoLSDBS9kFtJ6AriV0WmB5S+HAN7bGtwES8jQcVn0DeIWgmGKGUgFd+MPF/Du/NWb6bPcucomcbxcLgdmJs4wk06bgTbz2MwEfcnu+QAmORrC9oogvoW2DWXAVijkTApw2pej2zZQs463bZdvYbY5wOz6BKiNZPtAHMHX8RXIDJWTsxWdWgeh/ZoZrwvywVNduyQtuHqgVnVoOAy6H8XWZcnN+tzaDdBGzDruavuvJ9ivLw98EzD+mExuILgIJ0qAlLR9IEqilEqWdcmS8+EwYiV/DFevh8OWfFLHfyITBfhYFVx5aO2nIxWU2mCHH59Y4LT4rzqjjZzL/bgD1m56wToQvw8ZBV6eH6PiJHAunLTkbi4XqCDclXp6zXStTvQ60etErx/S691P0+shcEsqP7Y0Ws19bRFEbQwqV6wGcL1J0wI3W7e+bl1/o4VUZ6sTPU/0PNHzaXq+OvZAOVJAVTaEw/BE6t/jDL167L/JBcb5B90T105cO3HtKa61ESvR5TojPUNbDx2SNRIW3+vUxk14uGxjzzhSLtAselWkNgVLWBNI0yZx3OTaujZpSJFp4wV1ZOd9m6YDuXrQ+BfkPAQ+bB5NkPTSJ3XBy1plHN7C+PJ2Ar9zh0u+8sWkkLuu3w7fDo96JdMnPI5uriBkGKC3dRz0bonXR90G459x7DUdi6I20q1uaVkoT4rcoBnVVP81JLp43jtdByMWdX8+9ED5+PfE95qOsvFGL7p85GUViLil72yAtqvCbMa3+9Z6sM6031UHpcP8aBEaGwoyHLw4hO3NlWef0GVZK38Eq3knG23VSxS1dVSniBVSoLJ+850C15t9CjPwV4gILwbU6wDI/uSdS5fX6UDoMhZh2fo3LXQal1yquAth44vR9dfP70dnn64uLj/fXp69GAwH7tH5mhIzSq629hFUx+4+REnd63Q/4WZz1/mfVMoOIiR/xVXBpSLQ+iI0HY3vSLO0LGLJWrsMTJ5GHRvvWNOk3OJXU7QtDX+v0axYcjfdgMCzPZOW/mcsmfHC7itk28k+G3di1HM4Ln8e3Xc3yNXKw6+o6YpF7AFXG+m1nbYRy5FnaPymwuRFCH02IRebxQdiaBv1K0ZCYOWesN15KiCuro/Imy+3E6JeJ8SWgUyGL0kH5kuWsG/sG21a+7J4VvvxhhVczetAsuCXPv8AAA/kTg== -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Report error for job

- - - -Reports a business error (i.e. non-technical) that occurs while processing a job. - -## Request - -

Path Parameters

Body

required
    variables object nullable
    - -JSON object that will instantiate the variables at the local scope of the error catch event that catches the thrown error. - -
- -An error is thrown for the job. - -
- -The provided data is not valid. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The job with the given jobKey is not found. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The job with the given key is in the wrong state currently. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/report-job-error.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/report-job-error.api.mdx new file mode 100644 index 00000000000..2c64b0381f5 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/report-job-error.api.mdx @@ -0,0 +1,60 @@ +--- +id: report-job-error +title: "Report error for job" +description: "Reports a business error (i.e. non-technical) that occurs while processing a job." +sidebar_label: "Report error for job" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/iuYPSVTWlRS58Wb6jit3Tw8stIeHB1AcCXCJgEGACVrOPzvnQVIvZ3m0N6kGY1EYLHP71uS24DjcwvJHVzrFKYR6AoNd1KrqwwSMFhp4651emmMNhBBhlYYWZEAJDD225ZxltZWKrSWIQmyZ3KAA6a0OnMociUFL54zl3PHtBC1sWyZywJZZbRAa6WaM87udTr4piCCihteokNDfjWgeImQwL1O/8QVRCDJcsVdDhEY/F5LgxkkztS4794kR/aAK6ZnzOXoDUAEVuRYckgacKuKNEvlcI4U3Uybkruw9Poc2nYaTKB1v+lsRWf2LQqtHCpHW7yqCil88uJ7Sx40h8Z0eo/CUZSGUu0kWtr1abvQGdLFYRQhq0JnGLK4lEXBUmQldyLHjC2lyxlXvRytMlygciGjnW3rjFRzaKNg7hNay+dHLI56RWWQCDYroxcyQ8t4lkmS5AXz0T8etRKBqouCpwWGVLURLLiRtOAj3mi52crEsTJe3375zELetqKXyjqunOQOfXXXyhl3fqHQghfMCl1hj4CD9AR9fgGtF3G50csu/p2w1oXbC6vdRuHdViGnETjpSA56Ao0DlqBtwzFbaWVDOl4Oz39QCGl7x2babMDcRnA+HB6HTFeujGXccVKgtGMLXsgshPUEcCuj0wLLXw4BvOcauwmSLEPHZdEXiFsWBFPMmFTsbvzhgr07f/Vm+ix3rrJJHC+Xy4GZiTPMpNNmoM08NjNBX5J7PmCTHA1he0UQ30LbhjLMVijkTArmtE9H5zajYh0v2y7fwm5zgNl1B6iNPGh3I/Z1fMVkhsrJ2Yq61oFpf2bG64J08FTXLkkLrh6oVB0aDo3uW7F1WXKz7lu7BtoIrOOutv/awX59eaCbgPHHZHLDgorQUQKkpO0NURClVLKsS0jOh8MISv4Yrl4Phy3ppIr/RCSK4WNVcOWhtR+OVKzUBjv8+MACp8V/VRlt5Fzu2x1Au6kFdCB+HyIKvDw/RsVJ4FzotKRuLheoWLgr9fSa6Vqd6HWi14leP6TXu5+m10PgllR+bWm0mvvcIhO1MahcsRqwT5swLeNm69bXnetvtCzV2epEzxM9T/R8mp6vjj1QjhSjLBvCYXgi9e9xhl499t/kAuP8g+6Jayeunbj2FNfaCEp0uaZBS6Wthw6NNRKI73Vq4yY8XLYxdsMXi2bRT0VqU0ACTSBNm8Rxk2vr2qShiUwbL6giO+/btB3I1YPGvyDnwfBh8WiDRi99UBe8rFXG2Vs2vrydsN+5wyVf+WSSyV3Vb4dvh0e1kugTGkc3VyxEGKC31Q56tcTro2qD8M8o9jMdi6I20q1u6VhIT4rcoBnVlP81JDp7XjtdByGIuj8feqBc/z3xtaZWNt7Miy4feVkFIm7NdzZA253CbNa369Z6sM6096qD0mF8dAiNDQkZDl4cwvbmyrNP6LKslW/Bat6NjbbyJYraOspTBIUUqKx3vpvA9WIfww77K1hkLwZU6wDIvvPOpcvrdCB0GYtwbP2bFjqNSy5V3Jmw8cXo09fP70dnH68uLj/fXp69GAwH7tH5nBIzSq62/AhTx+4+REHd63Q/4GZz1/mfppQdRGj8FVcFl4pA65PQdDS+o5mlhQiS9ewyMHkadWy8g6ZJucWvpmhbWv5eo1lBcjfdgMCzPZOW/meQzHhh9ydk28E+G3fDqOfs+PjzqN/dIlcrD7+ipiuI4AFXm9FrO20jyJFnaLxTYfMimD6bkIrN4YNhaBv1J0ZCYOWekN15KiCurlvkzZfbCVGvG8SWgUyGL2kOzJeQwDf4Rk5rnxbPar/eQMHVvA4kC3rp8w8ixe97 +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Report error for job

+ + + +Reports a business error (i.e. non-technical) that occurs while processing a job. + +## Request + +

Path Parameters

Body

required
    variables object nullable
    + +JSON object that will instantiate the variables at the local scope of the error catch event that catches the thrown error. + +
+ +An error is thrown for the job. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The job with the given jobKey is not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The job with the given key is in the wrong state currently. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
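The `report-job-error` specification (replacing `report-error-for-job` above) describes a POST that throws a business error for a job; the optional `variables` object documented above is instantiated in the local scope of the error catch event that catches the thrown error. A minimal sketch — the path and every field name except `variables` are assumptions, not read from the spec blob:

```js
// Illustrative sketch only. The endpoint path and the errorCode/errorMessage
// field names are assumptions; `variables` is the nullable object documented
// in the specification above.
async function reportJobError(jobKey, token) {
  const res = await fetch(`http://localhost:8080/v2/jobs/${jobKey}/error`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      errorCode: "payment-rejected",       // assumed field name
      errorMessage: "Card was declined",   // assumed field name
      variables: { retryAllowed: false },  // scoped to the catching error event
    }),
  });
  if (!res.ok) throw new Error(`Throwing job error failed: ${res.status}`);
}
```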
diff --git a/docs/apis-tools/camunda-api-rest/specifications/reset-clock.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/reset-clock.api.mdx new file mode 100644 index 00000000000..0da0637d761 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/reset-clock.api.mdx @@ -0,0 +1,51 @@ +--- +id: reset-clock +title: "Reset internal clock (alpha)" +description: "Resets the Zeebe engine’s internal clock to the current system time, enabling it to tick in real-time." +sidebar_label: "Reset internal clock (alpha)" +hide_title: true +hide_table_of_contents: true +api: eJztVs1u3DYQfpUBTwkqS5s0aVPdDCdpXaSpYW9aoLYPI2q0YkyRCn+8XiwE9DX6en2SYkitvbEdIIcCvXSBXUnL4Xwz830z4lYEXHlRn4sjbeWVuCyEHclhUNYct6IWjjyFvFaIlrx0auRFUYtTXvIQeoI/iBoCMitl6O8///KgTCBnUIPkrRBsMpPROTIB/MYHGiCogQogg41WZgUqJDslr0AZcIT6gC3KC7PslYfbuEB5iJ66qKGzDhyF6Aw7SBAz3oUx1g2ooaEer5V1gF0gxyA9emiIDIzKGGoZFMGPJFWnJMyQF6aua2MDzehk2tEqExgcDZyjHnuEjjBER5dPKmmH0RoywVeETm8OUEryvkp2+fdgtvbl0D4FNC0MuIGGwMfmI8mUvezRrOjCKANdZGNwpAk9+TJFdGFEwZyM1njyot6K54sXfPmcmuVtJdbowccUSxe13kAidEfIHhGlmArxcrF46OzQ3NFJzlkHViYmW1j3ShOMzrL/HQWOPkXyoUyxSmsCmcBecRy1konCanS20TR889EzxFZ42dOAj2DDSbaElgIqDTaXCj1kw4ZaVsv56dsj+OHFy+8vn/QhjL6uqvV6XbpOHlCrgnWldavKdZK/bPe0hGVPjnYcYNsqxkTN6YzkgiK/p4pcrzlsCJuRcn58J2qRwxKFuNvMyeTV3VX44JRZiUJ0rM0gahGdetBXh/Dh9BhUSyaobrOr6mfQaU+HUbMPbGwMdaPRXDGHQQX9KOh9FB+HAd0GbPcIwFQIHzDEvTQEq2BFbj9+ZcK3zx/4Zvn9tFyeQHYB0raUejVwK81AnMSgjBriIOoXi0UhBrzJT98tFhP7ZMa/IhMDdDNqNHk63EtHGRiso1k/KTFlfEAj/y1mrFMrdR+3FNMdF2IW8euc0cSfQgwUesszdrQ+SQdDL2pRpcatUp+KQnhy1+R4RG9FdFrUYpubZaqrattbH6Z6O1oXpuqambhGp7DRWX+8nJtqJxZtJeo+Az4kjRcMDrRL5giHaFqEV3D65mwJP2KgNW5SERnyc9evFq8Wj3pl0y94PDw5hpxhltzeGNi55X5+1G02/hrH03TJhZTRqbA54225PA2hI3cYue63Upjxknd+zkaimG/e7gTy8+/LxLEynU3bZ64fBsKskPM58kX57KGuTo5Te0g7DNGkGWlWsFahB9xLTOroAydUCK0kGZ8UzHztwb7LK/BbRoRnJZOSlbMbjSsV+tiU0g6VzNtur422TTWgMtUM4aujw18+vH99ePDu+OjN+7M3B8/KRRluQkqepTug2YsjnQruv/+fpBfg0/uJb+9eD/+fJv6r08Qs+0A3oRo1KsONmPSynUfSuZDzATAPpctiHiznYrtt0NMHp6eJ//4UyW1EfX55N4f4aSpET9iSS1PsijaseSlpDGlg6cgBfPF4wM17OytPfj3jtvsHd4G96Q== +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Reset internal clock (alpha)

+ + + +Resets the Zeebe engine’s internal clock to the current system time, enabling it to tick in real-time. +This operation is useful for returning the clock to +normal behavior after it has been pinned to a specific time. + +:::note +This endpoint is an [alpha feature](/components/early-access/alpha/alpha-features.md) and may be subject to change +in future releases. +::: + +## Request + +
+ +The clock was successfully reset to the system time. + +
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
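The new `reset-clock` specification is a body-less POST that returns the engine's internal clock to system time after it has been pinned (see the companion `pin-clock` entry in the sidebar changes below). A minimal sketch, with the path assumed:

```js
// Illustrative sketch only: the endpoint path is an assumption.
async function resetEngineClock(token) {
  const res = await fetch("http://localhost:8080/v2/clock/reset", {
    method: "POST",
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!res.ok) throw new Error(`Clock reset failed: ${res.status}`);
}
```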
diff --git a/docs/apis-tools/camunda-api-rest/specifications/reset-internal-clock-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/reset-internal-clock-alpha.api.mdx deleted file mode 100644 index 12e6a222866..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/reset-internal-clock-alpha.api.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: reset-internal-clock-alpha -title: "Reset internal clock (alpha)" -description: "Resets the Zeebe engine’s internal clock to the current system time, enabling it to tick in real-time." -sidebar_label: "Reset internal clock (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztVttu3DYQ/ZUBnxJUljZp0qZ6M5ykdZGmhr1pgdp+oKjRijE1VHjxerEQ0N/o7/VLiiG19sZ2gDy0b11godVyOIdnzpmRtiLIlRf1uTgyVl2Jy0K06JXTY9CWRC1O0WPwEHqEPxAbBKSVJvz7z788aAroSBpQvBeCTWEqOocUwG98wAGCHrAAJNkYTSvQIcVpdQWawKE0BxxRXtCy1x7siE4yNGgP0WMXDXTWgcMQHXGCBDHjXRBZN0gDDfbyWlsHsgvoGKSXHhpEglETYcugEvyISndawQx5QXVdkw04oyO1o9UUGFwSnEsz9hI6lCE6vHxSOezQISms0srBvOLLoX0KkloY5AYaBB+bj6gSU9VLWuEFaYIucjA4NCg9+vKCRCEc+tGSRy/qrXi+eMGXzxVY3hJeSw8+KoXed9GYDTgWZ1f3vXqXYirEy8XiYbJDulMNnbMOrEqCtbDutUEYneX8u0o7/BTRh1IUQlkKSIFzynE0WiWdqtHZxuDwzUfPAFvhVY+DfAQZTnIktBikNmBzjaSHHNhgy5Y4P317BD+8ePn95ZM+hNHXVbVer0vXqQNsdbCutG5VuU7xl+OelrDs0eGu+LJtNWNKw2RGdEGj35M+V2s+NoTNiFkJ/iVqkY8lCnG3mcnk1d1V+OA0rUQhOjZgELWITov7zXMIH06PQbdIQXebXU0/g057OhkN55CNjaFujKQrVjDoYB4FvY/i4zBItwHbPQIwFcIHGeIeDcEeWKHbP7+m8O3zB7nZfD8tlyeQU4CyLaaGDNwvMxCTGDTpIQ6ifrFYFGKQN/nuu8Vi4pys+FcwIcCb0UjKI+AeHU0wWIezfxIxTT5IUv+WMtbplb6PW4rpTgsxm/h1ZjTxpxADht62ohaj9ck6MvSiFlVq2yp1qSiER3eNjoftVkRnRC22uVmmuqq2vfVhqrejdWGqrlmJa+m0bEz2Hy/nptqZxVglTZ8BH4rGCyQH3JE5kkOkVsIrOH1ztoQfZcC13KQiMuTnqV8tXi0ezcqhX8h4eHIMmWG23N4Y2KXlfn40bQ7+msTTdMmFVNHpsDnjbbk8DUqH7jBy3W+tMOOl7Hyfg0Qx/3i7M8jPvy+Txpo6m7bPWj88CKuCzueTL8pnD311cpzaQ9lhiJRmJK1grUMPco+YMtEHJlQIoxWSTw5mvfZg3+UV+C0jwrOSRcnO2Y3GlQ59bEplh0rlbbfXxtimGqSmaobw1dHhLx/evz48eHd89Ob92ZuDZ+WiDDchkWfrDpL2zpEe/fcf8k/Sk+/pfeLbu8fD/68M//Erw+zugDehGo3UxP2WbLGdJ8+5SHTz6wUGfqvL8+NcbLeN9PjBmWnivz9FdBtRn1/ejRu+mwrRo2zRpWF1hRu2tlI4hjSXTOQDfPEtgHv0diSe/HrG3fUPIRGhRg== -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Reset internal clock (alpha)

- - - -Resets the Zeebe engine’s internal clock to the current system time, enabling it to tick in real-time. -This operation is useful for returning the clock to -normal behavior after it has been pinned to a specific time. - -:::note -This endpoint is an [alpha feature](/reference/alpha-features.md) and may be subject to change -in future releases. - -## Request - -
- -The clock was successfully reset to the system time. - -
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/resolve-incident.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/resolve-incident.api.mdx index 0e3b9b60610..8397707e33e 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/resolve-incident.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/resolve-incident.api.mdx @@ -5,7 +5,7 @@ description: "Marks the incident as resolved; most likely a call to Update job w sidebar_label: "Resolve incident" hide_title: true hide_table_of_contents: true -api: eJztV8tu4zYU/RXirmZQxXKmmWmqroxMpk3n0SCPdpF6QVHXFhOK1JBUHEEQ0N/o7/VLikvKju04yCy66MILQ5Z4eR8855C8HXg+d5DdwJkWskDtYZpAgU5YWXtpNGTwmds7x3yJTA42jDtm0Rl1j8VPrDLOMyXvULWMM8GVYt6w67rgHtmtydlCKsVyZBoFOsdtS+MWHfrg9dbk//z1N3n0VqJL2MwoZRZYsLxlvpQu+Bz9qSGBmlteoUdLOXegeYWQwTKvj9hCApKSrrkvIQGLXxtpsYDM2wa3K/uILTOzzdJialTaCBJwosSKQ9aBb+sYyuMcLSQwM7biPn56dwR9P6VwrjbaoaMZb8ZH9NgMebUeSzpWcXuHxfqCjqBP4Gg83j23tuZeFliwgntODrTx7J4rWVC6wmhPGGYd8LpWUnCamtbW5Aqr724d+enWqtoMMGHn0ZIV6LlUzOS3KALc0TDHgknNbi4+nLAfj97+MH1Vel+7LE0Xi8XIzsQBFtIbOzJ2ntqZoB/ZvR6xqxItsoq3xAReFJJickUF1Wi9RMdcjULOpCAMfKw1JENLH+EfQIhpER1Wkx8hWkHlvJV6vo5UYyVsc2DCri/OWMBDzlqp509Dhzkz3ijywXPT+CxXXN8RUF56tTPodhTXVBVRf+DbZoA+Aee5b9yLTPv+zRPfRItfrq7OWXTBhCmQzYyN2hkCURGV1LJqKsiOxuMEKv4Q396Nxz35JMS/oRLN8KFWXAdqbZcjNauMxYE/oTCpneda/FfIGCvncjvuCPpHLGAg8ftYUd/3QVAviXEhfbmxFdDuMAhsZhq9F9heYHuBPSuwt7tOrIlmtMqWeIjWGsuMEI21WLBFKVVwT3eCZWw6rtH5vdL2StsrbbfS+gQq9KUp6JJrXKAO3XUzSJcHl0u7tTOsT8PNsgmpJODQ3i+vz41VkEEXNdRnadqVxvk+62pjfZ/eE0D33Eqeq0hLGo5aW3JIGcFVGfN4iiUN0B19WeMJrxpdcHbMLk4vr9jP3OOCt2FtKeSm6+Px8XinVzJ9xuPk/IzFCiMT13aHpVuS+U630fhbHIe7vkPRWOnbS5oWlydHbtFOGoJjxZAhXvBO79EIkuHPhyVvfv3jKkAv9cyE6QMFniZCqKB1MfPx6PAp3c7PgmqEqapGh61Tz+MFh68VJlTjPBWUgJICtQvEHnqqpdmnOMJ+jxHZ4YhAicxZ7phz6csmHwlTpSJOWz1zZfK04lKnQwiXnkw+X395Pzn4dHZy+uXy9OBwNB75Bx+KJ0ZXXK/lcRHbotWtbLvY7vGk+F91qgP6Hh98WisuNfExLFs3CPZm1bc6SCDbbGLXNDtNBt3dQNfl3OG1VX1Pn782aFvIbqaPMg26LqSj/wVkM67cdtu7vmKvLoYG+TV7uRneWdLwkes2bBaqoTdI4A7brc68n/YJlMgLtCHLaDERAmu/NvfZ055Et9r6zn+7JLn8C8tKx/c= +api: eJztV8ty4zYQ/BXUnHYrtChvvBuHOam83sTZR1y2nBwcHUBwJMIGAS4AWmaxWJXfyO/lS1IDULIednkPOeTgg0qiOJiZRnfj0YHnCwfZNZxpIQvUHmYJmBot99LoswIysOiMusN1QAIFOmFlTRGQwWdubx3zJTI5hDDu2DCq+IlVxnmm5C2qlnEmuFLMG3ZVF9wjuzE5W0qlWI5Mo0DnuG3pvUWHPmS9Mfk/f/1NGb2V6BI2N0qZJRYsb5kvpQs5R39qSKDmllfo0RKmDjSvEDJY9fURW0hAUtM19yUkYPFrIy0WkHnb4C6yj9gyM9+GFlsjaCNIwIkSKw5ZB76tYymPC7SQwNzYivv417sj6PsZlXO10Q4djXgzPqKv7ZLTzVrSsYrbWyw2J3QEfQJH4/HjY2tr7mSBBSu455RAG8/uuJIFtSuM9kRh1gGvayVFYDmtrckVVt/dOMrTbaDaLjBh5zGSFei5VMzkNygC3TEwx4JJza4vPpywH4/e/jB7VXpfuyxNl8vlyM7FARbSGzsydpHauaAPxb0esWmJFlnFW1ICLwpJNbkiQDVaL9ExV6OQcymIAx+xhmZo6iP9AwmxLZLDevADRWuqnLdSLzaZaqzcU/eEXV2cscCHnLdSL/ZLhzFz3ijKwXPT+CxXXN8SUV569WjR3SquqSqS/qC37QJ9As5z37hnlfb9m73cJItfptNzFlMwYQpkc2Ojd4ZCBKKSWlZNBdnReJxAxe/j07vxuKecxPg3INEM72vFdZDWLhypWWUsDvoJwKR2nmvxXzFjrFzI3boj6B+4gEHE7yOivu+DoZ4z41L6cmspoNVhMNjcNPrFYC8GezHYkwZ7+9iONdGMZtmSDtFaY5kRorEWC7YspQrp6Uywqk3bNToftfjitRevvXht32t9AhX60tDZvTYuSIdOuxmkq63Lpd3GLtan4WzZhFYScGjvVgfoxirIoIse6rM07UrjfJ91tbG+T++IoDtuJc9VlCW9jl5baUgZwVUZ+9jnkl7QKX2F8YRXjS44O2YXp5dT9jP3uORtmFsquZ36eHw8fjQrhT6RcXJ+xiLCqMSN1WGVlmz+aNoY/C2Jw2nfoWis9O0lDYvTkyO3aCcN0bFWyFAvZKfnGATJ8OPDSje//jEN1Es9N2H4IIH9RogVtC52Ph4d7svt/Cy4RpiqanRYOvUiHnH4BjChGucJUAJKCtQuCHu4Va3CPsU37PdYkR2OiJSonNWKuZC+bPKRMFUq4rD1d65MnlZc6nQo4dKTyeerL+8nB5/OTk6/XJ4eHI7GI3/vA3hSdMX1Rh8X8WK0Ppftgu0edor/1V11YN/jvU9rxaUmPYZp6wbDXq9vrg4SyLavsRuenSWD766h63Lu8Mqqvqe/vzZoW8iuZw82Db4upKPfBWRzrtzuxXdzxl5dDFfk1+z56/CjkIY/uW7DYqEaeoIEbrHduZv3sz6BEnmBNnQZIyZCYO03xj6525Pp1kvf+W+XZJd/AVYm1Fs= 
sidebar_class_name: "post api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/return-user-task-by-a-user-task-key.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/return-user-task-by-a-user-task-key.api.mdx deleted file mode 100644 index 7b2e0188d92..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/return-user-task-by-a-user-task-key.api.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: return-user-task-by-a-user-task-key -title: "Return user task by a user task key." -description: "Get the user task by the user task key." -sidebar_label: "Return user task by a user task key." -hide_title: true -hide_table_of_contents: true -api: eJztWN9T20YQ/lduri/JVLZMStJUbxQIoU0zDOD2AXhYSyv7gnSn3O1hXI//986epCAjwcBMH/3AGN/tj7vd/T5Z31oSzJ1MruTUoRUE7lbeRDJDl1pVkTJaJvIESdAChW9NxGz1aOEWV+Nrfa2TJNGG8FpfLpQTqLPKKE1COQFaQFEtQOQI5C0K0JnQhgRqmBWYCaPFIZReZyDSwjtC64TxJEwecs3M/fhaXyCGb1et6UdxfnxxKQ7OToW5Q3uncHnzJoZKuREZU7g4rQ1HUKmRRUe9hVHrNy6zn757tCveenutc2NF7i0t0IoMCVThxuGG11pGsgILJfIpZXK1lhpKlInkilyCu/0TVzKSiqtXAS1kJC1+98piJhOyHh+X+LJXTRlJly6wBJmsJa0qjq404RytjGRubAlUL33Yl5vNDWdwldEOHXu8m0z447ksygnn0xSdy31RrIRF8lZjNg73S40m1MRBoKoKlQIHib85jrTun83MvmFKXBhrKrSk6nN0C/KSi0TSERB2bB1Zpee8A86pucbhTSywRE2n2bO72hHoFF9+mhR0pjIgPLHGVx0nsBZCjwlLN5Cz68vYep1rZQ035ghzpRUX/ol79exefrPG9fUl4ZVXFNBimJyjwa52PLhQI1IlBi9TVgW+3i83RWGW0+p1XpnH1zkQauBZG+peJPGe0GooPhlbnmOOFnU6PLS99v2N1qkaYc/W9pd3oUrekSk/I2SBhvpYhCwLYaE420Ll0MApYxUNd7XPIq05szM8cMpY8OZCzZkzmafvoPA1Y3cWW2fmuFJpVfpSJpNIlnBf/783mXDSHHxBMnk/4QOSooJPNW0I5ZSwlJsNb+0Pcd3vkAnmXHT0NJtV1swKLH/us9p2sANxVls2DwJRV1iAE7XhDDOhtLg6/3Qoftt//+vNmwVR5ZI4Xi6XY5unI8wUGTs2dh7bPOU/tnsbKmZRlLASMxQPDRMPPCpchanKVSrINAWsD8ONqtn6eRKud58Zbm9Vr80HYnp+KlSGmlS+UnreTy07XZIwM56SWQH6Vj70q5/0cRbnyxLsqn3ObydongfevQQRQ4P6+fLyTNQhRGoyFPxUJ/5x0iTaGsL9SXcMP0wmTA6h4y+4iRZ4XxWgw2g9vo7SojQWm/kJF1MN8/5PnTFWzdXjvGPZxU4zxEf1jVrw7PXnfarB08JY9S9mO/Ts0LNDz5Po2e/P+1dDIjde76Czg84OOk9B5/3Qr7ZTXf90Fg7tHVqB1hq7g9EORjsYDcJoE8kSaWEymcg5hslhuSmRMb+TjfidzMXrjgizYV0pYKvWrbwtZCLXNXA2SRyvF8bRJllXxtImvuOu3IFVrNGFJvJ2DbB2cAqTQhGWhxrIGyyOtRd7JNydAOESVqGgnHI79MfJx8nw+6ex9ERElgLrG9bj16GENixjezBsbfySwEFxc5h6fpG9YLe6PDMEi/bAcxN+jEWTL0Tn77WRjJp/PrXD8sc/l6HfSucmuDd97x+Eu9JKBXIy3uvP2NlpgEpqytLrwJd6LpaKFgI6F2uUVgZRoVLULkxzI2a2Zl/qHdGIE2JvzE2pJ6elybmihZ+NU1O2AuuPz1lhZnEJSsdNChcfHvw1/Xp0MPpyenj89eJ4tDeejOmeaiXCOCpBd85xHoTJbfkZ+nLpVgHWD4+MnX49pF83o0l4T3FVgNIMltDTdcMhV/KBQ2Qkk66UexM1RHAl1+sZOJzaYrPh5XACmVzdPPBGIJpMuVAnmeRQuMcCeLddb84bqfytGJTFB0/eSqt6FQir8PxNRvKWhcotFXpzs4nkImhm4WC1xUGaYkUd357ozYD/wbUnx4zU/wCyKbai -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Return user task by a user task key.

- - - -Get the user task by the user task key. - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The user task is successfully returned. - -
Schema
    customHeaders object
- -Bad request - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Unauthorized - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Not found - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Internal server error - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/return-user-task-form.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/return-user-task-form.api.mdx deleted file mode 100644 index 314aa24e14c..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/return-user-task-form.api.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: return-user-task-form -title: "Return user task form" -description: "Get the form of a user task." -sidebar_label: "Return user task form" -hide_title: true -hide_table_of_contents: true -api: eJztWE1z2zYQ/Ss76CWZUqScOmnKm+I4qdrE47Hl9iDrAJJLCTEJsMDSsqrhf+8sSH3LSTrTmV508NgkFrvY3feWxlsKklMn4rG4c2hhJN2DmAQiQ5daVZEyWsTiIxLQDCE3tgSTg4SajUm6h/Be3+s4jrUhvNejmXKAOquM0gRzVRRgdLEAi1RbDYXSD5h5Ny6EXePMoANtCFxdVcYSYJlglq3NfZSvBFMOpAZZVDMJOUqqLYLUmXeJWiYFZmA0XMiy1pmEtKgdoXVgauKUOL3EPIX3+hbRP41Xpm/h5vJ2BIPrIZhHtI8K55MXkayU65ExhYvS1rAnK9Wz6OjgRW+1LyyzH/6q0S546eW9zo2FvLY0QwsZklTFOlERiEpaWSKfUsTjpdCyRBELLj236XdciEAo7k8laSYCYfGvWlnMREy2xv0mjma4aRs84CIUgXDpDEsp4qWgRcXelSacohWB4LJLal+9ORdNM+EIrjLaoeMdr/p9/nUYxcNEOXB1mqJzeV2sIYBZ6FNLjSbUxPtlVRUqlbw/+uLYyfLwWCb5gilxTayp0JJqj8CRuA5Hj/GAi1Vn2Y7T/WaSgSDUUtMwO+6zXYXh+2c8O7JKT9lPUpX6OS/vrj9ffY+PTRkOfbRrz7joytUE4hGtU21RD310i/+6SlwmRQUbfTC2HBKWomn49av++fFQG+TNpYPc1DoLIKkJtFnjRTpnUiUJM5grmoEiBksTiPNjSHsnM2DEo6PnAVVZkxRY/ngIrF1nA7huLTsaQltBkA5awwQzUBrGNx8u4Jfz1z9PXsyIKhdH0Xw+D22e9jBTZGxo7DSyeco/bPeS5xxahFIuIEGQWaY4pixgA2VwFaYqVymQ8Z3ojg3ciZYwX+dBu7rcQ9BW42qrxP5AGMDdzRBUhppUvlB6ehja78llXbAPmZia4qSQ+kFsEHAYdD+Kq8tS2jUXdwMwyklS7b45g356deCbgfXraHQNrQtITeZRDMSfhi4QJ1Eqrcq6FPF5vx+IUj61T2/6/YZ9cse/IxMN+FQVUnto7aejNJTGYocfn5jSjqRO/6vOGKumaj9uuMPGDsTv24xaSp73zw7xfqdlTTNj1d+YndhzYs+JPc+y58gH7cpQ+wk7UedEnRN1nqHO62P/tQ01oWUQOrSPaAGtNfZEoxONTjQ6SqMmECXSzGQiFlP0yOHLfiwivlH1+EblouWWJtBEfDS+2nuCtdJBbQsRi2XLniaOouXMOGriJSstTfTIrXmUVrFM4jvJyy3LVugpTCoL//pYF3mB9YlVdnvayUdJOJcLX1UOuev6bf9t/6hXrwMd98hqTJthuH9TXrllgh91u3t1/ppjL3o4TGuraHHL29ryJCgt2kHNnVhjo4vnvfNzaySC7o8PK8T89ufIN13p3PjtXfMPDyK27u+iH54dAu166PmSmrKstR+aetpenuVWYp3YxUwqVIraeUh3etLK7FO7An90osBZyE1pkbOalVNFszoJU1OuNK7176QwSVRKpaMuhIsuBp/vrt4Pep+GF5dXt5e9s7Af0hP55CvjqJR66xw3rUa4EQo6HO9kvNx8KE6q5P+jSnZoJ3yiqCqk8gqNh8mym01jsZlNIhDxrmLp2zoJuhEzFstlIh3e2aJp+LU/iIjHk81E8iMsU86XS8S5LNy+urmNixc3nQ76Eo5qnkcT6F5KvfCjsKj5SQTigdXFHc21mTSBmKHM0PqDtRaDNMWKtvYeyJo8Staj/OMlz4B/ADyfGzk= -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Return user task form

- - - -Get the form of a user task. - -:::note -This endpoint will only return linked forms. This endpoint does not support embedded forms. -::: - -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Path Parameters

- -The form is successfully returned. - -
Schema
- -The user task was found, but no form is associated with it. - -
- -Bad request - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Unauthorized - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Not found - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -Internal server error - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/search-groups.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/search-groups.api.mdx new file mode 100644 index 00000000000..50358585666 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/search-groups.api.mdx @@ -0,0 +1,76 @@ +--- +id: search-groups +title: "Query groups" +description: "Search for groups based on given criteria." +sidebar_label: "Query groups" +hide_title: true +hide_table_of_contents: true +api: eJztWVtz2jgU/isaPe3OukDatNv1G6VJN7u9ZAPpPlBmItsHrEaWXEkOYRj/950j2WDAAbrTfVoywwSso3P9viNZWlLLZoaGY/pOqyKnk4CqHDSzXMmrhIbUANNx6gYNDWgCJtY8x2Ea0qEbJFOlycxJkIgZSIiSZMYfQJJYcwuas84XSQOq4VsBxr5RyYKGS/eTa0hoOGXCQEBjJS1Ii2MszwWPnRvdrwaNLamJU8gYftv0wjlHvKOkstGhAWVCfJrScLykdpEDDamKvkJsaUBzjUFaDsbpVdruah0qbcmUg0jWUdCgVsW0ZgsaUG4hc0qONeY07lobpVAZs4qgQyRaNMwZq7mc0TKgSieg2+e7IcIlmac8TleKbApEg2AWEm8C9YIsMqx6fzigAX17MRxg6ROYskJYGlbPLbcCqlx8Qu0XOK0sg0btxlVIk4a4K8VfBegFzrzxJaHlBGfmbAa7/l+zGZeu3BvZPjqrWmXtSeEygUeipsRVyiXFMm0ruHA5Izi3kWouLcxA04BOlc6Y9Y9ePMfkC57xFqignYw98qzIiCyyCPSGQQ220BILoyQ0AXqERe9mf2rbin61Csljn6FYhwyU9vV2Y1j+O8GMxVJ8ZqIAc0eq7C3QT0ZyDQ9cFWbNIZMraWAf3jfrUa5cfQNTpVsKvO1r5ORanZ1y/V97W7aC9ZrNYA3WoCXGlklPT9ilvmgtI8Jn1uxhXtBsEqCt53nBGlL0kAOSZdDIRtVUtps6uoOSmG1bu9bZCN9Zv3TGN8m9IdCWpNI3D18x59TzXm9PSppVLoRjzY9ZJdaYObbHHGxcXHr+4ncWqcK33g3/99b0X6ja77FVlomrmgNbGcaxqkdlzPpWiEZaFrv2BvXqHBvUFlnbS+nWoQcnUKPKzXP2sTH6NQrDIgZsh9waIDblph67azaXu2qlxG4gCUhToO+7ffWInrXZF492Hqd9r++uh/8w1/c1MA/tLUZukHElcQhB/AnwuAWvAo3n6T7njyCiy+ZhRDtbf8KikZI1OJ/qqvfgtlK72D2+H3pFKN/BicwYPpOQfABc7P+ERbNGdfgtSAKLlb+HhUNS5mYbUmurF8Cq4e5W/2ke7vReZH3L41YUOLHzvW24Ltc3nEmmjAtIOuSD0kASsIwLQ5gGXKgfeAJJgxa+upFKFv4d4InunWsVCch+OdTF++TaS1Z2iQcKYYZ4wchbH99cDshv5y9/nfyUWpubsNudz+cdPY2fQcKt0h2lZ109jfGDcj93yCgFjbhekAgISxKONpkgawgSk0PMpzyuS1W5TbBAPr4DHdmN7iJuVc9C8x3o9MntzRXhCUjLp4u6T2+Yps29u1swwkgweU/XADgE8z4xRZYxvaib3KYB3OBZZovDeHzxvBX8v49G18SrILFKwL01uj5ZGcIgMi5xF03D814voNWemoaver0SdWLFj4hEEnjMBavW0q1wuCTZGrcuMC6NZTL+UZVRms/4tt3NDVQF4rc+opqBZ+0MrJYHIlh8b3Al4glhhU3Raly/M4FzggmzZ4t0ItmJZP97kr3Yxful0hFPEpAOniu+cUOksoQJoeaQnHh14tWJV0/x6mXb9rGPL7IWNOIQtFaaqDgutIYEjyiFUx+DMbXtxnvQiWknpp2Ytsu0MqAZ2FTh1Uyu3JlfzmxKQ9r17+Fd/6ZG8TxWP4A27qCp0IKGdOnpUobd7jJVxpbhMlfalt0HrMUD05xFwiMQhz2targIFTORepO7ZcOB5qHhgGWFTBh5TW4uhiPyjlmYs4VLY766bqlVv+697rVqRdEnNPavr4iP0IOu0Qhqtcjo9vdwJ3yM4hKPSAzEheZ2McRpPj0RMA26X2DmV2Co7Dnt+NsL0aD6cllD5I+/R67K2MRu1ldiF48syz0F1yfF/oiicf/jL6vGq4uk1dBkfbXib0N6q/uK3tY9wnjpw2qe1+Oz0oF8qlxMFQR3s4NQAW18Onuds124X1851sYqywrpWreckTm3KWGNbMeiMHhfQdHPGPAoYh1wLfbej5DP3iI56yBSPJzrjj3jNi2iTqyybuynrf5HQkXdjHHZrUyY7qD/4fbj2/6z91eDi4/Di2dnnV7HPvrjQGRUxmTDD3dUUh1xbQe6XK9S33sTWiHGwqPt5oJxiaV1US0rPo/pymjF6ElQsXJMl0vUfqtFWeJjdypDw/FkTWL8VQY0BZb4itN7PDWjA+/xsxE6gOKicGdW2wfpZVDP6Mcx5Hav7KTRlK4/DUcI+eqON1MJztFsjve/bE5D+oV+oRSvmVGDY5N7vqSCyVnhIEy9Xvz7B9HxoJE= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; 
+import TabItem from "@theme/TabItem"; + +

Query groups

+ + + +Search for groups based on given criteria. + +## Request + +

Body

    sort object[]
    + +Sort field criteria. + +
  • Array [
  • ]
  • page object
    + +Pagination criteria. + +
    filter object
    + +Group filter request + +
+ +The groups search result. + +
Schema
    page object
    + +Pagination information about the search results. + +
    items object[]
    + +The matching groups. + +
  • Array [
  • ]
+ +The group search query failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
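The new search endpoints in this change (`search-groups` here, plus `search-roles` and `search-tenants` further down) share one request shape: a POST body with optional `sort`, `page`, and `filter` members, answered by a paged result containing `items` and `page`. A sketch of such a query — the path and the concrete field names inside `sort`, `page`, and `filter` are assumptions; only the overall shape comes from the spec above:

```js
// Illustrative sketch only. The endpoint path and the concrete sort/page/filter
// field names are assumptions; the sort/page/filter request members and the
// items/page response members are what the specification above documents.
async function searchGroups(token) {
  const res = await fetch("http://localhost:8080/v2/groups/search", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({
      filter: { name: "accounting" },           // assumed filter field
      sort: [{ field: "name", order: "asc" }],  // assumed sort fields
      page: { from: 0, limit: 20 },             // assumed paging fields
    }),
  });
  if (!res.ok) throw new Error(`Group search failed: ${res.status}`);
  const { items, page } = await res.json();
  return { items, page };
}
```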
diff --git a/docs/apis-tools/camunda-api-rest/specifications/search-process-definitions-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/search-process-definitions-alpha.api.mdx deleted file mode 100644 index a71f9439b29..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/search-process-definitions-alpha.api.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: search-process-definitions-alpha -title: "Search process definitions (alpha)" -description: "Search for process definitions based on given criteria." -sidebar_label: "Search process definitions (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWd1z0zgQ/1c0uheYc5wAheP8Fkq56x2UXBu4hyQPir2OBbJkJLlpJuP//WYlO07i9CMMLze0M0yxtdov7e+n9XZNLVsYGk3oSKsYjCEJpFxyy5Wks4AmYGLNC/cY0StgOs5IqjQpOuKGzJmBhChJFvwaJIk1t6A5C6cyiiKpLEzlOOOGgEwKxaUl3BAmCRNFxkgKzJYaCJMJkcoSkGwuvLpTlpcyYSQWpbGgDVGlJSolNgMyVzfhVF4BuKdJI/qaXJ5djclwdE7UNehrDsvZkz4ruOlZpYTpx16wxwre02Bs50Wv2RfmyS/fStArXHo6lRh9WmqbgSYJWMaF8RFOJQ2ohm8lGPtGJSsard0j15DQKGXCQEBjJS1Ii2usKASPGeau/8VggtfUxBnkzK0K8TGl0WRN7aoAGlE1/wKxpQEttCpAWw7G7VDaaaulmNZsRQPKLeTmGD0pB5FsKTJWc7mgVUCVTkB3V7A4UlYKi1ZNTKsq2Ap3UiucBdRyK2BTPf9gKq+Utpc+U7Sa4c6CLeAod7XKt3zi0sICNA1oqnTOrH/14jn6L3jO7QNljfNxmNqdiDtJ3fWt2mx8A6nScMzO6mB+RmwBbX6CA/YObLp9Q/esRR3gQ9Ndw/3tBu1/g6vvXX4YZ0C+wsoRhEWkd1kipMHd5/DqBM9Bshy6+i9YDh749+luq1eDUaWO4eKgxst6lcjvUH0N2nCP212tn/3Cw/TdVY61hTFb3G7EssWxjndO8zzp6t+5EPAd4cmxhixIJu0h7WO3crTKrcof7QfxzlX1Lq3cKnwIN5WnMFMoaXzZPx8MDld511niGYCYMsaVtBSkURW6m+GHM/+xlGmVZeJ8j4juAmHKtbFI1Z+ZKMEcR4iCfe/euyjRJ/QYTrx1x3569v25x8+fkyEf5vdPSZH8/8CRCP9diN3NjA163JaT49nQNc4kZVxAEpIPSkPTMxOm3cZrnkBCuHT9e8OYZK6S1Z20WWg1F5D/2qXPXfeGZOQla7vEw5cw9BYF59765PLdKfn95OVvsyeZtYWJ+v3lchnqNO5Bwq3SodKLvk5j/IdyT0MyzkADydmKzIGwJHGRM0FaYiCmgJinPCZWuQBrtwkemY/vHs52q93We4ODUnO6/5k2JJ8uzwlPQFqerrhcdE3vdu9zVdpoLpj8StvKONTv71oxZZ4zvWo+xXYNYE9smS3vv2xePO/oxqL6czweEa+CxCrZ5R40hEHkXPK8zGl0MhgENGc3/unVYFChTjzxB0QiCdwUgklXWvvhcEnytm5dYFway2T8o05Gab7g+3bDfSDjy7c+Ig/Hl4fgOJQEs6yxDkFrpYmK41JrSMgy42KD1cZ2/b36iLVHrD1i7TasVQHNwWYqoREtlHGlw2xGI9qv4dTbmoP1/d1HcSigsT1xvXmpBY3o2mOnivr9daaMraJ1obSt+td4MNdMc5x6uXPEZY+xpnaEipnIvP3uGeJC+xULZG8U9gezsGQrl9Oinhm1ql8PXg8OakXRWzTicM1H6CtwixUatQjvg2q98EMUV9i5G4hLze3qCrf59MyBadDDEo9hUxm1Pacdn70QDer/vGvq5a9/x+7IkdEu24nd2Q3LC4/HdkRyuLEfNB14W427fXT7ftMDD3a71VbiYLPZLrcd4lbb6Od+k83grhWv53V+KDdrR2t+ZDbYDMQGe+OuydqnenuQhe8qh8JUuTzXGOme2HagdBA+6+JxdO5oJVZ5Xkp3t8gFWXKbEbZVAfWQFwlH8Biw+4zWTa4bsfd+hTQN/rMQq9dDrLlSFtxm5TyMVd7Mdje/50LN+znjsl+bMP3T4YdPF2+Hvffnp2cXV2e9Z+EgtDf+uxIhnzO55Uc9CD80BH/ixtlP98Nft5fr4xj9/jF6jWcLN7ZfCMYllrw733VNvRN6gHppU784d/YEOqHrNWb0kxZVha+dEzSazFq+xacqoBmwxAOBfkWM01N/ZL0xeoPionSTgf3xTRU0O4ZxDIW9U3a2dZmMPl6NkZ3qvxbkKsE9mi2RTtiSRnRKpxQxXfj48E8K+H5NBZOL0iGber348x+WagL6 -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Search process definitions (alpha)

- - - -Search for process definitions based on given criteria. -:::note -This endpoint is an alpha feature and not enabled on Camunda clusters out of the box. -See the [Camunda 8 REST API overview](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) -for further details. -::: - -## Request - -

Body

    sort object[]
  • Array [
  • ]
  • page object
    filter object
- -The process definition search successful response. - -
Schema
    page object
    items object[]
  • Array [
  • ]
- -The process definition search query failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/search-roles.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/search-roles.api.mdx new file mode 100644 index 00000000000..81cf8bef87c --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/search-roles.api.mdx @@ -0,0 +1,72 @@ +--- +id: search-roles +title: "Query roles" +description: "Search for roles based on given criteria." +sidebar_label: "Query roles" +hide_title: true +hide_table_of_contents: true +api: eJztWVtv2zYU/isEnzZMlZ026zq9uW66Zesli53uwTUQSjq22FCkSlJxDEP/fTikZMu2nLhY9zQHMGKL5Ll+3yF1uKKWzQ2NJvRaCaDTgKoCNLNcycuURtQA00mGY4YGNAWTaF7gKI3oyI2RmdJE4wQSMwMpUZLM+T1IkmhuQXMWfpY0oBq+lmDsa5UuabRyP7mGlEYzJgwENFHSgrQ4xopC8MQZ0ftiUNeKmiSDnOG3bSPQNOKtJLWKkAaUCfFxRqPJitplATSiKv4CiaUBLTR6aDkYJ1Zpuy90pLQlMw4i3ThBg0YU05otaUC5hdwJOVaZk7ivbZxBrcwqggaReNlSZ6zmck6rgCqdgu5e74YIl2SR8SRbC7IZEA2CWUi9CpQLsswx44PRkAb0zcVoiHlPYcZKYWlUP7fcCqhj8RGlX+CyqgpaqZvULk1b010q/ipBL3HltU8Jraa4smBz2Lf/is25dNneivbRUdUq7w4Klyk8EDUjLlMuKJZpW8OFyznBta1Qc2lhDpoGdKZ0zqx/9OI5Bl/wnHdABfXk7IHnZU5kmcegtxRqsKWWmBgloQ3QIzR6Mwcz25X0y7VLHvsMp4VkqLTPtxvD9N8KZiym4hMTJZhbUkdviXYyUmi456o0Gw6ZQkkDj+F9Ox/V2tTXMFO6I8G7tsZuXqexM67/a2urTrBesTlswBp0+Nix6PCCzXQsUF1LKk8lb78z9Hm/340vX103LpfCQei7VMxN/I7l25Mk5tJjGb+zWJW+DG2Zb7YV/mtRj1tslWXissHDTnxxrOZrzqwvC6iko/B3k/XlOZJ1B7jdiXQ1+d5NQDxbV/i1sU4/Fglfr9EtYsCG5MYAsRk3zdhtm2i39a6BzJAEpCnR9v0acwR/t2vE0cbjsm+13dWz72b6Y2T20K63nk72rmc8hSB+ADyu+NegcSx9zPanaehi+TSe72DZisUGld2Vg9yBO07sY1ayHFqC6oPGQTk4PcR1zBg+l5C+B9zv/oRlOzWN2x0AAosJv4OlA1DuVhvSSGv2ANQVduT8MPva+cWwItX3n3Zm3s06f6zwNjn6igvJjHEBaUjeKw0kBcu4MIRpwH3qnqeQtpjgUxqrdOlPwAfqdaFVLCD/6am6PSBXfmatl3h0EGaInxh77ZPrt0Py6/nPv0x/yKwtTNTrLRaLUM+SZ5Byq3So9LynZwl+cN6PIRlnoBHKSxIDYWnKUScTZIM7YgpI+IwnTZpqswlmx/v3RBF2o/tgWyez1HwPNgNyc31JeArS8tmyKc1bqmn76Or2iCgWTN7RTfqfQviAmDLPmV42dW1bAZ5vLLPl02B88bwT+L+Px1fEiyCJSsG9MrnSWCtCJ3Iu8RBJo/N+P6D1kZJGL/v9CmVixo/wRBJ4KASrt88dd7gk+Qa3zjEujWUy+V6ZUZrP+a7ecOv4VIP4jfeoIeDZAQL6HYEIltwZ3Hx4SlhpM9SaNK8M4IxgwjxyKDqR7ESy/z3JXuzj/a3SMU9TkA6ea75xQ6SyhAmhFpCeeHXi1YlXh3j1c9fpcYDvrhY04hC0VpqoJCm1hhQ7dMKJT8CYRnfr1efEtBPTTkzbZ1oV0BxspvBaolDGQYfZjEa05169e/5FjWI3Ut+DNq61VGpBI7rybKmiXm+VKWOraFUobavePabinmnOYuEBiMOeVQ1ahEqYyLzG/azhAL4ZN94MWV7KlJFX5PpiNCa/MQsLtnRRLNaXDY3oV/1X/U6pOPWAxMHVJfEeesy16kAjFgnd/QruJh8juMKmiIGk1NwuR7jMhycGpkEPSgz8Ggu1Picdf/tJNKi/vG0Q8sffY5dkrGHXm/ugiweWF56B/j5msr4rWd9+TDe3B77h31+35Ps7rfLJytvebknjs8oBeaac4TXM9kOAeABtfMz64dk+pK8uHTMTleeldOVZzsmC24ywVkgTURpsyVO0MwHsNkSruuWyVvvOj5BPXiM5CxEOHrNNVZ5zm5VxmKi8l/hl6/+xUHEvZ1z2ahWmNxy8v/nwZvDs3eXw4sPo4tlZ2A/tg+/yIWtyJlt2uG6I71zt+rnabETfeNNXg8LCg+0VgnGJMHU+rWrGTmijsubsNKh5N6GrFQq/0aKq8LFru9BoMt3QFH9VAc2ApT7dviFGh97eZ2PUj9NF6RpSu73xKmhWDJIECvvo3Gmr6lx9HI0R1PUVZq5SXKPZAq832YJG9DP9TCneoaIExxf3fEUFk/PS4Zd6ufj3D0PWNyI= +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Query roles

+ + + +Search for roles based on given criteria. + +## Request + +

Body

    sort object[]
    + +Sort field criteria. + +
  • Array [
  • ]
  • page object
    + +Pagination criteria. + +
+ +The roles search result. + +
Schema
    page object
    + +Pagination information about the search results. + +
    items object[]
    + +The matching roles. + +
  • Array [
  • ]
+ +The role search query failed. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/search-tenants.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/search-tenants.api.mdx new file mode 100644 index 00000000000..79a284f5608 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/search-tenants.api.mdx @@ -0,0 +1,80 @@ +--- +id: search-tenants +title: "Query tenants" +description: "Retrieves a filtered and sorted list of tenants." +sidebar_label: "Query tenants" +hide_title: true +hide_table_of_contents: true +api: eJztWtty2zYQ/RUMntopLcmJk6Z8U3xp1dxcX9IHxTOByKWIBAQYALSs0fDfOwuQIiVStpJJ35QZTyQC2F3snnMAAlpRy+aGhlN6A5JJS+8CqnLQzHIlJzENqQGmo9S3GhrQGEykeY7tNKRXYDWHezCEkYQLCxpiwmRMjNIWYiK4sUQlxPrxAxpQDd8KMPa1ipc0XLmvXENMw4QJAwGNlLQgLbaxPBc8crEMvxh0uKImSiFj+GkzEh8h8eGSygkNqF3mQEOqZl8gwu9MiA8JDaerTkuuceaWg3F+lLZdL9dKW5JwEDGJNLegORs0TpjWbEkDyi1kzsi+zpzFnjmlUDmzymWUzJYtd8ZqLue0DKjSMej+8a6JcEkWKY/StSGbAtEgGBbJuUC7IIsMsTC+PqUBPTu/PkU8xJCwQlgaVs8ttwKqXHxA6+c4rCyDVi2n1ZTuWt1dZf4pQC9x5FVVofIOR+ZsDt34L9mcS1f+jWzvnVWtsv6kcBnDA+LSVcolxTJdo4fLOcGxrVRzaWEOmgY0UTpj1j96/gyTL3jGe6CCfjL2wLMiI7LIZqA3HGqwhZZYGCWhxuueHn2Y48T2FX2ynpKnAsNuA3KqtK+3a8PyfxbMWCzFRyYKMJ9Jlb0lxslIruGeq8I0lDK5kgYew/tmPcp1qK8hUbqnwNuxzly/3mATrv/vaMtesF6yOTRg3RaUHYPaA7apLnrLhnCxGxrme5pNxPeKnu/5iOhthuDdTOJWDiop2ZZ3DGpy5gR8Hd4AEShZBvsNx54dA62k+SlcuBlsasJmj77kll50fKXd1J6NRo+ltg0PlLSftto0YNtXnJ5UPC498fEzm6nCa/ZG/I9i4wdMPQEbZZmY1OTZyge2VeKWMes1FJ30rJL9yvbyBHG1xfL+WroF7N51qJHlxjn/qKh+ccNpEQN2QG4NEJtyU7d9bqvS52qJRRmRBKQpMPauIO8hdpuCunfwOOx7Y3fi/9NCf0z5PLS3OLnBxnWPpxDEd4DHrZQ1aJqd4q7o96Giy+cemHaD38CylZUGn90wC8m/FUDM0ljIjuYgcasMMcExWjJRS/jkbNCL7/11szKEA5zifp9oV3HCQzesMqDMGD6XEL8D3Ja8gWUbFHW6e6ALbi//FZYOupkbbUhtrV6qa5Xv4m0387t6j0LT97wXea7fyS7tz7W65zHEJGaWEW6IVI6C3G16d6wAuVYzAdlvT60EY3Lpe5IYLOOCeKQRZojvOHPwINOri1Pyx8mL3+9+Sa3NTTgcLhaLgU6iI4i5VXqg9Hyokwj/sN+vA3KTgkZqLMkMCItjjj6ZIA2Gickh4gmP6uRXYRPM+OCTfJoArrULqXWBCs07YBiT26sJ4TFIy5NlLfUbrmn7vcGtOeFMMPmVNgV9CsdjYoosY3pZ6+SmA9xcWmaLpwH2/FkvnP+6ubkk3gSJVAwkUdpLbeUIJ5FxiTt4Gp6MRgGt9vM0fDkalWgTK77HTCSBh1ywajnemg6XJFMaKvy4iXFpLJPRz6qM0nzOt/1ubsMqEJ/5GdWEOu4nVLXCEMGir8YzibDCpug1qt/XwAXBhDmQ7ECyA8l2k+x5F+8XSs94HIN08FzzrVq6mBBqAYfF68CrA68e4dVJF+/vlSWJKmR8oM6BOgfq7KDOi74XqbFsXnJBa6WJiqJC433PIuXCmY/AmNp3fQrhsHjg2oFrB651uVYGNAObKrxozZU7us+ZTWlIh9U52NCfa1G8SdH3oI076S20oCFdeb6U4XC4SpWxZbjKlbbl8B6Lcc80ZzPhIYjNnlc1XoSKmEi9z27dsKF9cn/KskLGjLwiV+fXN+RPZmHBli6P+fqitDb9avRq1GsVu+6wOL6cED9Dj7qWEtRmkdL951Ku8z6GSzyjNBAVmtvlNQ7z6ZkB06DHBaZ+jYbKn7OO330nGlQfLmqM/P3vjSszqthVc7l9/sCy3HOwufNpTvEasPkzwdZtrr96nq6vhddNd81Fqb/bHK1vH0dbt4LTlZ9q+/YNn5UO+YlywVS47GYM4QPa+BSPBsddDlxOHJUjlWWFdHou52TBbUpYqwKRKAzePlKMMwI8qwtX9YTrbm99C/noPZLjAaLHQ7yW8Tm3aTEbRCobRn7Y+v+ZULNhxrgcVi7M8HT87vb92fjo7eT0/P31+dHxYDSwD/6MHmmWMdmKw50l1ufO2zNdNWvXj/zYoYKShQc7zAXjEuvrpraqmD6ljeeK63dBxdcpXa1mzMCtFmWJj79hqDSc3jX0xm9lQFNgsa87/Yqn2fTUh310gxFgd1G4093tS64yqEeMowhy+2jfu5ZeXX64vkEyVL/jyFSMYzRb4G882IKG9BP9RCn+ngQtOJ655ysqmJwXDsjU28V//wFI1fp4 +sidebar_class_name: "post api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import 
Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Query tenants

+ + + +Retrieves a filtered and sorted list of tenants. + +## Request + +

Body

    sort object[]
    + +Sort field criteria. + +
  • Array [
  • ]
  • page object
    + +Pagination criteria. + +
    filter object
    + +Tenant filter request + +
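For orientation, here is a minimal sketch of a client call for this endpoint, written as plain JavaScript with the global `fetch` (Node 18+). The base URL, the `/v2/tenants/search` path, the bearer-token header, and the concrete field names inside `sort`, `page`, and `filter` are assumptions for illustration; only the three top-level request properties come from the spec above.

```js
// Hypothetical tenant search request; field names inside sort/page/filter are assumed.
const response = await fetch("http://localhost:8080/v2/tenants/search", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${process.env.CAMUNDA_TOKEN}`, // assumed auth scheme
  },
  body: JSON.stringify({
    sort: [{ field: "name", order: "asc" }], // sort field criteria
    page: { from: 0, limit: 20 },            // pagination criteria
    filter: { name: "acme" },                // tenant filter request
  }),
});
const { items, page } = await response.json(); // matching tenants + pagination info
```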
+ +The tenants search result + +
Schema
    page object
    + +Pagination information about the search results. + +
    items object[]
    + +The matching tenants. + +
  • Array [
  • ]
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/sidebar.js b/docs/apis-tools/camunda-api-rest/specifications/sidebar.js index 3114264856b..f8ca4248cdc 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/sidebar.js +++ b/docs/apis-tools/camunda-api-rest/specifications/sidebar.js @@ -5,452 +5,668 @@ module.exports = [ }, { type: "category", - label: "Cluster", + label: "Authentication", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-cluster-topology", - label: "Get cluster topology", + id: "apis-tools/camunda-api-rest/specifications/get-authentication", + label: "Get current user", className: "api-method get", }, ], }, { type: "category", - label: "License", + label: "Authorization", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-status-of-camunda-license", - label: "Get status of Camunda license", - className: "api-method get", + id: "apis-tools/camunda-api-rest/specifications/update-authorization", + label: "Update authorization", + className: "api-method patch", }, - ], - }, - { - type: "category", - label: "Job", - items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/activate-jobs", - label: "Activate jobs", + id: "apis-tools/camunda-api-rest/specifications/find-authorizations", + label: "Query authorizations", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/fail-job", - label: "Fail job", + id: "apis-tools/camunda-api-rest/specifications/find-user-authorizations", + label: "Query user authorizations", className: "api-method post", }, + ], + }, + { + type: "category", + label: "Clock", + items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/report-error-for-job", - label: "Report error for job", - className: "api-method post", + id: "apis-tools/camunda-api-rest/specifications/pin-clock", + label: "Pin internal clock (alpha)", + className: "api-method put", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/complete-job", - label: "Complete job", + id: "apis-tools/camunda-api-rest/specifications/reset-clock", + label: "Reset internal clock (alpha)", className: "api-method post", }, + ], + }, + { + type: "category", + label: "Cluster", + items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/update-a-job", - label: "Update a job", - className: "api-method patch", + id: "apis-tools/camunda-api-rest/specifications/get-topology", + label: "Get cluster topology", + className: "api-method get", }, ], }, { type: "category", - label: "Incident", + label: "Decision definition", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/resolve-incident", - label: "Resolve incident", + id: "apis-tools/camunda-api-rest/specifications/find-decision-definitions", + label: "Query decision definitions", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-incidents-alpha", - label: "Query incidents (alpha)", - className: "api-method post", + id: "apis-tools/camunda-api-rest/specifications/get-decision-definition", + label: "Get decision definition", + className: "api-method get", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-incident-by-key-alpha", - label: "Get incident by key (alpha)", + id: "apis-tools/camunda-api-rest/specifications/get-decision-definition-xml", + label: "Get decision definition XML", className: "api-method get", }, + { + type: "doc", + id: 
"apis-tools/camunda-api-rest/specifications/evaluate-decision", + label: "Evaluate decision", + className: "api-method post", + }, ], }, { type: "category", - label: "User task", + label: "Decision instance", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/complete-user-task", - label: "Complete user task", + id: "apis-tools/camunda-api-rest/specifications/find-decision-instances", + label: "Query decision instances", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/assign-user-task", - label: "Assign user task", + id: "apis-tools/camunda-api-rest/specifications/get-decision-instance", + label: "Get decision instance", + className: "api-method get", + }, + ], + }, + { + type: "category", + label: "Decision requirements", + items: [ + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/find-decision-requirements", + label: "Query decision requirements", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/return-user-task-by-a-user-task-key", - label: "Return user task by a user task key.", + id: "apis-tools/camunda-api-rest/specifications/get-decision-requirements", + label: "Get decision requirements", className: "api-method get", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/update-user-task", - label: "Update user task", - className: "api-method patch", + id: "apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml", + label: "Get decision requirements XML", + className: "api-method get", + }, + ], + }, + { + type: "category", + label: "Document", + items: [ + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/create-document", + label: "Upload document (alpha)", + className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/unassign-user-task", - label: "Unassign user task", + id: "apis-tools/camunda-api-rest/specifications/get-document", + label: "Download document (alpha)", + className: "api-method get", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/delete-document", + label: "Delete document (alpha)", className: "api-method delete", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-user-tasks-alpha", - label: "Query user tasks (alpha)", + id: "apis-tools/camunda-api-rest/specifications/create-document-link", + label: "Create document link (alpha)", className: "api-method post", }, ], }, { type: "category", - label: "User Task", + label: "Element instance", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/return-user-task-form", - label: "Return user task form", - className: "api-method get", + id: "apis-tools/camunda-api-rest/specifications/create-element-instance-variables", + label: "Update element instance variables", + className: "api-method post", }, ], }, { type: "category", - label: "Variable", + label: "Flow node instance", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-process-and-local-variables-alpha", - label: "Query process and local variables (alpha)", + id: "apis-tools/camunda-api-rest/specifications/find-flow-node-instances", + label: "Query flow node instances", className: "api-method post", }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/get-flow-node-instance", + label: "Get flow node instance", + className: "api-method get", + }, ], }, { type: "category", - label: "Clock", + label: "Group", items: [ { type: 
"doc", - id: "apis-tools/camunda-api-rest/specifications/pin-internal-clock-alpha", - label: "Pin internal clock (alpha)", - className: "api-method put", + id: "apis-tools/camunda-api-rest/specifications/create-group", + label: "Create group", + className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/reset-internal-clock-alpha", - label: "Reset internal clock (alpha)", + id: "apis-tools/camunda-api-rest/specifications/get-group", + label: "Get group", + className: "api-method get", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/update-group", + label: "Update group", + className: "api-method patch", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/delete-group", + label: "Delete group", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/add-user-to-group", + label: "Assign a user to a group", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/unassign-user-from-group", + label: "Unassign a user from a group", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/search-groups", + label: "Query groups", className: "api-method post", }, ], }, { type: "category", - label: "Process definition", + label: "Incident", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/search-process-definitions-alpha", - label: "Search process definitions (alpha)", + id: "apis-tools/camunda-api-rest/specifications/resolve-incident", + label: "Resolve incident", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-process-definition-by-key-alpha", - label: "Get process definition by key (alpha)", - className: "api-method get", + id: "apis-tools/camunda-api-rest/specifications/find-incidents", + label: "Query incidents", + className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-process-definition-xml-alpha", - label: "Get process definition XML (alpha)", + id: "apis-tools/camunda-api-rest/specifications/get-incident", + label: "Get incident", className: "api-method get", }, ], }, { type: "category", - label: "Process instance", + label: "Job", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/create-process-instance", - label: "Create process instance", + id: "apis-tools/camunda-api-rest/specifications/activate-jobs", + label: "Activate jobs", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-process-instance-alpha", - label: "Get process instance (alpha)", - className: "api-method get", + id: "apis-tools/camunda-api-rest/specifications/fail-job", + label: "Fail job", + className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-process-instances-alpha", - label: "Query process instances (alpha)", + id: "apis-tools/camunda-api-rest/specifications/report-job-error", + label: "Report error for job", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/cancel-process-instance", - label: "Cancel process instance", + id: "apis-tools/camunda-api-rest/specifications/complete-job", + label: "Complete job", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/migrate-process-instance", - label: "Migrate process instance", - className: "api-method 
post", + id: "apis-tools/camunda-api-rest/specifications/update-job", + label: "Update job", + className: "api-method patch", }, + ], + }, + { + type: "category", + label: "License", + items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/modify-process-instance", - label: "Modify process instance", - className: "api-method post", + id: "apis-tools/camunda-api-rest/specifications/get-license", + label: "Get license status", + className: "api-method get", }, ], }, { type: "category", - label: "Flow node Instance", + label: "Mapping rule", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-flow-node-instances-alpha", - label: "Query flow node instances (alpha)", + id: "apis-tools/camunda-api-rest/specifications/create-mapping-rule", + label: "Create mapping rule", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/delete-mapping-rule", + label: "Delete a mapping rule", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/find-mappings", + label: "Query mappings", className: "api-method post", }, ], }, { type: "category", - label: "Flow node instance", + label: "Message", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-flow-node-instance-by-key-alpha", - label: "Get flow node instance by key (alpha)", - className: "api-method get", + id: "apis-tools/camunda-api-rest/specifications/publish-message", + label: "Publish message", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/correlate-message", + label: "Correlate message", + className: "api-method post", }, ], }, { type: "category", - label: "Decision definition", + label: "Process definition", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-decision-definitions-alpha", - label: "Query decision definitions (alpha)", + id: "apis-tools/camunda-api-rest/specifications/find-process-definitions", + label: "Query process definitions", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-decision-definition-by-key-alpha", - label: "Get decision definition by key (alpha)", + id: "apis-tools/camunda-api-rest/specifications/get-process-definition", + label: "Get process definition", className: "api-method get", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-decision-definition-xml-alpha", - label: "Get decision definition XML (alpha)", + id: "apis-tools/camunda-api-rest/specifications/get-process-definition-xml", + label: "Get process definition XML", className: "api-method get", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/evaluate-decision", - label: "Evaluate decision", - className: "api-method post", + id: "apis-tools/camunda-api-rest/specifications/get-start-process-form", + label: "Get process start form", + className: "api-method get", }, ], }, { type: "category", - label: "Decision requirements", + label: "Process instance", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-decision-requirements-alpha", - label: "Query decision requirements (alpha)", + id: "apis-tools/camunda-api-rest/specifications/create-process-instance", + label: "Create process instance", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-decision-requirements-by-key-alpha", - label: "Get decision requirements by key 
(alpha)", + id: "apis-tools/camunda-api-rest/specifications/get-process-instance", + label: "Get process instance", className: "api-method get", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-decision-requirements-xml-alpha", - label: "Get decision requirements XML (alpha).", - className: "api-method get", + id: "apis-tools/camunda-api-rest/specifications/find-process-instances", + label: "Query process instances", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/cancel-process-instance", + label: "Cancel process instance", + className: "api-method post", }, - ], - }, - { - type: "category", - label: "Decision instance", - items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/query-decision-instances-alpha", - label: "Query decision instances (alpha)", + id: "apis-tools/camunda-api-rest/specifications/migrate-process-instance", + label: "Migrate process instance", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/get-decision-instance-by-key-alpha", - label: "Get decision instance by key (alpha)", - className: "api-method get", + id: "apis-tools/camunda-api-rest/specifications/modify-process-instance", + label: "Modify process instance", + className: "api-method post", }, ], }, { type: "category", - label: "Authorization", + label: "Resource", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/patch-authorization", - label: "Patch authorization", - className: "api-method patch", + id: "apis-tools/camunda-api-rest/specifications/create-deployment", + label: "Deploy resources", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/delete-resource", + label: "Delete resource", + className: "api-method post", }, ], }, { type: "category", - label: "User", + label: "Role", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/create-user", - label: "Create a user", + id: "apis-tools/camunda-api-rest/specifications/create-role", + label: "Create role", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/find-all-users", - label: "Query users (alpha)", + id: "apis-tools/camunda-api-rest/specifications/get-role", + label: "Get role", + className: "api-method get", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/update-role", + label: "Update role", + className: "api-method patch", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/delete-role", + label: "Delete role", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/search-roles", + label: "Query roles", className: "api-method post", }, ], }, { type: "category", - label: "Message", + label: "Signal", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/publish-a-message", - label: "Publish a message", - className: "api-method post", - }, - { - type: "doc", - id: "apis-tools/camunda-api-rest/specifications/correlate-a-message", - label: "Correlate a message", + id: "apis-tools/camunda-api-rest/specifications/broadcast-signal", + label: "Broadcast signal", className: "api-method post", }, ], }, { type: "category", - label: "Documents", + label: "Tenant", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/upload-document-alpha", - label: "Upload document (alpha)", + id: 
"apis-tools/camunda-api-rest/specifications/create-tenant", + label: "Create tenant", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/download-document-alpha", - label: "Download document (alpha)", + id: "apis-tools/camunda-api-rest/specifications/update-tenant", + label: "Update tenant", + className: "api-method patch", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/get-tenant", + label: "Get tenant", className: "api-method get", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/delete-document-alpha", - label: "Delete document (alpha)", + id: "apis-tools/camunda-api-rest/specifications/delete-tenant", + label: "Delete tenant", className: "api-method delete", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/create-document-link-alpha", - label: "Create document link (alpha)", + id: "apis-tools/camunda-api-rest/specifications/assign-user-to-tenant", + label: "Assign a user to a tenant", + className: "api-method put", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/remove-user-from-tenant", + label: "Remove a user from a tenant", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/assign-mapping-rule-to-tenant", + label: "Assign a mapping rule to a tenant", + className: "api-method put", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/remove-mapping-rule-from-tenant", + label: "Remove a mapping rule from a tenant", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/assign-group-to-tenant", + label: "Assign a group to a tenant", + className: "api-method put", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/remove-group-from-tenant", + label: "Remove a group from a tenant", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/search-tenants", + label: "Query tenants", className: "api-method post", }, ], }, { type: "category", - label: "Resource", + label: "User", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/deploy-resources", - label: "Deploy resources", + id: "apis-tools/camunda-api-rest/specifications/create-user", + label: "Create user", className: "api-method post", }, { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/delete-resource", - label: "Delete resource", + id: "apis-tools/camunda-api-rest/specifications/find-users", + label: "Query users", className: "api-method post", }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/delete-user", + label: "Delete user", + className: "api-method delete", + }, ], }, { type: "category", - label: "Element instance", + label: "User task", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/update-element-instance-variables", - label: "Update element instance variables", + id: "apis-tools/camunda-api-rest/specifications/complete-user-task", + label: "Complete user task", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/assign-user-task", + label: "Assign user task", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/get-user-task", + label: "Get user task", + className: "api-method get", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/update-user-task", + label: "Update user 
task", + className: "api-method patch", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/get-user-task-form", + label: "Get user task form", + className: "api-method get", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/unassign-user-task", + label: "Unassign user task", + className: "api-method delete", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/find-user-tasks", + label: "Query user tasks", + className: "api-method post", + }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/find-user-task-variables", + label: "Query user task variables", className: "api-method post", }, ], }, { type: "category", - label: "Signal", + label: "Variable", items: [ { type: "doc", - id: "apis-tools/camunda-api-rest/specifications/broadcast-signal", - label: "Broadcast signal", + id: "apis-tools/camunda-api-rest/specifications/find-variables", + label: "Query variables", className: "api-method post", }, + { + type: "doc", + id: "apis-tools/camunda-api-rest/specifications/get-variable", + label: "Get variable", + className: "api-method get", + }, ], }, ]; diff --git a/docs/apis-tools/camunda-api-rest/specifications/unassign-user-from-group.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/unassign-user-from-group.api.mdx new file mode 100644 index 00000000000..3c5afc6296f --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/unassign-user-from-group.api.mdx @@ -0,0 +1,60 @@ +--- +id: unassign-user-from-group +title: "Unassign a user from a group" +description: "Unassigns a user from a group." +sidebar_label: "Unassign a user from a group" +hide_title: true +hide_table_of_contents: true +api: eJztWE1z2zYQ/SsYnJIpQ8qOk7q8afyRuk0yHltuD44PILgUEYMAgw/LHA7/e2cB0lYsufV4etRBI4lc7MNi38MC21PHlpbm1/ST0b6lNwnVLRjmhFZnJc2pV8xasVRXFsyp0U00S2gJlhvRoh3N6dVoZQkj3oIhldENYWSJ1uk3RRPaMsMacGAQraeKNUBzGgz+hI4mVKCjlrmaJtTADy8MlDR3xsNTtEUN0TO5hS6lCbW8hobRvKeua9GtUA6WYGhCK20a5uKjjwd0GJIHbJzoa6BDgK9AvkHnttXKgsUR+7N9/HoGYMUsmRYfSmI952Bt5aXs4uq6aRVSOiT0YDb7F2dce1kSpR0pYM1r+k190QZICY4JaQkzQFqj70QJJREqQExTJoUuu5hKrpUD5RCPta0UPNAla40uJDS/fLcI3q+tzc+zmpPzaDniEl18B+4IsyQaFhH9+uL0iPx28OHXmze1c63Ns2y1WqWm4u+gFE6bVJtlZiqOH7R7m5JFDQZIwzoMlJWlQEwmMaoWjBNgiW2Bi0pw4nQIcJw2wQTG+MZUxmkhdR8GPyb6IeHWGaGW6/n2RmwIZE6uLs6IKEE5UXVCLTehw5iKeYk+WKG9ywvJ1C1m1wknt4I+RbG+aZjpiK62AAwJtY45b/+Tr+/3N3wjl35fLM5JdEG4LoFU2hBXCzsBYRCNUKLxDc0PZrOENuw+/vs4mw3oEzP+gkgUgftWMhWo9TQcoUjzyNsQmFDWMcX/r8xoI5biKW6K28eUCzqS+DhGNAxDUOH7Tb6falOIsgQV6ElwgwHriLBBkExKvYIy3elqp6udrp7V1cH26haPAdqMNVO4OtZFcQcKa3QooyizSntVJiSs61gUJwFOJTbwVtipqO70uNPjTo/b9fhh22lzrgiuskEegjHaEM25NwZKsqqFDO7xDDthj4Vwd6bcaW2ntee0NiS0AVdrvIuXIMFBuEu7muY0C5XKZv10ix4yLGw268er7YA3VDB306XbG0lz2kcRDXmW9bW2bsj7Vhs3ZHeYoTtmBCtk5CW+jmKbSCQ1ZzI83pZMfIG36ynII9Z4VTJySC5OLhfkE3OwYl1YXIT82fXh7HC21SuaPuNxfn5GYoSRimvbw+QWdb7VbTR+ieNwd7fAvRGuu8RhcXkKYAbM3GM2Higy4gXv+D8a0WT8cToR54+/FyH3QlU6DB85sDkRzAoYG2c+S/c2+XZ+FmTDddN4FfZOtYynIbYWGJfeOgwooVJwUDYwe+yGTGaf4xvyV0QkeykmJTJn2jKXwtW+SLluMh6HPXwXUhdZw4TKRgibHc2/XH09nr/7fHZ08vXy5N1eOkvdvQvBt9q6hqm1eUytpG2dpKeB949l4yUtqDFBDu5d1komFFImRNaPkrqODSlLE5qvtaaCqvDZ1DK6SUZpXNO+L5iFKyOHAR//8GA6ml/fPCopSK8UFn+XNK+YtE/7SuuBvLkYO1BvyUaja2sE40OmuiBf6fEfTegtdOsdNux8vXoW6z2vV0xiWrfhZkhoDawEE1Ylvp1zDq1bG/fsCQB1+LAdHp98Plmc0GH4Bz4JJLY= +sidebar_class_name: "delete api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from 
"@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Unassign a user from a group

+ + + +Unassigns a user from a group. + +## Request + +

Path Parameters

+ +The user was unassigned successfully from the group. + +
+ +The user could not be unassigned. +More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The group or user with the given key was not found, or the user is not assigned to this group. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/unassign-user-task.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/unassign-user-task.api.mdx index 5dcc35a72d6..42c6740b0bf 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/unassign-user-task.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/unassign-user-task.api.mdx @@ -5,7 +5,7 @@ description: "Removes the assignee of a task with the given key." sidebar_label: "Unassign user task" hide_title: true hide_table_of_contents: true -api: eJztWE1z2zYQ/SuYPSVTmpRTJ3V409hK69bJeGy5Pbg6gOBKREwCDABK1mj43zsLkJIsyXUmk6MOGooksG8f9j3iYwWOzyykD3Bv0TDH7SNMIsjRCiNrJ7WCFG6x0nO0zBXIuLVyphCZnjLu27OFdIV/N5NzVOwRlzFEUHPDK3RoKPoKFK8QUmgsmjG3j3/hEiKQFL3mroAIDH5rpMEcUmca3E1hXCAFJlRCavpkCcmKAisO6QrcsiYQqRzO0EAEU20q7sKjD2fQthMCsrVWFi31eDc4o8s+2BqBLbhljep458w2QqC106YslzG0EZwNBq+G2BshJrhS2rEMt2LH7LM2yHJ0XJaWcYOsNnouc8yZVD5AnzzLdL6M/1UQgdDKoXKUA6/rUgpOOSS10VmJ1S9fLSW02hql55kO2U1o2eEynX1F4Ri3LDTMAvrD7acL9vHs/W+TN4VztU2TZLFYxGYqTjCXTptYm1lipoJ+1O5tzMYFGmQVXxJPnueSMHlJrGo0TqJltkYhp1Iwpz3BLm1GpQz8uqKGtEhY686bkq9Lb52RarZd+cZI2FXTkN3fXjGZo3JyupRqtg/t+0x5U1IMnunGpVnJ1SNV3ElXHgTdRbFNVXGzVu1zgDYC67hr7KvK/fXdXmzS1x/j8Q0LIZjQObKpNswV0vZARKKSSlZNBenZYBBBxZ/C3YfBoKWYVPHvYKIYPtUlV15au3SkYtVGt56YVNZxJX5WZbSRM7mLG0O7qQV0Ir4MjNq29c583dz7ziS/kzWnulF5fDTY0WBHg71osI8/YDBp+9lsYbSa+RFGJhpjULlyeZwFjyY9mvTnmfT9ofXpUDEaZUM6RGO0YVp4B+ZsUcjSh6dlbo9Nq3O07ui1o9eOXnvJa20EFbpC55BCjiU69PtgV0AKCU2JJzQl2mS1tQtuk35HTTtZNPN+w9yYElJYBRO1aZKsCm1dm65qbVybzKlCc24kz8qgS3odzNaLqNSCl/7xoWLSC9qV9yQveNWonLNzdju6G7PfucMFD1tcgnwe+nxwPjgYlZq+EHF4c8UCwyDFrc9DH5Z8fjBsaPw9gf0e36JojHTLO+oWhidDbtAMG6rGWiIdno9O96ERRN2fT71w/vxn7Gsv1VT77p0G9hOhqqCxIfNBfLqvt5srbxuhq6pR/tupZmGZxLeIibKxjghFUEqBynpld6cofbPr8Ib9HRDZaUxFCcrpP5kz6Yomi4WuEhG6ra9ZqbOk4lIlHYRNLoaf779cDk+ury5GX+5GJ6fxIHZPzpOvtXUVV1t53HdHF5v13i7d1Way+LFjpK5QDp9cUpdcKpKOZ7jqrPUAG2tBBOnzI6a1uyZR55AHWK0ybvHelG1Lj781aJaQPkw2hvIOzKWl/zmkU17a3SOpbWZvbrvDq7fs/w6qDnLpHnK19IYuG7qDCB5xuXNe1k7aCArkORqfX2gxFAJrt9X3xSmZjLH+Pl2OrkfjEbTtf/wi5PA= +api: eJztWMFy2zYQ/RXMnpIpTcqpkzq8aWyldetkPLbcHlwdQHAlIiYBBgAlazT8984CpCRLcp3J5KiDhiK52LeLfY/AYgWOzyykD3Bv0TDH7SNMItA1Gu6kVlc5pNAobq2cKTIZk0UEOVphZE0mkMItVnqOlrkCWTBFZHrKuHfIFtIV/t1MzlGxR1zGEEHNDa/QoSH4FSheIWF1GH/hEiKQ5L3mroAIDH5rpMEcUmca3A1hXCA5JlRCavpsCMmKAisO6QrcsiYQqRzO0EAEU20q7sKjD2fQthMCsrVWFi2NeDc4o8s+2BqBLbhl/RRhzmwjBFo7bcpyGUMbwdlg8KqLvRligiulHctwy3fMPmuDLEfHZWkZN8hqo+cyx5xJ5R30wbNM58v4XwURCK0cKkcx8LoupfCVTWqjsxKrX75aCmi1NUvPIx2ym2DZ4TKdfUXhGLcsGGYB/eH20wX7ePb+t8mbwrnapkmyWCxiMxUnmEunTazNLDFTQT+yexuzcYEGWcWXlCfPc0mYvKSsajROomW2RiGnUjCnfYJd2IxKGfLrihrCImKtB29Kvi69dUaq2XblGyP3CD1k97dXTOaonJwupZrtQ/sxU96U5INnunFpVnL1SBV30pUHQXdRbFNV3KxZ+xygjcA67hr7KnN/fbfnm/j1x3h8w4ILJnSObKoNc4W0PRAlUUklq6aC9GwwiKDiT+Huw2DQkk+q+Hdkohg+1SVXnlq76UjFqg1vfWJSWceV+FmV0UbO5C5uDO2mFtCR+DJk1LatV+br4t5XJumdpDnVjcrjo8COAjsK7EWBffwBgUnbr2YLo9XMzzAy0RiDypXL4yp4FOlRpD9PpO8P7U+HitEsG+IhGqMN08IrMGeLQpbePW1ze2zanaN1R60dtXbU2ktaayOo0BWamuocS3To+2BXQAoJLYkntCTaZLXVBbdJ31FTJ4tm3jfMjSkhhVUQUZsmyarQ1rXpqtbGtcmcKjTnRvKsDLyk10FsPYlKLXjpHx8qJr2grrxP8oJXjco5O2e3o7sx+507XPDQ4hLkc9fng/PBQa9k+oLH4c0VCxkGKm59Hnq3pPODboPx9zj2Pb5F0Rjplnc0LExPhtygGTZUjTVFOjzvne6DEUTdn089cf78Z+xrL9VU++EdB/YDoaqgsSHyQXy6z7ebKy8boauqUf7bqWZhm8S3EhNlYx0lFEEpBSrrmd2dovRm1+EN+zsgstOYihKY038yZ9IVTRYLXSUiDFtfs1JnScWlSjoIm1wMP99/uRyeXF9djL7cjU5O40HsnpxPvtbWVVxtxXHfHV1s9nu76a42i8WPHSN1hXL45JK65FIRdXyGq05aD7CRFkSQPj9iWqtrEnUKeYDVKuMW703ZtvT4W4NmCenDZCMor8BcWvqfQzrlpd09ktrO7M1td3j1lv3fQdXBXLqHXC29oMuG7iCCR1zunJe1kzaCAnmOxscXLIZCYO22xr64JJMw1t+ny9H1aDyCtv0PUDTw9g== 
sidebar_class_name: "delete api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-a-job.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-a-job.api.mdx deleted file mode 100644 index a2e48664100..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/update-a-job.api.mdx +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: update-a-job -title: "Update a job" -description: "Update a job with the given key." -sidebar_label: "Update a job" -hide_title: true -hide_table_of_contents: true -api: eJztWNtu20YQ/ZXFPiUtTSmpkybsk+o4jd0kNXxpH2wDWXKH0jp7YfYiWRAI9Df6e/2SYnZJSbbkxmjzqABGRO7cZ86QPAvq2djR4pIem5JeZ5SDq6xovDCaFvSi4cwDYeTGlGQm/IT4CZCxmIImn2Ge04w2zDIFHixaWVDNFNCC3pjyV5jTjAo00zA/oRm18CUIC5wW3ga47+t8AmiTmDo6QY/ekBAjQEeumoBitFhQP2/Qh9AexmBpRmtjFfPp1st92rbXyRk4/7Phc9S577sy2oP2eMSaRoqKYRiDG4exLDadmfIGKo/5WtOA9QIcnlYTpsfgIBq6m8/x2W8fSVJLpUuyPGbGvLeiDB7IlMkALr/SVxorUBspzUzo8UrEkYppUgJh/CY4D7zvhHAENG+M0D4jjHOBnplc07zSMyEl6oqxNhZ4gX6+I58seCvAfSJ7BL1qmBGmTNAey98dktrYvhU/ERWcj0GQxjjhxRSIDqoEm0eDXigwwfcGebCxoH030UEnQoQmymXEeWY9Jlpbo6JQFawF7YkyCrSPJTmxZip4LIfuRsM4WK9NrAUjOkiZakmMJUYJH20LTxoLDuwUXPTRgHUiFnFp4+8//3JJddkFbFHFtDYx5cqoRgLqGEtqJuQDLQgOUh6dfDTTqcTfvagjQjsPjOdXmmZfGbGuG18d/B+e022Q+l+tpRnFwrJSQgJOm9GujY8B4rZ4vsVkbIYV4/J4iYvsYIlKPFhB/3INr9d3NNKiO007g7Zt0nON0S6V/vlwfxPi/ajMmOsWFScuVBU4Vwcp5zltM7o/HG7XbOJsAyeceUaEIzhuUyZFNxYPrKjGmlKC+n5zVd11MSInSZJw8DiB3S5ijiTBEjhW/PL07QF5vf/ix+snE+8bVwwGs9kst3W1B1x4Y3NjxwNbV/iHck9zhLgFotg8raXl5llNLnENVKIWFS5xn7KNweDMPGbs0+lyxpy3Qo/XRyxYsTFgI3JxekQEB+1FPccR2nAddWoWJNpgpQm+KCXTn+lqhDad3vfiglLMLp9Vdx20GXWe+fCfIfvu/PyEJBOkMhw6tArXO8IklNBCBUWL/eEwo4rdpquXw2GLNrHjj8hEE7htJNN3INmng3A0Frr5iYnh3mK6+ladMVaMxX2/OV2HczfEb1JGCZn7/wrG/jUlvYP0wKpN0DzfwWoHqx2sHoTV60fAavn2j8gSOt6bWaPHsbbLZ7Wc5+TDKk1HmF175HV6/SOWlIbPd0+9HTx38HwYni+2vUiONMEqW5xDsBa/faqIQE5mEyGjeXwf7X13n8U7rO2wtsPaQ1hrM6rATwxP3FU1iTyXn9CCDm5M6QaL9G7ZIi2F3EJHfgUraUEXCS9tMRgsJsb5tlg0xvp2MMVmTJkV+OEae4fHCVf9vEhTMRlvb+sbHiDD1udzwFTQnJFX5PTw7Jz8wjzMWPrsRJd3Tb8avhputYqiD1gcnRyRlGGaurVN0JtFSG81m4QfYzgSdg6qYIWfn6FaKk8JzIIdBaz8cho6f9E6XichmnU/3vYzcvzHeWwzbrHTFRl4eMuQndkg75Y0y3CN4hi2cSJrE/1387KZCbYVeaWY+jB/tjmbJ0cRYpVRKui4Z/W4p65W9iqJ/F6kXaSoQLsYZkep9mLv0wn5PXkkz3Lsahq9fr2OhZ+EMq+MGlRJbfl/KU05UEzoQefCDQ5GHy4+vhntvT86OPx4drj3LB/m/jaRJ41xXjG9Fsc6J3w/0cXqkfIY7rjrqIdbP2gkExpnLGay6PB2iUyyoxktOkb5OutAc0kXi5I5uLCybfH2lwB2TovL6xXGIii5cPib06Jm0t2nndcjfnLaMUVPydfI6K2RdzeZnkecy4BXNKOfYb6ixNvrNqMTYBxsDC8dHqQg9s7RxEp5g5pus15jVFXQ+Adk7zzBEVzLdXYyOj94h2DpeHFlOCpbNkOCns1oQa/oFUZtYoUSBYn3F1QyPQ5sHPn9aBj//QNn9mWy -sidebar_class_name: "patch api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Update a job

- - - -Update a job with the given key. - -## Request - -

Path Parameters

Body

required
    changeset objectrequired
    - -JSON object with changed job attribute values. - -The following attributes can be adjusted with this endpoint, additional attributes -will be ignored: - -- `retries` - The new amount of retries for the job; must be a positive number. -- `timeout` - The duration of the new timeout in ms, starting from the current moment. - -Providing any of those attributes with a null value or omitting it preserves the persisted attribute’s value. - -The job cannot be completed or failed with this endpoint, use the complete job or fail job endpoints instead. - -
- -The job was updated successfully. - -
- -The provided data is not valid. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The job with the jobKey is not found. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -The job with the given key is in the wrong state currently. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-authorization.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-authorization.api.mdx new file mode 100644 index 00000000000..d62e3a8b37b --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/update-authorization.api.mdx @@ -0,0 +1,73 @@ +--- +id: update-authorization +title: "Update authorization" +description: "Manage the permissions assigned to the authorization." +sidebar_label: "Update authorization" +hide_title: true +hide_table_of_contents: true +api: eJztWV+T2jYQ/yoaPbVTB0h6bRPeKPgSt3dAjUmnvdzcCFtgNbLkSPJxlPF376xkgw2+JO30kZu5wZJW+/+3srV7bMhG4+EdHhUmlYr9TQyTAt97WOZU2UGQ4CEu8oQY2ibycEJ1rFhuR0N8SwTZUGRSinKqMqY1k0IjojXbCJogI+0aaTLpYQ/nRJGMGqpAkT0WJKN4iOVWUPUr3WEPM+CeE5NiDyv6qWCKJnhoVEFPVYhSij7SHZJrK8ryqAdncnWc0ozg4R6bXQ4imTB0QxX28FqqjBg39eMVLst7J5lq87NMdrAnlsJQYeCR5DlnseXb/0uDIvtz5nL1F40NmKvAtYZRbffGTvX9iSWBSIAl1YitW+7UqSx4glYUkSShCZIKKZrJR5qATZUwbRQTG+xhKorMhncywR4O/dvZex/fl2CNloWKaWQ3nEoHPwIr8F1NCfEjSdJ30lo6GdlfK5k1FDhYSzifrW1c/y8JB5OW0btZGPw5ioLZFHv4djSfB9O3D+Hyxoehv1iM3sLTz6No/A57eDSf3wTjmnzxxyLyb7GHI386mkbYwxN/fjP749a3g3k4G/uLxcPEvw6mQbVn4o+DRTCbPoT+b8sg9IH2OZLW7NtwtpxjDy8XfghxmN34gDHDDAdvhY1g+GBeCSFq2N/II6IU2Z1hL0pP/dXwJHiNGZrpL6fjkUd3YixyGrM1o9pCqg5gQ/K/yYGvYnYI9xjigT08Dv1R5B8eHupIBdNFNJqOGyuHWDSWQn80qX66dtp5iNJDNFr8Wk90MVrOJ04N99DFrFppspv4N77d5R7qXceJSTg5Dq5n4e0Z7VFCI4Pmrbgdc6iGVpB8OYdGiDNtWoAMJvqkniNFuS1LRkJ0CsE+FTRwyeUq8mmmVbWoLMsudSfRrL3UOmPmxMRp6AqvI2vXf2dhLoV26ftq8Kq7mLWKP9oSjXJgTROkizimWq8Lzne9DwKXHr4aDL6GS2wLsZAGinHFrvdB3EpFUUINYVwjoijKlXxkUKqZsM6sNUYrmViR3nOHSa7kitPsu/ND5TRyc0dZyUUOeoho5AhXTvpdeD1Gb65++On+m9SYXA/7/e1221Pr+AVNmJGqJ9Wmr9Yx/APdtz0UpVRRlJFddeQwkEk4OtYNpB2O4/qEr9S2iHb2fb7omKrUnB5eh1O4UKwjW5dhgFhChWHrHRObc9F2z5oUHHiQlSzMcMWJ+IiP6XYu9FSKLrKMqMMrRVtA6WFtiCn0F98ivn/VWbPfRdEcORYolglFa6mQSZmuBYERGRMsgyp4NRh4OCNPbvTjYFACT4j4V1giEH3KOREueU/MYQJlx7y1hjGhDRHx/xUZqdiGncrt4VZVcJMTZ1Fp8X41eNmNxep1DHESf9TokXCWWHyC1LgCqKJWCcLtSXIB2QVkF5B1g+z783y/lmrFkoQKm54HvDFtjzzCudy6b44Lri64uuCqG1dX3YeXu5iA11DA0loWIrm8CF6wdMHS57D05vMvgu7K43gfdnYfIjq+Ahl8xok1Z7H5T59upYd/6PpaHAkE4VcAEKqUVEjGcaEUTdA2Zdxyhs/O2imVEZcicCkClyLwXBEoPZxRk8rEtQPi1DYPTIqHuN+Ctu7v6/ZBCRf9VD3W3YVCcTzEe4edctjv71OpTTnc51KZsv8IgXkkipEVd+kIyw5jde5wGRNup7tiCAvQwqhtG5OsEAlBr1HoLyL0lhi6JTvrUxDZZv168HrQfbcrlXmG42geIGehy8BGVajZArw72Trir2FsWyCaxoViZreAbc49K0oUVXBp18iMSp7lDmNHhL3q4brOl19+j2zIoaKFx/aK/0Sy3OGxbo9UDYzWlfjdvn29eVdn5L29+mRiLa1OVT6dWwehpko7AYPey/PcnQcWgrHMskLYOiw2aMtMikjDWzEvtAEveZizmAptVa/6WDXZjVtB751E9LIHkXbpWJffDTNpserFMuvHbtvhd8Xlqp8RJvqVCN0fj26X08noxU0w9qcL/8XL3qBnnoz1aC61yYho6LG0Hbz2EXhqcKOr9Z+7eVUKGPpk+jknzJ6R1sx9BdY73AYr9vDw0O279yrE3eH9fkU0XSpeljD9qaBqh4d390eAup4C0/Cc4OGacH3aEmya9E1YXR5/i/5Vo7DTpPoqXexsxeAFjLCHP9Jds3tpUzGlJKHKauuWx06nF7bJctx+1kgsvXrHKI5pbp6hbb0ZAFAPZXJe9b5WVdcykwlsVmQLvVSyxUP8AX8AvWVu6k6Tnd9jTsSmIBugd4zh7x8KYlaz +sidebar_class_name: "patch api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs 
from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Update authorization

+ + + +Manage the permissions assigned to the authorization. + +## Request + +

Path Parameters

Body

required
    permissions object[]
    + +The permissions to add/remove. + +
  • Array [
  • ]
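To make the `permissions` array concrete, here is a hypothetical PATCH body. Only the top-level `permissions` array is taken from the request schema above; the `action` flag, the permission object shape, and the `/v2/authorizations/{ownerKey}` path are assumptions for illustration.

```js
// Hypothetical authorization patch; everything except the top-level
// "permissions" array is an assumed shape, not taken from the spec.
const ownerKey = "2251799813685251"; // example owner key
await fetch(`http://localhost:8080/v2/authorizations/${ownerKey}`, {
  method: "PATCH",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    action: "ADD", // assumed: whether the listed permissions are added or removed
    permissions: [{ permissionType: "READ", resourceIds: ["*"] }],
  }),
});
```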
+ +The authorization was patched successfully. + +
+ +The authorization could not be patched. +More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The owner was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request to add permissions to or remove permissions from an authorization was in conflict. +More details are provided in the response body. + +
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-element-instance-variables.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-element-instance-variables.api.mdx deleted file mode 100644 index 3fb51b8f195..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/update-element-instance-variables.api.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: update-element-instance-variables -title: "Update element instance variables" -description: "Updates all the variables of a particular scope (for example, process instance, flow element instance) with the given variable data." -sidebar_label: "Update element instance variables" -hide_title: true -hide_table_of_contents: true -api: eJztWN1u4zYWfpUD3iTBKraTzbSzBraAm6a7aaczQeLpXsQBeiwdWZyhSJWk4hiGgL7Gvt4+yeKQkvwbtBe92p0AQSLy/P98h+RaeFw4MX4UN4pK0h6kdh51SuIpERm51MrKS6PFWHysMvTkAJUCXxA8o5U4V+TA5IBQofUyrRVacKmpCE5zY4FesKwUJVBZk5JzvfwEcmWWQHtqz2ApfRHkL+Qz6V4LZOhxMNMPFaUyXwWKfWaQOqz/0m7ctus/0uoXtg9L8mQHMy0S0X+y92uhsSQxFoeMIhGSva/QFyIRln6tpaVMjL2taT9E04LgM604IEft8wbqEMW9AOaGrZoW0kGKGuZxfz9kQfQpOjBzj1JTBlltpV5sCFJLyKacJWAsYAzhTLd2JODqtAB0gODIPku2CN1nOHVErwfOxKB+MnMoyTlc0FkMoUsLKlGM18KvKo6e1J4WZEUicmNL9HHpqyvRNE8xdOT8tyZbMc9+JFOjPWnPW1hVSqbBk+Enx5FdHyoz80+Ues6kNRVZL8nxbh9U/tjNzg8PH95DZANLlSVH2nP8dpPhDTjyXS21EfnPb/92sa4HIhGYZZKForrbUh792DWvSYQyKapDa27zoMYbCHx7RqAlKMkuKAPnrUy9WoHUTFwQBIldl6EDF3pCUgbz1at55KR98AXZpXRH1XEccYGeslinFbWd7AB1FoxFH/hM7cmWxnkwmgYzPdPvKEYoNdrJjGwgy41SZskBblFgzKTTgiwFhX5pOgUnFydBycnlCfd48IzXpIudgJb7KHps8kAG3GyU55R6+byHRszTmW5Z7QX8/RtYw0zkxswEjOESmpm+7FbnaMPqBa/O9ER3bdrWbASlw6ByL51cniRb2reVvIEmCW61CSM/022+QRE+U1tSwdVapwVqTjhzYPapdr7fvzzhnOyYmvSK3kATkvDtCjLKsVY+iQb3apk5R8V53yhcSqUYanqLg8EzHZLdq92m2gpTRIC21ufGKEJeaPWLcdDWJIK7IzTyPeVkiQfLQSdMwHabAeLSwjjSXS3XLpQT+t4UnjRh5igFllJjMweWXK1CM+fWlOAZSnvdA/ipdp5Zv4ERyJxL/VlmlO148Sp6JaKUWpZ1KcYXTbM9Ax634OYpEV56xZIeyP/crt/HAhJNEzldZbSL4HQ5ujqMxXSnLZfcKrEUswFjydVodJyn8yiMSW4bbTw8o5IZ49Ur2FpZM1dU/uUQY/cTdBcpISOPUnUgig4i4ZwyxsvH+++v4W9Xb75+Oi28r9x4OFwulwObp+eUSW/swNjF0OYp/zLdWehihjpccXo2uAobVO/gLYUW/lqzgfO2k8FXRkLc7QcH46lebKe5tlLsj/IJfLy/BZnxiMhX3ZTYUb1d7wLnpvbjuUL9mRPVlsKh0n0tri5LtP2RYVdBkwjn0dfud4fsXy8PZHNZ/HM6vYMoAlKTMSbb2BytosF2dV+NRoko8SV+fTUaNSyTM/4HPNFAL5VCHUpr3x2poTSW2voJjvUHzT8nM8bKhdzXOxDNJheiLeLvokexI98ca6iJ5llLluuQrDUWTJrW1lIGy0Kq/mTW6W6nxJdO+9JpXzrteKc1iSjJFybjq5RxoXT4RjUWw/ZYdd6Z6Ybrw5NWM9xM2kTw7aW7udVWibFYx45qxsPhujDON+N1Zaxvhs+crp1LAW/HzusqKpyTimjVYWZ5g6+HncfXWNY6Q3gL9zcPU/gHelriKkSaVe6Kfjt6OzoqlUlfkTi5u4XoYazLLazoxHLTHxUbif+I4HAnc5TWVvrVA7PF8MwJLdlJzcnp66XVF6SHE18gEkn7z/ddFf3wr2koBMa5+8197yae//fvZ5vLUTycHj0tjkIF5yZY09bXoV+cZLIuBmI0uDis5bvb0JKpKctaB1zWi3hMxq04pap2/EYgEqFkStoFo9vXgY7sXdyBn6NGuBhwjmMhdnC8kL6o54PUlMM0svV/58rMhyVKPWxVuOH15KeP77+bnL+7vb55/3BzfjEYDfyLD7HkdilRb9kRX2IOnxe2W2TH+/VmLv2PPeO09enpxQ8rhVJzx4RMrFuAeRQHACMSMT760rNzmo848SjW6zk6+mhV0/DyrzXZlRg/Pm3IAw5l0vH/WV/Mr2bg9L69QJzB/9eD0dFktYuoYwJUzV8iEZ9pdfxBrnlqElEQZmRD4CPhdQzv+ZTFbQQdPCU1SccxSVOq/Cu0Owc2Rsp+et19eJgy8LXPWKXJmNfikl8HcSnGYiZm7IAJqQ+YGtbXQqFe1Lhg+iiXf/4LCmd7LA== -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from 
"@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Update element instance variables

- - - -Updates all the variables of a particular scope (for example, process instance, flow element instance) with the given variable data. -Specify the element instance in the `elementInstanceKey` parameter. - -## Request - -

Path Parameters

Body

required
    variables objectrequired
    - -JSON object representing the variables to set in the element’s scope. - -
    { \"foo\" : 2 }\n2 => { \"bar\" : 1 }\n\nAn update request with elementInstanceKey as '2', variables { \"foo\" : 5 }, and local set\nto true leaves scope '1' unchanged and adjusts scope '2' to { \"bar\" : 1, \"foo\" 5 }.\n\nBy default, with local set to false, scope '1' will be { \"foo\": 5 }\nand scope '2' will be { \"bar\" : 1 }.\n","type":"boolean","default":false}}>= 1`"} schema={{"description":"A reference key chosen by the user that will be part of all records resulting from this operation. Must be > 0 if provided.\n","type":"integer","format":"int64","minimum":1}}>
- -The variables were updated. - -
- -The provided data is not valid. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
- -An internal error occurred while processing the request. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-group.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-group.api.mdx new file mode 100644 index 00000000000..d33a746bea8 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/update-group.api.mdx @@ -0,0 +1,60 @@ +--- +id: update-group +title: "Update group" +description: "Update a group with the given key." +sidebar_label: "Update group" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/isYnJopLcqpk6a8qYqTuE1TjyO3B0eHFbgiEYMAA4CWNRz+984C1FtOfOhRntFY4mLf3weA23IPhePZHX9vTVPzacJNjRa8NPoq5xlv6hw8RmHCc3TCypqkPOO3QcaAFSRmC+lL5ktkhXxAze5xOeAJr8FChR4teWm5hgp5xoPGn7jkCZdkqgZf8oRb/NZIiznPvG1w39+kRLLKzDy6CV69YTFGcuZEiRXwrOV+WZMfqT0WaHnC58ZW4OOj1xe866bRHTr/u8mXpLPvXRjtUXsSQV0rKUJZ0q+OomkPnZnZVxSecrZURC/RkVSUoAt0GAwdZuTQU0ZxVd5nBd5bOWs8OkoLlPp7Hsq3qz56jvL3g4v9WKfgvJW6OOg0xRmrnLNculrBkpHmTisGvOsS7qVXZClAZrxOvZuScFPhu62yTPe0Iq5uYnN410VNVxvtYtAvhxfHa9kDEdw6WtcIgc7NG6WWA94l/GI4PK5bW/Mgc0oQPDDpmDaePYCS+eCL5k+iobZmprD6+RAV+726jitZjh6kYrEhDByLC2eYM6nZ3c27Mfvt4tWv059K72uXpelisRjYuTjDXHpjB8YWqZ0L+tC6FwM2KdEiq2DJZsggzyX5BMU2rWauRiHnUhBffMw2BEN9j/l9HydReoiTNa0aKw9gM2K3N1dM5qi9nC+lLg5dB505NIpswMw0Ppsp0Pd8A6UfgXPEXFNVYNcbw66DLuHOg2/cD7eFX14eBf6HyeSaRRNMmBzZ3FjmS+lWjiiJSmpZNRXPLobDhFfwGH+9Hg47skkdf0YmmuFjrUAHaO2nIzWrjMUePyExqZ0HLf6vzhgrC7nvd5fWPYjfxowiNy+G58cp1e+vTIG4d5FLDBpfktdIHyYshiBAhc3qRLITyU4kO06y759568tXf7NanWBz0+j8RK0TtU7Ueopar45dCUeaUZUt4RCtNZYZIRprMWeLUqpgnm6WK9/9WXe6K564duLaU1zrEl6hL00eX/lFGQYEvuQZT8PB5dJ2dYB19D6P9mE1OWis4hlvI2e6LE3b0jjfZW1trO/SB2rIA1gJMxVhSOLIrRVmlBGgwuNjvSPB9kvtGKpG58DesJvLzxP2HjwuIL5Ekstd02+Gb4ZHrdLSJyyOrq9YzDAib2s3WJklWh81Gxc/x3CYdDgUjZV++ZnUYnlmCBbtqKHqrxHR+wvW6XdcxJP+y7sVTv74dxJaTTvZzWaKcvkIVR15uDP16Kc+Pd66gMa5CX57rBxmQO1E62LKw8H5IS6vrwK9hKmqRoc9VhfxIgRbFRGqcZ4qkXAlBWqHWxGtln2MEvZP9MjOB9TNCLnV1lpIXzazgTBVKqLa+v9MmVlagdRp78Kl49Fft5/ejs4+Xo0vP32+PDsfDAf+0Yfka+N8BXorjn6UVhybs7Wb4+R5I7e+lx4ffVorkJrQFXJpe7bdxQGc4wnP1qO4adJT5o637Qwc3lrVdfT4W4N2ybO76YZhcRolHX3PeTYH5fbnddtx/3TTT35esB9P8Y7G3z8EvQw8Vw394gm/x+X2PLGbdgkvEXK0IcQoHsdAziZkZKN+MNXrkpXGSAis/RNrd85xotd6U7seTcYfiC79SLEyOSlbWNB0ExY841/4F4rbhCoFJobnLVegiwYKWh8N099/P6100Q== +sidebar_class_name: "patch api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Update group

+ + + +Update a group with the given key. + +## Request + +

Path Parameters

Body

required
    changeset object
    + +A set of changed group attributes. + +
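A minimal sketch of the PATCH call, assuming `name` is one of the changeable group attributes and that the path is `/v2/groups/{groupKey}`; both are assumptions, since the spec excerpt above only shows that the body carries a `changeset` object.

```js
// Hypothetical group update; "name" as a changeset attribute is assumed.
const groupKey = "2251799813685249"; // example key
await fetch(`http://localhost:8080/v2/groups/${groupKey}`, {
  method: "PATCH",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    changeset: { name: "engineering" }, // changed group attributes
  }),
});
```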
+ +The group was updated successfully. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The group with the groupKey is not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-job.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-job.api.mdx new file mode 100644 index 00000000000..cf3600b8052 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/update-job.api.mdx @@ -0,0 +1,70 @@ +--- +id: update-job +title: "Update job" +description: "Update a job with the given key." +sidebar_label: "Update job" +hide_title: true +hide_table_of_contents: true +api: eJztWNty2zYQ/ZUdPCUtTSmpkybsk+okjdMk9fjSPtieCUisJDggwOAiWaPhTH+jv9cv6SxASrJlN542j8qMJyKB3cXunrMkz5J5PnGsOGfvTMkuM2YatNxLow8FK1hoBPdISxkT6CorG1pjBTuLK8DhypQwl34KfoowkTPU8BkXOctYwy2v0aMl/0umeY2sYFem/BUXLGOS3DTcT1nGLH4J0qJghbcBb8c6nSL5BDOOQSiiN5DORoFcNcWas2LJ/KKhGFJ7nKBlGRsbW3Ofbj3fZ217mYKh8z8bsSCb27Eroz1qT0u8aZSsYjkGV47OstwOZsorrDzla6l4XqKj1WrK9QQdRkc383l38ttHSGapdGmviJlx760sg0eYcRXQ5Rf6QlMFxkYpM5d6st7ioOIaSgQuroLzKPpOSAeoRWOk9hlwISRF5mrD8kLPpVJkKyfaWBQFxfkOPln0VqL7BHtAUTXOgdcmaE/l7xZhbGzfip+gDs7HQ0BjnPRyhqBDXaLNo0MvazTB9w5FSPjqu0kBui0gNdQuA+e59ZTo2Jo6bqqCtag91KZG7WNJjqyZSRHLoTtoGIebtYm14KCDUqmWYCyYWvroW3poLDq0M3QxRoPWyVjElY+///zLJdNVF6hFFdfaxJQrUzcKycZYGHOp7mlBcJjy6PZHN51J/N1vdSC188hFfqFZ9hWIdd34KvB/eLpF3//dWpYxKiwvFSbitBnr2vgQIt51nm+BjO1jxXN5uqQRd7BiJS2sqX++wdfLGxZp0B2nmcHaNtm5xmiXSv90uL9N8R4qc+66QSXAhapC58ZBqUXO2oztD4d3WzYR2yhAcM9BOiC4zbiSHSzuGVGNNaXC+vvtUXUzxAiO0k4Q6AmB3SziDtLGEgVV/Pz4zQG83H/24+WjqfeNKwaD+Xye23G1h0J6Y3NjJwM7ruiP9j3OieIWoeaLNJZWk2eNXHANVnIsKxriPmUbD0OYeQjs0+oKY85bqSebEAtWbgFsBGfHhyAFai/HC4LQVuhoM+ZBkQ9emuCLUnH9ma0htB30dhQX6prb1bPqZoA2Y85zH/4zZd+enh5BcgGVEdixVbo+ECVRSy3rULNifzjMWM2v09Xz4bAln9TxB2SiAa8bxfUNSvbpEB2NxQ4/MTGaW1xX36ozxsqJvB03Z5t07kD8KmWUmLn/r2TsX1PSO0hPrLEJWuQ7Wu1otaPVvbR6+QBard7+iVlSx3tza/Qk1nb1rFaLHD6s03TA7cYjr7PrH7FQGrHYPfV29NzR8356PrvrRXKkgapsCYdoLX37VJGBAuZTqaJ7eh/tY3efxTuu7bi249p9XGszVqOfGpG0q2oadS4/ZQUbXJnSDZbp3bIlWYq0hU78Claxgi0TX9piMFhOjfNtsWyM9e1gRs2YcSvpwzX2jpYTr3q8KFNxFW/f1TdaIIWtz+eA10ELDi/g+PXJKfzCPc55+uykkDddvxi+GN7plbbe43F0dAgpw4S6jUnQuyVK3+k2bX6I4yjYOayClX5xQmapPCVyi3YUqPIrNHTxone6TptY1v1402Pk3R+nsc00xY7XYuDra07qzJZ4t5JZhhsSx7CNiBybGL/Dy3Ym1FbSlWLqw/zJNjaPDiPFKlPXQcc5qye9dLX2VynS96LsomSF2sVjdpJqv+19WoHfU0R4klNXE/T68TqRfhrKvDL1oEpmq/9LZcpBzaUedCHc4GD04ezjq9He+8OD1x9PXu89yYe5v07iSWOcr7neOEenCV9ty8XL9QPlIcpx10+P137QKC41ISzmsezYdk46smMZKzo9+TLrKHPOlsuSOzyzqm3p9peAdsGK88s1wyIlhXT0W7BizJW7LTpvnvjRcacTPYavSdF3nry7yfUislwFumIZ+4yLtSDeXrYZmyIXaOPx0uJBOsTeKblYG28J023WW4yqCht/z94bz2+i1mqYHY1OD94SVTpVvDaCjC2fkzzP56xgF+yCTm1ihZIASfeXTHE9CXwS1f3omP79A6Cibjs= +sidebar_class_name: "patch api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Update job

+ + + +Update a job with the given key. + +## Request + +

Path Parameters

Body

required
    changeset objectrequired
    + +JSON object with changed job attribute values. + +The following attributes can be adjusted with this endpoint; additional attributes +will be ignored: + +- `retries` - The new number of retries for the job; must be a positive number. +- `timeout` - The duration of the new timeout in ms, starting from the current moment. + +Providing either of these attributes with a null value, or omitting it, preserves the persisted attribute’s value. + +The job cannot be completed or failed with this endpoint; use the complete job or fail job endpoints instead. + +
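Since `retries` and `timeout` are the only attributes this endpoint honors, the request body stays small. The sketch below assumes a `/v2/jobs/{jobKey}` path and a local gateway; the changeset keys themselves come from the description above.

```js
// Job update sketch: only "retries" and "timeout" are honored by this endpoint.
const jobKey = "2251799813685248"; // example key from a previously activated job
await fetch(`http://localhost:8080/v2/jobs/${jobKey}`, {
  method: "PATCH",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    changeset: {
      retries: 3,     // new retry count; must be a positive number
      timeout: 60000, // new timeout in ms, counted from the current moment
    },
  }),
});
```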
+ +The job was updated successfully. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The job with the jobKey is not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The job with the given key is in the wrong state currently. More details are provided in the response body. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-role.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-role.api.mdx new file mode 100644 index 00000000000..d3c4375064d --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/update-role.api.mdx @@ -0,0 +1,60 @@ +--- +id: update-role +title: "Update role" +description: "Update a role with the given key." +sidebar_label: "Update role" +hide_title: true +hide_table_of_contents: true +api: eJztWEtz2zYQ/isYnJIpLcqpk6a8qYrTuE1TjyK3B0eHJbgSEYMAA4CWNRz+984C1MuSEx96lGc0lrjY9/cB4Lbcw8Lx7JZPjEI+S7ip0YKXRl8VPONNXYDHIEt4gU5YWZOQZ/wmiBgwaxSypfQl8yWyhbxHze5wNeAJr8FChR4tuWi5hgp5xknhT1zxhEsyVIMvecItfmukxYJn3jb42Nu0RDLKzDx4CT69YTE+cuVEiRXwrOV+VZMXqT0u0PKEz42twMdHby54182iN3T+N1OsSOexc2G0R+1JBHWtpAglSb86CqY9dGbyryg8ZWypgF6iI6koQS/QYTB0mJBDTwnFVUVMCry3Mm88OsoKlPp7Hmq3rz16hu73Q4u92CTgvJV6cdBkijLWuGCFdLWCFSPN3T4MeNcl3EuvyBBhZbxJu5uRbFvd252SzPaVIp4msS+866Kiq412MeJXw4vjZYwABLeJ1DVCoHPzRqnVgHcJvxgOj6vW1tzLgpIDD0w6po1n96BkMfii+ZM4qK3JFVY/HeLhcZuu40pWoAepWGwGA8fiwhwLJjW7nbwfs18vXv8ye1F6X7ssTZfL5cDOxRkW0hs7MHaR2rmgD617OWDTEi2yClYsRwZFIcknKLZtM3M1CjmXgpjiY7YhGOp5zO/7GInSQ4xsCNVYeQCZEbuZXDFZoPZyvpJ6ceg66MyhUWQDctP4LFeg7/gWRz8C5oi5pqrAbnaEfQddwp0H37gfbgg/vzoK+g/T6TWLJpgwBbK5scyX0q0dURKV1LJqKp5dDIcJr+Ah/nozHHZkkzr+jEw0w4dagQ7QepyO1KwyFnv8hMSkdh60+L86Y6xcyMd+9zndg/hdzChS82J4/gQbI4OZAnHnIpcYNL4kr5E+TFgMQYAKG9WJZCeSnUh2nGTfPfLWd67+QrU+v+am0cWJWCdinYj1FLFeH7sQjjSjKlvCIVprLDNCNNZiwZalVME83SvXvvuT7nRTPHHtxLWnuNYlvEJfmiK+6YsyjAV8yTOe0rnl0rY/vjp6i0d7v54WNFbxjLeRMV2Wpm1pnO+ytjbWd+k9teMerIRcRRCSODJrjRhlBKjw+FjnSLD7MjuGqtEFsLdscvl5yn4Hj0uIL5Dkct/02+Hb4VGrtPQJi6PrKxYzjLjb2QvWZonUR83Gxc8xHOYbDkVjpV99JrVYnhzBoh01VPsNHnp/wTr9jot40n95v0bJH/9OQ6NpH5tsZyeXD1DVkYV7s45+0tOjrQtYnJvgt0fKYQbUTrQupjwcnB+i8voqkEuYqmp02GH1It6CYKciQjXOUyUSrqRA7XAnovWyj1HC/oke2fmAuhkht95YF9KXTT4QpkpFVNv8z5XJ0wqkTnsXLh2P/rr59G509vFqfPnp8+XZ+WA48A8+JF8b5yvQO3H00zN7ZLLWbs+SZw3Z+k56fPBprUBqwlbIpO2ZdhtGbo4nPFvP3mZJT5db3rY5OLyxquvo8bcG7Ypnt7Mtu+IASjr6XvBsDso9HtDtBv1i0s97XrIfju2OBt8/BL0KFFcN/eIJv8PVzviwm3UJLxEKtCHAKB3HMM6mZGOrfTDF65K1xkgIrP0Ta/fObyLWZjO7Hk3HH4go/QixMkUYbsKShpmw5Bn/wr9Q2CbUKHAwPG+5Ar1oYEHro2H6+w++d2q6 +sidebar_class_name: "patch api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Update role

+ + + +Update a role with the given key. + +## Request + +

Path Parameters

Body

required
    changeset object
    + +A set of changed role attributes. + +
+ +The role was updated successfully. + +
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The request lacks valid authentication credentials. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +The role with the roleKey is not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-tenant.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-tenant.api.mdx new file mode 100644 index 00000000000..2fed02030c9 --- /dev/null +++ b/docs/apis-tools/camunda-api-rest/specifications/update-tenant.api.mdx @@ -0,0 +1,56 @@ +--- +id: update-tenant +title: "Update tenant" +description: "Updates an existing tenant." +sidebar_label: "Update tenant" +hide_title: true +hide_table_of_contents: true +api: eJztWFFv2zYQ/isEn1pMsZw27Tq9eWm6ZuuKIHW2h8QPJ/FssaVIlaTiGIb++3CkFDuW02RAHx0gsEQe73j3fR8l3Zp7WDieXfMpatCezxJuarTgpdHngme8qQV47GYTLtAVVtY0zTN+FSYdA83wTjov9YL5YDriCa/BQoUeLQVYcw0V8ozH+b9wxRMuyUkNvuQJt/i9kRYFz7xtcDfStETWaPm9QSYFai/nEi0zc+ZL3ArpihIr4Nma+1VN0aT2uEDLEz43tgIfh96e8LadxZjo/O9GrGhNYbRH7ekS6lrJIpQh/epoC+uhc5N/xYKqUlsqmpfoaDYmem/lvJV6MagdZaRxych6N5G23a7HdfQ4S7iXXpHLCEes/mXMgbcPF1ERw4CrjXZxY6/GY/oZ7iPGZUtwLOItmGuKAp2bN0qtqLI/qTYb9Pdh9CjmbuU8VkcL1MRNFIzWWA2q3/r5+z0QJ104IvJz4OiC4d3Ad5v8H1QfQXQ/ehEfgq9N+MljCNXW3EqBggnwwKRj2nh2C0qKH4BTW5MrrH4ZgvQwwIRdREsm0INULILHwLFomIeKs+vLD6fst5M3v85elN7XLkvT5XI5svPiCIX0xo6MXaR2XtA/2b0csWmJFlkFK5YjAyEkxQTFNrRgrsZCzmXBvAk167bNqNKjG82TpzgVZofI3NOhsXIA1IRdXZ73Z8kqHFy7ocOaOTSKfEBuGp/lCvQ3vsHyKTpMmGuqCuyq58PDAG3CnQffuCdPrNev9lLt43R6waILVhiBbG4s86V0fSBKopJaVk3Fs5PxOOEV3MW7t+NxSz4J8WdkQkd8rUAHau2mIzWrjMWOPyExqZ0HXfwsZIyVC7kb96GuOhK/jxn1gno95PsHY3MpBOpAT9Y9BXpVgVJmiQddHXR10NUPdHUy5Ptn49ncNFpEXW29Vej7mYOqDqo6qOoRVb3Z9/o30Zu3XbTWWGaKorEWBVuWUgX39Krex+4eZ5GLB60dtHbQ2lBrbcIr9KURsQFRlKFf4Uue8TQ+t1y6vv9Ybam1gPa2b2U0VvGMr6Nq2ixN16Vxvs3WtbG+TW8JkluwEnIViUjTUV09a5QpQIXhfejRxPZn5ClUjRbA3rHLsy9T9gd4XMIqVJNCPnT9bvxuvNcrmT7icXJxzmKGkXtb50HvloS91200fo7j0HRxWDRW+tUXWhbLkyNYtJOG6n/PiS5e8E730Ygn3cWHnil//jsNYNNZdrlp6JzdQVVHJXadp45kgYFzEyJ1/BjumQBE62KS49HxkIsX50FShamqRodzVS/YUvqSwVYNCtU4T7knXMkC6Ut/s6He7FOcYf/EiOx4RPhFkvXH6UL6sslHhanSIi67/82VydMKpE67EC49nfx99fn95OjT+enZ5y9nR8ej8cjf+VCn2jhfgd7aR2xFdO9ru5lutcWe6Pd1uHm882mtQGoqdchi3WnrumvIOJ7wbNMKmiWdQK75ep2Dwyur2paGvzdoVzy7nm30FAQopKNrwbM5KLfbLdze8ovLriX2kj2zh7g3jW4Q9CpIWzV0xxP+jTpZW12tdtYmvEQQaMNG4/xp3M7RlLxs1g8aaW3Sr5gUBdb+h7azrSPsYjI9/UjS6DqZlRG0yMKSOquw5Bm/4Te0YRNqFFQXxtdcgV40sCD76Jj+/gPNmZ2D +sidebar_class_name: "patch api-method" +info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api +custom_edit_url: null +hide_send_button: true +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Markdown from "@theme/Markdown"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + +

Update tenant

+ + + +Updates an existing tenant. + +## Request + +

Path Parameters

Body

required
+ +The tenant was updated successfully. + +
Schema
+ +The provided data is not valid. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Forbidden. The request is not allowed. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +Not found. The tenant was not found. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
+ +An internal error occurred while processing the request. + +
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx index a58a6de44ea..455e45a6940 100644 --- a/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx +++ b/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx @@ -5,7 +5,7 @@ description: "Update a user task with the given key." sidebar_label: "Update user task" hide_title: true hide_table_of_contents: true -api: eJztWW1v2zYQ/isHftmbLCut03X65qXdlq7tgsTZgCUBSktnm41EqnyJYxj678ORki2/pMm67psLBHVM3gvv7nmOOS6Z5VPD0it2aVCD5eaW3UQsR5NpUVmhJEvZZZVzi8DBtXtgLuwM7AxhKu5Qwi0uYhaximteokVNGpdM8hJZykhqxM3t77hgEROkseJ2xiKm8ZMTGnOWWu1w2+xohqQY1MRbWhu3Cpx3iWyabIYlZ+mS2UVF5oS0OEXNIjZRuuQ2fPViwOr6JphEY39W+YJk1h5MeGEwYpmSFqWlNV5Vhcg4edP/aMil5a41Nf6ImaWza1WhtgINrWYzLqdo0CvaPNabiz/eQxALYQx783A2bq0WY2cR7njh0MTX8lpSJCaqKNRcyOl6i4GMSxgj8PyjMxbzNi3CAMq8UkLaCHieCzLNi47ktZyLoiBZMZVKY56Sne/hQ8ZlLii2v2rlKvMBeqDpHDBeQKXVnci9CxKwrOwCCmHsphzV0dPFcoevuMXH9l9YLeTUS4Q4XFb/UqzSQmlhFyRSCilKV0ISQcnv/cejJIkgxwl3hYXjhIJx1tHXFKEy2I2+jzaHD9IVxYeQL1AaVCmsJTlhoSL39B0FnGq4Qm2Ez9RKzTcmSK4SzY0RU4lIyZXKPppfZ9DjY+jlQhW1qzGp9LuN01QvSmuqO7xDaQFLYYxQEiZKd8yG0o2vJYt2qpyOyscFtohdF9dZp/7D2iYimkx3oGN8dro4pfrpWVEi28cFuUPwRNQQAp00hvNH8x/v+F2TzXUZ/Qefgpqeq76eZ5tI6vjGteaLvV4QnMj2StSTpXm6O6SAnBEWS7Mbjvpzbgai+EI/p174f3W0Rf6jLeL5s71Ot/IbTrKINSzC0oSkPHOw9DiJWMMpLCVS2fanjpgVln5lJ6seUUeMZ8HkdrcYQuaMVSWEDQ3L2Bmn5hEYnGcZGiPGBcJEq7LTKD3MDVGkKzwj+fUN/gAh71RocjGcToAYJ8Qd84gYzFtpmdEquGah916zDYZYAefh814294BwnTgPjZjVNe3RaColTaCKZ8lgNxCjjSvAnJvmDpCDcT4AE1cUi5hiOUiSR+V37i8dum0Ux/BOaYQcLReFAa5xFRkQ0ku3bsNY5YsQjwcuEJVW4wLLH3YvEtv5Pgs7G7vtTYEbCBvHwfrV+S8n8NPg+Mebb2fWVibt9+fzeawnWQ9zYZWOlZ729SSjH9r3XQyjGWqEki9CT1ldC9ZEDabCTExERpn2DatxhrK8vyNssnxY/QydOi12UDaEy/NTEDlKKyYLqtMd06yDMcbHytl0XHB5y9b1tWt024pxZcn1CsibBuqIGcutM1/KE7+NRmcQVECmcvRd1WOtMbRBGoOkyxQvkqQmnZTxJ5xEAt5XBZe+tLaPIySU67r1BxPSWC6zr5UZpcVUbNuNWRfrTRG/CicKGB88Ada7sCSkEy4nysk8PgDsALADwB4E2E9fADBh2m4210pOfYQRMqc1SlssDl3wANIDSL8eSI/3XU6HEijKmuoQtaYhQuYRmMN8Jgqv3l/yG9vNFOuAtQPWDlh7CGt1xEq0M5WHgXM28xNqO2Mp61NH7FFHNP1lZz5d00CZ5nXNBNvpgqVsGaBTp/3+cqaMrdNlpbSt+3eUlzuuBf3B69NIywFibekUKuOF/3pfCmmBxuTt0U546WTO4SWcv74Ywa/c4pyHv2rJ5Kbql8nLZP/IQmn7gMbh2SmEE4YC7JBCq5bQvVdt2PwUxX7UbjBzNDi5ILEQnjFyjXroKAmrwmjsee30e9jEoubDL225vPlr5DNOhHa+HuO/vudlFeC4OXVvB47sWfJs0DtKekfJ6Og4TV6kzwfxIHn+N9seA35u5/ZY7qot6Js9s7Du4nr+dJx0Bj2dkZWQE+Xj0Q6GdiJLZUajYy+YxEe7sDk79ejPVFk66VuAnLYD6rW+rKA5siZeKESG0viwNe807ba3YQX+DBbhKKYqC1BomX8q7MyN40yV/SyIrf4fF2rcL7mQ/caE6Z8M312+fzXsvT09ef3+4nXvKE5ie299NitlbMllx4/muWl1ad0+7HLd8Z7+NNXUmsV7268KLiQF3p9p2ZDCFVuTAotY2n22uokaZF+x5XLMDV7qoq7p608O9YKlVzdrIvDMkQtDn9cPSw8e4dvz5g3qO3jai9feo7SzV7nwlFQ4+o1F7BYXW09w9U0dsRnyHLX3NOw4Cf70RqRnrWHnDayOWolhlmFlH9i7cfkgMlgx8dlwdPIbgbt5gStVTsKaz+lBkM9Zyq7ZNbmufLA8b/jvl6zgcur4lPYHxfTvH1kR+eY= +api: 
eJztWW1v2zYQ/isHflm3ybKSOF2rb17abdlLFyTOBiwJUFo622wlUuNLHMPQfx+OlGz5JU3Wdd9cIKhj3hvv7nnIHJfM8qlh6Q27NqjBcvOR3UVMVai5FUqe5yxlrsq5RRIY0XrEcjSZFhUJsJRd+2Xg4FoTMBd2BnaGMBX3KOEjLmIWsYprXqJFTQ6XTPISyXpj9xdcsIgJslhxO2MR0/i3ExpzllrtcNvtaIZkGNTEe1o7twpCxOTTZDMsOUuXzC4qciekxSlqFrGJ0iW34auXA1bXd8ElGvu9yheks45gwguDEcuUtCgtrfGqKkTms9T/YCik5a43Nf6AmaW9a8qpFWhoNZtxOUWD3tDmtn6++v0dBLWQxiCbh71xa7UYO4twzwuHJr6Vt5IyMVFFoeZCTtciBjIuYYzA8w/OWMzbsggDKPNKCWkj4HkuyDUvOpq3ci6KgnTFVCqNeUp+voH3GZe5oNz+qJWrzHvogaZ9wHgBlVb3IvchSMCysgsohLGbetRFz1fLHb7hFp+Sv7JayKnXCHm4rv6lWqWF0sIuSKUUUpSuhCSCkj/4j0dJEkGOE+4KC6cJJeOiY69pQmWwm32fbQ7vpSuK96FeoDSoUlhLesJCReHpe0o49XCF2ghfqZWZr0zQXBWaGyOmEpGKK5V9sr7OoMfH0OuFLmpXYzLppY3T1C9Ka+o7vEdpAUthjFASJkp33IbWjW8li3a6nLbKxwW2iF0310Wn/8PaJiKaSnegY3x1ujil/ulZUeIOBVFicofgiaghBNppDJdP1j/eibsmn+s2+g8xBTM9V325yDaR1ImNa80Xe6MgOJHvlaonS/P8cMgABSMslmY3HfWnwgxE8ZlxTr3y/xpoi/wnj4iT471Bt/obQbKINSzC0oS0PHOw9DSJWMMpLCVS2Y6njpgVln5lZ6szoo4Yz4LL7dNiCJkzVpUQBBqWsTNOh0dgcJ5laIwYFwgTrcrOQelhbogiXeEZya9v8AcIea/CIRfD+QSIcULeMY+IwbyXlhmtgtvmtnDLNhhiBZzH99veL8J14jIcxKyuSUajqZQ0gSqOk8FuIkYbV4A5N80dIAfjfAImrigWMeVykCRP6u/cXzp02xiO4TelEXK0XBQGuMZVZkBIr92GDWOVL0I+HrlAVFqNCyy/3b1IbNf7Ikg2ftubAjcQBMfB+83lD2fwenD63d2LmbWVSfv9+Xwe60nWw1xYpWOlp309yeiH5L6OYTRDjVDyRThTVteCNVGDqTATE5FRpf2B1QRDVd5/ImyyfFj9BJ06LXZQNoTry3MQOUorJgvq0x3XrIMxxsfK2XRccPmRrftr1+m2F+PKkusVkDcd1BEzlltnPpcnfhqNLiCYgEzl6E9Vj7XG0QZpDJIuU7xMkppsUsWfsRMJ+FAVXPrW2t6OkFCu+9ZvTEhjucy+VGWUFlOx7TdmXaw3Tfwm7ChgfPAMWO/CkpBOuJwoJ/P4ALADwA4AexRgrz8DYMK0p9lcKzn1GUbInNYobbE4nIIHkB5A+uVAerrvcjqUQFnW1IeoNQ0RMo/AHOYzUXjz/pLf+G6mWAesHbB2wNpjWKsjVqKdqTwMnLOZn1DbGUtZn07EHp2Ipr/szKdrGijTvK6ZYDtdsJQtA3TqtN9fzpSxdbqslLZ1/57qcs+1oD94fRlpOUCsbZ1CZbzwX+8rIS3QmLzd2hkvncw5vILLt1cj+JFbnPPwVy253DT9KnmV7B9ZKG0fsTi8OIeww9CAHVJozRK695oNws8x7EftBjNHg5MrUgvpGSPXqIeOirBqjMaft06/ByEWNR9+aNvl5z9HvuJEaJfrMf7bB15WAY6bU/d24MiOk+NB7+i4d3QyOjpJj16ng+N4cHLyF9seA35Kcnssd9M29N2eWVh3cT1/Ok06g57OyErIifL5aAdDO5mlNqPRsVdM4qNd2Fyce/Rnqiyd9EeAnLYD6rW9rKA5siZeKESG0vi0Ne80rdivYQX+CB7hKKYuC1BomX8q7MyN40yV/Syorf4fF2rcL7mQ/caF6Z8Nf7t+92bY+/X87O27q7e9oziJ7YP11ayUsSWXnTia56bVpXV7s8v1iff8p6mm1yw+2H5VcCEp8X5Py4YUbtiaFFjE0u6z1V3UIPuGLZdjbvBaF3VNX//tUC9YenO3JgLPHLkw9Hn9sPToFl5cNm9QX8PzXrz2bqWdvcqFp6TC0W8sYh9xsfUEV9/VEZshz1H7SIPEWYinNyI7aws7b2B11GoMswwr+4jsxuWDyGDFxBfD0dlPBO7mBa5UOSlrPqcHQT5nKbtltxS68snyvOG/X7KCy6njU5IPhunfP2aHBSg= sidebar_class_name: "patch api-method" info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api custom_edit_url: null diff --git a/docs/apis-tools/camunda-api-rest/specifications/upload-document-alpha.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/upload-document-alpha.api.mdx deleted file mode 100644 index 828c2d62a4d..00000000000 --- a/docs/apis-tools/camunda-api-rest/specifications/upload-document-alpha.api.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -id: upload-document-alpha -title: "Upload document (alpha)" -description: "Upload a document to the Camunda 8 cluster." 
-sidebar_label: "Upload document (alpha)" -hide_title: true -hide_table_of_contents: true -api: eJztWEtz2zYQ/is7OCVTmpJTN015U+2kVSdOPbbcHmwfQGIlIgEBBgAtsxr9984CpN5O3DSHTqae8YgiFt++vl1gtWCezxzLbtiZKZoKtXfsLmECXWFl7aXRLGPXtTJcAAfRyYA34EuEU141WnB4BYVqnEeb3upbnWWZNh5v9aSUDlCL2kjtQTrgGriqSw5T5L6xmMLYQ9FYi9qrFoxWLbimro31QVjqoworY9u1ZueNxeRWz0tZlISpjYcKufYwNRZqa0RTkN3QOEyDMbeaJazmllfo0ZKzC6Z5hSxjAW0sWMIkOfqxQduyhFn82EiLgmVTrhzuxmNSIozPwExDELZNo9A0MV5bi96kLGGuKLHiLFsw39bRACv1jC2XycqmfsvXMmtlUArjaQhXbc29FCgS4KBxTpvmUinIEWao0XKPIoWrGgs5baWeUSrwQTpPz72wRdcoD1KHVWuNBbmjmSuLXLRxr0tDHh6PwF10EJ3/2YiWJNb+ettgwgqjPWpPS1WjvKy59YOpsdWR4D5g7qGb/D0WnvJvTY3WS3S0OpUK921IGIFxzzKWS81ty5YJq9DzHn4XlAshKfhcXWzAR2O3FXamTwLAvt79RHYbgCR3U5qSXeTCu8CYp8ARt3oY2hkg8KGWFt3IfzIUgns88rLCg8C0ClwLIAmYl6i3OdDpCPqc/GvTXKk9ztBu6pLavzw5qIf27lFbashbH9CXCfPSU1ZXney8zxwtrrl0E7N/RwHQhRHkLjFqI89b2WK8rpUsOBkzeO+MZkv6I0hXG+1igl8Mj+njQIB6Y+fcdYWIAlxTFOjctFGqpcawQe09df+A172yp/GsDxQISa8rqbk3NoWRmvPWgcPQPG5ZEdv8LSNLUTcVBbF7GeLYt9EnUfGxzhkostH8vhAs/b9ov5mi3V28xCla1AWyWIEnw+Fniq47iqdcKjrTzumEFui5VA64xdVRSFaRlX1NQ25EG0+sR0qztiZXWH23X6Lb5ozgIkp2eiESEbiDKJhH7TeXb07hp5Mffrx7Vnpfu2wwmM/nqZ0WRygk1aWxs4GdFvRPcs9TmJRoESre0tG95jWsaQwunOKy6K9sndmBotG/T3cU/wj5V9lvrNzL/QiuL8cgBWrf3SD2VIc9U94owuC5aXyWK64/sHXaP1ciI3BNVXHb9hTbVhD6EveN+yx7v39xkL2/TiYXECGgMALDBdPTnbZTRE5UUsuKOuLJcJiwij/Eby+HQ+pmIeNP8ITuV7XiOlBr1x2poVrzNjgmtfOc6uDrZMZYOZO7erfrsyPxWfSoOwEr9KURLGO1cYE63JcsYwOxmiUS5tDe97fuxiqWsUUslWU2GCxK4/wyW9CNfzm4pzzccyt5riL7aDmWVE8VZQquyqhuP2W0sNk11wPK5eurCfzCPc55G0JIKrehXw1fDQ+ikugjiKOLMUQPI+E2mkAPS9V8uDsG4acAh8uxw6Kx0rdXtC2GJ0du0Y4aivqKCJ2+gE7foxBLuoc3PT1++3MSMiz11ITtXab3DaGsoHXR8mF6vM+qi3EojsJUVaNDh9QzmEtfAj8wJLKEKVmgdoG/3djTi72NK/BH1AjHKSUlMqdvjDPpyyZPC1MNumvI6jNXJh9UXOpBp8INTkfn1+/ORkdvx6ev3129PjpOh6l/8MF5Im7F9YYd3bC7OkGehYn1+a7Pi/W58E3Mxx15PD74Qa241ETnEPVFV9Y3TGz+RBBL84YtFjl3eG3Vckmv46xKxS6kozp+ZFrdjN+Xz9MHrf6A7dZof89VQ1KMhux/bdV/a5z+RAC2fkdYx+Bu3WJZdkO39xK5QBtyFneexhgchYvveu+hmZtCGjeNigJrvyG+PzbdbZwYF79fTagndaN+ZQTtIeCAm6wfycjl8m8VCHnL -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

Upload document (alpha)

- - - -Upload a document to the Camunda 8 cluster. - -:::note -This endpoint is an alpha feature. It currently only supports an in-memory document store, -which is not meant for production use. -::: - -## Request - -

Query Parameters

Body

required
    metadata object
- -The document was uploaded successfully. - -
Schema
    metadata object
- -The document upload failed. More details are provided in the response body. - -
Schema
    = 400` and `<= 600`"} schema={{"type":"integer","format":"int32","description":"The HTTP status code for this problem.","minimum":400,"maximum":600}}>
diff --git a/docs/apis-tools/community-clients/c-sharp.md b/docs/apis-tools/community-clients/c-sharp.md deleted file mode 100644 index f4b5346fe53..00000000000 --- a/docs/apis-tools/community-clients/c-sharp.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: c-sharp -title: "C#" -description: "Take a deeper look at the source code, Nuget package, and API docs alongside C#." ---- - -The C# client is a community library. Take a closer look at [the maintainer(s) and source code](https://github.com/camunda-community-hub/zeebe-client-csharp). - -- [Nuget package](https://www.nuget.org/packages/zb-client/) -- [API docs](https://camunda-community-hub.github.io/zeebe-client-csharp/) -- [Bootstrap C# applications](https://github.com/camunda-community-hub/zeebe-client-csharp-bootstrap) diff --git a/docs/apis-tools/community-clients/cli-client/cli-get-started.md b/docs/apis-tools/community-clients/cli-client/cli-get-started.md index 5e75e07fa03..3cdc65abac5 100644 --- a/docs/apis-tools/community-clients/cli-client/cli-get-started.md +++ b/docs/apis-tools/community-clients/cli-client/cli-get-started.md @@ -2,6 +2,7 @@ id: cli-get-started title: Getting started with the CLI client sidebar_label: "Getting started with the CLI client" +description: "Get started with this tutorial that shows you how to interact with Camunda 8 using the community-supported CLI client and command line interface `zbctl`." --- In this tutorial, you will learn how to use the [community-supported](https://github.com/camunda-community-hub) `zbctl` CLI client to interact with Camunda 8. diff --git a/docs/apis-tools/community-clients/go-client/go-get-started.md b/docs/apis-tools/community-clients/go-client/go-get-started.md index 78db5a571b4..0b45f4c99ff 100644 --- a/docs/apis-tools/community-clients/go-client/go-get-started.md +++ b/docs/apis-tools/community-clients/go-client/go-get-started.md @@ -2,6 +2,7 @@ id: go-get-started title: Getting started with the Go client sidebar_label: "Getting started with the Go client" +description: "Get started with this tutorial that shows you how to interact with Camunda 8 using the community-supported Go client in a Go application." --- import Tabs from "@theme/Tabs"; @@ -143,7 +144,7 @@ Now, we need a simple process we can deploy. Later, we will extend the process w ![model-process-step-1](assets/order-process-simple.png) -3. Set the ID (the BPMN process id) to `order-process` instead of the autogenerated value so it's easier to work with in this example. +3. Set the ID (the BPMN process ID) to `order-process` instead of the autogenerated value so it's easier to work with in this example. 4. [Optional] Download the BPMN file to the root of the project. @@ -157,7 +158,7 @@ Now, we need a simple process we can deploy. Later, we will extend the process w ![model-process-step-1](assets/order-process-simple.png) -3. Set the ID (the BPMN process id) to `order-process` instead of the autogenerated value so it's easier to work with in this example. +3. Set the ID (the BPMN process ID) to `order-process` instead of the autogenerated value so it's easier to work with in this example. 4. Place the BPMN diagram in the root of the project. @@ -169,7 +170,7 @@ Now, we need a simple process we can deploy. Later, we will extend the process w Next, we want to deploy the modeled process to the broker. -The broker stores the process under its BPMN process id and assigns a version. +The broker stores the process under its BPMN process ID and assigns a version. 
- [Ballerina](https://github.com/camunda-community-hub/ballerina-zeebe) -- [C#](c-sharp.md) +- [C#](https://github.com/camunda-community-hub/zeebe-client-csharp) - [CLI](cli-client/index.md) - [Delphi](https://github.com/camunda-community-hub/DelphiZeeBeClient) - [EJB](https://github.com/camunda-community-hub/zeebe-ejb-client) - [Go](go-client/index.md) - [Micronaut](https://github.com/camunda-community-hub/micronaut-zeebe-client) -- [Python](python.md) -- [Ruby](ruby.md) -- [Rust](rust.md) -- [Quarkus](quarkus.md) +- [Python](https://gitlab.com/stephane.ludwig/zeebe_python_grpc) +- [Quarkus](https://github.com/quarkiverse/quarkus-zeebe) +- [Ruby](https://github.com/zeebe-io/zeebe-client-ruby) +- [Rust](https://github.com/camunda-community-hub/zeebest) diff --git a/docs/apis-tools/community-clients/micronaut.md b/docs/apis-tools/community-clients/micronaut.md deleted file mode 100644 index 3bc9235d315..00000000000 --- a/docs/apis-tools/community-clients/micronaut.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: micronaut -title: "Micronaut" ---- - -The Micronaut integration is a community extension allowing you to leverage Zeebe within your Micronaut environment. - -The integration provides a wrapper around the [Zeebe Java Client](/apis-tools/java-client/index.md), and is preconfigured with sensible defaults so you can get started with minimal configuration. Add a dependency, implement a worker, and add your credentials in your Micronaut project. - -The Micronaut Framework is known for its efficient use of resources. Native images created with [GraalVM](https://www.graalvm.org/) reduce startup times to milliseconds. - -- [Documentation and source code](https://github.com/camunda-community-hub/micronaut-zeebe-client) -- [Integrate Camunda's External Task Clients into Micronaut Framework projects](https://github.com/camunda-community-hub/micronaut-camunda-external-client) -- [Create application with Micronaut Launch](https://micronaut.io/launch?name=jobworker&features=camunda-zeebe) -- [Releases on Maven Central](https://search.maven.org/artifact/info.novatec/micronaut-zeebe-client-feature) diff --git a/docs/apis-tools/community-clients/python.md b/docs/apis-tools/community-clients/python.md deleted file mode 100644 index 053cf07a900..00000000000 --- a/docs/apis-tools/community-clients/python.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: python -title: "Python" -description: "Take a deeper look at the source code and pip package alongside Python." ---- - -## Zeebe Python gRPC - -Take a closer look at the Python client [maintainer(s) and source code](https://gitlab.com/stephane.ludwig/zeebe_python_grpc). - -- [Pip package](https://pypi.org/project/zeebe-grpc/) - -## Pyzeebe - -Take a closer look at this Python client's [maintainer(s) and source code](https://github.com/camunda-community-hub/pyzeebe). - -- [Pip package](https://pypi.org/project/pyzeebe/) -- [Documentation](https://pyzeebe.readthedocs.io/en/stable/) diff --git a/docs/apis-tools/community-clients/quarkus.md b/docs/apis-tools/community-clients/quarkus.md deleted file mode 100644 index 502e94f5823..00000000000 --- a/docs/apis-tools/community-clients/quarkus.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: quarkus -title: "Quarkus" ---- - -The [Quarkus](https://quarkus.io/) integration is a community extension that allows you to easily leverage Zeebe within your Quarkus environment. 
- -Essentially, Quarkus provides a wrapper around the [Zeebe Java Client](/apis-tools/java-client/index.md), and is preconfigured with sensible defaults so you can get started with minimal configuration. Add a dependency, implement a worker, and add your credentials in your Quarkus project. - -The integration also provides Quarkus developer services to start everything you need as Docker containers for local development. [Zeebe-dev-monitor](https://github.com/lorislab/zeebe-dev-monitor) UI is also included as a developer service. - -- [Documentation and source code](https://github.com/quarkiverse/quarkus-zeebe) -- [Releases on Maven Central](https://search.maven.org/artifact/io.quarkiverse.zeebe/quarkus-zeebe) -- [Create application with Quarkus](https://code.quarkus.io/?e=io.quarkiverse.zeebe:quarkus-zeebe&extension-search=quarkus-zeebe) -- [Documentation and source code zeebe-dev-monitor](https://github.com/lorislab/zeebe-dev-monitor) diff --git a/docs/apis-tools/community-clients/ruby.md b/docs/apis-tools/community-clients/ruby.md deleted file mode 100644 index 2c2535500a6..00000000000 --- a/docs/apis-tools/community-clients/ruby.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -id: ruby -title: "Ruby" -description: "Take a deeper look at the source code and Ruby gem alongside Ruby." ---- - -Take a closer look at the Ruby client [maintainer(s) and source code](https://github.com/zeebe-io/zeebe-client-ruby). - -- [Ruby gem](https://rubygems.org/gems/zeebe-client) diff --git a/docs/apis-tools/community-clients/rust.md b/docs/apis-tools/community-clients/rust.md deleted file mode 100644 index 8ee0e3997a9..00000000000 --- a/docs/apis-tools/community-clients/rust.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: rust -title: "Rust" -description: "Take a deeper look at the source code, Rust crate, and a podcast interview alongside Rust." ---- - -:::note -The Rust client, Zeebest, was previously maintained by [Mackenzie Clark](https://github.com/xmclark), and is currently seeking a new maintainer! -::: - -- [Source code](https://github.com/camunda-community-hub/zeebest) -- [Rust crate](https://docs.rs/zeebest/0.20.0/zeebest/) -- [Podcast interview with Mackenzie Clark](https://zeebe.buzzsprout.com/454051/1478953-zeebe-and-rust-interview-with-mackenzie-clark) diff --git a/docs/apis-tools/frontend-development/01-task-applications/01-introduction-to-task-applications.md b/docs/apis-tools/frontend-development/01-task-applications/01-introduction-to-task-applications.md index f6624f0b26f..9f29b2dc237 100644 --- a/docs/apis-tools/frontend-development/01-task-applications/01-introduction-to-task-applications.md +++ b/docs/apis-tools/frontend-development/01-task-applications/01-introduction-to-task-applications.md @@ -13,7 +13,7 @@ Task applications are the interface between humans and Camunda processes to orch ## What are task applications? -Task applications are end-user applications that allow humans to perform work orchestrated with a process. A [user task](/components/modeler/bpmn/user-tasks/user-tasks.md/#user-task-forms) (also referred to as a **human task**) represents a single **work item** to be performed by an individual or a group. The jobs of a task application include: +Task applications are end-user applications that allow humans to perform work orchestrated with a process. 
A [user task](/components/modeler/bpmn/user-tasks/user-tasks.md/#user-task-forms) (for [human task orchestration](/guides/getting-started-orchestrate-human-tasks.md)) represents a single **work item** to be performed by an individual or a group. The jobs of a task application include: - Listing available tasks and allowing users to select a task to work on. - Providing filter and search options for users so they can more easily find the right next task to work on. @@ -121,7 +121,7 @@ flowchart end ``` -The lifecycle of human tasks is mostly a generic issue. There is no need to model common aspects into all your processes, as this often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. +The lifecycle of human task orchestration is mostly a generic issue. There is no need to model common aspects into all your processes, as this often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. Learn how to define and implement your task lifecycle on the [user task lifecycle](./02-user-task-lifecycle.md) page. @@ -131,7 +131,7 @@ Every task can be assigned to either a group of people, or a specific individual Task assignment in group and personal queues -As a general rule, you should assign human tasks in your business process to groups of people instead of specific individuals. This avoids bottlenecks (such as high workloads on single individuals or employees being on sick leave) and can greatly improve your process performance. +As a general rule, you should assign user tasks in your business process to groups of people instead of specific individuals. This avoids bottlenecks (such as high workloads on single individuals or employees being on sick leave) and can greatly improve your process performance. In the [XML of a user task](/components/modeler/bpmn/user-tasks/user-tasks.md#xml-representations), this is represented as follows: diff --git a/docs/apis-tools/frontend-development/01-task-applications/02-user-task-lifecycle.md b/docs/apis-tools/frontend-development/01-task-applications/02-user-task-lifecycle.md index cabf0ea6826..823710fc9f0 100644 --- a/docs/apis-tools/frontend-development/01-task-applications/02-user-task-lifecycle.md +++ b/docs/apis-tools/frontend-development/01-task-applications/02-user-task-lifecycle.md @@ -78,19 +78,19 @@ Make sure that you create your own validation logic that matches your use case. ## Implement the life cycle with the task API -To implement task life cycle operations with the Zeebe task API, call the respective endpoints: +To implement task life cycle operations with the task API, call the respective endpoints: -- [`POST /user-tasks/:taskKey/assignment`](/apis-tools/zeebe-api-rest/specifications/assign-a-user-task.api.mdx) or [`DELETE /user-tasks/:taskKey/assignee`](/apis-tools/zeebe-api-rest/specifications/unassign-a-user-task.api.mdx) to change task assignment. -- [`PATCH /user-tasks/:taskKey`](/apis-tools/zeebe-api-rest/specifications/update-a-user-task.api.mdx) to update a task. -- [`POST /user-tasks/:taskKey/completion`](/apis-tools/zeebe-api-rest/specifications/complete-a-user-task.api.mdx) to complete a task. +- [`POST /user-tasks/:userTaskKey/assignment`](/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx) or [`DELETE /user-tasks/:userTaskKey/assignee`](/apis-tools/camunda-api-rest/specifications/unassign-user-task.api.mdx) to change task assignment. 
+- [`PATCH /user-tasks/:userTaskKey`](/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx) to update a task. +- [`POST /user-tasks/:userTaskKey/completion`](/apis-tools/camunda-api-rest/specifications/complete-user-task.api.mdx) to complete a task. All these endpoints (except `DELETE`) allow you to send a custom `action` attribute via the payload. The `action` attribute carries any arbitrary string and can be used to track any life cycle event, including those mentioned above. -#### [`POST /user-tasks/:taskKey/assignment`](/apis-tools/zeebe-api-rest/specifications/assign-a-user-task.api.mdx) +#### [`POST /user-tasks/:userTaskKey/assignment`](/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx) Use the `assignment` endpoint to change the task assignment. Use the `action` attribute to indicate the cause of the change, including `claim`, `reassign`, or `assign`. -#### [`PATCH /user-tasks/:taskKey`](/apis-tools/zeebe-api-rest/specifications/update-a-user-task.api.mdx) +#### [`PATCH /user-tasks/:userTaskKey`](/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx) Use the `update` endpoint to change candidate users, groups, the due date, or the follow-up date by defining the `changeset`. You can also send it with an empty `changeset` and just pass an `action`. Use it to send `start`, `pause`, and `resume` actions. Additionally, you can send anything of interest or relevant for the audit log such as `escalate`, `requestFurtherInformation`, `uploadDocument`, or `openExternalApp`. @@ -105,7 +105,7 @@ An example request payload could look like this: } ``` -#### [`POST /user-tasks/:taskKey/completion`](/apis-tools/zeebe-api-rest/specifications/complete-a-user-task.api.mdx) +#### [`POST /user-tasks/:userTaskKey/completion`](/apis-tools/camunda-api-rest/specifications/complete-user-task.api.mdx) Use the `completion` endpoint to complete a task. Pass along with it the outcome of the task via the `action` attribute, such as `approve` or `reject`. diff --git a/docs/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md b/docs/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md index 50dfb21ef17..b0027b95827 100644 --- a/docs/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md +++ b/docs/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md @@ -6,10 +6,10 @@ description: "Understand and decide on the architecture of your task application A typical task application architecture consists of a task application frontend, a backend-for-frontend, and one or more data sources or services that contain business data relevant for the application users to perform their work. The backend implements Camunda Zeebe and Tasklist clients to retrieve and interact with tasks via Camunda APIs. For historical process instance data, Operate is also required. -Depending on the user task implementation type (job-based vs Zeebe user task) you use in your processes, you need to run either the Tasklist or Zeebe client to run operations on task. Task, form, and variable retrieval happens via the Tasklist API. Learn more about the differences of the task implementation types in the [migration guide for Zeebe user tasks](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md). 
+Depending on the user task implementation type (job worker-based vs Zeebe user task) you use in your processes, you need to run either the Tasklist or Zeebe client to run operations on tasks. Task, form, and variable retrieval happens via the API. Learn more about the differences of the task implementation types in the [migration guide for Zeebe user tasks](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md). :::tip -Starting a completely new project? Use only Zeebe user tasks to simplify your implementation. +Starting a new project? Use Zeebe user tasks to simplify your implementation. ::: Click on any element of this diagram to jump to the documentation page for the respective component: @@ -72,14 +72,14 @@ style Tasklist stroke:#10c95d,color:#000 click Forms "../../forms/introduction-to-forms" click Rest "../../../tasklist-api-rest/tasklist-api-rest-overview" -click Job "../../../tasklist-api-rest/migrate-to-zeebe-user-tasks" -click ZeebeTasks "../../../tasklist-api-rest/migrate-to-zeebe-user-tasks" +click Job "../../../migration-manuals/migrate-to-zeebe-user-tasks" +click ZeebeTasks "../../../migration-manuals/migrate-to-zeebe-user-tasks" click ZeebeRest "../../../zeebe-api-rest/zeebe-api-rest-overview" ``` Follow these resources to learn more about the individual components: -- Familiarize yourself with the [Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) for task, variable, and form retrieval, and to run operations on job-based user tasks. -- Learn how to use the [Zeebe API](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md) to run operations on Zeebe-based user tasks. +- Learn how to use the [Camunda 8 API](/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx) for task, variable, and form retrieval, and to run operations on Zeebe user tasks. +- Familiarize yourself with the [Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) to run operations on job worker-based user tasks. - Understand how to design, embed, and customize [forms](/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md). - Understand how this architecture fits into the overall Camunda architecture with the [Java greenfield stack](/components/best-practices/architecture/deciding-about-your-stack.md). diff --git a/docs/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md b/docs/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md index aaf60920878..e79fcdd2332 100644 --- a/docs/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md +++ b/docs/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md @@ -1,13 +1,13 @@ --- id: introduction-to-forms title: "Introduction to forms" -description: "Forms play a key role in guiding work processes, gathering necessary information, and aiding in decision-making for human tasks." +description: "Forms play a key role in guiding work processes, gathering necessary information, and aiding in decision-making for human task orchestration." --- import FormEditorImg from './img/form-editor.png'; import GHIcon from "@site/src/mdx/GitHubInlineIcon"; -Forms play a key role in giving work instructions, collecting information and making decisions on human tasks. Forms are lightweight user interfaces, tailored for focused data input in specific steps of a process, rendering the orchestration of human tasks more efficient than simply routing users to the applications that are orchestrated.
+Forms play a key role in giving work instructions, collecting information and making decisions within human task orchestration. Forms are lightweight user interfaces, tailored for focused data input in specific steps of a process, rendering the orchestration of human tasks more efficient than simply routing users to the applications that are orchestrated. Forms are commonly used in [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md/#user-task-forms), but also as [start forms](/components/tasklist/userguide/starting-processes.md) to start a new process instance, or even as [public forms](/components/modeler/web-modeler/advanced-modeling/publish-public-processes.md), e.g. to capture user input at scale or to allow your customers to trigger a self-service process. diff --git a/docs/apis-tools/frontend-development/03-forms/_category_.yml b/docs/apis-tools/frontend-development/03-forms/_category_.yml index 0cd2d4a9d0e..a420c248c6d 100644 --- a/docs/apis-tools/frontend-development/03-forms/_category_.yml +++ b/docs/apis-tools/frontend-development/03-forms/_category_.yml @@ -1,4 +1,4 @@ label: "Forms" link: null customProps: - description: Forms are a crucial piece to provide work instructions, collect information, and drive decisions in human tasks. + description: Forms are a crucial piece to provide work instructions, collect information, and drive decisions in human task orchestration. diff --git a/docs/apis-tools/java-client-examples/cluster-topology-request.md b/docs/apis-tools/java-client-examples/cluster-topology-request.md index ac1a9a952fc..31a4108466e 100644 --- a/docs/apis-tools/java-client-examples/cluster-topology-request.md +++ b/docs/apis-tools/java-client-examples/cluster-topology-request.md @@ -11,7 +11,7 @@ This example shows which broker is leader and follower for which partition. This ## Prerequisites -Run the Zeebe broker with endpoints, `localhost:8080` (default REST) and `localhost:26500` (default gRPC). +Run the Zeebe Broker with endpoints `localhost:8080` (default REST) and `localhost:26500` (default gRPC). ## TopologyViewer.java diff --git a/docs/apis-tools/java-client-examples/data-pojo.md b/docs/apis-tools/java-client-examples/data-pojo.md index b73b7bece7c..fc3e9e792f9 100644 --- a/docs/apis-tools/java-client-examples/data-pojo.md +++ b/docs/apis-tools/java-client-examples/data-pojo.md @@ -10,7 +10,7 @@ description: "Let's analyze the prerequisites and code to handle variables as PO ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 2. Run the [deploy a process example](process-deploy.md). ## HandleVariablesAsPojo.java diff --git a/docs/apis-tools/java-client-examples/decision-evaluate.md b/docs/apis-tools/java-client-examples/decision-evaluate.md index b500b4da8b4..c6302c35694 100644 --- a/docs/apis-tools/java-client-examples/decision-evaluate.md +++ b/docs/apis-tools/java-client-examples/decision-evaluate.md @@ -6,7 +6,7 @@ description: "Let's dive deeper into Zeebe and Java to evaluate a decision." ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 1. Run the [deploy a process example](process-deploy.md). Deploy [`demoDecision.dmn`](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoDecision.dmn) instead of `demoProcess.bpmn`. 
## EvaluateDecisionCreator.java diff --git a/docs/apis-tools/java-client-examples/job-worker-open.md b/docs/apis-tools/java-client-examples/job-worker-open.md index 9471c7072a3..8d4c2392038 100644 --- a/docs/apis-tools/java-client-examples/job-worker-open.md +++ b/docs/apis-tools/java-client-examples/job-worker-open.md @@ -10,7 +10,7 @@ description: "Let's analyze the prerequisites and code to open a job worker." ## Prerequisites -- Run the Zeebe broker with endpoint `localhost:26500` (default). +- Run the Zeebe Broker with endpoint `localhost:26500` (default). - Run the [deploy a process example](process-deploy.md). - Run the [create a process instance example](process-instance-create.md) a few times. diff --git a/docs/apis-tools/java-client-examples/process-deploy.md b/docs/apis-tools/java-client-examples/process-deploy.md index 34ae1c4542d..373844a46be 100644 --- a/docs/apis-tools/java-client-examples/process-deploy.md +++ b/docs/apis-tools/java-client-examples/process-deploy.md @@ -11,7 +11,7 @@ description: "Let's analyze the prerequisites and code to deploy a process using ## Prerequisites -Run the Zeebe broker with endpoint `localhost:26500` (default). +Run the Zeebe Broker with endpoint `localhost:26500` (default). ## ProcessDeployer.java diff --git a/docs/apis-tools/java-client-examples/process-instance-create-nonblocking.md b/docs/apis-tools/java-client-examples/process-instance-create-nonblocking.md index 531191dc7b5..aae09cf30e3 100644 --- a/docs/apis-tools/java-client-examples/process-instance-create-nonblocking.md +++ b/docs/apis-tools/java-client-examples/process-instance-create-nonblocking.md @@ -6,7 +6,7 @@ description: "Let's analyze the prerequisites and code to create non-blocking pr ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 2. Run the [deploy a process example](process-deploy.md). ## NonBlockingProcessInstanceCreator.java diff --git a/docs/apis-tools/java-client-examples/process-instance-create-with-result.md b/docs/apis-tools/java-client-examples/process-instance-create-with-result.md index 1c9d0c22e4d..e2fbe45d708 100644 --- a/docs/apis-tools/java-client-examples/process-instance-create-with-result.md +++ b/docs/apis-tools/java-client-examples/process-instance-create-with-result.md @@ -6,7 +6,7 @@ description: "Let's analyze the prerequisites and code to create a process insta ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 1. Run the [deploy a process example](process-deploy.md). Deploy [`demoProcessSingleTask.bpmn`](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoProcessSingleTask.bpmn) instead of `demoProcess.bpmn`. ## ProcessInstanceWithResultCreator.java diff --git a/docs/apis-tools/java-client-examples/process-instance-create.md b/docs/apis-tools/java-client-examples/process-instance-create.md index b12e0c0d562..1408e781deb 100644 --- a/docs/apis-tools/java-client-examples/process-instance-create.md +++ b/docs/apis-tools/java-client-examples/process-instance-create.md @@ -6,7 +6,7 @@ description: "Let's dive deeper into Zeebe and Java to create a process instance ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 1. Run the [deploy a process example](process-deploy.md). 
## ProcessInstanceCreator.java diff --git a/docs/apis-tools/java-client/index.md b/docs/apis-tools/java-client/index.md index 0768a607df7..30212b1bdc8 100644 --- a/docs/apis-tools/java-client/index.md +++ b/docs/apis-tools/java-client/index.md @@ -52,8 +52,8 @@ In Java code, instantiate the client as follows: .build(); try (ZeebeClient client = ZeebeClient.newClientBuilder() - .grpcAddress(zeebeGrpc) - .restAddress(zeebeRest) + .grpcAddress(URI.create(zeebeGrpc)) + .restAddress(URI.create(zeebeRest)) .credentialsProvider(credentialsProvider) .build()) { client.newTopologyRequest().send().join(); diff --git a/docs/apis-tools/java-client/job-worker.md b/docs/apis-tools/java-client/job-worker.md index c0af82e7dad..6f03d44423f 100644 --- a/docs/apis-tools/java-client/job-worker.md +++ b/docs/apis-tools/java-client/job-worker.md @@ -14,7 +14,12 @@ keywords: ["backpressure", "back-pressure", "back pressure"] The Java client provides a job worker that handles polling for available jobs. This allows you to focus on writing code to handle the activated jobs. :::caution REST API limitation -The 8.6.0 Java client cannot maintain the long-lived polling connections required for job polling via the REST API. For example, this applies to performing long-polling job activation when activating jobs larger than the maximum message size, or receiving additional job activation requests while the long-polling connection is still open. +The 8.6.0 Java client cannot maintain the long-lived polling connections required for job polling via the REST API. For example, this applies when: + +- Performing long-polling job activation when activating jobs larger than the maximum message size. +- Receiving additional job activation requests from the same Java client while the long-polling connection is still open. +- Receiving additional job activation requests from a Java client running on the same JVM while the long-polling connection is still open. +- Receiving additional job activation requests from a Java client running on a different JVM while the long-polling connection is still open. If you encounter this issue, consider switching to the Zeebe gRPC protocol for job activation, or use job activation via the REST API with long polling disabled. diff --git a/docs/apis-tools/migration-manuals/migrate-to-camunda-api.md b/docs/apis-tools/migration-manuals/migrate-to-camunda-api.md new file mode 100644 index 00000000000..a9b4bf9626c --- /dev/null +++ b/docs/apis-tools/migration-manuals/migrate-to-camunda-api.md @@ -0,0 +1,113 @@ +--- +id: migrate-to-camunda-api +title: Migrate to the Camunda 8 API +description: "Migrate from Camunda's V1 component REST APIs to the V2 Camunda 8 REST API to interact with Camunda 8 clusters, activate jobs, and run user task state operations." +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +This document offers a comprehensive guide to migrate from Camunda's V1 component REST APIs (the Tasklist REST API, for example) to the V2 [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md). + +Camunda is streamlining the developer experience by creating a unified REST API for Zeebe, Operate, Tasklist, and the Identity components with endpoint parity. This will be a single REST API for the orchestration cluster for a consistent and intuitive API experience to help your teams develop process automation solutions faster. 
+ +:::note +The Administration and Web Modeler APIs will not be part of the Camunda 8 REST API, as these are platform APIs outside the cluster’s scope. +::: + +Over time, the individual component APIs will go through a deprecation process, starting with the former Operate and Tasklist APIs. These will remain in the product for the short term, but we recommend beginning to adopt the new API now. In addition, we will begin to deprecate several Zeebe gRPC endpoints as well. See [the official blog announcement](https://camunda.com/blog/2024/11/camunda-8-7-releasing-february-2025/). + +## Migrate endpoints + +This section covers all public endpoints existing in the component REST APIs, their Camunda 8 API counterparts, and the required migration changes. + +### General changes + + + + + +- The new API can be found at `/v2/…` instead of `/v1/…`. +- Endpoints are no longer separated by component concerns, and all endpoints receive similar support. For example, process definitions, user tasks, and user authorizations were previously spread across separate Tasklist, Operate, and Identity APIs. +- Naming, response codes, and type handling have been streamlined for all endpoints to provide a consistent UX. +- Endpoints with similar concerns (variable search, for example) have been consolidated into single endpoints. + + + + + +- Unified search request structure. + - Attributes `filter`, `page`, and `sort` on the root level. + - Endpoint-specific filter attributes in the `filter` object, not at the root level. + - Pagination information in the `page` object, for example, the attributes `from`, `limit`, `searchBefore`, and `searchAfter`. + - Sorting configuration in the `sort` object array, each object containing the field name and order (descending or ascending). +- Unified search response structure. + - Attributes `items` and `page` on the root level. + - List of endpoint-specific response items in the `items` attribute. + - Page information in the `page` attribute, for example, the attributes `totalItems`, `firstSortValues`, and `lastSortValues` to use in `searchBefore` and `searchAfter` in follow-up requests. + + + + + +
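To make the unified structure concrete, the following is a minimal sketch of a V2 search request issued from TypeScript. The base URL, the authentication header, and the exact property names inside the `sort` objects are assumptions for illustration only; consult the endpoint specifications for the authoritative schema.

```typescript
// Minimal sketch of the unified V2 search structure.
// Assumptions: local gateway URL, bearer-token auth, and the `field`/`order`
// property names inside the sort objects are illustrative only.
const baseUrl = "http://localhost:8080"; // assumed cluster address

async function searchUserTasks(assignee: string) {
  const response = await fetch(`${baseUrl}/v2/user-tasks/search`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.CAMUNDA_TOKEN}`, // assumed auth scheme
    },
    body: JSON.stringify({
      filter: { assignee }, // endpoint-specific filters live inside `filter`
      sort: [{ field: "creationDate", order: "ASC" }], // property names assumed
      page: { limit: 10 }, // pagination lives inside `page`
    }),
  });

  const result = await response.json();
  // Unified response shape: `items` holds the hits, `page` holds paging info.
  console.log(`Total: ${result.page.totalItems}`);
  return result.items;
}
```

To fetch the next page, pass `result.page.lastSortValues` as `searchAfter` inside the `page` object of the follow-up request.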
+ + + +### Tasklist + +#### Search tasks + +- **V1 endpoint**: `POST /v1/tasks/search` +- **V2 endpoint**: `POST /v2/user-tasks/search` + + + + + +- Filter attribute `assigned (boolean)` removed + - Use filter attribute `assignee` with condition `{ "$exists": false }` +- Filter attribute `assignees (string[])` removed + - Use filter attribute `assignee` with condition `{ “$in”: [ “xyz”, ... ] }` +- Filter attribute `taskDefinitionId` renamed + - Use filter attribute `elementId` +- Filter attribute `candidateGroups (string[])` removed + - Use filter attribute `candidateGroup` with condition `{ “$in”: [ “xyz”, ... ] }` +- Filter attribute `candidateUsers (string[])` removed + - Use filter attribute `candidateUser` with condition `{ “$in”: [ “xyz”, ... ] }` + + + + + + + + + + + + + + + + diff --git a/docs/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md b/docs/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md new file mode 100644 index 00000000000..4b6a81d4d47 --- /dev/null +++ b/docs/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md @@ -0,0 +1,381 @@ +--- +id: migrate-to-zeebe-user-tasks +title: Migrate to Zeebe user tasks +description: "Learn how to migrate job worker-based user tasks to Zeebe user tasks." +--- + +import DocCardList from '@theme/DocCardList'; +import FormViewer from "@site/src/mdx/FormViewer"; +import YesItem from "../tasklist-api-rest/assets/react-components/YesItem"; +import NoItem from "../tasklist-api-rest/assets/react-components/NoItem"; +import TableTextSmall from "../tasklist-api-rest/assets/react-components/TableTextSmall"; +import userTaskMigrationDecisionHelperForm from "../tasklist-api-rest/assets/forms/userTaskMigrationDecisionHelperForm.js"; +import "../tasklist-api-rest/assets/css/condensedTable.module.css"; +import styles from "../tasklist-api-rest/assets/css/cleanImages.module.css"; +import ZeebeTaskSelectionImg from '../tasklist-api-rest/assets/img/zeebe-user-task-selection.png'; + +Camunda 8.5 introduced a new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type: Zeebe user tasks. + +Zeebe user tasks have several benefits compared to job worked-based user tasks. It includes: + +- Running directly on the automation engine for high performance. +- Removing dependencies and round trips to Tasklist. +- A powerful API that supports the full task lifecycle. + +In this guide, you will learn: + +- Under which circumstances and when you should migrate. +- How to estimate the impact on a project. +- Steps you need to take for a successful migration without interrupting your operations. + +## Decide on your migration path + +Zeebe user tasks require migration of the user tasks in both your diagrams and the task API. + +With this in mind, you can migrate at your own pace. If you should migrate now or later, and what is required to migrate depends on your current setup and future plans. + +### Task type differences + +Learn the differences between both task types and make an informed decision, and understand the new capabilities of Zeebe user tasks. Refer to this table for important high-level differences of the two task types: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
Job worker-based user tasks
+ Existing implementation +
+
Zeebe user tasks
+ Recommended for existing and new projects +
Implementation locationTasklist +
Zeebe
+ Does not require Tasklist to run +
Compatible versions8.0 +8.5 +
Supports Tasklist UI
API
Supports Tasklist API + + Full support + +
Partially
+ Queries, GET tasks, forms, variables + ℹ Currently, you must use the Camunda 8 and Tasklist APIs to use Zeebe user tasks +
Supports Camunda 8 API + + Task state operations (assign/update/complete) +
Supports job workers
Supports task lifecycle events + + Basic only: created/completed/canceled + + + Full lifecycle events including custom actions +
Supports task listeners + +
Extras
Custom actions/outcomes + + Custom actions can be defined on any operation excluding unassign (DELETE assignment, send update beforehand) +
Supports task reports in Optimize
Recommendations + You can continue to use this task type on existing projects when you have a custom task application running on it and do not require any of the above features. + + Recommended for existing and new projects when you run Tasklist. + Migrate existing projects and task applications/clients to this task type when you require one of the features above, or the following use cases: + +
    +
  • Implement a full task lifecycle
  • +
  • React on any change/events in tasks, such as assignments, escalations, due date updates, or any custom actions
  • +
  • Send notifications
  • +
  • Track task or team performance
  • +
  • Build an audit log on task events
  • +
  • Enrich tasks with business data
  • +
+
+
+ +## Switch the implementation type of user tasks + +We recommend you migrate process-by-process, allowing you to thoroughly test the processes in your test environments or via your [CI/CD](/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md). To do this, take the following steps: + +1. Open a diagram you want to migrate. +2. Click on a user task. +3. Check if the task has an embedded form. + - If a form is embedded, [transform it into a linked form](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked) before you change the task type implementation. Press `Ctrl+Z` or `⌘+Z` to undo if you accidentally removed your embedded form. +4. Open the **Implementation** section in the properties panel. +5. Click the **Type** dropdown and select **Zeebe user task**. The linked form or external form reference will be preserved. + +Task Type Selection + +Repeat these steps for all user tasks in the process. Then, deploy the process to your development cluster and test it by running the process and ensuring your custom task applications work. + +## Use Camunda 8 API + +:::note +The Tasklist REST API is not deprecated, and you still need it for queries on both task types. +::: + +The following table provides a breakdown of which operations are supported in which API, and for which user tasks. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OperationTasklist APICamunda 8 API
Deprecated on 8.7Supported from 8.6+
Query user task Job worker-based user tasks Zeebe user tasks
Get user task Job worker-based user tasks Zeebe user tasks
Retrieve task variables Job worker-based user tasks Zeebe user tasks
Get user task form Job worker-based user tasks Zeebe user tasks
Change task assignment Job worker-based user tasks Zeebe user tasks
Complete task Job worker-based user tasks Zeebe user tasks
Update task- Zeebe user tasks
Save and retrieve draft variables Job worker-based user tasks -
+ +The following table outlines the respective endpoints. Click the endpoints to follow to the API documentation and inspect the differences in the request and response objects. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OperationTasklist APICamunda 8 API
Query user tasks + + POST /tasks/search + + + + POST /user-tasks/search + +
Get user task + + GET /tasks/:taskId + + + + GET /user-tasks/:userTaskKey + +
Retrieve task variables + + GET /variables/:variableId + + + + POST /tasks/:taskId/variables/search + +
Get task form + + GET /forms/:formId + + + + GET /user-tasks/:userTaskKey/form + +
Assign a task + + PATCH /tasks/:taskId/assign + + + + POST /user-tasks/:userTaskKey/assignment + +
Unassign a task + + PATCH /tasks/:taskId/unassign + + + + DELETE /user-tasks/:userTaskKey/assignee + +
Complete task + + PATCH /tasks/:taskId/complete + + + + POST /user-tasks/:userTaskKey/completion + +
Update task- + + PATCH /user-tasks/:userTaskKey + +
Save and retrieve draft variables + + POST /tasks/:taskId/variables + + -
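As a rough illustration of the endpoint mapping above, the sketch below completes a user task first through the V1 Tasklist endpoint and then through its V2 counterpart. The base URL, authentication header, and request-body shapes (`variables`, `action`) are assumptions for illustration; the linked endpoint specifications remain the source of truth.

```typescript
// Side-by-side sketch of the old and new completion calls.
// Assumptions: base URL, bearer-token auth, and the request-body shapes shown here.
const baseUrl = "http://localhost:8080";
const headers = {
  "Content-Type": "application/json",
  Authorization: `Bearer ${process.env.CAMUNDA_TOKEN}`, // assumed auth scheme
};

// V1 Tasklist API: PATCH /v1/tasks/:taskId/complete (job worker-based user tasks)
async function completeTaskV1(taskId: string) {
  await fetch(`${baseUrl}/v1/tasks/${taskId}/complete`, {
    method: "PATCH",
    headers,
    body: JSON.stringify({
      variables: [{ name: "approved", value: "true" }], // assumed payload shape
    }),
  });
}

// V2 Camunda 8 API: POST /v2/user-tasks/:userTaskKey/completion (Zeebe user tasks)
async function completeTaskV2(userTaskKey: string) {
  await fetch(`${baseUrl}/v2/user-tasks/${userTaskKey}/completion`, {
    method: "POST",
    headers,
    body: JSON.stringify({
      variables: { approved: true }, // assumed payload shape
      action: "approve", // custom action, as described for the lifecycle endpoints
    }),
  });
}
```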
+ +### Zeebe Java client + +Use the Zeebe Java client when you are building your task application in Java. The client assists with managing authentication and request/response objects. + +### API differences + + + +Refer to the dedicated sections and API explorers to learn the details of each API. + + + +## Troubleshooting and common issues + +If your task application does not work properly after migration, check the following: + +- **The endpoints return specific error messages when you run them on the wrong task type**: Ensure you call the right endpoint for the right task type; see the [table](#use-camunda-8-api) above. +- **Forms do not appear**: Ensure you have extracted embedded forms, if any, and [transformed them into linked forms](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked), before you change the task type implementation. +- **Task update operation does not work**: The update operation is only available to Zeebe user tasks. diff --git a/docs/apis-tools/node-js-sdk.md b/docs/apis-tools/node-js-sdk.md index 3186f048f21..b11e5f327d7 100644 --- a/docs/apis-tools/node-js-sdk.md +++ b/docs/apis-tools/node-js-sdk.md @@ -1,7 +1,7 @@ --- id: node-js-sdk title: Node.js -description: Get started with the official Camunda 8 JavaScript SDK for Node.js, available via npm. +description: Get started with the official Camunda 8 JavaScript SDK for Node.js. --- As of 8.5.0, the official [Camunda 8 JavaScript SDK for Node.js](https://github.com/camunda/camunda-8-js-sdk) is available via [npm](https://www.npmjs.com/package/@camunda8/sdk). @@ -234,20 +234,20 @@ This will start a service task worker that runs in an asynchronous loop, invokin The handler must return a job completion function - `fail`, `complete`, or `forward`. This is enforced by the type system and ensures you do not write code that does not have code paths that do not respond to Zeebe after taking a job. The `job.complete` function can take an object that represents variables to update. -### Create a programmatic human task worker +### Create a programmatic user task worker -Our process has a [human task](/guides/getting-started-orchestrate-human-tasks.md) after the [service task](/guides/getting-started-orchestrate-microservices.md). The service task worker will complete the service task job, and we will complete the human task using the Tasklist API client. +Our process has a [user task](/guides/getting-started-orchestrate-human-tasks.md) after the [service task](/guides/getting-started-orchestrate-microservices.md). The service task worker will complete the service task job, and we will complete the user task using the Tasklist API client. 
Add the following code beneath the service worker code: ```typescript -console.log(`Starting human task poller...`); +console.log(`Starting user task poller...`); setInterval(async () => { const res = await tasklist.searchTasks({ state: "CREATED", }); if (res.length > 0) { - console.log(`[Tasklist] fetched ${res.length} human tasks`); + console.log(`[Tasklist] fetched ${res.length} user tasks`); res.forEach(async (task) => { console.log( `[Tasklist] claiming task ${task.id} from process ${task.processInstanceKey}` @@ -258,19 +258,19 @@ setInterval(async () => { allowOverrideAssignment: true, }); console.log( - `[Tasklist] servicing human task ${t.id} from process ${t.processInstanceKey}` + `[Tasklist] servicing user task ${t.id} from process ${t.processInstanceKey}` ); await tasklist.completeTask(t.id, { - humanTaskStatus: "Got done", + userTaskStatus: "Got done", }); }); } else { - console.log("No human tasks found"); + console.log("No user tasks found"); } }, 3000); ``` -We now have an asynchronously polling service worker and an asynchronously polling human task worker. +We now have an asynchronously polling service worker and an asynchronously polling user task worker. The last step is to create a process instance. @@ -278,7 +278,7 @@ The last step is to create a process instance. There are two options for creating a process instance: -- For long-running processes, use `createProcessInstance`, which returns as soon as the process instance is created with the process instance id. +- For long-running processes, use `createProcessInstance`, which returns as soon as the process instance is created with the process instance ID. - For the shorter-running process we are using, use `createProcessInstanceWithResult`, which awaits the completion of the process and returns with the final variable values. 1. Locate the following line in the `main` function: @@ -295,11 +295,11 @@ console.log( const p = await zeebe.createProcessInstanceWithResult({ bpmnProcessId: `c8-sdk-demo`, variables: { - humanTaskStatus: "Needs doing", + userTaskStatus: "Needs doing", }, }); console.log(`[Zeebe] Finished Process Instance ${p.processInstanceKey}`); -console.log(`[Zeebe] humanTaskStatus is "${p.variables.humanTaskStatus}"`); +console.log(`[Zeebe] userTaskStatus is "${p.variables.userTaskStatus}"`); console.log( `[Zeebe] serviceTaskOutcome is "${p.variables.serviceTaskOutcome}"` ); @@ -315,14 +315,14 @@ You should see a output similar to the following: ``` Creating worker... -Starting human task poller... +Starting user task poller... [Zeebe] Deployed process c8-sdk-demo [Zeebe Worker] handling job of type service-task -[Tasklist] fetched 1 human tasks +[Tasklist] fetched 1 user tasks [Tasklist] claiming task 2251799814895765 from process 2251799814900881 -[Tasklist] servicing human task 2251799814895765 from process 2251799814900881 +[Tasklist] servicing user task 2251799814895765 from process 2251799814900881 [Zeebe] Finished Process Instance 2251799814900881 -[Zeebe] humanTaskStatus is "Got done" +[Zeebe] userTaskStatus is "Got done" [Zeebe] serviceTaskOutcome is "We did it!" ``` diff --git a/docs/apis-tools/operate-api/overview.md b/docs/apis-tools/operate-api/overview.md index 33f45476b82..9d126e74bac 100644 --- a/docs/apis-tools/operate-api/overview.md +++ b/docs/apis-tools/operate-api/overview.md @@ -5,6 +5,10 @@ slug: /apis-tools/operate-api/overview description: "Operate API is a REST API and provides searching, getting, and changing Operate data. Requests and responses are in JSON." 
--- +:::note +To migrate from Camunda's V1 component REST APIs to the V2 [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md), review [migrating to the Camunda 8 API](/apis-tools/migration-manuals/migrate-to-camunda-api.md). +::: + Operate API is a REST API and provides searching, getting, and changing Operate data. Requests and responses are in JSON notation. Some objects have additional endpoints. For example, `process-definitions` has an endpoint to get the process-definition as XML representation. @@ -19,7 +23,7 @@ Work with this API in our [Postman collection](https://www.postman.com/camundate For SaaS: `https://${REGION}.operate.camunda.io:443/${CLUSTER_ID}/v1/`, and for Self-Managed installations: `http://localhost:8080/v1/`. :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Operate component. ::: @@ -35,7 +39,7 @@ A Swagger UI is also available within a running instance of Operate, at `https:/ For SaaS: `https://${REGION}.operate.camunda.io/${CLUSTER_ID}/swagger-ui.html`, and for Self-Managed installations: `http://localhost:8080/swagger-ui.html`. :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). 
::: ## Multi-tenancy diff --git a/docs/apis-tools/operate-api/specifications/by-id.api.mdx b/docs/apis-tools/operate-api/specifications/by-id.api.mdx index eef02cea212..5bd23389b1c 100644 --- a/docs/apis-tools/operate-api/specifications/by-id.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-id.api.mdx @@ -5,7 +5,7 @@ description: "Get decision instance by id" sidebar_label: "Get decision instance by id" hide_title: true hide_table_of_contents: true -api: eJzlV1tv2zYU/isCn7bOiZw2HQpjGOAuSqHVsI3YaQcEQUFLxzEbidRIyqkh6L/vHOpix5a9bE8b+iSK537Vp4JZ/mDY4I5dQSSMUDKUxnIZAbvvMZWB5pYuYzZgiw0+eizjmqdgQZNYwSS+IFEQSUg8Zdyu8ByDibTISBovw9hTSy+ubXiiMdJjGv7MhQY0YHUOPWaiFaScDQpmNxlpNlYL+cDK8p6YTaakAUP01/0+PZ4bmuVRBMag4khJC9ISC8+yREQuEv+rIb7i0I5afIXIUoCa4raisoKBHfrSY4+w2bkXaOoBNAovlU65ra5+viROjNTCoY4eA5mnlPnrYTgKrvAi+DQc3Q7n7nw7/jiefB6702wa/BZeh3h/j/pgzZPcxXLVqfgZyzUXSa67uTBOStUVLIUUxPzxxTHVok2vvFyw6YCwO6sNeevT3zCOXfedYPiEfSqqip90783rXam5Yz1esissyCycjL/Mh+9HARJG4Ty4GY6+BH9Mb4IZkfYqt60o1RD7OE/sqdpBHMost2aHh2vNNzRlFlLzrxtXHssY2e2iIMkKm9DV/o5wLjqO1utJbv8bbmOS8wSOtI8jyRi+vagvTmSgCrdiAcml7TR4QgESiXzZtctCicGJ2KMVCcYe32mYwUUC6U//dLfRasrNC2cjxYHnD92pbvd5F9F2DtNuUgKtlW4z8eYwE9dKL0Qcg3yeg1f+q/9/uJeH4d5UBQcqvVG5jsCTynpLlcv4++iCt13zMJyG3k7AHjiB7yAfhCIgyrWwG4e5FsA16DOHQe7we1JgCtSjAPd2v4+9PoA9RF7eYuM5zIZQbqUI3j2ASwWhtwHz1xd+I3PWyBi/EHHJyBm9bvBfrhPkL6rklgPfL1bK2HJQZEpbYl5zLTgWwmWUaFVhl9x9AVmiIp64632/5yvwiECrn9CjxXdqgcr6ucMhaOO5unf9d/1OTcR6RMu2MbZ6VtZmnXoq5k5NDqI2hZoRXxV0U5ztJzETBJmarxqbTBE8zIOzWQsdHJKu5dDD3YK3WmoXnUP0XjGxhvu66dXfP89dXwm5VE687q+JQ/fgTfMFzgyFchiw8rjD0/iwYg0el7EXqTRLgLZTDQPbpqKT16jFWfFShShOUWc7SatVTp2wUspWgIpGF1WTW1UjUVAGu+jp6ek84ikuPH6OBikJ6CQg9ifeOm+j+qa3JxyryLTSQrl3X8MSNKCXfq3I+A4/NACRXZz3z/tVVxmbcrlj6PQIPctZWx4L36yfJRxLWdYOFvV43bH1Bduize2A4eUAVWIbVYNyx4piwQ3c6qQs6Rq/C9oN/Xau3BTGwtAZ53jJEwMHPrUrkv1wU/9w/egd/ynrDKFpXrlhLexirP4VItxWErRdAY+xC8mpijDEBsrsjsjB3xiNTbuGPgRz5OU55anN5V5rO+2d7vzy3jF4c/UI8tfWOUuv5F9Z/gXHqxkU +api: eJzlV21v2zYQ/ivCfVo7JXLadCiEYoBbK4VWwwlipx0QBAVNnWM2EqmSlFND0H8fjnqxY8tetk8b+skS74V3z734UQmW3RsIb2GEXBihZCyNZZIj3PmgctTM0mECIczXcQI+5EyzDC1qMitBsgwhBEEiISGEnNkl+JCg4VrkZA0hxImnFl7S3OGJ9hIfNH4vhMYEQqsL9MHwJWYMwhLsOifPxmoh76Gq7kjZ5EoaNCR/NRjQz9OLpgXnaAz4wJW0KC2psDxPBXeZBN8M6ZX796j5N+SWEtSUtxX1LSLpicWHB1xvnQtp8R41+LBQOmO2PvrtnDSNZRb3ffiAssgI+YthPI5G4EP0eTi+Gc7c883k0+Tyy8Q9Ta+iD/FFHI3grvIBVywtXC6jXsdPVC6YSAvdr5VrRVCNcCGkIOVPz86pMW175fmGbQfE/ai24k1Mf6M4cd13ROEzaiPqih8N7/WrbauZUz1cslH0IZ7Gl5Ovs+H7cQQ+jONZdD0cf43+vLqOpiTaqdymolRDjaZI7bHaYRLLvLBmS4dpzdY0ZRYz868bVx5CjO7tk1Q+WGFTOtrdES5Ep9FFfVnY/0bYPugixQPt40QywR/P6osjCNTp1ioombS9Fx5xAFVF4vO+XRbLFUtF4tGKRGMP77Rcq3mK2a//dLfRairMM2cjQ2PYfT/U3T7vE9reYdoGJdJa6Q6J1/tIXCg9F0mC8ikGL4OX//90z/fTva4LjlR6owrN0ZPKegtVyOTn6II3ffMwvIq9rYQ9dAY/AR7EIpAXWti141xzZBr1ieMgt3eVXwJX6kGge7vb5V4f0e4zL2++9hxny9AuFdG7e3RQEHsLIVidBa3NSWtjglIkFVAwetXyv0KnEEJZg1uFQVAulbFVWOZKW1JeMS3YPK2xJVld2AVz/4CQKs5Sd7wb92yJHglo9RN7tEv0qAXq208dD1F6x93bwdtBrydSPeBl0xgbP0tr814/tXKvJ0dR20JNSa9Oui3O5i8xF0SZ2n81uLyKroez6GTaUQfHpBu7yn9S8M5LE6ILiN5rJWi1L9pe/ePLzPWVkAvlzJv+unTsHr2rYp4KTqnsJ6w85vi0x7gVK/SYTDyusjxF2k4NDeyaip681u1CaS9TUlhFne0srVYFdcJSKVsTKhpdxl0N60aipEwYBI+Pj6ecZYVM2ClXGYGQCo7SOBwb3MbNib9jnChuOmuh3HugcYEaJcegcWQCxx9agghnp4PTQd1VxmZMbl10fISeYNaVx+IPG+QpE5J8ugDLZrxuYXUGG7a5GTDwIRQJfXrVg3ILZTlnBm90WlV0/L1A7YZ+M1duChNh6DmBcMFSg3sxdSsSfrluPrheeIc/ynpTaJtXrqGjXQDNpxDxtoqo7RJZgtoFVQuGnGNut0z2vsZobLo19DGagQ+sIJw6LHda23nvDefde6fgzdQDyt+74Cy9UnxV9RfHqxkU sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff 
--git a/docs/apis-tools/operate-api/specifications/by-key-1.api.mdx b/docs/apis-tools/operate-api/specifications/by-key-1.api.mdx index 79260b9703a..3975e1c8537 100644 --- a/docs/apis-tools/operate-api/specifications/by-key-1.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-key-1.api.mdx @@ -5,7 +5,7 @@ description: "Get process instance by key" sidebar_label: "Get process instance by key" hide_title: true hide_table_of_contents: true -api: eJzlVllv4zYQ/isEn/ZwLGc3LRZGsYA3cRZu0sSI3e1DEBSUNLa5kUiVpJwagv57Z6jDjq0E3sdi/WDxmHu+4UzBnVhaPrznU6MjsHairBMqAv7Q4zoDI5zUahLzIQ83V7D5+5T3eCaMSMGBIcaCK9zg/SNs8E4qXGbCrXAdg42MzEgCHiI30wuWVXqYbBT1uIF/cmkAlTiTQ4/baAWp4MOCu01GoqVysASDpAttUuGqo1/PeFk+ELvNtLJgiePDYECf56pneUQ6kT/SKEo5IhFZlsjI+xd8t0RXHGrW4XeIHPlsKBpOVlrI12PM83yk+hsGS1Y6XmX6+IGYwixVTT7iHR7rjFRLL1YYdOPqeDM8/WWin250DE2Sj+dHeuMuhINOa0DFL94hZ9cNMeUpAW90Pp98G+PB+e0f0+vxfHxB69HN+fgalw/bGF7AQipJ+Trebky2UO7VIO4B38vmkTRRngjzZu/2rRcqXUKS9kumxF+Pn3UhcKLWIpExI6iDdS8jEX0NE0jf/ygiKcy5PRJfKVotlt35auuy67I6OLjYicnYGG3aSHw8jMSlNqGMY1DPY/AuePf/d/fs0N27KuFAqbc6NxEwpR1b6FzFPwcKfumqh9F0wnYcZuAZfoJ40IsIUW6k2/jmGQI+QubEN5R7fOwKDIF+lOB3D/s99Cu4gwbKwg2rei/25JWmVr0EHwrqwkMerE+Dmuek4bFBgSwlJ2PMumnkuUmQvqiCWw6DoFhp68phkWnjiHgtjBSYCB9RuqsSuxB5QpFMdCQSf7xv93wFjC5oVKAhwOGeIFBp7/vHGHU8F/dp8GnQKYlIX5CyBcZWzsq5rFNORdwpyQ8WTaJmRFc53SSnTbbI5JUPfj0F3U7Hd6P5+GQ2ns0mtzfNRFTzUW/fSXgrpTbRG0T7iog31JcNVn//a+5xJdVCe/YaX7d+UgM2zUOsGXLl0GHNhJ+C8OPkGphQMYt0miVAr9M+qmjFGrFYKyzV2Hs1IdtzOqNzQsJKa1f1cypdFE1mVUAipyyi6OnpqR+JFB880UeFFAQ0EnBiI9o6btf1SW+POdaRbbml9vvAwAKwdUcQ1IJsQFLXzZDFT/uD/qBClXWpUDuKXi+hZzFr0+PgXxdkicBUlrWBRV1e93x9ytsRZVtgeDYkkQijqlDueVGEwsKfJilLOsa+YHzRb+vKV2EsLa2xjhcisXBgU/tE8jd39eD8lr08XHe60IBXbXxVJzntcOlR6f9LGrxWIGKEIVlV3YwQQZnb4TkYoqlu2nfo63iOtCKnQLXB3MO2l95pz29fPAGb60dQn1vrHG3JvrL8DzUuW/w= +api: eJzlVt9v2zYQ/leIe2o7JXLabiiEYUCWOIWXNAkSr3sIjIGizjYbiVTJkzND0P8+HCU5jq0E2ePQF1sS77uf3/GuBpILD8kdXDur0PuJ8SSNQphFYEt0krQ1kwwSSNfnuP77CCIopZMFEjoG1mBkgZDAPa4hAm0ggVLSEiLI0CunS9YACZzjWti5KFs7QveGInD4vdIOM0jIVRiBV0ssJCQ10Lpk1doQLtBBBHPrCkntp18+QtPMGO5Lazx6RrwfjfjvqenbSrFNiEBZQ2iIRWRZ5lqF+OJvnuXqfcs2/YaKOGbH2SDdWuFYX+NewLHpr+i8bm28CPrwfh80lYstnCenzYKl0rIwfdWyQYlSOjR0/npng/xZbh8ubYY9FV6P9yQdnUrCQW/QZM+eeRo8YVBVMD2PT6aTr2OI4OTqy/XFeDo+5efjy5PxxfgUZg0zT+msq22nJbU2R2m2UnqKc2001/z1UREaaejFFO80T9ANSjtV5dK92Tl9G5RqylnTbts1TdNE8HGIxROzkrnOBLcLenqezaWzaY7FT/+V1VyEyr+SowV6LxfD1dz09tBh+2HvYCsnY+es22Tiw34mzqxLdZaheZqDd/G7/3+4H/fDvWkLjlx6byunUBhLYm4rk/0YLPh5qB+OrydiK2CBAfAD5IPvS1SV07QOAzhF6dAdhKF0N2uiGpS19xrD22x3Dn9G2hvCIl2Ldn4XSEvL436BIRU8yROIV0dxhznoMT6u73HdADvjVv0yULkcEqjb5DZJHNdL66lJ6tI6YuGVdFqmeZtbPmsLO5dVzpnMrZJ5+Lzr93SJgg943eBFgpYomAKt9cNwGVu3o+7T6NNoUBOLPqPlkRiPepZE5aCeVnhQU1hO+kLdslwbdF+cTbFlqc9D8rtN6up6fHM8HR/cjm9vJ1eX/VbV4XjybxV8o6VzMTgUhl8Qgl76rOfqH39NA6+0mdsA7/h1FbY9FNdVmmvFoewHbIUMm5SQivQKhTSZULYoc+TbaZdV/CR6tXPrRGGNJsvMDkhytmImLK2ldtpz60oVatgSiYPySRw/PDwcKllUJpOHyhachFwrND7kscvbRfcl2gFnVvkNWtvwHjuco0OjMO4U+Zi1rvpFDY4OR4ejllWeCmm2DL3cQk9ytikP4T8Ul7nUYR0JDtZde93B6gg2K8pjg0EECaucRV2j3EFdp9Ljny5vGv78vUIXmv6xr0IXZtrzcwbJXOYe93zaXJHw5qZbvt+K5xf0wRB68pp16Oq84jeI2t04/Da8li1RZuiCV+3JsVJY0hZmbxHnvtncQ5/HU4hAVpyox8XuKbeD9kF/fv09CIipvUfz28Y74lf2r2n+BXkpdA4= sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -40,7 +40,7 @@ Get process instance by key Success -
Schema
+
Schema
Invalid request diff --git a/docs/apis-tools/operate-api/specifications/by-key-2.api.mdx b/docs/apis-tools/operate-api/specifications/by-key-2.api.mdx index e2d2de91467..39f7967ede2 100644 --- a/docs/apis-tools/operate-api/specifications/by-key-2.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-key-2.api.mdx @@ -5,7 +5,7 @@ description: "Get process definition by key" sidebar_label: "Get process definition by key" hide_title: true hide_table_of_contents: true -api: eJzlVltv0zAU/iuWn7h0TTcGmiqENESHBohNaxEP04Sc5LT1ltjBdjqqKP+dc+wkXdsw4BHx0sY+9+/cXHEnFpaPr/ml0QlY+w7mUkknteI3A64LMIIO5ykf83j9EdbfjviAF8KIHBwYEq24wgPS72CNNKnwsxBuid8p2MTIwqsbc5Rmes6KYImlG1MDbuB7KQ2gGWdKGHCbLCEXfFxxty5IuVQOFmCQda5NLly4enXM6/qGxG2hlQVLEkejEf1tG5+WCVlF+USjKuWIRRRFJhMfYXRria/at6zjW0gcRW0IDyeDFYr2T9wbNPB0rNYZqRZEWCGAMlh9VM2LI+KOi1w1WaJ09OjDsIRyvUSiSpfR1X6m65rox32wnauVyGTKKD9g3a/hQ3DiDPLnfwujdcKV9g8hyNFvsegHUypUpZJ+Yrh4DJWJMdp0SLzYR+JMm1imKahtDJ5Fz/79cI/3w70KCQdKvdWlSYAp7dhclyr9P6rgZV8/nF6eswcBM/AC/wEeeGkhKY10az/zYxAGzIGfgtc39aBCCPSdBH+62R3978H1zH0Wr1lYGrhMlpp2zAI8GLQ+xjxaHUaN1MFGykYVCtWcHDKrdgeVJkOJKgBcj6OoWmrr6nFVaOOIeSWMFJgMjyrRQnLnoswIzUwnIvPXu77PlsCIQGOc9pfDM5VBsD4kIMnGtrqT0cmoVxOx/kLLpjg2epbOFb16AnOvJr8R22RNiS8E3SaoS7go5EcPf7PALy4nV6ezycF0Mp2eX3xul3kjRyvoQdI7LY2L3iE6Bybecp+19frh68zXllRz7cWbGrvwjwxgl2WMfUOh7AesmfDrG/+cXAETKmWJzosMaEK1ldUWO32xVi32C8s1lo6m6vaSzuiSKmGptaOKD+2LqsmtUEgUlMUqur+/HyYix6EnhmiQQEAnAZ8axNvg9qm5GewIpzqxnbTU/hwZmIMB9DJqFNlo6y3AD4ej4ShUlXW5UA8M/a6NtlDrEuTgh4uKTGAy68bFqmmxa746DMNnt8nwdkxKsZRCs1zzqoqFhS8mq2u6xv1gfPNvest3YiotfWM3z0VmYc+rblTyJ1fNq+8pe+xt2BtGW8Jq7Xs7K+mEn742/W+NU4kvQaRYjORXoJxiHRXugczeG5C6p5tH7ycz5BUlgdUBulPhXnuvP6/fegY203eg3nTeOTqSf3X9Ez6D8Xg= +api: eJzlVt9v4zYM/lcEPm03N0573XAwhgEdljt0HdaiybCHIhhkmY51tSWfJKcLDP/vAyXb+eXrbo/DvSSWRH4kP5KiWnB8YyF5ggejBVr7C+ZSSSe1gnUEukbDaXGbQQLp7g53f11BBDU3vEKHhlRbULxCSOAZdxCBVJBAzV0BEWRohZG1h0vgDndM56wOlli2NxWBwU+NNJhB4kyDEVhRYMUhacHtagKXyuEGDUSQa1NxF7Z+uIauW5O6rbWyaEnjaj6nv2Pjy0aQVYhAaOVQORLhdV1K4SOMP1qSa88t6/QjCkdRG+LDyWCFov0S96KenlHUOiPVhg62aKwMVl+FeXt1IL3im0mwtK5Un0TK1oSEQ8WVmzykU+lK2jovhK6j8+spVm/VlpcyY5Q+tO7z7NZGpyVW3/1Xlq3jrrFfyFCF1vLNNNdSWceVmD4MG6+xsjBGm5GJt+dMvNcmlVmG6piDN/Gb/3+41+fhPoaEI6Xe6sYIZEo7lutGZV9HFXw/1Q83D7fsIGCGXuEr4KOLwKJojHQ7PxJS5AbNhb8kn9Zd1ILQ+lmiX61PJ8MHdBNjgaU7FmZKha7QNII26Mmg6ZJAvL2Me62LvZaN22fcdUAOme0wohpTQgJtILhL4rgttHVd0tbaOBLeciN5WgZ+6SwkN+dNSWyWWvDSb5/6viqQ0QHd8jTeXIGMyiBYnxGRZOMY7t383XwSiUQ/g7Ivjj1O4Vw9iROEJ5H8wByStSS5EPSQoDHhvJZ3nv5+vt8/LB5vVouL5WK5vL3/fZj1vR6NoIOkjyi9i94hWgchGKTfD/X6658rX1tS5dqr9zV2798gyB6atJSCQjkPWDPupzvjwsktMq4yJnRVl0g31FBZQ7HTFxtgc21YpZV0mqrbazqjG6qEQmtHFR/alwufw1BIFJRN4vjl5WUmeNWojM+EroiEUgpU1vPY8/ZbvxOdKGda2FFbar+ODeZoUAmMeyAbHz0V4HI2n81DVVlXcXVg6N/a6Ii1MUEO/3ZxXXKpCNW72PYt9gTby3D5nDYZRJAQ6Drqm+UJ2jblFv8wZdfR9qcGjW/+fW/5Tsykpe8MkpyXFs+8Gq9K+OaxfxR+y157Ok6GMZSw2vneLhtaQRRebf63W3cRFMgzNN6vcHIjBNbuQOfsiUjdM95HHxYriIA3RNZI6EmFe/RJf3782QuwlX5G9dPonaMl+dd1/wB6G/xX sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -40,7 +40,7 @@ Get process definition by key Success -
Schema
+
Schema
Invalid request diff --git a/docs/apis-tools/operate-api/specifications/by-key-3.api.mdx b/docs/apis-tools/operate-api/specifications/by-key-3.api.mdx index 15e65cd3da4..8e4bbe0400e 100644 --- a/docs/apis-tools/operate-api/specifications/by-key-3.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-key-3.api.mdx @@ -5,7 +5,7 @@ description: "Get incident by key" sidebar_label: "Get incident by key" hide_title: true hide_table_of_contents: true -api: eJzlVm1v2zYQ/isCP61dGjtNNhTGMMCNFU+NIxmynRYLAoOWzzETWdRIypkh6L/3jnqJE6tr83HoF5s63utzd7zLmeF3mvVumJdEYgmJYbdHTKaguBEy8Zasxxa7S9jNT9kRS7niGzCgSCJnCX7g/QPs8E4keEy5WeN5CTpSIiUNSERpR64cURs4Ygr+yYQCVG5UBkdMR2vYcNbLmdmlpFIkBu5AIetKqg03Jen3M1YUtySuU5lo0CTxvtulv+cmJ1kUgdYoH0lUhUaRhadpLCIbV+deE19+aFku7iEiH1NFKBhRWqEYf8Q9K0emB7ASiSBbl68V9RJteBLBjwuWLA2rNkokd8gJSbah3M78ydg99y48d4DUmX/pB599PHnB/Ko/Hnv+cO6GYRAi6VPwce4H89Cdhp47QcJ54A+8qRf4DYv7ZRr2z6fz6/5o5jbU8/5o5A7m7si9cv1pQ575f/X9gb0hyty9xlukX7mTSX/ozife36jjy7nrDqxzlZoBujvZN/pEILP9Zw5dBOEVOj2dXwQzf8BuEZEN4sjvWkDBu0iBLYKp2LQzIPzmP/HE6L1rl6LwhmF/ah0P3UkwurbHsYuQ+UPryL1cvCKPkPDEUNMdeEW3wsREajq1KIh81tYAXrLlsVg61GmgzbcbAWtuEcPm19c2BGGU6e/GdfqefScboqr21sv2wt4Hw1VKqgaJ00MkLqRaiCXi9RyDt523//9wzw7DDcuEA6Vey0xF4CTSOCuZJcufowp+a+uH/thz9gJ2wAr8BHjQcwZRpoTZ2Zm9AK5AvbPz7AYfqBwhkA8C7Nfty9E9BNPMbWexc8pRjyvAWtJmcAcWAhr6PdbZnnRqXt3JkbVgZFxt630hUzHy5SWYRa/TyddSm6KXp1IZYt5yJTgCbxGkuzKRK57FhFwsIx5b8ks/p2tw6II2Eto1DH5Tykvrx3a4oo3n6j50P3RbNRHrN7Q8FcKTnrUxaauekrlVk91j6sRMiK8Muk5Gk1yeiksLerVsBWOXJs67Cc5PnIH14lXJoYf7CW60VC5ah+i7ZGI190Vdm58+T20diWQlrXhVT4FdCMEZZwvsEQrlMGDpcLt04Z8RW3B4snQiuUljoNeo2m2curDp5NRqsTecjcR1SVIlW0mjZEaVsJbSlMOXWhVVk1tlIVFQGqvo8fHxOOIbfOD4MRokENBJwAWReCvcRhXl6IXwUka6kRbSfncUrEABetmpFOkOaaVCLoM9Oe4ed8uq0mbDkz1D7S3zDKsmLQb+NZ005pjConIsr9rphm1PbHKrhsJzj1Rh2ZSNccPyfME1zFRcFETGd1/Zpn7qI9t1S6HpjP264rGGA1+aJ5D9ElZ7+RvncGdvdbku0mRnuzfO6AuPtvrsb0FL0Br4EsuNvClv+lgpqdmTOdjNqT+ad2bo0srIMwKmAe9FDVvtrf788dEyOFP5AMmfjXeGPsm/ovgKBVte4w== +api: eJzlVm1v2zYQ/ivCfdo6NUr6MhTCMMC1mUyNIxu2nAYLAoGWzjEbiVRJKp1h6L8PR8nKi921/Tj0iy0d7/W5O/HZguW3BsJriGQmcpQWbnxQFWpuhZJRDiEsN+e4SV+DDxXXvESLmiy2IHmJEMIdbsAHISGEits1+JCjybSoyAOEcI4bT608sQvgg8bPtdCYQ2h1jT6YbI0lh3ALdlORSyEt3qIGH1ZKl9y2ot/fQNPckLmplDRoyOLV8TH9PQ05r7MMjQEfMiUtBQ23wKuqEJmrK/hkSG+7H1ktP2FGOVaaULCijUI1fk96zo5Cj3AlpKBY5z9qGkljuczw+w1blV7VWC3kLfiAsi6pt4t4PmXD6DRiI/BhEZ/Hk48x+BBN0ovBdBrFZymbzSYz8OHD5H0aT9IZS2YRm4MP7IoNF0k0idNxNE9YzGZPz4eTeBS5850LdpXMBsMkvRyMF6yXDgfjMRulbMwuWJz04kX81yAeuROSpOySxQn4cMHm88EZS+fR3yxlV0PGRi75zs2IDaP546APAgo7eJLQ6WR2kcaTJD2dLOIR3DQ+lGgMvz0AWuNDptENSSLKwwrGcvufeA+GSXTJqIrobDZIXOIzNp+ML93jlMWjKD5ziXxSyx/oM0ouLS3lXlZ0KmxBon6Tm4bEbw4tSCTveSFyjzYRjf36olRaLQssf/vRhSGMavPNul6/gm90Q3TbcPDw8OA/BoNprXSPxOt9JE6VXoo8R/kUgxfBi/9/uW/2y521DUdqvVG1ztCTynorVcv855iCt4f2YTCNvEcFe+gMfgI86HOGWa2F3bg7fYlco37p7rvrm8bfQqbUnUD3dvP8aj9D29/r3nLjtVSgRLtWxBxu0UFApCCE4P4k2OmaYHuHmwYouL7f8YlaFxDCtgWzCYNgu1bGNuG2UtqS8j3Xgi+LFks6axu54nVByBUq44UTP88zWaNHB8RYiIvYNXrU8jb6kbt8lX7m7t3xu+ODnkj1K14eBuHBz9ra6qCfVvmgJ8dzdo2Zk15b9K4ZfXN5Jc4d6B0Zm0wZ3Tgv52xO1+GOmHV2jf+kwb2XLkWXEL23SrDTPt3N5oePiZsjIVfKmXfzNHGEEb1pvSxERqXsF6w87kiZxzMr7tHjMvcyVVYF0teo4z7ebrDpydu5XSntlUoKq2iSnaXVqqZJWCtl28uXVpVnroftIFFRJgyCL1++HGW8rGXOjzJVEgiFyFAah2OH27iT+M+Mc5WZ3loo9x5oXKFGmWHQOTIBeaVBbos9OTo+Om6nytiSy0eBDq/ME6z6tlj8xwZVwYUkXy6xbbdO13B/4prbLRT4EJKrG79bjGvYbpfc4EIXTUPizzVqt9QPe+S2LheGnnMIV7wwuJdL/wmEX2Ydb//V2+f0B1PeDancuO0tanoDv6XT7rchErRGnqN22bQngyzDyj6y2ePutB/9d+aMEWXkNQHTg/dshp33g/n88d4peIm6Q/lnn52lV8qvaf4FfTtoRQ== sidebar_class_name: "get api-method" info_path: 
docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -37,7 +37,7 @@ Get incident by key Success -
Schema
+
Schema
Invalid request diff --git a/docs/apis-tools/operate-api/specifications/by-key-4.api.mdx b/docs/apis-tools/operate-api/specifications/by-key-4.api.mdx index 2d0d3a29525..43f441aff9b 100644 --- a/docs/apis-tools/operate-api/specifications/by-key-4.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-key-4.api.mdx @@ -5,7 +5,7 @@ description: "Get flow node instance by key" sidebar_label: "Get flow node instance by key" hide_title: true hide_table_of_contents: true -api: eJzlV1tv4kYU/iuWn9ptEshuWq1QVcmAyboQQ22TdBVFaLCHMBsz447HpMjyf+854wsQnN3sY7UvyZz7Zb4zPuSmIo+p2bs3R7F45iKiDk8V4SE1H85MkVBJFBPcicyeudyN6W5xZZ6ZCZFkQxWVaJmbHAiQP9EdyBiHY0LUGs4RTUPJEvQATLA2xMpYVYEMVkc6MyX9J2OSQhQlM3pmpuGabojZy021S9A344o+UgmqKyE3RJWs367MonhA8zQRPKUpWrzvdvHfcWw/C0OapmAfCnDFFaqQJIlZqAvsfElRLz+NLJZfaKiwaIntUKyMgsW+JT1th6Hrvo6/13BIV4wzTPLtphBKqiFR9EA/VZLxR5RSHr0qw9txEQbRV8WuvvIWBcZDFkF/355rqfLSFWaZbRCYc9ef2QNn5NhD4M686cD2fTj58/5iT9m3thssjnl+YHnBQkuActzA9m7soWMF9mJgBYNP7aLgkze9a0T96dwdWt7nhmG7w+bs296tMwATyx8D6dkD27ltyDmI6/ON5c6tSU1V/+y/B5O5jxbXEPjO+qxTOeXNLM+aTOzJAasst2/59vCA69t/zW0XEhpNpncYdT4JnIXjQiOQ258OUWsAzhbWIHBunQDpPsRzoWMLbz5pkvcHnjMLGgqrrstyx+70zjUfSpy14Wh/eTqMjUGnN7OJHeg7xGY7roXEwwFkDvwshYgp4RoelBOuWuGIUqZiZI1q2NZvSlGg+KrtLXD4lsQsMvDRoal6/U2AAVzGdPPL974N2JUs/Sb8P7zH+jYw4uTxtWGqqmkTts/NYVNsKYVsOvHhtBMjIZcsgt4f9+Bd593/v9yr03K98sIpXn0qMhlSgwtlrETGox8DBb+2zYM1c4yDgg2qDX6AfuADRsNMMrXTe8ySEknluf6038PLlEMLxBOjmnp4uc5cU6V3GeNomTGWO6NchGBBWgvcmx6pbgauRD2zs73s1BvQeW2UdnKwKUzMR27rtSqTMRjkZX+LXqeTr0Wqil6eCKlQeUskI3AXuqkoK+92RbIYmxmLkMSa/TL1YE0NFODihiuZAhpRUEa/0MsHxDh297H7sdvqCVVf8bLHxt7PWqmk1U+p3OpJb3n1XfmoVxZd309z3yRhY939aiedzmwPPjTnPnzfnKlb76eVHWR4eOeNlypFnZD+HGkls9Ye1XD98y7Q0GJ8JbR5BbGp3pupMcuWMDZYymnBwiB6JYV/im2pQXhkhGKTxBQfqGr3a2CFJ6N2C+NibATshALBrS2VFBkiYS2EKr/AOL3gGtMqgYRFpYCi5+fni5Bs4M0jFxAQmwBJUlifUbfq26TinL0wjkSYNtZMaLoj6YpKCll2KkdpB70ikMtiLy+6F90SVanaEH4Q6FtTdNS15oIU/Vd1kpgwvSLoFPNqwu7N7aVZLqrHMwbMHvoEJJWzcm/m+ZKkdC7jokA2fB2kHv39aOlBjFiKZ5jlFYlTepJU81CaP3nVD5mfja/82mktogYw3+nJjjOk4KiRqf8WuCytKYkAiphWKbEARYk6sDn5VYOz0zxG1zYuriTDVu3XrWN8a++t+fze1wpGIJ4o/6PJTiGJ+RXFf+pfx+g= +api: 
eJzlV1tv2zYU/isCn7ZOjdI2HQphGKDYTKvFlT1JTlYEhkFLxzEbmVRJKplh6L8Ph7rEjp1eHoe+2CLPhefyHerTlhh2q4l/Qy4K+SBkDqHQhokMyMwlsgTFDJcizIlPFptL2MzPiEtKptgaDCi03BLB1kB8cgcb4hIuiE9KZlbEJTnoTPESPRCfXMLGkUtn2R7k8O4klyj4UnEFOfGNqsAlOlvBmhF/S8ymRN9cGLgFRVyylGrNTLP1+xmp6xma61IKDRotXp+e4t/+2UmVZaA1cUkmhQFhUIWVZcEzm6D3WaPe9vBkufgMmcGkFZbD8OYUTPZ7wrN2eHRX18sfNRzCkguOQX6/qTZMmSEzsKOvjeLiFqUg8mdl2J0IYZB/VRzZlh9R4CLjOQjz/bE2Kk9dYZTVGoE5jZIJHYQXIR0Sl0zi8YAmCXFJMj2fP67oFY3S+f5ekgZxOrcS4pIwSmn8kQ7DIKXzQZAOPhwXpR/i8XUvOh9Po2EQf+o3aDTsnxMaX4UDOk+D5JK4JKYDGl71y2lC4+75YxBNg1G3av/oP4PRNEGL90FKr4NPNpTDvUkQB6MRHe1sNemeBwkd7uwm9O8pjQZ0fjEaX+Op01EazsMoSQPcPR8PUWsQjEbzYJCGV2GK6/NpEkY0SebxdNQHnwzicJL2K8y6Syu6jMbXEZk1ODuGo8fm2WMoHjr+OBnR1PYQix1GAS5mO5DZ8bOQsgAmLDxAMGGOwhGl3BS4ddHBtrtT6hrFZ8fuglDcs4LnDl46oM3zd0Kp5KKA9W8/ejdgVSr9Tfi/eY35rUFrdvvcMLXZHBMen5vdolClpOor8eawEhdSLXieg9ivwQvvxf8/3bPDdOOm4YCt17JSGThCGmcpK5H/HCh4e2wegkno7CTsgDX4CeqBFxhkleJmY3nMApgC9dK+2m9mtbslmZR3HOxq9pTOvAdjuYyzR2acxcZpiNAazEoib7oFWwykRD7x7l95HQN62Rlpb3sHm5pgPOq+o1WVKohPtk19a9/ztiupTe1vS6kMKt8zxdmiaMqLsqa3S1YVWMxCZqyw209DT1fgoACJG1IyswIHUdCcfmLJh1RP3L07fXd61BOqPuPlERuPflbGlEf9NMpHPVmW1/UqQb0m6a4/fb9ZyS9t9VtOOp7QOEjpy4QmSTiOOn7a2tXuXs97L22INiD7OrJKpNO+6OD613VqocXFUlrzFmJjy5vBmVSLgmeYymHC0mGWkjosM/weHCZyJ5PrsgC8oFru18MKn5zO7VIqZy0FNxLBbS2NkhUiYSWlad7AOL0ssz1sgIRJad/zHh4eTjK2rkTOTjK5xiIUPAOhbR3buo3aHfeJcS4z3VtzadeegiUoEBl4rSPtoVcEcpPsq5PTk9MGVdqsmdg56FtTtFe1vkEG/jVeWTBuKYINcdtO2A25f0Uaoro/Y8QlPvqcue2s3JDtdsE0TFVR17j9pQJlR/9xtOwg5lzjc078JSs0HATVX5Tkl7j9kPnV+crXztEkOgCLjZ3sosIVcZsPDftbI1laActB2bAaSZBlUJodm4OvGpyd/jJ6T5G4sgpL9Ui39vFtvR+N549zq+Ck8g7En310BpcYX13/B+pfx+g= sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/by-key-5.api.mdx b/docs/apis-tools/operate-api/specifications/by-key-5.api.mdx index 3782eb8c1bf..1b457025295 100644 --- a/docs/apis-tools/operate-api/specifications/by-key-5.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-key-5.api.mdx @@ -5,7 +5,7 @@ description: "Get decision requirements by key" sidebar_label: "Get decision requirements by key" hide_title: true hide_table_of_contents: true -api: eJzlVt9v2zYQ/lcIPq2dazltOhRGMSDD3MLr0ASxhz0EwUBLZ5uNRGrkyakh6H/fHSkpdqx02ePQF1sk7+d33/FYS1QbL6c38ldItdfWXMPflXZQgEEvb0fSluAU0sE8k1O52n+C/V9v5UiWyqkCEBxr19LQgs7vYE9n2tBnqXBL3xn41OmSLdAmaQu7FlnrTLhDbyPZLskTugpG0qdbKJSc1hL3JdvXBmEDjkTX1hUK49ZP57Jpblndl9Z48KzxejLhv2P/iypNwbOr1JIpgyyiyjLXaUgy+eJZrj71bFdfIEVO3DEkqKMXnR3IeHTabGQzCjg8J2rG5xT3+bDRiPHAwY6qoGPc3/T45jVLE0q2cil8fsoc4aIMDgbBpxpz3hokTNOwyPkQ9HOzU7nOQsnB49MlIIBXORQ//tdSeFRY+WeCUBAL1GY4f23IlEmfACdsfAuYmXPW9Ui8OUXig3UrnWVgjjF4mbz8/6d7fprudSw4cOkj84SxKNa2Mtn3wYK3Q/1wcTUXBwkLCArfAR606SGtnMZ9GB0rUA7cq3Bl3tw2o5ogsHcawur28QT5CDg8PsRqL+L4obG0tTytNhDw4EE0lcnuLMlcltQk1EiOwe266VW5nCTqiGkzTZJ6az0207q0Dll4p5xWhH8Aks9iPdeqyhnA3KYqD9uPw11uQfAB3908+ZDWXPnofczYsY9jc+8m7yaDllj0CSsPfHiws0UsB+1E4UFLYZB29VmwXEy6q0lfY1XqTwHudvRfXs2uL5azV4vZYjG//Nw9A1o9ivCwzr2VNsQQEK+jkOykP3QU/e3PZaCTNmsb1FtaXYbnCYirakWtwqmcJmyFClOf/lDvQCiTidQWZQ58KVG7hMOO3/wlOrPUIqKwRqNlQgdNdLZiJmytRSZ57FgyzWFFInFSnlh0f38/TlVB95wak0MGgYIEeqGwbIvb7+3O6JFyZlPfa2sb1omDNTigKJPWkE+OHgDybDwZTyKrPBbKHDh6RuccAdfXCOErJmWuqJ5NG2XddtWN3J2xnuObfMpGiD2xP25kXa+Uhz9c3jS8TVPAhRZ/aKfQfJn2/E0Nu1a5h5Mo+gtR/tC+NbIX4l8ekoORd8Q1+9DRecUr2T7Wwm9D14/cgsqIghxaPLkg9pR4oHPyYOSe6W+dj7MlyaqK8ekxfMTrYH0wnve/BAGxtHdgfu6jQ15yfE3zD5p7AyA= +api: 
eJzlVt9v2zYQ/leIe9o61XLadCiEYUCKuoWXoQliD3sIjIKmzjYbiVTJkzNB0P8+HPUjdqx06ePQF1sk7767++6OxxpIbj0kt/Aelfbamhv8WmqHORrysIrAFugkaWvmKSSwri6x+vwGIiikkzkSOtauwcgcIYE7rCACbSCBQtIOIkjRK6cLRoAELrESdiPSzphwh9Yi6JYpJORKjMCrHeYSkhqoKhhfG8ItOohgY10uqd369RyaZsXqvrDGo2eNV9Mp/x3bX5RKoWdTyhpCQywiiyLTKgQZf/EsV59atusvqIgDd0wJ6daKTg9kPDltttBEgYfneM38nPI+HwdtOR452KPzuvX7mxZfv2Jph96WTuGnp+AIjTQ06gSfasp4a7RgmoZFzseon5u9zHQaUo6enk5B4ew6w/yX702FJ0mlfyYJOXovt+Pxa+NJGvUEOWHjW8TMnLNuYOL1KRMfrFvrNEVzzMGL+MX/P9zz03Bv2oQjp76tPGEsiY0tTfpjVMGbsX64uJ6Lg4AFBoUfgI8mAo+qdJqqMDrWKB26l+HKvF01UQ3K2juNYbV6PEE+Io2PD7GuRDt+cqSd5Wm1xcAHD6IE4v1ZnLo0ru+waoB9cPt+epUugwTqltMmieN6Zz01SV1YRyy8l07LddZSymdtPjeyzJjAzCqZhe3H7i53KPiA726efLRDwZlvrU+YO7ZxDPd2+nY6isSiT6A81MMDzo6oGMVphUeRwiDt87NguTboPidDjmWhLwPd3ei/up7dXCxnLxezxWJ+9al/BnR6TXSU5wGlczE4xOtWCHrpD32J/vH3MpSTNhsb1LuyugrPExTX5TrTikM5DdgKGaa+kIr0HoU0qVA2LzLkS6lwNhz29c1foofdWCdyazRZLuigSc6WXAk7a4mLvO1YqUIO20LioHwSx/f39xMl89KkcqJsziRkWqHxgceOtz+7neiRcmqVH7S1DevY4QYdGoVxB+TjowcAnE2mk2lbVZ5yaQ4MPaNzjogbckT4D8VFJrVh4OBl3XXVLezPWM/xTZ4wyCrq+uMW6notPf7lsqbh7a8lutDiD+0Umi/Vnr9TSDYy83jixXAhwk/dWyP9WfzHQ3LU875wTRU6Oit5Bd1jLfw2qyaCHcoUXXCtPblQCgs60Dl5MHLPDLfOx9kSIpAl8zNw+KiuA/qoP7+9CwJiae/Q/D54R7xk/5rmX5p7AyA= sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/by-key-6.api.mdx b/docs/apis-tools/operate-api/specifications/by-key-6.api.mdx index fc0487f647b..57d1ad2a758 100644 --- a/docs/apis-tools/operate-api/specifications/by-key-6.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-key-6.api.mdx @@ -5,7 +5,7 @@ description: "Get decision definition by key" sidebar_label: "Get decision definition by key" hide_title: true hide_table_of_contents: true -api: eJzlVt9v0zAQ/lcsP8Homg4GQhVCGqKgMsSmtcDDNCE3ubZmiR1sp6OK8r9zZydpu2aDPaK9tLF9Pz9/57uSO7GwfHjJ30MsrdTqPcylkg6/+FWP6xyMoMU44UM+W5/C+scr3uO5MCIDB4Z0S65wgefXsMYzqfAzF26J3wnY2Mjcmxty1GZ6zpLaFX60vnrcwK9CGkA/zhTQ4zZeQib4sORunZN1qRwswKDoXJtMuLD16phX1RWp21wrC5Y0ng8G9LfrfVLEMViL+rFGU8qRiMjzVMY+xeinJbly37Oe/YTYUdqGAHEyeJHJlox1RqoFr3oehX+JmtAJQIy7DQVUOw5WiLsMsd7r5cXzbS8XAeAMM7d3eOwSPX1wOtvaX+7KoUv424PywisUynVmQqfSpbTVweuqIoHjLo6M1UqkMmFERrDubq4gE2YpZM8eyhnrhCvsP2aYIV3Fohs/qdCUirsPw8Z9sIyM0aZF4sU+Eh+0mckkAbWLwUF08P+ne7yf7kW4cKCrt7owMTClHZvrQiWPgwUvu+rh5HzMthJm4BUeAR64aSEujHRr3+FmIAyYQ/+2X15VvRIh0NcS/OrqdqP7CK6ry7HZmoUeib1zqamlLsCjQd1yyKPVUdSoHW7UbFSiVsUpJLNqem5hUlQpA8TVMIrKpbauGpa5No6EV8JIgdfhcaWzcL1zUaSEZ6pjkfrt29FPl8DogPoP9WuHayJC8N4nKMnHrrnXg9eDTkskeoeVDT02dpbO5Z12gnCnJT8ANNc1IbmQdHNF7ZWLXJ56/OuB5ex8dHEyHR1ORpPJ+OxLM7zUehjh9rW3VuoQfUC0DkK8kf7QMPbT96lnl1Rz7dVrlp35oQrYeTHDyqFU9hPWTPhpBf+cXAETKmGxzvIU6I3C6vGHDd3pizVmsWJYppE6mvjtNZ3RBTFhqbUjzocCRtMUViASJWWRRTc3N/1YZPjsiT46JBAwSMDJimRr3D7XO71byomObasttV9HBuZgAKOMakM22hli+FF/0B8EVlmXCbXl6K+FtANbe0MOfrsoTwXeZlXHWNZFdslXR3wzfmyXGW4PySqSKZTLJS/LmbDw1aRVRdvYI4x/ADbV5WsxkZa+saDnIrWwF1b7XPIn9byTPGX3TsOdiTQsVmtf3mlBK15PnP63wqeJL0EkyEcKLJycIJVyt6WzN/VSAbVv0sfRFGVFQXC1kN4iubfeGc+bd16ATfU1qLdtdI6WFF9V/QHkj0fJ +api: 
eJzlVm1v2zYQ/ivEfdo6xXLarCiEYUCGuoWXoQlib/sQGANNnW02EqmSJ6eGoP8+HCm/xUrWfBz6xRbJe334HO8aILn0kN3Be1Taa2ve40IbTdoamCVgK3SSF+McMphvrnDzz1tIoJJOlkjoWLcBI0uEDO5xAwloAxlUklaQQI5eOV0Fcxlc4UbYhcg7VyLf+0rA4ZdaO8whI1djAl6tsJSQNUCbiq1rQ7hEBwksrCslxa23F9C2M1b3lTUePWu8Hg7579j7pFYKvYcElDWEhlhEVlWhVUgx/exZrjn1bOefURGn7RgQ0tGLzg9kPDltltAmAYVviZrRiUCM+w1FVHsO1ui8jrE+6+XN60MvtxHgEg35Jzz2iV69OJ1D7U9P5dAn/NeL8iI00lBvJnyqqeCtHl63LQtc9HFkbNay0LlgMqKnp7lSOTsvsPzppZzxJKn235hhid7LZT9+2niSRvUfxo3nYBk5Z90OiTenSHywbq7zHM0xBq/SV///dC9O072NF4589d7WTqEwlsTC1ib/Pljwc189XN6MxUHCAoPCd4BHm4BHVTtNm9Dh5igdurPwtt/N2qQBZe29xrCaPW50H5H6upyYb0TskSXSynJLXWJAg7tlBun6PN2qne3VfNrc46YFDsmttz23dgVk0ESI2yxNm5X11GZNZR2x8Fo6LedFRJjP4vUuZF0wnoVVsgjbj6OfrlDwAfcf7te0QsFEiN4HDCX7ODb3bvhu2GuJRZ+wsqfH3s6KqOq1E4V7LYUBYHtdE5aLSW+vaHflstJXAf9uYLm+Gd1eTkdnk9FkMr7+tB1eOr02Obr2nZUuxBAQr6MQbKU/bBn7+9/TwC5tFjaodyy7DkMVipt6XmjFqZwmbIUM04qQivQahTS5ULasCuQ3qnI2HG7pzl9ia3ZhnSit0WSZ30GTnK2ZCStriTkfC1iqcIeRSJyUz9L04eFhoGRZm1wOlC0ZhEIrND7g2OH2R7eTPFLOrfI7bW3DOnW4QIdGYdoZ8unREAPng+FgGFnlqZTmwNF/FtIRbLsbIvxKaVVIbdhsiLHpiuwO1uewHz8OywwSyNjqLOnK5Q6aZi49/umKtuXtLzW68ADsqyvUYq49f+eQLWTh8SSs3XMJP3TzTv6jeHYa7k1ky2KzCeVd1LyCbuIMv+2sTWCFMkcXAosnl0phRQc6J1MvF9DuTfo4mkICsma4dpA+Inmw3hvPL78FATG192h+3UVHvOT42vZf5I9HyQ== sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/by-key.api.mdx b/docs/apis-tools/operate-api/specifications/by-key.api.mdx index 06dc3839efd..1e50a6414fb 100644 --- a/docs/apis-tools/operate-api/specifications/by-key.api.mdx +++ b/docs/apis-tools/operate-api/specifications/by-key.api.mdx @@ -5,7 +5,7 @@ description: "Get variable by key" sidebar_label: "Get variable by key" hide_title: true hide_table_of_contents: true -api: eJzlVt9v2zYQ/lcIPq2dazltNhRGMSAD3MLrsASxtz0EeaCks81GIjXy5NQQ9L/vjpSUOFbb7HHoiy2S9/O773hsJKqtl/Mb+ZdyWqUFyNuJtBU4hdqaZS7nMj18hIOcyEo5VQKCY/lGGlrQ6V0404Y+K4U7+s7BZ05XrE+bpCvsRux78xPp4J9aOyDT6GqYSJ/toFRy3kg8VGxSG4QtOBLdWFcqjFs/n8u2vWV1X1njwbPG69mM/45druosA+9JP7NkyiCLqKoqdBaySj55lmtOPdv0E2TIuTrGAHX0wjk+J7ygx66XxqMyGXx8tqLPyOHzxSP4g6hHp82WD/aqqMdPCGxD+TPuw2lqbQHKhGMwyuAyH9HlU40Fbw0saVvePh+Df2koCJ0LrjN4/HIZCCwyVf74X8tB2GLtv4nUm9ecV0nlUNtxSHRXpnG8wsbXwFg4Z92AxJtTJN5bl+o8B3OMwcvk5f8/3fPTdK9jwYFL723tMhDGotjY2uTfBwt+GuuHi6uleJSwgKDwHeDB1xpktdN4CBMjBeXAvQq36c1tO2kIAnunIaxunw6OD4DD1BDpQcRBQwNoZ3kqbSFAwCNnLpP9WdLL+qQh0Vayc7fvp1XtCpJrIpjtPEmanfXYzpvKOmThQZ2z47NYyI2qC0ausJkqwvbTONc7EHzAVzJPOqQ1lzx6n4apQD6Ozb2dvZ2NWmLRL1h5IMKDnR1iNWonCo9aClO0L8yK5WLSfTGG4qpKx8nfjfrLq8X1xXrxarVYrZaXf/Rjv9OjCB8XeLDShRgCCjMnCMle+n3Pzd/+XgceabOxQb3j02V4jIC4qlPqEU7lNGErVBj59Id6D0KZXGS2rArg26gbyqInNn+J3iz1hiit0WiZyUETna2ZCTtrkdkdW5VMc1iRSJyUJxbd399PM1XSBaem5JBBoCCBnics2+H2e7czeaKc28wP2tqGdeJgAw4oyqQz5JMw14nIMdmz6Ww6i6zyWCrzyNF4yxxhNZQF4TMmVaF0mP4hsKZrpxu5PzvqiImcsymiTWyMG9k0qfLwpyvalrfp3nehqR+0Qtfl2vM39etGFR5OYhmuQPnDdfcqfCFOX4yjIfckNQc5PHskfQb2hd+W7hi5A5UT3TiaeHJBTKnwkc7Jy5D7Y7hnPizWJKtqBubh3XTM4WB9NJ53vwYBsbZ3YH4ZokNecnxt+y+MIezQ +api: 
eJzlVt1v2zYQ/1eIe9pa1XLabCiEYkAKuIWXYQlir30I/EBRZ4uNRKrkyakh6H8fjvpIHKtr+zj0xRZ5H7z73R1/bIDkzkNyCx+k0zItEDYR2AqdJG3NMoME0sMlHiCCSjpZIqFj/QaMLBESuAsybSCBSlIOEWToldMV20MCl3gQdiv2g/sIHH6utcMMEnI1RuBVjqWEpAE6VOxSG8IdOohga10pqdv6/RzadsPmvrLGo2eLl/M5/x0fuaqVQu8hAmUNoSFWkVVVaBWyij951mtOT7bpJ1TEuTrGgHR3Cuf4PeEFOz56aTxJo/Dyuw29stUPqHfgj6qenDY7FuxlUU9LyNVGSWLcR2lqbYHSBDEaaWiZTdiyVFPBW2OXtC1vn0/BvzR7WehMcJ3R09fLUDmbFlg+/9FyeJJU+28i9eol51Wi93I3DYnuyzSNV9j4LzAWzlk3IvHqFIl31qU6y9AcY/Asfvb/T/f8NN2bruDIpfe2dgqFsSS2tjbZz9EFv03Nw8X1UjxKWGAw+Anw4GsNVe00HQJjpCgduhfhNr3dtFEDyto7jWG1eUoc75FG1hDpQXREUyLllllphwECppwE4v1ZPOj6uLnDQwt8uNsPbFW7AhJoOjDbJI6b3Hpqk6ayjlh5NOfsWNYVcivrgpErrJJF2H4a5zpHwQK+kpnpKEfBJe9OnwVWsO6Ju9fz1/NJT6z6FS8PjfDgJyeqJv10ypOeAosOhVmxXpf0UIyxuLLSHfP3VH91vbi5WC9erBar1fLq74H2e7s2Oirw6KUPMQQUOCcowaD9bujNPz+uQx9ps7XBvO+nq/AYQXFdp4VWnMppwlbIQPlCKtJ7FNJkQtmyKpBvo56UxdDY/CUGt1vrRGmNJsudHCzJ2Zo7IbeWuLu7UZUq1LBrJE7KJ3F8f38/U7KsTSZnypYMQqEVGh9w7HH7q9+JnhhnVvnRWtuwjh1u0aFRGPeOfBx4HZ3vkj2bzWfzrqs8ldI8Omh6ZI6wGstC+IXiqpA6sH8IrOnH6Rb2Z0cTEUHCrjZRPxi30DSp9PiPK9qWtz/X6MJQP1iFqcu05+8Mkq0sPJ7EMl6B8MtN/yr8VZy+GCdDHprUHGB89gBE3WMt/LabNoIcZYYuRNNJLpTCih7ZnLwMeT7Ge+b9Yg0RyJqBeXg3Hfdw8D4Zz5u3QUGs7R2aP8boiJccX9v+C4wh7NA= sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/delete.api.mdx b/docs/apis-tools/operate-api/specifications/delete.api.mdx index 87a0f9a05dd..a187e6f28d3 100644 --- a/docs/apis-tools/operate-api/specifications/delete.api.mdx +++ b/docs/apis-tools/operate-api/specifications/delete.api.mdx @@ -5,7 +5,7 @@ description: "Delete process instance and all dependant data by key" sidebar_label: "Delete process instance and all dependant data by key" hide_title: true hide_table_of_contents: true -api: eJzlVktv20YQ/iuLPTWpLMqJWwRCUcBtFEBNUBuWihwMH1bkSNyY2mV3h3IFgv+9M7skLYlyX7ciF2kf8/z2mxnWEtXGy+m9vHU2Be/nxqMyKciHkbQlOIXamnkmpzKDAhDkSJbKqS0tHevV0tCGrh9hT3fa0LJUmNM6A586XbIBOvwIe2HXooxuhO78jKSD3yvtgHygq2AkfZrDVslpLXFfsmltEDbgSHRt3VZhPPr+SjbNA6v70hoPnjXeTCb8d+x6UaXsk/RTS6YMsogqy0KnIb3ki2e5eujZrr5AipyzYzBQRy9bsqY2cCDo0Wmzkc2ohSn7Z+GPJGosWOjnXJkNLFBh5emCr67O5TI3O1XoTDBo4PHlnCjiVQHbb/9tbj6G8Hfhv33Dyf4VEP0Ln7uMB4OLA0BmzlnXI/F2iMQH61Y6y8AcY/A6ef3/T/dqmO5dfHDgp/e2cikIY1GsbWWyr4MF352rh+vbuThIWEBQ+ArwoEMPaeU07kMbXoFy4C64DU/vH5pRTRDYRw1h93Dajd+HLjXoxkKZTKiiEBmUYDJlUGQKlVjtRezv1PdzezINqNlPZbK7TFprF501n9Sk1UiO1O26eVG5guTriHwzTZI6tx6baV1ahyy8U04reqUAN9/FV1+rqmCYC5uqIhyfJrXMQfAFTySeNUh75kf0PmaE2cexuXeTd5Ozllj0BSvPrHm2kyOWZ+1E4bOWwvzqXnHBcjHp7uV6JqhSfwz4t8P25nZ2d72cXSxmi8X85tdu8LZ6FOEhG3orbYghIN5HIdlJf+iI/MvnZSCdNmsb1Fvy3YTvARC31YoKilMZJmyFCsOW/lDvIqFSuy3DUBzwjVeiM0uFJLbWaLRM+6CJzlbMhNxa5FKIdU2mOaxIJE7KE4uenp7GqdpSN1RjcsggUJBAHwYs2+L2qT0ZnShnNvW9trZhnzhYgwOKMmkN+YStMpFjspfjyXgSWeVxq8yBo/9aX0do9g+H8AcmZaHokZs29LotvHu5u4zd6rj06GzKJolgsYTuZV2vlIffXNE0fEzjxIVe8VxxoT4z7XlNRb5WhYdBTH1nld/ctV9ur8TLX3dnU+hobfah3ouKd7QMfA2/DbUwmYPKiKAcVby5Jm6VeKAz+Irjiuqb1PvZp9lyRuKqYqx6PE+IHxycDemHn4KAWNpHMD/2ASJvOcSm+RMnstvx +api: 
eJzlVktv3DYQ/ivEnNpUXq0TtwiEooDbbAA3QW14t+jB2AOXml0xlkiFHK2zEPTfiyEleV/u61bkJJGc58dvZtgCyY2H7AHunFXo/Y3xJI1CWCZga3SStDU3OWSQY4mEkEAtnayQ0LFeC0ZWCBk84g4S0AYyqCUVkECOXjldswHI4APuhF2LOroRevCTgMPPjXaYQ0auwQS8KrCSkLVAu5pNa0O4QQcJrK2rJMWtH66g65as7mtrPHrWeD2d8ufQ9bxR7BMSUNYQGmIRWdelViG99JNnufbUs119QkWcs2MwSEcvFXovN7gn6Mlps4Eu6WHK/1n4CZCmkoV+KaTZ4JwkNR66jo+uzuVyY7ay1Llg0NDTyznVzq5KrL77t7n5GMLfhf/mNSf7V0CMN3zuMG6cHOwBMnPOuhGJN6dIvLdupfMczSEGr9JX//90r07TvY8Xjnz13jZOoTCWxNo2Jv86WPD9uXq4vrsRewkLDApfAR5dAh5V4zTtQhteoXToLrgNZw/LLmlBWfuoMayWx934XehSJ91YSJMLWZYixxpNLg2JXJIUq52I/b1CKuzRNKACMki3l2lv7WKw5tP2EXcdcKRuO8yLxpWQQRuR77I0bQvrqcva2jpi4a10Wq7KCDyfxVtfy6ZkmEurZBm2j5NaFCj4gCcSzxoqUDA/ovcJI8w+Ds29nb6dnrXEoi9YeWbNs52CqD5rJwqftRTm13CLc5aLSQ83NzJB1vpDwL8ftrd3s/vrxexiPpvPb25/GwZvr9clB2wYrfQhhoB4HYVgkH4/EPnXPxaBdNqsbVDvyXcb3gMo7ppVqRWncpqwFTIMWyEV6W0klLJVHYbiCd/4Twxm19aJyhpNlmkfNMnZhplQWEtcCrGupQp3GInESfksTZ+eniZKVo3J5UTZikEotULjA449bh/7neRIObfKj9rahnXqcI0OjcK0N+RTtspEjsleTqaTaWSVp0qaPUf/tb4O0BwvjvALpXUptWFvIfS2L7wH2F7GbnVYepBAxiaXSV9CD9C2K+nxd1d2HW9/btCFXvFccaE+c+35P4dsLUuPJzGNnRW+ue9fbt+Kl193Z1MYaG12od7LhleQhEdkfEp2yy6BAmWOLkQVT66Vwpr2dE5ecVxRY5N6N/s4W8wgAdkwViOeR8QPDs6G9OPPQUAs7COan8YAiZccYtf9CSey2/E= sidebar_class_name: "delete api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/get-statistics.api.mdx b/docs/apis-tools/operate-api/specifications/get-statistics.api.mdx index c3098e28e3c..51c027b1625 100644 --- a/docs/apis-tools/operate-api/specifications/get-statistics.api.mdx +++ b/docs/apis-tools/operate-api/specifications/get-statistics.api.mdx @@ -5,7 +5,7 @@ description: "Get flow node statistic by process instance id" sidebar_label: "Get flow node statistic by process instance id" hide_title: true hide_table_of_contents: true -api: eJzlV99P5DYQ/lcsP7XXZbPc0eq0qipRCU70qgOxVH1APHiT2V0fiZ3ak+VWUf73zthJCCSogrfqeIDYnp+fv/EMtUS19XJ5K6+cTcH7C+NRmRTk3UzaEpxCbc1FJpdyC7hCWnrUqZczWSqnCkBwrF5LQwuSuocDnWlDn6XCHX1n4FOnS7ZDm5/hIOxGlNGb0J27mXTwT6UdkCt0FcykT3dQKLmsJR5KNq0NwhYciW6sKxTGrV9OZNPcsbovrfHgWeP9YsF/nrpeVSn7nItrwMoZL3yfjiCLAncgtnoPZhTdTGydrUrIxPogNrl9EMaSbYoktRSUQXamyjLXacAr+erZYz3OQTmnAkAIhR/s2/VXSJFRdYw66piHSlHvNR74AnpZj06b7QjZGwpfZwwuJ9JHGVJ72Ol0F/YJpipHL5QDobZbB1uFhHkzi75gEu+xI7SocmGqYg2OXUblHjA/CmPi1gg9ls0he6PTTv21brVJdUa35t/ot9fvafMfadqizAHfnmen/7pEyTNqzNnbOYl9IalBATf8M5MnU5VyYfYqJy5xSYLHl3lObF3nUPz0It9f4DVXXjUN/zCLD+8ZvoJKUW1hXAHhKtv3Y+owbowOBricOWed7JD4MEbi3Lq1zuiyn2LwLnn3/0/3ZJzudbxw4Kv3tnIpELmQaF6Z7Ptgwc9T9XB6dSEGCQsICt8BHrTpIa0ctaDQ5NdAfcMdcZNf3t41s5ogsPcawuru+Vv2CXDQh/puy030eYelziU5J9zZdtYIEwYNEEuZ7I+TVv6ofwCTmoJoEj+cSDy4fTeOVC4n1TpC3yyTpN5Zj82yLq3DhoT3ymlF1xTw5rN47RtF7ZE0c5uqPGxPvdB8wANP9wwzQaL3OUPMPp6a+7j4uJi0xKIvWHmkzaOdHWI5aScKT1oK41F3jSuWi0l3V/c4nJT6cxjf2lnu8urs+vTm7Gh1tlpdXH7p5rpWjyIc0qG30oYYAuJ1FJKd9HnH5D/+vgms02Zjg3rLvsswdYK4qtZUUZzKOGFLA0fgTzt3KJMNuuRzcvGX6Mxyyy6s0WiZ90ETabhjJuysxThZcWGTaQ4rEomT8sSih4eHeaoKeg7VnBwyCBQk0NzJsi1uf7Y7s2fKmU19r61tWCcONuCAokxaQz5hq0zkmOzxfDFfRFZ5LJQZOHp1gT2Bsb8xhG+YlLmi223amOu2+G7l/ji+U0/Lj/aWcdQfVCDRLBbSrazrtfLwl8ubhrepq7jwZDzWXajSTHv+ppLfqNzDKMD+gZU/XLf/HvwoXv4XYjKfjtzmEKo+r3hFn4G14XdDL5ncgcqIphxVPDklhpU40BkN+FxX/ZP16eyGZFXFqPXIPuN+sD4Zz6+/BwFxY+/B/NZHh7zk+JrmX71Gt3o= +api: 
eJzlV0tv4zYQ/ivEnNqtYjm7abEQigIpkCzSLTZB7KKHwAeaGlvcSKSWHNk1BP33YqhH7EhBkdyKzSXmY17ffDMc1UBy6yF5gDtnFXp/YzxJoxBWEdgSnSRtzU0KCWyRFiRJe9LKQwSldLJAQsfiNRhZICTwiAeIQBtIoJSUQQQpeuV0yXoggc94EHYjytaa0L25CBx+q7TDFBJyFUbgVYaFhKQGOpSsWhvCLTqIYGNdIand+uUCmmbF4r60xqNniffzOf87Nb2oFNuciXukyhkv/BCO2FgnKEOx1Ts0I+8isXW2KjEV64PY5HYvjE2RQVDWEBpiY7Isc60CXvFXzxbrcQzSORkAIiz80b5df0VFjKpj1Em3cUhFeqfpwAkY7npy2mxHyC4zFDplcDmQwcsQ2j7TKgv7Dn2VkxfSoZDbrcOtJEyhiVpbOIn32BBZkrkwVbFGxyZb4QEwP3JjImsRKL6bY/pGo734a81qo3SKhvwb7Q7yA23+I0xblDnS2+Ps5V8XaBMBacrZ2nVu919sikcF3PBfBBdTlXJjdjLXqeCSRE8v87x0dp1j8dOLfH+B11x51TT8x1F8eM/wFei93OK4AkIqu/4xddhujA6OcLlyzjrokfgwRuLaurVOUzSnGLyL3/3/w70Yh3vfJhw59d5WTqEwlsTGVib9Pljw81Q9XN7diKOABQaB7wCPJgKPqnKaDuGRX6N06M74kU8eVk1Ug7L2UWNYrZ73sk9IR+/Q8NryI/r8hRWa+VUgZbabNcKEQRkkEO/O4+7+2dAA4/oRD03sjycSj27XjyOVyyGBuoW+SeK4zqynJqlL66iBCHbSabnOW+T5rE37RlY545xbJfOwPdWh+YAHnr4NM0Fa6zOGmG2cqvs4/zif1MRXX9DyRJsnPRlROamnvTypKYxHfRoXfK8Nuk/d03BS6s9hfOtmudu7q/vL5dXZ4mqxuLn90s91nVwTndBh0NK5GBzidXsJ+tvXPZP/+HsZWKfNxgbxjn23YepEcVetc604lHHAVsgwy/VzhzTp0Sv5nFz8S/Rq+ckurNFkmfdBkpytmAmZtdROVlzYUoUctkTioHwSx/v9fqZkUZlUzpQtGIRcKzQ+4Njh9me3Ez0TTq3yg7S2YR073KBDozDuFPmYtTKR22DPZ/PZvGWVp0KaI0OvLrATGIeMEf5DcZlLbdhM8Lnuiu8BdudtnzotP4ggaUf9owpcRV0hPUBdr6XHv1zeNLz9rUIXWsZT3YUqTbXn3ykkG5l7HDk4NFj44b77PPhRvPwJMRlPT25zCFWfV7yCKHyptN8rzaqJIEOZogtetSeXSmFJRzKjAZ/ramhZn66WEIGsGLUB2WfcD9on/fn193BBLO0jmt8G74iX7F/T/Au9Rrd6 sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/search-1.api.mdx b/docs/apis-tools/operate-api/specifications/search-1.api.mdx index 58f971eb4ff..57e0fc11820 100644 --- a/docs/apis-tools/operate-api/specifications/search-1.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search-1.api.mdx @@ -5,7 +5,7 @@ description: "Search process instances" sidebar_label: "Search process instances" hide_title: true hide_table_of_contents: true -api: 
eJztWF9v2zYQ/yoEX9J2TiwncZsGwwA3cYBsWZLFXvaQBAMt0TZbSVRJyoln6LvvjpQtWVJcG9jLtiJALJF3x7vf/aUW1LCJpqcP9FZJn2t9GWvDYp/TpxaVCVfMCBlfBvSUas6UP/2zQ1tU8a8p1+aTDOb0dEEDrn0lEqQEuoGlI4mTR0QuUAOfL2PDY4M8LElC4Vvp7c8aGRdU+1MeMXwy84SDKDn6zH0DjCAMdDECpMDuWISGq2/TfeHzEpGAsyfA1qJjqSJm3NL7Y5pZPlT2nistnC4bmY4OkWmURPEStaDEo40S8cSKZQrM/WV7NSz9RSifr2XAl67Ynh/olTlnhjdqw+Pg1T3gbNpBpjTC8OidDS/v+7BwdvPr7VV/2D/H5971Wf8KHp8KDM/5WMQC/bq93hAULDYbQayEp5VNfaH8NGTqTWX3rRUqTIiSqoGN1oq/+JZOdlHfG68HHFOKzYFeGB7peiBmyCiV2Z6jHuI8bIZDqmBNlbqnBmfwdt6Hn6eshMMAFcrKK7+lXM0b4OEvLEpCp0gvDOspfsdNqmJNWBjW85y8CfiYpaEhypKRUGhDEHIiNOl4b0G7GQtT9ABqg3rxgIzmZGV082n1k7RjZdqH2AYUUMh6VhZH5Q55WGFLq5Q5shbA7KmkGosDkrAJPD0LMyXL6GnWMmIv5KhBV4YhRPZGYnLPVH7snhUNZYq8/9DtHn38eHzc8brdkw/et00jj/FQkgk3xEw5ifmLsToSXyZzu2QNJ3JM9lDWPb7BgRDYElaKqN5zhAePMVpLdMJ9Mc4rM9FTmYYB2GSgoqPQEuM66RrSFqCj1q6QV7Ltga6BRVtVkKyPLmwzyL3kUNvsnK5X906LjJdiAOf1boAQHu4WakWDqjaWw1X16Xo745NlNn2x+wqFZhqVcrugExlrl7CHntfQk1PfQfhPteBqGduxvH3vzN87c0Nnztumq1W7tVsjDQu3MqqkwB3X0Kd0XQ8kOm5KJHATg7IBmS6C17MJAB+FPPph16xCX6d6yyCPQGUo+I2eWRa2xk23UNso4dJXSqoVDEd1GC6kGokg4PE6Bu/a7/795h7Xzb2WhlzINA7+c+Z2m4K8d3tJSuFMuGX4H0S7nff9VAkzt215BOMIV/u2Wz1AJV0ABPKL4PYNxpWtL74RN1OJDT6R2gLBzBTe2rNOOyffX5G33RBEURc1g75nVUkVVDe6cNhmp+32YgqistNFAgUzs8OHEmyUT+245/xqJ3HgDKXPwqk7fV3tIYx1uBGzyI6LOOZhBLjTD2yhz+8yhbgT78RrlISkr0gp4qKQMzUmaZTjiBslZW5SdH4aIJ0zeumbom0kArtRi6Jp8H5z27/rDfv7g/5gcHlzjQ0Fj8v5cG4o+XslJVfRKoTvjoguqS+WofrzH0MbVpgGd8UHkr67TK2PhfYErz7deLXZpej3pZHF2ziQeGvjRnlgCKpL+XBRzBDNo4JXHgTKs4mdZL3q3L6wDqrOtqtDK0MtemEsLeB5Qt7Y706c3KYjKDLo/HqISMLsSAs/Rsy4nf59iUjjmF6/fomYLMVCcSGRBPMkKmQ5jZIp5s5USuOUxFoHolEtl3oYBhry7vn5+cBnETQDdgAHIgygJIfxG2nzSLvKV1oV5kD6esUtpH1vKz6Gmwdo2c4F6TZKnS2DgnYOvAPP5aE2EYtLB22oOWuAraLZwEWxnYQMIj/LtVvk5eiBzjpFCOyXZeUlCdzqKssDXSxGTPPfVZhluPwVPyVgWSwKkSuZdMpZ4KLCRT09c11kf4gKrS5N9dsIllvH0QNPJ2Yj7VOpxt7eDIaYnvnXyQjyA1YVe8Yvl/D/lD7CHwaiBcfWDru+oCGLJ6ltM9TJxYRmKaKzQrCS/9ayZcGJ5yUtf/xkCchQfuHxT7DhrDH46m5zfwMoNl4Y +api: 
eJztWG1PIzcQ/ivWfOHuupDl7V6iqhIHQeJ6BUpS+gGiyvFOEh+79p7tBdIo/70ae5NssgsXpPZD2+MTsWfG42eeefFOwfGRhfYNXBot0NozZR1XAqEfgc7RcCe1OkugDRa5EeM/diECg18LtO6jTibQnkKCVhiZkyS0oevlWB7sMVkatBCB0MqhcqTD8zyVwltvfbGkOAUrxphx+s9NcoQ26MEXFA4iyA354iRa2h3K1KH5ttwdTipCUjkcoYEIhtpk3IWltwcw83rk7DUaK4Mvzyrt79WVenxU0bPOSDUiqUGeqTm2SaNEzg0q9/Pmznr501Q/nOsE5wHbXN86btwJd9joDarkyT3rGndIqciIREfHvbPrDkRwfPHL5edOr3NC/x+dH3c+d06gP4tAKiGTkgOllYHWKXJVgfQEh1JJ4sbmt3KouHLPQrxGcW8bhDSiSLl5tbb72huVLiVL68lBWMg/cUOihMw5Gq6SlhvDJxCBdJjZOplnpKiN21yjniaYNsOhTbLiSj2O3WOI4KTTPYb+rIJDlxyaVVd+LdBMGuDBR57laXDkKE3rZeIKXWGUZTxN67WCvUpwyIvUMePFWCqtYwQ5k5btxq8hgnueFhQB8ob8woQNJmxx6ebT6ifZoMqtQJVINSIjqzm7PKoMyM0CW1iXLJH1AM76Fde4SljOR5iwB+nGbM6eZi8z/sj2G3zlRCG2NZCja27KY7e86TucsLfvDg/3P3w4ONiNDw/fv4u/fTV2q3qajdAxN0am8NF5H5nQ+cQv+YszPWRbZOuaftktJpXTbKvC6q0guHOr6LbM5ijksKzuzI51kSYs406MvdGK4qroCtIeoP3opZCvZdsNrIAF0TpIPkanvqGUUQqoPR+cw7genYgN52YGE7baHAjCvZdRbdnk1pvT3qL6HMYvxmfm/6iF21wrGxJ0L44b+nghAmR/V9teL1svLGffu/n3bv4PdfOy1Yb69rIW7bTj6UaXqjhwhbZIna37QUIHTcl4wh1nUt3zVCZPZ2Ru9CDF7IeXZiYxobAbJkqG1vJRM6XmxbBxMyzUNiq4dIzRZgHDfh2GU20GMklQrWLwpvXm33/dg/p1z7Vjp7pQyX/uuodNJD+6PGMVOjP0Cv8Dtvs3giiMdBPfygfIDZpt3/Fu+rNoCkLrO4n+Vz/a/MGdoRtrGgpybT0Q3I2hDa373VYpvr0Qb4XBCcgXc4/GelcKk0IbpgHbWbvVmo61dbP2NNfGzfzAYiQflJM+7YW4+ukd2pBqwdNxOH3V7d4YGW0onvkRk0ZDYkA4fccX+vL9szT3Pn4fN1oi0SesLHmxtDN2Lm+0E4QbLc3CdBni1CW5cOl5bJZtI5fUjSKgq0EbLi47V0e9zna30+2eXZxTQ6HjSj2aKirxXlgpXfQO+cbqhWAufTqn6qffe55WlAZXyw8znfAAWx0l/QlxfUKqLfn5ZzkRrI09y43KtBM/O8vEK5NKddZI1pfKuWQ5fiynDGcKfGquiKtTQ3XM8aNyvP4wmPporg/PCyfWpmZyYqh9dMrsvfAfx5BdFoNUCmJKnU+acT9DMy6cvEf/vBCawkLvgPr7Tio2NzvUhmVaSafJIa/pjC4o0cZau+AkFUYufIqEPCXO2Har9fDwsCN4VqiE7widEQypFKis50NJy8/lSrSmnGhhF9pS+98tg0M0qAS2SkO2RVbv5wyC3Z14Jw5Ja13GVeWgZwrUCmAL6jt8dK085dKPkd67aVm7buB+FxYU2K7aKutXPyrL0A1MpwNu8TeTzma0/JW+VVANXVatUF9hjDwJrAgpAseh5Wz3yKHFq6z+/KHaHDSOhMDcPSvbrxTky4tujxKr/ISa6YR0DH+gz6v8AdpwC7dARPTg+ELj16eQcjUqfE+CYJeynxeEznIKXy0W/mbz6qQmFS9//OgFWE/fofoJovI2jn6G5+Jf7YeYrw== sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -36,15 +36,15 @@ Search process instances ## Request -

Body

required
+

Body

Search process instances -
    filter object
    sort object[]
  • Array [
  • ]
+
    filter object
    sort object[]
  • Array [
  • ]
Success -
Schema
    items object[]
  • Array [
  • ]
+
Schema
    items object[]
  • Array [
  • ]
Data invalid diff --git a/docs/apis-tools/operate-api/specifications/search-2.api.mdx b/docs/apis-tools/operate-api/specifications/search-2.api.mdx index 6dd740755aa..56feec86ac4 100644 --- a/docs/apis-tools/operate-api/specifications/search-2.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search-2.api.mdx @@ -5,7 +5,7 @@ description: "Search process definitions" sidebar_label: "Search process definitions" hide_title: true hide_table_of_contents: true -api: eJzlWFtv2zYU/isEX7J1jm9N2tQYBriJA6Qbmiz2tgcnGGiJttlIpEpSST3B/33nkLQsW0qaDHtZhwCxSZ4bz+U7hy6oZQtDB1N6pVXEjTnjcyGFFUrS2xZVGdcMFxcxHVDDmY6Wf/Zpi2r+OefGvlfxig4KGnMTaZE5tgEdOzrCv7A0S7gB8khJy6VFUpZliYic0M4ng/QFNdGSpwy/2VXGQYKafeKRBcZMowlWgBQ4nYvEcv11uju+qhAJ0L0AthadK50y67feHNF1i0qW8gqpsVrIBR7cc22Et+5JMa/7SD3LUhkciJ5qkAfXZ9I2HuKpsAlu1YMAh0b8xZ9ph4/QcL7rJaY1WwG9sDw1de+hAUZp+3yOelx40nxtpeMdU8JJi3KZp5h2w/EprM5G8HFb9cQYDVpXd37NuV41OqhMNNAzTJJ6QsImyTwnEdJYJuEr+S7mc5YnlqCDiTCk1/0ebLlnSY7+Rt1jPFFzornNteQxSYSxjyZ8XQWTceB1nCjKKTuu6AnxPXb6QhheIB4jR2Yr4jK5KtWJmpbBoYEgRMQ5fn27UepFhUR7nvaWV43EXMYQ1Y0V/qomuK7pqq2vWucSYs+8jC1eYl7VMSU/RMNAyM02Em1yI8lEkQW3xC45kfyL9aSRylZuy5mPHAco83dcmQPQZBXsbCvuwBO2b6Qz2WQ8EvMAdcQsVZ7EBOoVjEWhFcZd0n/kLx/NPQCY0lOWJJC1oWqAvt8/7r199+6k9/rNydv+Sc8X3bnD1W061b0cKMCdARh3cm8X/ir2bwG7xNNeiWjH3YZ77YvaT1cHCdh9hOZAb3XO3YbJlDQeAvrdbkOa5FFwwb/Ui/ah8YWQ+S21qNA/fGG8rO9YZVnyLEdUTLj2NdxkCZIdNcX/jFkGJQuZKeLHkwBiNEt4+sNLkwEwx+bmmZFIwegAZbUYbOCrOUBu46ngjLRWunTD64Y6Vnom4pjLXR+86rz671/3qH7dj8qSc5XL+Ju77nFTkg+vLkglnQl3DP+DbHejb5RrYVeum8ygDXJ96EB2ertuFeACdSe4W0GbfHKEiEtAwXaRcrtU2JkyZZwrmF3CqnPf6wSGwwpDxzdgivZoBGhnTq4B42jh/bsedDrFEoStB0UGsLl2DVMLNgsjLJ752LrxFDgTFbFk6fXvmj6BSQIP3JADEwpOFpgFXnsb/ZiVM+VG3En3pNsoCUkfkbLNja2cpbVZoxxP3Chp7acUH6sx0vlLb+KzbR6Z+Jlj9/D9j15eja6Hk9HheDQeX1x+xLaC6gIfNrhKzEspwURnEK49Ed1Qn2/S9cMfE5daWArX27ftyL8sdkcZp6FbGlY+acp+3K112y3RtslWUt3PQ9398a1wvtqfkEpRe6MROmSu3N1DfVy61zsnV/kMah7jUI+WIswNRvBhxb0flCOFl7YwONafHEKSjViodZIqSHyFBjlOq1WOabxUynojEXpANJrlqwAjYqAEHh4e2hFLAZtZGxSiG8BIDkMc0gbf/hJ2WnvMsYpMyS2UW3c0n3PNwcpOEGQ6O2MS7bW77a4vCWNTJiuKnoSAHZeVqWXhodDJEibcG9TZVwR0mNL7nkfNfXygmxjjryu+0Ke0KGbM8N90sl7j9md86CJSbXHBoxhdchb7zPBJSE89sB9O0KRy7q7PtYiAnmMI0c7sk7S3FdC7uhxPsFrC7zypipFHswf8DQj+D+gN/GEyZv5+QOT2C5owucgd8lMvF+uL5eif0od75ehutql/uapY+eN7RwAPtTsuf4IDfxuLS/8u+BvLQG6S +api: 
eJzlWN9v2zYQ/leIe8nWKf6RJm0qDAPcNAXaDU1We9tDEgy0dLbZUqRKUkk9wf/7cCQty7aaJsNetr5Z5N3x+N3dd0fX4PjcQnoFl0ZnaO0rnAklnNAKbhLQJRpOH29ySMEiN9nizyNIwOCnCq17qfMlpDXkaDMjSq+WwtjLMfzMi1KihQQyrRwqR6K8LKXIvNH+B0vyNdhsgQWnX25ZIqSgpx8wc5BAacgFJ9DS7kxIh+brch9x2RISyuEcDSQw06bgLiw9O4ZVAooX2BK1zgg1p41bNFYE7+418/SoJT3h805j07JQEV8CskPCoeLKdW7SrnCSlvZjtErAir/wgW6GAI5m2yByY/gSEhAOC7sPLjlgtXEP19gPG8rua2uTb7kSdxJAVRWUlaPxGSTw6nx8BjdtJMbk0Kq98muFZtkJUJOHaQ0jKffzdSQlK4MmE8o6rjK07LscZ7ySjhHATFg2HHwPCdxyWRHedPaYdvSMGXSVUZgzKaz7Yj3sH8FVHnW9Jpnyh520zonxPfHnxTA8wjxFjk2XzCd626o3ddUEB6JAjIgHfnWzPjSYion2sNOTcDQJo8qFmq+9CFe1EbquqyZf9c4nxI57JZ8/xr02MI0+M2gr6ewmEj12rdhEszk65hbIFH52QTTT5dIvefdJ44Bs/k5f9oAJ5TQ7aFXcQRDsXSvvsi0xE7PIhMwudCVzVnCXLbzRluK26D/CK0RzhwCu4IxLiTmLVQMJHB2dDJ+/eHE6fPrs9PnR6TAU3WtPu5t02kc5SkyXLDLhVu5t01/L/w2fN3Q7bBjtZNBxr11Tu+m68pxg0JZa2VDyR4NBR1pUWbzyv9SadqnwkRT5DXWs2E5CnTyuDTntuHwQTi0X3oeS7vKExI670uMVd5wJdculyL+cI6XRU4nFD4/NFeu4q+wDA1WgtZHZ9mKwZrPuAPmF+4Jzbow2DQxPO8pam6nIc1TbGDzpP/nvX/d4/7rvtGOvdaXy/911T7qSfHT5hrXSmaFX+Aay3U/CWWWEW/rmMkVu0Bx6Dr66WSU1ZFp/FOi/bpL7J4q8IRTqJgW6haZGVWrroeBuASn0b4f9qHDYUuiHfgzkjyFG9u5URkIKdcB3lfb79UJbt0rrUhu38v3TCD6NEy3thdj6aRVSkDrjchHO33Z9skBGG37m0TM/aFAWhNN7hGPZjJhrc6eD00GnJRL9gpVNbmzsLJwrO+0E4U5LqzC0hFiNSS5ceh2fTfMoxc9I3SO0R7i4PH8/mpwfjs/H4zcX76it0HFRjxpcK+aNleiid4i+gxCspV+v0/XtHxOfWlQK7zcv4fPw0NiebPwJg8ax5oXTtOvBdjPeSOz04M3GpvW2CiAMTYPdGa/2CO6OUY2pnfmJYJppj0ismgv/DwCyy2oqRUbR2Y+hZtxPU4xnTtyGaTrTBIXDvONdIhRbm51pwwqthNPkkNd0RleU3AutXXCSCIlnPi1DbVCcbNrv393d9TJeVCrnvUwXBIMUGSrrYxAR/yWuJDvKuc5soy20/+4bnKFBlWE/GrL9rdkKhr1BbxAKxbqCq9ZB9xLDFmRNwjn87Pql5MI/VL1/deSMK7gdBi7dZQ1Yx5j+oQnlfwV1PeUWfzNytaLlT/QaJv7asEXgNlggz0NmhNSEs0D3hxNyqRnO94dh4sWgMcoyLN29sjctKry8GE8oneN/RYXOScfwO/ofid9BCtdwDZSMZbhfWof1GiRX88r3Awh2qep4Rfg0GO4Uqb/ZmhXUsuXljy+9AJvoj6h+giTextFneDz8DaxJhiw= sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -36,15 +36,15 @@ Search process definitions ## Request -

Body

required
+

Body

Search examples -
    filter object
    sort object[]
  • Array [
  • ]
+
    filter object
    sort object[]
  • Array [
  • ]
Success -
Schema
    items object[]
  • Array [
  • ]
+
Schema
    items object[]
  • Array [
  • ]
Data invalid diff --git a/docs/apis-tools/operate-api/specifications/search-3.api.mdx b/docs/apis-tools/operate-api/specifications/search-3.api.mdx index dd38bd07ef8..2c30e8922de 100644 --- a/docs/apis-tools/operate-api/specifications/search-3.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search-3.api.mdx @@ -5,7 +5,7 @@ description: "Search incidents" sidebar_label: "Search incidents" hide_title: true hide_table_of_contents: true -api: eJztWFtz2kYU/is7eiFJHRAY2+DpdIYY2SWxgQJ2MnU9zCItRomkVXZXdijDf+85u0III1+Y9iFtM54x0u7Zc/nOdbWwFL2V1vG11Ylc32ORsm72LB4zQZXPo45nHVuSUeHOxvvWniXY14RJ9Y57c+t4YXlMusKPkRLohpqO+CkjCfQujxTyBFoax4Hvaq6VzxIPLCzpzlhI8UnNYwYs+OQzcxUcjAXqoHwmcXfqB4qJ5+m+sHmOyAfZt3Bsz5pyEVJllg7r1lKfc5mUbTb1Ix91+rDr0U4kFY1c9vKDhiQjlUr40S1QsigJ0QOX3WHfOemcdpw2rF52P3R7H7vw1OmNL1r9fqd7NnYGg94Alt733o27vfHAGQ06zhAWTnrddmfU6XUzEufTaNA6GY2vWueXTrZ60jo/d9pj59y5cLqjbPmy+2ur29Y7uDJ2rmAX1i+c4bB15oyHnd+Bx6cTx2lr5VI2bVB3mBe6XkCxrQ2FTnuDC1B6ND7tXXbb1g0gEgKO9LYAFNhzBdPBMvLDYgKAXz2JJ1jfuXLQis7ZoDXSig+cYe/8Sj/2HYCse6YV+cwnO/iRRTRSmBpbWuGurwJcyvIJVfX/ZM8y369pUp1ErelmvFMh6BzofcVCuZ0HKFdyoV5+YjvDWFBkENQC4W2osg3z8ER7Hn5u8gAMUaFlfuW3hIl5Hhf2jYZxYDRoBcF2SRkwlYhIEhoE67pCXnlsSpNAEaG3SeBLRRBj4ktStV+XQZ87GiSIOco3XEjNJhkaxWJC+k1TrSTlGaVOrNnIEW0jkznJYCvmt1YZvcM8gmQs8gBAPF3Kx3hpQ5Z25nXml81syJxiUF9q2E91jdxJKVNWQS3UBd1bKpNTPExKaWaWiEujEpjKSCKBEOI1PUVo5Gmr8mpv1eniurLM6Zux2dJ3bZDRTZOWigp3ae8l+Jaf1PSRwljcJ6xa7aB61Gw2qvuHjcPq4ZGVJeBOPusDxKho2reK/RUDkXZRItGuWLA7nycyDX7YKqHkK7RMloi2kLzS6f96M37zleU6b8HRUeMgF0MGTQ13nFbn3SPp73orEw9BJ3i4Nnvb2u/MsQ9q+HX1sH7YtOt246DebO41bfuo2mzWDupH9f1aowG463TAycoHBK1jJRKmF2TMI2mKY822C+atxEUT/rkx62Gv2LGH/Ji+fkxf39/0lSawqRi7jVSKKxq8SK2c5AGTMJrInAK4Wy9K4DZVFIoo1C7fezyLIeInAQt/2jWb0UOJfFZ9M3Y+FQt+mmuFm8VplQfEEYKLDIb9gkbPxcT3AKxNDN5U3vz7za1vm9vlipzyJPL+c+YeFAV5q98huXAmTB/4H0S7vsy5ifDVXE8PE5gKmHiru+Q1lL0FQMC/+Ey/wdTw7MeUkKkZx/kj5lIDQNUM3ip31UpGVjGzh4WyxR0TUotOBJQxa2GwXB5XKosZsFgeL2KojEs9PgmfTtKbGO4ZP+pbFpwMuEuDmZG6qeZoxghuRDRkhE+Jgnf0uJFe1h07m+5X7Bp2wy7khKSPcFnHwZrPTKm4kI8hLuS0NAOa8csQ6YzRK1+s+0PsY1Pas9A0eO/1HWxjb4fQlKGxYudAcek50DDv34xLqqJWCN8NkbWiPl2F5vuPIx1GGPaD9Uc2x1yQNwdbLcF+bIqyi2ckexW8DwadLBXWHXyz96/X05a/7uyrvm3nu3J+RtA3ZvvhQLzQLng4VGdiVuN0y1yTEOcp15CmKdbTXycZ6ScTKBvo3u0g4ITq4Rh+lH/H9G3C5YglXjxSgMgq1/GJrNjiHTfkgChHhfRJJXiC2THjXKUYQfUC1qiWSS50tITMur+/L7s0hPJOyyAQYQAlGQzySJvG0nm6svfgsMddmZ32uX6vCDaFqxVoWUkZyQpyxeQ2xlbLdtk2mSZVSKOcoIIqsgFUFqeKfVOVOKAQ08tUq0VaYK6tu6qO9jWPtMiAG02tuLYWiwmV7FIEyyUuf8UvPVjY1qXFFD1rxqhnosDEsXVi+sDbESqSXeS27zFYMM2JFng2Vk/S3uSqZb83xGF6kn6zDrmHZwS9x+/Z8P/Y+gP+MPA0KLoa6PWFFdDoNjHZYfhiitIEUcmQe5DR2rJVCYnmOS1/fqcJyIh/YdEvsGGsUfiqvwcs/wI+Wtph +api: 
eJztWG1v2zYQ/ivEfXG7qbGc9xjDANdWOrWp7NlOWiwNDFqiY7aSqJJU0szwfx+OlGU5VpoG24d1qz9Z5PHuufcjF6DptYL2JfhpyCOWarhyQGRMUs1F6kfQBsWoDOeTPXBAss85U/qliO6gvYCIqVDyDCmhDSNDR3jBSIEDoUg18mwvgGZZzEPDtflR4YEFqHDOEor/9F3GoA1i+pGFGhzIJGLQnCncnfFYM/k43Sd2VyHiqWbXTIIDMyETqu3S4T4szbmQKdVjM55yxPTmqUf9VGmahuzbD1qSklRpydNrcICleYIeOA9GA6/rn/peDxw4D94E/XcBOOD3J287g4EfvJp4w2F/CA687r+cBP3J0BsPfW8EDnjvve752O8HkzN/NPYCb7i53+0HPd/sr1h478fDTnc8ueicnXvlardzdub1Jt6Z99YLxuXyefBbJ+iZHVyZeBdeMAYH3nqjUeeVNxn5f3gT733X83oGfMGm53X9UVXoegHFdjYAnfaHbydBfzw57Z8HPbhaOpAwpeh1jdGWDoSSmWAa86SeQGmqv2rvTnfsX3iohf9q2Bkb4ENv1D+7MH8HXtDzg1cGyEcxfYKfWUpTjamzhQp3uY5xqcw3hMr/ZI8y39s1pCbJOrPNfKBS0jtwgGuWqO08QblKSP3tJ7YzkMV1CjkgZLQBZdvMo67x/KgLV1UDjBDQsrrye87kXdUu7AtNstgi6MTxdskZMp3LVBEax+u6Q55FbEbzWBNptknMlSZoY8IVabnPd8CBGxrnaHOUb7mQXZeU1qgXk9AvhmolqcqocOKuixxRNzK9I6XZ6vmtIaN3WESQjKURT6/xdKMa440NWcaZl6VfNrOhdIq1+tKY/dTU0CeBsmWXRQYLurexQ07xMGkUmdkgIU0bmkwZyRWLyEzI4hShaWS0qsLequP1dWVZwVuy2cK7VshiM6SNusLecL7FvjtfRfpAYazvI7C7e9A6Ojk5bu0dHh+2Do+gTMAn+WxArxkCLfpavb8yem1dlCvUK5PshotcFcHPItJAyReomWoQoyF5ZtL/+Wb8VivLZVWDo6Pjg0oMWWsac2dFdX56JP1db5XiIzKTIlmrva3tv8yx92r4Zetw//DE3XePD/ZPTpwT1z1qnZzsHuwf7e/tHh9fLc0PRy+ViVTZYrjrujXzVx4i5H9u7LrfG57YM35MYz+mse9vGisS2laQp41YWmgafxOsiuQhU3msVQUA7u7XJXiPakp4ekNjHj2c5ZkU05glPz8129FDuXoUvh1DvxYLvMjF2s36tKsaxJNSyNIMezWNX8gpjyKWbtrgp+ZP37+6+9vqBkKTU5Gn0X9O3YO6IO8MfFIJZ8LMgf9BtJvLXZhLru/MNDFlVDL5wnTRy6uls4BQiE+cma8r5/HHl4TpucB5JBPKGIDqObShedNqlmRNO4sAypY3TCojOpcxtGFhbblsN5uLuVB62V5kQuqlGackp9PiZoZ71o/m1gVtiEVI47mVuglzPGcEN1KaMCJmRM8ZQY9b6Tumo5fT/ordsXvs1nJC0ge4rONgzWeudVbLxxLXclragc36ZYR0VumVL9b9IePYlBxA1aAN/YGHbezFyBthj8XOgeKKc0tnw78llwKiAYTflghW1Ker0Hz9bmzCCMN+uH6U8+yFeXPQNRLch6Yst36GclfBe28QKlNh3cE3e/96vWj5686+6ttutStXZwRzg3bvD8gL44L7Q3YpZjVed+y1Ce08E8akRYr1zWsmI4N8GvMQ3bsdBIJQMzwTGmp+w8ztIhRoS7yIFAYiq1zHf2TFFu+8iUi5FgjInNRS5JgdcyF0YSORahqauLbJhY5W7Wbz9vZ2J6RJnkZ0JxQJmiHmIUuVcWIRS2fFinPvcCRCVZ7mwnw3JZsxydKQNQtGqolcMbmtsq0dd8e1maZ0QtOKoJoqsmGoMk41+6KbWUx5iowMqkVRYC7hpmWifc2jKDJXTlErLmGxmFLFzmW8XOLyZ3z5wcK2Li226MGc0chGgY1j6No+8GKMQMqL3fY9BwumPdEJQ5bpr9JeVarloD/CYXpavHEnIsIzkt7i+ze9hTZ8gA+AgWeMYqqBWV9ATNPr3GaH5YspSnO0Smm5exltNFuVkPSugvKXl4aAjMUnlv4KTqGNxk/zPrD8C/QS51o= sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -33,15 +33,15 @@ Search incidents ## Request -

Body

required
+

Body

Search incidents -
    filter object
    sort object[]
  • Array [
  • ]
+
    filter object
    sort object[]
  • Array [
  • ]
Success -
Schema
    items object[]
  • Array [
  • ]
+
Schema
    items object[]
  • Array [
  • ]
Data invalid diff --git a/docs/apis-tools/operate-api/specifications/search-4.api.mdx b/docs/apis-tools/operate-api/specifications/search-4.api.mdx index 86413414c9d..cac9c73f3f8 100644 --- a/docs/apis-tools/operate-api/specifications/search-4.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search-4.api.mdx @@ -5,7 +5,7 @@ description: "Search flownode-instances" sidebar_label: "Search flownode-instances" hide_title: true hide_table_of_contents: true -api: eJztWW1z2jgQ/isaf0nbI8EQkkDm5mYccK5cqaEYkuvkMozAIrg1FpXlJBzDf79dydgGnDa56Yd76XSmIGm1+2h39WiXrAxJ7yLj/Ma4DPhDyD3WDiNJwwkzbksGXzBBpc/DtmecGxGjYjIb1YySIdiXmEXygntL43xleCyaCH+BkiDnKjkyTRQe+onGCDZOeChZKHETXSwCf6LUlz9FuHNlRJMZm1P8JpcLBrr4+BObSNi4EAhG+qAFVqd+IJn4ttxntswJ+WD7DraVjCkXcyr11GnNWKt9gDDanP7dSze22NQPfTzM87eCKSFbVLKcfCSFH97hKgu9J9fQtw4Gy/vqskPnxfv9cOJ7EIfnY9Uiu6oQZTzH9Bk6bs9uti/bdgtme/1u03Zd+OYOL0bZyL6yncFoe84dWP3BSK3AqO0M7P57u9W2BvaoaQ2ab4uXBm/73et06aI7dFpW/2M6YTut9Ltr96/aTdhiue9g2LebdvsqHQ5hefP9veUMrc5mlHzYvzc7Qxd3/AqGr62PCsr+XM/qW52O3clN6eNeWK7dys269oeh7QCgy073Gq0OO4P2qO2AI3D2ottCqSYoG1nNQfuqPcDxBdhzwGOj/rCTgneb/XZvkI7w1JtjOe+c7rVj3Oo8K8qjLHjKjI1Gu+97HXugYojObjsWDm5zKZPTM+Y8YDRU6cFCGsrCdMRVXwY4dblJ2w3HIDj/T/bNHDyuKlFFLNZ0++pTIegS5H3J5tE+JaD9iAv5/B37ZMOC4nvGhbcFZd+xbhNGLRs+bvOOcBHQOj/zIWZiWeQf9kjni0AjsYJgn277TMYijAgNgpRzScq55JXHpjQOJBFKjgR+JAk6nfgRqZivjwDgPQ1iDAIC0upI1SSpe4rtzemjkkryIsorSqJaNVEjHpaMlyT1Y7G+AuwYN+YRlAc2BNeimoOEGA+27KkI36TBStkzDZKOwlqF4VI9H38Pk356ABVC2Rz+YOvw2euUXRopYrbO2aahRzZpuW07A5epJ6UnfJE+Ic9EUNr3VPYKFfqqR+8Ymkqe0mI/LUBIeSSOENlCsHufx1GSc7B0gGavEF50QBRM8kpdw9dHpDnjPGLgEcIeITlRARgjU8HnmSZ998H/khMplkTO/J2My5PDTbV6UjlrNOqV49P62Vn9JBd27UoVAIT9HYKfRYc+EZzUmrdzrH2/fNc4WiqMO8x5UzmtnTbMmlk/qTUapYZpnlUajepJ7ax2XK3XwVUqUbHG8+GsqUXBogUPI01FVdMsqPziCdZC36/O22XoFzL3j/LvR/n3o/z7B5d/CY9pAnxZTSe5pMGzsjeHoM8iKIWiAiAoVSviNLiEFF4AoGTfe5rYgAbGAZv/9FKCw9jE0TePoevfORBN8mIVXOnkKEWLxbc37xhbCC5SNxwXVCVcjH0PMmDbB2/Kb/79x63tH9fhklzyOPT+c8c9KUpyq9cmuXQmTG34H2S76ionsfDlUhVTYyiUmDhUhcMN8N4KXMA/+0yNoJB6/i9dcyZnHOuyBY+UJ6icwah8Xynvy5d1gWYgGnHPRKTAxAIIzlhp767Py+XVDHStz1cL4My1qhOFT8dJc4hrOrKq34OdAZ/QYKbNbwMfzBjBhRAqBMKnUE0zgjmgrR+pwiZtTjbq6mbdLNSEok9oyTIj0zOTclGoRwsXalrrKlZHykU5fehNdLKXY+FjVVMyQlX8GN2e3YdH7NCFt7PddfBNQXPJPkCYj3iqJYGoAKmnTgkZG+nLTbL+dj1QiYUXoZ/9Jmrrnn27glcWzOJS03yqkDS3ysT80+3tTuWLwv1ZXQtm81sloLm5NDt1XFI5ZAXCdheSf/dz90/3/uZu17FSIdzvWhJA2y2LuuNTrkKSXNqu+jGakV48BiLC9NhPIk6o6kDgQ/r3TLVdE46xwO4scXGun/NDslELBETmHFzPEZDaKQWP8XZBfyo1SORDUI2w9OXERIngZj48PBxN6BweDHoEBtENAJJBt4SySS52kpnSzmaPT6J0t8/VuCzYFDpNQFlOFEVl1IrkoA9bOTKPTH1TIzmnYc7Q13hpy2Npwkv2KMuLgPqqnFPwVgll3Rj3lSSR9pQltAWB1exzY6xWYxqxoQjWa5z+gj9rIXlmZKWJ1Zgx6um80DfDaOq35nCAiNIeeL99RFLWOyyI9UJ+VfY2R8S9rov9wDj5o8UcDgOzgj7AJP5/bvwB/zAVlXcUv6j5lRHQ8C5Wj5Gh9eKlpzG6JyuHtzlCnWxDSuEyh/LnCyVABvwzC3+BBX0aiUP1o8v6L6yxhBk= +api: 
eJztWVtz4jYU/isavaQXb3CyZJMwnc444LR0WUOxSdpJGUbYIqhrJCrJyVKG/945krENOLtJZx962X1ZJB2d+/l0jrPGmtwr3LrD16l45CKhXa404THFYweLJZVEM8G7CW5hRYmM55MmdrCkf2RU6SuRrHBrjROqYsmWQIlbODR0aJYzfMVyjgo7OBZcU67hElkuUxYb9o3fFdxcYxXP6YLAL71aUtzCYvo7jTV28FKCMppRBaczlmoqP033nq4qRIxrek8ldvBMyAXRdutNE2/MvZgqtbX+7UsvduiMcQbGPP+q0kTqDtG0Qq+0ZPweTilPnjwD3wYQrOSjxwFZ1N9nPGYJ5fr5ulqSfVagZbaA9BkF4cBvd6+7fgc7eDDst/0wxA4OR1eTcuXf+EE02d0LI28YTcwJdnA3iPzhO7/T9SJ/0vai9o/1R9GPw/5tcXTVHwUdb/hrseEHneJ36A9vum1/EnnhW+zgod/2uzfFchT6w+3vd14w8nrbVf6f/0u7Nwrhxg9e5N96vxpVDvcG3tDr9fxeZcuae+WFfqeyG/o/j/yg7U+ue/1bkDrqRd1JNwgjD3av+h2ganu93sRrR92bbgTrq1HYDfwwnAxHvUL5sD3sDqJiBVZvzQreBv3bAI9tntXlURk8I8YHof13g54fmRiCs7uBB4txJWUqfKZCpJRwkx6UE65r0xFOmU5h63qbtluMAeXYn/STOfj61JAaYPFmu6VPpCQr7GCm6UIdQgLIV0Lq5984BBua1teZkMmOKoeODdvYwR0/bONx1REhKLSp7vycUbmq8w/9QBbL1Gripekh3A6pziRXiKRpgbmowFz0VUJnJEs1koYOpUxpBE5HTKET9+tj7OAHkmYQBFDIskOnLircUy9vQT4YqjwvVJVRHtVTFziCsWi6QoUf6/nV6A5xowkCesoTxu+BzVEOjEc78kyE74pgFehZBMlGYWPCcG2ej7+nk316aGJU2Rp/tGN8+TqVRaNlRjcV2YQnaJuWu7JL5Ur2yHnCF8UT8kwNnENPla9Qra8G5J6CqPwprffTktxbj2QKNFtK+sBEpvKcowk6ArE3oJ46QkZN9JUpw6+PUXsuhKKIcEQ/MKWBwXu6QjMpFiUnW/tUIS2Qliuk52wv46rgcHd6enZyfnl5cfL6zcX5+cVZJezWlSYAoPZnCH4ZHfJEcAppyZ5Zh375rHH0TBj3kPPu5E3zzaXbdC/OmpeXzqXrnp9cXp6eNc+br08vLsYb8w+aPLUUXFnoOXXdmk4vi6H3+Xx93T4ivxCpv7R7X9q9L+3eP7jdy3HLAt7LejgtNEmflb0VDYZUZalWNYoAVbMO0zpEE8T4A0lZ8jSwLaWYpnTx7UsBDmKTqU+aYfvdBVUqf6FqSjo3pe6wvnqrjvGlFLJww+uaLkTIKUsSynd98E3jm3+/uc1DcwOh0bXIePKfM/esLsm9QRdV0hlRc+F/kO1miowzyfTKNE9TSiSVr0zjcDfeOGscC/GeUbMaOy/4srWgei6gD1sKZTxB9By3cOPhpHFI37ANGQZt5AOVyiiTyRS38Np6d9NqNNZzofSmtV4KqTemL5SMTPNhEM5sZM18h1s4FTFJ51b8ruLRnCI44GRBkZghPacIcsBKPzaNTTGMbNlduBduLScgfYJLmRkln7nWy1o+lriW08Z2rTZSIdBZo7fRKV+OJYOuxsHcND+4P/CHXuS/Cv0w7PYDeFNAXH5v4+xEvOCSq2gUMk+dIcJb6uttsv50G5nEgkIYlt9AfTuj73bsRoJb32q6TzWS7k6bWH26k/2talN4uGt7wXJ/pwV0t0Wz18flnUPZIOxOHdV3v1J/dtZ396eMtQnh4ZSSK7Q7opganwkTkrxo++bjM0WDbJqyGNLjMIkEImYCQSTW7IGaMSsWEAuYxnIXV+Y3xtGW7UxItBCcaQEKmZtaigyqay6EtkoCHpLY1IUtTkgU1Wo0Hh8fj2OyyHhCjmOxADekLKZcmSTIc7GX7zh7lxMRq+I2E2bdkHRGJeUxbeSMVAO4AjhYY0+O3WPXVqrSC8Irgj6GSzseKxJe0w+6sUwJM+2cUW+dQ9YdfjjJE+mAWQ5bYydHnzu8Xk+JoiOZbjaw/Qd8xgLwLMHKAiueU5LYvLCVgdv2rXkVgUbFzHs4PgIo2xteHNOl/ijtuALEg34I88A0/yPFQiRwR5JH+AMGecQt/Bv+DUMqGu8YfDH7a5wSfp+ZxwhbvlD0JAP3lO3wLkYYy7agxFcVLb+7MgQoEu8p/x47uTUaluYjy+YvhYB+Tg== sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -36,7 +36,7 @@ Search flownode-instances ## Request -

Body

required
+

Body

Search flownode-instances diff --git a/docs/apis-tools/operate-api/specifications/search-5.api.mdx b/docs/apis-tools/operate-api/specifications/search-5.api.mdx index 6fc8223eeaa..30256e1e35f 100644 --- a/docs/apis-tools/operate-api/specifications/search-5.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search-5.api.mdx @@ -5,7 +5,7 @@ description: "Search decision requirements" sidebar_label: "Search decision requirements" hide_title: true hide_table_of_contents: true -api: eJztWFlv4zYQ/isEX9JuHR/ZOM0GRQFnkwDZFkkau+1DNihoaWxzI4laknLWNfzfO0Pqsi3nWPSlBwLEFjkz/DjHNyMvuRVTw0/u+BkE0kiV3MLnTGqIIbGG37e4SkELixuXIT/hBoQOZn/0eYtrFARjT1W44CdLHoIJtExJEuWGTo7BFxGnERgUD1Ri0SaJijSNZOCMdj4Zkl9yE8wgFvTNLlJAC2r8CQKLiqkmCFaiFdydyMiCfl5OhjUZY7VMpnzV4g+wqK1LhDRFay0+UToW1i8dHZJk2OCPy2ajiYihcWMO2kh/vydPfHtA0hqMynQAV7vMof9EYhtB0K60ES01BhL3jfwTXojER3kwWfe00FosUF5aiM12BAiDUdq+XGM7thA1e1jpcA1KvtPikGQxZe9g+B6fzs7x477ujCEBWtVXfslAL3b5qMxXPGoQRdt5jYusyAyma9rsmxAmIossIz8zaViv+y1CmosoI7cThCHtqAnq2UwnELJIGruzdpqPEUmY6zttMucO7NfOykPdd2fmEXnlERRIJkwASYiOZuMFc0leP8MZviujxnOBPFQuIqv7AoI3mmfgy7G0PBBSWEfSepUjWs+idZmzATcV06+Du+W3NbRktkLb/piMFJuCZXYGLIEv1gm0WKDShVtzlyGFPTL+Gz2ZPYa1qnClKtQ9L4j23AVMigAnOcsyM1NZFDIsc4RORmuK66Ltr3KfD/YGcdzxy2SuZADsNDMyAWNYUXfUEA4O+r3v37077r09Oj7q97u+cC8cv1c5uB2AXAJdm9NrJYxrO2i7dqmqg5T03Cvpsd9tuOxOm5u57ogmTwhUtDoDz+spXtmzykG325BTWRCge/6+LrlJuK8k4v+b53pjyDubr73XdUSrrIhe5LUailsw2EnMDjAkediURmfCCiQGzHQZ7s4lDPU4gvi71+aUscJm5oXhiDGdc/rcioRM0FQS7AiTW3gqROdaK1264W0DQSg9lmEIyboP3nTe/POve7h93Stl2YXKkvBfd91+U5IPbi5ZLZ0ZOIX/QLa7uTzItLQL153G2GtB7ztSvrtftZboAvUgwT3dt14xtnC6gZ0p6nWpMs4Zws7wqTPvdUIddnxf54RAEy87AJlGYuNL79HVSaeznKHy6mSZIleuXMvVUozzcZr2fDTdmIyakQpENPPnrYMd4YRCG9QbaPKhiYXi7k9vk+fScq4tzB13j7uNlkh0h5UqGyo7M2vTRjteuNHSyg8/PjpDkvOXLiJSdYxU/gTUMnzb49c357eD0fn+8Hw4vLy+ol5Cx+V6iLAe5dJKDtEBomcvxAvpiyJBP/w+cslEyX9bva6f+7ec9WFIhvX3Knded3cjryTzi5TPZdvubjblSqbqxbVa8ANYd3OIXDrXbo5kpamNEYz8N1HOVXkBXbvfL4DdZGMkBQrbdnAVE24Aww8r5+AGykCRjyy+pSExuM2ikukbK8wiGbBYJdIqAuQ0rVYZZf1MKetBEjehaYLli4YCaLBiHh8f24GIkbxFGw8kNyBIwGGRZHPX/pyvtDaUQxWYUlsq99zRMAENiLKTGzKdtWGK99rddtdXkLGxSGoHPcMRa04rc9HiK0snjQTm7SpHuMzp447Pe6SnQ15ElX5R8kxwx5fLsTDwq45WK1r+TC/mRF4VcXhi4zMQoc8Fn5f8vef6/RFBKEf77YmZSNFrDDC+qX1S9r7GgjfXwxGVU/7bVqxC0tHikX73wv8n/CP+Ufo5d7had+tLHolkmrlmwL1dKkCRkT9Kn23Uq7tZQRDJoobyh1MnwEbqAZIfi7rEoRIf/RvHX2u/yG0= +api: 
eJztWG1v2zYQ/ivEfcnWqZad1l0qDAPcNgWyDU0We9uHJBho6mxzlUiVpJx6hv77cKQsy7bcJsW+7OWbRN4dH97LcyetwfG5heQG3qCQVmp1jR9KaTBH5SzcRaALNNxJrS5SSMAiN2Lx+xAiMPihROte6XQFyRpStMLIgiQhgbGXY/iR50WGFiIQWjlUjkR5UWRSeKPxH5bk12DFAnNOT25VICSgp3+gcBBBYQiCk2hpdyYzh+bzcjJtyVhnpJpDFcF7XLXWpXI4RwMRzLTJuQtLL56TZNrhj4tuo4rn2LmxRGNluN8nT3x2StIGrS6NwHfHzDlUXLlOELQrXUZLnYGsIrDyT3wgkhDl0WzX09wYvoIIpMPcHkaAMFht3MM1DmOLWbeHtUl3oNQ7EaAqc8re0fg1RPDmfPwa7trOGBOgqr3yc4lmdcxHTb4maxhl2WFej7KMbTKDmZY2+yrFGS8zx8jPTFo26H8NESx5VpLbCcKYdvSMGXSlUZiyTFp3tHa6j+EqrfW9NpnzBw5bZ9WhHvoz64g88ggKJONWoEqlmrPpivkkb5/hDd80UYNaoA6Vj0h1t4EQjNYZ+HAsUQBCCrtIokc5IvosWp85e3ALPv8yuAd+20FLZrdoe7dqotkcHXMLZAo/Oi8QMaGLlV/zlyGFEzL+K73ZEyaV0+ykVagnQbB3q/wFbIFCzmqWZXahyyxlOXdi4Y22FHdFe1/kvhDsPeK4gQu11FIge1VaqdBatqk7aginp8PBty9fng2evTh7MRz2Q+G+9fy+zcHDANQS0xWr6XUrPF2xI7TdutS2gzT0PGjocdjvuOxRm/u5XnmmMWgLrWxgkdN+vyOHSiHQ/o1dcZ9gH0m8/zfL3UZQd7JQa4/rgE47nj3Iay0U12jLzNkjYEjyeVcaveGOM6mWPJPp8VwqjJ5mmH/z2JyyjrvSPjAcOVpb0+VBJKSyjitxJEx+4VMhOjdGm8YNzzoIQZupTFNUuz54Ej/551/3+eF132nH3upSpf+66w67knx0dcFa6czQK/wHst3P4aI00q18N5oiN2ieelK+uauiNQit30v0b3fRI8YUoBu4habeVmjrncHdAhKIl4M4NWkc+jgQAkO87AGUJoME1sGjVRLH64W2rkrWhTau8i3WSD6tx2faC9H0YzEkkGnBs0U4bxfsZIGMNqg30KRDEwrFPZzeI88VzRy7MXfWP+t3WiLRI1a22bC1s3Cu6LQThDstVWHYCdEZk1y49CYi245RyB+RWkZoe3B5dX49mpw/HZ+PxxeX76iX0HG1XhXtRLmxUkP0gOg9CMFG+u0mQX/4beKTiZL/evt5fh6+anaHH5m2v6P8ef3jjXwrWV+keW/adn+/KW9ltr24VQth4OrvD41r79r9EawxtTdykf9m2ruqLqBL/78C2VU5zaSgsB0GVzPuBzDGhZNL9AOk0OQjhykrjPabm0qmJ7YxO9OG5VpJpwmQ13RGl5T1C61dAEncxIXP11A0FECbxPH9/X1P8LxUKe8JnZMbMilQWR+c2rU/1SvRnnKqhW20pfbvscEZGlQC49qQjXeGKRj0+r1+qCDrcq5aB32GI3ac1uSiw48uLjIuFRn1CNc1fdzAckB6JoVNVOkPUmCCG1ivp9ziLyarKlr+QB/iRF5b4gjEBgvkaciFkJfwOnD90wlBaEb5w4mZSDFojITAwn1S9q7FgleX4wmVU/0vK9cp6Rh+T/+5+D0kcAu3QOnn3eFr3a+vIeNqXvpmAMEuFSAvyR+Nz/bq1d9sQxBq1UL53SsvwCb6ParvN3UJjl7DF8ZfAH7Cog== sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -33,7 +33,7 @@ Search decision requirements ## Request -

Body

required
+

Body

Search examples diff --git a/docs/apis-tools/operate-api/specifications/search-6.api.mdx b/docs/apis-tools/operate-api/specifications/search-6.api.mdx index 84f7b4a4691..eefeae0235d 100644 --- a/docs/apis-tools/operate-api/specifications/search-6.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search-6.api.mdx @@ -5,7 +5,7 @@ description: "Search decision instances" sidebar_label: "Search decision instances" hide_title: true hide_table_of_contents: true -api: eJztWW1vGjkQ/isrf+ldj8CSllwanU6iCZG4Rkku0PYkiiqza4Kvi721vUk4xH+/GXvfWBZK2n64lyhSYO3xzHhm/MzjZUkMvdXkZETOWMA1l6IvtKEiYGTcIDJmihocDMkJ0YyqYPbxiDSIYp8Tps1rGS7IyZKETAeKxygJcgMr57EHOo8jpkE8kMIwYVCUxnHEA6u09adG+SXRwYzNKX4zi5iBBjn5kwUGFsYKXTActMDslEeGqS/L8bAko43i4pasGuQTW5TGObh0C9oaZCrVnBo3dPQSJSEChm3qaBAmkjkG67zbv+idwUDvXffibXdov7+9fHN59f7Sfhtc9077530YH4M+dkejxO75rFbxmsg55VGi6qVgnwHT+oxNueAo/GbvPaVLs/TuvzDMCqM+qtl04dMXBC/pvH5zmcA7pjR3lbHTvReH5VVDK7o9ZWeQkEH/6vLjsPv6ogcTF/1h76Z78bH3x/VNb4BTlcwVGcUcKqaTyOzKHQv7Ik6MLslQpegCFHHD5vqrC1dsixjarZuBKcNNhEPVY21dtBK511eJ+We4DUFOIralfOyUCNnDXnWxIwJuu06ECSpMrcEdCixC8L/YngXqYLM7XYeuL4QY7WupzNcnZcpZVB9JqcI1VzaOSndwCk9nPfgYlwMxQIdW5ZHfE6YWdfHJwR/MdKNos0nAoJcdXY+nK7X3Q8imFM6ZhwH2uPba/o+kqBi0PcAZOfUUM4kSLPQirs3WLlRjg4owXWyXoi5rrVMylCa4Yw2meXiMfsydR3XARAix9SYLbw3+yoas9lGeMFIRTLNlk7IaZ/44I2kR7ulYw3mF0vVuNR4VmsberttKqvge09uv8H1rRNdcR92F680PYii9W2Y8M2OeYA/GCjS8QMYLO2Z3hgueoZF3+KSfgW0jYaQ4vc+cIOizu9Ax2J+mXMbTM5lEoQdnH/xHpaWF66LNb4qlK4MKqoxIX9xJHjDvNKJa56Zg3eFhp/3zq1fH7RdHx0edju9O9LllUkWlbmYilSiFOW3LxaLSHEBoaVMFT9to6e0cOzv+jk1bfdXKt8iDzJMrBsJGJcz15VgK7aDm0PdriioJkPl8Px5aReDv0y6f6OkTPX2ip0/09NvpacodXSN7HOc00tBor5NX8uDG1p6ucQSlXtZhMgANhQ4LoebhdmCGPE4iNv/psQCNAJnoPU/oHGAnJSMbWc34R+2kqT3S5cD0lJIqD8OLmi4r1YSHIRPrMXjeev7v3+7Lze1eSuOdy0SE/7ntduqKvHvd90rl7DG74H9Q7fbWGySKm4WldxMgq0wdWIYzgm61hBDIT5zZp3Fj30sAQffNTCJTjKW2kaBmBk+tu3Yrkz/I5VuOJBP0Rt1B27bOJAoAjixddFcnrdZyBrpWJ8sYMHNleazidJJeXnHOZdbeS2FlJAMazZz5dceHQPtxAjsLXifwGoA14Kw3Lc3J75KZumP/2K/VhKJbtBSVUeiZGRPX6nHCtZpW7ibhMjVAObfpLDtF54g5MrKsaZKra+Amw97BIGcmHM2l68DDcsZzLamL1iF8dkIkkz7PivW390NbWHgQboo3zT33TmH9hsHDMpuy9vycIRdEuEpxSwRsk9kWk/WE1q+nq/46GS201HPQzXlHPTfHc8bpV/nkJm3MeODGBgv6N6pGTVTspgQoP+PjOjb2WC0Fe6qMONLko5WC8JSAx10X/eqVd2lrt3qBzFVXLo9YoFNpazFFqyv72wbzrpMJIDCei83TIz1qr47wYfgds1ffQGIRQiS8tAZKb3648DK1gLzeXEK2JTpkVxolE4SVmZTGOYmNAFSjWw6V8IRogKT7+/tmQOfQKWkTDGIYwEkG11yUTSN9kY40KotDGeh8NZf2uaXYlCkGXrZSRbpluW5WWqTd9Ju+gyht5lSUDO0C5LWI5SfdsAfTiiMKqLBK3VumWD0id+1SbR+UlaV4DYl1sDsiy+WEavZWRasVDn/G943YNQqUdh2FzBgNXV04ECCnrske2INSVOPGvR+7kVvRhVzHZqfsuNSBrq8GQ8Su9DewuQxxjaL3WNjw/4R8gD8sRRsdC6x2fEkiKm4T24WJ04toRxMMTx7CCjjanWVoLBYlL395bQW8ofzExK8ZCAKTh0f33uRv3C2Gpg== +api: 
eJztWW1PGzkQ/ivWfOGutySBFo5Gp5NSCFKuCDiStidBVJndCfF1Y29tL5CL8t9PY+9bNgsNbT/cC98Se94883jmcbIAy28MdC/hCENhhJIDaSyXIcI4AJWg5pYWI+iCQa7D6cd9CEDj5xSNfaOiOXQXEKEJtUhIErowdHIM7/ksidFAAKGSFqUlUZ4ksQid0fafhuQXYMIpzjh9svMEoQvq+k8MLQSQaArBCjS0OxGxRf1lORFVZIzVQt7AMoBPOK+sC2nxBjUEMFF6xq1f2n9FksZyi+s2AkCZzihZx73BSf8IAui/7528643c53enb0/PPpy6T8Pz/uHgeNA/gvEyALzlcerOfNRoeEXkmIs41c1SiVYhGnOEEyEFCb/d+EyZal7ezRWjHBjNWc23y5i+IHjKZ82HywXeozbCI+PR8F7uVrVGTvThkh31DwfDwdnpx1HvzUkfAjgZjPoXvZOP/T/OL/pD2qpVrqwo1VCjSWP7WO0wGsgktaYiw7XmcwhAWJyZrwaufChj5LdpZxmAFTampfq1diE6iSLqs9T+M8IOQKcxPgAftyUjvN8IF49kwB/Xi6Dk0jY6fMSA6xDiL9wQoL5t9iarresLKSb/Rmn79UWZCIybM6l0tBLK2lXpDQ8hgKP+8BDG1UQMKaBldeX3FPW8KT9F8+8uoBfH60OiF8csv7pMZJqG/RDhhKexZZRgJgzb6fwIJWLI95B21IRptKmWGLFYGPvgFGrwwWWUKTtVsuW87VUcZQXecw6zOjzFPtWOcROijIS8YddzttL+qo6c9cuiYFATzKrlirIc5/F4JxkINwws8FGRdHNYwZNSE2wcukNSLfaE33xF7A9mdCV0sl2G3rqSI8Vu0DI7RSbx3jqBgIUqmbs1dzJS2CIn7+mb2WJCWsW2Krd3ywu2rqQ7hUkwFJOMyzAzVWkcsRm34dQZrSiuira+KZceBrWucgkDeatEiOww5sYUriCA3d29nZ9fvz7Yebl/sL+31/E3+tgxqRKp65XIJCppzsZyqVTZG0TVQ5U8bW2k7xS9c6/zyKGdvTryl671aDSJksa3lt1OpwFEaUhM5/vxznrH/T7j8ZmOPtPRZzr6TEe/nY5mXNEPrqdxTKssjze6eZUILhz2TEMgJPWqqScfccuZkLc8FtHDjTnR6jrG2U9PbdDUIFOz4Q2doTEZ+Viras43Gjdt45WuJqavtdJFGl42TFWlr0UUoVzNwYv2i3//cV+tH/dUWXasUhn954671wTy3vmAVeDM0Cn8D9DuXrlhqoWdOzp3jVyj3nYM53K8DBYQKvVJoPs2DjYl/UDh26kiZpgo4zLB7RS60L7daefy24V825NioGj0LWrjgkl1DF1Y+Owuu+32YqqMXXYXidJ26XirFvw6e6zSnq+se4dCF2IV8njq3a8GPpoiow2aLPR8INpPGPDeW47mFG/H3NxB56DTaIlEH7BSIqO0M7U2abTjhRstLf3LwVdqSHL+0Hl1ysmRCGJk+dCEs/P+RW/U3x4WzESQu0xvGaxUvLCShegCou9eCHLp4xysv30YOWDRRbgof1nu+98QVl8UIqqyKeevUzDkkgjXKW6FgK0z23KzmdB2mulqZ5WMllaaOej6vqee6+sF4+zU+eQ6bcx54NoBS/p3Wc+arPnNCFBxx8dNbOypVkr2VFvxpKlDXkrCU2k8/nnYqT9xFw679QdjYbr2WCSATpTDYtatztx/GcjO0+tYhHQv1m+PYtw9HRkPrbhF99QNFYHQYsQyDFR+6RGS5WYnSrOZksIqCshpWq1SaitTpawPkgYBD11D8F2Jbojpttt3d3etkM9SGfFWqGaUhliEKI1Df5bpk2wlqClHKjSFtlDue1vjBDXKENuZIdN2XDeHFuy0Oq2Ob1HGzrisOHqsIa9krLjpFu9tO4m5kGTRhbfIevUl3O5UsL1dNZb163GQtd1LWCyuucF3Ol4uafkz/b5IU6Ps0n6iwBR55HHhmwAc+iG77S5Kica1dz9NI6/RC0NM7KOy48oEOj8bjqh3Zf95zVREOprfEbD5HXThCq6AoOiy4xqrW19AzOVN6qYweLvU7XhK6SlSWGuO7mR5N5bzSpS/vHECbKQ+ofw1b4Jg6av/neRvuEiA2w== sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -36,7 +36,7 @@ Search decision instances ## Request -

Body

required
+

Body

Search examples diff --git a/docs/apis-tools/operate-api/specifications/search-7.api.mdx b/docs/apis-tools/operate-api/specifications/search-7.api.mdx index cec8aac2145..9a22cd167ee 100644 --- a/docs/apis-tools/operate-api/specifications/search-7.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search-7.api.mdx @@ -5,7 +5,7 @@ description: "Search decision definitions" sidebar_label: "Search decision definitions" hide_title: true hide_table_of_contents: true -api: eJztWFtv2zYU/isEX7J1ji0ncZoawwC3cYBsQ5LFXveQGgMt0TYbSVRJKqkn+L/vHFKSJVtO7WIv3YYAsUiey8dzlzJq2FzT/gO95L7QQsaXfCZiYeCJTlpUJlwxXFwHtE81Z8pf/Pmatqjin1KuzVsZLGk/owHXvhKJZevTkaUj/DOLkpBrIPdlbHhskJQlSSh8K7TzUSN9RrW/4BHDJ7NMOEiQ04/cN8CYKIRgBEiB05kIDVdfphNBhUYbJeI5XbXoI19W9gVAmoO0Fp1JFTHjts7PkDLIrXHdLChmEW88eOJKC3enF7WcnlS13IMxheIRWEjv0NhE+svB16ly3+y6QxPx+4PuBa5msWm8CZ4KE+JWQ8TBqRZ/8T3VuHAczOohwZRiS6AXhkd6O1QQgZbK7M+xHYQ8bHaSVEENSn7SojxOI0yywegdrC6H8DOpmmKEgFbVnd9SrpbNFirTChQNwnA7/WCTFE6Eh4JXk+9gwdLQEDQyEZp0ve8BzxMLU7Q56h/hiZwRxU2qYh6QUGizM8MbtbA4yNktM0qz+noVVbmbe1Zl7o3DNKAPCdM+jwOwMZkuiU3Kqgor96F0GM0Jci9ZZ6wmBQInNA++vaG0HA6krwNpHWSG1hfB2pjZQJuw+Veh3bJaDSxKXYNtf4jHksy5IWbBScw/G0vQIr5MlnbP3gUZjlD4e1zpIwJJKmFnnaFHjhDkWfw6AXyzvA8QvZBpGBDIb0COQiuMddL2V1nPuXqjYrieF3Cr8FLoBTCcnPS6r9+8ueienl+c93qeS9Mr23bWYbdt9JwC7Jl3gDUx7FW6SQX9upmVXaNbFsCe13CrmpzNMLblQ7maDcRGpdxu6AQ876rFiec1xEvq+1z/g016s4weWF7/793faO/OG6urAIc1ZCMNC/eyRwXDPdfQy3QjFKQ7a4r2S2YYFCdIQhHsDnmIyGnIox8ODX1tmEn1npaOIOvyCr7lRxGDqNhvPnQbL7lnqJRUpRlOG+qVVFMRBDyu2+BV59W3f92z7eveSEOuZBoH/7rr9pqCfHB3TSrhTLhl+A9Eu30p8FMlzNI2zin0e66Obe94mKxaGZhAPgpuV5PW/pMTxQuYhcQunEhtbcHMAladp26n4DiucHTcrEERkcLWYgGlCsoczZyFV/1OJ1uAtFU/S6Byrux0oASb5tM9njnv2rEdOEPps3DhANTBj2GIwQNsbziN4VCDceC0t9GSSTloF+IuvAuvURKS7pCyjo61nIUxSaMcR9woaeUGMuetEdK5SxceWvePRGC3LDo3vb0b3g/Gw+PRcDS6vr3BzoLqcj5AWPV6KSWHaAHh2hHRgvqqCNif/xjb4MJkuF9/5Bi6l6763CaC6kue1efV54/1aQ6+XJfThrd7llhT7xwhdnDfbGh7cS7wql2/knluEvU2x+bMOm5zNi1Vbcyl6J2ZtI7I0/XWflPi5C6dQgnCoNgOHUmYnUrhx4gnbqdpX6IHDLyTQhmyh0XdwCdSiIXSQyIJWSgRkOU0SqaYUwspjQOJlRBEIyyXkhgeGvLx+fm57bMIWgVrg0I0A4DkMEEjbe7EX/Od1gZzIH1dcgtp1x3FZ1xxQNnJBelObdqk3bbX9lx+ahOxuKLo5YpUs1kZ6Abe0TpJyIT9WGABZnmxeqBP3UowHNfF5QULnOvqzgPNsinT/HcVrla4/Qm/SWDpXJcpV1bpgrPAxYbLAvrOdZrjMWIq33m2XyuwJDuOAfg7MS/STipF+O52NMbkzb8/RjJAHsWe8dsk/O/TD/CH4Zi4+wGR3c9oyOJ5alsRdXIx3VmKBiqNuFEd7M2KchQvKyh/fGsJyFg+8vinogrAQAtL91r2Nx50W08= +api: 
eJztWFtv2zYU/ivEecnWKb6kdZsKwwC3SYFsQ5LFXveQGAMtHVtsJVIlqaSe4P8+HFKWZVtO42Iv3fYmkefynfuRSrB8biC8hTOMhBFKnuFMSGGFkjAJQOWoOb1cxBCCQa6j5M9XEIDGTwUa+0bFCwhLiNFEWuSOLYSRo2P4mWd5igYCiJS0KC2R8jxPReSEdj8Yoi/BRAlmnJ7sIkcIQU0/YGQhgFwTBCvQ0O1MpBb1l+lE3KAxVgs5h2UAH3HROBfS4hw1BDBTOuPWH718QZRx5Y2LdkGSZ9h6cY/aCG/To1qenzS13OCnQmjMUFqzR2Mb6S8Hm9PkvtxnQxvx+4Pssii5tK2W0K2wKR21ZNwyACP+wieq8ek4nG2mBNeaLyAAYTEzu6lCCIzS9ukcu0mIaXuQlI43oFQ3AaAsMiqy4egtBHB2PnoLk6YrRgRo2Tz5rUC9aPdQXVZhCcM03S2/YZqyVRBZXPMa9l2MM16klpGTmTCs3/seArjnaUE+J/0julEzptEWWmLMUmHs3gpv1cJlXLE7ZpLm9A0aqqowD5zKKhqHaaAYMm4ilLGQczZdMFeUTRVO7m0dMKgIqii5YCwnKwReaJV8T4YSeBxEvwkkOMgNwRfBupzZQpvz+Veh3fHaBliSugbbuZNjxeZomU2QSfxsHUHAIpUv3JmzhRiOSPh7ejNHTEir2FGjQo88YedOOvwmx0jMqjnATKKKNGYZt1HihDYYN0k7X+U9H+qtjuFnXoxO4ZkwCQRwcjLov3r9+rT//OXpy8Gg58v0nRs767TbdXpFMV2wagKsiacL1pgmDfTrYVZPjX7dAAe9Fqs25Gyn8dL1D40mV9L47nDS67XkRxFFaP7BobzdNg9sp//P6m90VleD1Ff8YQPYKsvTJ/mjgeEGTZFa0wqF6F60ZfsZt5wJec9TEe9P+VyraYrZD4emvrHcFuaJns7QmKpj78RRSGO5jNov/cFj4TnXWunaDc9b+pPSUxHHKDd98Kz77Ns398WuuZfKsneqkPG/ztxBW5IPry9YI50ZOob/QLa7j4Co0MIu3KCcIteoj93suJ0sgxIipT4KdG+T4OmbEpABNlE0dXNlnC+4TSCE7n2/u+I4bnB0/W4BhEjTaHGACp1CCKX38DLsdstEGbsMy1xpu3TbgBZ8Wm3zdOej69Z0CCFVEU8TD2AT/DhBRhc03mj7oiWG8sBr75An83qxXok77Z32WiUR6R4p6+xYy0mszVvleOJWSUu/gPlojYjOG72K0Hp+5IKm5Wpyw9X1+c1wfH48Oh+NLq4uabKQuopvGWxEvZZSQXSA6N0TwYr63Sphf/5j7JKLiuFm/VPj3H9kbe5pIm5+1Dl9vc39Y31bga/f622jt3+XWFPvXSH2cF9uaXt0L+g1p36j8vzm2dtek0sXuO1dtFa1tYdSdGbKBaIq1yv3DwnZdTFNRURJsZs6inG3lTIeWXGPbnuOFEXAYsxyrdzlqm/QE1uJnSnNMiWFVQTIcVqtCqqpRCnrQVIn5JGrBl+SlB4m7HYfHh46Ec8KGfNOpDJyQyoilMaFvgrir9VJsMUcq8jU3EK5967GGWqUEXYrQaa7sW1Cv9Pr9Hx9Gptx2VD0eEfa8Fmd6BY/226ecuF+DjiAZdWsbuG+30iG401xVcOaBFXfuYWynHKDv+t0uaTjT/QPglrnuk35tgoJ8tjnhq8CeOsnzfGYMNXfOLufFdSSPccwijC3j9JOGk34+mo0puKt/jdmKiYezR/oXyR/gBDu4A4oHXNvX1j68xJSLueFG0Xg5VK584IcVDtxqzs4y1btSC4aKH984wjYWH1E+dOqC4ClV/8Z9jeJZlWE sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -36,7 +36,7 @@ Search decision definitions ## Request -

Body

required
+

Body

Search examples diff --git a/docs/apis-tools/operate-api/specifications/search.api.mdx b/docs/apis-tools/operate-api/specifications/search.api.mdx index 4abf8ac4b93..24b38ff9331 100644 --- a/docs/apis-tools/operate-api/specifications/search.api.mdx +++ b/docs/apis-tools/operate-api/specifications/search.api.mdx @@ -5,7 +5,7 @@ description: "Search variables for process instances" sidebar_label: "Search variables for process instances" hide_title: true hide_table_of_contents: true -api: eJztWOtv2zYQ/1cIfnHbObGcpo8Ew4A0TYBsQ5PFWffBywdaom02EqmSlFNP8P++u6Msy7aSOMAwYA8EiE3y7niP3z3oknsxcfx4yD8Lq8Qolfy2y00urfDK6IuEH3MnhY2nvMut/FpI5z+YZM6PS55IF1uVIx1QDYiKzSoxDuhjo73UHmlFnqcqJpm9Lw4ZSu7iqcwEfvPzXIIIM/oiYw+MuUUNvAIpcDpWqZf2abo7OW8QKbh7AmxdPjY2Ez5svT3kC+KLpXMX2nmhY/nTzowuhgt3J9cikw1S563SEzyYibRoP/G20OAnmTROR8akUmg6llpof5G08OKp8ilu1bFEldUf8kl1Xx8QKYXwZLzubWGtmAO98jJz21HAe52xfneO7fjKtM0gwKFN1lSpTrpc6iJD0J4MTmH18Qw+bpsOGKBCi+bOL4W086Zf5DeR5WnQ4CRNtwF9LX1htWMiTVeoZi8SORZF6pmlY5Yq5xn6mCnH+tFLvgouXj+ovN8u+iBqSG5wVkE7iFDEOcGfCZ2wpZ93UfRe+SnrbCO9wzpHUfSuf3R08Obw3WG/f/S2Q4JlwoSLpU7AxWw0ZwTehk6rNGxLH74plNe4GNYh5pXMKq4UvgUF7kpMMLIP2qblN98wDrDbbtuaBR28rrPPXpyafM7IEGbGrINqfcaV6zDSDHdzK2fKFA4C6yC+7iX7W4zfyLohdxnEkXfXJB5EEXiJ8Iw1WFksD1ApJG243GgXgAyELZW5iFHjv64gb+b1M/P9/zq9XacrvARUPq/4euNFupOhjZuvA8gbCuDpYRt8PgovmNLgCpU8jCEIFcjJvnsuliCyvnBPqh8aVAZgEJP2gKgKJO3Roo3HQnFmrbG1G15vu+Hc2JFKEqnXffCq9+qfb+7htrmfjGfnptDJv87cN20gP7m6YA04M0kM/wG009gXF1b5OTWrEXQjafeoRg9vF90SXGDulKQVdKvHh35qzFXVZksdsfNk0k8NNsHcOHKL8FNY9Wb9Xs3cqx8aTtqZtI4UKiwUN14GDy+Oe71yCiIWx2UO9XJBPXr54gCT8SxEl6Y04ExNLNJpuHVd+ZupZHiA1R8nAA9rxEG4fZ8aUD1wLcW9j95HrZKQ9AEpK3Ss5Ey9z1vlBOJWSYswLoRoDZAuGL2M0Kpr5Aqb37Kx8curs+uTm7O9wdlgcHH5CfsJXlfxgYbNqNdSKhVJIWpvRMSX1OdLwP742w2BC5PhevVEPAsD9vr0RDdE7Z09avbtqFa+HvqrZrzaaPRgHIaaXbeRDWGWjjYHrZKcuTmj1bI3RlT02NiQc6oUuqQ3smRXxQjKAgZqO5yGCRq94MOrmaQZPjboFZy2t/IEvrGlWEykzGjlDSpEnN6aAnE+NcYHJbE6gWhUK6QJhsxBjtzf3+/HIoPyLfbhQnQDKClhTETayrE/VzvdDebExK7mVobWPSvH0krQslcJcj0akCBNg7H9/Wg/CjnjfCZ046Kdq8Sa+2ocepj9e3kqFE1WpGtZFZAhn/XXSsAyyvgrRqgFQ16WI+HkrzZdLHD7K74EsZyt+EKp41MpkoCNgFN+Gqr/3g0qsgLg1uyMZTJwnEC8c/8o7W2jGl5dDm4woapfVDKTII8V9/hrC/w/5r/DH8KRnELZTvslT4WeFNQeeJCLKSgK9MpqIF3PWLJsWSL0vKHl9x+IgN2YO6l/gINgjcclvdAWfwKwvivT +api: 
eJztWFtv2zYU/ivEeXHbqZGcppcYw4A0TYB0Q5PFWfeQ5oGWjm22FKmSlBNP0H8fDinLsq02CTAM2CVPEc+F5/KdC12B4zMLo2v4yI3gE4lwE4Eu0HAntDrLYAQWuUnnEIHBryVa91ZnSxhVkKFNjSiID0Yw9lxs0aixEEGqlUPliJcXhRSp1xl/tiRQgU3nmHP6zy0LhBHoyWdMHURQGLLACbREnQrp0NzP9wWXHSahHM7QQARTbXLuwtGrA6i9XIrWninruErx5wcL2lQXj2BXPMcOq3VGqBkRFlyW/RRnSpVyh1mHOtFaIleejIord5b1yBJVOElHbS7JZPEH3mvui33P6lN4NN2MNjeGLyEC4TC3u1mge6027uESu/lF2edQBNpkG6Y0lAhQlTmB9mh8DBG8Oxkfw003AGMyqO6e/FqiWXbjgnc8L2Sw4EjKXUBfoiuNsoxLuUY1e5LhlJfSMePJTArrGMWYCcuGyVNYJ5euHzfR71e9n3Q0dySbpO0npOLUw59xlbFVnB9i6K1wczbYRfqADQ6T5PXw8HD/5cHrg+Hw8NXAK8aMcZuiyoSascmSefB2bFqXYV/5wLZSaHFx3aYYGp1NXn36ap+4Cz6jzH7TN4V3ruPcVJt+3zY8GNB1gz325FgXS+YdYXrKBmTWR/qyA+Yto9PC4ELo0jKDtpTOPmV/i/NbVXcNNudSQrShcT9Jbmr/R03YFlrZANz9JOnpxGVKFv51DXi7jh9Z3//35d2+3OAjoPBxzdZpx+WDHO3cfBlA3TGAqAd98HnHHWdCLbgU2bcxVBg9kZj/8FgsWcddae81PwykHK3ls/6EiAYk/dnyB99LxYkx2rRheLEbhlNtJiLLUG3G4Fn87J/v7sGuux+0Y6e6VNm/zt2XfSA/ujhjHTgz9AL/AbT7NS8tjXBLP5wmyA2a575HX9/UUQWp1l8E+q+b6J4l3w/ipmuzlY00eXJ0c01Dr9DWh4W7OYwgXgzjVjhuHxYWzQKN9QaVRsIIqhDhehTH1VxbV4+qQhtX+5m8emGMKiBayK7fymAEUqdczsOtm8ZfzZERgbo/TXw3R0Y4CLfv+QHULlgrdW+SN0mvJmL9hpY1OtZ65s4VvXoCc6+mOqwHIVtj4gtOrzK0nhqFoOG3GmxwfnFyeXR18nx8Mh6fnX+geULXNXJ1tJH1VktjojfIjzfPBCvu0xVg3/9+5cFFxXC5fhKehIV6c1vyNyT9kz3pzu2kNb5d8pthvD7ozGBnSuxO3U41hN052V6sKh/M7Z2s1b21klLEptoHpymhc/8mRnZRTqRIKVG76dSM+9WL8dSJBfqdPdUUFdqud+qECcVWaqmQcq2E02SQl3RGl4TzudYuGEndiaceoaFMKGV2FMe3t7d7Kc9LlfG9VOcUBilSVNanownsL81JtCWc6dS20kL779jgFA2qFONGkY39goTGBmeHe8leEmrGupyrzkUP7hIb4Wtx6PDOxYXkwm9W3taqaSDXsBhutIBVlulXi9ALrqGqJtzib0bWNR1/pZcftbO1XGh1MEeeBWwEnMJx6P7Pr8iQNQB3dmdqk0HiKE2xcN/lvel0w4vz8RUVVPMLSq4zkjH8ln5d4bcwgk/wCQiOPii+2v15BZKrWenHAwS9VIK8pKisF9LNivWerVqEWnas/PGtZ2BX+guqnyBqvHH06V9k9Z/w5iYI sidebar_class_name: "post api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null @@ -33,7 +33,7 @@ Search variables for process instances ## Request -

Body

required
+

Body

Search variables diff --git a/docs/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx b/docs/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx index 6d1356d49e5..e8f3d08b782 100644 --- a/docs/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx +++ b/docs/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx @@ -5,7 +5,7 @@ description: "Get sequence flows of process instance by key" sidebar_label: "Get sequence flows of process instance by key" hide_title: true hide_table_of_contents: true -api: eJzlVktv2zgQ/isET93WsZw2WwTGokAKOIXbogliFz0EOdDS2GYjkSo5ctYQ9N87Q0qOHyp297hoDhE5nPd8M+Naolp5Ob6Xt86m4P3UeFQmBfkwkLYEp1BbM83kWHr4UQG9XOf2yb/ffoKtHMhSOVUAgmMdtTR0IdbH8KYNHUuFazpn4FOnS1ZGRJIVdinKaFLozuZAOjKiHZA9dBUMpE/XUCg5riVuS1atDcIKHLEurSsURtLbC9k0DyzuS2s8eJZ4PRrx59D0rErZJsmnllQZZJaXyUv+nBhTzqkQCULh9+genTYrMkl/A3nRZ2dqNirXmeCAwOOhPVWWuU5DahNKwiKH4tV3z3I9TtjFd0hZAXFSQVDH6ChlWPl/zMyb15JcLChktYKeELhMbfb7HiPhNHB60ZgzaeKcdbLLxJvTTFxbt9BZBubf5fx/Fe7Fabh3seDApfe2cikIY1EsbWWy3wMFf/b1w9XtVOwFLCAI/Ab5IKKHtHIat2FELkA5cGc8Isf3D82gphTYRw3h9nA8KT8Aim7wiiVP3r7BKRZbEWcuzeK15Wm9gpAcnr5jmWzOk1bmrJPxSU0iTdJpPwvaJXvrNt08r1xO4nXMfjNOknptPTbjurQOG2LeKKcVVSqknN9i5ZeqyjnVuU1VHsjHgc3XIPiBNwaHhHRnjETrQ84y2zhUdzm6HPVqYtZfaHlGzrOeNWLZqycy92oK+6Wr5Iz5YtBd9Z6XRqnjbmyX4c3t5O5qPjmbTWaz6c2XbjG2cuThPiJ2WloXg0N8j0yy477uwPzx2zwAT5ulDeItAG/C7gZxWy2oqTiU04CtUGEZ0gf1BoQymUhtUebA4+sYZHwSnVpqJlFYo9Ey9IMkOlsxEtbWIrdD7G1SzW5FIHFQnlD09PQ0TFVBE1ENySAngZwEWtzM2+btc0sZHAlnNvU7aW3DPXGwBMcwTlpFPmGtDOQY7PlwNBxFVHkslNkz9F977CCLu4Ih/I1JmSsqbtO6XLf9dy8353FSHXYg0cZR5VETEtJiL93Lul4oD19d3jRMJjYXBsdz64VGzbTnM3X+UuUeTpzcjVn54q79ifWH+PXPsN6YOnybbWj8vOIbHQNww/+G5plcg8oIqexVfLkikJW4J8Orn7tpN6w+TOb0rCpO1i6hR4gPCntd+Ot9YBBz+wjm3c4h5Cu71DQ/ARnwu5g= +api: eJzlVt1v2zYQ/1eIe9o6xnLabCiEYUAKOIXXYQmSFH0I/EBTZ5uNRCrkyZkh6H8fjpSc2Fax7XGoX2R+3O/ufvfFFkitA+QPcOOdxhDmNpCyGmEhwdXoFRln5wXkEPCpQavxqnTP4cPuE+5AQq28qpDQM0YLVlUIOTzGM2Mhh1rRBiQUGLQ3NYNBDp9wJ9xK1EmlMINOCR6fGuOxgJx8gxKC3mClIG+BdjVDG0u4Rg8SVs5XitLWLxfQdQsWD7WzAQNLvJ1O+XOo+q7RrBMkaGcJLfGVN9kb/pwoU96r6AlhFV7tB/LGrqHjn4SLMT1zu1WlKQQ7hIEO9am6Lo2O1Ga1d8sSq5++BpYbMcItv6JmgNpzQMgk7wIpasI/MvPuLXQSKgxBrXHEBQ5Tz/7YYdo4dVwCGSp5a+a98zAw8e6UiSvnl6Yo0P47zv9X7l6cunubAo4c+uAar1FYR2LlGlt8H1nw81g9XN7MxSuHBUaB74CPTkJA3XhDu9gil6g8+jNukfnDopMtaOceDcbV4rhTfkQSQ+MVK+68Y41TLHci9dwKaeO4W68xksPdN4dse571MmeDTMjaR9x12YB+FtGBrfXboZ83voQc2sR+l2dZu3GBurytnacOJGyVN2pZJvL5LEV+pZqSqS6dVmXcPnbsfoOCD3hisEu0QcE5krRPmGXWcQj3fvp+OorEV7+B8pI5LzgbonoUJ10eRYrzZYjkHd9LTg/RexkatUmzsR+G1zez28v72dnd7O5ufv3nMBh7uU4eZMQepTcxGsTrdAmG21dDMv/+5T4mnrErF8X7BLyOsxvFTbMsjWZXTh12QsVhKJQms0WhbCG0q+oSuX0dJxn/EwPsynlROWvIcepHSfKu4UzYOEdcDqm2lY4xTInEToU8y56fnydaVY0t1ES7ikkojUYbIo89b3/0O/JIuHA67KWNi+vM4wo9p3HWA4WMUTmRk7Pnk+lkmrIqUKXsK0X/tcYOWNwHjPAvyupSGctaosltX38PsD1PneqwAkFCniCPinAh+1p6gLZdqoCffdl1vP3UoI+N46X0YqEWJvD/AvKVKgOeGLlvs/DDbf/E+lF8+xk26tOQ33YXC79seAUyvvbSm69bdBI2qAr00ap0cqk11vRKhkc/V9O+WX2c3YME1TBZe0KPMj4Cjprw64d4Qdy7R7S/7Q0iXrJJXfc3GfC7mA== sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx b/docs/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx index e9df1fff308..6769dce1f10 100644 --- a/docs/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx +++ b/docs/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx @@ -5,7 +5,7 @@ description: "Get decision requirements as XML by key" sidebar_label: "Get decision requirements as 
XML by key" hide_title: true hide_table_of_contents: true -api: eJzlVm1v0zAQ/iuWP/FSmu4FhCqEtIluKhtsWotAqibkJtfWW2IH2+moovx37uwka9cg+Ir40vrl7rm3584puRNLy4cz/gFiaaVWN/CjkAYyUM7y2x7XORjh8GKc8CH/maWnmwvYfD/gPZ4LIzJwYAig5Ao3KHIPG7yTCpe5cCtcJ2BjI3MCwUPUZnrBktoeM9sGe7zeojFnCuhxG68gE3xYcrfJCV8qB0swKLrQJhMuHL055lV1S+o218qCJY3DwYD+du1PijgGS6ZijVDKeWz46SIMjtZ7Fq0zUi0Rv6p6/LgLc6zWIpWJjwWs28UWeZ7K2Ocwyo2ep5C9vLOk12FLz+8gJgCUxMw7GSKxTrjC/jELR4ccXcwwPLGEjgioLgil4u7LcLAfN95Il9LRyBht2kwc7WfiTJu5TBJQuzl4Eb3498M93g/3JhQcqPRWFyYGprRjC12o5P9gweuufji5HrOtgBl4hf8gH3hoIS6MdBs/E+cgDJhXNBOHs9uqV2IK9L0Ev7t9OhrPwXXPRSYs+/bpks03LIxXHLsrTQN5CT4tNGiHPFofRIlJohKFKj/OyB2zbiZ0YXDA8TKktxpGUbnS1lXDMtfGVSi8FkYKLIXPKd2F0i5EkVIuUx2L1B8/9Xy6AkYX9AbQdHe4JxIE631KI9nYhXs7eDvoRCLR36A8UuMRZ+Vc3okThDuR/GPRlGpCciHopjxtuUUuL3zK6+ft6np0czIdvZqMJpPx1efmqav10MPtkrcotYveIdoHId5InzVs/fh16pkl1UJ79ZphV/4VBnZdzLFrKJT9gDUT/mXDPyfXwIRKWKyzPAWaT9g5/rKhOq1YA4vdwjKtpNPEba/pjC6ICSutHfE9NC9Ck1uBSBSURRY9PDz0Y5HhyBN9NEhJQCcBX2GSrfN2WZ/0nignOratttR+HxlYgAH0MqqBbESoROQQ7EF/0B8EVlmXCbVl6O+baCd/ban8p0CeCixrVTtb1g0242v67sEWw99hAKEuQyqFZpnxspwLC19MWlV0jK+D8a3/2Fu+ExNpaY0dvBCphT1f2kHJn9UfZMlz9ocvp07/GxarjW/vtKAdLj09/W+FY4mvQCTIR3It3JwglXK3pdN+IVHjtOPnfDRFGVFQdtoMPiG3R+30492pF2BTfQ/qfeuVoy35VVW/APihoiw= +api: eJzlVm1v2zYQ/ivEfVo71nLarCiEYUCKuUWWdgliFy0QGANNnW02EqmSlBND4H8vjpSUONbQfR36SeLLPXf33AuvBS82DvIb+BOlcsroa/zWKIsVau9gycHUaIVXRp8XkMN9Vb7dX+D+nxPgUAsrKvRoCaAFLSqEHG5xDxyUhhxq4bfAoUAnraoJBHK4wD0za1Z0+ph9rJBDtywg97ZBDk5usRKQt+D3NeEr7XGDFjisja2ET1uvTyGEJYm72miHjiReTqf0OdQ/b6RER6qk0R61j9h477P7qqT/I43OW6U3EEIIHE7HMM/1TpSqiL6g84fYoq5LJSOHWW3NqsTq16+O5EZ0mdVXlARQW2Leq+SJ88I37ocsvHoJgUOFzokNjnhAcXFeaDl+mDaO/ebglS9pa2atsQMTr46ZeGfsShUF6kMOnmfP///unh67e50CjhR6ZxorkWnj2do0uvg5suC3sXo4uzpnjxxmGAV+Aj4CB4eyscrvY09cobBoX1BPzG+WgbcgjblVGFfLp63xPfrxvsiEY18+fmCrPUvttUK/NdSQNxhpoUabQ7Y7yQpbZO0t7kNsZ2SO3fUdurEl5NAmekOeZe3WOB/ytjbWB+CwE1aJVZnYpbMU2rVoSuKyNFKUcfup5YstMjqgN4C6u98ioyRI2idEI+k4hHszfTMdRaKr/4LykBoPOFvv61GcdHkUKT4WfajmdC853YdnCLeo1UWkvHveLq9m12eL2Yv5bD4/v/y7f+o6ucAPQj6gdCZGg2idLkF/+12frX99XsTMUnptoniXYZfxFUZ21axKJcmVY4cNE/FlY0J6tUMmdMGkqeoSqT/V1sTDPtXpj/Wwa2NZZbTyhnI7SnprGsqErTGe8j0Vr5AxhimRyCmXZ9nd3d1EiqrRhZhIUxEJpZKoXeSx4+1Dt8OfCBdGukFambjOLK7RopaYdUAuI1RK5OTsyWQ6maascr4S+pGi/15EB/wNoYqjQF0KpQk/Gtt2BXYDO5p7Cku9PU8gVGVL3hXLDbTtSjj8ZMsQaPtbgzaW/kNtxUoslKP/AvK1KB0e2TI0SvilG8iKZ+wHk9Oo/X0W630s77KhFfA4paVZLSwDhy2KAm00LZ2cSYm1fyQzTEhUOEP7eT9bAAfREDsDg0+SO6KO2vH723iBLcwt6j8Gqzwtya4QvgP4oaIs sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/specifications/xml-by-key.api.mdx b/docs/apis-tools/operate-api/specifications/xml-by-key.api.mdx index a4d4935afc8..c2c500718a5 100644 --- a/docs/apis-tools/operate-api/specifications/xml-by-key.api.mdx +++ b/docs/apis-tools/operate-api/specifications/xml-by-key.api.mdx @@ -5,7 +5,7 @@ description: "Get process definition as XML by key" sidebar_label: "Get process definition as XML by key" hide_title: true hide_table_of_contents: true -api: 
eJzlVltv0zAU/iuWn7h0TccGQhFCGqJDg8GmtQikaQ9uctp6S+xgO92qKP+dc+wkXdtweUW8tL6c853bd45TcScWlsfX/NLoBKx9D3OppJNa8ZsB1wUYQZuzlMf8Ic/erT/Bmg94IYzIwYEh3Yor3KDAnb+TCpeFcEtcp2ATIwuPF3PUZXrOimCKpRtbA27gRykNoB1nShhwmywhFzyuuFsXBC6VgwUYFJ1rkwsXjl4d87q+IXVbaGXBksaL0Yj+to1PyoSson6iEUo5jw0PLsK4aL1n0Toj1QLx63rAj/swz9RKZDJl5DxYt40tiiKTiU9fhCHPMsif31rS67GlZ7eQEABKYtKdDJFYJ1xp/5iFoxccXcwxPLGAngioKAilkv7LcLAfN95Il9HR2Bhtukwc7WfiVJuZTFNQ2zl4Fj3798M93g/3KhQcqPRWlyYBprRjc12q9P9gwcu+fji5PGOPAmbgFf6DfOChhaQ00q39QJyBMGAOaCDG1zf1oMIU6DsJfnezOxc/gOsZikxY9v3zOZutWRisOHCXmgbxAnxOaMTGPFodRo3ywUbZRhUq1X62kW9m1c7q0uC041XIdR1HUbXU1tVxVWjjahReCSMF1sUnmO5CneeizCixmU5E5o93w5gugdEFvQY05x3uiRHB+pBySja24V6PXo96kUj0Fygbnmxwls4VvThBuBfJvxxt3SYkF4Jua9XVXhQyvHvNQ3dxOb46mY4PJuPJ5OziS/voNXro4eP6dyiNi94h2gch3kqfttT9+G3qaSbVXHv1hm4X/jUGdlnOsIUolP2ANRP+mcM/J1fAhEpZovMiAxpWLcla3tOKtbDYOizXSB9NRPeazuiSmLDU2hH5QycjNLkViERBWWTR/f39MBE5zj8xRIOUBHQS8Ekm2SZv583JYEc51YnttKX2+8jAHAygl1EDZCNCJSKHYA+Ho+EosMq6XKhHhv6yo7aS19XJfxQUmcCa1o2nVdNt13x1GMbRbr/haRxAqeWQV6FzrnlVzYSFryarazrGd8P4obBpNN+WqbS0xvaei8zCnm/dCOVPrppPpafsdx9UvcG0fFZr3+hZSTtceqL63xqnFV+CSJGZ5Fe4OUFSFe6RTvfhRC3UDaYP4ynKiJJS1aVzh+YetdePN++8AJvqO1BvO68cbcmvuv4JhPioRg== +api: eJzlVm1v2zYQ/ivEfdo6xnLabCiEYUCKuUXWbgliDxsQ+ANNnW02EqmSlBND4H8fjpTk2NZevg79JPHlnrt77o0teLFxkD/AnTUSnfsZ10orr4yGJQdToxW0uCkgh+eqfLf/iHvgUAsrKvRoSbYFLSqEHB7jmdKQQy38FjgU6KRVdcTL4SPumVmzOqlixUEXB4tfGmWxgNzbBjk4ucVKQN6C39cErrTHDVrgsDa2Ej5t/XAFISxJ3NVGO3Qk8Xo6pc+x8nkjSStwkEZ71D5i47PPnquS/s80Om+V3kAIIXC4GsO80TtRqoKR8ej8Mbao61LJSF9WW7MqsfrusyO5EV1m9RklAdSWSPcqeeK88I37VxbevIbAoULnxAZHPKCgOC+0HD9MG+d+c/DKl7Q1s9bYgYk350y8N3aligL1MQevslf/f3evzt29TwFHCr0zjZXItPFsbRpdfB1Z8P1YPVzf3bAXDjOMAl8BH4GDQ9lY5fexIa5QWLQX1BDzh2XgLUhjHhXG1fK0L35AP9IUmXDsz18/sdWepcZaod8aasQbjJxQi80h211mnfDFQdhl7SPuQ+xtZJvd9b26sSXk0CauQ55l7dY4H/K2NtYH4LATVolVmaimsxTntWhKIrY0UpRx+9SNxRYZHdA0oD7vt8goI5L2CXFKOo7h3k7fTkeR6OrfoBzy5ICz9b4exUmXR5Hi5OjjNqd7yek+VkPsRa3S3OsG3e3d7P56MbuYz+bzm9vf+qHXyQV+FP8BpTMxGkTrdAn62+/71P3lj0VMM6XXJop36XYbpzGyu2ZVKkmunDtsmIhjjgnp1Q6Z0AWTpqpLpGbVJ1mf9/THeti1sawyWnlDiR4lvTUNZcLWGE/JnypZyBjDlEjklMuz7OnpaSJF1ehCTKSpiIRSSdQu8tjx9qnb4SfChZFukFYmrjOLa7SoJWYdkMsIlRI5OXs5mU6mKaucr4R+oeg/VtQReUOc4qOgLoXSBB4tbbtqe4DdZWpHp/UGHPIESiW35F3lPEDbroTD320ZAm1/adDGpnAotFiWhXL0X0C+FqXDM9uGFgrf3HdPpW/ZPz2oRp3p81nvY6GXDa2Ax5dber+FZeCwRVGgjXalk2spsfYvZIaHE5XQ0Jg+zBbAQTRE1UDnSZpH1FE7fnwXL7CFeUT902CVpyXZFcJfhPioRg== sidebar_class_name: "get api-method" info_path: docs/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null diff --git a/docs/apis-tools/operate-api/tutorial.md b/docs/apis-tools/operate-api/tutorial.md index c69436c108b..b353e6462d2 100644 --- a/docs/apis-tools/operate-api/tutorial.md +++ b/docs/apis-tools/operate-api/tutorial.md @@ -124,7 +124,7 @@ body, ```js async function fetchDiagram() { return fetch( - // Replace {PROCESS_DEFINITION_ID} with a process definition id. + // Replace {PROCESS_DEFINITION_ID} with a process definition ID. // http://localhost:3030 is the URL of the Proxy server, which should stay the same. "http://localhost:3030/v1/process-definitions/{PROCESS_DEFINITION_ID}/xml", { @@ -180,12 +180,12 @@ async function fetchDiagram() { ## Show statistics on the diagram -1. Add a new function to the `api.js` file that fetches the flow node statistics for a specified process instance id: +1. 
Add a new function to the `api.js` file that fetches the flow node statistics for a specified process instance ID: ```js async function fetchStatistics() { return fetch( - // Replace {PROCESS_INSTANCE_ID} with a process instance id. + // Replace {PROCESS_INSTANCE_ID} with a process instance ID. // http://localhost:3030 is the URL of the proxy server, which should stay the same. "http://localhost:3030/v1/process-instances/{PROCESS_INSTANCE_ID}/statistics", { @@ -230,12 +230,12 @@ fetchStatistics() ## Highlight processed sequence flows on the diagram -1. Add a new function to the `api.js` file that fetches the processed sequence flows for a specified process instance id: +1. Add a new function to the `api.js` file that fetches the processed sequence flows for a specified process instance ID: ```js async function fetchSequenceFlows() { return fetch( - // Replace {PROCESS_INSTANCE_ID} with a process instance id. + // Replace {PROCESS_INSTANCE_ID} with a process instance ID. // http://localhost:3030 is the URL of the Proxy server, which should stay the same. "http://localhost:3030/v1/process-instances/{PROCESS_INSTANCE_ID}/sequence-flows", { diff --git a/docs/apis-tools/spring-zeebe-sdk/configuration.md b/docs/apis-tools/spring-zeebe-sdk/configuration.md index b4bfe5f0bf6..6cff515bf51 100644 --- a/docs/apis-tools/spring-zeebe-sdk/configuration.md +++ b/docs/apis-tools/spring-zeebe-sdk/configuration.md @@ -170,6 +170,29 @@ public void handleJobFoo(final JobClient client, final ActivatedJob job) { } ``` +You can also control auto-completion in your configuration. + +**Globally:** + +```yaml +camunda: + client: + zeebe: + defaults: + auto-complete: false +``` + +**Per worker:** + +```yaml +camunda: + client: + zeebe: + override: + foo: + auto-complete: false +``` + Ideally, you **don't** use blocking behavior like `send().join()`, as this is a blocking call to wait for the issued command to be executed on the workflow engine. While this is very straightforward to use and produces easy-to-read code, blocking code is limited in terms of scalability. This is why the worker above showed a different pattern (using `exceptionally`). Often, you might also want to use the `whenComplete` callback: @@ -357,7 +380,7 @@ camunda: execution-threads: 1 ``` -For a full set of configuration options, see [ZeebeClientConfigurationProperties.java](https://github.com/camunda/camunda/blob/main/clients/spring-boot-starter-camunda-sdk/src/main/java/io/camunda/zeebe/spring/client/properties/ZeebeClientConfigurationProperties.java). +For a full set of configuration options, see [CamundaClientConfigurationProperties.java](https://github.com/camunda/camunda/blob/main/clients/spring-boot-starter-camunda-sdk/src/main/java/io/camunda/zeebe/spring/client/properties/CamundaClientProperties.java). :::note We generally do not advise using a thread pool for workers, but rather implement asynchronous code, see [writing good workers](/components/best-practices/development/writing-good-workers.md) for additional details. diff --git a/docs/apis-tools/spring-zeebe-sdk/getting-started.md b/docs/apis-tools/spring-zeebe-sdk/getting-started.md index ce41f8b036c..86ac4f519b1 100644 --- a/docs/apis-tools/spring-zeebe-sdk/getting-started.md +++ b/docs/apis-tools/spring-zeebe-sdk/getting-started.md @@ -4,40 +4,24 @@ title: Getting started description: "Leverage Zeebe APIs (gRPC and REST) in your Spring Boot project." 
--- -This project allows you to leverage Zeebe APIs ([gRPC](/apis-tools/zeebe-api/grpc.md) and [REST](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md)) in your Spring Boot project. Later on, we’ll expand the Spring Zeebe SDK to deliver a Camunda Spring SDK that provides a unified experience for interacting with all Camunda APIs in Java Spring. +This project allows you to leverage Zeebe APIs ([gRPC](/apis-tools/zeebe-api/grpc.md) and [REST](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md)) in your Spring Boot project. Later on, we’ll expand the Spring Zeebe SDK to deliver a Camunda Spring SDK that provides a unified experience for interacting with all Camunda APIs in Java Spring. ## Version compatibility -| Camunda Spring SDK version | JDK | Camunda version | Bundled Spring Boot version | -| -------------------------- | ------ | --------------- | --------------------------- | -| 8.5.x | \>= 17 | 8.5.x | 3.2.x | -| 8.6.x | \>= 17 | 8.6.x | 3.2.x | +| Camunda Spring SDK version | JDK | Camunda version | Bundled Spring Boot version | +| -------------------------- | ---- | --------------- | --------------------------- | +| 8.5.x | ≥ 17 | 8.5.x | 3.2.x | +| 8.6.x | ≥ 17 | 8.6.x | 3.2.x | ## Add the Spring Zeebe SDK to your project -Add the following repository and Maven dependency to your Spring Boot Starter project: - -```xml - - - - true - - - false - - identity - Camunda Identity - https://artifacts.camunda.com/artifactory/camunda-identity/ - - -``` +Add the following Maven dependency to your Spring Boot Starter project, replacing `x` with the latest patch level available: ```xml io.camunda spring-boot-starter-camunda-sdk - 8.6.3 + 8.6.x ``` @@ -134,6 +118,7 @@ camunda: rest-address: http://localhost:8080 prefer-rest-over-grpc: false audience: zeebe-api + scope: # optional ``` ## Obtain the Zeebe client diff --git a/docs/apis-tools/tasklist-api-rest/assets/img/api-architecture.png b/docs/apis-tools/tasklist-api-rest/assets/img/api-architecture.png deleted file mode 100644 index 93cf8ef7c8f..00000000000 Binary files a/docs/apis-tools/tasklist-api-rest/assets/img/api-architecture.png and /dev/null differ diff --git a/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md b/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md index 2fd6e935f61..d403d964086 100644 --- a/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md +++ b/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md @@ -168,7 +168,7 @@ curl -X 'PATCH' \ ### Unassign task -Unassign a task with the provided id. This returns the task. +Unassign a task with the provided ID. This returns the task. #### URL diff --git a/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md b/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md index 2eed6411de4..48ce994ad29 100644 --- a/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md +++ b/docs/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md @@ -10,7 +10,7 @@ The Variables API controller provides an API to query variables. ### Get variable -Get the variable details by variable id. +Get the variable details by variable ID. 
#### URL diff --git a/docs/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md b/docs/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md deleted file mode 100644 index b4f17bacb49..00000000000 --- a/docs/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md +++ /dev/null @@ -1,388 +0,0 @@ ---- -id: migrate-to-zeebe-user-tasks -title: Migrate to Zeebe user tasks -description: "Learn how to migrate job worker-based user tasks to Zeebe-based tasks." ---- - -import DocCardList from '@theme/DocCardList'; -import FormViewer from "@site/src/mdx/FormViewer"; -import YesItem from "./assets/react-components/YesItem"; -import NoItem from "./assets/react-components/NoItem"; -import TableTextSmall from "./assets/react-components/TableTextSmall"; -import userTaskMigrationDecisionHelperForm from "./assets/forms/userTaskMigrationDecisionHelperForm.js"; -import "./assets/css/condensedTable.module.css"; -import styles from "./assets/css/cleanImages.module.css"; -import APIArchitectureImg from './assets/img/api-architecture.png'; -import ZeebeTaskSelectionImg from './assets/img/zeebe-user-task-selection.png'; - -Camunda 8.5 introduces a new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type: Zeebe user tasks. -Zeebe user tasks have several benefits, including: - -- Running directly on the automation engine for high performance. -- Removing dependencies and round trips to Tasklist. -- A more powerful API that supports the full task lifecycle. - -In this guide, you will learn: - -- Under which circumstances and when you should migrate. -- How to estimate the impact on a project. -- The steps you need to take for a successful migration without interrupting your operations. - -## Decide on your migration path - -Zeebe user tasks require migration of the user tasks in both your diagrams and the task API. - -With this in mind, you can migrate at your own pace. If you should migrate now or later, and what is required to migrate depends on your current setup and future plans. - -Use the following decision helper questionnaire to figure out what's right for you: - - - -### Task type differences - -Learn the differences between both task types and make an informed decision, and understand the new capabilities of Zeebe user tasks. Refer to this table for important high-level differences of the two task types: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
Job worker-based user tasks
- Existing implementation -
-
Zeebe user tasks
- Recommended for new projects -
Implementation locationTasklist -
Zeebe
- Does not require Tasklist to run -
Compatible versions8.0 +8.5 +
Supports Tasklist UI
API
Supports Tasklist API - - Full support - -
Partially
- Queries, GET tasks, forms, variables - ℹ Currently, you must use Zeebe and Tasklist APIs to use Zeebe user tasks -
Supports Zeebe API - - Task state operations (assign/update/complete) -
Supports job workers
Supports task lifecycle events - - Basic only: created/completed/canceled - - - Full lifecycle events including custom actions -
Supports task listeners - - Task listeners will be introduced in a future release -
Extras
Custom actions/outcomes - - Custom actions can be defined on any operation excluding unassign (DELETE assignment, send update beforehand) -
Supports task reports in Optimize
Recommendations - You can continue to use this task type on existing projects when you have a custom task application running on it and do not require any of the above features. - Refer to the decision helper above for a tailored recommendation. - - Use this task type on any new projects when you run Tasklist. - Migrate existing projects and task applications/clients to this task type when you require one of the features above, or the following use cases: - -
  • Implement a full task lifecycle
  • React on any change/events in tasks, such as assignments, escalations, due date updates, or any custom actions
  • Send notifications
  • Track task or team performance
  • Build an audit log on task events
  • Enrich tasks with business data

Refer to the decision helper above for a tailored recommendation.
- -## Switch the implementation type of your user tasks - -We recommend you migrate process-by-process, allowing you to thoroughly test the processes in your test environments or via your [CI/CD](/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md). To do this, take the following steps: - -1. Open a diagram you want to migrate. -2. Click on a user task. -3. Check if the task has an embedded form. - - If a form is embedded, [transform it into a linked form](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked) before you change the task type implementation. Press `Ctrl+Z` or `⌘+Z` to undo if you accidentally removed your embedded form. -4. Open the **Implementation** section in the properties panel. -5. Click the **Type** dropdown and select **Zeebe user task**. The linked form or external form reference will be preserved. - -Task Type Selection - -Repeat these steps for all user tasks in the process. Then, deploy the process to your development cluster and test it by running the process and ensuring your custom task applications work. - -## Use the new Zeebe Task API - -:::note -The Tasklist REST API is not deprecated, and you still need it for queries on both task types. -::: - -Operations on Zeebe user tasks which modify the task state have to be performed using the new Zeebe REST API. However, queries and adjacent operations still require the Tasklist REST API. The following table provides a breakdown of which operations are supported in which API, and for which user tasks. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Operation | Tasklist API | Zeebe Task API (8.5) |
| --- | --- | --- |
| Query tasks | All types | ← Use Tasklist API |
| Get task | All types | ← Use Tasklist API |
| Retrieve task variables | All types | ← Use Tasklist API |
| Get task form | All types | ← Use Tasklist API |
| Change task assignment | Job worker-based tasks | Zeebe tasks |
| Complete task | Job worker-based tasks | Zeebe tasks |
| Update task | - | Zeebe tasks |
| Safe and retrieve draft variables | All types | ← Use Tasklist API |
- -You can also operate both task types at the same time in the same application utilizing both APIs. We recommend this for a smooth migration, but you should eventually update all processes to use the new task type to use all benefits. The following image illustrates how to route API calls to the respective APIs: - -Task API Architecture - -The major changes are: - -- Create and maintain new, additional secrets for the Zeebe REST API. -- Call dedicated endpoints on separate components (Zeebe vs. Tasklist) for all state modifications on tasks for the respective task types. -- Manage new request/response objects. - -The following table outlines the respective endpoints. Click the endpoints to follow to the API documentation and inspect the differences in the request and response objects. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Operation | Tasklist API | Zeebe Task API (8.5) |
| --- | --- | --- |
| Query tasks | `POST /tasks/search` | ← Use Tasklist API |
| Get task | `GET /tasks/:taskId` | ← Use Tasklist API |
| Retrieve task variables | `GET /variables/:variableId`, `POST /tasks/:taskId/variables/search` | ← Use Tasklist API |
| Get task form | `GET /forms/:formId` | ← Use Tasklist API |
| Assign a task | `PATCH /tasks/:taskId/assign` | `POST /user-tasks/:taskKey/assignment` |
| Unassign a task | `PATCH /tasks/:taskId/unassign` | `DELETE /user-tasks/:taskKey/assignee` |
| Complete task | `PATCH /tasks/:taskId/complete` | `POST /user-tasks/:taskKey/completion` |
| Update task | - | `PATCH /user-tasks/:taskKey` |
| Safe and retrieve draft variables | `POST /tasks/:taskId/variables` | ← Use Tasklist API |
- -### Zeebe Java client - -Use the Zeebe Java client when you are building your task application in Java. The client assists with managing authentication and request/response objects. - -### API differences - - - -Refer to the dedicated sections and API explorers to learn details about the APIs. - - - -## Troubleshooting and common issues - -If your task application does not work properly after migration, check the following: - -- **The endpoints return specific error messages when you run them on the wrong task type**: Ensure to call the right endpoint for the right task type, c.f. above [table](#use-the-new-zeebe-task-api). -- **Forms do not appear**: Ensure you have extracted embedded forms, if any, and [transformed them into linked forms](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked), before you change the task type implementation. -- **Task update operation does not work**: The update operation is only available to Zeebe user tasks. diff --git a/docs/apis-tools/tasklist-api-rest/sidebar-schema.js b/docs/apis-tools/tasklist-api-rest/sidebar-schema.js index 517c97c4df8..ebb41a15e74 100644 --- a/docs/apis-tools/tasklist-api-rest/sidebar-schema.js +++ b/docs/apis-tools/tasklist-api-rest/sidebar-schema.js @@ -7,6 +7,5 @@ module.exports = { { Specifications: require("./specifications/sidebar.js"), }, - "apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks", ], }; diff --git a/docs/apis-tools/tasklist-api-rest/specifications/assign-task.api.mdx b/docs/apis-tools/tasklist-api-rest/specifications/assign-task.api.mdx index 14546a99a99..e15045dcb79 100644 --- a/docs/apis-tools/tasklist-api-rest/specifications/assign-task.api.mdx +++ b/docs/apis-tools/tasklist-api-rest/specifications/assign-task.api.mdx @@ -44,7 +44,7 @@ When using REST API with JWT authentication token following request body paramet On success returned. -
Schema
+
Schema
An error is returned when the task is not active (not in the CREATED state).
An error is returned when task was already assigned, except the case when JWT authentication token used and `allowOverrideAssignment = true`. diff --git a/docs/apis-tools/tasklist-api-rest/specifications/complete-task.api.mdx b/docs/apis-tools/tasklist-api-rest/specifications/complete-task.api.mdx index 1f4e3b31ea8..a2205914bde 100644 --- a/docs/apis-tools/tasklist-api-rest/specifications/complete-task.api.mdx +++ b/docs/apis-tools/tasklist-api-rest/specifications/complete-task.api.mdx @@ -44,7 +44,7 @@ Variables to update or add to task during the task completion On success returned. -
Schema
+
Schema
An error is returned when the task is not active (not in the CREATED state).
An error is returned if the task was not claimed (assigned) before.
An error is returned if the task is not assigned to the current user. diff --git a/docs/apis-tools/tasklist-api-rest/specifications/get-task-by-id.api.mdx b/docs/apis-tools/tasklist-api-rest/specifications/get-task-by-id.api.mdx index c9ba57ae4b2..ca8de33aad9 100644 --- a/docs/apis-tools/tasklist-api-rest/specifications/get-task-by-id.api.mdx +++ b/docs/apis-tools/tasklist-api-rest/specifications/get-task-by-id.api.mdx @@ -37,7 +37,7 @@ Get one task by id. Returns task or error when task does not exist. On success returned. -
Schema
+
Schema
User has no permission to access the task (Self-managed only). diff --git a/docs/apis-tools/tasklist-api-rest/specifications/search-tasks.api.mdx b/docs/apis-tools/tasklist-api-rest/specifications/search-tasks.api.mdx index b4d862e180d..e8e2b223ace 100644 --- a/docs/apis-tools/tasklist-api-rest/specifications/search-tasks.api.mdx +++ b/docs/apis-tools/tasklist-api-rest/specifications/search-tasks.api.mdx @@ -69,7 +69,7 @@ An array of the task's variables. Only variables specified in `TaskSearchRequest The draft value of the variable. -
  • ]
  • ]
  • +
  • ]
  • ]
  • An error is returned when more than one search parameters among `[searchAfter, searchAfterOrEqual, searchBefore, searchBeforeOrEqual]` are present in request diff --git a/docs/apis-tools/tasklist-api-rest/specifications/unassign-task.api.mdx b/docs/apis-tools/tasklist-api-rest/specifications/unassign-task.api.mdx index 2ecc8512e03..2e474e4b709 100644 --- a/docs/apis-tools/tasklist-api-rest/specifications/unassign-task.api.mdx +++ b/docs/apis-tools/tasklist-api-rest/specifications/unassign-task.api.mdx @@ -40,7 +40,7 @@ Unassign a task with `taskId`. Returns the task. On success returned. -
    Schema
    +
    Schema
    An error is returned when the task is not active (not in the CREATED state).
    An error is returned if the task was not claimed (assigned) before. diff --git a/docs/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md b/docs/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md index 51b479a2a4a..0c1655c7af8 100644 --- a/docs/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md +++ b/docs/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md @@ -2,9 +2,13 @@ id: tasklist-api-rest-overview title: "Overview" sidebar_position: 1 -description: "Build applications for human-centered processes by querying human tasks, assigning users, and completing tasks with the Tasklist API." +description: "Build applications for human-centered processes by querying user tasks, assigning users, and completing tasks with the Tasklist API." --- +:::note +To migrate from Camunda's V1 component REST APIs to the V2 [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md), review [migrating to the Camunda 8 API](/apis-tools/migration-manuals/migrate-to-camunda-api.md). +::: + ## Introduction The Tasklist API is a REST API designed to build task applications for human-centered processes. The API allows you to query user tasks, assign users to these tasks, and complete these tasks. @@ -18,7 +22,7 @@ Ensure you [authenticate](./tasklist-api-rest-authentication.md) before accessin For SaaS: `https://${REGION}.tasklist.camunda.io:443/${CLUSTER_ID}/v1/`, and for Self-Managed installations: `http://localhost:8080/v1/`. :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Tasklist component. ::: @@ -34,7 +38,7 @@ A detailed API description is also available as a Swagger UI at `https://${base- For SaaS: `https://${REGION}.tasklist.camunda.io:443/${CLUSTER_ID}/swagger-ui/index.html`, and for Self-Managed installations: [`http://localhost:8080/swagger-ui/index.html`](http://localhost:8080/swagger-ui/index.html). :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). 
::: ## API in Postman diff --git a/docs/apis-tools/tasklist-api/tasklist-api-tutorial.md b/docs/apis-tools/tasklist-api/tasklist-api-tutorial.md index 3dad8bb79cd..5ecdd5ba456 100644 --- a/docs/apis-tools/tasklist-api/tasklist-api-tutorial.md +++ b/docs/apis-tools/tasklist-api/tasklist-api-tutorial.md @@ -250,9 +250,8 @@ export class TasklistModule implements OnModuleInit { logger.log("Tasklist credentials fetched"); axiosRef.defaults.baseURL = config.get("TASKLIST_API_ADDRESS"); - axiosRef.defaults.headers[ - "Authorization" - ] = `Bearer ${credentials.access_token}`; + axiosRef.defaults.headers["Authorization"] = + `Bearer ${credentials.access_token}`; axiosRef.defaults.headers["Content-Type"] = "application/json"; setTimeout(this.onModuleInit.bind(this), credentials.expires_in * 1000); // we need convert minutes to milliseconds } diff --git a/docs/apis-tools/testing/getting-started.md b/docs/apis-tools/testing/getting-started.md index 6101c53b3fe..3861fc270b3 100644 --- a/docs/apis-tools/testing/getting-started.md +++ b/docs/apis-tools/testing/getting-started.md @@ -16,7 +16,7 @@ CPT is based on [JUnit 5](https://junit.org/junit5/) and [Testcontainers](https: - Elasticsearch :::warning Disclaimer -For Camunda 8.6, CPT is in an [alpha version](/reference/alpha-features.md#alpha). +For Camunda 8.6, CPT is in an [alpha version](/components/early-access/alpha/alpha-features.md#alpha). For a full-featured testing library, take a look at [Zeebe Process Test](/apis-tools/java-client/zeebe-process-test.md). ::: @@ -277,7 +277,7 @@ The test runtime uses [SLF4J](https://www.slf4j.org/) as the logging framework. - `tc.camunda` - The Camunda Docker container - `tc.connectors` - The Connectors Docker container - `tc.elasticsearch` - The Elasticsearch Docker container -- `org.testcontainers` - The Testconainers framework +- `org.testcontainers` - The Testcontainers framework For most cases, the log level `warn` (warning) is sufficient. diff --git a/docs/apis-tools/web-modeler-api/index.md b/docs/apis-tools/web-modeler-api/index.md index 43d1d5d9c14..5c339f39f0a 100644 --- a/docs/apis-tools/web-modeler-api/index.md +++ b/docs/apis-tools/web-modeler-api/index.md @@ -44,11 +44,11 @@ On Self-Managed instances no limits are enforced. ### What is the difference between _simplePath_ and _canonicalPath_? -In Web Modeler you can have multiple files with the same name, multiple folders with the same name, and even multiple projects with the same name. Internally, duplicate names are disambiguated by unique ids. +In Web Modeler you can have multiple files with the same name, multiple folders with the same name, and even multiple projects with the same name. Internally, duplicate names are disambiguated by unique IDs. -The API gives you access to the names, as well as the ids. For example, when requesting a file you will get the following information: +The API gives you access to the names, as well as the IDs. For example, when requesting a file you will get the following information: - **simplePath** contains the human-readable path. This path may be ambiguous or may have ambiguous elements (e.g. folders) in it. -- **canonicalPath** contains the unique path. It is a list of **PathElementDto** objects which contain the id and the name of the element. +- **canonicalPath** contains the unique path. It is a list of **PathElementDto** objects which contain the ID and the name of the element. -Internally, the ids are what matters. 
You can rename files or move files between folders and projects and the id will stay the same. +Internally, the IDs are what matters. You can rename files or move files between folders and projects and the ID will stay the same. diff --git a/docs/apis-tools/web-modeler-api/tutorial.md b/docs/apis-tools/web-modeler-api/tutorial.md index 946f47749aa..518ff87028b 100644 --- a/docs/apis-tools/web-modeler-api/tutorial.md +++ b/docs/apis-tools/web-modeler-api/tutorial.md @@ -26,12 +26,16 @@ Make sure you keep the generated client credentials in a safe place. The **Clien ## Set up authentication -If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client id and client secret. Then, we return the actual token that can be passed as an authorization header in each request. +If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client ID and client secret. Then, we return the actual token that can be passed as an authorization header in each request. To set up your credentials, create an `.env` file which will be protected by the `.gitignore` file. You will need to add your `MODELER_CLIENT_ID`, `MODELER_CLIENT_SECRET`, `MODELER_AUDIENCE`, which is `modeler.cloud.camunda.io` in a Camunda 8 SaaS environment, and `MODELER_BASE_URL`, which is `https://modeler.camunda.io/api/v1`. These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CONSOLE_CLIENT_ID`, `CAMUNDA_CONSOLE_CLIENT_SECRET`, and `CAMUNDA_CONSOLE_OAUTH_AUDIENCE`. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/docs/apis-tools/working-with-apis-tools.md b/docs/apis-tools/working-with-apis-tools.md index 3b83aed9623..e413dd21ab8 100644 --- a/docs/apis-tools/working-with-apis-tools.md +++ b/docs/apis-tools/working-with-apis-tools.md @@ -2,7 +2,7 @@ id: working-with-apis-tools title: "Working with APIs & tools" sidebar_label: "Working with APIs & tools" -description: "Interact programmatically with Camunda 8 using official Zeebe client libraries and APIs." +description: "Learn more about the integration concepts involved in using the Camunda Zeebe client libraries, APIs, and SDKs to interact programmatically with Camunda 8." 
--- import DocCardList from '@theme/DocCardList'; @@ -44,6 +44,10 @@ Other components in Camunda 8, such as [Tasklist API (GraphQL)](../apis-tools/ta ## Learn about Camunda Components and their APIs +:::note +To migrate from Camunda's V1 component REST APIs to the V2 [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md), review [migrating to the Camunda 8 API](/apis-tools/migration-manuals/migrate-to-camunda-api.md). +::: + Camunda 8 components have APIs to enable polyglot developers to work with in their programming language of choice. Below are links to available component APIs. ![Architecture diagram for Camunda including all the components for SaaS](./img/ComponentsAndArchitecture_SaaS.png) @@ -72,6 +76,9 @@ Additionally, visit our documentation on [Operate](../self-managed/operate-deplo ### SDKs ### Postman diff --git a/docs/apis-tools/zeebe-api-rest/sidebar-schema.js b/docs/apis-tools/zeebe-api-rest/sidebar-schema.js deleted file mode 100644 index af9f3d63522..00000000000 --- a/docs/apis-tools/zeebe-api-rest/sidebar-schema.js +++ /dev/null @@ -1,12 +0,0 @@ -/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ - -module.exports = { - "Zeebe API (REST)": [ - "apis-tools/zeebe-api-rest/zeebe-api-rest-overview", - "apis-tools/zeebe-api-rest/zeebe-api-rest-authentication", - "apis-tools/zeebe-api-rest/zeebe-api-tutorial", - { - Specifications: require("./specifications/sidebar.js"), - }, - ], -}; diff --git a/docs/apis-tools/zeebe-api-rest/specifications/assign-a-user-task.api.mdx b/docs/apis-tools/zeebe-api-rest/specifications/assign-a-user-task.api.mdx deleted file mode 100644 index 3dd58a62c99..00000000000 --- a/docs/apis-tools/zeebe-api-rest/specifications/assign-a-user-task.api.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: assign-a-user-task -title: "Assign a user task" -description: "Assigns a user task with the given key to the given assignee." 
-sidebar_label: "Assign a user task" -hide_title: true -hide_table_of_contents: true -api: eJztWNty2zYQ/RUMXppMbVFJ3TTRm+M6rdtcPLbcTmN7xiAJSYhJgAVAySpH/96zAKl7LtPmMckkw8tiz+7inCVWDfdi7Pjgml85aZkX7p7fHvBcusyqyiuj+YAfO6fG2jHB6s6IzZSfMD+RbKymUrN7OWferD0QYY2UPX7AK2FFKb20BNRwjRt4JV9DuPpdzmGjCKgSfoJrK/+ulZU5H3hby+1ohsAgODMKcKuQgB9RCdNlE1kKPmi4n1cEp7SXY2nxamRsKXx89OyILxa3EVI6/9Lkc1qzHUFmsFp7eiWqqlCZoGCSD44ianbBTPpBZp5St6aS1ivpwtq2KHS9m1T3liHAzdR6bON9WTvPtPEslUyWlUctLLvTdVHcUeptDM5bpce4pxciLfBsJAonFwdcFIWZvZtKa1W+J5iXc5bLkagLfxDiCNVVjlnZhpAzNWLKs5kAKQo8zudddHmPXUqPZbDHhtwFyDu6tNLXFsTQDLgIWGnm6mzCMuGkixkGoJAdYDUbKetCkrVeAsNRuixFzsRYKN1jIG+EnNG6uanZRExjAR2rVHaPSrCRNSUbW1NXEQg7jj0nj5WV4KxnVmSSYa9zRZVwvRu9KmdqTCGF3qgnkYPKmcXCbdfxmGXIBaDRgE1FUVOcApVTRRESyTKJXOAthrdic4iIau6wD8vwQ5JS55UBeVHCqYlU7LGzUaAECDfFpuYHYX8Ipd1KyvOmZeAN30htD1NCZkjNK0+3oTuQWGMrKBHaRVQM5EN2CLNCxSLNn/aP9jN8md13rt1C8hRplH9ArUAfDm9H/f6nJBIW7ch/T0fKhG5lkpmyKiT5Z2+MlSiKF6oALK67khEjaX2XC0vRDWKhPqJ/rES1yu93+8A2Ec6jZYvLYn9AOiwaphH9+uLVCXtx9ONPt48m3ldukCSz2axnR9mhBCeN7Rk7TnBL/8jucdANcijFPNApj9QVBVt1HuYqmamRyroO3YbNaPs3iPCRthXfNjt0WTbS2iq+89FgVxdnDHXVXo3mROAd6LAmkBP2IjW1H6SF0Pd8Rbxd0G0UV5elsMvPwSYAHDkvfO0++yH44emObyLcr8PhOYsuwKG8680QYQtESZRKq7Iu+QC8xZ14iHfP+v0F+aQd/4JM0BcfKqQfqLWdDshRrngbElMacensa+2MsWqstnEBtNYEWhL/HDOKwj/6rNb3CZMUT8ocmVp3kn/xH/xgH1rRzqxBIrRTEHttLZIr5t/E/k3s38T+tcSOlzjFT0xOJ3XjAnXowD7gCYn0kETqkmbtXL9IVh9sOpNLO+2GgNqiTLyJIlqA+80ELheDpjLWL5LpE9hPhVV0HAkbSq+j2DoSFTj7FJMYyO5m0guaNChJwd5LCcX8gu4wE/NQUcLZ9Pe8/7y/1xWZdrWKji5OL4fs+PyMxZQi99b6QeeShL3XZTT+nNMwmjiJfqb8/JKWxFqkEq3MHtdU/CUfWqzgORxYgxGexItXHUt++3MYNpr62MVq7Dl9EHRE2hxTViTbGhniWNQdfDuzQNSRCUG1NNpMjTYVDIiL+r0nu3RF+qQ6nNfKWofWC8ouO3/0lhV0VLSkRnRnie5NiO1YGU1O4nqMRq+jBfsj4rInATXSr+u7Y/iv0x5Ak0xgXS6Sf8hNkhYmTUqMGEkL5JL3p6cvTw9P3r15c/X2bPjX4euzk9O3l6eH8NvzDz6UltRRCr0WVTw3r0/Q26k3q+/O/x64W0Z4+eATdBnM1os25aaV7DVfSRYLBpvD+JpqQcCovGveNClGtStbLBb0GMyxGJavb1dCDcrOlaPrvB03P5Hmo4t2zH7Mvmyo35tX+1DoeWgZGLFwh0u42/qVYXEL8wmmVciCIo0WJzGewyH5WXnYmfMXB92KYwxtlf+I7cYxgfS77Jnn7y6HJMf2N4bSkJC4FTP6yQP/D/gN/uLGhFoFpYfnDceXYlyLMdlHv/TnX93IKMU= -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

    Assign a user task
    Assigns a user task with the given key to the given assignee.
    Request: path parameters; body (required).
    Responses: the user task's assignment was adjusted; the assignment of the user task with the given key cannot be completed; the user task with the given key was not found; the user task with the given key is in the wrong state — details are provided in the problem response body (HTTP status code >= 400 and <= 600).
    diff --git a/docs/apis-tools/zeebe-api-rest/specifications/complete-a-user-task.api.mdx b/docs/apis-tools/zeebe-api-rest/specifications/complete-a-user-task.api.mdx deleted file mode 100644 index 76903ffe6b9..00000000000 --- a/docs/apis-tools/zeebe-api-rest/specifications/complete-a-user-task.api.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: complete-a-user-task -title: "Complete a user task" -description: "Completes a user task with the given key." -sidebar_label: "Complete a user task" -hide_title: true -hide_table_of_contents: true -api: eJztV8Fy2zYQ/RUMTslUFuXUTRPdHFVp3caJR5bbaWwfIBKSkJAEA4CSVQ7/vW8JUpREe+x2crQ99hDE4i12970lUHAnFpYPr/mVlYY5Yb/y2x6PpA2NypzSKR/ykU6yWDppmWB5Y8bWyi2ZW0q2UCuZsq9y0+c9ngkjEtgaAi14igEQaNUUi/6QG9goAs2EW+LZyG+5MjLiQ2dyeeh5CngAMz2vPLXOnWZhvSvyasOlTAQfFtxtMnKoUicX0mBqrk0inH/1+oSX5a13Kq17p6MNrWn3MBexxSZCjeWpozmRZbEKBe0n+GJpU0XXm559kaGj6I3OpHFKWppdCaPELPYDEUWKUER8sWP0UNDbpbuRHuSACkDBH24izeOY1nr0ssdF6JGLA0enLMyt0wnzBnAa5+REOGDHMZtJzITSWgU0NjewbL1LFN1ZZqTNY6fShZ93S2WZTKNMI91MpSvtc9dnZ3OWaseQoZWKZNRjqvYSybkABAV6w5tQb3j/Jm1js87ARTc2BOeUo2HFX6JYTVb4nPgqo+Rkh41mOrW+GK8GJ910TPezK+w28RGzeZWIOdyD5oA7GQweRegIhIUipSQgsVvoPjvXRiILTqgYCsNzkyPkr1rfbJ3NQFiflwcYipVITvJDl6mHlb/wlrVf5snDELM3nHnv15P3I/b25Kefb18sncvsMAjW63XfzMMjCTpr09dmEWBIf2T3ss+QBMSQiE3Fny3rWasNZjMZqrkKqeYUYL1tRtXeq/sDwvKzRYcdW63nRvFDWZ2yq8kZQ15Tp+YbYmzHdbWmYiPsxUznbjiLRfqVtzzrOj30YvMkEWbbs/YdAMg64XL7aK/68VUHm+j123R6wTwEOBRBltp41dWOKIhEpSrJEz4ESzESd370ejAoCZMq/oRIUibvMoRfUeswHJAjaXlbBaZS7CsNv1dltFELdegXjnY0X5P4Fx+R1/nJE6TdFSapnZQ513ka1QJ/+z9wUIdatGujEQhVCmLPjUFwaB3PYn8W+7PYv5PYMYmj5lJHdJzUtqIOnSqHPCCRHpFIbVDsHD7LINweD+jYKM2qOanmBmnihRdRCe4XS0CWwyLTxpXB6hj2ewc6mvZia0gU47ATL/1GusWkCToOU5CCfZYSivkV3WEt/ImC/OzjvRm8GdwLRaZNrjzQZHw5ZacXZ8yH5Lm30w8aSBL2vZDe+DHQ6vRsJfqZcptLWuJzMZNoZeY0p+Rv+VD7qpBp7I3wxj+8b1jy+1/TqtDUxybtyXx8J6hUhwfpnfNsw7iKjnNdua7Jsh8AlQ519osG/eMuKREkaQvsSPK0arAg5ra/e7QwxmmZ0tDj6MESPZo81jccbzLy692GffAW7E/vlx1XXj3Jmu66AH4+68NpEAqsi0TwD8EEs1jPgkSoNKgd2eDzePxufDT6dH5+9fFs+vfRh7PR+OPl+Ai4fXfnqgSSBhKR7uyqubntXtwOgy/a78t/uunVVXbyzgXoHLjUlXWARS3Da97KEAuG+7fAHSWCVF5N17woZsLKKxOXJb0GGwzuaNe3rfgqtUbK0nN7ZXswpBeT+nb3kj31NnlvZPVLkW6qRoCbEkZ4BODBBbe8hflSighkp716i5Hf0dGUcFqEzv2y7DUrTnHlyNwDtnsff1LlthNefLqcksjqy22C7wbeGrGm2zb+D/kNfjHQVbYq/VbvC47+v8jFguw9Lv38C0iulxI= -sidebar_class_name: "post api-method" -info_path: docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

    Complete a user task
    Completes a user task with the given key.
    Request: path parameters; body with an optional, nullable `variables` object (the variables to complete the user task with).
    Responses: the user task was completed successfully; the user task with the given key cannot be completed; the user task with the given key was not found; the user task with the given key is in the wrong state — details are provided in the problem response body (HTTP status code >= 400 and <= 600).
    diff --git a/docs/apis-tools/zeebe-api-rest/specifications/get-cluster-topology.api.mdx b/docs/apis-tools/zeebe-api-rest/specifications/get-cluster-topology.api.mdx deleted file mode 100644 index c07bf7f8421..00000000000 --- a/docs/apis-tools/zeebe-api-rest/specifications/get-cluster-topology.api.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: get-cluster-topology -title: "Get cluster topology" -description: "Obtains the current topology of the cluster the gateway is part of." -sidebar_label: "Get cluster topology" -hide_title: true -hide_table_of_contents: true -api: eJy1Vttu2zgQ/RWCT7uAEzndlyJvqesGXvQSJO4utoEfKHkss6VELUk5dQX/+86QlCXbai5AFzBgiRqeM8M5M8OGO5FbfnnPJ6q2DgxfjPgSbGZk5aQu+SX/lDohS8vcGlhWGwOlY05XWul8y/QqrIfN/jkXDh7ElknLKmEcmpzzETdgK11aQK6GvxqP6e//4Ml06XAnwYuqUjITBJ98tcTRcJutoRCn5HMEbF0kMtFRG/i3BusI3G0rQGOdfoXM4XtldAXGyRBVavQ3MPYU/IopaclBFk3Qf+GYMNB6jgsYRoyuxySMEVt8LWulRKpwyZkaRlw6KAaIbozeSFxislxpU/jQGf5EJGalXsLTgZDVbDl8SHUp8TjYbw8SXSbk6PTvHpvN3jJk9vkJlD02iZnJUWEjHpwLS3+84rsRX2vrhgnpSykK8LgGRIa0+TCBdQa/EVylzU/g6MszoB7zlXImCfLRVHdWrBClyGHJPGvQJL2VIesn3G3SX5zkPSXbZ+dFed/vfyL5mORWsvstzzw7o0nEx9hv/VsKofhvxcoxMmyrPgZBWRMslxsoB3lj+kccyrqglqZALKMfSukH/yhLkTmE4AtSHQjl1k/50zajYN069SwPwhZKZl12z0v0C/nRAUSg8+A3LRin1Q22CBn61WkO4mFEmwH192DfeNsZ6sQvx1K9kz8GckDY6HWK2L1GJct+331Oko+b1UHBTHRd/qQwO+5e5VCLtBWW6pKJzGhrf4E3bQkiwTvUgjbD/uAgWcm8NlipvR1s5bfEJjfYs1/iS5xhfz2W8JjpVndfAFJg12HjkPaOSHpymMeRdhsHHYpiR98LcGuNJc9z8E1BUE3wpJ2AuGTBbPxou294bRR+bcIk3V0mSUMtenfZUGvdJZsLtN8II8kH31a63r4StaLzUDoTyi8fXzYOOr6fw0fxHjT3Fu/1+PV4EMp3+4ODu53ezdnVzYyFkDxg/1LQQq6dqwYhg/FToLvdgk4NW4d02zvaEu8IgJI2V3XoOjF1kcsj03swwpXw8K6V0Z9/z30dU+P322NeD53gvQbCx+cXJ1GQo6TfTBcFdnQSNo5BGhm9kHq6Ru0DqYVuBqLoCCdhv9uy98GCRR2zC88ahELBWVRJjvh1eo6kSSZw31IkPwgmSZVOkwKvfkkkssmX6fTN9Gzy6cOHzx9n83/O3s8m04930zPEPXffnT+EClWCg7Xn1TW47oLYafcg+Ka7IP6iC2fMoYPvLqkUApKifOhNrKR7vvdm0d507nnTpMLCZ6N2O1rGuWq2uL7oiofewpCiKUal9w22lL8sg8r5KlO1vzAcX3RJfPuivp6Sav4D0YQjZg== -sidebar_class_name: "get api-method" -info_path: docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

    Get cluster topology
    Obtains the current topology of the cluster the gateway is part of.
    Response schema: `brokers` (nullable array) — a list of brokers that are part of this cluster, each with a `partitions` array describing the partitions managed or replicated on that broker.
    diff --git a/docs/apis-tools/zeebe-api-rest/specifications/sidebar.js b/docs/apis-tools/zeebe-api-rest/specifications/sidebar.js deleted file mode 100644 index 25b13ac45ac..00000000000 --- a/docs/apis-tools/zeebe-api-rest/specifications/sidebar.js +++ /dev/null @@ -1,48 +0,0 @@ -module.exports = [ - { - type: "doc", - id: "apis-tools/zeebe-api-rest/specifications/zeebe-rest-api", - }, - { - type: "category", - label: "Cluster", - items: [ - { - type: "doc", - id: "apis-tools/zeebe-api-rest/specifications/get-cluster-topology", - label: "Get cluster topology", - className: "api-method get", - }, - ], - }, - { - type: "category", - label: "User task", - items: [ - { - type: "doc", - id: "apis-tools/zeebe-api-rest/specifications/complete-a-user-task", - label: "Complete a user task", - className: "api-method post", - }, - { - type: "doc", - id: "apis-tools/zeebe-api-rest/specifications/assign-a-user-task", - label: "Assign a user task", - className: "api-method post", - }, - { - type: "doc", - id: "apis-tools/zeebe-api-rest/specifications/update-a-user-task", - label: "Update a user task", - className: "api-method patch", - }, - { - type: "doc", - id: "apis-tools/zeebe-api-rest/specifications/unassign-a-user-task", - label: "Unassign a user task", - className: "api-method delete", - }, - ], - }, -]; diff --git a/docs/apis-tools/zeebe-api-rest/specifications/unassign-a-user-task.api.mdx b/docs/apis-tools/zeebe-api-rest/specifications/unassign-a-user-task.api.mdx deleted file mode 100644 index b9b6246ae24..00000000000 --- a/docs/apis-tools/zeebe-api-rest/specifications/unassign-a-user-task.api.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: unassign-a-user-task -title: "Unassign a user task" -description: "Removes the assignee of a task with the given key." 
-sidebar_label: "Unassign a user task" -hide_title: true -hide_table_of_contents: true -api: eJztV01z2zYQ/SsYnJKpJcqpmya6qY7SurUTjyy107g6gORKQkwCDD4kqxz+9+4SpL5bdzI5+uAxAe6+t4vdBy1L7sTc8v49n1gwzAn7wKdnPAWbGFk4qRXv8xHkegmWuQUwYa2cKwCmZ0zU9mwl3aJ+N5dLUOwB1l1+xgthRA4ODKGXXOECoTyyjNHpN1ijjST0QrgFPhv44qWBlPed8XAYwhjhEZhYicm3wRKTTRaQC94vuVsXRCKVgzkYfDXTJhcubL2+4FU1JSJbaGXBkser3gX9OybbMLCVsMyrJu+UWZ8kYO3MZxnmWZ3xi17vSYijE2KJUEo7FsMOdpfdaAMsBSdkZpnA58LopUyRVqoaoA2exTpdd/9SmGOiMV3lKAZRFJlMBMUQoWecQf7dZ0sBlTuntB/pgN0Gy4aX6fgzJA4LzYJhHNjvR+8v2duLH36cvlg4V9h+FK1Wq66ZJR1IpdOmq808wiX9kd3LLsNTwBxysaY8RYpmyCkyyqoA4yT2lC0gkTOZMKfrBJuwGZUy5NcUNYRFjbVx3pZ8U3rrjFTz3cp7I/lhNw3YZHTF8FyVk7M1OhxT1z4z4TPCELH2rh9nQj1QxZ102UnSQxbr81yYTdfuEyCQdcJ5+2Tnfv/qCJv665fx+JYFCJboFBj6II+0LRElkUslc5/zPrYprsRjWL3u9SrCpIr/j0wUg8cC069b6zAdbI5827d1YlJhXCr5VpXRRs7lIS8SbWvBmyZ+FzKqqqpW5tPiPlYm6Z2kOdNepY3C334FDtahEe3KaEyEKgUs8cZgcnh3PIv9WezPYv9GYseXOGwsNI4PSJLh3FGPIDhZ9HlEMu2QTG1U7gwgVdQOMzREgFm2s4o3eEy8DCKqsPfLhbau6peFNq6KludovxRGCgyiLii9DmJrmyjTicjq7VPFpBc0EIUh6hMAKuZnvB1WIowUxLOP96b3pncSikzbswpAo+HdmA1ur1hIKfTezn3QQpKwT0IG46dA61nKAt5n0q3vyCWcRQx4lZmBp6Pf9EPDVSPTOhjhTnh433bJr3+M60JLNdO1e1Pw/SDo+LFWIeJe9/y4sTBQ0kei89yr+pLE5trc0QEtybx1lMoZx3sU8J4lxmZODSaXwd+t2XWwYL8HXnZes4ZGaW/IOeL7uIukUSLQLxXR3wQTxZmOo1xIFTVENvo0HP407Fx+vLmZfLga/9m5vrocfrgbdhC36x5dfQgFdkku1E5Uk2ZUxKbZ/PQcJl9ufyO+bnBvSubg0UV4DeCMXjWZlo2i7vlWUejQ3x/qN6LC/gjCuOdlGQsLE5NVFW1/8WDWuD/d6qgWXiotPaOIZyKzhx8Bu5m9GDWfCy/Zf30anMyl2RRqXes487TCRwQ5+EKppmi+AJFir1J8wWKAHwCF2/H9119iksjmWno3vB6Oh1jZfwDR0aEr -sidebar_class_name: "delete api-method" -info_path: docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

    Unassign a user task
    Removes the assignee of a task with the given key.
    Request: path parameters.
    Responses: the user task was unassigned successfully; the user task with the given key cannot be unassigned; the user task with the given key was not found; the user task with the given key is in the wrong state — details are provided in the problem response body (HTTP status code >= 400 and <= 600).
    diff --git a/docs/apis-tools/zeebe-api-rest/specifications/update-a-user-task.api.mdx b/docs/apis-tools/zeebe-api-rest/specifications/update-a-user-task.api.mdx deleted file mode 100644 index 71af92194c3..00000000000 --- a/docs/apis-tools/zeebe-api-rest/specifications/update-a-user-task.api.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: update-a-user-task -title: "Update a user task" -description: "Update a user task with the given key." -sidebar_label: "Update a user task" -hide_title: true -hide_table_of_contents: true -api: eJztWFlz2zYQ/isYvjRprcOOkyZ6UxQldZrDI0vtNJZnDJKghIQEWACUrGr037uLpUTqcOym6Uwf7DgeksDe+y0Wuwwcn9igcxmMrDDMcfsluDoKYmEjI3MntQo6wSiPuROMs2K9h82lmzI3FWwiZ0KxL2LRDI6CnBueCScMclwGCl6AHKmGQPSrWMAeiRxz7qbwbMSfhTQiDjrOFGJX7BDYA2OmEy+pEu40K7xKKNNGU5HxoLMM3CJHcVI5MREGlhJtMu7o07PTYLW6IpHCupc6XiBNpUHCUwsqRBrIlcM1nuepjDhq0/psUaXlvjQdfhaRQ9uNzoVxUlhcjaZcTYQVxChNPybeI9sGvr34+IERA3IoUcVkJXfOyLAAx894Cjo3x2qs0CeJTlM9l2pSbbEs4oqFEKP4c2EdsCgDJC0TKs41eOAIFmOJonlaoxyruUxTpJUTpcEVHZTzI7sGjrFEL78xusjtNWswgxaxcMHA2JmMvQqKiSx3C5ZK67bpMKPuTxYX4hUQ3bX/AvRWE09Bfhjl/4RsrM5ri2VuaSvqrvSu4+xaFWl6Tc5n2jCdSeeQTjqQALLMDL2HqQmBt9K7fcPmB0uUm6hxa8HBQmCklHZ3BgvS3ad919NRSqxXm8jS77aFweBrYzCJBGAR/mYSaLSCRDE1sZSRQArJupu8VWac19KYQLmd2GWYagiw3rV1uGHwG04C+A9BGjgwX09KXKNlTTa4M3gIdgwJD1NBuq1QZpUD/0InYtMo8u+n2TYMarpxY/jioBaIBZS9IfU1z95fHWSAykgnMrvvjtXX1CSUf6OeE0/8HyoKX5x0+Br0NqV1tXdSPVTUh4r6/6ioe0DjEeXobhPQZRGorDNGG0rvuCnHDKY04lEkQChwY4mBnVUn5NWzGKci9Z7061t2M6lmmrqYJjtLGHqKwiniI/S8lxKLhAMLbK3GATVX42DLsk1JvR2Zo7LRo35xQJ0WtF24B1TMtbJ0iJy0T/cdMdzq8ebclk1ezGzhHZCAYGgzgdlpu30n/V6DWkuTknGTvQd4gu2Oy9QyDs9rz4DXPPVabRZCw0j+uKVDBEpwSvbTfqe4G+9z2lnKXZcrsJc2hiT9cvC6x16cPv356tHUudx2Wq35fN40SdQQUHS0aWozacEr/sd9j5sMXAA2ZHxBWNjUpuoIZzYXkUxkhJH2QCuVwSgfzuTt859Wv3LQFkbuHRpdNhqcMfCrcjJZYJ7uifY0Pgfx6Al14TphytWXoMqvfaG7UmyRZdxsbgzbAoCRddwV9s67wpOTg8feL8PhOSMWUB9i4auBx1opCI3IpJJZkQUdyFF44zf09qzdXiFPjPg9LIEie5OD+T61ds2B5MiqvPWGSQV6qeh7RUYbOZG7ckFQDetlEr8iiwjjp/eA9T4sEemIy0QXKi7h/eIb+EAcStDOjQZDMFJQpAso5MpB4XgA+wPYH8D+ncAOi5mAPi+mYU409dMfN4XXFqK0gSi1rWVt9rPCYQ22eOV0qDDgnGBJ0FlBxi+hbXSrzjLXxq1as2PYP+NGYq/hw4jLBLF16qTQ2KT+86EQ4gKOoNA0zj4JATh5AzVhzqmLQDnb/J63n7cPssKtaw8Ro0H/Ysi652eMTKKMq1WBNUuE80GWtPkupn5mZQVUMekWF0hCvggFFDDTLdDjmywoZXnO+E6b4As9vF7nxtvfhz68WL0G1Tysf8OznLC3Nb7aXPmhbzs5bbTh98nwpN05OekcP28+aT/9FOxexL+2c/difLnO3qsDt9FqsdZC166JUiXae6DM1G0/YgbhRcITtZvH+4gAXyOwI51lhfLVHVCxOVyIW5TincIg4OEAgBuCd1E53KQtPaKHa9E72sF+I7ns2EulXF+X9gnwL8ImCG1FHOhi3voL2bTCVIetjEvVKgXZ1qd+/2W/0fv4/v3ow9nwj8a7s17/w0W/AXyb7sb5OOaQ6BlXNa32J7a7pi+ro+3+890yz5y4cS2oWFJhCLxtyxL9l0GFfiDo1Ge/EF+C8GWwXIbcipFJVyv8DCloFvD9qkI8jUulxedqOnurCY8G5SD3Mbvf2PigKevJh1r42gMXMXiDR2C3M8deYT5OBY8BX6gp7eiRPo0h8qk47A2SV0drii7cbHJ3y96tLgMLwabknneHvV8Q2OUYO4MTCj4bPsepOvztBGP4By/aO8vXDP99GcBJMyn4BPcTY/z5G77OagE= -sidebar_class_name: "patch api-method" -info_path: docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api -custom_edit_url: null -hide_send_button: true ---- - -import ApiTabs from "@theme/ApiTabs"; -import DiscriminatorTabs from "@theme/DiscriminatorTabs"; -import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; -import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; -import MimeTabs from "@theme/MimeTabs"; -import ParamsItem from "@theme/ParamsItem"; -import ResponseSamples from "@theme/ResponseSamples"; -import SchemaItem from "@theme/SchemaItem"; -import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; -import OperationTabs from "@theme/OperationTabs"; -import TabItem from "@theme/TabItem"; - -

    Update a user task
    Update a user task with the given key.
    Request: path parameters; body with an optional, nullable `changeset` object — a JSON object with changed task attribute values. The following attributes can be adjusted (additional attributes are ignored): `candidateGroups` and `candidateUsers` (reset by providing an empty list), `dueDate` and `followUpDate` (reset by providing an empty String). Providing any of these attributes with a `null` value or omitting it preserves the persisted attribute's value. The assignee cannot be adjusted with this endpoint — use the Assign task endpoint instead, which ensures correct event emission for assignee changes.
    Responses: the user task was updated successfully; the user task with the given key cannot be updated; the user task with the given key was not found; the user task with the given key is in the wrong state — details are provided in the problem response body (HTTP status code >= 400 and <= 600).
    diff --git a/docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api.info.mdx b/docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api.info.mdx deleted file mode 100644 index 66890419109..00000000000 --- a/docs/apis-tools/zeebe-api-rest/specifications/zeebe-rest-api.info.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: zeebe-rest-api -title: "Zeebe REST API" -description: "API for communicating with the Zeebe cluster." -sidebar_label: Introduction -sidebar_position: 0 -hide_title: true -custom_edit_url: null ---- - -import ApiLogo from "@theme/ApiLogo"; -import SchemaTabs from "@theme/SchemaTabs"; -import TabItem from "@theme/TabItem"; -import Export from "@theme/ApiExplorer/Export"; - -

    Zeebe REST API
    API for communicating with the Zeebe cluster.
    Authentication: HTTP authorization scheme `bearer`, bearer format JWT.
    - diff --git a/docs/apis-tools/zeebe-api-rest/tutorial.md b/docs/apis-tools/zeebe-api-rest/tutorial.md deleted file mode 100644 index ec299cbe3a6..00000000000 --- a/docs/apis-tools/zeebe-api-rest/tutorial.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -id: zeebe-api-tutorial -title: Tutorial -description: "New to the Zeebe API? Step through our tutorial to assign and unassign a user to and from a Zeebe user task." ---- - -In this tutorial, we'll step through examples to highlight the capabilities of the Zeebe API, such as assigning and unassigning a user to and from a Zeebe user task. - -## Prerequisites - -- If you haven't done so already, [create a cluster](/guides/create-cluster.md). -- Upon cluster creation, [create your first client](/guides/setup-client-connection-credentials.md). Ensure you check the `Zeebe` client scope box. - -:::note -Make sure you keep the generated client credentials in a safe place. The **Client secret** will not be shown again. For your convenience, you can also download the client information to your computer. -::: - -- In this tutorial, we utilize a JavaScript-written [GitHub repository](https://github.com/camunda/camunda-api-tutorials) to write and run requests. Clone this repo before getting started. -- Ensure you have [Node.js](https://nodejs.org/en/download) installed as this will be used for methods that can be called by the CLI (outlined later in this guide). Run `npm install` to ensure you have updated dependencies. - -## Getting started - -- You need authentication to access the API endpoints. Find more information [here](./zeebe-api-rest-authentication.md). - -## Set up authentication - -If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client id and client secret. Then, we return the actual token that can be passed as an authorization header in each request. - -To set up your credentials, create an `.env` file which will be protected by the `.gitignore` file. You will need to add your `ZEEBE_CLIENT_ID`, `ZEEBE_CLIENT_SECRET`, `ZEEBE_BASE_URL`, and `ZEEBE_AUDIENCE`, which is `zeebe.camunda.io` in a Camunda 8 SaaS environment. For example, your audience may be defined as `ZEEBE_AUDIENCE=zeebe.camunda.io`. - -These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). - -Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. - -:::note - -In this tutorial, we will execute arguments to assign and unassign a user to and from a Zeebe user task. You can examine the framework for processing these arguments in the `cli.js` file before getting started. - -::: - -## Assign a Zeebe user task (POST) - -:::note -In this tutorial, you will capture a **Zeebe user task** ID to assign and unassign users in this API. Camunda 8.5 introduced this new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type, and these Zeebe user tasks are different from job worker-based user tasks. 
See more details on task type differences in the [migrating to Zeebe user tasks documentation](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md#task-type-differences). -::: - -First, let's script an API call to assign a Zeebe user task. - -To do this, take the following steps: - -1. In the file named `zeebe.js`, outline the authentication and authorization configuration in the first few lines. This will pull in your `.env` variables to obtain an access token before making any API calls: - -```javascript -const authorizationConfiguration = { - clientId: process.env.ZEEBE_CLIENT_ID, - clientSecret: process.env.ZEEBE_CLIENT_SECRET, - audience: process.env.ZEEBE_AUDIENCE, -}; -``` - -2. Examine the function `async function assignUser([userTaskKey, assignee])` below this configuration. This is where you will script out your API call. -3. Within the function, you must first generate an access token for this request, so your function should now look like the following: - -```javascript -async function assignUser([userTaskKey, assignee]) { - const accessToken = await getAccessToken(authorizationConfiguration); -} -``` - -4. Using your generated client credentials from [prerequisites](#prerequisites), capture your Zeebe API URL beneath your call for an access token by defining `zeebeApiUrl`: - -`const zeebeApiUrl = process.env.ZEEBE_BASE_URL` - -5. On the next line, script the API endpoint to assign a Zeebe user task.: - -```javascript -const url = `${ZeebeApiUrl}/user-tasks/${userTaskKey}/assignment`; -``` - -6. Configure your POST request to the appropriate endpoint, including an authorization header based on the previously acquired `accessToken`: - -```javascript -const options = { - method: "POST", - url, - headers: { - Accept: "application/json", - Authorization: `Bearer ${accessToken}`, - }, - data: { - // The body contains information about the new assignment. - assignee: assignee, - }, -}; -``` - -7. Call the assign endpoint, process the results from the API call, and emit an error message from the server if necessary: - -```javascript -try { - // Call the assign endpoint. - const response = await axios(options); - - // Process the results from the API call. - if (response.status === 204) { - console.log(`User task assigned to ${assignee}.`); - } else { - // Emit an unexpected error message. - console.error("Unable to assign this user!"); - } -} catch (error) { - // Emit an error from the server. - console.error(error.message); -} -``` - -8. In your terminal, run `node cli.js zeebe assign `, where `` is the Zeebe user task ID you've captured from Tasklist, and `` is the assignee's email address. Include your own email address if you would like to see these results in your user interface. - -:::note -This `assign` command is connected to the `assignUser` function at the bottom of the `zeebe.js` file, and executed by the `cli.js` file. While we will assign and unassign users in this tutorial, you may add additional arguments depending on the API calls you would like to make. -::: - -If you have a valid user and task ID, the assignment will now output. If you have an invalid API name or action name, or no arguments provided, or improper/insufficient credentials configured, an error message will output as outlined in the `cli.js` file. If no action is provided, it will default to "assign" everywhere, except when unassigning a user. 
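For readers without the repository open, a hypothetical sketch of the kind of argument dispatch `cli.js` performs — the real file may differ, and it is assumed here that `zeebe.js` exports `assignUser` and `unassignUser`:

```javascript
// Hypothetical sketch of a cli.js-style dispatcher; the real file may differ.
const { assignUser, unassignUser } = require("./zeebe");

// Usage: node cli.js zeebe <action> [arguments...], defaulting to "assign".
const [api, action = "assign", ...args] = process.argv.slice(2);

const actions = {
  zeebe: {
    assign: assignUser, // node cli.js zeebe assign <userTaskKey> <assignee>
    unassign: unassignUser, // node cli.js zeebe unassign <userTaskKey>
  },
};

const handler = actions[api] && actions[api][action];
if (!handler) {
  console.error("Invalid API name or action name.");
  process.exit(1);
}

// The tutorial's functions take their parameters as a single array argument.
handler(args);
```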
- -## Unassign a Zeebe user task (DELETE) - -To unassign a user from a Zeebe user task, you can use the same Zeebe user task ID from the previous exercise and take the following steps: - -1. Outline your function, similar to the steps above: - -```javascript -async function unassignUser([userTaskKey]) { - const accessToken = await getAccessToken(authorizationConfiguration); - - const ZeebeApiUrl = process.env.ZEEBE_BASE_URL; - - const url = `${ZeebeApiUrl}/user-tasks/${userTaskKey}/assignee`; -} -``` - -2. Configure the API call using the DELETE method: - -```javascript -const options = { - method: "DELETE", - url, - headers: { - Accept: "application/json", - Authorization: `Bearer ${accessToken}`, - }, -}; -``` - -3. Process the results from the API call. For example: - -```javascript -try { - // Call the delete endpoint. - const response = await axios(options); - - // Process the results from the API call. - if (response.status === 204) { - console.log("User task has been unassigned!"); - } else { - // Emit an unexpected error message. - console.error("Unable to unassign this user task!"); - } -} catch (error) { - // Emit an error from the server. - console.error(error.message); -} -``` - -4. In your terminal, run `node cli.js zeebe unassign `, where `` is the Zeebe user task ID. - -## If you get stuck - -Having trouble configuring your API calls or want to examine an example of the completed tutorial? Navigate to the `completed` folder in the [GitHub repository](https://github.com/camunda/camunda-api-tutorials/tree/main/completed), where you can view an example `zeebe.js` file. - -## Next steps - -You can script several additional API calls as outlined in the [Zeebe API reference material](./zeebe-api-rest-overview.md). diff --git a/docs/apis-tools/zeebe-api-rest/zeebe-api-rest-authentication.md b/docs/apis-tools/zeebe-api-rest/zeebe-api-rest-authentication.md deleted file mode 100644 index b5f444e4a52..00000000000 --- a/docs/apis-tools/zeebe-api-rest/zeebe-api-rest-authentication.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -id: zeebe-api-rest-authentication -title: "Authentication" -description: "Describes authentication options that can be used to access Zeebe REST API." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -All Zeebe REST API requests require authentication. To authenticate, generate a [JSON Web Token (JWT)](https://jwt.io/introduction/) and include it in each request. - -## Generate a token - - - - -1. [Create client credentials](/guides/setup-client-connection-credentials.md) in the **Clusters > Cluster name > API** tab of [Camunda Console](https://console.camunda.io/). -2. Add permissions to this client for **Zeebe**. -3. Once you have created the client, capture the following values required to generate a token: - - | Name | Environment variable name | Default value | - | ------------------------ | -------------------------------- | -------------------------------------------- | - | Client ID | `ZEEBE_CLIENT_ID` | - | - | Client Secret | `ZEEBE_CLIENT_SECRET` | - | - | Authorization Server URL | `ZEEBE_AUTHORIZATION_SERVER_URL` | `https://login.cloud.camunda.io/oauth/token` | - | Audience | `ZEEBE_TOKEN_AUDIENCE` | `zeebe.camunda.io` | - | Optimize REST Address | `ZEEBE_REST_ADDRESS` | - | - - :::caution - When client credentials are created, the `Client Secret` is only shown once. Save this `Client Secret` somewhere safe. - ::: -4. 
Execute an authentication request to the token issuer: - ```bash - curl --request POST ${ZEEBE_AUTHORIZATION_SERVER_URL} \ - --header 'Content-Type: application/x-www-form-urlencoded' \ - --data-urlencode 'grant_type=client_credentials' \ - --data-urlencode "audience=${ZEEBE_TOKEN_AUDIENCE}" \ - --data-urlencode "client_id=${ZEEBE_CLIENT_ID}" \ - --data-urlencode "client_secret=${ZEEBE_CLIENT_SECRET}" - ``` - A successful authentication response looks like the following: - ```json - { - "access_token": "", - "expires_in": 300, - "refresh_expires_in": 0, - "token_type": "Bearer", - "not-before-policy": 0 - } - ``` -5. Capture the value of the `access_token` property and store it as your token. - - - - - -1. [Add an M2M application in Identity](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). -2. [Add permissions to this application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for **Zeebe API**. -3. Capture the `Client ID` and `Client Secret` from the application in Identity. -4. [Generate a token](/self-managed/identity/user-guide/authorizations/generating-m2m-tokens.md) to access the REST API. Provide the `client_id` and `client_secret` from the values you previously captured in Identity. - ```shell - curl --location --request POST 'http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token' \ - --header 'Content-Type: application/x-www-form-urlencoded' \ - --data-urlencode "client_id=${CLIENT_ID}" \ - --data-urlencode "client_secret=${CLIENT_SECRET}" \ - --data-urlencode 'grant_type=client_credentials' - ``` - A successful authentication response looks like the following: - ```json - { - "access_token": "", - "expires_in": 300, - "refresh_expires_in": 0, - "token_type": "Bearer", - "not-before-policy": 0 - } - ``` -5. Capture the value of the `access_token` property and store it as your token. - - - - - -## Use a token - -Include the previously captured token as an authorization header in each request: `Authorization: Bearer `. - -For example, to send a request to the Zeebe REST API's `/topology` endpoint: - - - - - -:::tip -The `${ZEEBE_REST_ADDRESS}` variable below represents the URL of the Zeebe REST API. You can capture this URL when creating an API client. You can also construct it as `https://${REGION}.zeebe.camunda.io/${CLUSTER_ID}/`. -::: - - - - - -:::tip -The `${ZEEBE_REST_ADDRESS}` variable below represents the URL of the Zeebe REST API. You can configure this value in your Self-Managed installation. The default value is `http://localhost:8080/`. -::: - - - - - -```shell -curl --header "Authorization: Bearer ${TOKEN}" \ - ${ZEEBE_REST_ADDRESS}/v1/topology -``` - -A successful response includes [information about the cluster](/apis-tools/zeebe-api-rest/specifications/get-cluster-topology.api.mdx). For example: - -```json -{ - "brokers": [ - ... - ], - "clusterSize": 3, - "partitionsCount": 3, - "replicationFactor": 3, - "gatewayVersion": "8.6.0" -} -``` - -## Token expiration - -Access tokens expire according to the `expires_in` property of a successful authentication response. After this duration, in seconds, you must request a new access token. 
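A minimal sketch of requesting such a token and reusing it until shortly before `expires_in` elapses — the environment variable names follow the table above, while the use of `axios` and the caching approach are assumptions, not part of the documented API:

```javascript
// Illustrative sketch only: axios usage and the caching strategy are assumptions.
const axios = require("axios");

let cached = { token: null, expiresAt: 0 };

async function getZeebeToken() {
  // Reuse the cached token until 30 seconds before it expires, then fetch a new one.
  if (cached.token && Date.now() < cached.expiresAt - 30000) {
    return cached.token;
  }

  const { data } = await axios.post(
    process.env.ZEEBE_AUTHORIZATION_SERVER_URL,
    new URLSearchParams({
      grant_type: "client_credentials",
      audience: process.env.ZEEBE_TOKEN_AUDIENCE,
      client_id: process.env.ZEEBE_CLIENT_ID,
      client_secret: process.env.ZEEBE_CLIENT_SECRET,
    }),
    { headers: { "Content-Type": "application/x-www-form-urlencoded" } }
  );

  cached = {
    token: data.access_token,
    expiresAt: Date.now() + data.expires_in * 1000, // expires_in is in seconds
  };
  return cached.token;
}
```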
diff --git a/docs/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md b/docs/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md deleted file mode 100644 index 0c8473dd07c..00000000000 --- a/docs/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: zeebe-api-rest-overview -title: "Overview" -description: "Interact with Zeebe clusters. Run user task state operations for Zeebe user tasks." ---- - -## Introduction - -The Zeebe REST API is a REST API designed to interact with the Zeebe process automation engine. - -:::note -Ensure you [authenticate](./zeebe-api-rest-authentication.md) before accessing the Zeebe REST API. -::: - -## Context paths - -For SaaS: `https://${REGION}.zeebe.camunda.io:443/${CLUSTER_ID}/v1/`, and for Self-Managed installations: `http://localhost:8080/v1/`. - -:::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). - -For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Zeebe component. -::: - -## API Explorer - -See [the interactive Zeebe REST API Explorer][zeebe-api-explorer] for specifications, example requests and responses, and code samples of interacting with the Tasklist REST API. - -[zeebe-api-explorer]: ./specifications/zeebe-rest-api.info.mdx diff --git a/docs/apis-tools/zeebe-api/gateway-service.md b/docs/apis-tools/zeebe-api/gateway-service.md index cab216dc3c7..7868477371f 100644 --- a/docs/apis-tools/zeebe-api/gateway-service.md +++ b/docs/apis-tools/zeebe-api/gateway-service.md @@ -36,7 +36,7 @@ message ActivateJobsRequest { // if the requestTimeout = 0, a default timeout is used. // if the requestTimeout < 0, long polling is disabled and the request is completed immediately, even when no job is activated. int64 requestTimeout = 6; - // a list of tenant IDs for which to activate jobs + // a list of IDs of tenants for which to activate jobs repeated string tenantIds = 7; } ``` @@ -79,7 +79,7 @@ message ActivatedJob { // JSON document, computed at activation time, consisting of all visible variables to // the task scope string variables = 13; - // the id of the tenant that owns the job + // the ID of the tenant that owns the job string tenantId = 14; } ``` @@ -118,7 +118,7 @@ message BroadcastSignalRequest { // the signal variables as a JSON document; to be valid, the root of the document must be an // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid. string variables = 2; - // the id of the tenant that owns the signal. + // the ID of the tenant that owns the signal. string tenantId = 3; } ``` @@ -129,7 +129,7 @@ message BroadcastSignalRequest { message BroadcastSignalResponse { // the unique ID of the signal that was broadcasted. int64 key = 1; - // the tenant id of the signal that was broadcasted. + // the tenant ID of the signal that was broadcasted. string tenantId = 2; } ``` @@ -160,8 +160,6 @@ message CancelProcessInstanceRequest { // the process instance key (as, for example, obtained from // CreateProcessInstanceResponse) int64 processInstanceKey = 1; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 2; } ``` @@ -254,10 +252,8 @@ message CreateProcessInstanceRequest { // will start at the start event. 
If non-empty the process instance will apply start // instructions after it has been created repeated ProcessInstanceCreationStartInstruction startInstructions = 5; - // the tenant ID of the process definition + // the tenant id of the process definition string tenantId = 6; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 7; } message ProcessInstanceCreationStartInstruction { @@ -279,7 +275,7 @@ message ProcessInstanceCreationStartInstruction { ```protobuf message CreateProcessInstanceResponse { // the key of the process definition which was used to create the process instance - int64 processKey = 1; + int64 processDefinitionKey = 1; // the BPMN process ID of the process definition which was used to create the process // instance string bpmnProcessId = 2; @@ -288,7 +284,7 @@ message CreateProcessInstanceResponse { // the unique identifier of the created process instance; to be used wherever a request // needs a process instance key (e.g. CancelProcessInstanceRequest) int64 processInstanceKey = 4; - // the tenant ID of the created process instance + // the tenant identifier of the created process instance string tenantId = 5; } ``` @@ -309,21 +305,24 @@ Start instructions have the same [limitations as process instance modification]( ### Input: `CreateProcessInstanceWithResultRequest` ```protobuf -message CreateProcessInstanceRequest { - CreateProcessInstanceRequest request = 1; - // timeout (in ms). the request will be closed if the process is not completed before - // the requestTimeout. - // if requestTimeout = 0, uses the generic requestTimeout configured in the gateway. - int64 requestTimeout = 2; +message CreateProcessInstanceWithResultRequest { + CreateProcessInstanceRequest request = 1; + // timeout (in ms). the request will be closed if the process is not completed + // before the requestTimeout. + // if requestTimeout = 0, uses the generic requestTimeout configured in the gateway. + int64 requestTimeout = 2; + // list of names of variables to be included in `CreateProcessInstanceWithResultResponse.variables` + // if empty, all visible variables in the root scope will be returned. + repeated string fetchVariables = 3; } ``` ### Output: `CreateProcessInstanceWithResultResponse` ```protobuf -message CreateProcessInstanceResponse { +message CreateProcessInstanceWithResultResponse { // the key of the process definition which was used to create the process instance - int64 processKey = 1; + int64 processDefinitionKey = 1; // the BPMN process ID of the process definition which was used to create the process // instance string bpmnProcessId = 2; @@ -332,9 +331,10 @@ message CreateProcessInstanceResponse { // the unique identifier of the created process instance; to be used wherever a request // needs a process instance key (e.g. CancelProcessInstanceRequest) int64 processInstanceKey = 4; - // consisting of all visible variables to the root scope + // JSON document + // consists of visible variables in the root scope string variables = 5; - // the tenant ID of the process definition + // the tenant identifier of the process definition string tenantId = 6; } ``` @@ -400,7 +400,7 @@ message EvaluateDecisionRequest { // [{ "a": 1, "b": 2 }] would not be a valid argument, as the root of the // JSON document is an array and not an object. 
string variables = 3; - // the tenant ID of the decision + // the tenant identifier of the decision string tenantId = 4; } ``` @@ -435,7 +435,7 @@ message EvaluateDecisionResponse { string failedDecisionId = 9; // an optional message describing why the decision which was evaluated failed string failureMessage = 10; - // the tenant ID of the evaluated decision + // the tenant identifier of the evaluated decision string tenantId = 11; // the unique key identifying this decision evaluation int64 decisionInstanceKey = 12; @@ -461,7 +461,7 @@ message EvaluatedDecision { repeated MatchedDecisionRule matchedRules = 7; // the decision inputs that were evaluated within this decision evaluation repeated EvaluatedDecisionInput evaluatedInputs = 8; - // the tenant ID of the evaluated decision + // the tenant identifier of the evaluated decision string tenantId = 9; } @@ -475,7 +475,7 @@ message EvaluatedDecisionInput { } message EvaluatedDecisionOutput { - // the id of the evaluated decision output + // the ID of the evaluated decision output string outputId = 1; // the name of the evaluated decision output string outputName = 2; @@ -484,7 +484,7 @@ message EvaluatedDecisionOutput { } message MatchedDecisionRule { - // the id of the matched rule + // the ID of the matched rule string ruleId = 1; // the index of the matched rule int32 ruleIndex = 2; @@ -524,12 +524,12 @@ Note that this is an atomic call, i.e. either all resources are deployed, or non message DeployResourceRequest { // list of resources to deploy repeated Resource resources = 1; - // the tenant ID of the resources to deploy + // the tenant id of the resources to deploy string tenantId = 2; } message Resource { - // the resource name, e.g. myProcess.bpmn, myDecision.dmn or myForm.form + // the resource name, e.g. myProcess.bpmn or myDecision.dmn string name = 1; // the file content as a UTF8-encoded string bytes content = 2; @@ -544,7 +544,7 @@ message DeployResourceResponse { int64 key = 1; // a list of deployed resources, e.g. 
processes repeated Deployment deployments = 2; - // the tenant ID of the deployed resources + // the tenant id of the deployed resources string tenantId = 3; } @@ -573,7 +573,7 @@ message ProcessMetadata { // the resource name (see: ProcessRequestObject.name) from which this process was // parsed string resourceName = 4; - // the tenant ID of the deployed process + // the tenant id of the deployed process string tenantId = 5; } @@ -594,7 +594,7 @@ message DecisionMetadata { // the assigned key of the decision requirements graph that this decision is // part of int64 decisionRequirementsKey = 6; - // the tenant ID of the deployed decision + // the tenant id of the deployed decision string tenantId = 7; } @@ -612,7 +612,7 @@ message DecisionRequirementsMetadata { // the resource name (see: Resource.name) from which this decision // requirements was parsed string resourceName = 5; - // the tenant ID of the deployed decision requirements + // the tenant id of the deployed decision requirements string tenantId = 6; } @@ -626,7 +626,7 @@ message FormMetadata { int64 formKey = 3; // the resource name string resourceName = 4; - // the tenant ID of the deployed form + // the tenant id of the deployed form string tenantId = 5; } ``` @@ -725,11 +725,9 @@ message ModifyProcessInstanceRequest { repeated ActivateInstruction activateInstructions = 2; // instructions describing which elements should be terminated repeated TerminateInstruction terminateInstructions = 3; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 4; message ActivateInstruction { - // the id of the element that should be activated + // the ID of the element that should be activated string elementId = 1; // the key of the ancestor scope the element instance should be created in; // set to -1 to create the new element instance within an existing element @@ -746,13 +744,13 @@ message ModifyProcessInstanceRequest { // "b" respectively, with their associated values. [{ "a": 1, "b": 2 }] would not be a // valid argument, as the root of the JSON document is an array and not an object. string variables = 1; - // the id of the element in which scope the variables should be created; + // the ID of the element in which scope the variables should be created; // leave empty to create the variables in the global scope of the process instance string scopeId = 2; } message TerminateInstruction { - // the id of the element that should be terminated + // the ID of the element that should be terminated int64 elementInstanceKey = 1; } } @@ -779,11 +777,11 @@ Returned if: Returned if: - At least one activate instruction is invalid. An activate instruction is considered invalid if: - - The process doesn't contain an element with the given id. + - The process doesn't contain an element with the given ID. - A flow scope of the given element can't be created. - The given element has more than one active instance of its flow scope. - At least one variable instruction is invalid. A variable instruction is considered invalid if: - - The process doesn't contain an element with the given scope id. + - The process doesn't contain an element with the given scope ID. - The given element doesn't belong to the activating element's flow scope. - The given variables are not a valid JSON document. - At least one terminate instruction is invalid. 
A terminate instruction is considered invalid if: @@ -807,8 +805,7 @@ message MigrateProcessInstanceRequest { int64 processInstanceKey = 1; // the migration plan that defines target process and element mappings MigrationPlan migrationPlan = 2; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 3; + message MigrationPlan { // the key of process definition to migrate the process instance to int64 targetProcessDefinitionKey = 1; @@ -817,9 +814,9 @@ message MigrateProcessInstanceRequest { } message MappingInstruction { - // the element id to migrate from + // the element ID to migrate from string sourceElementId = 1; - // the element id to migrate into + // the element ID to migrate into string targetElementId = 2; } } @@ -886,7 +883,7 @@ message PublishMessageRequest { // the message variables as a JSON document; to be valid, the root of the document must be an // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid. string variables = 5; - // the tenant ID of the message + // the tenant id of the message string tenantId = 6; } ``` @@ -897,7 +894,7 @@ message PublishMessageRequest { message PublishMessageResponse { // the unique ID of the message that was published int64 key = 1; - // the tenant ID of the message + // the tenant id of the message string tenantId = 2; } ``` @@ -935,8 +932,6 @@ problem, followed by this call. message ResolveIncidentRequest { // the unique ID of the incident to resolve int64 incidentKey = 1; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 2; } ``` @@ -979,8 +974,6 @@ message SetVariablesRequest { // be unchanged, and scope 2 will now be `{ "bar" : 1, "foo" 5 }`. if local was false, however, // then scope 1 would be `{ "foo": 5 }`, and scope 2 would be `{ "bar" : 1 }`. bool local = 3; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 4; } ``` @@ -1103,12 +1096,14 @@ message Partition { enum PartitionBrokerRole { LEADER = 0; FOLLOWER = 1; + INACTIVE = 2; } // Describes the current health of the partition enum PartitionBrokerHealth { HEALTHY = 0; UNHEALTHY = 1; + DEAD = 2; } // the unique ID of this partition @@ -1137,8 +1132,6 @@ message UpdateJobRetriesRequest { int64 jobKey = 1; // the new amount of retries for the job; must be positive int32 retries = 2; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 3; } ``` @@ -1177,8 +1170,6 @@ message UpdateJobTimeoutRequest { int64 jobKey = 1; // the duration of the new timeout in ms, starting from the current moment int64 timeout = 2; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 3; } ``` @@ -1210,11 +1201,9 @@ Returned if: ```protobuf message DeleteResourceRequest { - // The key of the resource that should be deleted. This can be the key - // of a process definition, the key of a decision requirements definition or the key of a form definition. + // The key of the resource that should be deleted. This can either be the key + // of a process definition, the key of a decision requirements definition or the key of a form. 
int64 resourceKey = 1; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 2; } ``` @@ -1300,7 +1289,7 @@ message ActivatedJob { // JSON document, computed at activation time, consisting of all visible variables to // the task scope string variables = 13; - // the id of the tenant that owns the job + // the ID of the tenant that owns the job string tenantId = 14; } ``` diff --git a/docs/components/best-practices/architecture/extending-human-task-management-c7.md b/docs/components/best-practices/architecture/extending-human-task-management-c7.md index 93400dc74c7..b51f68fcaad 100644 --- a/docs/components/best-practices/architecture/extending-human-task-management-c7.md +++ b/docs/components/best-practices/architecture/extending-human-task-management-c7.md @@ -83,7 +83,7 @@ Now you can use a task _filter_ with criteria checking the follow-up date and if ### Enforcing deadlines for tasks -There are different ways of enforcing deadlines for Human Tasks. Typical actions for overdue tasks are: +There are different ways of enforcing deadlines with human task orchestration. Typical actions for overdue tasks are: - Sending reminder mails - Changing the assignee/group @@ -96,7 +96,7 @@ There are different ways of enforcing deadlines for Human Tasks. Typical actions | Bulk actions possible (e.g. one mail with a list of all due tasks) | | | yes | | | No custom component required | yes | yes | Querying has to be done by external trigger or BPMN process | yes | | Use when | The escalation is business relevant and has to be visible in the process model | Overdue tasks can be easily monitored via tasklist application, actions are taken manually | Sophisticated, automated actions should take place | A timely escalation mechanism is desired | -| Don’t use when…​ | Each and every User Task has a due date and explicit modeling would clutter your process model | You need an action to be executed automatically | You do not want to run your own scheduling infrastructure | The escalation should be visible in the process model | +| Don’t use when…​ | Each and every user task has a due date and explicit modeling would clutter your process model | You need an action to be executed automatically | You do not want to run your own scheduling infrastructure | The escalation should be visible in the process model | #### Modeling an escalation @@ -176,7 +176,7 @@ In case you need _dynamically calculated values_ or specific _fields derived fro - using task variables as a kind of _caching_ mechanism, - being filled by "calculating" the values using _expression language_ -- e.g. by means of an _I/O Mapping_ of a User Task: +- e.g. by means of an _I/O Mapping_ of a user task: ```xml @@ -268,6 +268,6 @@ If you target a _TaskInfoEntity_: If you target a _ProcessInstanceInfoEntity_: -- Create a new instance by an _ExecutionListener_ on the process instance start event. The process instance id might not yet be known at this time. So either you create your own id and set it as a process variable (to SQL "join" on this later), or you can add a safe point before the listener triggers to make sure the process instance was committed to the database. +- Create a new instance by an _ExecutionListener_ on the process instance start event. The process instance ID might not yet be known at this time. 
So either you create your own ID and set it as a process variable (to SQL "join" on this later), or you can add a safe point before the listener triggers to make sure the process instance was committed to the database. - Decide when you have to update information in the entity, this depends on various factors (like amount of data, frequency of changes, way of changing data, ...). diff --git a/docs/components/best-practices/architecture/sizing-your-environment.md b/docs/components/best-practices/architecture/sizing-your-environment.md index 4c800642b2a..adc1deacb3c 100644 --- a/docs/components/best-practices/architecture/sizing-your-environment.md +++ b/docs/components/best-practices/architecture/sizing-your-environment.md @@ -85,7 +85,7 @@ The payload size also affects disk space requirements, as described in the next ### Disk space -The workflow engine itself will store data along every process instance, especially to keep the current state persistent. This is unavoidable. In case there are human tasks, data is also sent to Tasklist and kept there, until tasks are completed. +The workflow engine itself will store data along every process instance, especially to keep the current state persistent. This is unavoidable. In case there are user tasks, data is also sent to Tasklist and kept there, until tasks are completed. Furthermore, data is also sent from Operate and Optimize, which store data in Elasticsearch. These tools keep historical audit data for the configured retention times. The total amount of disk space can be reduced by using **data retention settings**. We typically delete data in Operate after 30 to 90 days, but keep it in Optimize for a longer period of time to allow more analysis. A good rule of thumb is something between 6 and 18 months. @@ -120,7 +120,7 @@ Using your throughput and retention settings, you can now calculate the required ## Understanding sizing and scalability behavior -Spinning up a Camunda 8 Cluster means you run multiple components that all need resources in the background, like the Zeebe broker, Elasticsearch (as the database for Operate, Tasklist, and Optimize), Operate, Tasklist, and Optimize. All those components need to be equipped with resources. +Spinning up a Camunda 8 Cluster means you run multiple components that all need resources in the background, like the Zeebe Broker, Elasticsearch (as the database for Operate, Tasklist, and Optimize), Operate, Tasklist, and Optimize. All those components need to be equipped with resources. All components are clustered to provide high-availability, fault-tolerance and resiliency. @@ -146,16 +146,16 @@ Now you can select a hardware package that can cover these requirements. In this Camunda 8 defines four [cluster sizes](/components/concepts/clusters.md#cluster-size) you can select from (1x, 2x, 3x, and 4x) after you have chosen your [cluster type](/components/concepts/clusters.md#cluster-type). The following table gives you an indication of what requirements you can fulfill with each cluster size. :::note -Contact your Customer Success Manager if you require a custom cluster size above these requirements. +Contact your Customer Success Manager to increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. 
::: -| Cluster size | 1x | 2x | 3x | 4x | -| :---------------------------------------------------------------------------------- | ---------------------------------: | ----------------------------------: | -------------------------------: | -------------------------------: | -| Max Throughput **Tasks/day** **\*** | 4.3 M | 8.6 M | 12.9 M | 17.2 M | -| Max Throughput **Tasks/second** **\*** | 50 | 100 | 150 | 200 | -| Max Throughput **Process Instances/day** **\*\*** | 3 M | 6 M | 9 M | 12 M | -| Max Total Number of Process Instances stored (in Elasticsearch in total) **\*\*\*** | 75 k | 150 k | 225 k | 300 k | -| Approximate resources provisioned **\*\*\*\*** | 11 vCPU, 22 GB memory, 64 GB disk. | 22 vCPU, 44 GB memory, 128 GB disk. | 33 vCPU, 66 GB mem, 192 GB disk. | 44 vCPU, 88 GB mem, 256 GB disk. | +| Cluster size | 1x | 2x | 3x | 4x | +| :---------------------------------------------------------------------------------- | ----------------------------------: | ----------------------------------: | -------------------------------: | -------------------------------: | +| Max Throughput **Tasks/day** **\*** | 9 M | 18 M | 27 M | 36 M | +| Max Throughput **Tasks/second** **\*** | 100 | 200 | 300 | 400 | +| Max Throughput **Process Instances/second** **\*\*** | 5 | 10 | 15 | 20 | +| Max Total Number of Process Instances stored (in Elasticsearch in total) **\*\*\*** | 75 k | 150 k | 225 k | 300 k | +| Approximate resources provisioned **\*\*\*\*** | 11 vCPU, 22 GB memory, 192 GB disk. | 22 vCPU, 44 GB memory, 384 GB disk. | 33 vCPU, 66 GB mem, 576 GB disk. | 44 vCPU, 88 GB mem, 768 GB disk. | The numbers in the table were measured using Camunda 8 (version 8.6), [the benchmark project](https://github.com/camunda-community-hub/camunda-8-benchmark) running on its own Kubernetes Cluster, and using a [realistic process](https://github.com/camunda/camunda/blob/main/zeebe/benchmarks/project/src/main/resources/bpmn/realistic/bankCustomerComplaintDisputeHandling.bpmn) containing a mix of BPMN symbols such as tasks, events and call activities including subprocesses. To calculate day-based metrics, an equal distribution over 24 hours is assumed. @@ -164,12 +164,20 @@ The numbers in the table were measured using Camunda 8 (version 8.6), [the bench **\*\*** As Tasks are the primary resource driver, the number of process instances supported by a cluster is calculated based on the assumption of an average of 10 tasks per process. Customers can calculate a more accurate process instance estimate using their anticipated number of tasks per process. **\*\*\*** Total number of process instances within the retention period, regardless of if they are active or finished. This is limited by disk space, CPU, and memory for running and historical process instances available to ElasticSearch. Calculated assuming a typical set of process variables for process instances. Note that it makes a difference if you add one or two strings (requiring ~ 1kb of space) to your process instances, or if you attach a full JSON document containing 1MB, as this data needs to be stored in various places, influencing memory and disk requirements. If this number increases, you can still retain the runtime throughput, but Tasklist, Operate, and/or Optimize may lag behind. +The provisioned disk size is calculated as the sum of the disk size used by Zeebe and Elasticsearch. -Data retention has an influence on the amount of data that is kept for completed instances in your cluster. 
The default data retention is set to 30 days, which means that data that is older than 30 days gets removed from Operate and Tasklist. If a process instance is still active, it is fully functioning in runtime, but customers are not able to access historical data older than 30 days from Operate and Tasklist. Data retention is set to 6 months, meaning that data that is older than 6 months will be removed from Optimize. Up to certain limits data retention can be adjusted by Camunda on request. See [Camunda 8 SaaS data retention](/components/concepts/data-retention.md). +The max throughput numbers should be considered as peak loads, and the data retention configuration considered when defining the amount of data kept for completed instances in your cluster. See [Camunda 8 SaaS data retention](/components/concepts/data-retention.md) for the default retention times for Zeebe, Tasklist, Operate and Optimize. + +- If process instances are completed and older than the configured retention time of an application, the data is removed. +- If a process instance is older than the configured retention time but still active and incomplete, it is fully functioning in runtime and is _not_ removed. + +Data retention can be adjusted by Camunda on request (up to certain limits). You should consider retention time adjustments and/or storage capacity increases if you plan to run more than [max PI stored in ES]/ [configured retention time]. **\*\*\*\*** These are the resource limits configured in the Kubernetes cluster and are always subject to change. -You might wonder why the total number of process instances stored is that low. This is related to limited resources provided to Elasticsearch, yielding performance problems with too much data stored there. By increasing the available memory to Elasticsearch you can also increase that number. At the same time, even with this rather low number, you can always guarantee the throughput of the core workflow engine during peak loads, as this performance is not influenced. Also, you can always increase memory for Elasticsearch later on if it is required. +:::note +Why is the total number of process instances stored that low? This is related to limited resources provided to Elasticsearch, yielding performance problems with too much data stored there. By increasing the available memory to Elasticsearch you can also increase that number. At the same time, even with this rather low number, you can always guarantee the throughput of the core workflow engine during peak loads, as this performance is not influenced. Also, you can always increase memory for Elasticsearch later on if it is required. +::: ### Camunda 8 Self-Managed diff --git a/docs/components/best-practices/architecture/understanding-human-tasks-management.md b/docs/components/best-practices/architecture/understanding-human-tasks-management.md index eae24bd4232..a2edbfae66c 100644 --- a/docs/components/best-practices/architecture/understanding-human-tasks-management.md +++ b/docs/components/best-practices/architecture/understanding-human-tasks-management.md @@ -12,13 +12,13 @@ description: "Use Camunda task management features or implement your requirement ## Using task assignment features -The lifecycle of human tasks (like assigning, delegating, and completing tasks) is mostly a generic issue. There is no need to model common aspects into all your processes, if often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. 
+The lifecycle of human task orchestration (like assigning, delegating, and completing tasks) is mostly a generic issue. There is no need to model common aspects into all your processes, if often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. ![Task assignment](understanding-human-tasks-management-assets/human-tasks.png) So every task can be assigned to either a group of people, or a specific individual. An individual can 'claim' a task, indicating that they are picking the task from the pool (to avoid multiple people working on the same task). -As a general rule, you should assign human tasks in your business process to _groups of people_ instead of specific individuals. +As a general rule, you should assign human tasks, like [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) or [manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md), in your business process to _groups of people_ instead of specific individuals. ```xml @@ -51,11 +51,11 @@ While assigning users to groups is advised, it's not the only option. You could ## Deciding about your task list frontend -If you have human tasks in your process, you must make up your mind on how exactly you want to let your users work on their tasks and interact with the workflow engine. You have basically three options: +If you are orchestrating human tasks in your process, you must make up your mind on how exactly you want to let your users work on their tasks and interact with the workflow engine. You have basically three options: - [Camunda Tasklist](/components/tasklist/introduction-to-tasklist.md): The Tasklist application shipped with Camunda. This works out-of-the-box and has a low development effort. However, it is limited in terms of customizability and how much you can influence the user experience. -- Custom task list application: You can develop a custom task list and adapt this to your needs without compromises. Human tasks are shown inside your custom application, following your style guide and usability concept. You will use the [Camunda Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) in the background. This is very flexible, but requires additional development work. +- Custom task list application: You can develop a custom task list and adapt this to your needs without compromises. User tasks are shown inside your custom application, following your style guide and usability concept. You will use the [Camunda Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) in the background. This is very flexible, but requires additional development work. - Third party tasklist: If our organization already has a task list application rolled out to the field, you might want to use this for tasks created by Camunda. You will need to develop some synchronization mechanism. The upside of this approach is that your end users might not even notice that you introduce a new workflow engine. 
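If you go with the custom task list option, the integration typically boils down to your backend querying the Tasklist API and rendering the result in your own UI. The following is a minimal sketch only, not a drop-in implementation: the base URL, token, candidate group, and search field names are assumptions to verify against the Tasklist REST API reference.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SalesTaskPoller {

  // Placeholders: point these at your own Tasklist endpoint and credentials.
  private static final String TASKLIST_BASE_URL = "https://tasklist.example.com";
  private static final String ACCESS_TOKEN = "<access-token>";

  public static void main(String[] args) throws Exception {
    // Search for open tasks offered to the "sales" candidate group
    // (field names assumed from the Tasklist REST API task search request).
    String searchRequest = "{\"state\": \"CREATED\", \"candidateGroup\": \"sales\"}";

    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(TASKLIST_BASE_URL + "/v1/tasks/search"))
        .header("Content-Type", "application/json")
        .header("Authorization", "Bearer " + ACCESS_TOKEN)
        .POST(HttpRequest.BodyPublishers.ofString(searchRequest))
        .build();

    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());

    // Your custom application renders this JSON task list in its own look and feel.
    System.out.println(response.body());
  }
}
```

From there, your application can claim and complete the returned tasks through the same API, keeping the end-user experience entirely under your control.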
diff --git a/docs/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md b/docs/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md index 5d2e5d98d88..dfff460628f 100644 --- a/docs/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md +++ b/docs/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md @@ -5,9 +5,7 @@ description: "To sketch the basic architecture of your solution, learn how to co One of your first tasks to build a process solution is to sketch the basic architecture of your solution. To do so, you need to answer the question of how to connect the workflow engine (Zeebe) with your application or with remote systems. -This document predominantly outlines writing some custom glue code in the programming language of your choice and using existing client libraries. In some cases, you might also want to leverage existing Connectors as a starting point. - -The workflow engine is a remote system for your applications, just like a database. Your application connects with Zeebe via remote protocols, [gRPC](https://grpc.io/) to be precise, which is typically hidden from you, like when using a database driver based on ODBC or JDBC. +The workflow engine is a remote system for your applications, just like a database. Your application connects with Zeebe via remote protocols (like [gRPC](https://grpc.io/) or REST), which is typically hidden from you, like when using a database driver based on ODBC or JDBC. With Camunda 8 and the Zeebe workflow engine, there are two basic options: @@ -197,7 +195,7 @@ As discussed in [writing good workers](../writing-good-workers/), you typically ## Connectors -The glue code is relatively simple, but you need to write code. Sometimes you might prefer using an out-of-the-box component, connecting Zeebe with the technology you need just by configuration. This component is called a **Connector**. +The glue code is relatively simple, but you need to write code. You might prefer using an out-of-the-box component, connecting Zeebe with the technology you need just by configuration. This component is called a **Connector**. A Connector can be uni or bidirectional and is typically one dedicated application that implements the connection that translates in one or both directions of communication. Such a Connector might also be helpful in case integrations are not that simple anymore. @@ -217,15 +215,7 @@ This is a bidirectional Connector which contains a Kafka listener for forwarding ### Out-of-the-box Connectors -Most Connectors are currently community extensions, which basically means that they are not officially supported by Camunda, but by community members (who sometimes are Camunda employees). While this sounds like a restriction, it can also mean there is more flexibility to make progress. A list of community-maintained Connectors can be found [here](https://github.com/camunda-community-hub/awesome-camunda-cloud#connectors-and-bridges). - -Camunda itself is also working on improving the Connector infrastructure as such to be able to provide more Connectors easier in the future. - -### Using Connectors in SaaS - -Currently, Connectors are not operated as part of the Camunda 8 SaaS offering, which means you need to operate them yourself in your environment, which might be a private or public cloud. 
- -![Connectors in SaaS](connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png) +As well as Camunda-maintained Connectors, additional Connectors are maintained by the community (made up of consultants, partners, customers, and enthusiastic individuals). You can find a list of Connectors in the [Camunda Marketplace](https://marketplace.camunda.com/). ### Reusing your own integration logic by extracting Connectors @@ -245,7 +235,7 @@ Whenever you have such glue code running and really understand the implications ## Recommendation -As a general rule of thumb, prefer custom glue code whenever you don’t have a good reason to go with an existing Connector (like the reasons mentioned above). +As a general rule of thumb, prefer custom glue code whenever you don’t have a good reason to go with an existing Connector. A good reason to use Connectors is if you need to solve complex integrations where little customization is needed, such as the [Camunda RPA bridge](https://docs.camunda.org/manual/latest/user-guide/camunda-bpm-rpa-bridge/) to connect RPA bots (soon to be available for Camunda 8). @@ -253,7 +243,7 @@ Good use of Connectors are also scenarios where you don’t need custom glue cod Some use cases also allow you to create a **resuable generic adapter**; for example, to send status events to your business intelligence system. -But there are also common downsides with Connectors. First, the possibilities are limited to what the creator of the Connector has foreseen. In reality, you might have slightly different requirements and hit a limitation of a Connector soon. +But there are also common downsides with Connectors. First, the possibilities are limited to what the creator of the Connector has foreseen. In reality, you might have slightly different requirements and hit a limitation of a Connector. Second, the Connector requires you to operate this Connector in addition to your own application. The complexity associated with this depends on your environment. diff --git a/docs/components/best-practices/development/invoking-services-from-the-process-c7.md b/docs/components/best-practices/development/invoking-services-from-the-process-c7.md index 4e05898330d..55a56b87420 100644 --- a/docs/components/best-practices/development/invoking-services-from-the-process-c7.md +++ b/docs/components/best-practices/development/invoking-services-from-the-process-c7.md @@ -160,7 +160,8 @@ Only if the increased latency does not work for your use case, for example, beca -

[Comparison table from invoking-services-from-the-process-c7.md; the surrounding HTML markup is omitted here. These hunks only re-wrap the table's cell markup onto separate lines and do not change the visible cell text: "Call a named bean or java class implementing the JavaDelegate interface", "Evaluate an expression using JUEL", "Use a configurable Connector (REST or SOAP services provided out-of-the-box)", "Pull a service task into an external worker thread and inform process engine of completion", "Execute a script inside the engine", the "Use with ... BPMN elements" row, the "Implement via" row (Java in same JVM, Expression Language (can reference Java code), BPMN configuration), and the "Configure via" rows (BPMN attributes, extension elements, and the script element on serviceTask/scriptTask with the camunda: prefix).]
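For reference, the "Java Delegate" option summarized in the table above boils down to a small class like the following sketch. The class name, variable names, and calculation are made up for illustration; in the BPMN model the class would be referenced from the service task via `camunda:class` or a `camunda:delegateExpression`.

```java
import org.camunda.bpm.engine.delegate.DelegateExecution;
import org.camunda.bpm.engine.delegate.JavaDelegate;

// Illustrative only: class name, variable names, and business logic are made up.
public class CalculateInterestDelegate implements JavaDelegate {

  @Override
  public void execute(DelegateExecution execution) throws Exception {
    Number amount = (Number) execution.getVariable("amount");

    // Placeholder for the actual business logic or remote call.
    double interest = amount.doubleValue() * 0.05;

    execution.setVariable("interest", interest);
  }
}
```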
    diff --git a/docs/components/best-practices/development/routing-events-to-processes-c7.md b/docs/components/best-practices/development/routing-events-to-processes-c7.md index 94278a8e592..cacd64f7c15 100644 --- a/docs/components/best-practices/development/routing-events-to-processes-c7.md +++ b/docs/components/best-practices/development/routing-events-to-processes-c7.md @@ -89,7 +89,7 @@ This could end with a successful income confirmation. However, it could also end 3 -In this case, a **conditional event** watching this data (e.g. a process variable changed by the human task) triggers and causes the process to reconsider the consequences of the new findings. +In this case, a **conditional event** watching this data (e.g. a process variable changed by the user task) triggers and causes the process to reconsider the consequences of the new findings. A conditional event's condition expression is evaluated at it's "scope" creation time, too, and not just when variable data changes. For our example of a boundary conditional event, that means that the activity it is attached to could principally be left immediately via the boundary event. However, our process example evaluates the data via the exclusive gateway - therefore such a scenario is semantically impossible. @@ -327,10 +327,11 @@ public class InvoiceMDB implements MessageListener { The provided REST API can be directly used to communicate with the workflow engine remotely. -``` POST /process-definition/key/invoice/start Request body: + +``` { "variables": { "invoiceId" : {"value" : "123456", "type": "String"} @@ -387,7 +388,7 @@ If messages are exchanged between different processes deployed in the workflow e 1 -Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN id in Java using a JavaDelegate: +Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN ID in Java using a JavaDelegate: ```java public class SendOrderReceivedMessageDelegate implements JavaDelegate { diff --git a/docs/components/best-practices/development/routing-events-to-processes.md b/docs/components/best-practices/development/routing-events-to-processes.md index 158f5a94900..9865f5635ec 100644 --- a/docs/components/best-practices/development/routing-events-to-processes.md +++ b/docs/components/best-practices/development/routing-events-to-processes.md @@ -87,7 +87,7 @@ This could end with a successful income confirmation. However, it could also end 3 -In this case, a **conditional event** watching this data (e.g. a process variable changed by the human task) triggers and causes the process to reconsider the consequences of the new findings. +In this case, a **conditional event** watching this data (e.g. a process variable changed by the user task) triggers and causes the process to reconsider the consequences of the new findings. :::caution Camunda 8 Camunda 8 does not yet [support a **conditional event**](/components/modeler/bpmn/bpmn-coverage.md). @@ -102,7 +102,7 @@ Most events actually occur somewhere external to the workflow engine and need to - Using API: Receive the message by means of your platform-specific activities such as connecting to a AMQP queue or processing a REST request and then route it to the process. - Using Connectors: Configure a Connector to receive messages such as Kafka records and rote it to the process. Note that this possibility works for Camunda 8 only. 
-### Starting process instance by BPMN process id +### Starting process instance by BPMN process ID If you have only one starting point (none start event) in your process definition, you reference the process definition by the ID in the BPMN XML file. @@ -210,7 +210,7 @@ If messages are exchanged between different processes deployed in the workflow e 1 -Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN id in Java: +Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN ID in Java: ```java @JobWorker(type="routeInput") diff --git a/docs/components/best-practices/development/service-integration-patterns.md b/docs/components/best-practices/development/service-integration-patterns.md index 60b84274266..4dacb210f09 100644 --- a/docs/components/best-practices/development/service-integration-patterns.md +++ b/docs/components/best-practices/development/service-integration-patterns.md @@ -114,13 +114,13 @@ This is also balanced by the fact that service tasks are simply very handy. The Using send and receive tasks means to use [the message concept built into Zeebe](/components/concepts/messages.md). This is a powerful concept to solve a lot of problems around cardinalities of subscriptions, correlation of the message to the right process instances, and verification of uniqueness of the message (idempotency). -When using messages, you need to provide the correlation id yourself. This means that the correlation id is fully under your control, but it also means that you need to generate it yourself and make sure it is unique. You will most likely end up with generated UUIDs. +When using messages, you need to provide the correlation ID yourself. This means that the correlation ID is fully under your control, but it also means that you need to generate it yourself and make sure it is unique. You will most likely end up with generated UUIDs. You can leverage [message buffering](/components/concepts/messages.md#message-buffering) capabilities, which means that the process does not yet need to be ready to receive the message. You could, for example, do other things in between, but this also means that you will not get an exception right away if a message cannot be correlated, as it is simply buffered. This leaves you in charge of dealing with messages that can never be delivered. Retries are not built-in, so if you need to model a loop to retry the initial service call if no response is received. And (at least in the current Zeebe version), there is no possibility to trigger error events for a receive task, which means you need to model error messages as response payload or separate message types — both are discussed later in this post. -A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/contact) to discuss such a scenario in more depth. 
+A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/reference/contact.md) to discuss such a scenario in more depth. **Summary And recommendations** @@ -132,7 +132,7 @@ The following table summarizes the possibilities and recommendations. | | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send task](/img/bpmn-elements/task-send.svg) | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send and receive task](/img/bpmn-elements/send-and-receive-task.png) | | Technical implications | | Behaves like a service task | A unique correlation ID is generated for you. You don’t have to think about race conditions or idempotency. Timeout handling and retry logic are built-in. API to flag business or technical errors. | Correlation ID needs to be generated yourself, but is fully under control. Message buffering is possible but also necessary. Timeouts and retries need to be modeled. BPMN errors cannot be used. | | Assessment | Very intuitive. | Might be more intuitive for fire and forget semantics, but can also lead to discussions. | Removes visual noise which helps stakeholders to concentrate on core business logic, but requires use of internal job instance keys. | More visual clutter, but also more powerful options around correlation and modeling patterns. | -| Recommendation | Default option, use unless it is confusing for business stakeholders (e.g. because of fire and forget semantics of a task). | Use for fire and forget semantics, unless it leads to unnecessary discussions, in this case use service task instead. | Use when response is within milliseconds and you can pass the Zeebe-internal job instance key around. | Use when the response will take time (> some seconds), or you need a correlation id you can control. | +| Recommendation | Default option, use unless it is confusing for business stakeholders (e.g. because of fire and forget semantics of a task). | Use for fire and forget semantics, unless it leads to unnecessary discussions, in this case use service task instead. | Use when response is within milliseconds and you can pass the Zeebe-internal job instance key around. | Use when the response will take time (> some seconds), or you need a correlation ID you can control. | ## Integrating services with BPMN events diff --git a/docs/components/best-practices/development/testing-process-definitions.md b/docs/components/best-practices/development/testing-process-definitions.md index d6bc992865d..edb3ff10769 100644 --- a/docs/components/best-practices/development/testing-process-definitions.md +++ b/docs/components/best-practices/development/testing-process-definitions.md @@ -56,15 +56,19 @@ When using Java, most customers use Spring Boot. While this is a common setup fo ### Technical setup using Spring +:::caution +Spring support with Zeebe Process Test uses the community-maintained project Spring Zeebe. +The new Camunda Spring SDK (Camunda 8.6+) is not supported. You could still use the testing library but without hooking into the Spring lifecycle. 
+::: + :::caution JUnit 5 You need to use JUnit 5. Ensure you use JUnit 5 in every test class: the `@Test` annotation you import needs to be `org.junit.jupiter.api.Test`. ::: 1. Use [_JUnit 5_](http://junit.org) as unit test framework. -2. Use the [Spring Zeebe SDK](../../../apis-tools/spring-zeebe-sdk/getting-started.md). -3. Use `@ZeebeSpringTest` to ramp up an in-memory process engine. -4. Use annotations from [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test/) to check whether your expectations about the state of the process are met. -5. Use mocking of your choice, e.g. [Mockito](http://mockito.org) to mock service methods and verify that services are called as expected. +2. Use `@ZeebeSpringTest` to ramp up an in-memory process engine. +3. Use annotations from [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test/) to check whether your expectations about the state of the process are met. +4. Use mocking of your choice, e.g. [Mockito](http://mockito.org) to mock service methods and verify that services are called as expected. A test can now look like the following example. The complete source code is available on [GitHub](https://github.com/camunda-community-hub/camunda-cloud-examples/blob/main/twitter-review-java-springboot/src/test/java/org/camunda/community/examples/twitter/TestTwitterProcess.java): diff --git a/docs/components/best-practices/development/understanding-transaction-handling-c7.md b/docs/components/best-practices/development/understanding-transaction-handling-c7.md index a22bccca414..e5b710d0f26 100644 --- a/docs/components/best-practices/development/understanding-transaction-handling-c7.md +++ b/docs/components/best-practices/development/understanding-transaction-handling-c7.md @@ -90,29 +90,29 @@ Aside a general strategy to mark service tasks as being save points you will oft **Do** configure a savepoint **after** -- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. +- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. -- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. 
+- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. +- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. **Do** configure a savepoint **before** -- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. +- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. -- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. +- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. 
If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. -- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. +- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. The Camunda JobExecutor works (by default) with [exclusive jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs), meaning that just one exclusive job per process instance may be executed at once. Hence, job executor threads will by default not cause optimistic locking exceptions at parallel joins "just by themselves", but other threads using the Camunda API might cause them - either for themselves or also for the job executor. **Don't** configure save points **before** -- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. +- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. -- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. 
+- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. ### Adding save points automatically to every model diff --git a/docs/components/best-practices/development/writing-good-workers.md b/docs/components/best-practices/development/writing-good-workers.md index b47389515a3..0f02d645f01 100644 --- a/docs/components/best-practices/development/writing-good-workers.md +++ b/docs/components/best-practices/development/writing-good-workers.md @@ -109,13 +109,7 @@ client.newWorker().jobType("retrieveMoney") }).open(); ``` - - -:::caution -[Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) is a community-maintained project. -::: - -The community-maintained [Spring Zeebe integration](https://github.com/camunda-community-hub/spring-zeebe) provides a more elegant way of writing this, but also uses a normal worker from the Java client underneath. In this case, your code might look like this: +The [Spring Zeebe SDK](/apis-tools/spring-zeebe-sdk/getting-started.md) provides a more elegant way of writing this, but also uses a normal worker from the Java client underneath. In this case, your code might look like this: ```java @JobWorker(type = "retrieveMoney", autoComplete = false) diff --git a/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md b/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md index f263474207a..f34b1de93cd 100644 --- a/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md +++ b/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md @@ -56,7 +56,8 @@ Camunda 8 supports the following binding types:

  • This option ensures predictable behavior by tying the two versions together, and allows you to deploy future versions of the target resource without disrupting ongoing process instances.

  • It is ideal for self-contained projects without external or shared dependencies.

  • - To use the deployment binding option, create and deploy a process application in Web Modeler,
    + To use the deployment binding option, create and deploy a process application in Web Modeler,
      or deploy multiple resources together via the Zeebe API.

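A minimal sketch of the "deploy multiple resources together via the Zeebe API" option mentioned above, using the Zeebe Java client; the gateway address, connection settings, and file names are illustrative assumptions.

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.DeploymentEvent;

public class DeployProcessApplication {

  public static void main(String[] args) {
    // Assumption: a locally reachable gateway without TLS; adjust for your cluster.
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build()) {

      // Deploying the process together with the resources it depends on ties
      // their versions to this one deployment (the deployment binding case).
      DeploymentEvent deployment = client.newDeployResourceCommand()
          .addResourceFromClasspath("order-process.bpmn")   // illustrative file names
          .addResourceFromClasspath("risk-check.dmn")
          .send()
          .join();

      System.out.println("Deployed with key " + deployment.getKey());
    }
  }
}
```

Because both resources arrive in one deployment, a process deployed this way with deployment binding keeps calling exactly this version of the decision, which matches the predictable-behavior point in the list above.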
  • diff --git a/docs/components/best-practices/modeling/creating-readable-process-models.md b/docs/components/best-practices/modeling/creating-readable-process-models.md index 266ce8bd86d..fc27c8907ce 100644 --- a/docs/components/best-practices/modeling/creating-readable-process-models.md +++ b/docs/components/best-practices/modeling/creating-readable-process-models.md @@ -107,7 +107,7 @@ Make your models easier to understand by modeling _explicitly_, which most often #### Using gateways instead of conditional flows -Model splitting the process flow by always using _gateway symbols_ like instead of conditional flows . +Model splitting the process flow by always using _gateway symbols_ such as instead of conditional flows .
    diff --git a/docs/components/best-practices/modeling/naming-technically-relevant-ids.md b/docs/components/best-practices/modeling/naming-technically-relevant-ids.md index 86c06417168..617d4b693bb 100644 --- a/docs/components/best-practices/modeling/naming-technically-relevant-ids.md +++ b/docs/components/best-practices/modeling/naming-technically-relevant-ids.md @@ -36,11 +36,11 @@ The following table provides you with a guideline that we would use in a context ### Editing IDs with Camunda Modeler -We recommend using Camunda Modeler's properties panel to edit technical identifiers and change them according to your naming conventions, like it is shown here for the process id: +We recommend using Camunda Modeler's properties panel to edit technical identifiers and change them according to your naming conventions, like it is shown here for the process ID: ![Properties Panel](naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png) -We especially do not recommend editing identifiers in the XML directly, as it might accidently corrupt your BPMN file. You have to keep the identifiers in the section about the graphical layout (so called "DI" for diagram interchange) further down in sync with the execution semantics at the top of the XML. +We especially do not recommend editing identifiers in the XML directly, as it might accidentally corrupt your BPMN file. You have to keep the identifiers in the section about the graphical layout (so called "DI" for diagram interchange) further down in sync with the execution semantics at the top of the XML. However, we include an XML example of all those identifiers mentioned for illustration: @@ -77,16 +77,16 @@ Elements in the diagram interchange section (DI) reference identifiers from abov Changing IDs can potentially break your tests or even process logic if done at a late stage of development. Therefore, consider using meaningful IDs right from the beginning and perform the renaming as part of the modeling. -### Aligning the BPMN file name with the process id +### Aligning the BPMN file name with the process ID It is a good practice to _align_ the _file name_ of your BPMN models with the _process id_ of the executable process that is inside the file. ![BPMN file name](naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png) -## Generating id constants classes +## Generating ID constants classes If you have lots of process, case, and decision definitions with lots of IDs, consider generating constant classes (e.g. via XSLT) directly from your BPMN or DMN XML files. For example, this can be used for testing. ## Using a Camunda Modeler plugin to generate meaningful ids -You can use [this modeler plugin community extension](https://github.com/camunda-community-hub/camunda-modeler-plugin-rename-technical-ids) to automatically convert your ids to comply with our best practices. Of course, you could also use this as a basis to create your own modeler plugin to generate ids that follow your custom naming conventions. Or, you could implement a similar plugin to implement checks if all relavant ids follow your naming conventions. +You can use [this modeler plugin community extension](https://github.com/camunda-community-hub/camunda-modeler-plugin-rename-technical-ids) to automatically convert your IDs to comply with our best practices. Of course, you could also use this as a basis to create your own modeler plugin to generate IDs that follow your custom naming conventions. 
Or, you could implement a similar plugin to implement checks if all relavant IDs follow your naming conventions. diff --git a/docs/components/best-practices/operations/securing-camunda-c7.md b/docs/components/best-practices/operations/securing-camunda-c7.md index 84cfe8e18ee..a6b2163d9f8 100644 --- a/docs/components/best-practices/operations/securing-camunda-c7.md +++ b/docs/components/best-practices/operations/securing-camunda-c7.md @@ -30,7 +30,7 @@ The core of the Camunda engine treats **users**, **groups**, and **tenants** as ``` -Or, claim that user task for a specific user via the Java API by referencing the user with a text string-based user id: +Or, claim that user task for a specific user via the Java API by referencing the user with a text string-based user ID: ```java taskService.claim(taskId, "fozzie"); @@ -178,5 +178,5 @@ From Camunda 7.9 on, it is much easier to implement SSO by making use of the [Co You can get started by looking at some examples showing how this can be achieved for different authentication frameworks: - [Very basic authentication filter](https://github.com/camunda-consulting/camunda-webapp-plugins/tree/master/camunda-webapp-plugin-sso-autologin) for the Camunda web apps that reads the user from a provided URL parameter. -- Many _application servers_ support single sign-on out of the box (or through plugins) and can provide the user id to the application. Have a look at the [Single Sign-On Community Extension](https://github.com/camunda/camunda-sso-jboss/). +- Many _application servers_ support single sign-on out of the box (or through plugins) and can provide the user ID to the application. Have a look at the [Single Sign-On Community Extension](https://github.com/camunda/camunda-sso-jboss/). - It is quite easy to [integrate Camunda with Spring Security](https://github.com/camunda-consulting/code/tree/master/snippets/springboot-security-sso) so that the framework handles authentication and passes the authenticated user on to Camunda. diff --git a/docs/components/concepts/assets/access-control/group-management.png b/docs/components/concepts/assets/access-control/group-management.png index 914e0a39f25..bf482faa910 100644 Binary files a/docs/components/concepts/assets/access-control/group-management.png and b/docs/components/concepts/assets/access-control/group-management.png differ diff --git a/docs/components/concepts/clusters.md b/docs/components/concepts/clusters.md index 00432521a6a..249c751a945 100644 --- a/docs/components/concepts/clusters.md +++ b/docs/components/concepts/clusters.md @@ -12,8 +12,8 @@ When [creating a cluster in SaaS](/components/console/manage-clusters/create-clu Prior to 8.6, clusters were configured by hardware size (S, M, L). -- To learn more about clusters prior to 8.6, see previous documentation versions. -- To learn more about migrating your existing clusters to the newer model, contact your Customer Success Manager. +- This documentation covers the SaaS cluster model introduced in 8.6. To learn more about clusters prior to 8.6, see previous documentation versions. +- To learn how you can migrate your existing clusters to the newer model, contact your Customer Success Manager. 
:::

@@ -29,13 +29,13 @@ You can choose from three different cluster types:

### Cluster availability and uptime

-| Type | Basic | Standard | Advanced |
-| :--- | :--- | :--- | :--- |
-| Usage | Non-production use, including experimentation, early development, and basic use cases. | Production-ready use cases with guaranteed higher uptime. | Production-ready use cases with guaranteed minimal disruption and the highest uptime. |
-| Uptime Percentage <br/> (Core Automation Cluster\*) | 99% | 99.5% | 99.9% |
-| RTO/RPO\*\* <br/> (Core Automation Cluster\*) | RTO: 8 hours <br/> RPO: 24 hours | RTO: 2 hours <br/> RPO: 4 hours | RTO: < 1 hour <br/> RPO: < 1 hour |
+| Type | Basic | Standard | Advanced |
+| :--- | :--- | :--- | :--- |
+| Usage | Non-production use, including experimentation, early development, and basic use cases. | Production-ready use cases with guaranteed higher uptime. | Production-ready use cases with guaranteed minimal disruption and the highest uptime. |
+| Uptime Percentage <br/> (Orchestration Cluster\*) | 99% | 99.5% | 99.9% |
+| RTO/RPO\*\* <br/> (Orchestration Cluster\*) | RTO: 8 hours <br/> RPO: 24 hours | RTO: 2 hours <br/> RPO: 4 hours | RTO: < 1 hour <br/> RPO: < 1 hour |

-* Core Automation Cluster means the components critical for automating processes and decisions, such as Zeebe, Operate, Tasklist, Optimize and Connectors.
+* Orchestration Cluster means the components critical for automating processes and decisions, such as Zeebe, Operate, Tasklist, Optimize, and Connectors.

** RTO (Recovery Time Objective) means the maximum allowable time that a system or application can be down after a failure or disaster before it must be restored. It defines the target time to get the system back up and running. RPO (Recovery Point Objective) means the maximum acceptable amount of data loss measured in time. It indicates the point in time to which data must be restored to resume normal operations after a failure. It defines how much data you can afford to lose. The RTO/RPO figures shown in the table are provided on a best-effort basis and are not guaranteed.

    :::info @@ -46,7 +46,7 @@ See [Camunda Enterprise General Terms](https://legal.camunda.com/licensing-and-o The cluster size defines the cluster performance and capacity. -After you have chosen your cluster type, you can choose the cluster size that best meets your cluster environment requirements. +After you have chosen your cluster type, choose the cluster size that best meets your cluster environment requirements. To learn more about choosing your cluster size, see [sizing your environment](/components/best-practices/architecture/sizing-your-environment.md#sizing-your-runtime-environment). @@ -54,14 +54,10 @@ To learn more about choosing your cluster size, see [sizing your environment](/c - Larger cluster sizes include increased performance and capacity, allowing you to serve more workload. - Increased usage such as higher throughput or longer data retention requires a larger cluster size. - Each size increase uses one of your available cluster reservations. For example, purchasing two HWP advanced reservations for your production cluster allows you to configure two clusters of size 1x, or one cluster of size 2x. +- You can change the cluster size at any time. See [resize a cluster](/components/console/manage-clusters/manage-cluster.md#resize-a-cluster). :::note - -Contact your Customer Success Manager to: - -- Increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. -- Increase the cluster size of an existing cluster. - +Contact your Customer Success Manager to increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. ::: ## Free Trial clusters @@ -74,19 +70,16 @@ When your Free Trial plan expires, you are automatically transferred to the Free ### Auto-pause -Free Trial `dev` (or untagged) clusters are automatically paused eight hours after a cluster is created or resumed from a paused state. Auto-pause occurs regardless of cluster usage. +Free Trial clusters are automatically paused after a period of inactivity. Auto-pause occurs regardless of cluster usage. -You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume your cluster](/components/console/manage-clusters/manage-cluster.md#resume-a-cluster). +You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume a cluster](/components/console/manage-clusters/manage-cluster.md#resume-a-cluster). -- Clusters tagged as `test`, `stage`, or `prod` do not auto-pause. -- Paused clusters are automatically deleted after 30 consecutive paused days. You can change the tag to avoid cluster deletion. -- No data is lost while a cluster is paused. All execution and configuration is saved, but cluster components such as Zeebe and Operate are temporarily disabled until you resume the cluster. +- Clusters tagged as `dev` (or untagged) auto-pause eight hours after the cluster is created or resumed from a paused state. +- Clusters tagged as `test`, `stage`, or `prod` auto-pause if there is no cluster activity for 48 hours. +- Cluster disk space is cleared when a trial cluster is paused. + - You will need to redeploy processes to the cluster once it is resumed from a paused state. + - Cluster configuration settings (for example, API Clients, Connector secrets, and IP allowlists) are saved so you can easily resume a cluster. :::tip - -To prevent auto-pause, you can: - -- Tag the cluster as `test`, `stage`, or `prod` instead of `dev`. 
-- [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter or Enterprise plan. - +To prevent auto-pause, [upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter or Enterprise plan. ::: diff --git a/docs/components/concepts/data-retention.md b/docs/components/concepts/data-retention.md index aa10e6a970c..55dc9678644 100644 --- a/docs/components/concepts/data-retention.md +++ b/docs/components/concepts/data-retention.md @@ -15,7 +15,7 @@ The following time-to-live settings are configured in SaaS for each application. - **Tasklist**: 30 days - **Zeebe**: 7 days -If there are specific requirements for your use-case, [reach out to us](/contact/) to discuss your data retention needs under an Enterprise plan. +If there are specific requirements for your use-case, [reach out to us](/reference/contact.md) to discuss your data retention needs under an Enterprise plan. For more information on development clusters in the Starter or Professional plans, refer to our [fair usage limits of those plans](https://camunda.com/legal/fair-usage-limits-for-starter-plan/). ## Additional information diff --git a/docs/components/concepts/encryption-at-rest.md b/docs/components/concepts/encryption-at-rest.md index 7c5148e168b..22f38e0ea53 100644 --- a/docs/components/concepts/encryption-at-rest.md +++ b/docs/components/concepts/encryption-at-rest.md @@ -23,7 +23,7 @@ By default, Camunda 8 SaaS cluster data at rest is protected with a provider-man Enterprise customers requiring a higher level of protection can select a dedicated Camunda-managed software or hardware (HSM) encryption key when creating a new cluster. The encryption key is managed by Camunda using Google Cloud Key Management Service (KMS). -- You can only select the encryption type when [creating a cluster](/docs/components/console/manage-clusters/create-cluster.md). You cannot change the encryption type after cluster creation. +- You can only select the encryption type when [creating a cluster](/components/console/manage-clusters/create-cluster.md). You cannot change the encryption type after cluster creation. - You can configure encryption keys on a per-cluster basis so that each cluster has a dedicated encryption key. Encryption keys can be configured for all cluster versions. - You can view cluster encryption key details in **Cluster Details** on the **Console Overview** tab. diff --git a/docs/components/concepts/expressions.md b/docs/components/concepts/expressions.md index 755b710d09a..3771b92ae32 100644 --- a/docs/components/concepts/expressions.md +++ b/docs/components/concepts/expressions.md @@ -1,7 +1,7 @@ --- id: expressions title: "Expressions" -description: "Expressions can be used to access variables and calculate values dynamically." +description: "Expressions can be used to access variables and calculate values dynamically. This is useful when automating a process using BPMN and orchestrating human tasks." --- Expressions can be used to access variables and calculate values dynamically. diff --git a/docs/components/concepts/job-workers.md b/docs/components/concepts/job-workers.md index 73e9c189e6d..97008846a83 100644 --- a/docs/components/concepts/job-workers.md +++ b/docs/components/concepts/job-workers.md @@ -1,7 +1,7 @@ --- id: job-workers title: "Job workers" -description: "A job worker is a service capable of performing a particular task in a process." +description: "Learn more about job workers, a service that can perform a particular task in a process. 
When this task needs to be performed, this is represented by a job." --- A **job worker** is a service capable of performing a particular task in a process. Each time such a task needs to be performed, this is represented by a **job**. diff --git a/docs/components/concepts/messages.md b/docs/components/concepts/messages.md index e17a04e4ebf..5f3d0aea9a2 100644 --- a/docs/components/concepts/messages.md +++ b/docs/components/concepts/messages.md @@ -4,7 +4,7 @@ title: "Messages" description: "Learn how process instances can respond to incoming messages." --- -Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called **[message correlation](/guides/message-correlation.md)**. +Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called [message correlation](/components/modeler/bpmn/message-events/message-events.md#message-correlation). ## Message subscriptions @@ -12,7 +12,7 @@ A message is not sent to a process instance directly. Instead, the message corre ![Message Correlation](assets/message-correlation.png) -A subscription is opened when a process instance awaits a message; for example, when entering a message catch event. The message name is defined either statically in the process (e.g. `Money collected`) or dynamically as an expression. The correlation key is defined dynamically as an expression (e.g. `= orderId`). The expressions are evaluated on activating the message catch event. The results of the evaluations are used as message name and as correlation key of the subscription (e.g. `"order-123"`). +A subscription is opened when a process instance awaits a message; for example, when entering a message catch event. The message name is defined either statically in the process (e.g. `Money collected`) or dynamically as an expression. The correlation key is defined dynamically as an expression (for example, `= orderId`). The expressions are evaluated on activating the message catch event. The results of the evaluations are used as message name and as correlation key of the subscription (e.g. `"order-123"`). When a message is published and the message name and correlation key match to a subscription, the message is correlated to the corresponding process instance. If no proper subscription is opened, the message is discarded. @@ -52,7 +52,7 @@ zbctl publish message "Money collected" --correlationKey "order-123" --ttl 1h ## Message cardinality -A message is correlated only _once_ to a process (based on the BPMN process id), across all versions of this process. If multiple subscriptions for the same process are opened (by multiple process instances or within one instance), the message is correlated only to one of the subscriptions. +A message is correlated only _once_ to a process (based on the BPMN process ID), across all versions of this process. If multiple subscriptions for the same process are opened (by multiple process instances or within one instance), the message is correlated only to one of the subscriptions. When subscriptions are opened for different processes, the message is correlated to _all_ the subscriptions. @@ -60,14 +60,14 @@ A message is _not_ correlated to a message start event subscription if an instan ## Message uniqueness -A message can have an optional message id — a unique id to ensure the message is published and processed only once (i.e. idempotency). 
The id can be any string; for example, a request id, a tracking number, or the offset/position in a message queue. +A message can have an optional message ID — a unique ID to ensure the message is published and processed only once (i.e. idempotency). The ID can be any string; for example, a request ID, a tracking number, or the offset/position in a message queue. -A message is rejected and not correlated if a message with the same name, the same correlation key, and the same id is already buffered. After the message is discarded from the buffer, a message with the same name, correlation key, and id can be published again. +A message is rejected and not correlated if a message with the same name, the same correlation key, and the same ID is already buffered. After the message is discarded from the buffer, a message with the same name, correlation key, and ID can be published again. -The uniqueness check is disabled when no message id is set. +The uniqueness check is disabled when no message ID is set.
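In addition to the zbctl example below, the same idempotent publish can be sketched with the Zeebe Java client. This is a minimal sketch, assuming a locally reachable gateway without TLS; the gateway address, message ID, and correlation key values are illustrative only.

```java
import io.camunda.zeebe.client.ZeebeClient;
import java.time.Duration;

public class PublishMoneyCollected {
  public static void main(String[] args) {
    // Assumption: local gateway without TLS; adjust the address for your cluster.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder().gatewayAddress("localhost:26500").usePlaintext().build()) {
      client
          .newPublishMessageCommand()
          .messageName("Money collected")
          .correlationKey("order-123")
          // The message ID makes the publish idempotent: while this message is buffered,
          // another message with the same name, correlation key, and ID is rejected.
          .messageId("payment-4711")
          .timeToLive(Duration.ofHours(1))
          .send()
          .join();
    }
  }
}
```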
    - Publish message with id via zbctl + Publish message with ID via zbctl

    ``` @@ -81,7 +81,7 @@ zbctl publish message "Money collected" --correlationKey "order-123" --messageId By combining the principles of message correlation, message uniqueness, and message buffering, very different behaviors can be achieved. Please note that a message name is mandatory, so it is omitted from the table. -| Correlation key | Message Id | Time to live | Receiver type | Behavior | +| Correlation key | Message ID | Time to live | Receiver type | Behavior | | --------------- | ---------- | ------------ | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | set | not set | set to 0 | Start event | A new instance is started if no instance with the correlation key set at start is active, see [single instance](./#single-instance). | | set | not set | set to 0 | Intermediate event | The message is correlated if a matching subscription is active. | @@ -118,6 +118,10 @@ The first message creates a new process instance. The following messages are cor When the instance ends and messages with the same correlation key are not correlated yet, a new process instance is created. +:::note +You may also use TTL to wait for messages that may arrive earlier when combining [start events and intermediate catch events](/docs/components/modeler/bpmn/events.md). +::: + ### Single instance **Problem**: Create exactly one instance of a process @@ -134,7 +138,7 @@ The first message creates a new process instance. The following messages are dis Publishing a message is a fire-and-forget action. As a user, you do not know if the correlation is a success. -To know if a published message was correlated (and to which process instance), use the [message correlation endpoint](../../apis-tools/camunda-api-rest/specifications/correlate-a-message.api.mdx). +To know if a published message was correlated (and to which process instance), use the [message correlation endpoint](../../apis-tools/camunda-api-rest/specifications/correlate-message.api.mdx). The message correlation endpoint works similarly to the message publish endpoint. However, the message correlation endpoint does not support [message buffering](#message-buffering). Any message published using this endpoint is either immediately correlated, or not correlated at all. This is due to the synchronous nature of requiring a response. diff --git a/docs/components/concepts/process-instance-creation.md b/docs/components/concepts/process-instance-creation.md index 6a43c5143a0..04f1d5369c8 100644 --- a/docs/components/concepts/process-instance-creation.md +++ b/docs/components/concepts/process-instance-creation.md @@ -14,7 +14,7 @@ Camunda 8 supports the following ways to create a process instance: ## Commands -A process instance is created by sending a command specifying the BPMN process id, or the unique key of the process. +A process instance is created by sending a command specifying the BPMN process ID, or the unique key of the process. There are two commands to create a process instance, outlined in the sections below. @@ -22,13 +22,14 @@ There are two commands to create a process instance, outlined in the sections be A process that has a [none start event](/components/modeler/bpmn/none-events/none-events.md#none-start-events) is started explicitly using **[CreateProcessInstance](/apis-tools/zeebe-api/gateway-service.md#createprocessinstance-rpc)**. 
-This command creates a new process instance and immediately responds with the process instance id. The execution of the process occurs after the response is sent. +This command creates a new process instance and immediately responds with the process instance ID. The execution of the process occurs after the response is sent. ![create-process](assets/create-process.png) -

    - Code example -

    Create a process instance: +

    + Code example +

    +Create a process instance: ``` zbctl create instance "order-process" @@ -38,16 +39,16 @@ Response: ``` { - "processKey": 2251799813685249, - "bpmnProcessId": "order-process", - "version": 1, - "processInstanceKey": 2251799813686019 + "processKey": 2251799813685249, + "bpmnProcessId": "order-process", + "version": 1, + "processInstanceKey": 2251799813686019 } ``` -

    -
    +

    +
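The equivalent call with the Zeebe Java client is sketched below. This is a minimal sketch, assuming a local gateway without TLS and the latest deployed version of `order-process`; it is not part of the original zbctl example.

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.ProcessInstanceEvent;

public class CreateOrderProcessInstance {
  public static void main(String[] args) {
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder().gatewayAddress("localhost:26500").usePlaintext().build()) {
      // The response arrives as soon as the instance is created;
      // the process itself keeps executing asynchronously.
      ProcessInstanceEvent instance =
          client
              .newCreateInstanceCommand()
              .bpmnProcessId("order-process")
              .latestVersion()
              .send()
              .join();
      System.out.println("Created process instance " + instance.getProcessInstanceKey());
    }
  }
}
```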
    ### Create and await results @@ -67,7 +68,8 @@ When the client resends the command, it creates a new process instance.
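With the Java client, the same create-and-await behavior looks roughly like the following minimal sketch; the gateway address is an assumption, and the `orderId` variable value is taken over from the zbctl example below.

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.ProcessInstanceResult;
import java.util.Map;

public class CreateInstanceWithResult {
  public static void main(String[] args) {
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder().gatewayAddress("localhost:26500").usePlaintext().build()) {
      // The call only completes once the process instance has finished,
      // so the result carries the final variables of the instance.
      ProcessInstanceResult result =
          client
              .newCreateInstanceCommand()
              .bpmnProcessId("order-process")
              .latestVersion()
              .variables(Map.of("orderId", "1234"))
              .withResult()
              .send()
              .join();
      System.out.println("Final variables: " + result.getVariables());
    }
  }
}
```

For long-running processes, the client request timeout may need to be raised accordingly, since the request blocks until the instance completes.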
    Code example -

    Create a process instance and await results: +

    +Create a process instance and await results: ``` zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}' @@ -112,7 +114,7 @@ To start the process instance at a user-defined element, you need to provide sta By default, the instruction starts before the given element. This means input mappings of that element are applied as usual. Multiple instructions can be provided to start the process instance at more than one element. -You can activate the same element multiple times inside the created process instance by referring to the same element id in more than one instruction. +You can activate the same element multiple times inside the created process instance by referring to the same element ID in more than one instruction. :::note Start instructions have the same [limitations as process instance modification](/components/concepts/process-instance-modification.md#limitations), e.g., it is not possible to start at a sequence flow. @@ -123,7 +125,7 @@ Start instructions are supported for both `CreateProcessInstance` commands.

    Code example

    - Create a process instance starting before the 'ship_parcel' element: +Create a process instance starting before the 'ship_parcel' element: ```java client.newCreateInstanceCommand() diff --git a/docs/components/concepts/process-instance-migration.md b/docs/components/concepts/process-instance-migration.md index a48724ed25e..9e13aa0d367 100644 --- a/docs/components/concepts/process-instance-migration.md +++ b/docs/components/concepts/process-instance-migration.md @@ -370,7 +370,7 @@ The following limitations exist that may be supported in future versions: - An element that becomes nested in a newly added subprocess - An element that was nested in a subprocess is no longer nested in that subprocess - Mapping instructions cannot change the element type -- Mapping instructions cannot change the task implementation, e.g. from a job worker user task to a Zeebe User Task +- Mapping instructions cannot change the task implementation, e.g. from a job worker user task to a Zeebe user task - The process instance must be in a wait state, i.e. waiting for an event or external input like job completion. It may not be taking a sequence flow or triggering an event while migrating the instance A full overview of error codes can be found in the migration command [RPC](/apis-tools/zeebe-api/gateway-service.md#migrateprocessinstance-rpc) or [REST](/apis-tools/camunda-api-rest/specifications/migrate-process-instance.api.mdx). diff --git a/docs/components/concepts/resource-deletion.md b/docs/components/concepts/resource-deletion.md index dfca210ff4e..5315a73e4e6 100644 --- a/docs/components/concepts/resource-deletion.md +++ b/docs/components/concepts/resource-deletion.md @@ -31,7 +31,7 @@ Zeebe's state. As a result, it is not possible to create new process instances f to create one will result in a `NOT_FOUND` exception. Deleting a process definition also deletes historical data. Zeebe will **never** reuse a process version. When deleting a process definition, it keeps track of the version number. -Deploying a new process with the same id will increment the version as usual. +Deploying a new process with the same ID will increment the version as usual. ### Deleting the latest version @@ -53,8 +53,8 @@ new `latest` instead. ### Call activities -A [call activity](/components/modeler/bpmn/call-activities/call-activities.md) references a process by id. It's -possible that all process definitions for this process id are deleted. In this case, Zeebe creates an [incident](/components/concepts/incidents.md) on the +A [call activity](/components/modeler/bpmn/call-activities/call-activities.md) references a process by ID. It's +possible that all process definitions for this process ID are deleted. In this case, Zeebe creates an [incident](/components/concepts/incidents.md) on the call activity, informing you that the process cannot be not found. ### Limitations @@ -74,5 +74,5 @@ a `NOT_FOUND` exception. Deleting a DRG also deletes historical data. ### Business rule tasks A [business rule task](/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md) references a decision -by id. It's possible that all versions of this decision are deleted. When this happens, an incident is created on the -business rule task with the message that no decision with the given decision id is found. +by ID. It's possible that all versions of this decision are deleted. When this happens, an incident is created on the +business rule task with the message that no decision with the given decision ID is found. 
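As an illustration of the deletion behavior described above, the following minimal sketch deletes a deployed resource by key with the Zeebe Java client. It assumes a client version that exposes the `DeleteResource` command and a local gateway without TLS, and the key value is illustrative; afterwards, creating a new instance of the deleted definition fails with `NOT_FOUND`.

```java
import io.camunda.zeebe.client.ZeebeClient;

public class DeleteProcessDefinition {
  public static void main(String[] args) {
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder().gatewayAddress("localhost:26500").usePlaintext().build()) {
      // Illustrative key of the process definition (or decision/DRG) to delete,
      // for example taken from the deployment response.
      long resourceKey = 2251799813685249L;

      client.newDeleteResourceCommand(resourceKey).send().join();
    }
  }
}
```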
diff --git a/docs/components/concepts/variables.md b/docs/components/concepts/variables.md index 9d705f466f8..c3ead682e69 100644 --- a/docs/components/concepts/variables.md +++ b/docs/components/concepts/variables.md @@ -1,7 +1,7 @@ --- id: variables title: "Variables" -description: "Variables are part of a process instance and represent the data of the instance." +description: "Variables are part of process instances and represent their data. Leverage the scope of variables and customize how they are merged into process instances." --- Variables are part of a process instance and represent the data of the instance. diff --git a/docs/components/concepts/what-is-camunda-8.md b/docs/components/concepts/what-is-camunda-8.md index 1374cbd8d1f..3a65f8d66ee 100644 --- a/docs/components/concepts/what-is-camunda-8.md +++ b/docs/components/concepts/what-is-camunda-8.md @@ -105,7 +105,7 @@ The platform and tools are usable in your environment right away, with full publ ## Next steps -- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/contact/) page. +- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/reference/contact.md) page. - [Introduction to Camunda 8](/guides/introduction-to-camunda-8.md) - [Create a Camunda 8 account](/guides/create-account.md) - [Migrate from Camunda 7 to Camunda 8](/guides/migrating-from-camunda-7/index.md) diff --git a/docs/components/concepts/workflow-patterns.md b/docs/components/concepts/workflow-patterns.md index ace1b2cbd69..8d148ee15e5 100644 --- a/docs/components/concepts/workflow-patterns.md +++ b/docs/components/concepts/workflow-patterns.md @@ -276,10 +276,6 @@ An important problem to solve is how to roll back a business transaction in case In BPMN, you can use [compensation events](/components/modeler/bpmn/bpmn-coverage.md) to easily implement compensations in your processes. -:::note -The compensation event is supported in Camunda 7, but not yet in Camunda 8. It is on the roadmap and will eventually be available in Camunda 8. -::: -

1 diff --git a/docs/components/connectors/custom-built-connectors/connector-sdk.md b/docs/components/connectors/custom-built-connectors/connector-sdk.md index f2da5c2c7fd..b79128ec119 100644 --- a/docs/components/connectors/custom-built-connectors/connector-sdk.md +++ b/docs/components/connectors/custom-built-connectors/connector-sdk.md @@ -1075,8 +1075,9 @@ For example, you can spin up a custom client with the [Zeebe Java client](/apis-tools/java-client/index.md) as follows: ```java -import io.camunda.connector.MyConnectorFunction -import io.camunda.connector.runtime.jobworker.outbound.ConnectorJobHandler; +import io.camunda.connector.MyConnectorFunction; +import io.camunda.connector.runtime.core.outbound.ConnectorJobHandler; +import io.camunda.connector.validation.impl.DefaultValidationProvider; import io.camunda.zeebe.client.ZeebeClient; public class Main { @@ -1087,7 +1088,7 @@ public class Main { zeebeClient.newWorker() .jobType("io.camunda:template:1") - .handler(new ConnectorJobHandler(new MyConnectorFunction())) + .handler(new ConnectorJobHandler(new MyConnectorFunction(), new DefaultValidationProvider())) .name("MESSAGE") .fetchVariables("authentication", "message") .open(); @@ -1102,5 +1103,5 @@ it with your job handler implementation that handles invoking the Connector func Your custom job handler needs to create a `OutboundConnectorContext` that the Connector function can use to handle variables, secrets, and Connector results. You can extend the -provided `io.camunda.connector.impl.outbound.AbstractConnectorContext` to quickly gain access +provided `io.camunda.connector.runtime.core.AbstractConnectorContext` to quickly gain access to most of the common context operations. diff --git a/docs/components/connectors/custom-built-connectors/connector-template-generator.md b/docs/components/connectors/custom-built-connectors/connector-template-generator.md new file mode 100644 index 00000000000..09eeaf978fe --- /dev/null +++ b/docs/components/connectors/custom-built-connectors/connector-template-generator.md @@ -0,0 +1,55 @@ +--- +id: connector-template-generator +title: Generate a Connector template +description: Learn how to generate Connector templates for easier custom Connector creation. +--- + +You can configure and automatically generate a custom [Connector template](/components/connectors/custom-built-connectors/connector-templates.md) in Web Modeler. + +You can start from a blank template or import an existing API definition such as an [OpenAPI specification](https://swagger.io/resources/open-api/), [Swagger specification](https://swagger.io/resources/open-api/), or a [Postman collection](https://www.postman.com/collection/). For example, download a Postman collection as a YAML file, import this into the generator, and choose which methods to include in the generated template. + +## Generate a Connector template + +To generate a Connector template: + +1. Select the Modeler project you want to create the template in. +1. Click **Create new**, and select **Connector template** to open the **Create new Connector template** screen. + ![Create the new Connector template](./img/configure-template-details.png) + +1. Select the template starting point. + + - **Start from API definition**: Import an existing API definition file as a starting point for the template. If you select this option, the **Import data source** section is shown below the template details. + + - **Start from blank**: Start from a blank template. + +1.
Configure the template details in the **Configure template details** section. + + - **Name:** Enter a clear and easily understood name for the template. For example, include the brand name if the template connects to a service or tool, or indicate the template's main feature. + + - **Description:** Enter a description for the template. For example, describe the template's main features and benefits. + + - **Icon:** Use a default BPMN symbol as the template icon in a BPMN diagram, or upload a custom icon. Supported icon formats are SVG, PNG, and JPEG, with a maximum file size limit of 8 KB. Icons must be a minimum of 512 x 512 pixels in size. + + - **Import from URL**: Enter the URL for the image you want to import, and click **Import icon**. + - **Upload file**: Drag and drop a file into the upload area, or click the link and select a file to upload. + + :::note + If you do not configure the template details at this point, a default name is generated and a default BPMN symbol selected. You can edit these template details after the template is created. + ::: + +1. If you selected the **Start from API definition** option, the **Import data source** section is shown. Select and upload an API definition. JSON and YAML file formats are supported, with a maximum file size limit of 1024 KB. + + 1. Select the format you are going to upload (OpenAPI or Postman). + 1. Upload the API definition. + + - **Import from URL**: Enter the URL for the API definition you want to import, and click **Import icon**. + - **Upload file**: Drag and drop a file into the upload area, or click the link and select a file to upload. + + 1. After the import is complete, select which actions to include in the template from the generated list of supported methods. + ![List of imported methods](./img/Imported-methods.png) + +1. Click **Create template** to create and open the newly generated Connector template in the [template editor](/components/connectors/manage-connector-templates.md). + +:::info +For more information on working with and configuring Connector templates, see [Connector templates](/components/connectors/custom-built-connectors/connector-templates.md). +::: diff --git a/docs/components/connectors/custom-built-connectors/connector-templates.md b/docs/components/connectors/custom-built-connectors/connector-templates.md index 4b1dad5a0f3..d9f3b816c46 100644 --- a/docs/components/connectors/custom-built-connectors/connector-templates.md +++ b/docs/components/connectors/custom-built-connectors/connector-templates.md @@ -7,8 +7,10 @@ description: Learn how to modify BPMN elements with Connector templates to creat import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -**Connector templates** are JSON configuration files, which customize how a BPMN element is shown, -and how it can be configured by process developers. Connector templates are a specific kind of [element template](/components/modeler/desktop-modeler/element-templates/about-templates.md). +Connectors are available [out-of-the-box (OOTB)](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) and come with [Connector templates](/components/connectors/manage-connector-templates.md) which customize how a BPMN element is shown, +and how it can be configured by process developers. 
Connector templates are a specific kind of [element templates](/components/modeler/desktop-modeler/element-templates/about-templates.md), which can also be used when creating custom Connectors via the [Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). + +Additionally, the [Camunda Marketplace](/components/modeler/web-modeler/camunda-marketplace.md) provides Connectors by Camunda partners and community contributors. Before developing one, you'll need to decide what you would like to achieve with your Connector. Currently, the options are: @@ -442,6 +444,566 @@ a simple JSON configuration: +## Inbound boundary event Connector templates + +You can, for example, allow the user to model and configure the following **HTTP webhook Connector** for boundary events by providing +a simple JSON configuration: + + + + + +![Webhook Inbound boundary Connector Example.png](img/custom-connector-template-inbound-boundary.png) + + + + + +```json +{ + "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", + "name": "Webhook Boundary Event Connector", + "id": "io.camunda.connectors.webhook.WebhookConnectorBoundary.v1", + "description": "Configure webhook to receive callbacks", + "documentationRef": "https://docs.camunda.io/docs/components/connectors/out-of-the-box-connectors/http-webhook/", + "version": 11, + "category": { + "id": "connectors", + "name": "Connectors" + }, + "appliesTo": ["bpmn:BoundaryEvent"], + "elementType": { + "value": "bpmn:BoundaryEvent", + "eventDefinition": "bpmn:MessageEventDefinition" + }, + "groups": [ + { + "id": "endpoint", + "label": "Webhook configuration" + }, + { + "id": "authentication", + "label": "Authentication" + }, + { + "id": "authorization", + "label": "Authorization" + }, + { + "id": "webhookResponse", + "label": "Webhook response" + }, + { + "id": "activation", + "label": "Activation" + }, + { + "id": "correlation", + "label": "Correlation", + "tooltip": "Learn more about message correlation in the documentation." + }, + { + "id": "output", + "label": "Output mapping" + } + ], + "properties": [ + { + "value": "io.camunda:webhook:1", + "binding": { + "name": "inbound.type", + "type": "zeebe:property" + }, + "type": "Hidden" + }, + { + "id": "inbound.method", + "label": "Webhook method", + "description": "Select HTTP method", + "optional": false, + "value": "any", + "group": "endpoint", + "binding": { + "name": "inbound.method", + "type": "zeebe:property" + }, + "type": "Dropdown", + "choices": [ + { + "name": "Any", + "value": "any" + }, + { + "name": "GET", + "value": "get" + }, + { + "name": "POST", + "value": "post" + }, + { + "name": "PUT", + "value": "put" + }, + { + "name": "DELETE", + "value": "delete" + } + ] + }, + { + "id": "inbound.context", + "label": "Webhook ID", + "description": "The webhook ID is a part of the URL", + "optional": false, + "constraints": { + "notEmpty": true, + "pattern": { + "value": "^[a-zA-Z0-9]+([-_][a-zA-Z0-9]+)*$", + "message": "can only contain letters, numbers, or single underscores/hyphens and cannot begin or end with an underscore/hyphen" + } + }, + "group": "endpoint", + "binding": { + "name": "inbound.context", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "inbound.shouldValidateHmac", + "label": "HMAC authentication", + "description": "Choose whether HMAC verification is enabled. 
See documentation and example that explains how to use HMAC-related fields", + "optional": false, + "value": "disabled", + "group": "authentication", + "binding": { + "name": "inbound.shouldValidateHmac", + "type": "zeebe:property" + }, + "type": "Dropdown", + "choices": [ + { + "name": "Enabled", + "value": "enabled" + }, + { + "name": "Disabled", + "value": "disabled" + } + ] + }, + { + "id": "inbound.hmacSecret", + "label": "HMAC secret key", + "description": "Shared secret key", + "optional": true, + "feel": "optional", + "group": "authentication", + "binding": { + "name": "inbound.hmacSecret", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.hmacHeader", + "label": "HMAC header", + "description": "Name of header attribute that will contain the HMAC value", + "optional": true, + "feel": "optional", + "group": "authentication", + "binding": { + "name": "inbound.hmacHeader", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.hmacAlgorithm", + "label": "HMAC algorithm", + "description": "Choose HMAC algorithm", + "optional": false, + "value": "sha_256", + "group": "authentication", + "binding": { + "name": "inbound.hmacAlgorithm", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "Dropdown", + "choices": [ + { + "name": "SHA-1", + "value": "sha_1" + }, + { + "name": "SHA-256", + "value": "sha_256" + }, + { + "name": "SHA-512", + "value": "sha_512" + } + ] + }, + { + "id": "inbound.hmacScopes", + "label": "HMAC scopes", + "description": "Set HMAC scopes for calculating signature data. 
See documentation", + "optional": true, + "feel": "required", + "group": "authentication", + "binding": { + "name": "inbound.hmacScopes", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.type", + "label": "Authorization type", + "description": "Choose the authorization type", + "value": "NONE", + "group": "authorization", + "binding": { + "name": "inbound.auth.type", + "type": "zeebe:property" + }, + "type": "Dropdown", + "choices": [ + { + "name": "None", + "value": "NONE" + }, + { + "name": "Basic", + "value": "BASIC" + }, + { + "name": "API key", + "value": "APIKEY" + }, + { + "name": "JWT", + "value": "JWT" + } + ] + }, + { + "id": "inbound.auth.username", + "label": "Username", + "description": "Username for basic authentication", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.username", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "BASIC", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.password", + "label": "Password", + "description": "Password for basic authentication", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.password", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "BASIC", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.apiKey", + "label": "API key", + "description": "Expected API key", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.apiKey", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "APIKEY", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.apiKeyLocator", + "label": "API key locator", + "description": "A FEEL expression that extracts API key from the request. See documentation", + "optional": false, + "value": "=split(request.headers.authorization, \" \")[2]", + "constraints": { + "notEmpty": true + }, + "feel": "required", + "group": "authorization", + "binding": { + "name": "inbound.auth.apiKeyLocator", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "APIKEY", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.jwt.jwkUrl", + "label": "JWK URL", + "description": "Well-known URL of JWKs", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.jwt.jwkUrl", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "JWT", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.jwt.permissionsExpression", + "label": "JWT role property expression", + "description": "Expression to extract the roles from the JWT token. 
See documentation", + "optional": false, + "feel": "required", + "group": "authorization", + "binding": { + "name": "inbound.auth.jwt.permissionsExpression", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "JWT", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.jwt.requiredPermissions", + "label": "Required roles", + "description": "List of roles to test JWT roles against", + "optional": false, + "feel": "required", + "group": "authorization", + "binding": { + "name": "inbound.auth.jwt.requiredPermissions", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "JWT", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.responseExpression", + "label": "Response expression", + "description": "Expression used to generate the HTTP response", + "optional": true, + "feel": "required", + "group": "webhookResponse", + "binding": { + "name": "inbound.responseExpression", + "type": "zeebe:property" + }, + "type": "Text" + }, + { + "id": "inbound.verificationExpression", + "label": "One time verification response expression", + "description": "Specify condition and response. Learn more in the documentation", + "optional": true, + "feel": "required", + "group": "webhookResponse", + "binding": { + "name": "inbound.verificationExpression", + "type": "zeebe:property" + }, + "type": "Text" + }, + { + "id": "activationCondition", + "label": "Activation condition", + "description": "Condition under which the Connector triggers. Leave empty to catch all events", + "optional": true, + "feel": "required", + "group": "activation", + "binding": { + "name": "activationCondition", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "consumeUnmatchedEvents", + "label": "Consume unmatched events", + "value": true, + "group": "activation", + "binding": { + "name": "consumeUnmatchedEvents", + "type": "zeebe:property" + }, + "tooltip": "Unmatched events are rejected by default, allowing the upstream service to handle the error. 
Check this box to consume unmatched events and return a success response", + "type": "Boolean" + }, + { + "id": "correlationKeyProcess", + "label": "Correlation key (process)", + "description": "Sets up the correlation key from process variables", + "constraints": { + "notEmpty": true + }, + "feel": "required", + "group": "correlation", + "binding": { + "name": "correlationKey", + "type": "bpmn:Message#zeebe:subscription#property" + }, + "type": "String" + }, + { + "id": "correlationKeyPayload", + "label": "Correlation key (payload)", + "description": "Extracts the correlation key from the incoming message payload", + "constraints": { + "notEmpty": true + }, + "feel": "required", + "group": "correlation", + "binding": { + "name": "correlationKeyExpression", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "messageIdExpression", + "label": "Message ID expression", + "description": "Expression to extract unique identifier of a message", + "optional": true, + "feel": "required", + "group": "correlation", + "binding": { + "name": "messageIdExpression", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "messageTtl", + "label": "Message TTL", + "description": "Time-to-live for the message in the broker (ISO-8601 duration)", + "optional": true, + "constraints": { + "notEmpty": false, + "pattern": { + "value": "^(PT.*|)$", + "message": "must be an ISO-8601 duration" + } + }, + "feel": "optional", + "group": "correlation", + "binding": { + "name": "messageTtl", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "messageNameUuid", + "generatedValue": { + "type": "uuid" + }, + "group": "correlation", + "binding": { + "name": "name", + "type": "bpmn:Message#property" + }, + "type": "Hidden" + }, + { + "id": "resultVariable", + "label": "Result variable", + "description": "Name of variable to store the response in", + "group": "output", + "binding": { + "name": "resultVariable", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "resultExpression", + "label": "Result expression", + "description": "Expression to map the response into process variables", + "feel": "required", + "group": "output", + "binding": { + "name": "resultExpression", + "type": "zeebe:property" + }, + "type": "Text" + } + ], + "icon": { + "contents": "data:image/svg+xml;base64,PHN2ZyBpZD0naWNvbicgeG1sbnM9J2h0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnJyB3aWR0aD0nMTgnIGhlaWdodD0nMTgnIHZpZXdCb3g9JzAgMCAzMiAzMic+CiAgPGRlZnM+CiAgICA8c3R5bGU+LmNscy0xIHsgZmlsbDogbm9uZTsgfTwvc3R5bGU+CiAgPC9kZWZzPgogIDxwYXRoCiAgICBkPSdNMjQsMjZhMywzLDAsMSwwLTIuODE2NC00SDEzdjFhNSw1LDAsMSwxLTUtNVYxNmE3LDcsMCwxLDAsNi45Mjg3LDhoNi4yNTQ5QTIuOTkxNCwyLjk5MTQsMCwwLDAsMjQsMjZaJy8+CiAgPHBhdGgKICAgIGQ9J00yNCwxNmE3LjAyNCw3LjAyNCwwLDAsMC0yLjU3LjQ4NzNsLTMuMTY1Ni01LjUzOTVhMy4wNDY5LDMuMDQ2OSwwLDEsMC0xLjczMjYuOTk4NWw0LjExODksNy4yMDg1Ljg2ODYtLjQ5NzZhNS4wMDA2LDUuMDAwNiwwLDEsMS0xLjg1MSw2Ljg0MThMMTcuOTM3LDI2LjUwMUE3LjAwMDUsNy4wMDA1LDAsMSwwLDI0LDE2WicvPgogIDxwYXRoCiAgICBkPSdNOC41MzIsMjAuMDUzN2EzLjAzLDMuMDMsMCwxLDAsMS43MzI2Ljk5ODVDMTEuNzQsMTguNDcsMTMuODYsMTQuNzYwNywxMy44OSwxNC43MDhsLjQ5NzYtLjg2ODItLjg2NzctLjQ5N2E1LDUsMCwxLDEsNi44MTItMS44NDM4bDEuNzMxNSwxLjAwMmE3LjAwMDgsNy4wMDA4LDAsMSwwLTEwLjM0NjIsMi4wMzU2Yy0uNDU3Ljc0MjctMS4xMDIxLDEuODcxNi0yLjA3MzcsMy41NzI4WicvPgogIDxyZWN0IGlkPSdfVHJhbnNwYXJlbnRfUmVjdGFuZ2xlXycgZGF0YS1uYW1lPScmbHQ7VHJhbnNwYXJlbnQgUmVjdGFuZ2xlJmd0OycgY2xhc3M9J2Nscy0xJwogICAgd2lkdGg9JzMyJyBoZWlnaHQ9JzMyJy8+Cjwvc3ZnPg==" + } +} +``` + + + + + ## Outbound Connector templates You can, for example, allow the user 
to model and configure the following **REST Connector** by providing a JSON configuration for a service task: diff --git a/docs/components/connectors/custom-built-connectors/img/Imported-methods.png b/docs/components/connectors/custom-built-connectors/img/Imported-methods.png new file mode 100644 index 00000000000..5c3ce9071bb Binary files /dev/null and b/docs/components/connectors/custom-built-connectors/img/Imported-methods.png differ diff --git a/docs/components/connectors/custom-built-connectors/img/Launch-template-generator.png b/docs/components/connectors/custom-built-connectors/img/Launch-template-generator.png new file mode 100644 index 00000000000..2cf3fcb582b Binary files /dev/null and b/docs/components/connectors/custom-built-connectors/img/Launch-template-generator.png differ diff --git a/docs/components/connectors/custom-built-connectors/img/configure-template-details.png b/docs/components/connectors/custom-built-connectors/img/configure-template-details.png new file mode 100644 index 00000000000..27012b099e4 Binary files /dev/null and b/docs/components/connectors/custom-built-connectors/img/configure-template-details.png differ diff --git a/docs/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-boundary.png b/docs/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-boundary.png new file mode 100644 index 00000000000..933dd0df8d5 Binary files /dev/null and b/docs/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-boundary.png differ diff --git a/docs/components/connectors/custom-built-connectors/update-guide/010-to-020.md b/docs/components/connectors/custom-built-connectors/update-guide/010-to-020.md deleted file mode 100644 index e71a7316283..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/010-to-020.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -id: 010-to-020 -title: Update 0.1 to 0.2 -description: "Review which adjustments must be made to migrate from Connector SDK 0.1.x to 0.2.0." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.1.x to 0.2.0. - -:::caution - -Be aware that the update from 0.1 to 0.2 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.2.0, we introduce the following structural changes: - -- Input validation and secret replacement move from writing imperative code to declaratively using annotations. -- The outbound aspect of APIs is more explicit. Classes have been moved to more explicit packages and have been renamed. -- New required annotation for outbound Connectors. - -### Declarative validation and secrets - -Input objects previously had to implement the `ConnectorInput` interface to participate in validation and secret replacement -initiated from the `ConnectorContext` using its `validate` and `replaceSecrets` methods respectively. - -With version 0.2.0, we remove the imperative approach for validation and secret replacement from the SDK. -Instead, you can use annotations to describe the constraints of input attributes and mark those that can contain -secrets. 
- -These are two input objects written with the SDK version 0.1.x: - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class MyConnectorRequest implements ConnectorInput { - - private String message; - private Authentication authentication; - - @Override - public void validateWith(final Validator validator) { - validator.require(message, "message"); - validator.require(authentication, "authentication"); - validateIfNotNull(authentication, validator); - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - replaceSecretsIfNotNull(authentication, secretStore); - } -} -``` - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class Authentication implements ConnectorInput { - - private String user; - private String token; - - @Override - public void validateWith(final Validator validator) { - validator.require(user, "user"); - validator.require(token, "token"); - if (token != null && !(token.startsWith("xobx") || token.startsWith("secrets."))) { - validator.addErrorMessage("Token must start with \"xobx\" or be a secret"); - } - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - token = secretStore.replaceSecret(token); - } -} -``` - -You can express the same input objects with SDK version 0.2.0 as follows: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.NotNull; - -public class MyConnectorRequest { - - @NotEmpty - private String message; - - @NotNull - @Valid - @Secret - private Authentication authentication; -} -``` - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.Pattern; - -public class Authentication { - - @NotEmpty - private String user; - - @NotEmpty - @Pattern("^(xobx-|secret).+") - @Secret - private String token; -} -``` - -As a result, you have to remove the `ConnectorInput` interface implementation and the imperative code that comes with `validateWith` -and `replaceSecrets`. You can now concisely describe the constraints of attributes rather than express them in imperative code. - -To use annotaion-based validation out of the box, you can include the new artifact `connector-validation` that -comes with the SDK. - - - - - -```xml - - io.camunda.connector - connector-validation - 0.2.0 - -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-validation:0.2.0' -``` - - - - -You can read more about validation and secret replacement in our -[SDK guide](/components/connectors/custom-built-connectors/connector-sdk.md). - -### Explicit outbound aspect - -With version 0.2.0 of the SDK, we make the outbound aspect of those components specific to outbound connectivity -more visible. This separates those SDK components that are tightly coupled to outbound from those that -will be reusable for inbound. - -With this change, the names of the following classes need to be adjusted: - -- Rename `io.camunda.connector.api.ConnectorContext` to `io.camunda.connector.api.outbound.OutboundConnectorContext`. -- Rename `io.camunda.connector.api.ConnectorFunction` to `io.camunda.connector.api.outbound.OutboundConnectorFunction`. 
-- Rename `io.camunda.connector.api.SecretProvider` to `io.camunda.connector.api.secret.SecretProvider`. -- Rename `io.camunda.connector.api.SecretStore` to `io.camunda.connector.api.secret.SecretStore`. -- Rename `io.camunda.connector.test.ConnectorContextBuilder` to `io.camunda.connector.test.outbound.OutboundConnectorContextBuilder`. - -As a result, you must replace all occurrences of the old class names and imports with the new ones. This includes the -SPI for the Connector function itself. Therefore, rename the file `META-INF/services/io.camunda.connector.api.ConnectorFunction` to -`META-INF/services/io.camunda.connector.api.outbound.OutboundConnectorFunction`. - -### `@OutboundConnector` annotation - -For best interoperability, Connectors provide default meta-data (`name`, `type`, `inputVariables`) via the `@OutboundConnector` annotation: - -```java -@OutboundConnector( - name = "PING", - inputVariables = {"caller"}, - type = "io.camunda.example.PingConnector:1" -) -public class PingConnector implements OutboundConnectorFunction { - ... -} -``` - -## Connector runtime environment - -If using the -[pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that comes with the SDK does not fit your use case, you can create a custom runtime environment. - -With version 0.2.0 of the [job worker runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#connector-job-handler), you need to make the following changes: - -- Rename `io.camunda.connector.runtime.jobworker.ConnectorJobHandler` to `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler`. -- Rename Connector-related env variables from `ZEEBE_` to `CONNECTOR_`. Zeebe configuration properties remain unchanged. - -As a general change in behavior the module will now pick up Connectors from classpath unless it is explicitly configured via environment variables. - -Also, take the name changes in the [SDK core](#explicit-outbound-aspect) into account. - -Implementing your own Connector wrapper you need to provide a Connector context specific to -your environment. Consider extending the `io.camunda.connector.impl.outbound.AbstractConnectorContext` -instead of implementing the `io.camunda.connector.api.ConnectorContext` yourself. Most of the commonly needed functionality -is already provided in there. diff --git a/docs/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md b/docs/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md deleted file mode 100644 index 61142a93750..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: 0100-to-0110 -title: Update 0.10 to 0.11 -description: "Review which adjustments must be made to migrate from Connector SDK 0.10.x to 0.11.0." ---- - -Beginner - -:::note -Migrate directly to version 0.11.2 of the SDK. This contains a fix for several issues in the 0.11.0 release. -::: - -This SDK release is not backwards-compatible. We are moving towards a stable Connectors release and continue to improve the experience of developing custom Connectors. - -In this SDK version, we changed the `OutboundConnectorContext` and `InboundConnectorContext interfaces significantly.` You can no longer use the `getVariablesAsType` or `getPropertiesAsType` methods in outbound and inbound Connectors, respectively. 
-Use the new `bindVariables` method instead, as it takes care of secret replacement, payload validation, and deserialization automatically. - -We are moving away from a mandatory `@Secret` annotation. -From this release onwards, secrets are automatically replaced in all input variables/properties without the need to explicitly declare an annotation. - -To migrate your Connector implementations, complete the following: - -1. If you used the `OutboundConnectorContext::getVariablesAsType` method in you outbound Connector functions, replace it with `OutboundConnectorContext::bindVariables`. -2. If you used the `InboundConnectorContext::getPropertiesAsType` method in you inbound Connector executables, replace it with `InboundConnectorContext::bindProperties`. -3. Remove calls to `OutboundConnectorContext::replaceSecrets` and `InboundConnectorContext::replaceSecrets` methods. The secrets are now replaced automatically. -4. Remove calls to `OutboundConnectorContext::validate` and `InboundConnectorContext::validate` methods. The validation is now performed automatically. -5. If you used the `@Secret` annotation in your Connector implementations, you can safely remove it as it has no effect. diff --git a/docs/components/connectors/custom-built-connectors/update-guide/020-to-030.md b/docs/components/connectors/custom-built-connectors/update-guide/020-to-030.md deleted file mode 100644 index 248d7e7cc31..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/020-to-030.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: 020-to-030 -title: Update 0.2 to 0.3 -description: "Review which adjustments must be made to migrate from Connector SDK 0.2.x to 0.3.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.2.x to 0.3.0. - -:::caution - -Be aware that the update from 0.2 to 0.3 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.3.0, we introduce the following structural changes: - -- Input validation moves from Jakarta Bean Validation API version 3.0 to 2.0. -- SDK artifacts have to be in scope `provided`. - -### Update to Validation API 2.0 - -To better integrate in the current Java ecosystem and widely used frameworks like Spring 5 and Spring Boot 2, the `connector-validation` module -now operates on Jakarta Bean Validation API version 2.0 instead of version 3.0. Adjust your Connector input objects using validation as follows: - -Replace all class imports starting with `jakarta.validation` by `javax.validation`. A Connector input class on SDK 0.2.x with the following imports: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -changes to the following: - -```java -import io.camunda.connector.api.annotation.Secret; -import javax.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -This way, the Connector runtime environments are able to pick up your validations correctly. - -### Provided SDK artifacts - -The Connector runtime environments can execute multiple Connectors at once. The environments also provide the base SDK artifacts and their classes -to any Connector they execute. This comprises runtime-specific classes related to the Connector context as well as the Connector core and the validation -classes. 
To minimize the possibility of incompatible classes being on the same classpath, Connectors are required to depend on `connector-core` and -`connector-validation` in Maven's dependency scope `provided`. Other dependency management frameworks like Gradle offer similar scopes. - -As a result, you need to include the SDK artifacts as follows in Maven: - -```xml - - io.camunda.connector - connector-core - provided - - - io.camunda.connector - connector-validation - provided - -``` - -## Connector runtime environment - -The SDK provides a [pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that you can start manually. With version 0.3.0, this runtime moves from the [SDK repository](https://github.com/camunda/connector-sdk/tree/stable/0.2/runtime-job-worker) -to [Connector Runtime](https://github.com/camunda/connectors/blob/main/connector-runtime/README.md). This also means that the provided runtime now is -a Spring Boot application, based on Spring Zeebe. Thus, it offers all out-of-the-box capabilities Spring Zeebe provides. - -The Connector runtime JAR for manual installation can now be fetched from https://repo1.maven.org/maven2/io/camunda/spring-zeebe-connector-runtime/ -(starting with version `8.1.3`) instead of https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-job-worker/. You can start the runtime -environment with the following command: - -```bash -java -cp 'spring-zeebe-connector-runtime-VERSION-with-dependencies.jar:connector-http-json-VERSION-with-dependencies.jar' \ - io.camunda.connector.runtime.ConnectorRuntimeApplication -``` - -The Docker image is still accessible at https://hub.docker.com/r/camunda/connectors/tags. - -### Custom runtime environments - -If you are building a custom runtime environment, note the following adjustments: - -- The `runtime-util` artifact replaces the `runtime-job-worker` artifact. -- The `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` has moved to `import io.camunda.connector.runtime.util.outbound.ConnectorJobHandler`. -- The `io.camunda.connector.impl.outbound.AbstractOutboundConnectorContext` has moved to `io.camunda.connector.impl.context.AbstractConnectorContext`. -- To build your own context class, we recommend using the following signature: - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext {} -``` - -- The `SecretStore` class has been removed. Initialize your context class with a `super(SecretProvider)` call. Remove the `getSecretStore` method if you used it. - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext { - - public MyContext(final SecretProvider provider) { - super(provider); - ... - } -} -``` diff --git a/docs/components/connectors/custom-built-connectors/update-guide/030-to-040.md b/docs/components/connectors/custom-built-connectors/update-guide/030-to-040.md deleted file mode 100644 index 43916603f22..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/030-to-040.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 030-to-040 -title: Update 0.3 to 0.4 -description: "Review which adjustments must be made to migrate from Connector SDK 0.3.x to 0.4.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.3.x to 0.4.0. 
- -:::caution - -Be aware that the update from 0.3 to 0.4 requires manual migration steps as described below. - -::: - -With SDK version 0.4.0, we introduce many basic structural changes: - -- Switching default Connector Runtime to Spring Boot/Spring Zeebe for outbound Connectors. -- Introducing webhook inbound Connector. -- Moved out-of-the-box connectors to mono-repo at https://github.com/camunda/connectors-bundle/tree/main/connectors to ease dependency management and conflict resolution. -- Build Connector bundle artifact and Docker image by Maven as default (done by adding various fat jars to one Docker image). -- Adding GCP Secret Provider used in Camunda SaaS. - -### Inbound webhook - -Spring Zeebe runtime with version `0.4.0` SDK introduces support of inbound webhook capabilities. -See the [list of available inbound Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md). - -To function properly, Spring Zeebe runtime requires connection to [Operate API](/apis-tools/operate-api/overview.md). Read more on [how to connect to Operate or disable it completely](/self-managed/connectors-deployment/connectors-configuration.md#local-installation). - -### What happens if I don't properly configure connection to Operate API? - -If you don't configure properly connection to Operate API, it will be not possible to poll process definitions from Operate. Therefore, the webhook functionality won't work. -Additionally, you may observe exception spam in your log file every 5 seconds complaining of inability to connect to Operate. -Overall, this is not critical and given there are no other issues, the Connector runtime will function properly. diff --git a/docs/components/connectors/custom-built-connectors/update-guide/040-to-050.md b/docs/components/connectors/custom-built-connectors/update-guide/040-to-050.md deleted file mode 100644 index 637cabc8899..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/040-to-050.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: 040-to-050 -title: Update 0.4 to 0.5 -description: "Review which adjustments must be made to migrate from Connector SDK 0.4.x to 0.5.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.4.x to 0.5.0. - -With SDK version 0.5.0, we introduced minor changes: - -- Removing Spring Zeebe dependency management -- Managing the GCP Secret Provider module version diff --git a/docs/components/connectors/custom-built-connectors/update-guide/050-to-060.md b/docs/components/connectors/custom-built-connectors/update-guide/050-to-060.md deleted file mode 100644 index 46124442521..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/050-to-060.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: 050-to-060 -title: Update 0.5 to 0.6 -description: "Review which adjustments must be made to migrate from Connector SDK 0.5.x to 0.6.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.5.x to 0.6.0. 
- -With SDK version 0.6.0, we introduced the following changes: - -- Replacing secrets in parent classes -- Supporting intermediate inbound events -- Defining interfaces for inbound Connectors -- Fixing failing datetime serialization diff --git a/docs/components/connectors/custom-built-connectors/update-guide/060-to-070.md b/docs/components/connectors/custom-built-connectors/update-guide/060-to-070.md deleted file mode 100644 index bc84e1e1940..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/060-to-070.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: 060-to-070 -title: Update 0.6 to 0.7 -description: "Review which adjustments must be made to migrate from Connector SDK 0.6.x to 0.7.0." ---- - -Beginner - -Beginner - -With the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), we made -breaking changes to the inbound Connectors. Update -[HTTP Webhook](https://github.com/camunda/connectors/tree/main/connectors/webhook/element-templates) -and [GitHub Webhook](https://github.com/camunda/connectors/tree/main/connectors/github/element-templates) -element templates to the latest versions. - -If you have used inbound webhook Connectors with Connector Runtime 0.6.x, you need to **manually** -apply the new element template version to your diagrams: - -1. Download the new element template from the [GitHub release page](https://github.com/camunda/connectors-bundle/releases/tag/0.17.0). -2. Follow the [installation guide](/components/modeler/desktop-modeler/element-templates/configuring-templates.md) to reinstall the element template. diff --git a/docs/components/connectors/custom-built-connectors/update-guide/070-to-080.md b/docs/components/connectors/custom-built-connectors/update-guide/070-to-080.md deleted file mode 100644 index 1145b3450fb..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/070-to-080.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 070-to-080 -title: Update 0.7 to 0.8 -description: "Review which adjustments must be made to migrate from Connector SDK 0.7.x to 0.8.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.7.x to 0.8.0. diff --git a/docs/components/connectors/custom-built-connectors/update-guide/080-to-090.md b/docs/components/connectors/custom-built-connectors/update-guide/080-to-090.md deleted file mode 100644 index 51055c0aefc..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/080-to-090.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 080-to-090 -title: Update 0.8 to 0.9 -description: "Review which adjustments must be made to migrate from Connector SDK 0.8.x to 0.9.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.8.x to 0.9.0. diff --git a/docs/components/connectors/custom-built-connectors/update-guide/090-to-0100.md b/docs/components/connectors/custom-built-connectors/update-guide/090-to-0100.md deleted file mode 100644 index 1e6172bb692..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/090-to-0100.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 090-to-0100 -title: Update 0.9 to 0.10 -description: "Review which adjustments must be made to migrate from Connector SDK 0.9.x to 0.10.0." 
---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.9.x to 0.10.0. diff --git a/docs/components/connectors/custom-built-connectors/update-guide/introduction.md b/docs/components/connectors/custom-built-connectors/update-guide/introduction.md deleted file mode 100644 index 034d80e7388..00000000000 --- a/docs/components/connectors/custom-built-connectors/update-guide/introduction.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: introduction -title: Connector SDK updates ---- - -These documents guide you through the process of updating your Camunda 8 -Connector runtimes to a newer version of the -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). - -There is a dedicated update guide for each version: - -### [Connector SDK 0.10.x to 0.11](../0100-to-0110) - -Update from 0.10.x to 0.11.2 - -### [Connector SDK 0.9 to 0.10](../090-to-0100) - -Update from 0.9.x to 0.10.0 - -### [Connector SDK 0.8 to 0.9](../080-to-090) - -Update from 0.8.x to 0.9.0 - -### [Connector SDK 0.7 to 0.8](../070-to-080) - -Update from 0.7.x to 0.8.0 - -### [Connector SDK 0.6 to 0.7](../060-to-070) - -Update from 0.6.x to 0.7.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.7.0) - -### [Connector SDK 0.5 to 0.6](../050-to-060) - -Update from 0.5.x to 0.6.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.6.0) - -### [Connector SDK 0.4 to 0.5](../040-to-050) - -Update from 0.4.x to 0.5.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.5.0) - -### [Connector SDK 0.3 to 0.4](../030-to-040) - -Update from 0.3.x to 0.4.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.4.0) - -### [Connector SDK 0.2 to 0.3](../020-to-030) - -Update from 0.2.x to 0.3.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.3.0) - -### [Connector SDK 0.1 to 0.2](../010-to-020) - -Update from 0.1.x to 0.2.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.2.0) diff --git a/docs/components/connectors/img/connector-templates/create-connector-template-1.png b/docs/components/connectors/img/connector-templates/create-connector-template-1.png index 9ea40b01675..fbf95aa1be2 100644 Binary files a/docs/components/connectors/img/connector-templates/create-connector-template-1.png and b/docs/components/connectors/img/connector-templates/create-connector-template-1.png differ diff --git a/docs/components/connectors/img/connector-templates/create-connector-template-3.png b/docs/components/connectors/img/connector-templates/create-connector-template-3.png index 6e9cefb4615..a0ec13ff48f 100644 Binary files a/docs/components/connectors/img/connector-templates/create-connector-template-3.png and b/docs/components/connectors/img/connector-templates/create-connector-template-3.png differ diff --git a/docs/components/connectors/img/connector-templates/edit-connector-template-1.png b/docs/components/connectors/img/connector-templates/edit-connector-template-1.png index 47953c0fbce..a5dd1e313a5 100644 Binary files a/docs/components/connectors/img/connector-templates/edit-connector-template-1.png and b/docs/components/connectors/img/connector-templates/edit-connector-template-1.png differ diff --git a/docs/components/connectors/img/connector-templates/fix-connector-template-problems-2.png b/docs/components/connectors/img/connector-templates/fix-connector-template-problems-2.png index 
92158564df1..a52b92e1528 100644 Binary files a/docs/components/connectors/img/connector-templates/fix-connector-template-problems-2.png and b/docs/components/connectors/img/connector-templates/fix-connector-template-problems-2.png differ diff --git a/docs/components/connectors/img/connector-templates/fix-connector-template-problems.png b/docs/components/connectors/img/connector-templates/fix-connector-template-problems.png index 341de0b0184..2a66c9d8b7d 100644 Binary files a/docs/components/connectors/img/connector-templates/fix-connector-template-problems.png and b/docs/components/connectors/img/connector-templates/fix-connector-template-problems.png differ diff --git a/docs/components/connectors/img/use-connectors-error-general.png b/docs/components/connectors/img/use-connectors-error-general.png index 11e33cfc2b6..b095efcf94d 100644 Binary files a/docs/components/connectors/img/use-connectors-error-general.png and b/docs/components/connectors/img/use-connectors-error-general.png differ diff --git a/docs/components/connectors/manage-connector-templates.md b/docs/components/connectors/manage-connector-templates.md index efa7120409c..f175b4d1103 100644 --- a/docs/components/connectors/manage-connector-templates.md +++ b/docs/components/connectors/manage-connector-templates.md @@ -14,7 +14,7 @@ You can create and manage [Connector templates](/components/connectors/custom-bu Take the following steps to create a new Connector template: -1. Navigate to the project of your choice and click **New**. +1. Navigate to the project of your choice in Web Modeler and click **New**. 2. Click **Connector Template**. @@ -27,7 +27,7 @@ Take the following steps to create a new Connector template: The components of the editor interface are as follows: - In the **breadcrumbs bar** at the top of the screen, you can rename your template by clicking the chevron next to the template name. Note that you cannot change the name of your template in the template JSON, but only with this action. - - On the left, you observe the **template JSON editor**. Here, you define the actual template descriptor. The descriptor follows the [most recent element template schema](https://github.com/camunda/element-templates-json-schema). The editor is divided into two sections: a read-only section, containing the schema reference, the template name, the template id, and an editable section, where you can [define your template descriptor](/components/modeler/desktop-modeler/element-templates/defining-templates.md). + - On the left, you observe the **template JSON editor**. Here, you define the actual template descriptor. The descriptor follows the [most recent element template schema](https://github.com/camunda/element-templates-json-schema). The editor is divided into two sections: a read-only section, containing the schema reference, the template name, the template ID, and an editable section, where you can [define your template descriptor](/components/modeler/desktop-modeler/element-templates/defining-templates.md). - On the right, you observe the live **Visual Preview**. The live preview shows how the properties panel will look when you apply the template to an element. It automatically updates on every valid change, and reflects the latest valid state of the template. The preview allows you to interactively check your template before publishing, enhancing its usability. - In the upper left, you can **Upload an icon** for your template. You can upload any image file you want, however we recommend to use squared SVG graphics. 
The icons get rendered 18x18 pixels in the element on the modeling canvas, and 32x32 pixels in the properties panel. diff --git a/docs/components/connectors/out-of-the-box-connectors/amazon-comprehend.md b/docs/components/connectors/out-of-the-box-connectors/amazon-comprehend.md index a1e44783cba..2860bf983c3 100644 --- a/docs/components/connectors/out-of-the-box-connectors/amazon-comprehend.md +++ b/docs/components/connectors/out-of-the-box-connectors/amazon-comprehend.md @@ -74,27 +74,23 @@ For more details on the fields that can be configured during asynchronous execut ### Sync execution fields - **Text (mandatory)**: The document text to be analyzed. -- **Document read action**: This field defines the Amazon Textract API operation that Amazon Comprehend uses to extract text from PDF files and image files. For more details, refer to [document read action](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-DocumentReadAction). -- **Document read mode**: Determines the text extraction actions for PDF files. For more details, refer to [document read mode](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-DocumentReadMode). -- **Analyze tables**: Returns additional information about any tables that are detected in the input document. For more details, refer to [feature types](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-FeatureTypes). -- **Analyze forms**: Returns additional information about any forms that are detected in the input document. For more details, refer to [feature types](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-FeatureTypes). - **Endpoint Arn (mandatory)**: The Amazon Resource Number (ARN) of the endpoint. For more details, refer to [Classify Document](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ClassifyDocument.html#API_ClassifyDocument_RequestSyntax). ### Async execution fields -- **Document read action**: This field defines the Amazon Textract API operation that Amazon Comprehend uses to extract text from PDF files and image files. For more details, refer to [document read action](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-DocumentReadAction). - **Document read mode**: Determines the text extraction actions for PDF files. For more details, refer to [document read mode](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-DocumentReadMode). +- **Document read action**: This field defines the Amazon Textract API operation that Amazon Comprehend uses to extract text from PDF files and image files. For more details, refer to [document read action](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-DocumentReadAction). - **Analyze tables**: Returns additional information about any tables that are detected in the input document. For more details, refer to [feature types](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-FeatureTypes). - **Analyze forms**: Returns additional information about any forms that are detected in the input document. 
For more details, refer to [feature types](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_DocumentReaderConfig.html#comprehend-Type-DocumentReaderConfig-FeatureTypes). -- **Input S3 URI (mandatory)**: The Amazon S3 URI for the input data. For more details, refer to [S3 URI](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_InputDataConfig.html#comprehend-Type-InputDataConfig-S3Uri). -- **Input Format**: Specifies how the text in an input file should be processed. For more details, refer to [InputFormat](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_InputDataConfig.html#comprehend-Type-InputDataConfig-InputFormat). +- **Inputs' S3 URI (mandatory)**: The Amazon S3 URI for the input data. For more details, refer to [S3 URI](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_InputDataConfig.html#comprehend-Type-InputDataConfig-S3Uri). +- **Input file processing mode**: Specifies how the text in an input file should be processed. For more details, refer to [InputFormat](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_InputDataConfig.html#comprehend-Type-InputDataConfig-InputFormat). - **Client request token**: A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one. -- **Data access role ARN (mandatory)**: The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. -- **Document classifier ARN**: The Amazon Resource Name (ARN) of the document classifier to use to process the job. -- **Flywheel ARN**: The Amazon Resource Number (ARN) of the flywheel associated with the model to use. +- **Data Access Role's ARN (mandatory)**: The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. +- **Document Classifier's ARN**: The Amazon Resource Name (ARN) of the document classifier to use to process the job. +- **Flywheel's ARN**: The Amazon Resource Number (ARN) of the flywheel associated with the model to use. - **Job name**: The identifier of the job. -- **Output S3 URI (mandatory)**: The Amazon S3 location where you want to write the output data. For more details, refer to [output data config](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_OutputDataConfig.html). -- **Output Kms Key Id**: The ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. For more details, refer to [output data config](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_OutputDataConfig.html). +- **Output's S3 URI (mandatory)**: The Amazon S3 location where you want to write the output data. For more details, refer to [output data config](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_OutputDataConfig.html). +- **Outputs KMS Key Id**: The ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. For more details, refer to [output data config](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_OutputDataConfig.html). - **Tags**: Tags to associate with the document classification job. A tag is a key-value pair that adds metadata to a resource used by Amazon Comprehend. 
**Example:** @@ -102,8 +98,8 @@ For more details on the fields that can be configured during asynchronous execut = {"status": "active"} ``` -- **Volume Kms Key Id**: ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. -- **Security group ids**: The ID number for a security group on an instance of your private VPC. For more details, refer to [security group](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_VpcConfig.html#comprehend-Type-VpcConfig-SecurityGroupIds). +- **VolumeKmsKeyId**: ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. +- **Security group Ids**: The ID number for a security group on an instance of your private VPC. For more details, refer to [security group](https://docs.aws.amazon.com/comprehend/latest/APIReference/API_VpcConfig.html#comprehend-Type-VpcConfig-SecurityGroupIds). **Example:** ```feel diff --git a/docs/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md b/docs/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md index d02a79c4f93..37cd2ca1002 100644 --- a/docs/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md +++ b/docs/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md @@ -284,7 +284,7 @@ The **Result Expression** allows you to access specific attributes from the resp } ``` -In this example, we are using the **Result Expression** to extract the **ID** and **price** attributes from the response variable and assign them to the id and price process variables, respectively. You can then use these variables in subsequent steps of your process. +In this example, we are using the **Result Expression** to extract the **ID** and **price** attributes from the response variable and assign them to the ID and price process variables, respectively. You can then use these variables in subsequent steps of your process. :::note The syntax for accessing attributes in the **Result Expression** may vary depending on the structure of your response object. You can refer to the [FEEL Context Expression](/components/modeler/feel/language-guide/feel-context-expressions.md) documentation for more information on how to use the **Result Expression**. diff --git a/docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md b/docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md index 5ff312afc75..a374ce7f305 100644 --- a/docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md +++ b/docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md @@ -105,10 +105,6 @@ There are two options to authenticate the Connector with AWS: The **Amazon EventBridge Webhook Connector** is an inbound Connector enabling you to start a BPMN process instance triggered by an event from [Amazon EventBridge](https://aws.amazon.com/eventbridge/). -:::note -If you have used the **Amazon EventBridge Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an Amazon EventBridge Webhook Connector task 1. 
Start building your BPMN diagram. You can use the **Amazon EventBridge Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/docs/components/connectors/out-of-the-box-connectors/available-connectors-overview.md b/docs/components/connectors/out-of-the-box-connectors/available-connectors-overview.md index e16bbb360d8..cc14649f27b 100644 --- a/docs/components/connectors/out-of-the-box-connectors/available-connectors-overview.md +++ b/docs/components/connectors/out-of-the-box-connectors/available-connectors-overview.md @@ -13,3 +13,66 @@ Explore our library of prebuilt Camunda Connectors for SaaS and [Self-Managed](/ :::tip Don't see the Connector you need? Build your own [custom Connector](/components/connectors/custom-built-connectors/build-connector.md), or explore the [Camunda MarketPlace](https://marketplace.camunda.com/) for more Connectors developed by Camunda, Partners, and the Community. ::: + +Beginners to Connectors may want to get familiar with them using a [guide to configuring out-of-the-box Connectors](/guides/configuring-out-of-the-box-connector.md). + +## Outbound Connectors + +- [Amazon Bedrock Connector](/components/connectors/out-of-the-box-connectors/amazon-bedrock.md) - Interact with [Amazon Bedrock](https://aws.amazon.com/bedrock/) from your BPMN process to experiment with and evaluate foundation models (FMs) from leading AI companies. +- [Amazon Comprehend Connector](/components/connectors/out-of-the-box-connectors/amazon-comprehend.md) - Interact with the [Amazon Comprehend service](https://aws.amazon.com/comprehend/) from your BPMN process. +- [Amazon DynamoDB Connector](/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md) - Interact with [Amazon DynamoDB NoSQL database service](https://aws.amazon.com/dynamodb/) within your BPMN process, enabling you to store and retrieve data from tables, as well as perform queries and scans. +- [Amazon EventBridge Service Connector](/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md) - Send events using [Amazon EventBridge service](https://aws.amazon.com/eventbridge/) within your BPMN process. +- [Amazon SageMaker Connector](/components/connectors/out-of-the-box-connectors/amazon-sagemaker.md) - Interact with the [Amazon SageMaker service](https://aws.amazon.com/sagemaker/) from your BPMN process. +- [Amazon SNS Outbound Connector](/components/connectors/out-of-the-box-connectors/amazon-sns.md) - Send messages to [Amazon Simple Notification Service](https://aws.amazon.com/sns/) from your BPMN process. +- [Amazon SQS Connector](/components/connectors/out-of-the-box-connectors/amazon-sqs.md) - Send messages to [Amazon Simple Queue Service](https://aws.amazon.com/sqs/) from your BPMN process. +- [Amazon Textract Connector](/components/connectors/out-of-the-box-connectors/amazon-textract.md) - Interact with the [Amazon Textract Service](https://aws.amazon.com/textract/) from your BPMN process. +- [Asana Connector](/components/connectors/out-of-the-box-connectors/asana.md) - Manage [Asana](https://asana.com/) projects and tasks from your BPMN process. +- [Automation Anywhere Connector](/components/connectors/out-of-the-box-connectors/automation-anywhere.md) - Orchestrate your [Automation Anywhere](https://www.automationanywhere.com/) queue from your BPMN process.
+- [AWS Lambda Connector](/components/connectors/out-of-the-box-connectors/aws-lambda.md) - Invoke [AWS Lambda Functions](https://aws.amazon.com/lambda/) from your BPMN process. +- [Azure OpenAI](/components/connectors/out-of-the-box-connectors/azure-open-ai.md) - Interact with [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) from your BPMN process. +- [Blue Prism](/components/connectors/out-of-the-box-connectors/blueprism.md) - Orchestrate your [Blue Prism](https://www.blueprism.com/) queue items from your BPMN process. +- [Camunda Operate Connector](/components/connectors/out-of-the-box-connectors/operate.md) - Fetch process execution data from [Camunda Operate](https://camunda.com/platform/operate/). +- [Easy Post Connector](/components/connectors/out-of-the-box-connectors/easy-post.md) - Create addresses, parcels, and shipments, as well as purchase and verify shipments with [EasyPost](https://www.easypost.com/) from your BPMN process. +- [GitHub Connector](/components/connectors/out-of-the-box-connectors/github.md) - Manage [GitHub](https://github.com/) issues and releases from your BPMN process. +- [GitLab Connector](/components/connectors/out-of-the-box-connectors/gitlab.md) - Manage [GitLab](https://about.gitlab.com/) issues and releases from your BPMN process. +- [Google Drive Connector](/components/connectors/out-of-the-box-connectors/googledrive.md) - Create folders or files from a [Google Drive](https://www.google.com/drive/) template from your BPMN process. +- [Google Maps Platform Connector](/components/connectors/out-of-the-box-connectors/google-maps-platform.md) - Validate addresses, retrieve postal addresses, and calculate distances with [Google Maps Platform Service](https://mapsplatform.google.com/) from your BPMN process. +- [Google Sheets Connector](/components/connectors/out-of-the-box-connectors/google-sheets.md) - Allows you to work with an existing or new empty spreadsheet on [Google Drive](https://drive.google.com/) from your BPMN process. +- [Google Gemini Connector](/components/connectors/out-of-the-box-connectors/google-gemini.md) - Access Gemini multimodal models from Google within your BPMN process. +- [Hugging Face Connector](/components/connectors/out-of-the-box-connectors/hugging-face.md) - Interact with [Hugging Face](https://huggingface.co/) models from your BPMN process. +- [Kafka Producer Connector](/components/connectors/out-of-the-box-connectors/kafka.md) - Produce messages to [Kafka](https://kafka.apache.org/) from your BPMN process. +- [Microsoft Teams Connector](/components/connectors/out-of-the-box-connectors/microsoft-teams.md) - Interact with [Microsoft Teams](https://www.microsoft.com/microsoft-teams/) from your BPMN process. +- [Microsoft 365 Connector](/components/connectors/out-of-the-box-connectors/microsoft-o365-mail.md) - Interact with [Microsoft 365](https://outlook.office.com/mail/) mail from your BPMN process. +- [OpenAI Connector](/components/connectors/out-of-the-box-connectors/openai.md) - Interact with [ChatGPT](https://chat.openai.com/) and [OpenAI Moderation API](https://platform.openai.com/docs/guides/moderation/overview). +- [RabbitMQ Producer Connector](/components/connectors/out-of-the-box-connectors/rabbitmq-outbound.md) - Send messages to [RabbitMQ](https://www.rabbitmq.com/) from your BPMN process.
+- [Salesforce Connector](/components/connectors/out-of-the-box-connectors/salesforce.md) - Manage your Salesforce instance from your BPMN process. +- [SendGrid Connector](/components/connectors/out-of-the-box-connectors/sendgrid.md) - Quickly send emails from your BPMN processes. +- [Slack outbound Connector](/components/connectors/out-of-the-box-connectors/slack.md) - Send messages to channels or users in your [Slack](https://slack.com) workspace from your BPMN process. +- [SQL Connector](/components/connectors/out-of-the-box-connectors/sql.md) - Connect your BPMN process with SQL databases (Microsoft SQL Server, PostgreSQL, MySQL). +- [Twilio Connector](/components/connectors/out-of-the-box-connectors/twilio.md) - Send and get SMS messages with [Twilio](https://www.twilio.com) service from your BPMN process. +- [UiPath Connector](/components/connectors/out-of-the-box-connectors/uipath.md) - Orchestrate your [UiPath](https://cloud.uipath.com) Bots with Camunda. +- [WhatsApp Connector](/components/connectors/out-of-the-box-connectors/whatsapp.md) - Send messages with [WhatsApp Business](https://business.whatsapp.com/) from your BPMN process. + +## Inbound Connectors + +- [Amazon EventBridge Webhook Connector](/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md) - Start a BPMN process instance triggered by an [Amazon EventBridge service event](https://aws.amazon.com/eventbridge/). +- [Amazon SNS inbound Connector](/components/connectors/out-of-the-box-connectors/amazon-sns.md) - Trigger your BPMN process with an [Amazon Simple Notification Service](https://aws.amazon.com/sns/) notification via HTTPS. +- [Amazon SQS Connector](/components/connectors/out-of-the-box-connectors/amazon-sqs.md) - Receive messages from [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) in your BPMN process. +- [GitHub Webhook Connector](/components/connectors/out-of-the-box-connectors/github.md) - Start a process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). +- [Kafka Consumer Connector](/components/connectors/out-of-the-box-connectors/kafka.md) - Consume messages from [Kafka](https://kafka.apache.org/) from your BPMN process. +- [RabbitMQ Consumer Connector](/components/connectors/out-of-the-box-connectors/rabbitmq-outbound.md) - Receive messages from [RabbitMQ](https://www.rabbitmq.com/) in your BPMN process. +- [Slack inbound Connector](/components/connectors/out-of-the-box-connectors/slack.md) - Trigger a [Slack](https://slack.com) bot to start a BPMN process with an event or a slash command +- [Twilio Webhook Connector](/components/connectors/out-of-the-box-connectors/twilio.md) - Start a process instance triggered by a [Twilio webhook](https://www.twilio.com/docs/usage/webhooks). Can be used as an intermediate Connector in existing processes. + +## Protocol Connectors + +- [GraphQL Connector](/components/connectors/protocol/graphql.md) - Execute a [GraphQL](https://graphql.org/) query or mutation from your BPMN process. +- [HTTP Webhook Connector](/components/connectors/protocol/http-webhook.md) - Start a process instance with your custom webhook configuration. +- [Polling Connector](/components/connectors/protocol/polling.md) - The HTTP Polling Connector polls an endpoint at regular intervals, enabling periodic data fetching as an intermediate step in your BPMN processes. 
+- [REST Connector](/components/connectors/protocol/rest.md) - Make a request to a REST API and use the response in the next steps of your process. +- [SOAP Connector](/components/connectors/protocol/soap.md) - Connect your BPMN process with Simple Object Access Protocol ([SOAP](https://en.wikipedia.org/wiki/SOAP)) services and interact with SOAP service endpoints. + +In addition to this section on Connectors, we recommend reviewing [Connector secrets](/components/console/manage-clusters/manage-secrets.md). + +If you want to build **custom Connectors**, head over to our [Connector SDK guide](/components/connectors/custom-built-connectors/connector-sdk.md). + diff --git a/docs/components/connectors/out-of-the-box-connectors/email.md b/docs/components/connectors/out-of-the-box-connectors/email.md index e43c19ec89a..698498a3488 100644 --- a/docs/components/connectors/out-of-the-box-connectors/email.md +++ b/docs/components/connectors/out-of-the-box-connectors/email.md @@ -2,9 +2,16 @@ id: email title: Email Connector sidebar_label: Email Connector -description: The Email Connector allows you to connect your BPMN service with different email protocol. +description: The Email Connector allows you to connect your BPMN service with different email protocols such as SMTP, POP3 or IMAP. --- +import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; + + + + + The **Email Connector** is an outbound Connector that allows you to connect your BPMN service with any email POP3, IMAP or SMTP server. @@ -66,7 +73,7 @@ with a limit of one email, the task will return the most recently sent email. The task returns a list of emails in JSON format. Each email object contains the following information: - `messageId`: A unique identifier for the email message. -- `fromAddress`: the email addresses of the sender. +- `fromAddress`: The email addresses of the sender. - `subject`: The subject line of the email. - `size`: The size of the email (in bytes). @@ -110,13 +117,14 @@ Reading an email using POP3 protocol will delete the email The task returns a JSON object containing detailed information about the email: - `messageId`: The unique identifier corresponding to the email message. -- `fromAddress`: the email addresses of the sender. +- `fromAddress`: The email addresses of the sender. - `headers` : A list containing the email's headers - `subject`: The subject line of the email. - `size`: The size of the email in bytes. - `plainTextBody`: The plain text version of the email's content. -- `htmlBody`: The HTML version of the email's content, provided it exists. -- `receivedDateTime`: the email's reception datetime +- `htmlBody`: The HTML version of the email's content (if content exists). +- `attachments`: A list of all the email's attachments, provided as a document reference. +- `receivedDateTime`: The email's reception datetime #### Example Response @@ -140,7 +148,19 @@ Below is an example of the JSON response returned when a specific email is read: "value": "test" } ], - "sentDate": "2024-08-19T06:54:28Z" + "attachments": [ + { + "storeId": "in-memory", + "documentId": "20f1fd6a-d8ea-403b-813c-e281c1193495", + "metadata": { + "contentType": "image/webp; name=305a4816-b3df-4724-acd3-010478a54add.webp", + "size": 311032, + "fileName": "305a4816-b3df-4724-acd3-010478a54add.webp" + }, + "documentType": "camunda" + } + ], + "receivedDateTime": "2024-08-19T06:54:28Z" } ``` @@ -239,7 +259,7 @@ object with a field and a value. - If an operator is set, the criteria array must also be defined. 
- Each criterion within the criteria array is applied to the specified field based on the value associated with it. -:::note +::: #### Example Response @@ -271,14 +291,18 @@ Allow users to send an email from the connected email account. #### Parameters -| Parameter | Description | -| :-------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `From` | Specify the sender's email address(es). This can be a single email address (for example, 'example@camunda.com'), a comma-separated list of addresses, or a Friendly Enough Expression Language (FEEL) expression returning a list of email addresses (for example, =["example@camunda.com"]). | -| `To` | Defines the email recipient(s). Similar to the `From` parameter, this can be a single email address, a comma-separated list, or a FEEL expression (for example, =["example@camunda.com"]). | -| `Cc` | (Optional) Specify the email address(es) to include in the **Carbon Copy (CC)** field. The format is the same as the **From** and **To** fields, and can include a single address, a list, or a FEEL expression. | -| `Bcc` | (Optional) Specify the email address(es) to include in the **Blind Carbon Copy (BCC)** field. It follows the same format as the **CC** field and ensures that BCC recipients are not visible to other recipients. | -| `Subject` | The email subject line. | -| `Email` | The main content of the email. | +| Parameter | Description | +| :------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `From` | Specify the sender's email address(es). This can be a single email address (for example, 'example@camunda.com'), a comma-separated list of addresses, or a Friendly Enough Expression Language (FEEL) expression returning a list of email addresses (for example, =["example@camunda.com"]). | +| `To` | Defines the email recipient(s). Similar to the `From` parameter, this can be a single email address, a comma-separated list, or a FEEL expression (for example, =["example@camunda.com"]). | +| `Cc` | (Optional) Specify the email address(es) to include in the **Carbon Copy (CC)** field. The format is the same as the **From** and **To** fields, and can include a single address, a list, or a FEEL expression. | +| `Bcc` | (Optional) Specify the email address(es) to include in the **Blind Carbon Copy (BCC)** field. It follows the same format as the **CC** field and ensures that BCC recipients are not visible to other recipients. | +| `Headers` | Feel expression containing all the desired headers to be added to the email's headers. cf. `{ "customHeaders" : "new header value" }` | +| `Subject` | The email subject line. | +| `Content Type` | The content type of the email. | +| `Email Text Content` | The text content of the email. This must only be provided if the `Content Type` is `PLAIN` or `HTML & PlainText`. | +| `Html Text Content` | The HTML content of the email. This must only be provided if the `Content Type` is `HTML` or `HTML & PlainText`. | +| `Attachment` | The document reference, either for a single document or as a list for multiple documents. 
| :::info To learn more about Friendly Enough Expression Language (FEEL) expression, @@ -332,7 +356,7 @@ Allow users to fetch a list of emails from a specified folder, with customizable | `Max Emails to read` | Specify the maximum number of emails to retrieve. This parameter determines the cap on the number of emails the task will return. | | `Sort emails by` |

    Choose the field by which to sort the emails. Supported sorting fields are:

    • `Sent date`: Sorts emails by the date and time they were sent.
    • `Size`: Sorts emails by the size of the email.

    | | `Sort order` |

    Define the sort order:

    • `ASC`: Ascending order, from the oldest or smallest value to the most recent or largest.
    • `DESC`: Descending order, from the most recent or largest value to the oldest or smallest.

    | -| `Folder` | (Optional) the folder to list emails from, default is `INBOX`. | +| `Folder` | (Optional) the folder to list emails from, default is `INBOX`. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | #### Sorting and Limiting Behavior @@ -378,23 +402,24 @@ Retrieve an email's details based on the specified `messageId`. #### Parameters -| Parameter | Description | -| :---------- | :------------------------------------------------------------------------------------------------------------------------ | -| `MessageId` | The unique identifier of the email that must be read. | -| `Folder` | (Optional) Specifies the folder from which the email should be retrieved. If not provided, the default folder is `INBOX`. | +| Parameter | Description | +| :---------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `MessageId` | The unique identifier of the email that must be read. | +| `Folder` | (Optional) Specifies the folder from which the email should be retrieved. If not provided, the default folder is `INBOX`. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | #### Response Structure The task returns a JSON object containing detailed information about the email: -- `messageId`: The unique identifier corresponding to the email message. -- `fromAddress`: the email addresses of the sender. -- `headers` : A list containing the email's headers +- `messageId`: The unique identifier of the email message. +- `fromAddress`: The email addresses of the sender. +- `headers` : A list of the email headers. - `subject`: The subject line of the email. -- `size`: The size of the email in bytes. -- `plainTextBody`: The plain text version of the email's content. -- `htmlBody`: The HTML version of the email's content, provided it exists. -- `receivedDateTime`: the email's reception datetime +- `size`: The size of the email (in bytes). +- `plainTextBody`: The plain text version of the email content. +- `htmlBody`: The HTML version of the email content, if it exists. +- `attachments`: A list of all the email's attachments, provided as a document reference. +- `receivedDateTime`: The date and time the email was received. #### Example Response @@ -418,7 +443,19 @@ The following JSON structure shows an expected response after a successful email "value": "test" } ], - "sentDate": "2024-08-19T06:54:28Z" + "attachments": [ + { + "storeId": "in-memory", + "documentId": "20f1fd6a-d8ea-403b-813c-e281c1193495", + "metadata": { + "contentType": "image/webp; name=305a4816-b3df-4724-acd3-010478a54add.webp", + "size": 311032, + "fileName": "305a4816-b3df-4724-acd3-010478a54add.webp" + }, + "documentType": "camunda" + } + ], + "receivedDateTime": "2024-08-19T06:54:28Z" } ``` @@ -428,10 +465,10 @@ Delete an email from a specified folder, using the email's unique `messageId`. #### Parameters -| Parameter | Description | -| :---------- | :------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `MessageId` | The identifier of the email message to delete. | -| `Folder` | (Optional) Specifies the folder from which the email should be deleted. If this parameter is not supplied, the default folder is assumed to be `INBOX`. 
| +| Parameter | Description | +| :---------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `MessageId` | The identifier of the email message to delete. | +| `Folder` | (Optional) Specifies the folder from which the email should be deleted. If this parameter is not supplied, the default folder is assumed to be `INBOX`. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | #### Response Structure @@ -462,7 +499,8 @@ A search query is represented as a JSON object. Below is an example of a JSON ob using an AND and OR operator to combine multiple conditions: - `Folder`: (Optional) Specifies the folder from which the email should be deleted. If this parameter is not supplied, - the default folder is assumed to be `INBOX`. + the default folder is assumed to be `INBOX`. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or + `inside.folder`) - `Criteria`: _See below_ ```json @@ -521,7 +559,7 @@ object with a field and a value. - If an operator is set, the criteria array must also be defined. - Each criterion within the criteria array is applied to the specified field based on the value associated with it. -:::note +::: #### Example Response @@ -540,11 +578,11 @@ Enable users to transfer an email from one folder to another, streamlining inbox #### Parameters -| Parameter | Description | -| :-------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `MessageId` | The identifier of the email that needs to be moved. | -| `Source folder` | (Optional) The folder from which the email will be moved. If not specified, the default is INBOX. | -| `Target folder` | The destination folder where the email is placed. To specify a new folder or a nested hierarchy, use a dot-separated path (for example, 'Archive' or 'Projects.2023.January'). The system automatically creates any non-existent folders in the path. | +| Parameter | Description | +| :-------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `MessageId` | The identifier of the email that needs to be moved. | +| `Source folder` | (Optional) The folder from which the email will be moved. If not specified, the default is INBOX. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | +| `Target folder` | The destination folder where the email is placed. To specify a new folder or a nested hierarchy, use `.` or `/` separated path (for example, 'Archive/test' or 'Projects.2023.January'). The system automatically creates any non-existent folders in the path. | #### Response Structure @@ -565,3 +603,194 @@ The example below shows the expected JSON response after an email has been succe "to": "TEST" } ``` + +
    + + + +The Email Inbound Connector is an inbound Connector that allows you to connect your BPMN service with any email IMAP +server. + +:::caution +This inbound connector only supports working with IMAP server. +::: + +## Prerequisites + +To use the **Email Inbound Connector**, you must have an IMAP server available to connect to. + +:::note +Use Camunda secrets to avoid exposing your sensitive data as plain text. +See [managing secrets](/components/console/manage-clusters/manage-secrets.md). +::: + +## Authentication + +You can authenticate to a mail server as follows. + +### Simple Authentication + +This method allows the user to connect to any IMAP server using an email address and password. + +#### Parameters + +| Parameter | Description | +| :--------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `username` | Enter your full email address (for example, user@example.com) or the username provided by your email service. This is used to authenticate your access to the mail server. | +| `password` | Enter the password for your email account. Keep your password secure and do not share it with others. | + +## Listener information + +This inbound connector creates a new process each time a new email is received. + +| Parameter | Description | +| :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Folder` |

    (Optional) Define the folder the inbound connector will monitor.

    • If not specified, the default folder is set to `INBOX`.
    • For subfolders, use `.` or `/` separated path (for example, `inside/folder` or `inside.folder`)

    | +| `Polling Wait Time` | Set the interval between each polling operation. See [timer events](/docs/components/modeler/bpmn/timer-events/timer-events.md#time-duration) for more information on time duration and correct format. | +| `Polling Configuration` |

    This section contains settings related to the polling behavior of the connector.

    • Poll All Emails: Poll every email found in the specified folder.
      • Move to Another Folder After Processing: Move processed emails to a specific folder.

        • Folder: Specify the target folder to move processed emails to. To specify a new folder or a nested hierarchy, use a `.` or `/` separated path (for example, Archive/test or Projects.2023.January). Non-existent folders in the path are automatically created.

      • Delete After Processing: Permanently delete each email after processing.

    • `Poll Unseen Emails`: Poll only emails not marked as read in the specified folder.
      • `Move to Another Folder After Processing`: Move processed unseen emails to a specific folder.

        • `Folder`: Specify the target folder to move processed unseen emails to. To specify a new folder or a nested hierarchy, use a `.` or `/` separated path (for example, Archive/test or Projects.2023.January). Non-existent folders in the path are automatically created.
      • `Delete After Processing`: Permanently delete unseen emails from the folder after processing.

      • `Mark as Read After Processing`: Mark each unseen email as read after it is processed.

    | + +## Response Structure + +The task returns a JSON object containing detailed information about the email: + +- `messageId`: The unique identifier of the email message. +- `fromAddress`: The email addresses of the sender. +- `headers` : A list of the email headers. +- `subject`: The subject line of the email. +- `size`: The size of the email (in bytes). +- `plainTextBody`: The plain text version of the email content. +- `htmlBody`: The HTML version of the email content, if it exists. +- `attachments` A list of document reference +- `receivedDateTime`: The date and time the email was received. + +#### Example Response + +The following example JSON response shows the data structure produced when an email triggers the creation of a process +instance: + +```json +{ + "messageId": "messageId", + "fromAddress": "example@camunda.com", + "subject": "Urgent Test", + "size": 65646, + "plainTextBody": "Hey how are you?\r\n", + "htmlBody": "Hello", + "headers": [ + { + "header": "header1", + "value": "example" + }, + { + "header": "header2", + "value": "test" + } + ], + "attachments": [ + { + "storeId": "in-memory", + "documentId": "20f1fd6a-d8ea-403b-813c-e281c1193495", + "metadata": { + "contentType": "image/webp; name=305a4816-b3df-4724-acd3-010478a54add.webp", + "size": 311032, + "fileName": "305a4816-b3df-4724-acd3-010478a54add.webp" + }, + "documentType": "camunda" + } + ], + "receivedDateTime": "2024-08-19T06:54:28Z" +} +``` + +This response includes essential email details such as the `messageId`, sender addresses, subject, size, and the content +of the email both in plain text and HTML format. This information can be used by the process for various workflows, such +as prioritizing tasks, content analysis, and automated responses. + +## Activation condition + +The optional **Activation condition** field allows you to specify a Friendly Enough Expression +Language ([FEEL](/components/modeler/feel/what-is-feel.md)) expression to control when this Connector should trigger a +process instance. This condition acts as a filter, allowing the process to be initiated only when certain criteria are +met by the incoming email. + +For example, the FEEL expression `=(response.subject = "urgent")` ensures that the process is only triggered if the +subject of the incoming email matches "urgent". If this field is left blank, the process is triggered for every email +received by the connector. + +## Correlation + +The **Correlation** section allows you to configure the message correlation parameters. + +### Correlation key + +- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This + corresponds to the **Correlation key** property of a regular **message intermediate catch event**. +- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This + expression is evaluated in the Connector Runtime and the result is used to correlate the message. + +For example, given that your correlation key is defined with `myCorrelationKey` process variable, and the incoming email +message contains `value:{correlationKey:myValue}`, your correlation key settings will look like this: + +- **Correlation key (process)**: `=myCorrelationKey` +- **Correlation key (payload)**: `=message.plainTextBody.correlationKey` + +You can also use the key of the message to accomplish this in the **Correlation key (payload)** field with `=key`. 
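+As a further hedged illustration (assuming the waiting process was started with an `orderId` variable and the sender puts the order number in the email subject; both names are examples, not Connector defaults), the payload-side expression could simply point at the documented `subject` field of the incoming email, while the process-side field stays `=orderId`:
+
+```feel
+= message.subject
+```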
+ +:::info +To learn more about correlation keys, see [messages](../../../concepts/messages). +::: + +### Message ID expression + +The optional **Message ID expression** field allows you to extract the message ID from the incoming message. + +- The message ID serves as a unique identifier for the message and is used for message correlation. +- This expression is evaluated in the Connector Runtime and the result used to correlate the message. + +In most cases, you do not need to configure the **Message ID expression**. However, it is useful if you want to ensure +message deduplication or achieve a specific message correlation behavior. + +:::info +To learn more about how message IDs influence message correlation, +see [messages](../../../concepts/messages#message-correlation-overview). +::: + +For example, if you want to set the message ID to the value of the `messageId` field in the incoming message, you can +configure the **Message ID expression** as follows: + +``` += message.messageId +``` + +### Message TTL + +The optional **Message TTL** field allows you to set the time-to-live (TTL) for the correlated messages. + +- TTL defines the time for which the message is buffered in Zeebe before being correlated to the process instance (if it + cannot be correlated immediately). +- The value is specified as an ISO 8601 duration. For example, `PT1H` sets the TTL to one hour. + +:::info +To learn more about TTL in Zeebe, see [message correlation](../../../concepts/messages#message-buffering). +::: + +## Deduplication + +The **Deduplication** section allows you to configure the Connector deduplication parameters. + +- **Connector deduplication** is a mechanism in the Connector Runtime that determines how many email listeners are + created if there are multiple occurrences of the **Email Listener Connector** in a BPMN diagram. This is different to + **message deduplication**. + +- By default, the Connector runtime deduplicates Connectors based on properties, so elements with the same subscription + properties only result in one subscription. + +To customize the deduplication behavior, select the **Manual mode** checkbox and configure the custom deduplication ID. + +:::info +To learn more about deduplication, see [deduplication](../use-connectors/inbound.md#connector-deduplication). +::: + +
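+As a closing, hedged example that ties these expression fields to the documented response structure, an **Activation condition** can also use FEEL built-in functions, for instance to start a process instance only for emails that carry at least one attachment (`attachments` is the field from the response structure above):
+
+```feel
+= count(response.attachments) > 0
+```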
    + +
    diff --git a/docs/components/connectors/out-of-the-box-connectors/github.md b/docs/components/connectors/out-of-the-box-connectors/github.md index 06d2623894e..00ced81c5e4 100644 --- a/docs/components/connectors/out-of-the-box-connectors/github.md +++ b/docs/components/connectors/out-of-the-box-connectors/github.md @@ -153,8 +153,8 @@ The **GitHub Connector** currently supports the following operations. - **GitHub API:** [Update a release](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#update-a-release). - **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Release id:** The unique identifier of the release. +- **Repository:** The name of the repository. The name is not case-sensitive. +- **Release ID:** The unique identifier of the release. - **Body:** Text describing the contents of the tag. - **Tag name:** The name of the tag. - **Release name:** The name of the release @@ -164,8 +164,8 @@ The **GitHub Connector** currently supports the following operations. - **GitHub API:** [Delete a release](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#delete-a-release). - **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Release id:** The unique identifier of the release. +- **Repository:** The name of the repository. The name is not case-sensitive. +- **Release ID:** The unique identifier of the release. #### List releases @@ -280,12 +280,6 @@ handling response is still applicable [as described](/components/connectors/prot The **GitHub Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). -:::note -If you have used the GitHub Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. -Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a GitHub Webhook Connector task 1. Start building your BPMN diagram. You can use GitHub Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. diff --git a/docs/components/connectors/out-of-the-box-connectors/google-gemini.md b/docs/components/connectors/out-of-the-box-connectors/google-gemini.md new file mode 100644 index 00000000000..3823a456b0c --- /dev/null +++ b/docs/components/connectors/out-of-the-box-connectors/google-gemini.md @@ -0,0 +1,233 @@ +--- +id: google-gemini +title: Google Gemini Connector +sidebar_label: Google Gemini Connector +description: The Gemini large language models (LLMs) that are used by Gemini for Google Cloud are trained on datasets of publicly available code, Google Cloud-specific material, and other relevant technical information in addition to the datasets used to train the Gemini [foundation models](https://storage.googleapis.com/deepmind-media/gemini/gemini_1_report.pdf). +--- + +:::info +The **Google Gemini Connector** is available for `8.7.0` or later. +::: + +The **Google Gemini Connector** is an outbound Connector that allows you to access Gemini multimodal models from Google. 
It is capable of understanding virtually any input, and can combine different types of information in a BPMN process. + +## Create a Google Gemini Connector task + +import ConnectorTask from '../../../components/react-components/connector-task.md' + + + +## Make your Google Gemini Connector executable + +To execute this Connector, ensure all mandatory fields are correctly filled. + +:::note +All the mandatory and non-mandatory fields and required settings depending on the operation selection you choose are covered in the upcoming sections. +::: + +## Authentication + +Choose an authentication type from the **Type** dropdown. For details on authentication types, see [Google authentication types](#google-authentication-types). + +## Project ID + +Enter your google cloud project identifier. + +## Region + +Enter the region where your project is located. For example, `us-central1 (lowa)`, `us-west1 (Oregon)`. + +## Model + +Select a model from the dropdown. The following models are currently supported: + +- gemini-1.5-flash-001 +- gemini-1.5-flash-002 +- gemini-1.5-pro-001 +- gemini-1.5-pro-002 +- gemini-1.0-pro-001 +- gemini-1.0-pro-002 +- gemini-1.0-pro-vision-001 + +## Prompt + +Enter a prompt as a FEEL expression, providing text and media. + +- To provide text to Gemini, your expression should contain key _"text"_ and text data. For example, _"text"_ : _"your text"_ +- To provide media to Gemini, your expression should contain key _"mime"_ and mime type text, and key _"uri"_ and media URI. For example, _"mime"_: _"mime type"_, _"uri"_: _"your URI"_. + +For example: + +```feel += [{"text": "who is this video about"}, +{"mime": "video/*", "uri": "https://youtu.be/..."}] +``` + +## System instructions + +Enter system instructions as a string, to determine how the model should respond. + +To learn more about system instructions, refer to [Google system instructions](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions?hl=en). + +## Grounding + +Grounding connects the model output to the verifiable sources of information. + +- This is useful in situations where accuracy and reliability are important. +- To use grounding, select the _Grounding_ checkbox and input the path to the data store. + +To learn more about grounding, refer to [Google grounding overview](https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview?hl=en). + +## Safety Filter Settings + +You can adjust the likelihood of receiving a model response which might contain harmful content. + +- Content is blocked based on the probability that it is harmful. +- To use safety filter settings, select the _Safety Filter Settings_ checkbox and select the desired level from dropdown. +- By default, all filters are set to OFF. + +To learn more about safety filters, refer to [Google responsible AI safety filters and settings](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/responsible-ai?hl=en#safety_filters_and_attributes). + +## Add stop sequence + +A stop sequence is a series of characters (including spaces) that stops response generation if encountered by the model. + +The stop sequence should be inserted as a string list. + +For example: + +```feel += ["text 1", "text 2"] +``` + +## Temperature + +The **Temperature** controls the randomness in token selection. + +- A lower temperature is good when you expect a true or correct response. A temperature of `0` means the highest probability token is usually selected. 
+- A higher temperature can lead to diverse or unexpected results. Some models have a higher maximum temperature to encourage more random responses.
+
+## Output token limit
+
+The **Output token limit** determines the maximum amount of text output from a single prompt. A token is approximately four characters.
+
+## Seed
+
+Setting a **Seed** value is useful if you make repeated requests and want the same model response.
+
+A deterministic outcome is not guaranteed. Changing the model or other settings can cause variations in the response even when you use the same seed value.
+
+## Top-K
+
+The **Top-K** value specifies the number of candidate tokens considered when the model selects an output token.
+
+- Use a lower value for less random responses and a higher value for more random responses.
+- Only the _gemini-1.0-pro-vision-001_ model supports Top-K.
+
+## Top-P
+
+The **Top-P** value changes how the model selects tokens for output.
+
+- Tokens are selected from the most probable to the least probable, until the sum of their probabilities equals the top-P value.
+- For example, if tokens A, B, and C have a probability of .3, .2, and .1 and the top-P value is .5, the model selects either A or B as the next token (using temperature).
+- For the least variable results, set top-P to 0.
+
+## Functional call description
+
+**Function calling** is a feature of Gemini models that makes it easier to get structured data outputs from generative models.
+
+- The **Functional call description** must be provided in FEEL format.
+- All types must be specified in uppercase (for example, `OBJECT`, `STRING`).
+
+To learn more about function calling, refer to [Google function calling](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling).
+
+For example:
+
+```feel
+[
+  {
+    "name": "get_exchange_rate",
+    "description": "Get the exchange rate for currencies between countries",
+    "parameters": {
+      "type": "OBJECT",
+      "properties": {
+        "currency_date": {
+          "type": "STRING",
+          "description": "A date that must always be in YYYY-MM-DD format or the value 'latest' if a time period is not specified"
+        },
+        "currency_from": {
+          "type": "STRING",
+          "description": "The currency to convert from in ISO 4217 format"
+        },
+        "currency_to": {
+          "type": "STRING",
+          "description": "The currency to convert to in ISO 4217 format"
+        }
+      },
+      "required": [
+        "currency_date",
+        "currency_from",
+        "currency_to"
+      ]
+    }
+  }
+]
+```
+
+### Google authentication types
+
+The **Google Gemini Connector** currently supports two methods for authentication and authorization:
+
+- Based on a short-lived JWT bearer token.
+- Based on a refresh token.
+
+Google supports multiple ways to obtain both types of token. Refer to the [official Google OAuth documentation](https://developers.google.com/identity/protocols/oauth2) for current instructions, or see the examples below.
+
+#### Example 1: Obtain JWT bearer token with a service account
+
+:::warning
+The following code snippet is for demonstration purposes only and must not be used for real production systems due to security concerns.
+For production usage, follow the [official Google guidelines](https://developers.google.com/identity/protocols/oauth2/service-account).
+::: + +Assuming you have created a service account and downloaded a JSON file with keys, run the following Python 3 snippet to print the JWT token in the terminal: + +```python +import google.auth +import google.auth.transport.requests +from google.oauth2 import service_account +# Scopes required to execute 'create' endpoind with Google Drive API +SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.appdata'] +# File with keys +SERVICE_ACCOUNT_FILE = 'google-service-account-creds.json' +credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES) +auth_req = google.auth.transport.requests.Request() +credentials.refresh(auth_req) +# Print token +print(credentials.token) +``` + +#### Example 2: Obtain bearer and refresh token with OAuth client + +:::warning +The following code snippet is for demonstration purposes only and must not be used for real production systems due to security concerns. +For production usage, follow the [official Google guidelines](https://developers.google.com/identity/protocols/oauth2/web-server). +::: + +Assuming you have created an OAuth client, you can download key files from the Google [Console](https://console.cloud.google.com/apis/credentials). Run the following Python 3 snippet to print the refresh token in the terminal: + +```python +from google_auth_oauthlib.flow import InstalledAppFlow +import pprint + +SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/documents'] +OAUTH_KEYS = './oauth-keys.json' # path to your file with OAuth credentials + +def main(): + flow = InstalledAppFlow.from_client_secrets_file(OAUTH_KEYS, SCOPES) + creds = flow.run_local_server(port=54948) + pprint.pprint(vars(creds)) + +if __name__ == "__main__": + main() +``` diff --git a/docs/components/connectors/out-of-the-box-connectors/kafka.md b/docs/components/connectors/out-of-the-box-connectors/kafka.md index e40919e5016..0264c048902 100644 --- a/docs/components/connectors/out-of-the-box-connectors/kafka.md +++ b/docs/components/connectors/out-of-the-box-connectors/kafka.md @@ -71,7 +71,7 @@ In the **Message** section, set the **Key** and the **Value** that will be sent ## Schema strategies :::caution -Use Schema strategies with caution, as this is an [alpha feature](/reference/alpha-features.md). Functionality may not be comprehensive and could change. +Use Schema strategies with caution, as this is an [alpha feature](/components/early-access/alpha/alpha-features.md). Functionality may not be comprehensive and could change. ::: This Connector supports different schema strategies, offering a compact, fast, and binary data exchange format for Kafka messages. diff --git a/docs/components/connectors/out-of-the-box-connectors/microsoft-teams.md b/docs/components/connectors/out-of-the-box-connectors/microsoft-teams.md index f9e156d16b9..fd6d381cf75 100644 --- a/docs/components/connectors/out-of-the-box-connectors/microsoft-teams.md +++ b/docs/components/connectors/out-of-the-box-connectors/microsoft-teams.md @@ -70,9 +70,9 @@ Visit [Microsoft Teams Access Token](https://learn.microsoft.com/azure/active-di For a **Refresh Token** type authentication, take the following steps: -1. Click the **Refresh Token** connection type in the **Authentication** section. -2. Set **Refresh Token** to `Refresh Token`. Read more on [how to get a refresh token](https://learn.microsoft.com/en-us/graph/auth-v2-user). -3. 
Set **Tenant id** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. Read more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). +1. Click the **Refresh token** connection type in the **Authentication** section. +2. Set **Refresh token** to `Refresh Token`. Read more on [how to get a refresh token](https://learn.microsoft.com/en-us/graph/auth-v2-user). +3. Set **Tenant ID** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. Read more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). 4. Set the **Client ID** field: the application ID that the [Azure app registration portal](https://go.microsoft.com/fwlink/?linkid=2083908) assigned to your app. 5. Set the **Secret ID** field: the client secret that you created in the app registration portal for your app. @@ -81,7 +81,7 @@ For a **Refresh Token** type authentication, take the following steps: For a **Client credentials** type authentication, take the following steps: 1. Click the **Client credentials** connection type in the **Authentication** section. -2. Set **Tenant id** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. See more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). +2. Set **Tenant ID** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. See more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). 3. Set the **Client ID** field: the application ID that the [Azure app registration portal](https://go.microsoft.com/fwlink/?linkid=2083908) assigned to your app. 4. Set the **Secret ID** field: the client secret that you created in the app registration portal for your app. @@ -103,7 +103,7 @@ For example, if you want to send a message in a Microsoft Teams channel, choose | Property | Methods | Required | Type | Description | | :-------------: | :---------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Chat ID | Get chat by id
    List chat members
    Send message in chat
    List messages in chat
    Get message in chat
    List chat members | Yes | string | Microsoft Teams chat id | +| Chat ID | Get chat by ID
    List chat members
    Send message in chat
    List messages in chat
    Get message in chat
    List chat members | Yes | string | Microsoft Teams chat ID | | Content | Send message in chat | Yes | text | Content that will be sent to chat | | Content Type | Send message in chat | Yes | dropdown | Content type of body message | | Chat type | Create a new chat | Yes | dropdown | Click **one on one** to create a one-on-one chat or **group** to create a group chat. | @@ -111,9 +111,9 @@ For example, if you want to send a message in a Microsoft Teams channel, choose | Members | Create a new chat | Yes | FEEL expression | See [members property](#members-property) to learn more. | | Top | List messages in chat | No | numbers | Controls the number of items per response; maximum allowed top value is 50. | | Order by | List messages in chat | Yes | dropdown | Can order by 'lastModifiedDateTime' and 'createdDateTime'. | -| Expand response | Get chat by id | Yes | dropdown | Choose | +| Expand response | Get chat by ID | Yes | dropdown | Choose | | Filter | List messages in chat | No | string | Sets the date range filter for the lastModifiedDateTime and createdDateTime properties. [Learn more about filtering](https://learn.microsoft.com/en-us/graph/filter-query-parameter). | -| Message ID | Get message in chat | Yes | string | Microsoft Teams chat message id | +| Message ID | Get message in chat | Yes | string | Microsoft Teams chat message ID | ##### Expand response @@ -164,19 +164,19 @@ The **members** property must contain a list of members: | Property | Methods | Required | Type | Description | | :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :------: | :--------------------------------------------------------------------------------------------------------------------: | -| Group ID | Create channel
    Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams group id | -| Channel ID | Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams channel id | +| Group ID | Create channel
    Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams group ID | +| Channel ID | Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams channel ID | | Display name | Create channel | No | string | Displayed name of new Microsoft Teams channel | | Description | Create channel | No | text | Description of new Microsoft Teams channel | | Channel membership type | Create channel | Yes | dropdown | See [teams-channels-overview](https://learn.microsoft.com/microsoftteams/teams-channels-overview) for more information | -| Owner | Create channel (if Channel membership type != STANDARD) | Yes | string | Channel owner; Microsoft Teams user id or Microsoft Teams principal name | +| Owner | Create channel (if Channel membership type != STANDARD) | Yes | string | Channel owner; Microsoft Teams user ID or Microsoft Teams principal name | | Filter | List channels | No | string | The search filter. [Learn more about filtering](https://learn.microsoft.com/en-us/graph/filter-query-parameter) | | Content | Send message to channel | Yes | text | Content that will be sent to chat | | Content Type | Send message to channel | Yes | dropdown | Content type of body message | -| Message ID | Get channel message | Yes | string | Message id of Microsoft Teams in channel | +| Message ID | Get channel message | Yes | string | Message ID of Microsoft Teams in channel | | Top | List channel messages | No | numbers | Controls the number of items per response | | With replies | List channel messages | Yes | boolean | Choose **FALSE** for get messages without replies
    Choose **FALSE** for get messages without replies | -| Message ID | List message replies | Yes | string | Microsoft Teams channel message id | +| Message ID | List message replies | Yes | string | Microsoft Teams channel message ID | #### Channel methods diff --git a/docs/components/connectors/out-of-the-box-connectors/sendgrid.md b/docs/components/connectors/out-of-the-box-connectors/sendgrid.md index 28ea9c2424c..b26f0cc828d 100644 --- a/docs/components/connectors/out-of-the-box-connectors/sendgrid.md +++ b/docs/components/connectors/out-of-the-box-connectors/sendgrid.md @@ -95,7 +95,7 @@ To make the **SendGrid Email Template Connector** executable, fill out all the m 4. Set **Receiver Name** to `Your Name`. 5. Set **Receiver Email** to `Your email address`. 6. Log in to your SendGrid account and navigate to [the Dynamic Template you created](#configure-a-dynamic-template). -7. Copy the id of the template and paste it in the **Template ID field**. +7. Copy the ID of the template and paste it in the **Template ID field**. 8. Provide the test data in the **Template Data** field as a [FEEL context expression](/components/modeler/feel/language-guide/feel-context-expressions.md): ```text diff --git a/docs/components/connectors/out-of-the-box-connectors/slack.md b/docs/components/connectors/out-of-the-box-connectors/slack.md index 15d9e52cfb4..a65a756bea4 100644 --- a/docs/components/connectors/out-of-the-box-connectors/slack.md +++ b/docs/components/connectors/out-of-the-box-connectors/slack.md @@ -33,7 +33,7 @@ To make the **Slack Connector** executable, fill out the mandatory fields highli ### Authentication -Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, '{{secrets.SLACK_OAUTH_TOKEN}}'. +Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, `{{secrets.SLACK_OAUTH_TOKEN}}`. ### Create channel @@ -67,16 +67,16 @@ To invite users to a channel, take the following steps: - The channel name can be up to 80 characters, and can contain lowercase letters, digits, and symbols `-` and `_`. - This can be provided as a FEEL expression. - Invite by **Channel ID**: - - The channel ID must be a valid slack Channel ID. + - The channel ID must be a valid Slack Channel ID. - This can be provided as a FEEL expression. 3. Set the **Users** as required: - 1. One single user name or email or id (for example: `@myUser` or `my.user@company.com` or `ABCDEF12345`). + 1. One single username or email or ID (for example: `@myUser` or `my.user@company.com` or `ABCDEF12345`). 2. A comma separated list of users (for example: `@myUser, my.user@company.com, ABCDEF12345`). 3. FEEL expression. In this case you can provide a valid list of strings (for example: `["@myUser", "my.user@company.com", "ABCDEF12345"]`). - Formats: - If a username starts with an `@` symbol, it will be handled as user name. - If a username is in an email format, it will be handled as an email. - - If a username doesn't start with an `@`, and isn't an email, it will be handled as a user id. + - If a username doesn't start with an `@`, and isn't an email, it will be handled as a user ID. - If a null input or an input which is not a type of String or a Collection provided, you will get an Exception. - If all username is provided as any other type than a String, you will get an Exception. - If one of the usernames is provided as any other type than a String, it will be omitted. 
diff --git a/docs/components/connectors/out-of-the-box-connectors/twilio.md b/docs/components/connectors/out-of-the-box-connectors/twilio.md index 6c43eaca5bc..782f428334c 100644 --- a/docs/components/connectors/out-of-the-box-connectors/twilio.md +++ b/docs/components/connectors/out-of-the-box-connectors/twilio.md @@ -179,10 +179,6 @@ To learn more about implementing retry and error handling logic in your BPMN dia The **Twilio Webhook Connector** is an inbound Connector that enables you to start a BPMN process instance triggered by a [Twilio event](https://www.twilio.com/docs/usage/webhooks). -:::note -If you have used the **Twilio Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a Twilio Webhook Connector task 1. Start building your BPMN diagram. You can use the **Twilio Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/docs/components/connectors/out-of-the-box-connectors/uipath.md b/docs/components/connectors/out-of-the-box-connectors/uipath.md index ce7f2536227..41046e098dd 100644 --- a/docs/components/connectors/out-of-the-box-connectors/uipath.md +++ b/docs/components/connectors/out-of-the-box-connectors/uipath.md @@ -74,10 +74,10 @@ This operation allows you to create a new item and add it to a queue from UiPath For this section, you must fill out the following fields: -1. **Cloud URL**: Comes with a default value of `cloud.uipath.com`. You can always change it, if needed. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. +1. **Cloud URL**: Comes with a default value of `cloud.uipath.com`. You can always change it, if needed. To use a Connector secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. 2. **Cloud organization**: The name of your organization. See [about organizations](https://docs.uipath.com/automation-cloud/docs/about-organizations) to learn more. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. 3. **Cloud tenant**: The name of the tenant. See [about tenants](https://docs.uipath.com/automation-cloud/docs/about-tenants) to learn more. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. -4. **Organization Unit ID**: Click **Orchestrator** and you will find the id in the URL. For example, `https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/?tid=26929&fid=112233` where the **Organization Unit ID** is `112233`. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. +4. **Organization Unit ID**: Click **Orchestrator** and you will find the ID in the URL. For example, `https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/?tid=26929&fid=112233` where the **Organization Unit ID** is `112233`. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. 
#### Input diff --git a/docs/components/connectors/protocol/http-webhook.md b/docs/components/connectors/protocol/http-webhook.md index 6aea2097267..45c2dba8813 100644 --- a/docs/components/connectors/protocol/http-webhook.md +++ b/docs/components/connectors/protocol/http-webhook.md @@ -7,12 +7,6 @@ description: Start a process instance with your custom webhook configuration, tr The **HTTP Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by external HTTP call. -:::note -If you have used the HTTP Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. -Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an HTTP Webhook Connector event 1. Start building your BPMN diagram. You can use HTTP Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. @@ -83,7 +77,7 @@ Please refer to the [update guide](/components/connectors/custom-built-connector - Set the **API Key** property to the expected value of the API key. - Set the **API Key locator** property that will be evaluated against the incoming request to extract the API key. [See the example](#how-to-configure-api-key-authorization). -- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer {JWT_TOKEN}. +- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer `{JWT_TOKEN}`. - Set JWK URL which is used as a well-known public URL to fetch the [JWKs](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets). - Set JWT role property expression which will be evaluated against the content of the JWT to extract the list of roles. See more details on extracting roles from JWT data [here](#how-to-extract-roles-from-jwt-data). @@ -314,8 +308,9 @@ not only the response body, but also the headers and the HTTP status returned by the Connector. ::: -A response expression can be used to return data after the webhook has been invoked. You can use FEEL to return the body, headers, and the HTTP status to the client invoking -the Webhook Connector endpoint. +#### Use the request + +You can use a response expression to return data after the webhook has been invoked. You can use FEEL to return the request body, headers, and the HTTP status to the client invoking the Webhook Connector endpoint. For example, given a webhook request with the payload body: @@ -326,7 +321,7 @@ For example, given a webhook request with the payload body: } ``` -You can return `myValue1` in a new key `myCustomKey` with a response body expression that may look like this: +You can return `myValue1` in a new key `myCustomKey` with a response body expression such as: ```json ={ @@ -334,7 +329,7 @@ You can return `myValue1` in a new key `myCustomKey` with a response body expres } ``` -The default HTTP status code is `200`. You can change it by providing a `statusCode` key in your expression: +The default HTTP status code is `200`. You can change it by including a `statusCode` key in your expression: ```json ={ @@ -343,7 +338,7 @@ The default HTTP status code is `200`. 
You can change it by providing a `statusC } ``` -Headers are also supported by using the `headers` key in the response expression: +Headers are also supported, using the `headers` key in the response expression: ```json ={ @@ -360,12 +355,14 @@ When working with `request` data, use the following references to access data: You can also use FEEL expressions to modify the data you return. +#### Use the `correlation` object + +When using the Webhook Connector with a start event that correlates a message, you can access the `correlation` object in the response expression. In addition to the `request` object you have access to the `correlation` result. The data available via the `correlation` object depends on the type of BPMN element you are using the Webhook Connector with. -A start event with a message definition uses message publishing internally to correlate an incoming -request with Zeebe. A successful correlation will therefore lead to a published message and the `correlation` object will contain the following properties: +A start event with a message definition uses message publishing internally to correlate an incoming request with Zeebe. A successful correlation will therefore lead to a published message and the `correlation` object will contain the following properties: ```json { @@ -385,3 +382,40 @@ newly create process instance key when accessing the `correlation` object: "tenantId": "" } ``` + +#### Use the `documents` object + +You can access created documents in both the response expression and the result expression. + +The `documents` object contains the references for created documents. + +**Example response expression** + +```json +{ + "body": { + "message": "Document created", + "documents": documents + } +} +``` + +If the `documents` list is not empty, document items are returned in the following format (example values provided): + +```json +{ + "storeId": "in-memory", + "documentId": "2b7215da-12b1-4374-8743-85d6854fcba5", + "metadata": { + "size": 405551, + "expiresAt": null, + "fileName": "my-image.jpg", + "customProperties": null, + "contentType": "image/jpeg" + } +} +``` + +:::note +Request parts are automatically stored in the configured document store when sending a multipart request. +::: diff --git a/docs/components/connectors/protocol/rest.md b/docs/components/connectors/protocol/rest.md index 4a2b159f3bd..1cdeca28c18 100644 --- a/docs/components/connectors/protocol/rest.md +++ b/docs/components/connectors/protocol/rest.md @@ -27,6 +27,40 @@ To make the **REST Connector** executable, choose the required authentication ty All the mandatory and non-mandatory fields will be covered in the upcoming sections. Depending on the authentication selection you make, more fields might be required. We will also cover this in the next section. ::: +### Configure a proxy server in Self-Managed + +If you are using Camunda Self-Managed, you can configure this Connector to use an HTTP or HTTPS proxy server. + +You can do this using the `JAVA_OPTS` environment variable. 
For example: + +``` +JAVA_OPTS=-Dhttp.proxyHost=proxy -Dhttp.proxyPort=3128 -Dhttps.proxyHost=proxy -Dhttps.proxyPort=3128 -Dhttp.nonProxyHosts=OTHER_DOMAIN +``` + +#### HTTP + +To specify the proxy as an HTTP protocol handler, set the following standard JVM properties: + +| Property | Description | +| :------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `http.proxyHost` | The host name of the proxy server. | +| `http.proxyPort` | The port number (default is 80). | +| `http.nonProxyHosts` |

    A list of hosts to connect to directly, bypassing the proxy.

    • Specify as a list of patterns, separated by |.
    • Patterns can start or end with a `*` for wildcards.
    • Any host matching one of these patterns uses a direct connection instead of a proxy.

    | + +#### HTTPS + +To specify the proxy as an HTTPS (HTTP over SSL) protocol handler, set the following standard JVM properties: + +| Property | Description | +| :------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `https.proxyHost` | The host name of the proxy server. | +| `https.proxyPort` | The port number (default is 443). | +| `http.nonProxyHosts` |

    A list of hosts to connect to directly, bypassing the proxy.

    • Specify as a list of patterns, separated by |.
    • Patterns can start or end with a `*` for wildcards.
    • Any host matching one of these patterns uses a direct connection instead of a proxy.

    | + +:::note +The HTTPS protocol handler also uses the `http.nonProxyHosts` property to specify non-proxy hosts. +::: + ### Authentication You can choose among the available authentication type according to your authentication requirements. @@ -57,7 +91,7 @@ Select the **REST Connector** and fill out the following properties under the ** - **Headers**: The API key will be included in the request headers. 3. Specify your API key details: - **API key name**: Enter the parameter name expected by the API (e.g., apiKey). - - **API key value**: Reference the secret you created for your API key (e.g., {{secrets.REST_API_KEY_SECRET}}). + - **API key value**: Reference the secret you created for your API key (e.g., `{{secrets.REST_API_KEY_SECRET}}`). ### REST Connector (Basic) @@ -207,6 +241,44 @@ Additionally, you can choose to unpack the content of your `response` into multi } ``` +## Error handling + +If an error occurs, the Connector throws an error and includes the error response in the `error` variable in Operate. Click on the REST Connector in Operate to see this variable. + +The following example shows the `error` variable in an error response: + +```json +{ + "code": "400", + "variables": { + "response": { + "headers": { + "Content-Length": "70", + "Date": "Thu, 17 Oct 2024 09:31:51 GMT", + "Content-Type": "application/json" + }, + "body": { + "temperature": 36, + "message": "My custom error message", + "booleanField": true + } + } + }, + "message": "Bad Request", + "type": "io.camunda.connector.api.error.ConnectorException" +} +``` + +You can handle this error using an Error Boundary Event and the following error expression: + +```json +if matches(error.code, "400") and error.variables.response.body.temp = 36 +then bpmnError("Too hot", error.variables.response.body.message, error.variables.response.body) +else null +``` + +In this example, passing `error.variables.response.body` as the third argument to the `bpmnError` function allows you to pass additional information about the error to the error boundary event. For example, the `message`, `temperature` and `booleanField` fields from the error response are passed to the error boundary event. + ## OData support The **REST Connector** supports JSON-based [OData protocol](https://www.odata.org/). diff --git a/docs/components/console/manage-clusters/cluster-backups.md b/docs/components/console/manage-clusters/cluster-backups.md index 1b7029ad27a..4b92980fb19 100644 --- a/docs/components/console/manage-clusters/cluster-backups.md +++ b/docs/components/console/manage-clusters/cluster-backups.md @@ -1,6 +1,6 @@ --- id: create-backups -title: Create backup +title: Backups description: "If your organization works within Camunda's Enterprise plan, you can create cluster backups." 
--- diff --git a/docs/components/console/manage-clusters/img/cluster-detail-clients.png b/docs/components/console/manage-clusters/img/cluster-detail-clients.png index a8c6583e5ef..4509c640f56 100644 Binary files a/docs/components/console/manage-clusters/img/cluster-detail-clients.png and b/docs/components/console/manage-clusters/img/cluster-detail-clients.png differ diff --git a/docs/components/console/manage-clusters/img/cluster-details-create-client.png b/docs/components/console/manage-clusters/img/cluster-details-create-client.png index 550dc4d0917..2db70d972b1 100644 Binary files a/docs/components/console/manage-clusters/img/cluster-details-create-client.png and b/docs/components/console/manage-clusters/img/cluster-details-create-client.png differ diff --git a/docs/components/console/manage-clusters/img/cluster-details-created-client.png b/docs/components/console/manage-clusters/img/cluster-details-created-client.png index 5ab80337261..8d037073738 100644 Binary files a/docs/components/console/manage-clusters/img/cluster-details-created-client.png and b/docs/components/console/manage-clusters/img/cluster-details-created-client.png differ diff --git a/docs/components/console/manage-clusters/manage-alerts.md b/docs/components/console/manage-clusters/manage-alerts.md index c5c9e983ce8..fa6f952f999 100644 --- a/docs/components/console/manage-clusters/manage-alerts.md +++ b/docs/components/console/manage-clusters/manage-alerts.md @@ -1,6 +1,6 @@ --- id: manage-alerts -title: Manage alerts +title: Alerts description: "Camunda 8 can notify you when process instances stop with an error." --- diff --git a/docs/components/console/manage-clusters/manage-api-clients.md b/docs/components/console/manage-clusters/manage-api-clients.md index 54b582977a9..d5753278548 100644 --- a/docs/components/console/manage-clusters/manage-api-clients.md +++ b/docs/components/console/manage-clusters/manage-api-clients.md @@ -1,6 +1,6 @@ --- id: manage-api-clients -title: Manage API clients +title: API clients description: "Let's create a client and manage our API clients." --- @@ -42,7 +42,7 @@ When the rate limit is triggered, the client will receive an HTTP 429 response. Currently, Camunda 8 SaaS supports the following scopes: -- Zeebe - Access to the [Zeebe gRPC](/apis-tools/zeebe-api/grpc.md) and [REST](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md) APIs. +- Zeebe - Access to the [Zeebe gRPC](/apis-tools/zeebe-api/grpc.md) and [REST](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) APIs. - Tasklist - Access to the [Tasklist GraphQL](/apis-tools/tasklist-api/tasklist-api-overview.md) API. - Operate - Access to the [Operate REST API](/apis-tools/operate-api/overview.md). - Optimize - Access to the [Optimize REST API]($optimize$/apis-tools/optimize-api/overview). diff --git a/docs/components/console/manage-clusters/manage-cluster.md b/docs/components/console/manage-clusters/manage-cluster.md index 6b09e55a28e..0f4aa6a105e 100644 --- a/docs/components/console/manage-clusters/manage-cluster.md +++ b/docs/components/console/manage-clusters/manage-cluster.md @@ -1,10 +1,10 @@ --- id: manage-cluster title: Manage your cluster -description: "Follow these steps to rename, resume, update, or delete your cluster." +description: "Follow these steps to rename, resume, update, resize, or delete your cluster." --- -Read through the following sections to rename, resume, update, or delete your cluster. +Learn how to rename, resume, update, resize, or delete your cluster. 
## Rename a cluster @@ -59,10 +59,23 @@ When an update is available, an **Update** button will appear. This button is no You can decide if you want to have [automated updates](/reference/auto-updates.md) to new versions of Camunda 8 activated. You can also toggle this feature anytime later in the **Settings** tab of your cluster. -## Delete a cluster +## Resize a cluster + +You can increase or decrease the [cluster size](/components/concepts/clusters.md#cluster-size) at any time. For example, increase the cluster size to improve performance and add capacity, or decrease the cluster size to free up reservations for another cluster. + +1. Open the cluster details by clicking on the cluster name. +1. Select **Resize cluster** next to the cluster type. +1. Select the new cluster size from the available sizes. +1. Click **Confirm** to resize the cluster, or **Cancel** to close the modal without resizing the cluster. :::note -This action cannot be undone. +Contact your Customer Success Manager to increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. +::: + +## Delete a cluster + +:::caution +Deleting a cluster is **permanent** and cannot be undone. ::: A cluster can be deleted at any time. To delete your cluster, navigate to the **Clusters** tab in the top navigation and click **Delete** to the far right of the cluster name. diff --git a/docs/components/console/manage-clusters/manage-ip-allowlists.md b/docs/components/console/manage-clusters/manage-ip-allowlists.md index 0437ba49e2d..c5586806f7b 100644 --- a/docs/components/console/manage-clusters/manage-ip-allowlists.md +++ b/docs/components/console/manage-clusters/manage-ip-allowlists.md @@ -1,6 +1,6 @@ --- id: manage-ip-allowlists -title: Manage IP allowlists +title: IP allowlists description: "If your organization works within Camunda's Enterprise plan, you can restrict access to clusters with an IP allowlist." keywords: [whitelist, allowlist, ip whitelist, ip allowlist] --- diff --git a/docs/components/console/manage-clusters/manage-secrets.md b/docs/components/console/manage-clusters/manage-secrets.md index 7518b65d507..ea7025f8011 100644 --- a/docs/components/console/manage-clusters/manage-secrets.md +++ b/docs/components/console/manage-clusters/manage-secrets.md @@ -1,6 +1,6 @@ --- id: manage-secrets -title: Manage secrets +title: Connector secrets description: Create secrets and reference them in your Connectors without exposing sensitive information in your BPMN processes. --- diff --git a/docs/components/console/manage-organization/advanced-search.md b/docs/components/console/manage-organization/advanced-search.md index 54ad0f5f64d..ba8d6437d02 100644 --- a/docs/components/console/manage-organization/advanced-search.md +++ b/docs/components/console/manage-organization/advanced-search.md @@ -14,7 +14,7 @@ This search functionality allows users to: ## Open the search bar -Press `ctrl`+`k`, `⌘`+`k`, or click the magnifier in the top navigation bar to open the search bar. +Press `Ctrl+K`, `⌘+K`, or click the magnifier in the top navigation bar to open the search bar. 
![Open the search bar](./img/open_console_search.png) diff --git a/docs/components/console/manage-organization/enable-alpha-features.md b/docs/components/console/manage-organization/enable-alpha-features.md index 252c9a5971e..918336a90df 100644 --- a/docs/components/console/manage-organization/enable-alpha-features.md +++ b/docs/components/console/manage-organization/enable-alpha-features.md @@ -10,7 +10,7 @@ Opting-in to Camunda alpha terms currently only applies to Enterprise SaaS subsc Customers in other subscriptions can still turn on/off these settings directly from Console. ::: -If you aren't already familiar with accessing **alpha features**, learn more in our [alpha feature documentation](/reference/alpha-features.md). +If you aren't already familiar with accessing **alpha features**, learn more in our [alpha feature documentation](/components/early-access/alpha/alpha-features.md). Alpha terms typically refer to the specific terms and conditions that govern the use and testing of this software during its alpha phase. These terms outline the rights, responsibilities, and limitations of both the software provider (Camunda) and the users (alpha testers or early adopters) during the testing and evaluation period. diff --git a/docs/components/console/manage-organization/external-sso.md b/docs/components/console/manage-organization/external-sso.md index 5bcd128ca4f..7343422d032 100644 --- a/docs/components/console/manage-organization/external-sso.md +++ b/docs/components/console/manage-organization/external-sso.md @@ -30,7 +30,7 @@ For Azure AD, you will need to provide: - The domain used for the login email addresses - The Microsoft Azure AD domain -- The generated client id +- The generated client ID - The client secret value To generate the client on your end, you will need to use the Camunda **Redirect URL** `https://weblogin.cloud.camunda.io/login/callback `. Ensure you attach the user permissions `Users > User.Read`. @@ -43,7 +43,7 @@ Default organizations for external identity providers are only available for org By setting up an external identity provider, it is possible to configure up to 10 default organizations. The following information must be added in the ticket so that the support team can configure the default organizations: -- Organization Id +- Organization ID - Default organization roles If a user logs in with the configured connection, the user is automatically assigned to these organizations with the corresponding roles. diff --git a/docs/components/console/manage-organization/img/activity-view.png b/docs/components/console/manage-organization/img/activity-view.png index cbae303ee37..1a946c8fec7 100644 Binary files a/docs/components/console/manage-organization/img/activity-view.png and b/docs/components/console/manage-organization/img/activity-view.png differ diff --git a/docs/components/console/manage-organization/view-organization-activity.md b/docs/components/console/manage-organization/view-organization-activity.md index 06f45af8a04..aa11469449c 100644 --- a/docs/components/console/manage-organization/view-organization-activity.md +++ b/docs/components/console/manage-organization/view-organization-activity.md @@ -1,9 +1,15 @@ --- id: view-organization-activity title: View organization activity -description: "Let's analyze the capabilities of the Activity tab." +description: "The Activity tab allows you to view details of all activity within an organization, such as cluster creation, deletion, updates, and user invitations." 
--- -The **Activity** tab lists all activities within an organization. Here, you can note when a cluster was created or deleted. +You can view all activity within an organization on the **Activity** tab. + +For example, you can see details for cluster creation, deletion, updates, and user invitations. ![activity-view](./img/activity-view.png) + +## Export activity + +Click **Export activity**, and select whether to export and download the activity list as a JSON or CSV file. diff --git a/docs/components/console/manage-plan/migrate-from-prof-to-starter.md b/docs/components/console/manage-plan/migrate-from-prof-to-starter.md index 1fdcd43ec98..8f3d548da9b 100644 --- a/docs/components/console/manage-plan/migrate-from-prof-to-starter.md +++ b/docs/components/console/manage-plan/migrate-from-prof-to-starter.md @@ -11,7 +11,7 @@ Here are a few important remarks to consider before completing the migration ste - Since the two plans have different types of clusters included and fees for those, we recommend comparing the [Professional plan](https://camunda.com/blog/2023/05/camunda-professional-edition-accelerate-projects/) with the [Starter plan](https://camunda.com/blog/2023/09/camunda-starter/) to [understand your monthly costs](https://camunda.com/pricing/starter-plan-price-calculator/) before the migration. - General users and development/production cluster reservations in the Professional plan are migrated “as is” to the Starter plan, which may result in overage costs (e.g. production clusters in Professional will be transferred to production clusters in the Starter plan). If you are not using your production cluster in the Professional plan, we recommend you delete it beforehand and create a new development cluster in the Starter plan afterward. - Once you have edited the plan below, the changes will take effect on the first day of your next subscription period. -- If you have any questions, do not hesitate to [contact us](https://camunda.com/contact/). +- If you have any questions, do not hesitate to [contact us](/reference/contact.md). ::: diff --git a/docs/components/early-access/alpha/alpha-features.md b/docs/components/early-access/alpha/alpha-features.md new file mode 100644 index 00000000000..81b80ca6f83 --- /dev/null +++ b/docs/components/early-access/alpha/alpha-features.md @@ -0,0 +1,38 @@ +--- +id: alpha-features +title: Alpha features +sidebar_label: Alpha features +description: "Use alpha features to learn about upcoming changes, try them out, and share feedback." +--- + +You can use alpha features to learn about upcoming changes, try them out, and share feedback. + +:::info +To understand the difference between an alpha feature and an alpha release, see [alpha features and releases](/reference/release-policy.md#alpha-features-and-releases). +::: + +## Alpha + +Selected Camunda features and components are released as **alpha** versions. We release these in an early state for you to test and participate in development by sharing your feedback before they reach [general availability](/reference/release-policy.md#general-availability-ga). + +Limitations of alpha features and components include: + +- Not for production use. +- APIs, dependencies, and configuration are likely to change. +- Not necessarily feature-complete. +- Might lack full documentation. +- No guaranteed updates to newer releases. +- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. 
See [Camunda Enterprise Support Guide](https://camunda.com/services/enterprise-support-guide/). +- No maintenance service. +- (SaaS) No availability targets. +- Released outside the standard [release policy](/reference/release-policy.md). + +To learn more about using alpha features, see [enabling alpha features](/components/console/manage-organization/enable-alpha-features.md). + +:::note + +- Alpha features can also be included in a minor version (stable) release. +- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/reference/contact.md). +- Alpha releases can also have **limited availability**, such as features that are only available to enterprise customers. + +::: diff --git a/docs/components/early-access/alpha/sap/btp-integration.md b/docs/components/early-access/alpha/sap/btp-integration.md new file mode 100644 index 00000000000..06201e728c6 --- /dev/null +++ b/docs/components/early-access/alpha/sap/btp-integration.md @@ -0,0 +1,15 @@ +--- +id: btp-integration +title: SAP BTP integration +description: "Learn about the Camunda SAP Business Technology Platform (BTP) integration, an artifact run on BTP." +--- + +The Camunda SAP Business Technology Platform (BTP) integration is an artifact run on BTP. This connects to Camunda 8 SaaS to provide: + +- A generic Fiori app for starting BPMN processes and displaying [Camunda Forms](/components/modeler/forms/camunda-forms-reference.md) in the Fiori design language. +- The select exposure of SAP BTP services to [BPMN tasks](/components/modeler/bpmn/bpmn.md) and vice versa. +- A generic endpoint to start BPMN processes with. + +:::note Important! +The SAP BTP integration is an alpha feature available upon request. Visit [our contact page](/reference/contact.md) to contact us. +::: diff --git a/docs/components/early-access/alpha/sap/camunda-sap-integration.md b/docs/components/early-access/alpha/sap/camunda-sap-integration.md new file mode 100644 index 00000000000..0396aad835e --- /dev/null +++ b/docs/components/early-access/alpha/sap/camunda-sap-integration.md @@ -0,0 +1,46 @@ +--- +id: sap-integration +title: SAP integration +description: "Camunda's SAP integration consists of several features that can be used independently of one another: SAP OData, RFC, and BTP." +--- + +Camunda's SAP integration consists of several features that can be used independently of one another: SAP OData, RFC, and BTP. + +- [SAP OData Connector](./odata-connector.md): Interact with a S/4HANA or ECC System via OData v2 + v4 APIs, directly from your BPMN model. +- [SAP RFC outbound Connector](./rfc-connector.md): Query BAPIs and remote-enabled function modules on SAP ECC systems. +- [SAP BTP integration](./btp-integration.md) + - Use [Tasklist's](/components/tasklist/introduction-to-tasklist.md) forms in the Fiori UI + - Operate SAP BTP services from [BPMN tasks](/components/modeler/bpmn/bpmn.md) + - Inbound proxy endpoint to start process instances of BPMN models, including custom variables + +These features run in the customer's [SAP BTP instance](https://www.sap.com/products/technology-platform.html). They do not require a proprietary Camunda setup, and instead re-use an existing infrastructure with minimal prerequisites. 
+ +![SAP integration overview](./img/sap-integration-overview.png) + +## Technical requirements + +All SAP integration artifacts have the following technical requirements: + +- They run on SAP BTP, Cloud Foundry environment, and thus [need the respective service](https://discovery-center.cloud.sap/serviceCatalog/cloud-foundry-runtime?region=all). +- [(free) Destination Service](https://discovery-center.cloud.sap/serviceCatalog/destination?region=all&service_plan=lite&commercialModel=btpea) for system and service connectivity. +- If the S/4 or ECC system is located on-premises: + - [(free) Connectivity Service](https://discovery-center.cloud.sap/serviceCatalog/connectivity-service?region=all) + - [SAP Cloud Connector](https://help.sap.com/docs/connectivity/sap-btp-connectivity-cf/cloud-connector) configured and connected to both S/4 ECC and the BTP subaccount where the Camunda SAP integration artifacts will run. +- Technical user with respective access right to the S/4 and/or ECC system. + +### SAP OData Connector + +No additional requirements. + +### SAP RFC Connector + +- [(free) SAP Authorization and Trust Management Service](https://discovery-center.cloud.sap/serviceCatalog/authorization-and-trust-management-service?region=all) + +### SAP BTP integration + +- [(free) SAP Authorization and Trust Management Service](https://discovery-center.cloud.sap/serviceCatalog/authorization-and-trust-management-service?region=all) +- [PostgreSQL on SAP BTP, hyperscaler option](https://discovery-center.cloud.sap/serviceCatalog/postgresql-hyperscaler-option?region=all) + +For scaling out and up, either add multiple instances of an integration module (for example, the SAP OData Connector) or equip an integration module with more runtime memory (for example, the SAP BTP integration). + +For more sophisticated tuning, all mechanisms of BTP can be applied (such as the [(free) Application Autoscaler](https://discovery-center.cloud.sap/serviceCatalog/application-autoscaler?service_plan=standard®ion=all&commercialModel=btpea)), as all SAP integration artifacts fully adhere to the BTP cloud-based development standards and can be fully configured just as any custom developed apps on BTP. 
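+For example, a deployed integration module can be scaled out or up in place with the Cloud Foundry CLI (`cf`). The application name below is illustrative; use the name of the module as deployed in your subaccount:
+
+```shell
+# Illustrative only: scale out to two instances and scale up to 2 GB runtime memory
+$> cf scale sap-odata-connector -i 2 -m 2G
+```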
diff --git a/docs/components/early-access/alpha/sap/img/btp-destination-rfc.png b/docs/components/early-access/alpha/sap/img/btp-destination-rfc.png new file mode 100644 index 00000000000..5c3e01daec9 Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/btp-destination-rfc.png differ diff --git a/docs/components/early-access/alpha/sap/img/btp-destination.png b/docs/components/early-access/alpha/sap/img/btp-destination.png new file mode 100644 index 00000000000..0fcbf3d5a73 Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/btp-destination.png differ diff --git a/docs/components/early-access/alpha/sap/img/odata-connector-error-expression.png b/docs/components/early-access/alpha/sap/img/odata-connector-error-expression.png new file mode 100644 index 00000000000..6a70eb55edf Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/odata-connector-error-expression.png differ diff --git a/docs/components/early-access/alpha/sap/img/odata-steps.png b/docs/components/early-access/alpha/sap/img/odata-steps.png new file mode 100644 index 00000000000..d94742a26d8 Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/odata-steps.png differ diff --git a/docs/components/early-access/alpha/sap/img/rfc-overview.png b/docs/components/early-access/alpha/sap/img/rfc-overview.png new file mode 100644 index 00000000000..919650ad49c Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/rfc-overview.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-integration-overview.png b/docs/components/early-access/alpha/sap/img/sap-integration-overview.png new file mode 100644 index 00000000000..91b2356c10f Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-integration-overview.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template-advanced.png b/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template-advanced.png new file mode 100644 index 00000000000..f62fe06fd14 Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template-advanced.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template-result.png b/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template-result.png new file mode 100644 index 00000000000..eb543c080b2 Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template-result.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template.png b/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template.png new file mode 100644 index 00000000000..8d06c4b6458 Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-odata-connector-element-template.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-error-handling1.png b/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-error-handling1.png new file mode 100644 index 00000000000..7c06848d528 Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-error-handling1.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-error-handling2.png b/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-error-handling2.png new file mode 100644 index 00000000000..06c0b472f20 Binary 
files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-error-handling2.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-in-model.png b/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-in-model.png new file mode 100644 index 00000000000..b5ada529d1c Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-odata-connector-task-in-model.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-rfc-connector-element-template.png b/docs/components/early-access/alpha/sap/img/sap-rfc-connector-element-template.png new file mode 100644 index 00000000000..c3bfcef8fca Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-rfc-connector-element-template.png differ diff --git a/docs/components/early-access/alpha/sap/img/sap-rfc-connector-task-in-model.png b/docs/components/early-access/alpha/sap/img/sap-rfc-connector-task-in-model.png new file mode 100644 index 00000000000..42efdfa09de Binary files /dev/null and b/docs/components/early-access/alpha/sap/img/sap-rfc-connector-task-in-model.png differ diff --git a/docs/components/early-access/alpha/sap/odata-connector.md b/docs/components/early-access/alpha/sap/odata-connector.md new file mode 100644 index 00000000000..12391fe8514 --- /dev/null +++ b/docs/components/early-access/alpha/sap/odata-connector.md @@ -0,0 +1,171 @@ +--- +id: odata-connector +title: SAP OData Connector +description: "The SAP OData Connector is a protocol and outbound Connector that runs as a Docker image on the SAP Business Technology Platform (BTP)." +--- + +The SAP OData Connector is a protocol and outbound [Connector](/components/connectors/introduction.md) that runs as a Docker image on the SAP Business Technology Platform (BTP). + +This Connector is designed to run in [hybrid mode](/guides/use-connectors-in-hybrid-mode.md), hosted in the customer's SAP BTP sub-account in the [Cloud Foundry environment](https://discovery-center.cloud.sap/serviceCatalog/cloud-foundry-runtime?region=all). + +This Connector works with Camunda 8 SaaS, and utilizes SAP BTP's [Destination](https://learning.sap.com/learning-journeys/administrating-sap-business-technology-platform/using-destinations) and [Connectivity](https://help.sap.com/docs/connectivity/sap-btp-connectivity-cf/what-is-sap-btp-connectivity) concepts to query a SAP system via both OData v2 and v4. + +:::note Important! +This Connector is an alpha feature and available upon request. Visit [our contact page](/reference/contact.md) to contact us. +::: + +## Overview + +For a standard overview of the steps involved in the SAP OData Connector, see the following diagram: + +![OData steps](./img/odata-steps.png) + +## Prerequisites + +To run the SAP OData Connector Docker image, the following SAP infrastructure setup is required: + +- [Cloud Foundry CLI](https://github.com/cloudfoundry/cli) with [multiapps plugin](https://github.com/cloudfoundry/multiapps-cli-plugin) installed on the machine executing the deployment. +- SAP BTP subaccount with a [Cloud Foundry environment](https://discovery-center.cloud.sap/serviceCatalog/cloud-foundry-runtime?region=all) enabled and a [created space](https://help.sap.com/docs/btp/sap-business-technology-platform/create-spaces). +- Minimum of [1 GB storage quota and 2 GB runtime memory](https://help.sap.com/docs/btp/sap-business-technology-platform/managing-space-quota-plans). 
+- [Entitlements](https://help.sap.com/docs/btp/sap-business-technology-platform/managing-entitlements-and-quotas-using-cockpit) for: + - [Connectivity Service](https://discovery-center.cloud.sap/serviceCatalog/connectivity-service?region=all), `lite` plan (needed if the SAP system is on-premises). + - [Destination Service](https://discovery-center.cloud.sap/serviceCatalog/destination?service_plan=lite&region=all&commercialModel=btpea), `lite` plan. +- One or more instance- or subaccount-level destinations, pointing to the SAP systems to communicate with. + ![sample BTP destination configuration](./img/btp-destination.png) +- Ensure `Additional Properties` are correctly set on the Destination. For example: + +```text +HTML5.DynamicDestination: true +sap-client: +WebIDEEnabled: true +WebIDESystem: +WebIDEUsage: odata_gen +``` + +:::warning +Currently, only `BasicAuthentication` is supported on the Destination by the SAP OData Connector. +::: + +## Deployment to BTP + +A descriptor file is required to deploy the SAP OData Connector to a space in a SAP BTP subaccount. An exemplary deployment descriptor `mtad.yaml.example` is provided by Camunda. This is a standard format in SAP BTP's Cloud Foundry environment to describe the application that needs deployment. Take the following steps: + +1. Adjust the values to match those of the targeted Camunda 8 SaaS environment and rename the file to `mtad.yaml`. +2. Adjust the names of the SAP BTP Destination and Connectivity instances to your liking - both will be created automatically for you upon deployment. If instances of either of the two services already exist in your subaccount, they are reused. +3. After creating the `mtad.yaml` file, log into the desired SAP BTP subaccount via the [Cloud Foundry `cli`](https://github.com/cloudfoundry/cli) (cf-cli): + +```shell +$> cf login +API endpoint: https://api.cf. ... +... +``` + +4. Deploy the SAP OData Connector via the `cf-cli`. Note that this requires [the "multiapps" plugin of Cloud Foundry](https://github.com/cloudfoundry/multiapps-cli-plugin) to be installed on the machine the deployment runs on: + +```shell +$> cf deploy ./ # append the -f flag to short-circuit ongoing deployments +Deploying multi-target app archive /Some/path/sap-odata-connector.mtar in org / space as you@example.org .. +... +Application "sap-odata-connector" started and available at "some.url.hana.ondemand.com" +``` + +## Deployment in Camunda 8 SaaS + +- If using Web Modeler, [import the SAP OData Connector's element template](/components/connectors/manage-connector-templates.md#importing-existing-connector-templates) for design use. + +![sample BPMN diagram with SAP OData connector](./img/sap-odata-connector-task-in-model.png) + +- If using Desktop Modeler, [follow the standard importing procedure](/components/modeler/desktop-modeler/element-templates/configuring-templates.md). + +## Working with the SAP OData Connector in Camunda Modeler + +### Modeling options + +To use the **SAP OData Connector** in your process, either change the type of an existing task by clicking on it and using the **wrench-shaped** change type context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](../../../connectors/use-connectors/index.md) to learn more. + +:::note +The configuration options will dynamically change with the selected HTTP method and the OData protocol version. For example, a `payload` field is only displayed when the HTTP method is something other than "GET".
+::: + +![SAP OData connector element template](./img/sap-odata-connector-element-template.png) + +Specifying the `BTP destination name` allows you to reuse existing Destinations from the subaccount or instance level. Authentication and authorizations are maintained at this level, which is why it's not necessary to maintain credentials for the Connector. + +### Advanced capabilities + +In addition to the basic OData settings such as Service, Entity, EntitySet, Method, and OData version, the **Advanced** section allows you to fine-tune `GET` queries to the SAP system with all standard OData parameters. + +For example, supplying `$filter` and `$select` parameters helps reduce the data transferred over the wire, while `$expand` helps retrieve additional entities with a single query. + +![Advanced options of the SAP OData connector element template](./img/sap-odata-connector-element-template-advanced.png) + +### Query result structure + +The result of any query, whether it is reading or writing to the SAP system, is in JSON format in the following structure: + +```json +{ + result: <query result>, + statusCode: <HTTP status code>, + countOrInlineCount: <number of results> +} +``` + +- `result` contains the result of the query, whether it is content retrieved from a SAP system via `GET` or the result of a write or update operation via `POST`, `PUT`, `PATCH`, or `DELETE`. (Note that with the latter, the `result` is always empty.) +- `statusCode` holds the [HTTP status code](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status) of the operation. +- `countOrInlineCount` is only present in the response when the corresponding option `$inlinecount` (for OData v2) or `$count` (for OData v4) was selected at design time of the BPMN task. It then shows the number of results from the `GET` query to the SAP system. + +![the output mapping of the SAP OData element template](./img/sap-odata-connector-element-template-result.png) + +The query result can either be mapped to a single result variable or worked on [via FEEL with an expression](/components/connectors/use-connectors/index.md#result-expression). The same is applicable to `getResponse`, as a result variable contains the described query JSON in its entirety. The result expression `{getStatusCode: statusCode}` would only hold the HTTP status code in the `getStatusCode` process variable. + +### Error handling + +The SAP OData Connector allows handling of query errors directly in the model. This means an OData error is relayed to the process instance in the reserved variables `bpmnError` and `error` and can be processed accordingly. + +1. Equip the Connector task with an error handling expression such as: + +```js +if error.code = "400" then + bpmnError("400", "client request is bad", { errorMessage: error.message, errorCode: error.code }) +else if error.code = "404" then + bpmnError("404", "queried resource not found", { errorMessage: error.message, errorCode: error.code }) +else if error.code = "500" then + bpmnError("500", "server error", { errorMessage: error.message, errorCode: error.code }) +else if error.code = "503" then + bpmnError("503", "I'm just a proxy teapot", { errorMessage: error.message, errorCode: error.code }) +else + null +``` + +![Error handling expression in the SAP OData Connector element template](./img/odata-connector-error-expression.png) + +2. Specifically note the third parameter to `bpmnError`: + +```js +{ errorMessage: error.message, errorCode: error.code } +``` + +This relays the error message and code to the next step in the process flow. + +3.
Equip the BPMN task with an error boundary event: + +![error boundary event on SAP OData connector](./img/sap-odata-connector-task-error-handling2.png) + +If the SAP OData Connector encounters an error, the boundary event will catch the error and continue the process flow. The error boundary event can receive these configuration parameters to contain further error details: + +![error output mapping](./img/sap-odata-connector-task-error-handling1.png) + +- `errorMessage`: Contains a verbose version of the error message and cause and relays it into the process scope as `ov_errorMessage`. +- `errorCode`: Holds a predefined value describing the scope of the error, relaying it to the process scope as `errorCode`. It can be one of the following: + - `INVALID_PAYLOAD`: The payload of the request was detected as erroneous by the server. + - `REQUEST_ERROR`: The request contained an error, for example, a wrong combination of `GET` query parameters. + - `GENERIC_ERROR` + - `DESTINATION_ERROR`: An error occurred while claiming the Destination from the runtime environment. +- `error`: The serialized Error object, available in the example above as `ov_error`. + +## Tips + +- Ensure the connection from the Cloud Foundry environment via the destination to the SAP systems works. Using the [Terminal in Business Application Studio](https://community.sap.com/t5/technology-blogs-by-sap/how-to-check-the-connectivity-to-your-backend-system-in-business/ba-p/13479832) is a quick way to verify this. +- Validate requests first in an API client before trying with the SAP OData Connector in Modeler. Then, copy over to the element template fields. This saves time and reduces potential error. +- Any payload size <= 2.5MB can be considered safe. diff --git a/docs/components/early-access/alpha/sap/rfc-connector.md b/docs/components/early-access/alpha/sap/rfc-connector.md new file mode 100644 index 00000000000..812981e6a21 --- /dev/null +++ b/docs/components/early-access/alpha/sap/rfc-connector.md @@ -0,0 +1,271 @@ +--- +id: rfc-connector +title: SAP RFC Connector +description: "The SAP RFC Connector is a Java Spring Boot application that runs on SAP BTP." +--- + +The SAP RFC [Connector](/components/connectors/introduction.md) is a [protocol and outbound Connector](/components/connectors/connector-types.md). This Connector is a Java Spring Boot application that runs on SAP Business Technology Platform (BTP). + +It connects to Camunda 8 SaaS, and utilizes SAP BTP's [Destination](https://learning.sap.com/learning-journeys/administrating-sap-business-technology-platform/using-destinations) and [Connectivity](https://help.sap.com/docs/connectivity/sap-btp-connectivity-cf/what-is-sap-btp-connectivity) concepts to query a SAP system via the RFC protocol to interact with remote-enabled Function Modules and BAPIs. + +:::note Important! +This Connector is an alpha feature available upon request. Visit [our contact page](/reference/contact.md) to contact us. +::: + +## Overview + +For a standard overview of the steps involved in the SAP RFC Connector, see the following diagram: + +![RFC overview](./img/rfc-overview.png) + +## Prerequisites + +To run the SAP RFC Connector Docker image, the following SAP infrastructure setup is required: + +- [Cloud Foundry CLI](https://github.com/cloudfoundry/cli) with the [multiapps plugin](https://github.com/cloudfoundry/multiapps-cli-plugin) installed on the machine executing the deployment. 
+- SAP BTP subaccount with a [Cloud Foundry environment](https://discovery-center.cloud.sap/serviceCatalog/cloud-foundry-runtime?region=all) enabled and a [created space](https://help.sap.com/docs/btp/sap-business-technology-platform/create-spaces). +- A minimum of [1 GB storage quota and 2 GB runtime memory](https://help.sap.com/docs/btp/sap-business-technology-platform/managing-space-quota-plans). +- [Entitlements](https://help.sap.com/docs/btp/sap-business-technology-platform/managing-entitlements-and-quotas-using-cockpit) for: + - [Connectivity Service](https://discovery-center.cloud.sap/serviceCatalog/connectivity-service?region=all), `lite` plan (needed if the SAP system is on-premises). + - [Destination Service](https://discovery-center.cloud.sap/serviceCatalog/destination?service_plan=lite&region=all&commercialModel=btpea), `lite` plan. + - [Authorization and Trust Management Service](https://discovery-center.cloud.sap/serviceCatalog/authorization-and-trust-management-service?region=all), `application` plan. +- One or more instance- or subaccount-level Destinations, pointing to the SAP systems to communicate with. + ![btp-destination-rfc](./img/btp-destination-rfc.png) +- Ensure `Additional Properties` set on the Destination are aligned with those of your Connector or remote SAP system. + +## Deployment to BTP + +Unlike other Camunda Connectors, the SAP RFC Connector must be deployed as a Java application. This is because it uses SAP's [JCo Java library](https://support.sap.com/en/product/connectors/jco.html) to connect via RFC to the configured SAP system. You must build the application yourself, as the JCo library's license prohibits redistribution, so Camunda cannot pre-build it for you. + +### Building the Java application + +1. In the application folder, navigate to `src/main/resources/application.properties` and insert the credentials for the cluster the SAP RFC Connector should connect to: + +```properties +zeebe.client.cloud.region=xxx +zeebe.client.cloud.clusterId=guid +zeebe.client.cloud.clientId=yyy +zeebe.client.cloud.clientSecret=zzz +camunda.connector.polling.enabled=false +``` + +2. Copy the deployment descriptor `mta.yaml.example` to `mta.yaml` and enter the same credentials in the `modules.properties` scope: + +```yaml +_schema-version: 3.3.0 +ID: sap-rfc-connector +# ... +modules: + - name: sap-rfc-connector + # ... + properties: + ZEEBE_CLIENT_CLOUD_CLUSTERID: 'guid' + ZEEBE_CLIENT_CLOUD_CLIENTID: 'xxx' + ZEEBE_CLIENT_CLOUD_CLIENT_SECRET: 'yyy' + ZEEBE_CLIENT_CLOUD_REGION: 'zzz' +``` + +3. Adjust any property describing an infrastructure setting to your requirements. For example, if a pre-existing destination instance is to be used, adjust the respective resource name. Otherwise, the deployment will create any of the services listed in `resources` for you. +4. Build the deployable archive via `$> mbt build`. + +### Deploying the Java application + +1. Log in to the desired SAP BTP subaccount via the [Cloud Foundry `cli`](https://github.com/cloudfoundry/cli) (cf-cli): + +```shell +$> cf login +API endpoint: https://api.cf. ... +... +``` + +2. Deploy the SAP RFC Connector via the `cf-cli`. Note that this requires [the "multiapps" plugin of Cloud Foundry](https://github.com/cloudfoundry/multiapps-cli-plugin) to be installed on the machine the deployment runs on. + +```shell +$> cf deploy mta_archives/*.mtar # append the -f flag to short-circuit ongoing deployments +Deploying multi-target app archive .mtar in org / space as you@example.org .. +...
+Application "sap-rfc-connector" started and available at "some.url.hana.ondemand.com" +``` + +### Deployment in Camunda 8 SaaS + +- If using Web Modeler, [import the SAP RFC Connector's element template](/components/connectors/manage-connector-templates.md#importing-existing-connector-templates) contained in the repository in `element-templates/sap-rfc-connector.json` for design use. + +![sap-rfc-connector-task-in-model](./img/sap-rfc-connector-task-in-model.png) + +- If using Desktop Modeler, [follow the standard importing procedure](/components/modeler/desktop-modeler/element-templates/configuring-templates.md). + +## Working with the SAP RFC Connector in Camunda Modeler + +### Modeling options + +To use the **SAP RFC Connector** in your process, either change the type of existing task by clicking on it and using the **wrench-shaped** change type context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. + +![sap-rfc-connector-task-in-model](./img/sap-rfc-connector-element-template.png) + +First, choose whether to call a `BAPI` or a Function Module (`FM`). + +Then, provide the `exporting`-, `importing-`, and `tables` parameters as lists of objects. + +All object entries in the list look similar to `[{name:"param", type:"type"}]`, pointing to the parameter name of the BAPI/FM and its type. For example, `[{name:"PERSON_IN_CHARGE_FROM", type:"BAPI0012_GEN-PERS_IN_CHRG"}]`. + +For those with experience in `ABAP`, the configuration options are similar. + +### Sending variables to the RFC target + +The `exporting parameter` is sent to the RFC target. The object structure generally looks like `[{name: "param", type: "type", value: }]`. + +Example: + +```json +[ + { + "name": "CONTROLLINGAREA", + "type": "BAPI0012_GEN-CO_AREA", + "value": "1000" + } +] +``` + +This corresponds with the BAPI's/FM's `importing` definition, meaning it imports these variables from the RFC call: + +```ABAP +*" IMPORTING <-- this is the BAPI/FM - don't be confused! In Camunda, this is "exporting" :) +*" VALUE(CONTROLLINGAREA) LIKE BAPI0012_GEN-CO_AREA +``` + +### Receiving variables from the RFC target + +`Importing parameter` is what is expected back from the RFC target. They are configured in the same "list of objects" style pattern in the element template as the other parameters and generally look like `[{name: "param", type: "type"}]`. + +Example: + +```json +[ + { + "name": "DETAIL_DATA", + "type": " BAPI1079_DETAIL" + } +] +``` + +This corresponds with the BAPI's/FM's `exporting` definition, meaning it exports these variables to the caller: + +```ABAP +*" EXPORTING +*" VALUE(DETAIL_DATA) LIKE BAPI1079_DETAIL +``` + +### Special cases: sending and/or receiving a "table" and a "changing" structure + +#### tables + +The `tables parameter` can be both "exporting" and "importing". + +:::warning +Sending tables as tabular data to an RFC target is not yet supported. +::: + +```json +{ + "name": "COSTCENTERLIST", + "type": "BAPI0012_CCLIST" +} +``` + +The above example is an object parameter in the `tables parameter` section that describes a result table to be received back from the RFC call. 
It conforms with the BAPI `BAPI_COSTCENTER_GETLIST1` parameter definition on the SAP system: + +```ABAP +*" TABLES +*" COSTCENTERLIST STRUCTURE BAPI0012_CCLIST +``` + +The same applies to the return structure `BAPIRET2`, which denotes the result status of the RFC call: + +```json +{ + "name": "BAPIRET2", + "isReturn": true +} +``` + +This aligns with the BAPI definition: + +```ABAP +*" TABLES +*" .... +*" RETURN STRUCTURE BAPIRET2 +``` + +#### changing + +A `changing parameter` is a variable received by an RFC target that is processed, changed, and returned. It is only available for `FM`-type RFC targets in the SAP RFC Connector. The overall structure is `[{name: "param", type: "type", value: }]`. + +Example: + +```json +[ + { + "name": "CV_RESULT", + "type": "I", + "value": "100" + } +] +``` + +The value `100` is sent to the Function Module and sent back as `CV_RESULT`. + +## Query result structure + +### BAPI + +The result of a call to a BAPI holds the following JSON structure: + +```json +{ + tables: [ + { ... } + ], + importing: { + ... + } +} +``` + +`tables` holds a representation of the result tables configured. + +`importing` is the result of what was sent to the BAPI in the `exporting` section above. + +### Function Module + +The result of a call to a Function Module holds the following JSON structure: + +```json +{ + tables: [ + { ... } + ], + importing: [ + { ... } + ], + changing: [ + { ... } + ] +} +``` + +- `tables` holds a representation of the result tables configured. +- `importing` is the result of what was sent to the Function Module in the `exporting` section above. +- `changing` is the result of what was sent to the Function Module in the `changing` section above. + +## Error handling + +The SAP RFC Connector allows handling of query errors directly in the model. This means an RFC error is relayed to the process instance in the reserved variables `bpmnError` and `error` and can be processed accordingly. The error code can be one of the following values: + +``` +DESTINATION_ERROR, +REQUEST_EXECUTION_ERROR, +REQUEST_SERIALIZATION_ERROR, +JCO_RUNTIME_ERROR, +GENERIC_ERROR +``` diff --git a/docs/components/early-access/experimental/rpa/camunda-rpa-framework-library.md b/docs/components/early-access/experimental/rpa/camunda-rpa-framework-library.md new file mode 100644 index 00000000000..f4f140d7962 --- /dev/null +++ b/docs/components/early-access/experimental/rpa/camunda-rpa-framework-library.md @@ -0,0 +1,101 @@ +--- +id: rpa-framework-library +title: RPA framework library +description: "Learn how Camunda offers a custom library for the RPA framework, providing out-of-the-box functionality to manipulate Camunda variables within RPA scripts." +--- + +Camunda offers a custom library for the **robotic process automation (RPA)** framework that allows you to manipulate [Camunda variables](/components/concepts/variables.md) within RPA scripts. + +## Import the Camunda library + +To use Camunda-provided keywords, import the Camunda library into your **Robot** file. The Camunda library is only available within the [Camunda RPA runtime](/components/early-access/experimental/rpa/rpa-integration-with-camunda.md). + +```robot +*** Settings *** +Library Camunda + +# More Libraries here ... +``` + +## Use keywords and set variables + +To set a process variable in Camunda from within an RPA script, use the keyword `Set Output Variable`. This keyword sets an output variable to be returned from the RPA worker to Camunda. Learn more about [variable propagation](/components/concepts/variables.md#inputoutput-variable-mappings).
+ +### Syntax + +```robot +Set Output Variable {variableName} {value} +``` + +### Parameters + +- \{variable_name}: The name of the variable you want to set or update. +- \{value}: The value you want to assign to the variable. + +### Example + +```robot +Set Output Variable customerName John Doe +``` + +This command sets a variable named `customerName` with the value `John Doe` in the current task [context](/components/concepts/variables.md). This will be available in the process scope after task completion. + +## Example usage + +The following example demonstrates a script that solves the first challenge at [rpachallenge.com](https://rpachallenge.com/) and returns the result message to Camunda: + +```robot +*** Settings *** +Documentation Robot to solve the first challenge at rpachallenge.com, +... which consists of filling a form that randomly rearranges +... itself for ten times, with data taken from a provided +... Microsoft Excel file. Return Congratulation message to Camunda. + +Library RPA.Browser.Playwright +Library RPA.Excel.Files +Library RPA.HTTP +Library Camunda + +*** Tasks *** +Complete the challenge + Start the challenge + Fill the forms + Collect the results + +*** Keywords *** +Start the challenge + New Browser headless=false + New Page http://rpachallenge.com/ + RPA.HTTP.Download + ... http://rpachallenge.com/assets/downloadFiles/challenge.xlsx + ... overwrite=True + Click button + +Fill the forms + ${people}= Get the list of people from the Excel file + FOR ${person} IN @{people} + Fill and submit the form ${person} + END + +Get the list of people from the Excel file + Open Workbook challenge.xlsx + ${table}= Read Worksheet As Table header=True + Close Workbook + RETURN ${table} + +Fill and submit the form + [Arguments] ${person} + Fill Text //input[@ng-reflect-name="labelFirstName"] ${person}[First Name] + Fill Text //input[@ng-reflect-name="labelLastName"] ${person}[Last Name] + Fill Text //input[@ng-reflect-name="labelCompanyName"] ${person}[Company Name] + Fill Text //input[@ng-reflect-name="labelRole"] ${person}[Role in Company] + Fill Text //input[@ng-reflect-name="labelAddress"] ${person}[Address] + Fill Text //input[@ng-reflect-name="labelEmail"] ${person}[Email] + Fill Text //input[@ng-reflect-name="labelPhone"] ${person}[Phone Number] + Click input[type=submit] + +Collect the results + ${resultText}= Get Text selector=css=div.congratulations .message2 + Set Output Variable resultText ${resultText} + Close Browser +``` diff --git a/docs/components/early-access/experimental/rpa/img/rpa-task-example.png b/docs/components/early-access/experimental/rpa/img/rpa-task-example.png new file mode 100644 index 00000000000..e3c84b9d0a6 Binary files /dev/null and b/docs/components/early-access/experimental/rpa/img/rpa-task-example.png differ diff --git a/docs/components/early-access/experimental/rpa/img/testing-rpa.png b/docs/components/early-access/experimental/rpa/img/testing-rpa.png new file mode 100644 index 00000000000..676d8093fe6 Binary files /dev/null and b/docs/components/early-access/experimental/rpa/img/testing-rpa.png differ diff --git a/docs/components/early-access/experimental/rpa/rpa-integration-with-camunda.md b/docs/components/early-access/experimental/rpa/rpa-integration-with-camunda.md new file mode 100644 index 00000000000..efe5d3c0242 --- /dev/null +++ b/docs/components/early-access/experimental/rpa/rpa-integration-with-camunda.md @@ -0,0 +1,84 @@ +--- +id: rpa-integration +title: RPA integration +description: "Learn how Camunda enables orchestration of 
simple robotic process automation (RPA) tasks within Camunda 8 based on the RPA Framework." +--- + +Camunda enables orchestration of simple **robotic process automation (RPA)** tasks within Camunda 8 based on the [RPA Framework](https://rpaframework.org/). The RPA integration consists of two primary components: RPA runtime and the [Camunda Desktop Modeler](/components/modeler/about-modeler.md) plugin. + +## RPA runtime + +You can use the RPA runtime to run your RPA scripts for both local testing or from your Camunda SaaS [cluster](/guides/create-cluster.md). + +- **For a full development setup**: Visit the [Camunda Prototype RPA Worker GitHub repository](https://github.com/camunda/rpa-runtime). +- **For a Windows setup without Python**: Take the following steps: + +1. Download the [RPA runner repository](https://github.com/camunda/rpa-runtime/archive/refs/heads/main.zip) from GitHub. +2. Unzip the file and navigate to the `rpa-runtime-main` repository folder. +3. Execute `setup.ps1` using PowerShell. If permission issues arise, alter your Execution Policy with the command `Set-ExecutionPolicy RemoteSigned`. +4. Enter your credentials into the `.env` file. See [how to create credentials](/components/console/manage-clusters/manage-api-clients.md#create-a-client). +5. Start the RPA worker locally by running `start.ps1` in PowerShell. To stop the worker, close the PowerShell terminal. + +### Directories + +The RPA runtime manages files locally, with directories structured as follows: + +- `/rpaFiles`: Contains the deployed scripts, with filenames matching the names used in your process. +- `/workers/{scriptName}/{jobId}/`: Stores logs and runtime data for your workers, where the job ID corresponds to the Zeebe job ID when executing tasks from Camunda. + +## Camunda Desktop Modeler plugin + +:::note +Desktop Modeler version 5.29.0 or higher is required for this plugin. Until its release on November 8th 2024, use the [nightly build](https://downloads.camunda.cloud/release/camunda-modeler/nightly/). +::: + +### Installation + +For more information on plugin installation, visit the [plugin installation guide](/components/modeler/desktop-modeler/plugins/plugins.md). + +To install the RPA plugin, take the following steps: + +1. Download the latest version of the [RPA plugin](http://github.com/camunda/camunda-modeler-rpa-plugin/zipball/latest/) from GitHub. +2. Extract the folder to `resources/plugins` within your Desktop Modeler installation directory. +3. Ensure the path is structured correctly: `/resources/plugins//index.js`. +4. Launch or restart Desktop Modeler. + +You can now open and edit `.robot` files in Desktop Modeler. + +### Robot development + +To create or edit a `.robot` file, use the welcome page or file dialog in Modeler. + +The editor allows for script development, with all RPA Framework libraries listed in the [RPA Framework documentation](https://rpaframework.org/#libraries). Camunda also offers a [Camunda library extension](/components/early-access/experimental/rpa/camunda-rpa-framework-library.md) to integrate with Camunda and return variables from your RPA scripts, for example. + +Currently, only a single `.robot` file is supported, and adding custom Python files or libraries is not possible. + +### Testing + +To test your script, select the **Play** icon in the bottom left corner of Desktop Modeler to access the **Robot Testing** tab. 
+ +![RPA testing example](./img/testing-rpa.png) + +You can now run the script with the expected input from the process on your local RPA runtime. Once the script completes, select **Show log** to see a more detailed view of the run. + +Refer to the [Camunda library documentation](/components/early-access/experimental/rpa/camunda-rpa-framework-library.md) for information on returning variables from your RPA script to your process. + +### Deployment + +Once your script is finalized and tested, take the following steps: + +1. Select the **Deploy** icon next to the **Run** icon in the footer of Desktop Modeler to deploy this to the RPA runner. +2. Ensure the script name used during deployment matches the name referenced within your BPMN. + +### Using Robot scripts in a process + +To integrate the script within your process, take the following steps: + +1. In a Camunda 8 Desktop Modeler diagram, create a task if you have not already done so, and change the task to an RPA task by selecting the wrench-shaped change type context menu icon. +2. Use the properties panel to configure the script name and input mappings. The script name should correspond with the deployed script name. For input mappings, specify the required script inputs or set the mapping to null to use process variables directly. + +![rpa task example in Desktop Modeler](./img/rpa-task-example.png) + +:::note +Have questions, a bug report, or an idea for improvement? Visit the [open issue tracker](https://github.com/camunda/rpa-runtime/issues/new/choose) on GitHub. +::: diff --git a/docs/components/early-access/overview.md b/docs/components/early-access/overview.md new file mode 100644 index 00000000000..f5fe5328fec --- /dev/null +++ b/docs/components/early-access/overview.md @@ -0,0 +1,39 @@ +--- +id: overview +title: Overview +sidebar_label: Overview +--- + +Introducing early access - a space to explore new features and components currently in development by Camunda. + +Early access includes two categories of features: + +## Experimental features + +Experimental features are very early-stage ideas and functionalities under exploration. This allows Camunda to innovate quickly and test new concepts. + +You can test out these features and provide additional feedback to help Camunda understand how they perform in real-world scenarios. + +:::note +These features may be unstable, and subject to significant changes or removal. +::: + +## Alpha features + +[Alpha features](/components/early-access/alpha/alpha-features.md) are more developed and closer to becoming part of the product but may not yet be fully optimized or supported. Selected Camunda features and components are released as alpha versions to provide early access. By testing these features, you have the opportunity to participate in their development by sharing feedback before they reach [general availability](/reference/release-policy.md#general-availability-ga). + +## What to expect + +| | Experimental features | Alpha features | +| ---------------------------------- | -------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| Purpose | Early innovation and ideation | Test upcoming features that could make it to GA | +| Suitable for production use | No | No | +| Stability | APIs, dependencies, and configuration are likely to change. | APIs, dependencies, and configuration are likely to change. 
| +| Feature complete | No | No | +| Documentation | May have some or no documentation. | May have some documentation. | +| Updates | No guaranteed updates. May be removed entirely and may not evolve into alpha or official features. | No guaranteed updates to newer releases, but likely to continue development. | +| Support | No dedicated support or SLAs. | Support based on SLAs, with bugs treated as part of regular feature/help requests. | +| Maintenance service | No | No | +| Available on | SaaS/Self-Managed | SaaS/Self-Managed | +| Release cycle | Outside the standard [release policy](/docs/reference/release-policy.md) | Outside the standard [release policy](/docs/reference/release-policy.md) | +| Admin/owner access required | Yes | Yes | diff --git a/docs/components/modeler/bpmn/bpmn-coverage.md b/docs/components/modeler/bpmn/bpmn-coverage.md index c96e34cd921..d0e298d1938 100644 --- a/docs/components/modeler/bpmn/bpmn-coverage.md +++ b/docs/components/modeler/bpmn/bpmn-coverage.md @@ -140,15 +140,19 @@ import CompensationSvg from './assets/bpmn-symbols/compensation.svg' ## Data +:::note +`DataObject` and `DataStore`, like other BPMN standard IO mappings, are supported by Camunda for modeling purposes only. +::: + import DataObjectSvg from './assets/bpmn-symbols/data-object.svg' import DataStoreSvg from './assets/bpmn-symbols/data-store.svg' diff --git a/docs/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md b/docs/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md index 6e3114634eb..73d526f0fcd 100644 --- a/docs/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md +++ b/docs/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md @@ -48,7 +48,7 @@ The `bindingType` attribute determines which version of the called decision is e - `deployment`: The version that was deployed together with the currently running version of the process. - `versionTag`: The latest deployed version that is annotated with the version tag specified in the `versionTag` attribute. -To learn more about choosing binding types, see [Choosing the resource binding type](/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md). +To learn more about choosing binding types, see [choosing the resource binding type](/components/best-practices/modeling/choosing-the-resource-binding-type.md). :::note If the `bindingType` attribute is not specified, `latest` is used as the default. diff --git a/docs/components/modeler/bpmn/call-activities/call-activities.md b/docs/components/modeler/bpmn/call-activities/call-activities.md index fca00892937..b2d602eca05 100644 --- a/docs/components/modeler/bpmn/call-activities/call-activities.md +++ b/docs/components/modeler/bpmn/call-activities/call-activities.md @@ -14,7 +14,7 @@ When the created process instance is completed, the call activity is left and th ## Defining the called process -A call activity must define the BPMN process id of the called process as `processId`. +A call activity must define the BPMN process ID of the called process as `processId`. Usually, the `processId` is defined as a [static value](/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `shipping-process`), but it can also be defined as [expression](/components/concepts/expressions.md) (e.g. `= "shipping-" + tenantId`). The expression is evaluated on activating the call activity and must result in a `string`. 
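For illustration only, a call activity with such a dynamic `processId` could look roughly like the following BPMN XML sketch. The task id, name, and `shipping-` prefix are example values, and the expression reuses the `tenantId` variable from the example above; the leading `=` marks the attribute value as an expression:

```xml
<bpmn:callActivity id="ship-order" name="Ship order">
  <bpmn:extensionElements>
    <!-- processId is evaluated as an expression because of the leading "=" -->
    <zeebe:calledElement processId='= "shipping-" + tenantId' />
  </bpmn:extensionElements>
</bpmn:callActivity>
```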
@@ -24,7 +24,7 @@ The `bindingType` attribute determines which version of the called process is in - `deployment`: The version that was deployed together with the currently running version of the calling process. - `versionTag`: The latest deployed version that is annotated with the version tag specified in the `versionTag` attribute. -To learn more about choosing binding types, see [Choosing the resource binding type](/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md). +To learn more about choosing binding types, see [choosing the resource binding type](/components/best-practices/modeling/choosing-the-resource-binding-type.md). :::note If the `bindingType` attribute is not specified, `latest` is used as the default. @@ -42,10 +42,6 @@ When a non-interrupting boundary event is triggered, the created process instanc ## Variable mappings -By default, all variables of the call activity scope are copied to the created process instance. This can be limited to copying only the local variables of the call activity, by setting the attribute `propagateAllParentVariables` to `false`. - -By disabling this attribute, variables existing at higher scopes are no longer copied. If the attribute `propagateAllParentVariables` is set (default: `true`), all variables are propagated to the child process instance. - Input mappings can be used to create new local variables in the scope of the call activity. These variables are also copied to the created process instance. If the attribute `propagateAllChildVariables` is set (default: `true`), all variables of the created process instance are propagated to the call activity. This behavior can be customized by defining output mappings at the call activity. The output mappings are applied on completing the call activity and only those variables that are defined in the output mappings are propagated. diff --git a/docs/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md b/docs/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md index 56bee823e46..5a1b961b484 100644 --- a/docs/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md +++ b/docs/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md @@ -14,6 +14,8 @@ When an embedded subprocess is entered, the start event is activated. The subpro Embedded subprocesses are often used together with **boundary events**. One or more boundary events can be attached to a subprocess. When an interrupting boundary event is triggered, the entire subprocess (including all active elements) is terminated. +When adding an embedded subprocess to your model, you can either add a collapsed or expanded subprocess. You cannot collapse an existing expanded subprocess in your model. + ## Collapsed subprocesses :::caution @@ -22,7 +24,7 @@ Collapsed subprocesses are currently only partially supported by Optimize. While All other Camunda components fully support collapsed subprocesses. ::: -A subprocess can be collapsed to conceal its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. +A collapsed subprocess conceals its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. 
This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. Collapsed subprocesses serve purely display purposes. For the creation of reusable processes, it is recommended to utilize [call activities](../call-activities/call-activities.md). diff --git a/docs/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md b/docs/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md index e084bd3c5fa..10585deb9d2 100644 --- a/docs/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md +++ b/docs/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md @@ -1,7 +1,7 @@ --- id: exclusive-gateways title: "Exclusive gateway" -description: "An exclusive gateway (or XOR-gateway) allows you to make a decision based on data." +description: "Learn more about exclusive gateways (or XOR-gateways) and their conditions, which allow you to make a decision based on data such as process instance variables." --- An exclusive gateway (or XOR-gateway) allows you to make a decision based on data (i.e. on process instance variables). diff --git a/docs/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md b/docs/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md index 3f8f76ebc18..2b56ab76e44 100644 --- a/docs/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md +++ b/docs/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md @@ -1,10 +1,10 @@ --- id: inclusive-gateways title: "Inclusive gateway" -description: "An inclusive gateway (or OR-gateway) allows you to make multiple decisions based on data." +description: "Learn more about inclusive gateways, which allow for making multiple decisions based on data or process instance variables, and can be diverging or converging." --- -The inclusive gateway (or OR-gateway) allows for making multiple decisions based on data, or process instance variables. Inclusive gateways can be diverging (a sequence flow is split into multiple paths) or converging (split paths are merged before continuing). +The inclusive gateway (or OR-gateway) allows for making multiple decisions based on data or process instance variables. Inclusive gateways can be diverging (a sequence flow is split into multiple paths) or converging (split paths are merged before continuing). ![A process model to prepare lunch at lunchtime can use an inclusive gateway to decide which steps to take to prepare the different lunch components, e.g. cook pasta,stir-fry steak, prepare salad, or any combination of these.](assets/inclusive-gateway.png) diff --git a/docs/components/modeler/bpmn/manual-tasks/manual-tasks.md b/docs/components/modeler/bpmn/manual-tasks/manual-tasks.md index 5d2ac406614..d444261ac2f 100644 --- a/docs/components/modeler/bpmn/manual-tasks/manual-tasks.md +++ b/docs/components/modeler/bpmn/manual-tasks/manual-tasks.md @@ -4,18 +4,15 @@ title: "Manual tasks" description: "A manual task defines a task that is external to the BPM engine." --- -A manual task defines a task that is external to the BPM engine. This is used to model work that is done -by somebody who the engine does not need to know of and there is no known system or UI interface. - -For the engine, a manual task is handled as a pass-through activity, automatically continuing the -process at the moment the process instance arrives. +A manual task defines a task that requires human interaction but no external tooling or UI interface. 
For example, a user reviewing a document or completing a physical task. ![task](assets/manual-task.png) -Manual tasks have no real benefit for automating processes. Manual tasks instead provide insights into the tasks -that are performed outside of the process engine. +Manual tasks are part of [human task orchestration](/guides/getting-started-orchestrate-human-tasks.md), but differ from [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) which define an actionable task assisted by a business process execution engine or software application. + +Within the engine and BPMN model, a manual task is handled as a pass-through activity, automatically continuing the process at the moment the process instance arrives. -## Additional resources +Manual tasks provide insights into the tasks performed outside the process engine, aiding in modeling a process, though no linked automation process is utilized. ### XML representation diff --git a/docs/components/modeler/bpmn/message-events/message-events.md b/docs/components/modeler/bpmn/message-events/message-events.md index 9be8722db94..e9c7e67d484 100644 --- a/docs/components/modeler/bpmn/message-events/message-events.md +++ b/docs/components/modeler/bpmn/message-events/message-events.md @@ -12,7 +12,7 @@ Message events are events which reference a message; they are used to wait until A process can have one or more message start events (besides other types of start events). Each of the message events must have a unique message name. -When a process is deployed, it creates a message subscription for each message start event. Message subscriptions of the previous version of the process (based on the BPMN process id) are closed. +When a process is deployed, it creates a message subscription for each message start event. Message subscriptions of the previous version of the process (based on the BPMN process ID) are closed. ### Message correlation diff --git a/docs/components/modeler/bpmn/multi-instance/multi-instance.md b/docs/components/modeler/bpmn/multi-instance/multi-instance.md index 6817a484f57..acc0429530a 100644 --- a/docs/components/modeler/bpmn/multi-instance/multi-instance.md +++ b/docs/components/modeler/bpmn/multi-instance/multi-instance.md @@ -1,7 +1,7 @@ --- id: multi-instance title: "Multi-instance" -description: "A multi-instance activity is executed multiple times - once for each element of a given collection." +description: "Learn about multi-instance activities like service tasks and receive tasks. These can be executed multiple times - once for each element of a given collection." --- A multi-instance activity is executed multiple times - once for each element of a given collection (like a _foreach_ loop in a programming language). diff --git a/docs/components/modeler/bpmn/service-tasks/service-tasks.md b/docs/components/modeler/bpmn/service-tasks/service-tasks.md index f445b0a417c..37c2e72cde9 100644 --- a/docs/components/modeler/bpmn/service-tasks/service-tasks.md +++ b/docs/components/modeler/bpmn/service-tasks/service-tasks.md @@ -1,7 +1,7 @@ --- id: service-tasks title: "Service tasks" -description: "A service task represents a work item in the process with a specific type." +description: "Learn more about service tasks which represent a work item in the process with a specific type. When a service task is entered, a corresponding job is created." --- A service task represents a work item in the process with a specific type. 
diff --git a/docs/components/modeler/bpmn/signal-events/signal-events.md b/docs/components/modeler/bpmn/signal-events/signal-events.md index c805e9da582..647ec8bc643 100644 --- a/docs/components/modeler/bpmn/signal-events/signal-events.md +++ b/docs/components/modeler/bpmn/signal-events/signal-events.md @@ -18,8 +18,8 @@ Broadcasting a signal iterates over the available subscriptions. If the name of name of the signal start event, the process instance is created. Signal subscriptions only exist for the latest version of a process definition. Deploying a new version of the same -process (based on the BPMN process id) will delete the old signal subscription. A new subscription is opened for the -new deployed process definition. When the latest version of a process is deleted, the signal subscription is also deleted. If the previous version of the same process (based on the BPMN process id) contains a signal start event, a new subscription +process (based on the BPMN process ID) will delete the old signal subscription. A new subscription is opened for the +new deployed process definition. When the latest version of a process is deleted, the signal subscription is also deleted. If the previous version of the same process (based on the BPMN process ID) contains a signal start event, a new subscription is opened for it. ## Signal intermediate catch events diff --git a/docs/components/modeler/bpmn/timer-events/timer-events.md b/docs/components/modeler/bpmn/timer-events/timer-events.md index 30b6b06b9c3..af3ae28e02b 100644 --- a/docs/components/modeler/bpmn/timer-events/timer-events.md +++ b/docs/components/modeler/bpmn/timer-events/timer-events.md @@ -1,7 +1,7 @@ --- id: timer-events title: "Timer events" -description: "Timer events are events triggered by a defined timer." +description: "Learn about events triggered by a timer, including timer start events, intermediate timer catch events, and interrupting/non-interrupting timer boundary events." --- Timer events are events triggered by a defined timer. @@ -12,7 +12,7 @@ Timer events are events triggered by a defined timer. A process can have one or more timer start events (besides other types of start events). Each of the timer events must have either a time date or time cycle definition. -When a process is deployed, it schedules a timer for each timer start event. Scheduled timers of the previous version of the process (based on the BPMN process id) are canceled. +When a process is deployed, it schedules a timer for each timer start event. Scheduled timers of the previous version of the process (based on the BPMN process ID) are canceled. When a timer is triggered, a new process instance is created and the corresponding timer start event is activated. diff --git a/docs/components/modeler/bpmn/user-tasks/user-tasks.md b/docs/components/modeler/bpmn/user-tasks/user-tasks.md index b8ad4445655..90a64a6191c 100644 --- a/docs/components/modeler/bpmn/user-tasks/user-tasks.md +++ b/docs/components/modeler/bpmn/user-tasks/user-tasks.md @@ -4,8 +4,9 @@ title: "User tasks" description: "A user task is used to model work that needs to be done by a human actor." --- -A user task is used to model work that needs to be done by a human actor. When -the process instance arrives at such a user task, a new user task instance is created at Zeebe. +A user task is used to model work that needs to be done by a human and is assisted by a business process execution engine or software application. 
This differs from [manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md), which are not assisted by external tooling. + +When the process instance arrives at a user task, a new user task instance is created at Zeebe. The process instance stops at this point and waits until the user task instance is completed. When the user task instance is completed, the process instance continues. @@ -125,7 +126,7 @@ Depending on your use case, two different types of form references can be used: - `deployment`: The version that was deployed together with the currently running version of the process. - `versionTag`: The latest deployed version that is annotated with the version tag specified in the `versionTag` attribute. - To learn more about choosing binding types, see [Choosing the resource binding type](/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md). + To learn more about choosing binding types, see [choosing the resource binding type](/components/best-practices/modeling/choosing-the-resource-binding-type.md). :::note If the `bindingType` attribute is not specified, `latest` is used as the default. @@ -161,7 +162,7 @@ A user task does not have to be managed by Zeebe. Instead, you can also use job workers to implement a custom user task logic. Note that you will lose all the task lifecycle and state management features that Zeebe provides and will have to implement them yourself. Use job workers only in case you require a very specific implementation of user tasks that can't be implemented on top of Zeebe user tasks. :::info -If you started using Camunda 8 with version 8.4 or a lower version and upgraded to 8.5 or newer, your user tasks are probably implemented as job workers. Refer to the [migration guide](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md) to find a detailed list of the differences between the task implementation types and learn how to migrate to Zeebe user tasks. +If you started using Camunda 8 with version 8.4 or a lower version and upgraded to 8.5 or newer, your user tasks are probably implemented as job workers. Refer to the [migration guide](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md) to find a detailed list of the differences between the task implementation types and learn how to migrate to Zeebe user tasks. ::: You can define a job worker implementation for a user task by removing its `zeebe:userTask` extension element. 
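As a minimal sketch (the element ids and names are illustrative), the difference is only the presence or absence of the `zeebe:userTask` extension element in the BPMN XML:

```xml
<!-- Zeebe user task, managed by Zeebe -->
<bpmn:userTask id="approve-order" name="Approve order">
  <bpmn:extensionElements>
    <zeebe:userTask />
  </bpmn:extensionElements>
</bpmn:userTask>

<!-- job worker implementation: the same task without the zeebe:userTask extension element -->
<bpmn:userTask id="approve-order-job-worker" name="Approve order" />
```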
diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/chooser.png b/docs/components/modeler/desktop-modeler/element-templates/img/chooser.png index 3f7dbbb7329..ff0ff569ca6 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/chooser.png and b/docs/components/modeler/desktop-modeler/element-templates/img/chooser.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/entries-visible.png b/docs/components/modeler/desktop-modeler/element-templates/img/entries-visible.png index efe55182bce..73237b9b09e 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/entries-visible.png and b/docs/components/modeler/desktop-modeler/element-templates/img/entries-visible.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/field-dropdown.png b/docs/components/modeler/desktop-modeler/element-templates/img/field-dropdown.png index 4f1f4caca2f..5f46c557aa8 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/field-dropdown.png and b/docs/components/modeler/desktop-modeler/element-templates/img/field-dropdown.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/groups.png b/docs/components/modeler/desktop-modeler/element-templates/img/groups.png index 655d1064023..73c4b142c33 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/groups.png and b/docs/components/modeler/desktop-modeler/element-templates/img/groups.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/icons.png b/docs/components/modeler/desktop-modeler/element-templates/img/icons.png index f2915e63f88..2e19106d275 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/icons.png and b/docs/components/modeler/desktop-modeler/element-templates/img/icons.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/modal.png b/docs/components/modeler/desktop-modeler/element-templates/img/modal.png index 3c2ee1bd436..172ad44a3d2 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/modal.png and b/docs/components/modeler/desktop-modeler/element-templates/img/modal.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/overview.png b/docs/components/modeler/desktop-modeler/element-templates/img/overview.png index 875826dfa79..03eeb014bbf 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/overview.png and b/docs/components/modeler/desktop-modeler/element-templates/img/overview.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/template-not-found.png b/docs/components/modeler/desktop-modeler/element-templates/img/template-not-found.png index 9390046ea2a..7536242e188 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/template-not-found.png and b/docs/components/modeler/desktop-modeler/element-templates/img/template-not-found.png differ diff --git a/docs/components/modeler/desktop-modeler/element-templates/img/unlink-remove.png b/docs/components/modeler/desktop-modeler/element-templates/img/unlink-remove.png index 1fdfc085ae0..33d2599b1ba 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/unlink-remove.png and b/docs/components/modeler/desktop-modeler/element-templates/img/unlink-remove.png differ diff --git 
a/docs/components/modeler/desktop-modeler/element-templates/img/update-template.png b/docs/components/modeler/desktop-modeler/element-templates/img/update-template.png index 01cb74d5371..a30d318e7d9 100644 Binary files a/docs/components/modeler/desktop-modeler/element-templates/img/update-template.png and b/docs/components/modeler/desktop-modeler/element-templates/img/update-template.png differ diff --git a/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png b/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png index 01933cce86d..98e051780ef 100644 Binary files a/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png and b/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png differ diff --git a/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png b/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png index ee0710560a9..a96a3ae0229 100644 Binary files a/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png and b/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png differ diff --git a/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png b/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png index d91a0981d1d..15bb3157988 100644 Binary files a/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png and b/docs/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png differ diff --git a/docs/components/modeler/desktop-modeler/img/deploy-icon.png b/docs/components/modeler/desktop-modeler/img/deploy-icon.png index f322ad01657..d7d6c03f478 100644 Binary files a/docs/components/modeler/desktop-modeler/img/deploy-icon.png and b/docs/components/modeler/desktop-modeler/img/deploy-icon.png differ diff --git a/docs/components/modeler/desktop-modeler/img/element-configuration.png b/docs/components/modeler/desktop-modeler/img/element-configuration.png index 4c7dde4b4bd..107fc5294b6 100644 Binary files a/docs/components/modeler/desktop-modeler/img/element-configuration.png and b/docs/components/modeler/desktop-modeler/img/element-configuration.png differ diff --git a/docs/components/modeler/desktop-modeler/img/elements.png b/docs/components/modeler/desktop-modeler/img/elements.png index 30f4e2712c1..318b79994e4 100644 Binary files a/docs/components/modeler/desktop-modeler/img/elements.png and b/docs/components/modeler/desktop-modeler/img/elements.png differ diff --git a/docs/components/modeler/desktop-modeler/img/empty.png b/docs/components/modeler/desktop-modeler/img/empty.png index 5ddc75fc564..13c4537d138 100644 Binary files a/docs/components/modeler/desktop-modeler/img/empty.png and b/docs/components/modeler/desktop-modeler/img/empty.png differ diff --git a/docs/components/modeler/desktop-modeler/img/new-diagram.png b/docs/components/modeler/desktop-modeler/img/new-diagram.png index e3f7c77299b..9c307896cf9 100644 Binary files a/docs/components/modeler/desktop-modeler/img/new-diagram.png and b/docs/components/modeler/desktop-modeler/img/new-diagram.png differ diff --git a/docs/components/modeler/desktop-modeler/img/properties-panel.png b/docs/components/modeler/desktop-modeler/img/properties-panel.png index a44d967f2d3..d3df1428c56 100644 Binary files a/docs/components/modeler/desktop-modeler/img/properties-panel.png and 
b/docs/components/modeler/desktop-modeler/img/properties-panel.png differ diff --git a/docs/components/modeler/desktop-modeler/img/start-instance-icon.png b/docs/components/modeler/desktop-modeler/img/start-instance-icon.png index 4fa881c5de1..52851547bf6 100644 Binary files a/docs/components/modeler/desktop-modeler/img/start-instance-icon.png and b/docs/components/modeler/desktop-modeler/img/start-instance-icon.png differ diff --git a/docs/components/modeler/desktop-modeler/img/start-instance-step-1.png b/docs/components/modeler/desktop-modeler/img/start-instance-step-1.png index 2cb53fc17fb..b5c1c93de79 100644 Binary files a/docs/components/modeler/desktop-modeler/img/start-instance-step-1.png and b/docs/components/modeler/desktop-modeler/img/start-instance-step-1.png differ diff --git a/docs/components/modeler/desktop-modeler/img/start-instance-step-2.png b/docs/components/modeler/desktop-modeler/img/start-instance-step-2.png index 974cff13f16..6087b144438 100644 Binary files a/docs/components/modeler/desktop-modeler/img/start-instance-step-2.png and b/docs/components/modeler/desktop-modeler/img/start-instance-step-2.png differ diff --git a/docs/components/modeler/desktop-modeler/img/start-instance-successful.png b/docs/components/modeler/desktop-modeler/img/start-instance-successful.png index 6704af8efbb..dcf93e44c8a 100644 Binary files a/docs/components/modeler/desktop-modeler/img/start-instance-successful.png and b/docs/components/modeler/desktop-modeler/img/start-instance-successful.png differ diff --git a/docs/components/modeler/desktop-modeler/install-the-modeler.md b/docs/components/modeler/desktop-modeler/install-the-modeler.md index 9f4328b2ddc..6bd325655ec 100644 --- a/docs/components/modeler/desktop-modeler/install-the-modeler.md +++ b/docs/components/modeler/desktop-modeler/install-the-modeler.md @@ -2,7 +2,7 @@ id: install-the-modeler title: Install Desktop Modeler sidebar_label: Installation -description: "Learn how to install Camunda Desktop Modeler." +description: "Learn how to install Camunda Desktop Modeler, a desktop application for modeling BPMN, DMN, and Forms and support building executable diagrams with Camunda." --- Download [Desktop Modeler](./index.md) for Windows, macOS, and Linux from the [Camunda downloads page](https://camunda.com/download/modeler/). diff --git a/docs/components/modeler/desktop-modeler/plugins/plugins.md b/docs/components/modeler/desktop-modeler/plugins/plugins.md index aec6a119f91..0435c1a3e8e 100644 --- a/docs/components/modeler/desktop-modeler/plugins/plugins.md +++ b/docs/components/modeler/desktop-modeler/plugins/plugins.md @@ -193,7 +193,7 @@ npm run build When creating a plugin, you can place the directory containing your plugin in the aforementioned `resources/plugins` directory. -Plugins will be loaded on application startup (menu plugins) or reload (style and modeling tool plugins). To reload the application, open the developer tools F12 and press `CtrlOrCmd + R`. This will clear all unsaved diagrams. +Plugins will be loaded on application startup (menu plugins) or reload (style and modeling tool plugins). To reload the application, press `F12` to open the developer tools, and press `Ctrl+R` or `Cmd+R`. This will clear all unsaved diagrams. 
## Additional resources diff --git a/docs/components/modeler/desktop-modeler/telemetry/telemetry.md b/docs/components/modeler/desktop-modeler/telemetry/telemetry.md index 8db915f21f0..7f177d47b94 100644 --- a/docs/components/modeler/desktop-modeler/telemetry/telemetry.md +++ b/docs/components/modeler/desktop-modeler/telemetry/telemetry.md @@ -12,7 +12,7 @@ The events **Desktop Modeler** sends share a similar payload which usually (but - **event name**: The name of the event triggered (e.g. `diagram:opened`) - **application version**: The version of Desktop Modeler being used (e.g. Version 5.0.0) -- **editor id**: A randomly generated id assigned to your Desktop Modeler installation +- **editor id**: A randomly generated ID assigned to your Desktop Modeler installation ## Definition of events @@ -54,8 +54,8 @@ These events include the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the case of a form, the payload also includes the `formFieldTypes`: @@ -78,8 +78,8 @@ The `Deployment Event` and `Start Instance` have the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the event of an unsuccessful deployment, an `error` property will be present in the payload containing an error code. @@ -106,7 +106,7 @@ Currently, these containers are: The event supplies: -- The `parent` container id to locate the application section +- The `parent` container ID to locate the application section - The button label or link text (generalized as label) for identification of what was specifically clicked - A type to differentiate buttons, internal links, and external links - The link target (optional for external links) diff --git a/docs/components/modeler/desktop-modeler/troubleshooting.md b/docs/components/modeler/desktop-modeler/troubleshooting.md index cd88267e3cc..c43dfc076d3 100644 --- a/docs/components/modeler/desktop-modeler/troubleshooting.md +++ b/docs/components/modeler/desktop-modeler/troubleshooting.md @@ -171,6 +171,18 @@ DEBUG=* ZEEBE_NODE_LOG_LEVEL=DEBUG GRPC_VERBOSITY=DEBUG GRPC_TRACE=all camunda-m +## Desktop Modeler does not start on Ubuntu 24 / modern Linux + +Modern Linux operating systems introduce restrictions on user namespaces, a sandboxing (isolation) mechanism Modeler uses. You may see an error message when you start the application: + +```sh +$ ./camunda-modeler +[46193:1114/170934.837319:FATAL:setuid_sandbox_host.cc(163)] The SUID sandbox helper binary was found, but is not configured correctly. Rather than run without sandboxing I'm aborting now. You need to make sure that [...]/camunda-modeler-[...]-linux-x64/chrome-sandbox is owned by root and has mode 4755. +zsh: trace trap (core dumped) ./camunda-modeler +``` + +To remedy this, configure your system to allow sandboxing by [creating an AppArmor profile](https://github.com/camunda/camunda-modeler/issues/4695#issuecomment-2478458250), or review [this issue](https://github.com/camunda/camunda-modeler/issues/4695#issuecomment-2478581677) for an in-depth explanation of available options. If you don't have the necessary permissions to permit sandboxing, you may choose to disable the sandbox, though this is not recommended. + ## Other questions? 
Head over to the [Modeler category on the forum](https://forum.camunda.io/c/modeler/6) to receive help from the community. diff --git a/docs/components/modeler/desktop-modeler/use-connectors.md b/docs/components/modeler/desktop-modeler/use-connectors.md index 921b5c0f693..6a0f28ee837 100644 --- a/docs/components/modeler/desktop-modeler/use-connectors.md +++ b/docs/components/modeler/desktop-modeler/use-connectors.md @@ -12,7 +12,7 @@ Desktop Modeler automatically fetches and updates [element templates](./element- ## Automatic Connector template fetching -Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. +Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. The fetch is triggered whenever you start the application, or every 24 hours if the application is not closed. After an update check has concluded, a notification indicates if the templates are up to date or have been updated: diff --git a/docs/components/modeler/dmn/decision-literal-expression.md b/docs/components/modeler/dmn/decision-literal-expression.md index 90e8871ad87..b10e1686336 100644 --- a/docs/components/modeler/dmn/decision-literal-expression.md +++ b/docs/components/modeler/dmn/decision-literal-expression.md @@ -42,20 +42,20 @@ attribute on the `decision` element. ![Decision Id](assets/decision-literal-expression/decision-id.png) -The id is the technical identifier of the decision. It is set in the `id` +The ID is the technical identifier of the decision. It is set in the `id` attribute on the `decision` element. -Each decision should have an unique id when it is deployed to Camunda. +Each decision should have an unique ID when it is deployed to Camunda. :::caution -The decision id may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). +The decision ID may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). -The decision id can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to +The decision ID can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to use the `camelCase` or the `snake_case` format. The `kebab-case` format is not allowed because it contains the operator `-`. -If the decision id contain a special character or symbol then the decision result can't be accessed in +If the decision ID contain a special character or symbol then the decision result can't be accessed in a [dependent decision](decision-requirements-graph.md#required-decisions). ::: @@ -116,7 +116,7 @@ a [dependent decision](decision-requirements-graph.md#required-decisions). :::tip -It is recommended to use the decision id as the variable name. +It is recommended to use the decision ID as the variable name. In contrast to decision tables, the result of a decision literal expression can be accessed in a [dependent decision](decision-requirements-graph.md#required-decisions) by its variable name instead of its decision diff --git a/docs/components/modeler/dmn/decision-requirements-graph.md b/docs/components/modeler/dmn/decision-requirements-graph.md index 04efa0946b6..2aff91aa848 100644 --- a/docs/components/modeler/dmn/decision-requirements-graph.md +++ b/docs/components/modeler/dmn/decision-requirements-graph.md @@ -47,9 +47,9 @@ The name describes the DRG. 
It is set as the `name` attribute on the `definition ![Decision Requirements Graph Id](assets/decision-requirements-graph/drg-id.png) -The id is the technical identifier of the DRG. It is set in the `id` attribute on the `definitions` element. +The ID is the technical identifier of the DRG. It is set in the `id` attribute on the `definitions` element. -Each DRG should have an unique id when it is deployed to Camunda. +Each DRG should have an unique ID when it is deployed to Camunda. ```xml @@ -66,7 +66,7 @@ Each DRG should have an unique id when it is deployed to Camunda. ![Decision](assets/decision-requirements-graph/decision.png) A decision requirements graph can have one or more decisions. A decision has a [name](decision-table.md#decision-name) -which is shown in the DRD and an [id](decision-table.md#decision-id). The decision logic inside the decision must be +which is shown in the DRD and an [ID](decision-table.md#decision-id). The decision logic inside the decision must be either a [decision table](decision-table.md) or a [decision literal expression](decision-literal-expression.md). A decision is represented by a `decision` element inside the `definitions` XML element. @@ -89,15 +89,15 @@ A decision is represented by a `decision` element inside the `definitions` XML e A decision can have one or more required decisions which it depends on. A required decision is represented by a `requiredDecision` element inside an `informationRequirement` XML element. It -has a `href` attribute and the value starts with `#` followed by the [decision id](decision-table.md#decision-id) of the +has a `href` attribute and the value starts with `#` followed by the [decision ID](decision-table.md#decision-id) of the required decision. :::tip -The result of a required decision can be accessed in the dependent decision by its decision id. +The result of a required decision can be accessed in the dependent decision by its decision ID. If the required decision is a decision table and has more than one output then the output values are grouped under the -decision id and can be accessed by their [output names](decision-table-output.md#output-name) ( +decision ID and can be accessed by their [output names](decision-table-output.md#output-name) ( e.g. `decisionId.outputName`). The structure of the result depends on the decision table [hit policy](decision-table-hit-policy.md). diff --git a/docs/components/modeler/dmn/decision-table-input.md b/docs/components/modeler/dmn/decision-table-input.md index 479d9f181ad..5b831e373cd 100644 --- a/docs/components/modeler/dmn/decision-table-input.md +++ b/docs/components/modeler/dmn/decision-table-input.md @@ -31,9 +31,9 @@ XML element. ``` -## Input id +## Input ID -The input id is a unique identifier of the decision table input. It is used by Camunda to reference the +The input ID is a unique identifier of the decision table input. It is used by Camunda to reference the input clause. Therefore, it is required. It is set as the `id` attribute of the `input` XML element. ```xml diff --git a/docs/components/modeler/dmn/decision-table-output.md b/docs/components/modeler/dmn/decision-table-output.md index 61c74f129d5..81e80d6146a 100644 --- a/docs/components/modeler/dmn/decision-table-output.md +++ b/docs/components/modeler/dmn/decision-table-output.md @@ -27,9 +27,9 @@ XML element. ``` -## Output id +## Output ID -The output id is a unique identifier of the decision table output. 
It is used by Camunda to reference the +The output ID is a unique identifier of the decision table output. It is used by Camunda to reference the output clause. Therefore, it is required. It is set as the `id` attribute of the `output` XML element. ```xml @@ -75,7 +75,7 @@ a [dependent decision](decision-requirements-graph.md#required-decisions) nor in :::tip -If the decision table has only one output then it is recommended that the [decision id](decision-table.md#decision-id) +If the decision table has only one output then it is recommended that the [decision ID](decision-table.md#decision-id) is used as the output name. The decision result can be accessed in a [dependent decision](decision-requirements-graph.md#required-decisions) by its diff --git a/docs/components/modeler/dmn/decision-table.md b/docs/components/modeler/dmn/decision-table.md index b9d9f9a3c30..fd6ed6684d1 100644 --- a/docs/components/modeler/dmn/decision-table.md +++ b/docs/components/modeler/dmn/decision-table.md @@ -41,25 +41,25 @@ attribute on the `decision` element. It can be changed via the Properties Panel ``` -## Decision id +## Decision ID -![Decision Id](assets/decision-table/decision-id.png) +![Decision ID](assets/decision-table/decision-id.png) -The id is the technical identifier of the decision. It is set in the `id` +The ID is the technical identifier of the decision. It is set in the `id` attribute on the `decision` element. Just as the `name`, the `id` can be changed via the Properties Panel after selecting the respective "Decision" in the Decision Requirements Diagram view. -Each decision should have an unique id when it is deployed to Camunda. +Each decision should have an unique ID when it is deployed to Camunda. :::caution -The decision id may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). +The decision ID may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). -The decision id can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to +The decision ID can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to use the `camelCase` or the `snake_case` format. The `kebab-case` format is not allowed because it contains the operator `-`. -If the decision id contain a special character or symbol then the decision result can't be accessed in +If the decision ID contains a special character or symbol then the decision result can't be accessed in a [dependent decision](decision-requirements-graph.md#required-decisions). ::: diff --git a/docs/components/modeler/dmn/dmn.md b/docs/components/modeler/dmn/dmn.md index f8a894fe113..e14c4e758b3 100644 --- a/docs/components/modeler/dmn/dmn.md +++ b/docs/components/modeler/dmn/dmn.md @@ -10,7 +10,7 @@ description: "Camunda Desktop and Web Modeler both offer the same Modeling exper Camunda Desktop and Web Modeler both offer the same Modeling experience for DMN 1.3 models: Modeling starts in the Decision Requirements Diagram (DRD) view. From there, you can add DMN elements from the palette on the left side by dragging and dropping them onto the diagram canvas. -Alternatively, you can add new elements by using the context menu that appears when you select an element in the diagram. Using the wrench icon in the context menu, you can change the type of an element in place. Use the properties panel on the right side to change the name or id of the DMN diagram. 
+Alternatively, you can add new elements by using the context menu that appears when you select an element in the diagram. Using the wrench icon in the context menu, you can change the type of an element in place. Use the properties panel on the right side to change the name or ID of the DMN diagram. ## Demo @@ -53,6 +53,10 @@ You can also edit literal expressions. Just as with decision tables, in the deci ## Business knowledge models +:::caution +Viewing the result of BKM evaluation is currently not supported in Operate. +::: + A _business knowledge model_ (BKM) is a reusable function containing a piece of decision logic. Typically, a BKM instantiates business logic that is required in multiple decisions, such as a common computation. For example, an amortization formula might be used in different loan application processes. You can make BKM elements executable using literal expressions written in FEEL, in almost the same way you would create a decision using a literal expression. A BKM literal expression can optionally accept parameters to be used as inputs to the FEEL expression, and it returns a single result whose name is the same as the BKM element name. Once you’ve created a BKM, it appears in autosuggestions when you’re using literal expressions to create decision logic. diff --git a/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md b/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md index 9f6b2358e5a..63e8c984897 100644 --- a/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md +++ b/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md @@ -621,3 +621,30 @@ is empty([]) is empty([1,2,3]) // false ``` + +## partition(list, size) + + + +Returns consecutive sublists of a list, each of the same size (the final list may be smaller). + +If `size` is less than `0`, it returns `null`. + +**Function signature** + +```feel +partition(list: list, size: number): list +``` + +**Examples** + +```feel +partition([1,2,3,4,5], 2) +// [[1,2], [3,4], [5]] + +partition([], 2) +// [] + +partition([1,2], 0) +// null +``` diff --git a/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md b/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md index 3d0bbe0fc73..6185481e816 100644 --- a/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md +++ b/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md @@ -392,3 +392,28 @@ to base64(value: string): string to base64("FEEL") // "RkVFTA==" ``` + +## is blank(string) + + + +Returns `true` if the given string is blank (empty or contains only whitespaces). + +**Function signature** + +```feel +is blank(string: string): boolean +``` + +**Examples** + +```feel +is blank("") +// true + +is blank(" ") +// true + +is blank("hello world") +// false +``` diff --git a/docs/components/modeler/feel/language-guide/feel-context-expressions.md b/docs/components/modeler/feel/language-guide/feel-context-expressions.md index 8e6a232f1d0..8a475a1c1db 100644 --- a/docs/components/modeler/feel/language-guide/feel-context-expressions.md +++ b/docs/components/modeler/feel/language-guide/feel-context-expressions.md @@ -1,15 +1,22 @@ --- id: feel-context-expressions title: Context expressions -description: "This document outlines context expressions and examples." 
+description: "Learn more about how you can use FEEL context expressions, including examples that show common use cases for FEEL context expressions." --- +You can use the following FEEL context expressions. Examples are provided to show common use cases. + ### Literal -Creates a new context with the given entries. Each entry has a key and a value. The key is either a -name or a string. The value can be any type. +Creates a new context with the given entries. + +- Each entry has a key and a value. +- The key is either a name or a string. +- The value can be any type. -Refer to the [naming conventions](./feel-variables.md#variable-names) for valid key names. +:::info +For valid key names, see [naming conventions](./feel-variables.md#variable-names). +::: ```feel { @@ -111,8 +118,7 @@ accessed by their key. a.b ``` -Extracts the entries with the key `b` of the list of context elements `a` (i.e. a projection). It -returns a list containing the values of the context elements with the key `b`. +Extracts the entries with the key `b` of the list of context elements `a` (that is, a projection). It returns a list containing the values of the context elements with the key `b`. ```feel [ @@ -128,8 +134,7 @@ returns a list containing the values of the context elements with the key `b`. // ["p1", "p2"] ``` -If an element of the list `a` doesn't contain an entry with the key `b`, the result contains `null` -of this element. +If an element of the list `a` doesn't contain an entry with the key `b`, the result contains `null` of this element. ```feel [ @@ -144,3 +149,35 @@ of this element. ].b // [5, null] ``` + +## Examples + +### Validate data + +Validate journal entries and return all violations. + +```feel +{ + check1: { + error: "Document Type invalid for current year posting", + violations: collection[documentType = "S2" and glDate > startFiscalYear] + }, + check2: { + error: "Document Type invalid for current year posting", + violations: collection[ledgerType = "GP" and foreignAmount != null] + }, + result: [check1, check2][count(violations) > 0] +} +``` + +### Structure calculation + +Calculate the minimum age of a given list of birthdays. + +```feel +{ + age: function(birthday) (today() - birthday).years, + ages: for birthday in birthdays return age(birthday), + minAge: min(ages) +}.minAge +``` diff --git a/docs/components/modeler/feel/language-guide/feel-data-types.md b/docs/components/modeler/feel/language-guide/feel-data-types.md index 117a9535503..e3d49c38a80 100644 --- a/docs/components/modeler/feel/language-guide/feel-data-types.md +++ b/docs/components/modeler/feel/language-guide/feel-data-types.md @@ -70,7 +70,7 @@ date("2017-03-10") ### Time -A local or zoned time. The time can have an offset or time zone id. +A local or zoned time. The time can have an offset or time zone ID. - Format: `HH:mm:ss` / `HH:mm:ss+/-HH:mm` / `HH:mm:ss@ZoneId` - Java Type: `java.time.LocalTime` / `java.time.OffsetTime` @@ -89,7 +89,7 @@ time("10:31:10@Europe/Paris") ### Date-time -A date with a local or zoned time component. The time can have an offset or time zone id. +A date with a local or zoned time component. The time can have an offset or time zone ID. 
- Format: `yyyy-MM-dd'T'HH:mm:ss` / `yyyy-MM-dd'T'HH:mm:ss+/-HH:mm` / `yyyy-MM-dd'T'HH:mm:ss@ZoneId` - Java Type: `java.time.LocalDateTime` / `java.time.DateTime` diff --git a/docs/components/modeler/feel/language-guide/feel-list-expressions.md b/docs/components/modeler/feel/language-guide/feel-list-expressions.md index 78fe7d88632..a507601d49b 100644 --- a/docs/components/modeler/feel/language-guide/feel-list-expressions.md +++ b/docs/components/modeler/feel/language-guide/feel-list-expressions.md @@ -1,9 +1,11 @@ --- id: feel-list-expressions title: List expressions -description: "This document outlines list expressions and examples." +description: "Learn more about how you can use FEEL list expressions, including examples that show common use cases for FEEL list expressions." --- +You can use the following FEEL list expressions. Examples are provided to show common use cases. + ### Literal Creates a new list of the given elements. The elements can be of any type. @@ -45,8 +47,7 @@ If the index is out of the range of the list, it returns `null`. // null ``` -If the index is negative, it starts counting the elements from the end of the list. The last -element of the list is at index `-1`. +If the index is negative, it starts counting the elements from the end of the list. The last element of the list is at index `-1`. ```feel [1,2,3,4][-1] @@ -69,8 +70,7 @@ The index of a list starts at `1`. In other languages, the index starts at `0`. a[c] ``` -Filters the list `a` by the condition `c`. The result of the expression is a list that contains all -elements where the condition `c` evaluates to `true`. The other elements are excluded. +Filters the list `a` by the condition `c`. The result of the expression is a list that contains all elements where the condition `c` evaluates to `true`. The other elements are excluded. While filtering, the current element is assigned to the variable `item`. @@ -132,3 +132,92 @@ every x in [1,2,3] satisfies even(x) every x in [1,2], y in [2,3] satisfies x < y // false ``` + +## Examples + +### Filter list and return the first element + +Return the first packaging element which unit is "Palette". + +```feel +data.attribute.packaging[unit = "Palette"][1] +``` + +### Group list + +Group the given list of invoices by their person. + +Each invoice has a person. The persons are extracted from the invoices and are used as a filter for the list. + +```feel +for p in distinct values(invoices.person) return invoices[person = p] +``` + +#### Evaluation context + +```feel +{"invoices":[ + {"id":1, "person":"A", "amount": 10}, + {"id":2, "person":"A", "amount": 20}, + {"id":3, "person":"A", "amount": 30}, + {"id":4, "person":"A", "amount": 40}, + {"id":5, "person":"B", "amount": 15}, + {"id":6, "person":"B", "amount": 25} +]} +``` + +#### Evaluation result + +```feel +[ + [ + { id: 1, person: "A", amount: 10 }, + { id: 2, person: "A", amount: 20 }, + { id: 3, person: "A", amount: 30 }, + { id: 4, person: "A", amount: 40 }, + ], + [ + { id: 5, person: "B", amount: 15 }, + { id: 6, person: "B", amount: 25 }, + ], +] +``` + +### Merge two lists + +Merge two given lists. Each list contains context values with the same structure. Each context has an `id` entry that identifies the value. + +The result is a list that contains all context values grouped by the identifier. 
+ +```feel + { + ids: union(x.files.id,y.files.id), + getById: function (files,fileId) get or else(files[id=fileId][1], {}), + merge: for id in ids return context merge(getById(x.files, id), getById(y.files, id)) + }.merge +``` + +#### Evaluation context + +```feel +{ + "x": {"files": [ + {"id":1, "content":"a"}, + {"id":2, "content":"b"} + ]}, + "y": {"files": [ + {"id":1, "content":"a2"}, + {"id":3, "content":"c"} + ]} +} +``` + +#### Evaluation result + +```feel +[ + { id: 1, content: "a2" }, + { id: 2, content: "b" }, + { id: 3, content: "c" }, +] +``` diff --git a/docs/components/modeler/feel/language-guide/feel-temporal-expressions.md b/docs/components/modeler/feel/language-guide/feel-temporal-expressions.md index a8ba21dcafa..a44bbdf45ef 100644 --- a/docs/components/modeler/feel/language-guide/feel-temporal-expressions.md +++ b/docs/components/modeler/feel/language-guide/feel-temporal-expressions.md @@ -1,15 +1,17 @@ --- id: feel-temporal-expressions title: Temporal expressions -description: "This document outlines temporal expressions and examples." +description: "Learn more about how you can use FEEL temporal expressions, including examples that show common use cases for FEEL temporal expressions." --- +You can use the following FEEL temporal expressions. Examples are provided to show common use cases. + ### Literal Creates a new temporal value. A value can be written in one of the following ways: -- using a temporal function (e.g. `date("2020-04-06")`) -- using the `@` - notation (e.g. `@"2020-04-06"`) +- using a temporal function (for example, `date("2020-04-06")`) +- using the `@` - notation (for example, `@"2020-04-06"`) ```feel date("2020-04-06") @@ -115,12 +117,11 @@ duration("P2D") + duration("P5D") ### Subtraction -Subtracts a value from another value. The operator is defined for the followings types. +Subtracts a value from another value. The operator is defined for the following types. If a value has a different type, the result is `null`. -If one value has a timezone or time-offset, the other value must have a timezone or time-offset too. -Otherwise, the result is `null`. +If one value has a timezone or time-offset, the other value must have a timezone or time-offset too. Otherwise, the result is `null`. @@ -201,7 +202,7 @@ duration("P1Y") - duration("P3M") ### Multiplication -Multiplies a value by another value. The operator is defined for the followings types. +Multiplies a value by another value. The operator is defined for the following types. If a value has a different type, the result is `null`. @@ -248,7 +249,7 @@ duration("P1M") * 6 ### Division -Divides a value by another value. The operator is defined for the followings types. +Divides a value by another value. The operator is defined for the following types. If a value has a different type, the result is `null`. @@ -431,3 +432,101 @@ duration("PT2H30M").minutes duration("P6M").months // 6 ``` + +## Examples + +### Compare date with offset + +Check if a date is at least 6 months before another date. + +```feel +date1 < date2 + @"P6M" +``` + +### Calculate age + +Return the current age of a person based on a given birthday. + +```feel +years and months duration(date(birthday), today()).years +``` + +### Check for weekend + +Check if the current day is on a weekend. + +```feel +day of week(today()) in ("Saturday","Sunday") +``` + +### Calculate duration between dates + +Return the duration between now and the next Tuesday at 08:00. 
+ +```feel +(for x in 1..7 + return date and time(today(),@"08:00:00Z") + @"P1D" * x +)[day of week(item) = "Tuesday"][1] - now() +``` + +### Calculate duration between times + +Return the duration between now and the next time it is 09:00 in the Europe/Berlin timezone. + +```feel +{ + time: @"09:00:00@Europe/Berlin", + date: if time(now()) < time then today() else today() + @"P1D", + duration: date and time(date, time) - now() +}.duration +``` + +### Calculate next weekday + +Return the next day that is not a weekend at 00:00. + +```feel +(for x in 1..3 + return date and time(today(),@"00:00:00Z") + @"P1D" * x +)[not(day of week(item) in ("Saturday","Sunday"))][1] +``` + +### Change format of dates + +Transform a given list of date-time values into a custom format. + +```feel +for d in dates return { + date: date(date and time(d)), + day: string(date.day), + month: substring(month of year(date), 1, 3), + year: string(date.year), + formatted: day + "-" + month + "-" + year +}.formatted +``` + +#### Evaluation context + +```feel +["2021-04-21T07:25:06.000Z", "2021-04-22T07:25:06.000Z"] +``` + +#### Evaluation result + +```feel +["21-Apr-2021", "22-Apr-2021"] +``` + +### Create a Unix timestamp + +Return the current point in time as a Unix timestamp. + +```feel +(now() - @"1970-01-01T00:00Z") / @"PT1S" * 1000 +``` + +#### Evaluation result + +```feel +1618200039000 +``` diff --git a/docs/components/modeler/feel/what-is-feel.md b/docs/components/modeler/feel/what-is-feel.md index 88546e0058a..05a8c57d497 100644 --- a/docs/components/modeler/feel/what-is-feel.md +++ b/docs/components/modeler/feel/what-is-feel.md @@ -1,7 +1,7 @@ --- id: what-is-feel title: What is FEEL? -description: "FEEL is a part of DMN specification of the Object Management Group." +description: "Learn more about using Friendly Enough Expression Language (FEEL) in Camunda. FEEL is a part of the DMN specification of the Object Management Group (OMG)." --- import { MarkerCamundaExtension } from "@site/src/mdx/MarkerCamundaExtension"; diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-button.md b/docs/components/modeler/forms/form-element-library/forms-element-library-button.md index 577b3c0615e..e3614b67505 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-button.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-button.md @@ -6,7 +6,7 @@ description: A form element to trigger form actions A button allowing the user to trigger form actions. -![Form Button Symbol](/img/form-icons/form-button.svg) +Form Button Symbol ### Configurable properties @@ -15,4 +15,4 @@ A button allowing the user to trigger form actions. - **Submit**: Submit the form (given there are no validation errors). - **Reset**: Reset the form, all user inputs will be lost. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the button. -- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
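The **Hide if** property above takes a plain FEEL boolean condition. As a minimal sketch (the `termsAccepted` form variable is hypothetical and only used for illustration), the following expression hides the button until a terms checkbox elsewhere in the form is ticked:

```feel
not(termsAccepted)
```

The button stays hidden while the expression evaluates to `true` and reappears once it evaluates to `false`.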
diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md b/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md index f3812384600..3b60e61f43b 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A set of checkbox options providing data multi-selection for small datasets. -![Form Checklist Symbol](/img/form-icons/form-checklist.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A set of checkbox options providing data multi-selection for small datasets. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox group. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox group must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Datatypes diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox.md b/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox.md index 834a18cfa63..9546284e2d4 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-checkbox.md @@ -6,7 +6,7 @@ description: A form element to read and edit boolean data A checkbox allowing the user to read and edit boolean data. -![Form Checkbox Symbol](/img/form-icons/form-checkbox.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A checkbox allowing the user to read and edit boolean data. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
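The checkbox's **Read only** property can likewise be driven by a FEEL condition. A small sketch, assuming a hypothetical `status` form variable that is not part of the original example:

```feel
status = "approved"
```

While this evaluates to `true`, the checkbox state can be read but no longer changed.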
### Datatypes diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-datetime.md b/docs/components/modeler/forms/form-element-library/forms-element-library-datetime.md index aeb127128dc..c02c2912925 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-datetime.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-datetime.md @@ -6,7 +6,7 @@ description: Learn about the datetime form element to read and edit date and tim A component allowing the user to read and edit date and time data. -![Form Datetime Symbol](/img/form-icons/form-datetime.svg) +Form Datetime Symbol ## Configurable properties @@ -19,7 +19,7 @@ A component allowing the user to read and edit date and time data. - **Read only**: Makes the datetime component read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the datetime component, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the datetime component. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Time format**: Defines the time data format. This can either be **UTC offset**, **UTC normalized**, or **No timezone**. - **Time interval**: Defines the steps of time that can be selected in the time input field. - **Disallow past dates**: Enables the restriction to not allow past dates. diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md b/docs/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md index 4af25ce51cc..1a24ae1440e 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md @@ -6,7 +6,7 @@ description: Learn about the dynamic list form element to dynamically manage a l The **dynamic list** element is designed to dynamically manage a list of form elements. It enables users to add or remove items from the list and is particularly useful in scenarios where the number of items in a list is not fixed. -![Dynamic List Symbol](/img/form-icons/form-dynamiclist.svg) +Dynamic List Symbol ## Configurable properties diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-expression.md b/docs/components/modeler/forms/form-element-library/forms-element-library-expression.md index f4f24988c5e..27133f2d8de 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-expression.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-expression.md @@ -6,7 +6,7 @@ description: A form element to compute form state An expression field allowing the user to compute new data based on form state. 
-![Form Expression Field Symbol](/img/form-icons/form-expression.svg) +Form Expression Field Symbol ### Configurable properties @@ -14,7 +14,7 @@ An expression field allowing the user to compute new data based on form state. - **Target value**: Defines an [expression](../../feel/language-guide/feel-expressions-introduction.md) to evaluate. - **Compute on**: Defines when the expression should be evaluated. Either whenever the result changes, or only on form submission. - **Deactivate if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to disable the expression. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). :::info diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-filepicker.md b/docs/components/modeler/forms/form-element-library/forms-element-library-filepicker.md new file mode 100644 index 00000000000..6b96c0f358a --- /dev/null +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-filepicker.md @@ -0,0 +1,22 @@ +--- +id: forms-element-library-filepicker +title: Filepicker +description: A form element to select files +--- + +A form element to select files. + +Form Filepicker Symbol + +### Configurable properties + +- **Field label**: Label displayed on top of the file picker. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). +- **Supported file formats**: [Comma-separated list of supported file formats.](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file#unique_file_type_specifiers) Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md) or plain text. +- **Upload multiple files**: Allows the user to upload multiple files at once. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). +- **Key**: Binds the field to a form variable, refer to the [data binding documentation](../configuration/forms-config-data-binding.md). +- **Read only**: Makes the file picker read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). +- **Disabled**: Disables the file picker, for use during development. +- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the file picker. +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). +- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. + - **Required**: File picker must have a selected file. 
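Because the file picker's **Supported file formats** property accepts an expression, the accepted formats can be computed from form state. A hedged sketch, assuming a hypothetical `documentCategory` variable; the result follows the comma-separated format list the property expects:

```feel
if documentCategory = "image" then ".png,.jpg,.jpeg" else ".pdf"
```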
diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-group.md b/docs/components/modeler/forms/form-element-library/forms-element-library-group.md index a90f822b54a..4353743a7b1 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-group.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-group.md @@ -6,7 +6,7 @@ description: Learn about the group form element to group multiple form elements The group element serves as a container to group various form elements together. It allows for nesting of fields and assists in organizing complex forms. -![Form Group Symbol](/img/form-icons/form-group.svg) +Form Group Symbol ### Configurable properties @@ -15,7 +15,7 @@ The group element serves as a container to group various form elements together. - **Show outline**: Can be toggled on and off to display a separating outline around the group - **Vertical alignment**: Determines the alignment of items in the list. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Usage diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-html.md b/docs/components/modeler/forms/form-element-library/forms-element-library-html.md index bc8e56f454f..bad7f9a9483 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-html.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-html.md @@ -6,13 +6,13 @@ description: A form element to display HTML content. A flexible display component designed to quickly render HTML content for the user. -![Form HTML Symbol](/img/form-icons/form-html.svg) +Form HTML Symbol ## Configurable properties - **Content**: This property accepts HTML content. Define it using [templating syntax](../configuration/forms-config-templating-syntax.md) or as plaintext HTML. The rendered content is sanitized for security reasons, see below for details. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to conditionally hide the HTML content. -- **Columns**: Space the field will use inside its row. The **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. The **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
## Our security and sanitation strategy diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-iframe.md b/docs/components/modeler/forms/form-element-library/forms-element-library-iframe.md index f928bf92197..782621b9c84 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-iframe.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-iframe.md @@ -12,7 +12,7 @@ Every iframe component is a sandbox. This means that the content of the iframe i ::: -![Form iframe Symbol](/img/form-icons/form-iframe.svg) +Form iframe Symbol ## Configurable properties @@ -30,7 +30,7 @@ Every iframe component is a sandbox. This means that the content of the iframe i - **Top level navigation**: Gives the iframe permission to change the URL of the parent page, navigating away entirely from it. - **Storage access by user**: Controls access of local storage based on user interactions, may be expected in addition to allow same origin on certain browsers for functionality depending on storage. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the iframe. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Security advisory diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-image.md b/docs/components/modeler/forms/form-element-library/forms-element-library-image.md index 3becca2d45e..ca0c674345e 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-image.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-image.md @@ -6,11 +6,11 @@ description: Learn about the image view form element to display an image. An element allowing the user to display images. -![Form Image Symbol](/img/form-icons/form-image.svg) +Form Image Symbol ## Configurable properties - **Image source**: Specifies the image source via [expression](../../feel/language-guide/feel-expressions-introduction.md), [templating syntax](../configuration/forms-config-templating-syntax.md) or [static value](/components/concepts/expressions.md#expressions-vs-static-values) (hyperlink or data URI). - **Alternative text**: Provides an alternative text to the image in case it cannot be displayed. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the image. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-number.md b/docs/components/modeler/forms/form-element-library/forms-element-library-number.md index 6e9d108ca19..902253e3642 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-number.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-number.md @@ -6,7 +6,7 @@ description: A form element to read and edit numeric data A number field allowing the user to read and edit numeric data. -![Form Number Symbol](/img/form-icons/form-number.svg) +Form Number Symbol ### Configurable properties @@ -19,7 +19,7 @@ A number field allowing the user to read and edit numeric data. - **Read only**: Makes the number field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the number field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the number. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Serialize to string**: Configures the output format of the datetime value. This enables unlimited precision digits. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Number field must contain a value. diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-radio.md b/docs/components/modeler/forms/form-element-library/forms-element-library-radio.md index 285fc2b6272..3d9c8928bcc 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-radio.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-radio.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A radio group allowing the user to select one of multiple data options for small datasets. -![Form Radio Symbol](/img/form-icons/form-radio.svg) +Form Radio Symbol ### Configurable properties @@ -18,7 +18,7 @@ A radio group allowing the user to select one of multiple data options for small - **Disabled**: Disables the radio group, for use during development. - **Options source**: Radio group components can be configured with an options source defining the individual choices the component provides, refer to [options source docs](../configuration/forms-config-options.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the radio group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One radio option must be selected. diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-select.md b/docs/components/modeler/forms/form-element-library/forms-element-library-select.md index 17ae7dd2ce0..102a401ff77 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-select.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-select.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A Select dropdown allowing the user to select one of multiple data option from larger datasets. -![Form Select Symbol](/img/form-icons/form-select.svg) +Form Select Symbol ### Configurable properties @@ -18,7 +18,7 @@ A Select dropdown allowing the user to select one of multiple data option from l - **Read only**: Makes the select read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the select, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the select. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Options source**: Selects can be configured with an options source defining the individual choices the select provides, refer to [options source docs](../configuration/forms-config-options.md). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One select entry must be selected. diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-separator.md b/docs/components/modeler/forms/form-element-library/forms-element-library-separator.md index a83fc9ed785..392d1418cb4 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-separator.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-separator.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add a visual separation between A **separator** element is used to create a visual separation between two elements. 
-![Form Spacer Symbol](/img/form-icons/form-separator.svg) +Form Separator Symbol ## Usage diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-spacer.md b/docs/components/modeler/forms/form-element-library/forms-element-library-spacer.md index 22043da492b..7284c15dfd3 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-spacer.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-spacer.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add vertical space between eleme A **spacer** element is used to create a defined amount of vertical space between two elements. -![Form Spacer Symbol](/img/form-icons/form-spacer.svg) +Form Spacer Symbol ## Configurable properties diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-table.md b/docs/components/modeler/forms/form-element-library/forms-element-library-table.md index 947b4f5e505..5ff8cb77e7d 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-table.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-table.md @@ -6,7 +6,7 @@ description: Learn about the table form element to render tabular data. This is an element allowing the user to render tabular data. -![Form table Symbol](/img/form-icons/form-table.svg) +Form Table Symbol ## Configurable properties @@ -16,4 +16,4 @@ This is an element allowing the user to render tabular data. - **Number of rows per page**: The size of each page. Used only if pagination is enabled. Must be greater than zero. - **Headers source**: Defines which headers will be used in the table. This can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md) or a list of static headers. Review [table data binding](../configuration/forms-config-table-data-binding.md) for the required header structure. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the table. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-taglist.md b/docs/components/modeler/forms/form-element-library/forms-element-library-taglist.md index 97411ce8db5..e4dd64059ea 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-taglist.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-taglist.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A complex and searchable tag based component providing multi-selection for large datasets. -![Form Taglist Symbol](/img/form-icons/form-taglist.svg) +Form Taglist Symbol ### Configurable properties @@ -14,7 +14,7 @@ A complex and searchable tag based component providing multi-selection for large - **Field description**: Description provided below the taglist. 
Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Key**: Binds the field to a form variable, refer to [data binding docs](../configuration/forms-config-data-binding.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the taglist. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Taglist must contain a value. - **Options source**: Taglists can be configured with an options source defining the individual choices your user can make, refer to [options source docs](../configuration/forms-config-options.md). diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-text.md b/docs/components/modeler/forms/form-element-library/forms-element-library-text.md index c2b043cacee..a3fe68e537e 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-text.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-text.md @@ -6,13 +6,13 @@ description: A form element to display simple Markdown-powered text. A Markdown-powered text component allowing to display simple information to the user. -![Form Text Symbol](/img/form-icons/form-text.svg) +Form Text Symbol ## Configurable properties - **Text**: Either an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). After evaluation, the result is processed using a Markdown renderer that supports basic HTML and [GitHub-flavored Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). To ensure safety and prevent cross-site scripting in Camunda Forms, potentially harmful HTML elements will not be rendered. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
## Example text configurations diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-textarea.md b/docs/components/modeler/forms/form-element-library/forms-element-library-textarea.md index d851a970a92..2e66e4d0712 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-textarea.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-textarea.md @@ -6,7 +6,7 @@ description: Learn about the text area form element to read and edit multiline t A text area allowing the user to read and edit multiline textual data. -![Form Textarea Symbol](/img/form-icons/form-textArea.svg) +Form Textarea Symbol ## Configurable properties @@ -17,7 +17,7 @@ A text area allowing the user to read and edit multiline textual data. - **Read only**: Makes the text area read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text area; for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text area. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text area must contain a value. - **Minimum length**: Text area must have at least `n` characters. diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library-textfield.md b/docs/components/modeler/forms/form-element-library/forms-element-library-textfield.md index 1aafa0e824f..da45e37b3c9 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library-textfield.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library-textfield.md @@ -6,7 +6,7 @@ description: A form element to read and edit textual data A text field allowing the user to read and edit textual data. -![Form Text Field Symbol](/img/form-icons/form-textField.svg) +Form Text Field Symbol ### Configurable properties @@ -17,7 +17,7 @@ A text field allowing the user to read and edit textual data. - **Read only**: Makes the text field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text field. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. 
**Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text field must contain a value. - **Regular expression validation**: Use predefined validation patterns. Available options are: `Email`, `Phone`, and `Custom`. diff --git a/docs/components/modeler/forms/form-element-library/forms-element-library.md b/docs/components/modeler/forms/form-element-library/forms-element-library.md index 6ecc1d125f9..e0bbdfea44b 100644 --- a/docs/components/modeler/forms/form-element-library/forms-element-library.md +++ b/docs/components/modeler/forms/form-element-library/forms-element-library.md @@ -115,6 +115,12 @@ The following form elements are currently available within Camunda Forms: + + + + + + diff --git a/docs/components/modeler/forms/sidebar-schema.js b/docs/components/modeler/forms/sidebar-schema.js index 83dc5b406ec..3e455627265 100644 --- a/docs/components/modeler/forms/sidebar-schema.js +++ b/docs/components/modeler/forms/sidebar-schema.js @@ -24,6 +24,7 @@ module.exports = { lib_dir + "forms-element-library-iframe", lib_dir + "forms-element-library-table", lib_dir + "forms-element-library-expression", + lib_dir + "forms-element-library-filepicker", lib_dir + "forms-element-library-image", lib_dir + "forms-element-library-spacer", lib_dir + "forms-element-library-separator", diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/right.png b/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/right.png index 7fb706e8562..5104889e411 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/right.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/right.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/wrong.png b/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/wrong.png index 2d787a157ce..9b853dec046 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/wrong.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/called-element/wrong.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/right.png b/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/right.png index bba7c67ea7f..c15b4de3fef 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/right.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/right.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/wrong.png b/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/wrong.png index 82ae6e47333..5be66f2577a 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/wrong.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/element-type/wrong.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/right.png b/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/right.png index 
ea53c299fd5..063a2db05ae 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/right.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/right.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-code.png b/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-code.png index 115a4d3af8f..b16e3d885c8 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-code.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-code.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-reference.png b/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-reference.png index b7ebf131000..b18ccdeb9d4 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-reference.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/error-reference/wrong-no-error-reference.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/right.png b/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/right.png index 18ef0e45f20..456ad3986da 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/right.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/right.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-code.png b/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-code.png index 5db823374aa..ff98c16d3fb 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-code.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-code.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-reference.png b/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-reference.png index b5c6d07737b..24c68585426 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-reference.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/escalation-reference/wrong-no-escalation-reference.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/feel/right.png b/docs/components/modeler/reference/modeling-guidance/rules/img/feel/right.png index 02447f79459..2e6624b3c1b 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/feel/right.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/feel/right.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/feel/wrong.png b/docs/components/modeler/reference/modeling-guidance/rules/img/feel/wrong.png index 078cd1d0ff7..78c5bf6193d 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/feel/wrong.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/feel/wrong.png differ diff --git 
a/docs/components/modeler/reference/modeling-guidance/rules/img/history-time-to-live/info.png b/docs/components/modeler/reference/modeling-guidance/rules/img/history-time-to-live/info.png index e797fb03cc4..cb674903de3 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/history-time-to-live/info.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/history-time-to-live/info.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/right.png b/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/right.png index f25bdf1d5d4..3071bf5b3d6 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/right.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/right.png differ diff --git a/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/wrong-no-message-reference.png b/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/wrong-no-message-reference.png index 43f787a6bf6..50f50637799 100644 Binary files a/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/wrong-no-message-reference.png and b/docs/components/modeler/reference/modeling-guidance/rules/img/message-reference/wrong-no-message-reference.png differ diff --git a/docs/components/modeler/web-modeler/advanced-modeling/camunda-docs-ai.md b/docs/components/modeler/web-modeler/advanced-modeling/camunda-docs-ai.md index 0dd35baed97..8fedb1fb935 100644 --- a/docs/components/modeler/web-modeler/advanced-modeling/camunda-docs-ai.md +++ b/docs/components/modeler/web-modeler/advanced-modeling/camunda-docs-ai.md @@ -8,7 +8,7 @@ description: Ask our smart AI-powered chatbot technical and non-technical questi Alpha :::note -Camunda Docs AI is an alpha feature. To use this feature, enable the [AI-powered features](https://camunda.com/blog/2024/02/camunda-docs-ai-developer-experience-new-level/) through the [alpha features](/components/console/manage-organization/enable-alpha-features.md) menu. Learn more about [alpha features and general availability](/reference/alpha-features.md). +Camunda Docs AI is an alpha feature. To use this feature, enable the [AI-powered features](https://camunda.com/blog/2024/02/camunda-docs-ai-developer-experience-new-level/) through the [alpha features](/components/console/manage-organization/enable-alpha-features.md) menu. Learn more about [alpha features](/components/early-access/alpha/alpha-features.md) and [general availability](/reference/release-policy.md#general-availability-ga). ::: Camunda Docs AI provides a smart AI-powered chatbot that helps you find answers to your technical and non-technical questions about Camunda within Web Modeler, rather than navigating and searching across multiple sources of information like documentation, forums, blog posts, etc. 
diff --git a/docs/components/modeler/web-modeler/advanced-modeling/form-linking.md b/docs/components/modeler/web-modeler/advanced-modeling/form-linking.md index c7cf43464fe..cf43c6b50c5 100644 --- a/docs/components/modeler/web-modeler/advanced-modeling/form-linking.md +++ b/docs/components/modeler/web-modeler/advanced-modeling/form-linking.md @@ -89,6 +89,10 @@ To correct any instances affected by this issue, we recommend the following step ### Camunda Form (embedded) +:::info +Embedded forms are supported only by job worker-based user tasks and are not available for the [Zeebe user task implementation type](/components/modeler/bpmn/user-tasks/user-tasks.md#user-task-implementation-types). +::: + When choosing **Camunda Form (embedded)** as type you have the option to directly paste the form's JSON schema into the **Form JSON configuration** field of the properties panel. The form will be embedded directly into the BPMN diagram's XML representation. diff --git a/docs/components/modeler/web-modeler/camunda-marketplace.md b/docs/components/modeler/web-modeler/camunda-marketplace.md index 1e366d34ebb..0842066baee 100644 --- a/docs/components/modeler/web-modeler/camunda-marketplace.md +++ b/docs/components/modeler/web-modeler/camunda-marketplace.md @@ -12,6 +12,10 @@ If you are a **[Web Modeler Self-Managed](/self-managed/modeler/web-modeler/inst ## Browse Marketplace Connectors +:::note +Connectors created by partners or the community are not part of the commercial Camunda product. Camunda does not support these Connectors as part of its commercial services to enterprise customers. Please evaluate each client to make sure it meets your requirements before using. +::: + To navigate to the Camunda Marketplace, take the following steps: 1. Log in to your Camunda account. diff --git a/docs/components/modeler/web-modeler/deploy-process-application.md b/docs/components/modeler/web-modeler/deploy-process-application.md index d6f25b1777c..91385f1ddd2 100644 --- a/docs/components/modeler/web-modeler/deploy-process-application.md +++ b/docs/components/modeler/web-modeler/deploy-process-application.md @@ -39,6 +39,8 @@ Once validation is complete, deploy your process application to cluster stages i All BPMN, DMN, and form files contained in the process application folder are deployed as a single bundle. +In Self-Managed, you can deploy your diagram to the cluster defined in your Web Modeler [configuration](/self-managed/modeler/web-modeler/configuration/configuration.md#clusters). You should have the `Zeebe` [role](/self-managed/identity/user-guide/roles/add-assign-role.md/#add-a-role) assigned in Identity to be authorized to deploy. + :::note If any resource fails to deploy, the whole deployment [fails](#deployment-errors) and the cluster state remains unchanged. This safely ensures that a process application cannot be deployed incompletely or in an inconsistent state. ::: diff --git a/docs/components/modeler/web-modeler/git-sync.md b/docs/components/modeler/web-modeler/git-sync.md index b4b10c9130f..8187999bdbc 100644 --- a/docs/components/modeler/web-modeler/git-sync.md +++ b/docs/components/modeler/web-modeler/git-sync.md @@ -4,13 +4,26 @@ title: Git sync description: Connect Web Modeler to your Git repositories to keep your projects synced. --- -Organization owners and administrators can connect their Web Modeler process applications to GitHub, allowing users to keep their Web Modeler, Desktop Modeler, and official version control projects synced. 
+import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; -Once the basic integration is configured by an organization owner or organization administrator, project administrators and editors can use the built-in button to pull changes from GitHub, integrate contributions from Desktop Modeler users, and merge their own work. +Organization owners and administrators can connect their Web Modeler process applications to GitHub and GitLab, allowing users to keep their Web Modeler, Desktop Modeler, and official version control projects synced. -## Connect to GitHub +Once the connection is configured by an organization owner or organization administrator, project administrators and editors can use the built-in button to pull changes from the remote repository, integrate contributions from Desktop Modeler users, and merge their own work. -### Create a new GitHub App +## Connect to a remote repository + +Select your Git repository host: + + + + + +

    Create a new GitHub App

    Web Modeler requires a GitHub App to sync changes with your GitHub repository. @@ -24,35 +37,40 @@ Follow the [GitHub documentation](https://docs.github.com/en/apps/creating-githu Click **Create GitHub App** to finish. -### Generate a private key +

    Generate a private key

    1. In your new application's setting page, navigate to **General > Private keys**. 2. Select **Generate a private key**. This key is automatically downloaded as a .pem file when created, and can be opened in a text editor to copy and paste the contents into Web Modeler. -### Install the GitHub App +

    Install the GitHub App

    1. In your application's setting page, navigate to **Install app**. 2. Click on the **Install** button for your organization or account. 3. Select **Only select repositories**, and choose the repository to sync with Web Modeler. 4. Once redirected to your application's installation page, copy the **Installation ID** located at the end of the page's URL: `https://github.com/settings/installations/{installation_id}`. -### Configure GitHub in Web Modeler +

    Configure GitHub in Web Modeler

    :::note -An organization administration account (or project administrator in Camunda Self-Managed) is required for the initial GitHub configuration. +When using a self-hosted GitHub instance, ensure the environment variable `CAMUNDA_MODELER_GITSYNC_GITHUB_BASEURL` is set to the API URL of your self-hosted GitHub instance. It usually looks like `http(s)://HOSTNAME/api/v3`. Refer to [GitHub documentation](https://docs.github.com/en/enterprise-server@3.15/rest/enterprise-admin?apiVersion=2022-11-28#endpoint-urls) and choose the correct enterprise server version. ::: -1. Within Web Modeler, navigate to the process application you would like to connect to GitHub, and click **Connect GitHub**. +1. Within Web Modeler, navigate to the process application you would like to connect to GitHub, and click **Connect repository**. + +2. Select the **GitHub** tile (if not already selected), located at the top of the modal. -2. Provide the following information in the GitHub Configuration modal: +3. Provide the following information in the **Configure GitHub** modal: - **Installation ID:** Found in the URL of your GitHub App's installation page. - - **Client ID:** Found in your GitHub App's settings page. + - **Client ID:** Found in your GitHub App's settings page. You can also use Application ID as an alternative. (If you are using GitHub Enterprise Server 3.13 or prior, Application ID is required.) - **Private Key:** The contents of the .pem file downloaded from your GitHub App's settings page. - - **GitHub repository URL:** The URL of the repository you would like to sync with. + - **GitHub repository URL:** The base URL of the repository you want to sync with, for example `https://github.com/camunda/example-repo`. The URL cannot contain the `.git` extension or a folder path. - **Branch name:** The branch name to use for merging and managing changes. + - **Path:** (optional) The path to the folder containing your process application files. If left empty, Web Modeler syncs with the root of the repository. This path is automatically created if it does not exist. + +4. Click **Open repository** to test your configuration. The repository for the provided branch and optional path opens in a new tab. -3. Click **Save Configuration**. +5. Click **Save Configuration**. :::note When synchronizing for the first time with a remote repository that already contains commits, ensure Web Modeler has assigned the correct main process. @@ -62,15 +80,66 @@ When successful, your project will display a new **Sync with GitHub** button. ![The Sync with GitHub within Web Modeler](./img/git-sync.png) -## Sync with GitHub +
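As a rough illustration of the self-hosted note above, the snippet below sketches how `CAMUNDA_MODELER_GITSYNC_GITHUB_BASEURL` might be supplied in a Docker Compose based Self-Managed setup; the service name `web-modeler-restapi` and the host `github.example.com` are assumptions for the example, not values from this change.

```yaml
# Hypothetical Docker Compose excerpt: point Web Modeler's Git sync at a
# self-hosted GitHub Enterprise Server API (service and host names are placeholders).
services:
  web-modeler-restapi:
    environment:
      CAMUNDA_MODELER_GITSYNC_GITHUB_BASEURL: "https://github.example.com/api/v3"
```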
    + + +

    Create a new access token

    + +Web Modeler requires an access token to sync changes with your GitLab repository. You can use the following options: + +- **Project access token** (Recommended) +- Group access token +- Personal access token + +Follow the [GitLab documentation](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token) to generate a new project access token for your repository with the following configuration: + +- Enable the following [**scopes**](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#scopes-for-a-project-access-token) for either the `developer` or `maintainer` role: + - `api` + - `read_api` + - `read_repository` + - `write_repository` + +

    Get the project ID

    + +1. Navigate to your GitLab project. +2. Click the menu icon in the top right corner and select **Copy project ID**. + +

    Configure GitLab in Web Modeler

    :::note -File synchronization only happens at the root level of the remote repository. Files contained in subfolders will not be synchronized. +When using a self-hosted GitLab instance, ensure the environment variable `CAMUNDA_MODELER_GITSYNC_GITLAB_BASEURL` is set to the API URL of your self-hosted GitLab instance. It usually looks like `http(s)://HOSTNAME/api/v4`. ::: -Organization owners/administrators, project administrators, and project editors can sync their version of Web Modeler with the connected GitHub repository at any time. +1. Within Web Modeler, navigate to the process application you would like to connect to GitLab, and click **Connect repository**. + +2. Select the **GitLab** tile, located at the top of the modal. + +3. Provide the following information in the **Configure GitLab** modal: + + - **Access token:** The generated project, group, or personal access token. + - **Project ID:** The ID copied from the GitLab project settings. + - **GitLab repository URL:** The base URL of the repository you want to sync with, for example `https://gitlab.com/camunda/example-repo`. The URL cannot contain the `.git` extension or a folder path. + - **Branch name:** The branch name to use for merging and managing changes. + - **Path:** (optional) The path to the folder containing your process application files. If left empty, Web Modeler syncs with the root of the repository. This path is automatically created if it does not exist. -1. In your connected process application, click **Sync with GitHub**. +4. Click **Open repository** to test your configuration. The repository for the provided branch and optional path opens in a new tab. + +5. Click **Save Configuration**. + +:::note +When synchronizing for the first time with a remote repository that already contains commits, ensure Web Modeler has assigned the correct main process. +::: + +When successful, your project will display a new **Sync with GitLab** button. +
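For the GitLab case, an analogous sketch under the same Docker Compose assumption (service and host names are placeholders); note the GitLab API path ends in `/api/v4` rather than `/api/v3`.

```yaml
# Hypothetical Docker Compose excerpt for a self-hosted GitLab instance.
services:
  web-modeler-restapi:
    environment:
      CAMUNDA_MODELER_GITSYNC_GITLAB_BASEURL: "https://gitlab.example.com/api/v4"
```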
    +
    + +## Sync with remote repository + +Organization owners/administrators, project administrators, and project editors can sync their version of Web Modeler with the connected repository at any time. + +1. In your connected process application, click **Sync with GitHub** or **Sync with GitLab**. 2. Enter a [version number](./process-applications.md#versioning) to create a new milestone for your process application. The new milestone will be created prior to pushing your changes to the central repository. 3. Click **Synchronize**. @@ -80,12 +149,45 @@ Once the pull is complete and any merge conflicts are resolved, Web Modeler will ## Manage existing configurations -Existing GitHub configurations can be edited from the gear icon beside the **Sync with GitHub** button. Permission to update these settings are limited by the roles within your organization and project. +Existing Git configurations can be edited from the gear icon beside the **Sync with GitHub** or **Sync with GitLab** button. Permission to update these settings is limited to **project administrators**. + +## Change Git provider + +To switch from GitHub to GitLab, or vice versa, update your configuration with the following steps: + +1. Disconnect your current Git provider by clicking the gear icon beside the **Sync with GitHub** or **Sync with GitLab** button, and clicking the **Delete provider connection** button at the bottom of the modal. +2. After confirming the operation, open the **Connect repository** modal and provide the necessary information for the new Git provider, following the steps outlined for [GitHub](./git-sync.md/?platform=github#connect-to-a-remote-repository) or [GitLab](./git-sync.md/?platform=gitlab#connect-to-a-remote-repository). + +## Advanced use cases + +Git sync supports a variety of development workflows, including the following advanced use cases. + +### Monorepos -- **Organization owners/administrators:** Edit and update all configuration options. -- **Project administrators - Self-Managed:** Edit and update all configuration options. -- **Project administrators - SaaS:** Edit and update only the **GitHub repository URL** and **branch name**. -- **Project editors:** Cannot make changes to the GitHub configuration. +A monorepo is a single repository containing multiple logical projects that each have disparate workflows and release cadences. + +To set up Git sync with a monorepo, you can specify the **path** to your project during the configuration. This allows you to keep multiple projects in one repository, each with its own sync configuration. + +:::note +If you are using Git sync to work with monorepos, you should pull changes regularly, as the GitHub API is limited to a fixed number of files and commits per synchronization action. See [troubleshooting](#troubleshooting) for more information. +::: + +### Parallel feature development + +Git sync supports parallel feature development by allowing multiple process applications to be connected to different feature branches. This allows teams to work on multiple features simultaneously without interfering with each other's work. + +To use Git sync for parallel feature development: + +1. Create a new [process application](/docs/components/modeler/web-modeler/create-a-process-application.md) in Modeler for each active feature branch you want to develop. +2. Configure Git sync for each instance by connecting it to the corresponding feature branch in your repository. +3. 
Work on your feature in Modeler, using **Sync with GitHub** or **Sync with GitLab** to pull and push changes as needed. +4. Once the feature is complete and merged into the main branch, you can delete the process application associated with the feature branch. + +To perform hotfixes or patches of production or production-bound processes, sync a copy of the process application to the `main` branch. + +:::caution +Creating multiple copies of a process application can complicate navigation and deployment if you have multiple files with the same ID in a project. To avoid this, you can create copies of the process application in different projects. +::: ## Troubleshooting @@ -95,3 +197,5 @@ Existing GitHub configurations can be edited from the gear icon beside the **Syn - When synchronizing for the first time with a remote repository that already contains commits, Web Modeler will attempt to select a main process with a file name that matches its own main process. If there is no matching process, Web Modeler will select a process at random from the available `.bpmn` files. In the event that no `.bpmn` files exist in the remote repository, Web Modeler will not proceed, and will instead display an error message. Ensure the main process is correctly assigned, especially in cases where a random process has been selected. - Actions which alter the SHA of the commit to which Web Modeler is synced (for example, squash) may cause synchronization errors. - Timeouts may occur during a sync. In the event of a timeout, close the modal and retry the synchronization. +- Using self-hosted instances of Git providers may require additional configuration. Refer to the Web Modeler configuration part for your [git host](#connect-to-a-remote-repository) for more details. +- **(GitHub specific)** A single synchronization action is limited to incorporating a maximum of 250 commits or making changes to up to 300 files, regardless of whether these changes affect the Web Modeler files directly. Web Modeler does not provide a notification when these thresholds are exceeded. Should you encounter this limitation, it may be necessary to initiate a fresh synchronization. A fresh synchronization fetches all the files in the repository without relying on the incremental changes, thus bypassing the limitations. This can be achieved by either changing the branch or modifying the GitHub repository URL. 
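To make the monorepo **Path** option described above more concrete, here is an illustrative repository layout (all folder and file names are assumptions) in which two process applications live in one repository and each is synced with its own **Path**:

```
example-repo/                  # single Git repository
├── order-process-app/         # Path configured for process application A
│   ├── order-process.bpmn
│   └── order-form.form
└── invoice-process-app/       # Path configured for process application B
    ├── invoice-process.bpmn
    └── approval.dmn
```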
diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png index 4dbd018b1e8..baa536d91a8 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png index 181ef2fe17c..9aee9ca25c7 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png index 7f58e0844db..d1d2f6c3c40 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png index d5dfda7d208..e12fc4628b0 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png index c03ecf4b021..d6ad478a770 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png index 89b87af5218..92572f56797 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png index e69b47a3b0a..0a1a13ca2de 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png 
b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png index a25925bce92..8286a484e2f 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png index 54660f1fd52..5561d5de7e0 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-home.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-home.png index 6374c015832..1f482c5dc85 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-home.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-home.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png index c30496a06b2..13682310801 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png index fff1f8ad0f1..b299fb877a3 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png index e535d5c54b8..3df77ca71d2 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png index 86838c1950e..a804f8fb698 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png differ diff --git a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png index 5219a796154..c2e6e89611d 100644 Binary files a/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png and b/docs/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png differ diff --git a/docs/components/modeler/web-modeler/img/design-mode.png b/docs/components/modeler/web-modeler/img/design-mode.png index a3dfbfa0130..2f7dcbc4d64 100644 Binary files 
a/docs/components/modeler/web-modeler/img/design-mode.png and b/docs/components/modeler/web-modeler/img/design-mode.png differ diff --git a/docs/components/modeler/web-modeler/img/implement-mode.png b/docs/components/modeler/web-modeler/img/implement-mode.png index c54fe4ef18b..cea00459251 100644 Binary files a/docs/components/modeler/web-modeler/img/implement-mode.png and b/docs/components/modeler/web-modeler/img/implement-mode.png differ diff --git a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png index cc2cda7fff3..a663caaa99d 100644 Binary files a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png and b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png differ diff --git a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png index e2c75332646..fff577bad0e 100644 Binary files a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png and b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png differ diff --git a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png index 42613495b8b..1df7fdb58fe 100644 Binary files a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png and b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png differ diff --git a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png index 10466afdff0..3d088c32deb 100644 Binary files a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png and b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png differ diff --git a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png index c21b5dc4567..5813766fb23 100644 Binary files a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png and b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png differ diff --git a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png index d98c2b940d0..e22dbc5c803 100644 Binary files a/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png and b/docs/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png index 664a0feeae9..8bde0c3efd3 100644 Binary files 
a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png index 5018b3ef613..bdfe9014ba8 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png index bf65f281071..2b8b9289449 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png index 5ecc85f84da..e95ab6f6ad3 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png index a68638ba6d9..d9b0243c171 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png index 3d961631d73..1b9cc9659c4 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png index f8f54538a3b..e8c54754784 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png index 51560aa75eb..3fed3621ad1 100644 Binary files 
a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png index ca70a36d5c9..933ab3662f1 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png index f5440304331..12fc1953318 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png index fb6224defa7..ad160f87c97 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png index 20fce005c76..4e165589c69 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png index c46de3c033f..59f4cc97c22 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png index 2516876f300..70f8e5f2de8 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png differ diff --git a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png index 7badc1a6061..27e06e6fe45 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png differ diff --git 
a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png index 087600bec01..9696f5f681b 100644 Binary files a/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png and b/docs/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png differ diff --git a/docs/components/modeler/web-modeler/img/read-only-properties.png b/docs/components/modeler/web-modeler/img/read-only-properties.png index 6e88ad52269..172fcfe1248 100644 Binary files a/docs/components/modeler/web-modeler/img/read-only-properties.png and b/docs/components/modeler/web-modeler/img/read-only-properties.png differ diff --git a/docs/components/modeler/web-modeler/img/real-time-collaboration.png b/docs/components/modeler/web-modeler/img/real-time-collaboration.png index 9516ed0aa6c..b20ef040fd7 100644 Binary files a/docs/components/modeler/web-modeler/img/real-time-collaboration.png and b/docs/components/modeler/web-modeler/img/real-time-collaboration.png differ diff --git a/docs/components/modeler/web-modeler/img/web-modeler-add-endevent.png b/docs/components/modeler/web-modeler/img/web-modeler-add-endevent.png index 1c9ccf8c843..f1278a7909c 100644 Binary files a/docs/components/modeler/web-modeler/img/web-modeler-add-endevent.png and b/docs/components/modeler/web-modeler/img/web-modeler-add-endevent.png differ diff --git a/docs/components/modeler/web-modeler/img/web-modeler-add-task.png b/docs/components/modeler/web-modeler/img/web-modeler-add-task.png index a1880dd3aab..20e1173de3f 100644 Binary files a/docs/components/modeler/web-modeler/img/web-modeler-add-task.png and b/docs/components/modeler/web-modeler/img/web-modeler-add-task.png differ diff --git a/docs/components/modeler/web-modeler/img/web-modeler-blueprint.png b/docs/components/modeler/web-modeler/img/web-modeler-blueprint.png index 97034bf53f3..6a8d34fb60d 100644 Binary files a/docs/components/modeler/web-modeler/img/web-modeler-blueprint.png and b/docs/components/modeler/web-modeler/img/web-modeler-blueprint.png differ diff --git a/docs/components/modeler/web-modeler/img/web-modeler-deploy.png b/docs/components/modeler/web-modeler/img/web-modeler-deploy.png index 6597df42fed..18dc70503b4 100644 Binary files a/docs/components/modeler/web-modeler/img/web-modeler-deploy.png and b/docs/components/modeler/web-modeler/img/web-modeler-deploy.png differ diff --git a/docs/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png b/docs/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png index fe3d00c4fc2..409f09dc983 100644 Binary files a/docs/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png and b/docs/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png differ diff --git a/docs/components/modeler/web-modeler/img/web-modeler-start-instance.png b/docs/components/modeler/web-modeler/img/web-modeler-start-instance.png index 4e50d016129..dc2ac7d54f1 100644 Binary files a/docs/components/modeler/web-modeler/img/web-modeler-start-instance.png and b/docs/components/modeler/web-modeler/img/web-modeler-start-instance.png differ diff --git a/docs/components/modeler/web-modeler/launch-web-modeler.md b/docs/components/modeler/web-modeler/launch-web-modeler.md index 818f394ed8d..ccf7c515719 100644 --- a/docs/components/modeler/web-modeler/launch-web-modeler.md +++ 
b/docs/components/modeler/web-modeler/launch-web-modeler.md @@ -14,8 +14,8 @@ To launch Web Modeler, follow the steps below: 2. Select **Create new project** to create a new project and store diagrams. ![web modeler empty home](img/web-modeler-new-user-home.png) 3. Name your diagram. You can go back and change the name any time by clicking on the project name and **Edit name**. -4. Select **Browse blueprints** to view blueprints for various use cases as a starting point for your first diagram. Open these blueprints by selecting **Use Blueprint**. Alternatively, click **Create new > BPMN diagram** to create a blank BPMN diagram. +4. Select **Browse blueprints** to open the blueprints dialog and browse blueprints for various use cases as a starting point for your first diagram. ![web modeler blueprint browsing](img/web-modeler-blueprint.png) -5. While browsing blueprints, you can also open the details of a specific blueprint by selecting **More details**. This opens a new tab in the [Camunda Marketplace](/components/modeler/web-modeler/camunda-marketplace.md). Here, you can have a closer look at the diagram, and open it in SaaS or Self-Managed. - -![Camunda marketplace example](img/camunda-marketplace-example.png) +5. While browsing blueprints, open the details of a specific blueprint by selecting **More details**. This opens a new tab in [Camunda Marketplace](/components/modeler/web-modeler/camunda-marketplace.md). Here, have a closer look at the diagram, and open it in SaaS or Self-Managed. + ![Camunda marketplace example](img/camunda-marketplace-example.png) +6. Open a blueprint by selecting **Use blueprint**, which downloads the blueprint into the project and opens it in the diagram screen. Alternatively, click **Create new > BPMN diagram** to create a blank BPMN diagram. diff --git a/docs/components/modeler/web-modeler/milestones.md b/docs/components/modeler/web-modeler/milestones.md index 6782f0d162b..2d78dbae0ad 100644 --- a/docs/components/modeler/web-modeler/milestones.md +++ b/docs/components/modeler/web-modeler/milestones.md @@ -32,7 +32,7 @@ You can create a new milestone either from your diagram or the milestone history ![milestones create via the breadcrumb menu](img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png) -- From the milestone history, hover over the the latest version in the **Milestones** panel and select **Create a new milestone**. +- From the milestone history, hover over the draft in the **Milestones** panel and select **Create a new milestone**. ![milestones create via icon](img/milestones/web-modeler-milestone-create-via-icon-highlight.png) diff --git a/docs/components/modeler/web-modeler/play-your-process.md b/docs/components/modeler/web-modeler/play-your-process.md index 55e63a62768..64eaaa6848d 100644 --- a/docs/components/modeler/web-modeler/play-your-process.md +++ b/docs/components/modeler/web-modeler/play-your-process.md @@ -14,7 +14,7 @@ Play is a Zeebe-powered playground environment within Web Modeler for validating To use Play, open a BPMN diagram and click the **Play** tab. Read the [limitations and availability section](#limitations-and-availability) if this section is missing. -In Self-Managed, you are prompted to select from the clusters defined in your Web Modeler [configuration](/self-managed/modeler/web-modeler/configuration/configuration.md#clusters). The Camunda 8 Docker Compose distribution provides one cluster configured by default. 
If no configuration is found, you are prompted to [manually enter your cluster details](#use-play-with-camunda-self-managed). +In Self-Managed, you are prompted to select from the clusters defined in your Web Modeler [configuration](/self-managed/modeler/web-modeler/configuration/configuration.md#clusters). The Camunda 8 Helm and Docker Compose distributions provide one cluster configured by default. A Play environment is then started that utilizes your selected development cluster in SaaS, or the specified cluster in a Self-Managed setup. @@ -151,7 +151,9 @@ This section explains why you might not see the **Play** tab, and any additional For more information about terms, refer to our [licensing and terms page](https://legal.camunda.com/licensing-and-other-legal-terms#c8-saas-trial-edition-and-free-tier-edition-terms). -Although Play is compatible with cluster versions 8.5.1 and above, we fully support and recommend using versions 8.6.0 or higher. +**Version compatibility:** Although Play is compatible with cluster versions 8.5.1 and above, Camunda fully supports and recommends using versions 8.6.0 or higher. + +**Execution listeners:** Play does not currently support [execution listeners](/components/concepts/execution-listeners.md). As a workaround, you can skip the element using [modifications](#modify-a-process-instance). ### Camunda 8 SaaS @@ -160,10 +162,6 @@ Additionally, within their organization, users need to have a [role](/components ### Camunda 8 Self-Managed -:::note -To use Play with Docker, ensure OAuth is enabled for your configured components. The `docker-compose-core.yaml` file in the Camunda [platform repository](https://github.com/camunda/camunda-platform) does not provide authentication, and cannot be used with Play. -::: - In Self-Managed, Play is controlled by the `PLAY_ENABLED` [configuration property](/self-managed/modeler/web-modeler/configuration/configuration.md#feature-flags) in Web Modeler. This is `true` by default for the Docker and Kubernetes distributions. Prior to the 8.6 release, Play can be accessed by installing the 8.6.0-alpha [Helm charts](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-alpha), or running the 8.6.0-alpha [Docker Compose](https://github.com/camunda/camunda-platform/tree/main/docker-compose/camunda-8.6) configuration. @@ -177,25 +175,11 @@ Prior to the 8.6 release, Play can be accessed by installing the 8.6.0-alpha [He ## Use Play with Camunda Self-Managed -After selecting the **Play** tab in Self-Managed, you are prompted to select from the clusters defined in your Web Modeler [configuration](/self-managed/modeler/web-modeler/configuration/configuration.md#clusters). The Camunda 8 Docker Compose distribution provides one cluster configured by default. 
- -If no cluster is configured, Web Modeler requests the following cluster details to use for deployment: - -| Name | Description | Example value | -| ----------------- | ----------------------------------------------- | ---------------------------------------------------------------------------------- | -| Cluster endpoint | Address where your cluster can be reached | `http://zeebe:26500` | -| Operate base url | Address where Operate can be reached | `http://operate:8080` | -| Operate audience | Permission name for Operate | `operate-api` | -| Tasklist base url | Address where Tasklist can be reached | `http://tasklist:8080` | -| Tasklist audience | Permission name for Tasklist | `tasklist-api` | -| Zeebe rest url | Address where the Zeebe REST API can be reached | `http://zeebe:8080` | -| Client ID | Name of your registered client | `zeebe` | -| Client secret | Password for your registered client | `zecret` | -| OAuth token url | Token issuer server | `http://keycloak:18080/auth/realms/camunda-platform/protocol/openid-connect/token` | -| OAuth audience | Permission name for Zeebe | `zeebe-api` | +After selecting the **Play** tab in Self-Managed, you are prompted to select from the clusters defined in your Web Modeler [configuration](/self-managed/modeler/web-modeler/configuration/configuration.md#clusters). The Camunda 8 Helm and Docker Compose distributions provide one cluster configured by default. ### Limitations +- Play does not support multi-tenancy. - The environment variables `CAMUNDA_CUSTOM_CERT_CHAIN_PATH`, `CAMUNDA_CUSTOM_PRIVATE_KEY_PATH`, `CAMUNDA_CUSTOM_ROOT_CERT_PATH`, and `CAMUNDA_CUSTOM_ROOT_CERT_STRING` can be set in Docker or Helm chart setups. However, these configurations have not been tested with Play's behavior, and therefore are not supported when used with Play. - Play cannot check the presence of Connector secrets in Self-Managed setups. If a secret is missing, Play will show an incident at runtime. diff --git a/docs/components/modeler/web-modeler/run-or-publish-your-process.md b/docs/components/modeler/web-modeler/run-or-publish-your-process.md index 82233c6ada1..1c5441f1a99 100644 --- a/docs/components/modeler/web-modeler/run-or-publish-your-process.md +++ b/docs/components/modeler/web-modeler/run-or-publish-your-process.md @@ -37,7 +37,7 @@ To deploy, click **Deploy** in the upper right corner of the modeling screen: ![The deploy dialog of a BPMN diagram](img/web-modeler-deploy.png) -In Self-Managed, you can deploy your diagram to the cluster defined in your Web Modeler [configuration](/self-managed/modeler/web-modeler/configuration/configuration.md#clusters). If no configuration is found, you are prompted to manually enter your cluster details. +In Self-Managed, you can deploy your diagram to the cluster defined in your Web Modeler [configuration](/self-managed/modeler/web-modeler/configuration/configuration.md#clusters). You should have the `Zeebe` [role](/self-managed/identity/user-guide/roles/add-assign-role.md/#add-a-role) assigned in Identity to be authorized to deploy. 
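The hunk above describes deploying a diagram from the Web Modeler UI to the cluster defined in the Self-Managed configuration. For readers who script the same step, a minimal sketch with the Zeebe Java client follows. It is an illustration only, not part of the change above, and it assumes a locally reachable gateway at `localhost:26500` without authentication plus a hypothetical exported file name `submit-expense.bpmn`.

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.DeploymentEvent;

public class DeployDiagram {
  public static void main(String[] args) {
    // Connect to a local Self-Managed gateway; add OAuth credentials and TLS
    // instead of usePlaintext() if your cluster enforces authentication.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500")
            .usePlaintext()
            .build()) {

      // Deploy the BPMN file exported from Web Modeler (hypothetical file name).
      DeploymentEvent deployment =
          client
              .newDeployResourceCommand()
              .addResourceFromClasspath("submit-expense.bpmn")
              .send()
              .join();

      System.out.println("Deployed with key: " + deployment.getKey());
    }
  }
}
```

The same authorization caveat applies either way: whichever credentials perform the deployment need deployment permissions on the cluster, just as the Web Modeler user needs the `Zeebe` role.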
### Before deploying a process diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png index 3ad06d1daa5..98c7592a604 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png index 1bd77e49196..8c44872ab94 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png index 9d14fb9b8b3..2db1a42914d 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png index f9eb0b6a355..d26a7cbb744 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png index b9c552a2d45..83a4708f98d 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png index 56f4feedcb5..09a9d86a33e 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png index c6cf678d3c2..ba0a4fba1b6 100644 Binary files 
a/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png index f9363676e39..c14fe0332dd 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png differ diff --git a/docs/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png b/docs/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png index e404c64a5d8..25c3e55c0ee 100644 Binary files a/docs/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png and b/docs/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/decision-button.png b/docs/components/operate/userguide/img/delete-resources/decision-button.png index 89568da7abb..e4fb5d3a6d9 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/decision-button.png and b/docs/components/operate/userguide/img/delete-resources/decision-button.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/decision-filters.png b/docs/components/operate/userguide/img/delete-resources/decision-filters.png index c1297720b91..8f373547314 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/decision-filters.png and b/docs/components/operate/userguide/img/delete-resources/decision-filters.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/decision-modal.png b/docs/components/operate/userguide/img/delete-resources/decision-modal.png index 04ecfe5fa1b..3f5a2f4b81c 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/decision-modal.png and b/docs/components/operate/userguide/img/delete-resources/decision-modal.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/decision-operations-panel.png b/docs/components/operate/userguide/img/delete-resources/decision-operations-panel.png index 995dd31e432..dab311866eb 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/decision-operations-panel.png and b/docs/components/operate/userguide/img/delete-resources/decision-operations-panel.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/process-button.png b/docs/components/operate/userguide/img/delete-resources/process-button.png index 759862de71d..266feed89de 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/process-button.png and b/docs/components/operate/userguide/img/delete-resources/process-button.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/process-filters.png b/docs/components/operate/userguide/img/delete-resources/process-filters.png index 10b92398fe0..e59dd6c0be6 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/process-filters.png and 
b/docs/components/operate/userguide/img/delete-resources/process-filters.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/process-modal.png b/docs/components/operate/userguide/img/delete-resources/process-modal.png index 987cd8c2a2e..e7b43dd6881 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/process-modal.png and b/docs/components/operate/userguide/img/delete-resources/process-modal.png differ diff --git a/docs/components/operate/userguide/img/delete-resources/process-operations-panel.png b/docs/components/operate/userguide/img/delete-resources/process-operations-panel.png index 294a89a256e..4695be9a40a 100644 Binary files a/docs/components/operate/userguide/img/delete-resources/process-operations-panel.png and b/docs/components/operate/userguide/img/delete-resources/process-operations-panel.png differ diff --git a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png index 0420066f41c..ab58ba88927 100644 Binary files a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png and b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png differ diff --git a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png index 529f8459e71..af55d0f72e3 100644 Binary files a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png and b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png differ diff --git a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png index 6ad81347f76..512e61877cd 100644 Binary files a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png and b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png differ diff --git a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png index a4dfa0b55b7..216cb1ba1f7 100644 Binary files a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png and b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png differ diff --git a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png index bda1fae52da..60828f6b37e 100644 Binary files a/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png and b/docs/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png differ diff --git a/docs/components/operate/userguide/img/monitor-operation-state/expand-row-button.png b/docs/components/operate/userguide/img/monitor-operation-state/expand-row-button.png index 070a4e3ceaa..196c9f12acd 100644 Binary files a/docs/components/operate/userguide/img/monitor-operation-state/expand-row-button.png and 
b/docs/components/operate/userguide/img/monitor-operation-state/expand-row-button.png differ diff --git a/docs/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png b/docs/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png index 4080f3073a5..e1a7ec398bb 100644 Binary files a/docs/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png and b/docs/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png differ diff --git a/docs/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png b/docs/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png index cfdd18812c9..d6e33aaf57d 100644 Binary files a/docs/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png and b/docs/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png differ diff --git a/docs/components/operate/userguide/img/monitor-operation-state/operation-state-row.png b/docs/components/operate/userguide/img/monitor-operation-state/operation-state-row.png index 2b715e663fe..5e7cc20f808 100644 Binary files a/docs/components/operate/userguide/img/monitor-operation-state/operation-state-row.png and b/docs/components/operate/userguide/img/monitor-operation-state/operation-state-row.png differ diff --git a/docs/components/operate/userguide/img/monitor-operation-state/operations-panel.png b/docs/components/operate/userguide/img/monitor-operation-state/operations-panel.png index 553d9f6b9f6..dc65cf042f3 100644 Binary files a/docs/components/operate/userguide/img/monitor-operation-state/operations-panel.png and b/docs/components/operate/userguide/img/monitor-operation-state/operations-panel.png differ diff --git a/docs/components/operate/userguide/img/process-instance-migration/highlight-mapping.png b/docs/components/operate/userguide/img/process-instance-migration/highlight-mapping.png index 278f7715043..ea4f1f654b4 100644 Binary files a/docs/components/operate/userguide/img/process-instance-migration/highlight-mapping.png and b/docs/components/operate/userguide/img/process-instance-migration/highlight-mapping.png differ diff --git a/docs/components/operate/userguide/img/process-instance-migration/map-elements.png b/docs/components/operate/userguide/img/process-instance-migration/map-elements.png index 535b2533e78..5de38787656 100644 Binary files a/docs/components/operate/userguide/img/process-instance-migration/map-elements.png and b/docs/components/operate/userguide/img/process-instance-migration/map-elements.png differ diff --git a/docs/components/operate/userguide/img/process-instance-migration/migrate-button.png b/docs/components/operate/userguide/img/process-instance-migration/migrate-button.png index 13dec77b84e..0583fc4e6a7 100644 Binary files a/docs/components/operate/userguide/img/process-instance-migration/migrate-button.png and b/docs/components/operate/userguide/img/process-instance-migration/migrate-button.png differ diff --git a/docs/components/operate/userguide/img/process-instance-migration/process-filters.png b/docs/components/operate/userguide/img/process-instance-migration/process-filters.png index 88f090c5e3d..041af6d034e 100644 Binary files a/docs/components/operate/userguide/img/process-instance-migration/process-filters.png and b/docs/components/operate/userguide/img/process-instance-migration/process-filters.png differ diff --git 
a/docs/components/operate/userguide/img/process-instance-migration/select-target-process.png b/docs/components/operate/userguide/img/process-instance-migration/select-target-process.png index 8636cb9a24d..5d00a37fad7 100644 Binary files a/docs/components/operate/userguide/img/process-instance-migration/select-target-process.png and b/docs/components/operate/userguide/img/process-instance-migration/select-target-process.png differ diff --git a/docs/components/operate/userguide/img/process-instance-migration/summary.png b/docs/components/operate/userguide/img/process-instance-migration/summary.png index a8a903642c3..b554d902212 100644 Binary files a/docs/components/operate/userguide/img/process-instance-migration/summary.png and b/docs/components/operate/userguide/img/process-instance-migration/summary.png differ diff --git a/docs/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png b/docs/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png index bda1fae52da..60828f6b37e 100644 Binary files a/docs/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png and b/docs/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png differ diff --git a/docs/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png b/docs/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png index fb46985eba1..bd377b6013b 100644 Binary files a/docs/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png and b/docs/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png differ diff --git a/docs/components/operate/userguide/img/selections-and-operations/operate-select-operation.png b/docs/components/operate/userguide/img/selections-and-operations/operate-select-operation.png index a136dd6b3fd..526a870144d 100644 Binary files a/docs/components/operate/userguide/img/selections-and-operations/operate-select-operation.png and b/docs/components/operate/userguide/img/selections-and-operations/operate-select-operation.png differ diff --git a/docs/components/operate/userguide/monitor-operation-status.md b/docs/components/operate/userguide/monitor-operation-status.md index e4cb4781f22..ad7e066a3d6 100644 --- a/docs/components/operate/userguide/monitor-operation-status.md +++ b/docs/components/operate/userguide/monitor-operation-status.md @@ -14,7 +14,7 @@ First, go to the processes view in Operate by clicking **Processes** in the top If you don't have any operations, the operations panel will show "No operations have been created yet." ::: -Select an operation from the list by clicking on the operation id. Notice that the operation is set as a filter: +Select an operation from the list by clicking on the operation ID. 
Notice that the operation is set as a filter: ![operate-view-process-filters](./img/monitor-operation-state/expanded-operations-panel.png) diff --git a/docs/components/operate/userguide/process-instance-migration.md b/docs/components/operate/userguide/process-instance-migration.md index e05e36cb63a..e6973291c9a 100644 --- a/docs/components/operate/userguide/process-instance-migration.md +++ b/docs/components/operate/userguide/process-instance-migration.md @@ -19,7 +19,7 @@ Process instances can be migrated from one specific process definition version t ![operate-migrate-button](./img/process-instance-migration/migrate-button.png) :::note -It is only possible to migrate running process instances, meaning instances in active or incident state. All other process instances will not be part of the migration plan and will be ignored. +It is only possible to migrate running process instances, meaning instances in an active or incident state. All other process instances will not be part of the migration plan and will be ignored. Learn more about [all limitations](/components/concepts/process-instance-migration.md#limitations). ::: The migration view features three areas: the source process diagram (top left), the target process diagram (top right) and the flow node mapping (bottom panel). @@ -37,7 +37,7 @@ In this example, all service tasks from version 1 of `orderProcess` are each map ![operate-view-process-filters](./img/process-instance-migration/map-elements.png) :::note -It is currently only possible to map service tasks, user tasks, subprocesses, call activities, and child instances. Mapping subprocesses to a different scope or mapping event subprocesses is not yet supported by Zeebe. To learn about all limitations, visit the [concepts section](/components/concepts/process-instance-migration.md#limitations). +It is currently only possible to map elements with migration supported by Zeebe. Learn more about [supported elements](/components/concepts/process-instance-migration.md#supported-bpmn-elements). ::: 6. (Optional) Click on a flow node in the diagram or on a source flow node row in the bottom panel to see how flow nodes are mapped. diff --git a/docs/components/tasklist/userguide/defining-task-priorities.md b/docs/components/tasklist/userguide/defining-task-priorities.md index 01a41c9bdc7..84087789162 100644 --- a/docs/components/tasklist/userguide/defining-task-priorities.md +++ b/docs/components/tasklist/userguide/defining-task-priorities.md @@ -6,7 +6,7 @@ description: "Organize and order your tasks with clear prioritization." import styles from "./styles.module.css"; -You can add prioritization to [User Task elements](/components/modeler/bpmn/user-tasks/user-tasks.md) by specifying a priority value for a user task. This determines the task's importance in relation to other tasks within processes. +You can add prioritization to [user task elements](/components/modeler/bpmn/user-tasks/user-tasks.md) by specifying a priority value for a user task. This determines the task's importance in relation to other tasks within processes. - The task priority is an **integer** value ranging from 0 to 100, with a default value of 50. - A higher priority value indicates higher importance. @@ -26,11 +26,11 @@ These labels give Tasklist users a clear view of task priority, making it easier This step-by-step guide shows you how to define task priorities for Tasklist users. -### 1. Model a BPMN Process +### 1. 
Model a BPMN process Start by modeling your [BPMN process in Modeler](/guides/automating-a-process-using-bpmn.md), ensuring that the required user tasks are defined within the process. -### 2. Set a Priority for User Tasks +### 2. Set a priority for user tasks During user task configuration you can specify a priority value. You can also define the value using an [expression](/components/concepts/expressions.md). @@ -38,17 +38,17 @@ The priority value determines the task's importance relative to other tasks. ![set-user-task-priority-in-modeler](img/modeler-user-task-priority.jpg) -### 3. Deploy and Start the Process +### 3. Deploy and start the process After the process is fully defined and all configurations are complete, the process can be deployed and started. The priority values are now associated with each user task within the process. -### 4. Task Priority in Tasklist +### 4. View task priority in Tasklist Tasklist users can view the tasks assigned to them within their task list. Each task card displays the assigned priority label, ensuring users have a clear understanding of the task's importance and priority. ![set-user-task-priority-in-modeler](img/tasklist–tasks-with-priority.jpg) -### 5. Sort Tasks by Priority +### 5. Sort tasks by priority Task users can sort tasks by priority. This helps users organize their workload by focusing on urgent items first. diff --git a/docs/components/tasklist/userguide/img/modeler-user-task-priority.jpg b/docs/components/tasklist/userguide/img/modeler-user-task-priority.jpg index df0f3db07a8..847680dfd8a 100644 Binary files a/docs/components/tasklist/userguide/img/modeler-user-task-priority.jpg and b/docs/components/tasklist/userguide/img/modeler-user-task-priority.jpg differ diff --git a/docs/components/tasklist/userguide/img/tasklist-language-settings.jpg b/docs/components/tasklist/userguide/img/tasklist-language-settings.jpg index 564d12ea78e..edc13fe3bc5 100644 Binary files a/docs/components/tasklist/userguide/img/tasklist-language-settings.jpg and b/docs/components/tasklist/userguide/img/tasklist-language-settings.jpg differ diff --git a/docs/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg b/docs/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg index 7d7035c447b..15478961d8b 100644 Binary files a/docs/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg and b/docs/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg differ diff --git "a/docs/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" "b/docs/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" index 86c1ed19da5..52310ac02de 100644 Binary files "a/docs/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" and "b/docs/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" differ diff --git a/docs/components/tasklist/userguide/using-tasklist.md b/docs/components/tasklist/userguide/using-tasklist.md index 2d6b9b5f825..cef0c352bf5 100644 --- a/docs/components/tasklist/userguide/using-tasklist.md +++ b/docs/components/tasklist/userguide/using-tasklist.md @@ -68,6 +68,8 @@ From the task detail page you can switch to the **Process** tab. This provides a :::note The diagram indicates the version of the process instance in which the task was initiated. + +This feature is available for diagrams deployed on version 8.6 or higher. 
::: #### Resource-based access (RBA) diff --git a/docs/components/zeebe/technical-concepts/architecture.md b/docs/components/zeebe/technical-concepts/architecture.md index 8921c856a4d..3a0d3e1f936 100644 --- a/docs/components/zeebe/technical-concepts/architecture.md +++ b/docs/components/zeebe/technical-concepts/architecture.md @@ -36,7 +36,7 @@ Client applications can be scaled up and down separately from Zeebe. The Zeebe b Clients are libraries you embed in an application (e.g. a microservice that executes your business logic) to connect to a Zeebe cluster. -Clients connect to the Zeebe Gateway via a mix of REST and [gRPC](https://grpc.io). While REST can be served over any HTTP version, the gRPC part of the API requires an HTTP/2-based transport. To learn more about how REST is used in Zeebe, review the [Zeebe API (REST)](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md). To learn more about gRPC in Zeebe, review the [Zeebe API (gRPC)](/apis-tools/zeebe-api/grpc.md). +Clients connect to the Zeebe Gateway via a mix of REST and [gRPC](https://grpc.io). While REST can be served over any HTTP version, the gRPC part of the API requires an HTTP/2-based transport. To learn more about how REST is used in Zeebe, review the [Camunda API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md). To learn more about gRPC in Zeebe, review the [Zeebe API (gRPC)](/apis-tools/zeebe-api/grpc.md). The Zeebe project includes officially-supported Java and Go clients. [Community clients](/apis-tools/community-clients/index.md) have been created in other languages, including C#, Ruby, and JavaScript. Thanks to code generators for gRPC and the OpenAPI spec, it is possible to [generate clients](/apis-tools/build-your-own-client.md) in a range of different programming languages. @@ -52,7 +52,7 @@ The gateway is stateless and sessionless, and gateways can be added as necessary ## Brokers -The Zeebe broker is the distributed workflow engine that tracks the state of active process instances. +The Zeebe Broker is the distributed workflow engine that tracks the state of active process instances. Brokers can be partitioned for horizontal scalability and replicated for fault tolerance. A Zeebe deployment often consists of more than one broker. diff --git a/docs/components/zeebe/technical-concepts/process-lifecycles.md b/docs/components/zeebe/technical-concepts/process-lifecycles.md index 10b223e6fdc..1dadec1f9d1 100644 --- a/docs/components/zeebe/technical-concepts/process-lifecycles.md +++ b/docs/components/zeebe/technical-concepts/process-lifecycles.md @@ -31,7 +31,7 @@ Given the above process, a successful execution yields the following records in
    - + diff --git a/docs/components/zeebe/zeebe-overview.md b/docs/components/zeebe/zeebe-overview.md index c1fda640f43..0e80316cde3 100644 --- a/docs/components/zeebe/zeebe-overview.md +++ b/docs/components/zeebe/zeebe-overview.md @@ -20,12 +20,6 @@ With Zeebe you can: For documentation on deploying Zeebe as part of Camunda 8 Self-Managed, refer to the [deployment guide](../../self-managed/zeebe-deployment/zeebe-installation.md). -## Enterprise support for Zeebe - -Paid support for Zeebe is available via either Camunda 8 Starter or Camunda 8 Enterprise plans. Customers can choose either plan based on their process automation requirements. Camunda 8 Enterprise customers also have the option of on-premises or private cloud deployment. - -Additionally, regardless of how you are working with Zeebe and Camunda 8, you can always find support through the [community](/contact/). - ## Next steps - Get familiar with [technical concepts](technical-concepts/technical-concepts-overview.md). diff --git a/docs/guides/configuring-out-of-the-box-connector.md b/docs/guides/configuring-out-of-the-box-connector.md index 18316fce3a6..bf62341e1f9 100644 --- a/docs/guides/configuring-out-of-the-box-connector.md +++ b/docs/guides/configuring-out-of-the-box-connector.md @@ -1,6 +1,6 @@ --- id: configuring-out-of-the-box-connectors -title: Configure an out-of-the-box Connector +title: Integrate a Camunda Connector description: "Ready to use out of the box, Connectors help automate complex business processes by inserting them into BPMN diagrams." keywords: [connector, modeling, connectors, low-code, no-code] --- @@ -39,7 +39,7 @@ Once logged in to your Camunda 8 account, take the following steps: 1. From Modeler, click **New project > Create new > BPMN diagram**. 2. Name your project by replacing the **New Project** text at the top of the page. In this example, we'll name ours `Expense process`. 3. Select **Create new > BPMN diagram**. -4. Give your model a descriptive name by replacing the **New BPMN Diagram** text at the top of the page. Then, give your model a descriptive id within the **General** tab inside the properties panel on the right side of the screen. In this case, we've named our model `Submit expense` with an id of `submitting-expense`. +4. Give your model a descriptive name by replacing the **New BPMN Diagram** text at the top of the page. Then, give your model a descriptive ID within the **General** tab inside the properties panel on the right side of the screen. In this case, we've named our model `Submit expense` with an ID of `submitting-expense`. ## Build a BPMN diagram diff --git a/docs/guides/create-cluster.md b/docs/guides/create-cluster.md index a93f27b4e87..aed054f616a 100644 --- a/docs/guides/create-cluster.md +++ b/docs/guides/create-cluster.md @@ -1,6 +1,6 @@ --- id: create-cluster -title: Create your cluster +title: Create a cluster description: "Create a cluster in Camunda 8 to deploy and run your process." --- diff --git a/docs/guides/getting-started-java-spring.md b/docs/guides/getting-started-java-spring.md index 225a4b677b8..57991df165c 100644 --- a/docs/guides/getting-started-java-spring.md +++ b/docs/guides/getting-started-java-spring.md @@ -113,39 +113,19 @@ To implement a service task, take the following steps: ### Configure Spring Boot Starter -See our documentation on [adding the Spring Zeebe SDK to your project](/apis-tools/spring-zeebe-sdk/getting-started.md#add-the-spring-zeebe-sdk-to-your-project) for more details, also described below: - -1. 
Copy the following code snippet into the `pom.xml` file of your Spring project, below properties and above dependencies:
-
-```xml
-<repositories>
-  <repository>
-    <releases>
-      <enabled>true</enabled>
-    </releases>
-    <snapshots>
-      <enabled>false</enabled>
-    </snapshots>
-    <id>identity</id>
-    <name>Camunda Identity</name>
-    <url>https://artifacts.camunda.com/artifactory/camunda-identity/</url>
-  </repository>
-</repositories>
-```
-
-2. Add the following dependency to your `pom.xml` file, as a child of the `<dependencies>` element: +Add the following Maven dependency to your Spring Boot Starter project, replacing `x` with the latest patch level available: ```xml
 <dependency>
-  <groupId>io.camunda</groupId>
-  <artifactId>spring-boot-starter-camunda-sdk</artifactId>
-  <version>8.6.3</version>
+  <groupId>io.camunda</groupId>
+  <artifactId>spring-boot-starter-camunda-sdk</artifactId>
+  <version>8.6.x</version>
 </dependency>
``` ### Configure the Zeebe client -Open your `src/main/resources/application.yaml` file, and paste the following snippet to connect to the Self-Managed Zeebe broker: +Open your `src/main/resources/application.yaml` file, and paste the following snippet to connect to the Self-Managed Zeebe Broker: ```yaml camunda: diff --git a/docs/guides/getting-started-orchestrate-apis.md index 9d40d76137a..880dc5ddc14 100644 --- a/docs/guides/getting-started-orchestrate-apis.md +++ b/docs/guides/getting-started-orchestrate-apis.md @@ -1,7 +1,7 @@ --- id: orchestrate-apis title: Get started with API orchestration -sidebar_label: Get started with API orchestration +sidebar_label: APIs description: "Use Connectors to build low code process automation solutions" keywords: [api endpoints, orchestration, getting started, user guide, connectors] @@ -33,7 +33,7 @@ To use a **REST Connector** in your process, follow the steps below: 1. Create a BPMN diagram. To do this, click **New project** within Modeler. 2. Name your project and select **Create new > BPMN diagram**. -3. Give your model a descriptive name and id. On the right side of the page, expand the **General** section of the properties panel to find the name and id fields. For this guide, we'll use `API Orchestration Tutorial` for the name and `api-orchestration-tutorial` for the id. +3. Give your model a descriptive name and ID. On the right side of the page, expand the **General** section of the properties panel to find the name and ID fields. For this guide, we'll use `API Orchestration Tutorial` for the name and `api-orchestration-tutorial` for the ID. 4. Use Web Modeler to design a BPMN flow with a Connector. Create a Connector by dragging the rectangular task element from the palette, or click the existing start event and the displayed task element to the right of the start event. 5. Change the task type by clicking the wrench icon and select **REST Outbound Connector** in the **Connectors** section. Alternatively, you can directly choose a **REST Outbound Connector** by using the context pad. diff --git a/docs/guides/getting-started-orchestrate-human-tasks.md index 7f21235bf64..999775428ce 100644 --- a/docs/guides/getting-started-orchestrate-human-tasks.md +++ b/docs/guides/getting-started-orchestrate-human-tasks.md @@ -1,7 +1,7 @@ --- id: orchestrate-human-tasks title: Get started with human task orchestration -sidebar_label: Get started with human task orchestration +sidebar_label: Human tasks description: "Efficiently allocate work through user tasks."
keywords: [human tasks, orchestration, getting started, user guide] --- diff --git a/docs/guides/getting-started-orchestrate-microservices.md b/docs/guides/getting-started-orchestrate-microservices.md index b62fd44fb08..72e78eadcd9 100644 --- a/docs/guides/getting-started-orchestrate-microservices.md +++ b/docs/guides/getting-started-orchestrate-microservices.md @@ -1,7 +1,7 @@ --- id: orchestrate-microservices title: Get started with microservice orchestration -sidebar_label: Get started with microservice orchestration +sidebar_label: Microservices description: "Orchestrate microservices for visibility and resilience." keywords: [microservices, orchestration, getting-started] --- @@ -32,7 +32,7 @@ You must have access to a Camunda 8 SaaS account. Additionally, you need the following: -- Java >= 8 +- Java ≥ 8 - Maven - IDE (IntelliJ, VSCode, or similar) - Download and unzip or clone the [repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java` @@ -43,7 +43,7 @@ Start by designing your automated process using BPMN. This guide introduces you 1. To create a BPMN diagram, click **New project** within Modeler. 2. Name your project and select **Create new > BPMN diagram**. -3. Give your model a descriptive name and id. On the right side of the page, expand the **General** section of the properties panel to find the name and id fields. For this guide, we'll use `Microservice Orchestration Tutorial` for the name and `microservice-orchestration-tutorial` for the id. +3. Give your model a descriptive name and ID. On the right side of the page, expand the **General** section of the properties panel to find the name and ID fields. For this guide, we'll use `Microservice Orchestration Tutorial` for the name and `microservice-orchestration-tutorial` for the ID. 4. Use Web Modeler to design a BPMN process with service tasks. These service tasks are used to call your microservices via workers. Create a service task by dragging the task icon from the palette, or by clicking the existing start event and clicking the task icon. Make sure there is an arrow connecting the start event to the task. Click the wrench icon and select **Service Task** to change the task type. ![Task with dropdown showing config, including service task](./img/microservice-orchestration-config-service-task.png) 5. Add a descriptive name using the **General** section in the properties panel. For this guide, we'll use `Call Microservice`. @@ -64,20 +64,20 @@ Start by designing your automated process using BPMN. This guide introduces you To interact with your Camunda 8 cluster, you'll use the Zeebe client. First, you'll need to create credentials. -1. The main page for Console should be open on another tab. Use Console to navigate to your clusters either through the navigation **Clusters** or by using the section under **View all** on the **Clusters** section of the main dashboard. Click on your existing cluster. This will open the **Overview** for your cluster, where you can find your cluster id and region. You will need this information later when creating a worker in the next section. +1. The main page for Console should be open on another tab. Use Console to navigate to your clusters either through the navigation **Clusters** or by using the section under **View all** on the **Clusters** section of the main dashboard. Click on your existing cluster. This will open the **Overview** for your cluster, where you can find your cluster ID and region. 
You will need this information later when creating a worker in the next section. :::note If your account is new, you should have a cluster already available. If no cluster is available, or you’d like to create a new one, click **Create New Cluster**. ::: 2. Navigate to the **API** tab. Click **Create**. 3. Provide a descriptive name for your client like `microservice-worker`. For this tutorial, the scope can be the default Zeebe scope. Click **Create**. -4. Your client credentials can be copied or downloaded at this point. You will need your client id and your client secret when creating a worker in the next section, so keep this window open. Once you close or navigate away from this screen, you will not be able to see them again. +4. Your client credentials can be copied or downloaded at this point. You will need your client ID and your client secret when creating a worker in the next section, so keep this window open. Once you close or navigate away from this screen, you will not be able to see them again. ## Step 4: Create a worker for the service task Next, we’ll create a worker for the service task by associating it with the type we specified on the service task in the BPMN diagram. 1. Open the downloaded or cloned project ([repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java`) in your IDE . -2. Add your credentials to `application.properties`. Your client id and client secret are available from the previous section in the credential text file you downloaded or copied. Go to the cluster overview page to find your cluster id and region. +2. Add your credentials to `application.properties`. Your client ID and client secret are available from the previous section in the credential text file you downloaded or copied. Go to the cluster overview page to find your cluster ID and region. 3. In the `Worker.java` file, change the type to match what you specified in the BPMN diagram. If you followed the previous steps for this guide and entered “orchestrate-something”, no action is required. 4. After making these changes, perform a Maven install, then run the Worker.java `main` method via your favorite IDE. If you prefer using a terminal, run `mvn package exec:java`. 5. Using the Modeler tab in your browser, navigate to Operate and you will see your token has moved to the end event, completing this process instance. diff --git a/docs/guides/img/form-editor.png b/docs/guides/img/form-editor.png index 103db1fefdb..ab00e9e0951 100644 Binary files a/docs/guides/img/form-editor.png and b/docs/guides/img/form-editor.png differ diff --git a/docs/guides/improve-processes-with-optimize.md b/docs/guides/improve-processes-with-optimize.md index 55b073e667f..6ae6fc0a6bc 100644 --- a/docs/guides/improve-processes-with-optimize.md +++ b/docs/guides/improve-processes-with-optimize.md @@ -1,7 +1,7 @@ --- id: improve-processes-with-optimize title: Improve processes with Optimize -sidebar_label: Improve processes with Optimize +sidebar_label: Analyze processes with Optimize description: "Leverage data collected during process execution, analyze bottlenecks, and examine areas for improvement." 
--- diff --git a/docs/guides/introduction-to-camunda-8.md b/docs/guides/introduction-to-camunda-8.md index bcf3c0d7b1c..74ef3f25773 100644 --- a/docs/guides/introduction-to-camunda-8.md +++ b/docs/guides/introduction-to-camunda-8.md @@ -36,7 +36,7 @@ type:"link", href:"/docs/next/guides/getting-started-java-spring/", label: "Get } ]}/> -With these guides, start working with [Web Modeler](/components/modeler/about-modeler.md) to get familiar with BMPN and model a business process, or as a Java developer, step through using Spring Boot and the Spring Zeebe SDK with Desktop Modeler to interact with a local Self-Managed Camunda 8 installation. +With these guides, start working with [Web Modeler](/components/modeler/about-modeler.md) to get familiar with BPMN and model a business process, or as a Java developer, step through using Spring Boot and the Spring Zeebe SDK with Desktop Modeler to interact with a local Self-Managed Camunda 8 installation. ### Use cases diff --git a/docs/guides/message-correlation.md b/docs/guides/message-correlation.md deleted file mode 100644 index 7c0dc0ef853..00000000000 --- a/docs/guides/message-correlation.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: message-correlation -title: Message correlation -description: "Message correlation allows you to target a running workflow with a state update from an external system asynchronously." ---- - -Intermediate -Time estimate: 20 minutes - -## Prerequisites - -- [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js) -- [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) -- [Desktop Modeler](https://camunda.com/download/modeler/) - -## Message correlation - -Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously. - -This tutorial uses the [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js), but it serves to illustrate message correlation concepts that are applicable to all language clients. - -We will use [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production. However, it is useful during development. - -## Workflow - -Here is a basic example from [the Camunda 8 documentation](/components/concepts/messages.md): - -![message correlation workflow](img/message-correlation-workflow.png) - -Use [Desktop Modeler](https://camunda.com/download/modeler/) to open the [test-messaging](https://github.com/jwulf/zeebe-message-correlation/blob/master/bpmn/test-messaging.bpmn) file in [this GitHub project](https://github.com/jwulf/zeebe-message-correlation). - -Click on the intermediate message catch event to see how it is configured: - -![message properties](img/message-correlation-message-properties.png) - -A crucial piece here is the **Subscription Correlation Key**. In a running instance of this workflow, an incoming **Money Collected** message will have a `correlationKey` property: - -```typescript - zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid" - }); -``` - -The concrete value of the message `correlationKey` is matched against running workflow instances by comparing the supplied value against the `orderId` variable of running instances subscribed to this message. 
This is the relationship established by setting the `correlationKey` to `orderId` in the message catch event in the BPMN. - -## Running the demonstration - -To run the demonstration, take the following steps: - -1. Clone this repository. -2. Install dependencies: - :::note - This guide requires `npm` version 6. - ::: - `npm i && npm i -g ts-node typescript` -3. In another terminal, start the Zeebe Broker in addition to [simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor). -4. Deploy the workflow and start an instance: - `ts-node start-workflow.ts` - This starts a workflow instance with the `orderId` set to 345: - -```typescript -await zbc.createProcessInstance("test-messaging", { - orderId: "345", - customerId: "110110", - paymentStatus: "unpaid", -}); -``` - -5. Open Simple Monitor at [http://localhost:8082](http://localhost:8082). -6. Click on the workflow instance. You will see the current state of the workflow: - ![workflow state](img/message-correlation-workflow-state.png) - The numbers above the BPMN symbols indicate that no tokens are waiting at the start event, and one has passed through. One token is waiting at the **Collect Money** task, and none have passed through. -7. Take a look at the **Variables** tab at the bottom of the screen. (If you don't see it, you are probably looking at the workflow, rather than the instance. In that case, drill down into the instance): - ![message correlation variables](img/message-correlation-variables.png) - You can see that this workflow instance has the variable `orderId` set to the value 345. -8. Start the workers: - `ts-node workers.ts` -9. Refresh Simple Monitor to see the current state of the workflow: - ![message correlation wait on message](img/message-correlation-wait-on-message.png) - Now, the token is at the message catch event, waiting for a message to be correlated. -10. Take a look at the **Message Subscriptions** tab: - ![message subscriptions](img/message-correlation-message-subscriptions.png) - You can see the broker has opened a message subscription for this workflow instance with the concrete value of the `orderId` 345. This was created when the token entered the message catch event. -11. Send the message in another terminal: - `ts-node send-message.ts` -12. Refresh Simple Monitor, and note that the message has been correlated and the workflow has run to completion: - -![message correlation completed](img/message-correlation-completed.png) - -The **Message Subscriptions** tab now reports that the message was correlated: - -![message correlation correlated](img/message-correlation-correlated.png) - -## Message buffering - -Messages are buffered on the broker, so your external systems can emit messages before your process arrives at the catch event. The amount of time a message is buffered is configured when publishing the message from the client library. - -For example, to send a message buffered for 10 minutes with the JavaScript client: - -```typescript -zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid", - }, - timeToLive: 600000, -}); -``` - -To see it in action, take the following steps: - -1. Keep the workers running. -2. Publish the message: - -```typescript -ts-node send-message.ts -``` - -3. Click on **Messages** at the top of the Simple Monitor page. You will see the message buffered on the broker: - -![message buffered on broker](img/message-correlation-buffered.png) - -4. 
Start another instance of the workflow: - -```typescript -ts-node start-workflow.ts -``` - -Note that the message is correlated to the workflow instance, even though it arrived before the workflow instance was started. - -## Common mistakes - -A couple of common gotchas: - -- The `correlationKey` in the BPMN message definition is the name of the workflow variable to match against. The `correlationKey` in the message is the concrete value to match against that variable in the workflow instance. - -- The message subscription _is not updated after it is opened_. That is not an issue in the case of a message catch event. However, for boundary message events (both interrupting and non-interrupting,) the subscription is opened _as soon as the token enters the bounding subprocess_. If any service task modifies the `orderId` value inside the subprocess, the subscription is not updated. - -For example, the interrupting boundary message event in the following example will not be correlated on the updated value, because the subscription is opened when the token enters the subprocess, using the value at that time: - -![not correlating](img/message-correlation-not-like-this.png) - -If you need a boundary message event correlated on a value modified somewhere in your process, put the boundary message event in a subprocess after the task that sets the variable. The message subscription for the boundary message event will open when the token enters the subprocess, with the current variable value. - -![correlating](img/message-correlation-like-this.png) - -## Summary - -Message Correlation is a powerful feature in Camunda 8. Knowing how messages are correlated, and how and when the message subscription is created is important to design systems that perform as expected. - -Simple Monitor is a useful tool for inspecting the behavior of a local Camunda 8 system to figure out what is happening during development. diff --git a/docs/guides/migrating-from-camunda-7/adjusting-bpmn-models.md b/docs/guides/migrating-from-camunda-7/adjusting-bpmn-models.md index 08c4f1b36e3..c653c147aa7 100644 --- a/docs/guides/migrating-from-camunda-7/adjusting-bpmn-models.md +++ b/docs/guides/migrating-from-camunda-7/adjusting-bpmn-models.md @@ -92,7 +92,7 @@ The following is **not** possible: ![User Task](../../components/modeler/bpmn/assets/bpmn-symbols/user-task.svg) -Human task management is also available in Camunda 8, but uses a different Tasklist user interface and API. +[Human task management](/guides/getting-started-orchestrate-human-tasks.md) is also available in Camunda 8, but uses a different Tasklist user interface and API. In Camunda 7, you have [different ways to provide forms for user tasks](https://docs.camunda.org/manual/latest/user-guide/task-forms/): diff --git a/docs/guides/migrating-from-camunda-7/conceptual-differences.md b/docs/guides/migrating-from-camunda-7/conceptual-differences.md index fa6375d3f8f..e76f7e840f8 100644 --- a/docs/guides/migrating-from-camunda-7/conceptual-differences.md +++ b/docs/guides/migrating-from-camunda-7/conceptual-differences.md @@ -2,7 +2,7 @@ id: conceptual-differences title: Conceptual differences with Camunda 7 and Camunda 8 sidebar_label: Conceptual differences -description: "Understand conceptual differences with Camunda 7 and Camunda 8 before migrating." +description: "Understand conceptual differences with Camunda 7 and Camunda 8 before migrating, such as the embedded engine, different data types, and the expression language." 
--- ## Conceptual differences @@ -58,7 +58,7 @@ There are several differences between how [multi-tenancy](/self-managed/concepts 2. In Camunda 7, users can deploy shared resources (processes, decisions, and forms) available to all tenants. In Camunda 8, there are no shared resources. This will be added in the future. 3. In Camunda 7, data is mapped to a `null` tenant identifier, meaning by default resources are shared. In Camunda 8, data is mapped to the `` tenant identifier when multi-tenancy is disabled. 4. [Tenant checks in Camunda 7](https://docs.camunda.org/manual/develop/user-guide/process-engine/multi-tenancy/#disable-the-transparent-access-restrictions) can be disabled to perform admin/maintenance operations. This can't be done in Camunda 8, but an admin user can be authorized to all tenants, which would result in the same thing. -5. If a user tries to trigger a command on a resource mapped to multiple tenants in Camunda 7, an exception is thrown, and [the `tenantId` must be explicitly provided](https://docs.camunda.org/manual/develop/user-guide/process-engine/multi-tenancy/#run-commands-for-a-tenant). However, the Camunda 7 engine will try to infer the correct `tenantId` as much as possible. Users in Camunda 7 that are authorized for multiple tenants may perform a lot more operations without providing a `tenantId`. This inference in the Zeebe broker doesn't happen in Camunda 8, and Zeebe asks users to provide the `tenantId` explicitly. +5. If a user tries to trigger a command on a resource mapped to multiple tenants in Camunda 7, an exception is thrown, and [the `tenantId` must be explicitly provided](https://docs.camunda.org/manual/develop/user-guide/process-engine/multi-tenancy/#run-commands-for-a-tenant). However, the Camunda 7 engine will try to infer the correct `tenantId` as much as possible. Users in Camunda 7 that are authorized for multiple tenants may perform a lot more operations without providing a `tenantId`. This inference in the Zeebe Broker doesn't happen in Camunda 8, and Zeebe asks users to provide the `tenantId` explicitly. ## Process solutions using Spring Boot @@ -123,7 +123,7 @@ With Camunda 7 a typical deployment includes: With Camunda 8 you deploy: - Your Spring Boot application with all custom code and the Zeebe client embedded. This application is typically scaled to at least two instances (for resilience) -- The Zeebe broker, typically scaled to at least three instances (for resilience) +- The Zeebe Broker, typically scaled to at least three instances (for resilience) - An elastic database (for Operate, Tasklist, and Optimize) - Optimize, Operate, and Tasklist (each one is a Java application). You can scale those applications to increase availability if you want. diff --git a/docs/guides/migrating-from-camunda-7/index.md b/docs/guides/migrating-from-camunda-7/index.md index 000e09dd12f..39764a6da25 100644 --- a/docs/guides/migrating-from-camunda-7/index.md +++ b/docs/guides/migrating-from-camunda-7/index.md @@ -53,4 +53,4 @@ As described earlier in this guide, migration is an ongoing topic and this guide - Discuss workload migrations (operations) - Eventual consistency -[Reach out to us](/contact/) to discuss your specific migration use case. +[Reach out to us](/reference/contact.md) to discuss your specific migration use case. 
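The multi-tenancy comparison above notes that Camunda 8 does not infer a tenant and asks users to provide the `tenantId` explicitly. A minimal sketch of what that looks like with the Zeebe Java client, assuming a local plaintext gateway; the gateway address, process ID, and tenant ID are placeholders, not values taken from the documentation above:

```java
import io.camunda.zeebe.client.ZeebeClient;

public class TenantAwareStart {
  public static void main(String[] args) {
    // Sketch only: gateway address, process ID, and tenant ID are placeholders.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500")
            .usePlaintext()
            .build()) {
      client
          .newCreateInstanceCommand()
          .bpmnProcessId("order-process") // hypothetical process definition
          .latestVersion()
          .tenantId("tenant-a") // passed explicitly; Zeebe does not infer it from authorizations
          .send()
          .join();
    }
  }
}
```

On a multi-tenant cluster, omitting `tenantId` on such a command is rejected rather than resolved by inference, which is the behavioral difference from Camunda 7 called out in item 5 above.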
diff --git a/docs/guides/model-your-first-process.md b/docs/guides/model-your-first-process.md index 02227f0737f..5c13a32adbb 100644 --- a/docs/guides/model-your-first-process.md +++ b/docs/guides/model-your-first-process.md @@ -21,7 +21,7 @@ In Camunda 8, you have two options to design and deploy a process, but for the p 1. From Modeler, click **New project**. 2. Name your project and select **Create new > BPMN diagram**. ![blank project create bpmn diagram](./img/blank-project.png) -3. Give your model a descriptive name, and then give your model a descriptive id within the **General** tab inside the properties panel on the right side of the screen. +3. Give your model a descriptive name, and then give your model a descriptive ID within the **General** tab inside the properties panel on the right side of the screen. 4. Create a task by dragging the rectangular task icon from the palette, or by clicking the existing start event and clicking the task icon. Make sure there is an arrow connecting the start event to the task. 5. Name the task by double-clicking the task or using the properties panel. 6. Create an end event by dragging the end event icon from the palette, or by clicking the existing start event and clicking the end event icon. diff --git a/docs/guides/react-components/_install-c8run.md b/docs/guides/react-components/_install-c8run.md index adba10df495..520996f22c9 100644 --- a/docs/guides/react-components/_install-c8run.md +++ b/docs/guides/react-components/_install-c8run.md @@ -11,14 +11,14 @@ If no version of Java is found, follow your chosen installation's instructions f ### Install and start Camunda 8 Run -1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/c8run-8.6.2) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. +1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/8.7.0-alpha2) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. 2. Navigate to the new `c8run` directory. 3. Start Camunda 8 Run by running `./start.sh` (or `.\c8run.exe start` on Windows) in your terminal. When successful, a new Operate window automatically opens. :::note -If Camunda 8 Run fails to start, run the [shutdown script](/self-managed/setup/deploy/local/c8run.md/#shut-down-camunda-8-run) to end the current processes, then run the start script again. +If Camunda 8 Run fails to start, run the [shutdown script](/self-managed/setup/deploy/local/c8run.md#shut-down-camunda-8-run) to end the current processes, then run the start script again. ::: For more information and local configuration options, see the [Camunda 8 Run installation guide](/self-managed/setup/deploy/local/c8run.md). diff --git a/docs/guides/setting-up-development-project.md b/docs/guides/setting-up-development-project.md index 8f099e9a8d4..d0d2d4b9c53 100644 --- a/docs/guides/setting-up-development-project.md +++ b/docs/guides/setting-up-development-project.md @@ -1,6 +1,6 @@ --- id: setting-up-development-project -title: Set up your first development project +title: Set up a development project description: "Set up your first project to model, deploy, and start a process instance." 
keywords: [get-started, local-install] --- diff --git a/docs/guides/setup-client-connection-credentials.md b/docs/guides/setup-client-connection-credentials.md index 00200bdc22a..18301f74c79 100644 --- a/docs/guides/setup-client-connection-credentials.md +++ b/docs/guides/setup-client-connection-credentials.md @@ -13,7 +13,7 @@ Here, we'll set up client connection credentials to create, name, and connect yo Currently, Camunda 8 SaaS supports the following scopes: -- Zeebe - Access to the [Zeebe gRPC](/apis-tools/zeebe-api/grpc.md) and [REST](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md) APIs. +- Zeebe - Access to the [Zeebe gRPC](/apis-tools/zeebe-api/grpc.md) and [REST](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) APIs. - Tasklist - Access to the [Tasklist GraphQL](/apis-tools/tasklist-api/tasklist-api-overview.md) API. - Operate - Access to the [Operate REST API](/apis-tools/operate-api/overview.md). - Optimize - Access to the [Optimize REST API]($optimize$/apis-tools/optimize-api/overview). diff --git a/docs/images/operate/modifications/add-token-result.png b/docs/images/operate/modifications/add-token-result.png index 1e0b91a61c1..bfeae7ff743 100644 Binary files a/docs/images/operate/modifications/add-token-result.png and b/docs/images/operate/modifications/add-token-result.png differ diff --git a/docs/images/operate/modifications/add-token.png b/docs/images/operate/modifications/add-token.png index 0360f69efb2..0521bc21194 100644 Binary files a/docs/images/operate/modifications/add-token.png and b/docs/images/operate/modifications/add-token.png differ diff --git a/docs/images/operate/modifications/add-variable-result.png b/docs/images/operate/modifications/add-variable-result.png index fd78323d1bd..6e1cadc84d2 100644 Binary files a/docs/images/operate/modifications/add-variable-result.png and b/docs/images/operate/modifications/add-variable-result.png differ diff --git a/docs/images/operate/modifications/add-variable-to-new-scope.png b/docs/images/operate/modifications/add-variable-to-new-scope.png index 5418b9a8ec6..52381c7272f 100644 Binary files a/docs/images/operate/modifications/add-variable-to-new-scope.png and b/docs/images/operate/modifications/add-variable-to-new-scope.png differ diff --git a/docs/images/operate/modifications/applied-modifications.png b/docs/images/operate/modifications/applied-modifications.png index 197d640cad4..4e06133cc7c 100644 Binary files a/docs/images/operate/modifications/applied-modifications.png and b/docs/images/operate/modifications/applied-modifications.png differ diff --git a/docs/images/operate/modifications/apply-modifications-button.png b/docs/images/operate/modifications/apply-modifications-button.png index d126abd0a96..a3cd4141566 100644 Binary files a/docs/images/operate/modifications/apply-modifications-button.png and b/docs/images/operate/modifications/apply-modifications-button.png differ diff --git a/docs/images/operate/modifications/cancel-token-result.png b/docs/images/operate/modifications/cancel-token-result.png index c013cb90496..955fac999ef 100644 Binary files a/docs/images/operate/modifications/cancel-token-result.png and b/docs/images/operate/modifications/cancel-token-result.png differ diff --git a/docs/images/operate/modifications/cancel-token.png b/docs/images/operate/modifications/cancel-token.png index 75bea13b206..945e21b6051 100644 Binary files a/docs/images/operate/modifications/cancel-token.png and b/docs/images/operate/modifications/cancel-token.png differ diff --git 
a/docs/images/operate/modifications/edit-variable-on-existing-scope.png b/docs/images/operate/modifications/edit-variable-on-existing-scope.png index 4e45f37115b..904e47f4f59 100644 Binary files a/docs/images/operate/modifications/edit-variable-on-existing-scope.png and b/docs/images/operate/modifications/edit-variable-on-existing-scope.png differ diff --git a/docs/images/operate/modifications/edit-variable-result.png b/docs/images/operate/modifications/edit-variable-result.png index 8d9ba4dc484..dde7a69b65d 100644 Binary files a/docs/images/operate/modifications/edit-variable-result.png and b/docs/images/operate/modifications/edit-variable-result.png differ diff --git a/docs/images/operate/modifications/edit-variable-value.png b/docs/images/operate/modifications/edit-variable-value.png index 934a49555a9..885a0ed7a04 100644 Binary files a/docs/images/operate/modifications/edit-variable-value.png and b/docs/images/operate/modifications/edit-variable-value.png differ diff --git a/docs/images/operate/modifications/enter-modification-mode.png b/docs/images/operate/modifications/enter-modification-mode.png index 0ed6c1ebd8c..5980c67990f 100644 Binary files a/docs/images/operate/modifications/enter-modification-mode.png and b/docs/images/operate/modifications/enter-modification-mode.png differ diff --git a/docs/images/operate/modifications/modification-mode.png b/docs/images/operate/modifications/modification-mode.png index e11a1068930..91957edddba 100644 Binary files a/docs/images/operate/modifications/modification-mode.png and b/docs/images/operate/modifications/modification-mode.png differ diff --git a/docs/images/operate/modifications/modification-summary-modal.png b/docs/images/operate/modifications/modification-summary-modal.png index 02f11aff5d6..6d45bb76287 100644 Binary files a/docs/images/operate/modifications/modification-summary-modal.png and b/docs/images/operate/modifications/modification-summary-modal.png differ diff --git a/docs/images/operate/modifications/move-token-result.png b/docs/images/operate/modifications/move-token-result.png index 3c6ec632f8a..a1bb3c32d94 100644 Binary files a/docs/images/operate/modifications/move-token-result.png and b/docs/images/operate/modifications/move-token-result.png differ diff --git a/docs/images/operate/modifications/move-token-select-target.png b/docs/images/operate/modifications/move-token-select-target.png index 951a9800de1..6c29db4594f 100644 Binary files a/docs/images/operate/modifications/move-token-select-target.png and b/docs/images/operate/modifications/move-token-select-target.png differ diff --git a/docs/images/operate/modifications/move-token.png b/docs/images/operate/modifications/move-token.png index 87827dd98c6..a13883038c8 100644 Binary files a/docs/images/operate/modifications/move-token.png and b/docs/images/operate/modifications/move-token.png differ diff --git a/docs/images/operate/modifications/not-supported-flow-nodes.png b/docs/images/operate/modifications/not-supported-flow-nodes.png index bb2a5f59e8a..d5901a73ee6 100644 Binary files a/docs/images/operate/modifications/not-supported-flow-nodes.png and b/docs/images/operate/modifications/not-supported-flow-nodes.png differ diff --git a/docs/images/operate/modifications/select-new-scope.png b/docs/images/operate/modifications/select-new-scope.png index ed9f149dd06..484a2225189 100644 Binary files a/docs/images/operate/modifications/select-new-scope.png and b/docs/images/operate/modifications/select-new-scope.png differ diff --git 
a/docs/images/operate/modifications/undo-modification.png b/docs/images/operate/modifications/undo-modification.png index 8d9ba4dc484..dde7a69b65d 100644 Binary files a/docs/images/operate/modifications/undo-modification.png and b/docs/images/operate/modifications/undo-modification.png differ diff --git a/docs/images/operate/operate-dashboard-no-processes.png b/docs/images/operate/operate-dashboard-no-processes.png index d00ff42eb40..fbcdb3efc52 100644 Binary files a/docs/images/operate/operate-dashboard-no-processes.png and b/docs/images/operate/operate-dashboard-no-processes.png differ diff --git a/docs/images/operate/operate-incident-resolved-path.png b/docs/images/operate/operate-incident-resolved-path.png index 9aeb29afa54..a30cb3138ff 100644 Binary files a/docs/images/operate/operate-incident-resolved-path.png and b/docs/images/operate/operate-incident-resolved-path.png differ diff --git a/docs/images/operate/operate-incident-resolved.png b/docs/images/operate/operate-incident-resolved.png index 859243dd709..bd1c6f3dc1d 100644 Binary files a/docs/images/operate/operate-incident-resolved.png and b/docs/images/operate/operate-incident-resolved.png differ diff --git a/docs/images/operate/operate-introduction.png b/docs/images/operate/operate-introduction.png index 6935f5092d9..ab58ba88927 100644 Binary files a/docs/images/operate/operate-introduction.png and b/docs/images/operate/operate-introduction.png differ diff --git a/docs/images/operate/operate-process-retry-incident.png b/docs/images/operate/operate-process-retry-incident.png index 40946bbde83..e222a68906f 100644 Binary files a/docs/images/operate/operate-process-retry-incident.png and b/docs/images/operate/operate-process-retry-incident.png differ diff --git a/docs/images/operate/operate-process-view-incident.png b/docs/images/operate/operate-process-view-incident.png index 110cc2596e4..1689e3c8096 100644 Binary files a/docs/images/operate/operate-process-view-incident.png and b/docs/images/operate/operate-process-view-incident.png differ diff --git a/docs/images/operate/operate-view-instance-edit-icon.png b/docs/images/operate/operate-view-instance-edit-icon.png index 3974cfba959..87a4d30cb79 100644 Binary files a/docs/images/operate/operate-view-instance-edit-icon.png and b/docs/images/operate/operate-view-instance-edit-icon.png differ diff --git a/docs/images/operate/operate-view-instance-incident.png b/docs/images/operate/operate-view-instance-incident.png index 9ece37d7ec2..6c96d4e5fe7 100644 Binary files a/docs/images/operate/operate-view-instance-incident.png and b/docs/images/operate/operate-view-instance-incident.png differ diff --git a/docs/images/operate/operate-view-instance-save-variable-icon.png b/docs/images/operate/operate-view-instance-save-variable-icon.png index 9af83353843..9e0bc3d659d 100644 Binary files a/docs/images/operate/operate-view-instance-save-variable-icon.png and b/docs/images/operate/operate-view-instance-save-variable-icon.png differ diff --git a/docs/reference/alpha-features.md b/docs/reference/alpha-features.md deleted file mode 100644 index 8a65e9ee0fa..00000000000 --- a/docs/reference/alpha-features.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: alpha-features -title: Alpha features -sidebar_label: Alpha features -description: "Use alpha features to learn about upcoming changes, try them out, and share feedback." ---- - -You can use alpha features to learn about upcoming changes, try them out, and share feedback. 
- -:::info -To understand the difference between an alpha feature and an alpha release, see [alpha features and releases](release-policy.md#alpha-features-and-releases). -::: - -## Alpha - -Selected Camunda features and components are released as **alpha** versions. We release these in an early state for you to test and participate in development by sharing your feedback before they reach [general availability (GA)](#general-availability-ga). - -Limitations of alpha features and components include: - -- Not for production use. -- APIs, dependencies, and configuration are likely to change. -- Not necessarily feature-complete. -- Might lack full documentation. -- No guaranteed updates to newer releases. -- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://docs.camunda.org/enterprise/support/). -- No maintenance service. -- (SaaS) No availability targets. -- Released outside the standard [release policy](release-policy.md). - -To learn more about using alpha features, see [enabling alpha features](/components/console/manage-organization/enable-alpha-features.md). - -:::note - -- Alpha features can also be included in a minor version (stable) release. -- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/contact). - -::: - -## General availability (GA) - -Once features and components are released and considered stable, they become generally available. - -Stable features and components are: - -- Ready for production use for most users with minimal risk. -- Supported by [L1 Priority-level support](https://docs.camunda.org/enterprise/support/#priority-level) for production use. -- Fully documented. - -A release or component is considered stable if it has passed all verification and test stages and can be released to production. - -:::note -Alpha releases can also have **limited availability**, such as features that are only available to enterprise customers. -::: diff --git a/docs/reference/announcements.md b/docs/reference/announcements.md index e187135dfe8..da89278137f 100644 --- a/docs/reference/announcements.md +++ b/docs/reference/announcements.md @@ -1,410 +1,119 @@ --- id: announcements title: "Announcements" -description: "Important announcements including deprecation & removal notices" +description: "Important announcements for upcoming and past Camunda 8 releases that customers should be aware of, including deprecation & removal notices." --- -## Camunda 8.6 - -Release date: 8th of Oct 2024 - -End of maintenance: 14th of April 2026 - -### License key changes - -With the 8.6 release, Camunda 8 Self-Managed requires a license key for production usage. For additional details, review the [blog post on licensing updates for Camunda 8 Self-Managed](https://camunda.com/blog/2024/04/licensing-update-camunda-8-self-managed/). 
- -Review the following documentation for your components for more information on how to provide the license key to each component as an environment variable: - -- [Console](/self-managed/console-deployment/configuration.md#environment-variables) -- [Zeebe](/self-managed/zeebe-deployment/configuration/configuration.md#licensing) -- [Operate](/self-managed/operate-deployment/operate-configuration.md#licensing) -- [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#licensing) -- [Optimize]($optimize$/self-managed/optimize-deployment/configuration/system-configuration-platform-8#licensing) -- [Identity](/self-managed/identity/deployment/configuration-variables.md#license-configuration) -- [Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#licensing) - -To configure with Helm, visit the [Self Managed installation documentation](/self-managed/setup/install.md). - -:::note -Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on startup or functionality, with the exception that Web Modeler has a limitation of five users. To obtain a license, visit the [Camunda Enterprise page](https://camunda.com/platform/camunda-platform-enterprise-contact/). -::: - -### Zeebe Java client - -Starting with 8.7, the Zeebe Java client will become the new Camunda Java client. This transition brings a new Java client structure designed to enhance the user experience and introduce new features while maintaining compatibility with existing codebases. - -The primary goal of those changes is to enable users to interact with Camunda clusters with one consolidated client rather than multiple. The `CamundaClient` will replace the `ZeebeClient`, offering the same functionality and adding new capabilities. If you need to continue using the old `ZeebeClient`, you can use the version 8.6 artifact without any issues with newer cluster versions as the client is forward-compatible. - -:::note -The Zeebe Java client will not be developed further and will only receive bug fixes for as long as version 8.6 is officially supported. -::: - -#### Key changes - -- **New package structure**: - - Package `io.camunda.client`: This package contains the new `CamundaClient` and all the features slated for release in version 8.7. -- **Properties and environment variables refactoring**: - - All old Java client property names will be refactored to more general ones. For instance, `zeebe.client.tenantId` will become `camunda.client.tenantId`. - - Similarly, environment variables will be renamed following the same concept: `ZEEBE_REST_ADDRESS` will become `CAMUNDA_REST_ADDRESS`. -- **Artifact ID change**: - - The `artifactId` will change from `zeebe-client-java` to `camunda-client-java`. - -### Deprecation: Zeebe Go client & CLI client (zbctl) - -The Zeebe Go Client and CLI client (zbctl) will be [officially deprecated](https://camunda.com/blog/2024/09/deprecating-zbctl-and-go-clients/) with the 8.6 release as part of our efforts to streamline the Camunda 8 API experience. This client and CLI utility will not be released starting with Camunda 8.6, will no longer receive new features, and will be transitioned to a community-maintained status. - -The documentation of the Zeebe Go Client and CLI client (zbctl) moved to the [community clients section](/apis-tools/community-clients/index.md). 
- -### Camunda 8 SaaS - Required cluster update - -:::caution -By **August 30th, 2024** all automation clusters in Camunda 8 SaaS must be [updated](/components/console/manage-clusters/manage-cluster.md#update-a-cluster) to the following versions at a **minimum**: - -- **8.2+gen27** -- **8.3+gen11** -- **8.4+gen7** -- **8.5+gen2** - -::: - -auth0 announced an End-Of-Life for one of the functionalities that is being utilized by previous automation clusters. The new versions are not using this functionality anymore. This update ensures your cluster will work seamlessly after auth0 deactivates the feature in production. - -You minimally need to take the following [update](/components/console/manage-clusters/manage-cluster.md#update-a-cluster) path: - -- 8.0.x -> 8.2+gen27 -- 8.1.x -> 8.2+gen27 -- 8.2.x -> 8.2+gen27 -- 8.3.x -> 8.3+gen11 -- 8.4.x -> 8.4+gen7 -- 8.5.x -> 8.5+gen2 - -If you do not update the cluster by August 30th 2024, we will update the cluster for you. **Without an update, you would lose access to your cluster.** - -Camunda 8 Self-Managed clusters are not affected by this. - -### Support for Amazon OpenSearch for Optimize - -This release extends the OpenSearch features supported by Optimize. Full support is committed for the next release in January 2025. - -### Supported environment changes (OpenJDK, ElasticSearch, Amazon OpenSearch) - -Version changes are made to supported environments: +Important changes and updates for Camunda 8 releases that customers should be aware of, including deprecation & removal notices. -- OpenJDK minimum version raised to 21+ in Operate -- ElasticSearch minimum version raised to 8.13+ -- Amazon OpenSearch minimum version raised to 2.9+ +## Camunda 8.7 -To learn more about supported environments, see [supported environments](/reference/supported-environments.md). +Camunda 8.7 is scheduled for release on 11 February, 2025. -### Connectors +
    +
    -#### Deprecation: None start event element templates for Kafka, RabbitMQ, Amazon SQS, and Amazon SNS inbound Connectors +**[8.7 Announcements](/reference/announcements/870.md)** -The [none start event](/components/modeler/bpmn/none-events/none-events.md#none-start-events) element templates for the out-of-the-box Kafka, RabbitMQ, Amazon SQS, and Amazon SNS inbound Connectors have been deprecated in Camunda Modeler. +
    +
    -Users can no longer select these templates when creating a new none start event element in Camunda Modeler. Existing none start event elements with these templates will continue to work as expected, but users are encouraged to migrate to the [message start event](/components/modeler/bpmn/message-events/message-events.md#message-start-events) element templates for these Connectors. +- [API updates](/reference/announcements/870.md#api-updates-saasself-managed) +- [Identity management updates](/reference/announcements/870.md#identity-management-updates-saasself-managed) +- [Installation and deployment updates](/reference/announcements/870.md#installation-and-deployment-updates-self-managed) +- [Camunda Java client and Camunda Spring SDK](/reference/announcements/870.md#camunda-java-client-and-camunda-spring-sdk-self-managed) -Message start event element templates are better suited for the message-based communication these Connectors provide, and offer more flexibility and features compared to the none start event element templates, such as the ability to define a message ID and a correlation key for idempotency. Read more in the [inbound Connectors documentation](/components/connectors/use-connectors/inbound.md) and the [messaging concepts documentation](/components/concepts/messages.md#message-uniqueness). +
    +
    -#### Breaking changes in the Connector SDK - -The `void correlate(Object variables)` method in the `InboundConnectorContext` interface has been removed, following the deprecation in 8.4.0. Use the `CorrelationResult correlateWithResult(Object variables)` method instead. - -The `CorrelationResult` record has been changed compared to the previous versions: - -- `CorrelationResult.Success` now contains a `ProcessElementContext` that represents the element that was correlated. Compared to the previous version, where the correlated element was returned directly, this change allows accessing element properties after correlation for user-controlled post-correlation actions. -- `CorrelationResult.Failure` now provides the `CorrelationFailureHandlingStrategy` that defines how the failure should be handled. - -An example of how to use the new `CorrelationResult` can be found in the [Connector SDK documentation](/components/connectors/custom-built-connectors/connector-sdk.md#inbound-connector-runtime-logic). - -### Flow control enabled by default in SaaS - -Flow control is now enabled by default in Camunda 8.6 SaaS. This change ensures the cluster is protected from excessive load and can maintain a stable state. - -These new configuration defaults are tailored to the cluster size and optimized for a stable performance. However, the cluster might reject requests if the load is too high with this change. The error message for this is `Failed to write client request to partition X, because the write limit is exhausted`. If the error persists, this may be a sign of underlining issues, or a need to adjust the cluster size. - -For more information on how to configure flow control for a Self-Managed cluster, visit the [flow control documentation](/self-managed/operational-guides/configure-flow-control/configure-flow-control.md). - -### Camunda 8 Self-Managed - -#### Helm chart - Separated Ingress deprecation - -The separated Ingress Helm configuration for Camunda 8 Self-Managed has been deprecated in 8.6, and will be removed from the Helm chart in 8.7. Only the combined Ingress configuration is officially supported. See the [Ingress guide](/self-managed/setup/guides/ingress-setup.md) for more information on configuring a combined Ingress setup. - -#### Helm chart - `global.multiregion.installationType` deprecation - -The `global.multiregion.installationType` option is used in failover and failback scenarios. This option in the Helm chart has been deprecated in 8.6, and will be removed from the Helm chart in 8.7. `global.multiregion.installationType` was replaced with a set of API endpoints called while following the ([dual-region operational procedure](/self-managed/operational-guides/multi-region/dual-region-ops.md)) - -#### Helm chart - Elasticsearch nodes number - -The default value of Elasticsearch deployment pods has changed from 2 to 3, and an affinity setting has been added to avoid scheduling Elasticsearch pods on the same Kubernetes worker. - -### Camunda Optimize artifact and Docker tag separation - -Starting with Camunda 8.6, the Camunda Optimize artifact has been split into two distinct versions, and versioning between Camunda 7 and Camunda 8 is no longer interchangeable: - -- **Before Camunda 8.6**: Versions like `8.x` and `3.x` (used for Camunda 7) could sometimes be used interchangeably. -- **From Camunda 8.6 onwards**: `8.6 != 3.14`. Each version corresponds strictly to its platform: - - **Camunda 7**: Uses the `3.x` versioning scheme and the `latest` Docker tag. 
- - **Camunda 8**: Uses the `8.x` versioning scheme and the `8-latest` Docker tag. - -#### Action required: - -- **Camunda 7 Users**: Continue using `3.x` versions and the `latest` Docker tag. -- **Camunda 8 Users**: If you haven't already done so, update your configurations to use `8.x` versions and the `8-latest` Docker tag. - -Make sure to update your Docker configurations accordingly to ensure compatibility. - -### New base path for Operate and Tasklist web applications +## Camunda 8.6 -We are introducing a new base path for both the Operate and Tasklist **web applications**. This change applies to both Self-Managed and SaaS environments. +Camunda 8.6 was released on 8 October, 2024. -#### For Self-Managed +
    +
    -- The new base path for Operate is `/operate`, and for Tasklist, it is `/tasklist`. -- For a [Separated Ingress](/self-managed/setup/guides/ingress-setup.md?ingress=separated) configuration: - - for Operate, the full URL will be `{operate-host}/operate`. Any calls to `{operate-host}` will automatically be redirected to `{operate-host}/operate` - - for Tasklist, the full URL will be `{tasklist-host}/tasklist`. Any calls to `{tasklist-host}` will automatically be redirected to `{tasklist-host}/tasklist`. -- For a [Combined Ingress](/self-managed/setup/guides/ingress-setup.md?ingress=combined) configuration: - - for Operate, the full URL will be `{common-host}/{operate-contextPath}/operate`. Any calls to `{common-host}/{operate-contextPath}` will be automatically redirected to `{common-host}/{operate-contextPath}/operate`. - - for Tasklist, the full URL will be `{common-host}/{tasklist-contextPath}/tasklist`. Any calls to `{common-host}/{tasklist-contextPath}` will be automatically redirected to `{common-host}/{tasklist-contextPath}/tasklist`. +**[8.6 Announcements](/reference/announcements/860.md)** -#### For SaaS +
    +
    -- The full URL for Operate is now structured as `https://{region}.operate.camunda.io/{clusterId}/operate`. -- The full URL for Tasklist is now structured as `https://{region}.tasklist.camunda.io/{clusterId}/tasklist`. -- Any calls to `https://{region}.operate.camunda.io/{clusterId}` will be redirected to `https://{region}.operate.camunda.io/{clusterId}/operate`. -- Any calls to `https://{region}.tasklist.camunda.io/{clusterId}` will be redirected to `https://{region}.tasklist.camunda.io/{clusterId}/tasklist`. +- [License key changes](/reference/announcements/860.md#license-key-changes) +- [Zeebe Java client](/reference/announcements/860.md#zeebe-java-client) +- [Deprecation: Zeebe Go client & CLI client (zbctl)](/reference/announcements/860.md#deprecation-zeebe-go-client--cli-client-zbctl) +- [Camunda 8 SaaS - Required cluster update](/reference/announcements/860.md#camunda-8-saas---required-cluster-update) +- [Support for Amazon OpenSearch for Optimize](/reference/announcements/860.md#support-for-amazon-opensearch-for-optimize) +- [Supported environment changes](/reference/announcements/860.md#supported-environment-changes-openjdk-elasticsearch-amazon-opensearch) +- [Connectors](/reference/announcements/860.md#connectors) +- [Flow control enabled by default in SaaS](/reference/announcements/860.md#flow-control-enabled-by-default-in-saas) +- [Camunda 8 Self-Managed](/reference/announcements/860.md#camunda-8-self-managed) +- [Camunda Optimize artifact and Docker tag separation](/reference/announcements/860.md#camunda-optimize-artifact-and-docker-tag-separation) +- [New base path for Operate and Tasklist web applications](/reference/announcements/860.md#new-base-path-for-operate-and-tasklist-web-applications) -:::note -**API URLs** for both Operate and Tasklist remain **unchanged**. -::: +
    +
    ## Camunda 8.5 -Release date: 9th of April 2024 - -End of maintenance: 14th of October 2025 - -### Updated SaaS URLs - -We will simplify the URL for Camunda 8 SaaS from cloud.camunda.io ([console.cloud.camunda.io](https://console.cloud.camunda.io/)) to camunda.io ([console.camunda.io](http://console.camunda.io/)). - -On or around July 9th, users will be directed to the new URLs. Both URLs will continue to be active for at least 18 months so navigation from supported versions of components like Operate is still possible. - -Internal allowlisting or active rules for [cloud.camunda.io](http://cloud.camunda.io/) must be transitioned to the new [camunda.io](http://camunda.io/) URL. This change primarily affects Console and Modeler. During sign up, users will be briefly redirected through [accounts.cloud.camunda.io](http://accounts.camunda.io/), which will also be updated. - -### Syntax changes in Helm chart - -A Camunda Helm chart upgrade is not possible from v9.x.x to v10.0.0 or v10.0.1. Instead, upgrade directly to v10.0.2+. +Camunda 8.5 was released on 9 April, 2024. -The Camunda Helm chart v10.0.0 has major changes in the values file structure. Some keys in the values file have been changed. For compatibility, the keys are deprecated in the Camunda release cycle 8.5 and will be removed in the Camunda 8.6 release (October 2024). +
    +
    -Follow the [upgrade instructions](/self-managed/setup/upgrade.md#helm-chart-1002+) to upgrade from Camunda Helm chart v9.x.x to Camunda Helm chart v10.x.x. +**[8.5 Announcements](/reference/announcements/850.md#camunda-85)** -### Support for Amazon OpenSearch +
    +
    -With the 8.5 release, Optimize is now also compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5+. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is not supported. +- [Updated SaaS URLs](/reference/announcements/850.md#updated-saas-urls) +- [Syntax changes in Helm chart](/reference/announcements/850.md#syntax-changes-in-helm-chart) +- [Support for Amazon OpenSearch](/reference/announcements/850.md#support-for-amazon-opensearch) +- [Known limitations](/reference/announcements/850.md#known-limitations) +- [Changes in supported environments](/reference/announcements/850.md#changes-in-supported-environments) +- [New generation naming scheme](/reference/announcements/850.md#camunda-saas-new-generation-naming-scheme) +- [Removal of Web Modeler's beta API](/reference/announcements/850.md#removal-of-web-modelers-beta-api) +- [Serialization of timestamp values in management API](/reference/announcements/850.md#zeebe-850-breaks-serialization-of-timestamp-values-in-management-api-self-managed-only) -### Known limitations - -This release contains the following limitations: - -- In **Optimize `8.5.0`** - - **Limitation** - - **Description:** OpenSearch support in Optimize is limited to data import and the raw data report. - - **Reference:** n/a - - **Mitigation:** Optimize can be installed and used in production with limited reporting functionality. Optimize imports all process data generated by Zeebe. All reporting functionality as described in the docs will be delivered with upcoming patches. -- In **Console `8.5.x`** - - **Limitation** - - **Description:** Custom OIDC provider support for Console is not supported - - **Reference:** https://github.com/camunda/issues/issues/784 - -### Changes in supported environments - -- Raised minimum Go version to 1.21 for the Zeebe Go client - -### Camunda SaaS: New generation naming scheme - -With the April release, the generation naming scheme in Camunda 8 changed and no longer includes the patch version. - -The new naming scheme used for all Camunda SaaS generations created after April 2024 is `Camunda .+gen`, where `N` is incremented with every atomic change to the component version set. Existing generations will not be renamed. - -For patch releases to existing generations, `N` is set to the latest patch level plus 1. For example, when `Camunda 8.4.5` is the current generation name, the following patch will be released as `Camunda 8.4+gen6`. - -This was done to decouple the generation name from the particular patch level of the components it contains, as some component versions like Connectors are decoupled from other components. - -You will learn about the particular component patch version changes in the update dialogue to the latest generation available. The following screenshot shows a sample update from `Camunda 8.5+gen1` to `Camunda 8.5+gen2`, where only the Connectors patch version changed. - -![New Generating naming sample showing an update dialogue from 8.5+gen1 to 8.5+gen2](img/generation-naming-scheme-sample.png) - -Note that the actual values shown in this screenshot don't correspond to any actual generations and only serve as an example. - -### Removal of Web Modeler's beta API - -The Web Modeler beta API has been removed. The API was deprecated in 8.3 and is no longer available in 8.5. Use the [Web Modeler v1 API](/apis-tools/web-modeler-api/index.md) instead. 
-For a migration guide, see the [Web Modeler API documentation](/apis-tools/web-modeler-api/index.md#migrating-from-beta-to-v1). - -### Zeebe 8.5.0 breaks serialization of timestamp values in management API (Self-Managed only) - -Zeebe 8.5.0 was released with [a new bug](https://github.com/camunda/camunda/issues/17347) that breaks serialization of timestamp values in management APIs, such as [backup](/self-managed/operational-guides/backup-restore/backup-and-restore.md) and [cluster scaling](/self-managed/zeebe-deployment/operations/cluster-scaling.md). -Timestamps which were previously serialized as `ISO8061` strings are now serialized as integer values. - -Until a fix is delivered in 8.5.1, workarounds include not deserializing timestamp values from affected APIs, or deserializing them as integers. +
    +
    ## Camunda 8.4 -Release date: 9th of January 2024 - -End of maintenance: 9th of July 2025 - -:::caution -The [form linking](/components/modeler/web-modeler/advanced-modeling/form-linking.md#using-the-link-button) feature is impacted by an [issue](https://github.com/camunda/camunda/issues/16311) where the wrong forms can get linked with new user task instances, effectively corrupting the user task instance. If you make use of this feature and run either `8.4.0`, `8.4.1` or `8.4.2`, we urge you to update to the newest `8.4.3` patch that includes the required fix. - -Follow the instructions in the [form linking](/components/modeler/web-modeler/advanced-modeling/form-linking.md#known-issues-with-linked-forms) documentation to resolve this issue. -::: - -### Versioning changes in Helm chart - -As of the 8.4 release, the Camunda 8 **Helm chart** version is decoupled from the version of the application. The Helm chart release still follows the applications release cycle, but it has an independent version. (e.g., in the application release cycle 8.4, the chart version is 9.0.0). - -For more details about the applications version included in the Helm chart, review the [full version matrix](https://helm.camunda.io/camunda-platform/version-matrix/). - -### Dockerfile numeric ID - -The Dockerfile now uses a numeric user ID instead of a non-numeric user. -This will allow the Helm users to use `runAsNonRoot=true` without the need to explicitly set the ID in the Helm `values.yaml` file. - -### Deprecated in 8.4 - -The [Zeebe configuration properties for Camunda Identity](../self-managed/zeebe-deployment/configuration/gateway.md#zeebegatewayclustersecurityauthenticationidentity) -were deprecated in `8.4`. Please use the dedicated Camunda Identity properties or the [corresponding environment variables](../self-managed/identity/deployment/configuration-variables.md#core-configuration). - -### Versioning changes in Elasticsearch - -As of the 8.4 release, Camunda is compatible with Elasticsearch 8.9+ and no longer supports older Elasticsearch versions. See [supported environments](/reference/supported-environments.md). - -### Support for Amazon OpenSearch +Camunda 8.4 was released on 9 January, 2024. -As of the 8.4 release, Zeebe, Operate, and Tasklist are now compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. +
    +
    -:::info -The Helm charts are not yet prepared with the OpenSearch configurations as templates/pre-filled. The Helm charts can still be used to install for OpenSearch, but some adjustments are needed beforehand. Refer to the [Helm deployment documentation](/self-managed/setup/install.md) for further details. -::: +**[8.4 Announcements](/reference/announcements/850.md#camunda-84)** -### Known limitations +
    +
    -This release contains the following limitations: +- [Versioning changes in Helm chart](/reference/announcements/850.md#versioning-changes-in-helm-chart) +- [Dockerfile numeric ID](/reference/announcements/850.md#dockerfile-numeric-id) +- [Deprecated in 8.4](/reference/announcements/850.md#deprecated-in-84) +- [Versioning changes in Elasticsearch](/reference/announcements/850.md#versioning-changes-in-elasticsearch) +- [Support for Amazon OpenSearch](/reference/announcements/850.md#support-for-amazon-opensearch-1) +- [Known limitations](/reference/announcements/850.md#known-limitations-1) -- In **Operate `8.4.0`** - - **Bug** - - **Description:** Instance migration always points to the latest process version - - **Reference:** https://github.com/camunda/issues/issues/567 - - **Mitigation:** Bug is planned to be fixed with upcoming `8.4.1` release - - **Bug** - - **Description:** Backwards migration over multiple versions does not work - - **Reference:** https://github.com/camunda/issues/issues/568 - - **Mitigation:** Bug is planned to be fixed with upcoming `8.4.1` release -- In **Camunda HELM `9.0.x`** - - **Limitation** - - **Description:** The existing Helm charts use the Elasticsearch configurations by default and are not yet prepared with the OpenSearch configurations as templates/pre-filled. The Helm charts can still be used to install for OpenSearch, but some adjustments are needed beforehand. - - **Reference:** n/a - - **Mitigation:** - 1. Refer to our [docs for the installation](/self-managed/setup/install.md#components-installed-by-the-helm-charts), the docs include guidance about necessary adjustments of the Helm chart configuration. - 2. The OpenSearch configuration in Helm charts will be provided in one of our future Helm releases. -- In **Connectors `8.4.x`** - - **Missing feature** - - **Description:** Custom OIDC provider support for Connectors is missing - - **Reference:** https://github.com/camunda/issues/issues/569 - - **Mitigation:** - 1. Feature is planned to be delivered with an upcoming patch release. Please see [issue](https://github.com/camunda/issues/issues/569) for latest progress. - 2. [Disable Connectors component](/self-managed/setup/guides/connect-to-an-oidc-provider.md#configuration) when configuring a custom OIDC provider. +
    +
    ## Camunda 8.3 -Release date: 10th of October 2023 - -End of maintenance: 9th of April 2025 - -:::caution -For existing clusters we recommend updating to `8.3.1` directly and not `8.3.0` due to issues in data migration of Operate, Tasklist, and Optimize that could prolong the migration or even blocking it from finishing. -::: - -:::caution Breaking change - -### Zeebe Docker image now runs with unprivileged user by default - -The default user in the Zeebe Docker image changed from root to an unprivileged user with the UID 1000. This was done to provide stronger compliance with the [OWASP recommendations on Docker Security](https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-2-set-a-user). - -Please refer to the [Update 8.2 to 8.3](/self-managed/operational-guides/update-guide/820-to-830.md) guide. -::: - -:::info -The update from `8.2.x` to `8.3.x` performs a migration for nearly all entities stored in Operate, Tasklist, and Optimize to support [multi-tenancy](/self-managed/concepts/multi-tenancy.md). Therefore, migration may take longer. -::: - -### Deprecated in 8.3 - -[Web Modeler's beta API](/apis-tools/web-modeler-api/index.md) was deprecated in 8.3 and will be removed in 8.5. -Use `v1` instead, see [migration hints](/apis-tools/web-modeler-api/index.md#migrating-from-beta-to-v1). - -### Versioning changes in Elasticsearch - -As of the 8.3 release, Camunda is compatible with Elasticsearch 8.8+ and no longer supports Elasticsearch 7.x. See [supported environments](/reference/supported-environments.md). - -### Versioning changes in Helm chart - -[Helm charts versioning](/self-managed/setup/overview.md) changed in July 2023. - -Starting from July 2023 (v8.2.8), the Camunda 8 **Helm chart** version follows the same unified schema -and schedule as [Camunda 8 applications](https://github.com/camunda/camunda-platform). - -Before this change, the Camunda 8 **Helm chart** version only followed the minor version. - -## Camunda 8.2 - -Release date: 11th of April 2023 - -End of maintenance: 8th of October 2024 - -[Release notes](https://github.com/camunda/camunda-platform/releases/tag/8.2.0) -[Release blog](https://camunda.com/blog/2023/04/camunda-platform-8-2-key-to-scaling-automation/) - -### Update from Web Modeler 8.2 to a later minor version - -Web Modeler versions 8.2.7 to 8.2.12 are affected by [camunda/issues#677](https://github.com/camunda/issues/issues/677). - -If you are using one of these versions, you should first update to Web Modeler 8.2.13 (or a subsequent patch version) before upgrading to a later minor version (8.3 or higher). - -If your current version of Web Modeler is 8.2.6 or earlier, you may directly upgrade to a later minor version. - -### Do not update to Camunda 8.2.22 - -:::caution -Zeebe release `8.2.22` suffers from [camunda/zeebe#16406](https://github.com/camunda/camunda/issues/16406), which results in a Zeebe broker being unable to start if at least one DMN model is deployed. We urge users to skip this release and update to `8.2.23` right away. -::: - -### Do not update from Camunda 8.1.X to 8.2.6 - -An issue in the Operate 8.2.6 patch was discovered after it was published on June 8th. - -You should not update directly from 8.1.x to 8.2.6 (it will require manual intervention as indices break), you either first update to 8.2.5 then 8.2.6 or straight from 8.1.x to 8.2.7. - -To prevent this entirely we removed the Operate 8.2.6 artifacts from this release. 
- -As Camunda 8.2.7 was already released on Tuesday Jun 13th, you can just update to 8.2.7 directly, skipping 8.2.6. +Camunda 8.3 was released on 10 October, 2023. -### OpenSearch 1.3.x support +
    +
    -- Operate version 8.2+ support OpenSearch 1.3.x. However, 8.2.x patches will only be released on the OS 1.3 branch until end of 2023 given that OS 1.3 maintenance period ends by then. We recommend customers to go to 8.4.x which supports OS 2.5+. +**[8.3 Announcements](/reference/announcements/850.md#camunda-83)** -### Optimize and Helm chart compatibility +
    +
    -For Optimize 3.10.1, a new environment variable introduced redirection URL. However, the change is not compatible with Camunda Helm charts until it is fixed in 3.10.3 (and Helm chart 8.2.9). Therefore, those versions are coupled to certain Camunda Helm chart versions: +- [Deprecated in 8.3](/reference/announcements/850.md#deprecated-in-83) +- [Versioning changes in Elasticsearch](/reference/announcements/850.md#versioning-changes-in-elasticsearch-1) +- [Versioning changes in Helm chart](/reference/announcements/850.md#versioning-changes-in-helm-chart-1) -| Optimize version | Camunda Helm chart version | -| --------------------------------- | -------------------------- | -| Optimize 3.10.1 & Optimize 3.10.2 | 8.2.0 - 8.2.8 | -| Optimize 3.10.3+ | 8.2.9 - 8.2.22 | -| Optimize 8.2.7+ | 8.2.23+ | +
    +
    diff --git a/docs/reference/announcements/850.md b/docs/reference/announcements/850.md new file mode 100644 index 00000000000..4f8342a2141 --- /dev/null +++ b/docs/reference/announcements/850.md @@ -0,0 +1,188 @@ +--- +id: announcements-850 +title: "8.5 - 8.3 Announcements" +description: "Important announcements including deprecation & removal notices for the Camunda 8.5, 8.4, 8.3, and 8.2 releases." +--- + +Important changes and updates for the Camunda 8.5, 8.4, and 8.3 releases are summarized below. + +## Camunda 8.5 + +| Release date | End of maintenance | Release notes | +| :----------- | :----------------- | :--------------------------------------------------- | +| 9 April 2024 | 14 October 2025 | [8.5 release notes](/reference/release-notes/850.md) | + +### Updated SaaS URLs + +We will simplify the URL for Camunda 8 SaaS from cloud.camunda.io ([console.cloud.camunda.io](https://console.cloud.camunda.io/)) to camunda.io ([console.camunda.io](http://console.camunda.io/)). + +On or around July 9th, users will be directed to the new URLs. Both URLs will continue to be active for at least 18 months so navigation from supported versions of components like Operate is still possible. + +Internal allowlisting or active rules for [cloud.camunda.io](http://cloud.camunda.io/) must be transitioned to the new [camunda.io](http://camunda.io/) URL. This change primarily affects Console and Modeler. During sign up, users will be briefly redirected through [accounts.cloud.camunda.io](http://accounts.camunda.io/), which will also be updated. + +### Syntax changes in Helm chart + +A Camunda Helm chart upgrade is not possible from v9.x.x to v10.0.0 or v10.0.1. Instead, upgrade directly to v10.0.2+. + +The Camunda Helm chart v10.0.0 has major changes in the values file structure. Some keys in the values file have been changed. For compatibility, the keys are deprecated in the Camunda release cycle 8.5 and will be removed in the Camunda 8.6 release (October 2024). + +Follow the [upgrade instructions](/self-managed/setup/upgrade.md#helm-chart-1002+) to upgrade from Camunda Helm chart v9.x.x to Camunda Helm chart v10.x.x. + +### Support for Amazon OpenSearch + +With the 8.5 release, Optimize is now also compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5+. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is not supported. + +### Known limitations + +This release contains the following limitations: + +- In **Optimize `8.5.0`** + - **Limitation** + - **Description:** OpenSearch support in Optimize is limited to data import and the raw data report. + - **Reference:** n/a + - **Mitigation:** Optimize can be installed and used in production with limited reporting functionality. Optimize imports all process data generated by Zeebe. All reporting functionality as described in the docs will be delivered with upcoming patches. +- In **Console `8.5.x`** + - **Limitation** + - **Description:** Custom OIDC provider support for Console is not supported + - **Reference:** https://github.com/camunda/issues/issues/784 + +### Changes in supported environments + +- Raised minimum Go version to 1.21 for the Zeebe Go client + +### Camunda SaaS: New generation naming scheme + +With the April release, the generation naming scheme in Camunda 8 changed and no longer includes the patch version. 
+ +The new naming scheme used for all Camunda SaaS generations created after April 2024 is `Camunda <major>.<minor>+gen<N>`, where `N` is incremented with every atomic change to the component version set. Existing generations will not be renamed. + +For patch releases to existing generations, `N` is set to the latest patch level plus 1. For example, when `Camunda 8.4.5` is the current generation name, the following patch will be released as `Camunda 8.4+gen6`. + +This was done to decouple the generation name from the particular patch level of the components it contains, as some component versions like Connectors are decoupled from other components. + +You will learn about the particular component patch version changes in the update dialogue when updating to the latest available generation. The following screenshot shows a sample update from `Camunda 8.5+gen1` to `Camunda 8.5+gen2`, where only the Connectors patch version changed. + +![New generation naming sample showing an update dialogue from 8.5+gen1 to 8.5+gen2](../img/generation-naming-scheme-sample.png) + +Note that the actual values shown in this screenshot don't correspond to any actual generations and only serve as an example. + +### Removal of Web Modeler's beta API + +The Web Modeler beta API has been removed. The API was deprecated in 8.3 and is no longer available in 8.5. Use the [Web Modeler v1 API](/apis-tools/web-modeler-api/index.md) instead. +For a migration guide, see the [Web Modeler API documentation](/apis-tools/web-modeler-api/index.md#migrating-from-beta-to-v1). + +### Zeebe 8.5.0 breaks serialization of timestamp values in management API (Self-Managed only) + +Zeebe 8.5.0 was released with [a new bug](https://github.com/camunda/camunda/issues/17347) that breaks serialization of timestamp values in management APIs, such as [backup](/self-managed/operational-guides/backup-restore/backup-and-restore.md) and [cluster scaling](/self-managed/zeebe-deployment/operations/cluster-scaling.md). +Timestamps that were previously serialized as `ISO 8601` strings are now serialized as integer values. + +Until a fix is delivered in 8.5.1, workarounds include not deserializing timestamp values from affected APIs, or deserializing them as integers. + +## Camunda 8.4 + +| Release date | End of maintenance | +| :------------- | :----------------- | +| 9 January 2024 | 9 July 2025 | + +:::caution +The [form linking](/components/modeler/web-modeler/advanced-modeling/form-linking.md#using-the-link-button) feature is impacted by an [issue](https://github.com/camunda/camunda/issues/16311) where the wrong forms can get linked with new user task instances, effectively corrupting the user task instance. If you use this feature and run `8.4.0`, `8.4.1`, or `8.4.2`, we urge you to update to the newest `8.4.3` patch, which includes the required fix. + +Follow the instructions in the [form linking](/components/modeler/web-modeler/advanced-modeling/form-linking.md#known-issues-with-linked-forms) documentation to resolve this issue. +::: + +### Versioning changes in Helm chart + +As of the 8.4 release, the Camunda 8 **Helm chart** version is decoupled from the version of the application. The Helm chart release still follows the application release cycle, but it has an independent version (for example, in the application release cycle 8.4, the chart version is 9.0.0). + +For more details about the application versions included in the Helm chart, review the [full version matrix](https://helm.camunda.io/camunda-platform/version-matrix/).
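+
+To make the decoupled versioning concrete, the sketch below shows how the two versions might appear in the chart's `Chart.yaml`. This is illustrative only; the field values are examples and not an authoritative chart-to-application mapping.
+
+```yaml
+# Illustrative Chart.yaml excerpt: the chart version is decoupled from the
+# application release cycle it delivers (values shown are examples only).
+apiVersion: v2
+name: camunda-platform
+version: 9.0.0 # Helm chart version
+appVersion: 8.4.x # Camunda application release cycle shipped by this chart
+```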
+ +### Dockerfile numeric ID + +The Dockerfile now uses a numeric user ID instead of a non-numeric user. +This will allow the Helm users to use `runAsNonRoot=true` without the need to explicitly set the ID in the Helm `values.yaml` file. + +### Deprecated in 8.4 + +The [Zeebe configuration properties for Camunda Identity](/self-managed/zeebe-deployment/configuration/gateway.md#zeebegatewayclustersecurityauthenticationidentity) +were deprecated in `8.4`. Please use the dedicated Camunda Identity properties or the [corresponding environment variables](/self-managed/identity/deployment/configuration-variables.md#core-configuration). + +### Versioning changes in Elasticsearch + +As of the 8.4 release, Camunda is compatible with Elasticsearch 8.9+ and no longer supports older Elasticsearch versions. See [supported environments](/reference/supported-environments.md). + +### Support for Amazon OpenSearch + +As of the 8.4 release, Zeebe, Operate, and Tasklist are now compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. + +:::info +The Helm charts are not yet prepared with the OpenSearch configurations as templates/pre-filled. The Helm charts can still be used to install for OpenSearch, but some adjustments are needed beforehand. Refer to the [Helm deployment documentation](/self-managed/setup/install.md) for further details. +::: + +### Known limitations + +This release contains the following limitations: + +- In **Operate `8.4.0`** + - **Bug** + - **Description:** Instance migration always points to the latest process version + - **Reference:** https://github.com/camunda/issues/issues/567 + - **Mitigation:** Bug is planned to be fixed with upcoming `8.4.1` release + - **Bug** + - **Description:** Backwards migration over multiple versions does not work + - **Reference:** https://github.com/camunda/issues/issues/568 + - **Mitigation:** Bug is planned to be fixed with upcoming `8.4.1` release +- In **Camunda HELM `9.0.x`** + - **Limitation** + - **Description:** The existing Helm charts use the Elasticsearch configurations by default and are not yet prepared with the OpenSearch configurations as templates/pre-filled. The Helm charts can still be used to install for OpenSearch, but some adjustments are needed beforehand. + - **Reference:** n/a + - **Mitigation:** + 1. Refer to our [docs for the installation](/self-managed/setup/install.md#components-installed-by-the-helm-charts), the docs include guidance about necessary adjustments of the Helm chart configuration. + 2. The OpenSearch configuration in Helm charts will be provided in one of our future Helm releases. +- In **Connectors `8.4.x`** + - **Missing feature** + - **Description:** Custom OIDC provider support for Connectors is missing + - **Reference:** https://github.com/camunda/issues/issues/569 + - **Mitigation:** + 1. Feature is planned to be delivered with an upcoming patch release. Please see [issue](https://github.com/camunda/issues/issues/569) for latest progress. + 2. [Disable Connectors component](/self-managed/setup/guides/connect-to-an-oidc-provider.md#configuration) when configuring a custom OIDC provider. 
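+
+For the Connectors limitation above, a minimal Helm values sketch for temporarily disabling the Connectors component could look like the following. The `connectors.enabled` key is assumed here; verify the exact setting against the linked OIDC configuration guide.
+
+```yaml
+# values.yaml sketch (assumed key): disable Connectors while a custom
+# OIDC provider is configured, until the patch delivering support lands.
+connectors:
+  enabled: false
+```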
+ +## Camunda 8.3 + +| Release date | End of maintenance | +| :-------------- | :----------------- | +| 10 October 2023 | 9 April 2025 | + +:::caution +For existing clusters we recommend updating to `8.3.1` directly and not `8.3.0` due to issues in data migration of Operate, Tasklist, and Optimize that could prolong the migration or even blocking it from finishing. +::: + +:::caution Breaking change + +### Zeebe Docker image now runs with unprivileged user by default + +The default user in the Zeebe Docker image changed from root to an unprivileged user with the UID 1000. This was done to provide stronger compliance with the [OWASP recommendations on Docker Security](https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-2-set-a-user). + +Please refer to the [Update 8.2 to 8.3](/self-managed/operational-guides/update-guide/820-to-830.md) guide. +::: + +:::info +The update from `8.2.x` to `8.3.x` performs a migration for nearly all entities stored in Operate, Tasklist, and Optimize to support [multi-tenancy](/self-managed/concepts/multi-tenancy.md). Therefore, migration may take longer. +::: + +### Deprecated in 8.3 + +[Web Modeler's beta API](/apis-tools/web-modeler-api/index.md) was deprecated in 8.3 and will be removed in 8.5. +Use `v1` instead, see [migration hints](/apis-tools/web-modeler-api/index.md#migrating-from-beta-to-v1). + +### Versioning changes in Elasticsearch + +As of the 8.3 release, Camunda is compatible with Elasticsearch 8.8+ and no longer supports Elasticsearch 7.x. See [supported environments](/reference/supported-environments.md). + +### Versioning changes in Helm chart + +[Helm charts versioning](/self-managed/setup/overview.md) changed in July 2023. + +Starting from July 2023 (v8.2.8), the Camunda 8 **Helm chart** version follows the same unified schema +and schedule as [Camunda 8 applications](https://github.com/camunda/camunda-platform). + +Before this change, the Camunda 8 **Helm chart** version only followed the minor version. diff --git a/docs/reference/announcements/860.md b/docs/reference/announcements/860.md new file mode 100644 index 00000000000..9885351db04 --- /dev/null +++ b/docs/reference/announcements/860.md @@ -0,0 +1,182 @@ +--- +id: announcements-860 +title: "8.6 Announcements" +description: "Important changes and updates for the Camunda 8.6 release including deprecation & removal notices." +--- + +Important changes and updates for the Camunda 8.6 release are summarized below. + +| Release date | End of maintenance | Release notes | +| :------------- | :----------------- | :--------------------------------------------------- | +| 8 October 2024 | 14 April 2026 | [8.6 release notes](/reference/release-notes/860.md) | + +## License key changes + +With the 8.6 release, Camunda 8 Self-Managed requires a license key for production usage. For additional details, review the [blog post on licensing updates for Camunda 8 Self-Managed](https://camunda.com/blog/2024/04/licensing-update-camunda-8-self-managed/). 
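+
+As a hedged illustration, the license key is typically passed to a component as an environment variable. The snippet below is a Docker Compose-style sketch; the variable name `CAMUNDA_LICENSE_KEY` and the service wiring are assumptions, so confirm the exact name in the component documentation listed below.
+
+```yaml
+# docker-compose.yaml sketch (illustrative): inject the license key from the
+# host environment into a component container.
+services:
+  operate:
+    image: camunda/operate:8.6.0
+    environment:
+      - CAMUNDA_LICENSE_KEY=${CAMUNDA_LICENSE_KEY}
+```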
+ +Review the following documentation for your components for more information on how to provide the license key to each component as an environment variable: + +- [Console](/self-managed/console-deployment/configuration/configuration.md#environment-variables) +- [Zeebe](/self-managed/zeebe-deployment/configuration/configuration.md#licensing) +- [Operate](/self-managed/operate-deployment/operate-configuration.md#licensing) +- [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#licensing) +- [Optimize]($optimize$/self-managed/optimize-deployment/configuration/system-configuration-platform-8#licensing) +- [Identity](/self-managed/identity/deployment/configuration-variables.md#license-configuration) +- [Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#licensing) + +To configure with Helm, visit the [Self Managed installation documentation](/self-managed/setup/install.md). + +:::note +Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on startup or functionality, with the exception that Web Modeler has a limitation of five users. To obtain a license, visit the [Camunda Enterprise page](https://camunda.com/platform/camunda-platform-enterprise-contact/). +::: + +## Zeebe Java client + +Starting with 8.7, the Zeebe Java client will become the new Camunda Java client. This transition brings a new Java client structure designed to enhance the user experience and introduce new features while maintaining compatibility with existing codebases. + +The primary goal of those changes is to enable users to interact with Camunda clusters with one consolidated client rather than multiple. The `CamundaClient` will replace the `ZeebeClient`, offering the same functionality and adding new capabilities. If you need to continue using the old `ZeebeClient`, you can use the version 8.6 artifact without any issues with newer cluster versions as the client is forward-compatible. + +:::note +The Zeebe Java client will not be developed further and will only receive bug fixes for as long as version 8.6 is officially supported. +::: + +### Key changes + +- **New package structure**: + - Package `io.camunda.client`: This package contains the new `CamundaClient` and all the features slated for release in version 8.7. +- **Properties and environment variables refactoring**: + - All old Java client property names will be refactored to more general ones. For instance, `zeebe.client.tenantId` will become `camunda.client.tenantId`. + - Similarly, environment variables will be renamed following the same concept: `ZEEBE_REST_ADDRESS` will become `CAMUNDA_REST_ADDRESS`. +- **Artifact ID change**: + - The `artifactId` will change from `zeebe-client-java` to `camunda-client-java`. + +## Deprecation: Zeebe Go client & CLI client (zbctl) + +The Zeebe Go Client and CLI client (zbctl) will be [officially deprecated](https://camunda.com/blog/2024/09/deprecating-zbctl-and-go-clients/) with the 8.6 release as part of our efforts to streamline the Camunda 8 API experience. This client and CLI utility will not be released starting with Camunda 8.6, will no longer receive new features, and will be transitioned to a community-maintained status. + +The documentation of the Zeebe Go Client and CLI client (zbctl) moved to the [community clients section](/apis-tools/community-clients/index.md). 
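+
+Relating to the Zeebe Java client changes above, the property renaming can be pictured with a YAML-style configuration sketch. This is a rough illustration based only on the `zeebe.client.tenantId` to `camunda.client.tenantId` example given earlier; the full property structure of the new client is not spelled out here.
+
+```yaml
+# Old 8.6-style Zeebe client property (illustrative):
+zeebe:
+  client:
+    tenantId: my-tenant
+# New 8.7-style Camunda client property (illustrative):
+camunda:
+  client:
+    tenantId: my-tenant
+```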
+ +## Camunda 8 SaaS - Required cluster update + +:::caution +By **August 30th, 2024** all automation clusters in Camunda 8 SaaS must be [updated](/components/console/manage-clusters/manage-cluster.md#update-a-cluster) to the following versions at a **minimum**: + +- **8.2+gen27** +- **8.3+gen11** +- **8.4+gen7** +- **8.5+gen2** + +::: + +auth0 announced an End-Of-Life for one of the functionalities that is being utilized by previous automation clusters. The new versions are not using this functionality anymore. This update ensures your cluster will work seamlessly after auth0 deactivates the feature in production. + +You minimally need to take the following [update](/components/console/manage-clusters/manage-cluster.md#update-a-cluster) path: + +- 8.0.x -> 8.2+gen27 +- 8.1.x -> 8.2+gen27 +- 8.2.x -> 8.2+gen27 +- 8.3.x -> 8.3+gen11 +- 8.4.x -> 8.4+gen7 +- 8.5.x -> 8.5+gen2 + +If you do not update the cluster by August 30th 2024, we will update the cluster for you. **Without an update, you would lose access to your cluster.** + +Camunda 8 Self-Managed clusters are not affected by this. + +## Support for Amazon OpenSearch for Optimize + +This release extends the OpenSearch features supported by Optimize. Full support is committed for the next release in January 2025. + +## Supported environment changes (OpenJDK, ElasticSearch, Amazon OpenSearch) + +Version changes are made to supported environments: + +- OpenJDK minimum version raised to 21+ in Operate +- ElasticSearch minimum version raised to 8.13+ +- Amazon OpenSearch minimum version raised to 2.9+ + +To learn more about supported environments, see [supported environments](/reference/supported-environments.md). + +## Connectors + +### Deprecation: None start event element templates for Kafka, RabbitMQ, Amazon SQS, and Amazon SNS inbound Connectors + +The [none start event](/components/modeler/bpmn/none-events/none-events.md#none-start-events) element templates for the out-of-the-box Kafka, RabbitMQ, Amazon SQS, and Amazon SNS inbound Connectors have been deprecated in Camunda Modeler. + +Users can no longer select these templates when creating a new none start event element in Camunda Modeler. Existing none start event elements with these templates will continue to work as expected, but users are encouraged to migrate to the [message start event](/components/modeler/bpmn/message-events/message-events.md#message-start-events) element templates for these Connectors. + +Message start event element templates are better suited for the message-based communication these Connectors provide, and offer more flexibility and features compared to the none start event element templates, such as the ability to define a message ID and a correlation key for idempotency. Read more in the [inbound Connectors documentation](/components/connectors/use-connectors/inbound.md) and the [messaging concepts documentation](/components/concepts/messages.md#message-uniqueness). + +### Breaking changes in the Connector SDK + +The `void correlate(Object variables)` method in the `InboundConnectorContext` interface has been removed, following the deprecation in 8.4.0. Use the `CorrelationResult correlateWithResult(Object variables)` method instead. + +The `CorrelationResult` record has been changed compared to the previous versions: + +- `CorrelationResult.Success` now contains a `ProcessElementContext` that represents the element that was correlated. 
Compared to the previous version, where the correlated element was returned directly, this change allows accessing element properties after correlation for user-controlled post-correlation actions. +- `CorrelationResult.Failure` now provides the `CorrelationFailureHandlingStrategy` that defines how the failure should be handled. + +An example of how to use the new `CorrelationResult` can be found in the [Connector SDK documentation](/components/connectors/custom-built-connectors/connector-sdk.md#inbound-connector-runtime-logic). + +## Flow control enabled by default in SaaS + +Flow control is now enabled by default in Camunda 8.6 SaaS. This change ensures the cluster is protected from excessive load and can maintain a stable state. + +These new configuration defaults are tailored to the cluster size and optimized for stable performance. However, with this change the cluster might reject requests if the load is too high. The error message for this is `Failed to write client request to partition X, because the write limit is exhausted`. If the error persists, this may be a sign of underlying issues or a need to adjust the cluster size. + +For more information on how to configure flow control for a Self-Managed cluster, visit the [flow control documentation](/self-managed/operational-guides/configure-flow-control/configure-flow-control.md). + +## Camunda 8 Self-Managed + +### Helm chart - Separated Ingress deprecation + +The separated Ingress Helm configuration for Camunda 8 Self-Managed has been deprecated in 8.6, and will be removed from the Helm chart in 8.7. Only the combined Ingress configuration is officially supported. See the [Ingress guide](/self-managed/setup/guides/ingress-setup.md) for more information on configuring a combined Ingress setup. + +### Helm chart - `global.multiregion.installationType` deprecation + +The `global.multiregion.installationType` option is used in failover and failback scenarios. This option in the Helm chart has been deprecated in 8.6, and will be removed from the Helm chart in 8.7. `global.multiregion.installationType` was replaced with a set of API endpoints that are called while following the [dual-region operational procedure](/self-managed/operational-guides/multi-region/dual-region-ops.md). + +#### Helm chart - Elasticsearch nodes number + +The default value of Elasticsearch deployment pods has changed from 2 to 3, and an affinity setting has been added to avoid scheduling Elasticsearch pods on the same Kubernetes worker. + +## Camunda Optimize artifact and Docker tag separation + +Starting with Camunda 8.6, the Camunda Optimize artifact has been split into two distinct versions, and versioning between Camunda 7 and Camunda 8 is no longer interchangeable: + +- **Before Camunda 8.6**: Versions like `8.x` and `3.x` (used for Camunda 7) could sometimes be used interchangeably. +- **From Camunda 8.6 onwards**: `8.6 != 3.14`. Each version corresponds strictly to its platform: + - **Camunda 7**: Uses the `3.x` versioning scheme and the `latest` Docker tag. + - **Camunda 8**: Uses the `8.x` versioning scheme and the `8-latest` Docker tag. + +### Action required + +- **Camunda 7 users**: Continue using `3.x` versions and the `latest` Docker tag. +- **Camunda 8 users**: If you haven't already done so, update your configurations to use `8.x` versions and the `8-latest` Docker tag. + +Make sure to update your Docker configurations accordingly to ensure compatibility.
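+
+The tag split can be pictured with a short Docker Compose-style sketch. The service names are hypothetical; the image tags are the ones named above.
+
+```yaml
+# Illustrative only: pick the tag stream that matches your platform.
+services:
+  optimize-camunda-8:
+    image: camunda/optimize:8-latest # Camunda 8 uses the 8.x scheme
+  optimize-camunda-7:
+    image: camunda/optimize:latest # Camunda 7 keeps the 3.x scheme
+```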
+ +## New base path for Operate and Tasklist web applications + +We are introducing a new base path for both the Operate and Tasklist **web applications**. This change applies to both Self-Managed and SaaS environments. + +### For Self-Managed + +- The new base path for Operate is `/operate`, and for Tasklist, it is `/tasklist`. +- For a [Separated Ingress](/self-managed/setup/guides/ingress-setup.md?ingress=separated) configuration: + - for Operate, the full URL will be `{operate-host}/operate`. Any calls to `{operate-host}` will automatically be redirected to `{operate-host}/operate` + - for Tasklist, the full URL will be `{tasklist-host}/tasklist`. Any calls to `{tasklist-host}` will automatically be redirected to `{tasklist-host}/tasklist`. +- For a [Combined Ingress](/self-managed/setup/guides/ingress-setup.md?ingress=combined) configuration: + - for Operate, the full URL will be `{common-host}/{operate-contextPath}/operate`. Any calls to `{common-host}/{operate-contextPath}` will be automatically redirected to `{common-host}/{operate-contextPath}/operate`. + - for Tasklist, the full URL will be `{common-host}/{tasklist-contextPath}/tasklist`. Any calls to `{common-host}/{tasklist-contextPath}` will be automatically redirected to `{common-host}/{tasklist-contextPath}/tasklist`. + +### For SaaS + +- The full URL for Operate is now structured as `https://{region}.operate.camunda.io/{clusterId}/operate`. +- The full URL for Tasklist is now structured as `https://{region}.tasklist.camunda.io/{clusterId}/tasklist`. +- Any calls to `https://{region}.operate.camunda.io/{clusterId}` will be redirected to `https://{region}.operate.camunda.io/{clusterId}/operate`. +- Any calls to `https://{region}.tasklist.camunda.io/{clusterId}` will be redirected to `https://{region}.tasklist.camunda.io/{clusterId}/tasklist`. + +:::note +**API URLs** for both Operate and Tasklist remain **unchanged**. +::: diff --git a/docs/reference/announcements/870.md b/docs/reference/announcements/870.md new file mode 100644 index 00000000000..cf4c795311c --- /dev/null +++ b/docs/reference/announcements/870.md @@ -0,0 +1,197 @@ +--- +id: announcements-870 +title: "8.7 Announcements" +description: "Important changes and updates for the Camunda 8.7 release including deprecation & removal notices." +--- + +import DeployDiagramImg from '../img/deploy-diagram-modal.png'; + +Important changes and updates for the Camunda 8.7 release are summarized below. + +| Scheduled release date | Scheduled end of maintenance | Release notes | Blog | +| :--------------------- | :--------------------------- | :--------------------------------------------------- | :---------------------------------------------------------------------------------------------- | +| 11 February 2025 | 11 August 2026 | [8.7 release notes](/reference/release-notes/870.md) | [Announcing Camunda 8.7](https://camunda.com/blog/2024/11/camunda-8-7-releasing-february-2025/) | + +- [API updates](#api-updates-saasself-managed) +- [Identity management updates](#identity-management-updates-saasself-managed) +- [Installation and deployment updates](#installation-and-deployment-updates-self-managed) +- [Camunda Java client and Camunda Spring SDK](#camunda-java-client-and-camunda-spring-sdk-self-managed) + +## API updates SaaSSelf-Managed + +The 8.7 release includes API updates to support the move to a [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) unified experience. 
+ +### Camunda 8 REST API updates + +- New Query endpoints (with advanced search filtering) will be added for process entities (processes, decisions, user tasks, and forms). These will replace the component APIs (Tasklist, Operate) going forward. +- New endpoints will allow you to manage and query users and resource permissions in an orchestration cluster. +- All the Camunda 8 REST API endpoints will support resource-based authorizations to enable fine-grained permissions. +- API terminology is aligned so technical assets have an identical, easily-understood, descriptive property name. + +### Deprecated: Operate and Tasklist v1 REST APIs + +The deprecation process for the [Operate](/apis-tools/operate-api/overview.md) and [Tasklist](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) REST APIs starts with the 8.7 release. You can begin migrating to the [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) for querying to prepare for this change. + +- Version 8.7, 8.8: These APIs are still available but deprecated, and so not recommended for new implementations. +- Version 8.9: These APIs will be removed. + +### Deprecated: Job-based User Tasks querying + +As `Job-worker` user tasks will be deprecated in Camunda 8.9, Camunda recommends you start using `Camunda User Tasks` (formerly known as `Zeebe User Task`) in your process definitions. + +- Version 8.7, 8.8: `Job-worker` user tasks are available for querying, but Camunda modelers automatically apply the `Camunda user task` and show a warning message for each job worker user task. +- Version 8.9: `Job-worker` user tasks will be deprecated. With Camunda 8.9 and later, customers can use the `Job-worker` implementation of user tasks as standard jobs with headers to enable open architecture and composable solutions. + +### Deprecated: Zeebe gRPC API endpoints + +With the 8.7 release, Camunda announces the deprecation of several [Zeebe gRPC](/apis-tools/zeebe-api/grpc.md) endpoints for removal in 8.9. + +- Key gRPC endpoints necessary for high-throughput and low-latency applications remain available with 8.7. +- The final list of retained gRPC endpoints will be confirmed with the 8.7 release. +- Selected endpoints will remain active, with others scheduled for removal in the 8.9 release. + +### Removed: Tasklist GraphQL API + +With the 8.7 release, the deprecated [Tasklist GraphQL API](/apis-tools/tasklist-api/tasklist-api-overview.md) will be removed from the product. + + + +## Identity management updates SaaSSelf-Managed + +The [Identity service](/self-managed/identity/what-is-identity.md) is enhanced to deliver greater flexibility, control, and security for both Self-Managed and SaaS users. These updates are part of our broader effort to streamline the platform’s architecture. + +### Cluster-level identity management + +Identity settings will be configured at the orchestration cluster level, allowing each cluster to have unique OIDC configurations. This cluster-specific setup empowers organizations to assign different identity providers (IdPs) across clusters, offering improved control over permissions and user group mappings, resulting in a more streamlined and efficient configuration experience. + +For SaaS customers, identity management in Camunda 8.7 remains consistent with Camunda 8.6, allowing the attachment of a single IdP per organization. However, cluster-level identity capabilities are provided for SaaS as well as Self-Managed. 
This means that user groups, roles, and access permissions can now be managed at the cluster level, giving SaaS customers the same granular access control as in Self-Managed environments. + +### Decoupling from Keycloak Self-Managed + +Built-in Keycloak integration in Self-Managed is removed, allowing customers to use any compatible IdP. + +- Keycloak remains fully supported as an external option. For cluster-level identity management, it must be connected as an external OIDC provider moving forward. +- OpenID Connect (OIDC) remains the standard for seamless integration with chosen IdPs. + +### Resource-based permissions + +Resource-level permissions are introduced for process definitions and web applications. + +- Admin users retain full access, but regular users must be granted specific permissions to perform actions or view resources. +- For organizations that build custom front-ends and access Camunda via API, users with API permissions can still access process data through the V2 API. + + + +## Installation and deployment updates Self-Managed + +Camunda 8.7 introduces a streamlined architecture, consolidating core components such as Zeebe, Operate, and Tasklist into a single deployable unit. Enhanced deployment options are also included, such as new Kubernetes Helm guides, deployment reference architectures, and improved support for professional developers with Camunda 8 Run. + +You can download the alpha release of the unified package from the Camunda GitHub repository, either as an executable Java application (Camunda Orchestration Core) or a Docker image. + +:::caution breaking change: Deploy diagram modal + +The Web Modeler **Deploy diagram** modal has changed, and clusters must now be proactively configured to be able to deploy from Web Modeler. + +New 8.7 deploy diagram modal + +- In 8.6, you could still configure cluster details on the **Deploy diagram** modal when deploying. +- In 8.7, you can no longer configure cluster details on the **Deploy diagram** modal. You must [configure the cluster](/docs/self-managed/modeler/web-modeler/configuration/configuration.md#clusters) to be able to deploy from this modal. +- Note that you must also be assigned the `Zeebe` [Identity role](/docs/self-managed/identity/user-guide/roles/add-assign-role.md) to be able to deploy. + +::: + +### Helm charts + +If you are using the recommended Camunda 8 deployment option (Helm charts), the upgrade path from version 8.6 to 8.7 is straightforward: change the values file to the new syntax. Updated Helm charts will be provided to support the upgrade to the new streamlined architecture. + +New migration guides will also be provided to support you when migrating from a previous Camunda version. + +:::caution +Additional upgrade considerations are necessary for deployments that use custom scripts, such as Docker containers, manual installations, or custom-developed Kubernetes deployments. For these deployments, customers can either continue to deploy with their original 8.6 topology and upgrade each component independently, or adopt our Helm chart approach for the upgrade, which allows for unifying the deployment into a single JAR or container executable. +::: + +#### Separated Ingress removed + +With Camunda 8.7, the Helm chart only supports a combined Ingress setup, where all Camunda components run on the same Ingress object and hostname. Customers running on a separate Ingress must migrate to the combined Ingress setup; see [Ingress setup](/self-managed/setup/guides/ingress-setup.md).
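+
+As a rough sketch of what a combined Ingress setup can look like in the Helm values file (the hostname and Ingress class are placeholders, and the exact keys, assumed here to live under `global.ingress`, should be verified against the linked Ingress setup guide):
+
+```yaml
+# Combined Ingress sketch: all components served behind one Ingress/hostname.
+global:
+  ingress:
+    enabled: true
+    className: nginx # placeholder Ingress class
+    host: camunda.example.com # placeholder hostname
+```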
+ +The following Helm chart values have been removed: + +```yaml +connectors.ingress +console.ingress +identity.ingress +operate.ingress +optimize.ingress +tasklist.ingress +webModeler.ingress +zeebeGateway.ingress +``` + +### Manual installation + +For organizations that do not use cloud-native platforms such as Kubernetes or container services, we will publish a reference architecture that provides guidance on implementing Camunda production clusters on VM-based systems, using Amazon Web Services (AWS) EC2 as an example. + +The architecture will include details on optimal instance sizing, network configurations, and security best practices, to ensure robust performance and reliability. + +### Camunda Exporter + +A new Camunda Exporter brings the importer and archiving logic of web components (Tasklist and Operate) closer to the distributed platform (Zeebe). The index schema is also being harmonized. + +#### Harmonized index schema + +Camunda is harmonizing our index structure and usage. + +- This removes unnecessary duplications over multiple indices due to the previous architecture. +- With this change, several Operate indices can and will be used by Tasklist. +- New indices have been created to integrate Identity into the system. + +![Harmonized indices schema](../img/harmonized-indices-schema.png) + +#### Camunda Exporter + +The exporter can consume Zeebe records (mostly events created by the engine), aggregate data, and store the related data into shared and harmonized indices. + +- Data is archived in the background, coupled to the exporter but without blocking the exporter's progress. +- Indices can be located in either ElasticSearch (ES) or Opensearch (OS). Our web components (Tasklist and Operate) then use the new harmonized indices to show data to the user. + +The following diagram shows a simplified version of this work. + +![Camunda Exporter diagram](../img/target-camunda-exporter.png) + +- For example, Tasklist and Operate Importers are still required for old data to be imported, but the Camunda exporter writes all new data into ES/OS. After old indices are drained, importers can be turned off. +- The archiver, which takes care of the archiving of completed process instances, will be moved into the Zeebe system as well, to reduce the installation complexity and provide a better scaling and replication factor (based on partitions). +- This helps achieve a streamlined architecture, and improves platform performance and stability (especially regarding ES/OS). +- A new separate component covers the migration, which will be part of the single application but can also deployed separately. It will adjust the previous Operate indices to make them more harmonized and usable by Tasklist. + + + +## Camunda Java client and Camunda Spring SDK Self-Managed + +With the Camunda 8.7 release, Camunda Java client and Camunda Spring SDK replace the Zeebe Java client and Zeebe Spring SDK. This allows you to use a single consolidated client to interact with Camunda clusters. + +The `CamundaClient` replaces the `ZeebeClient`, offering the same functionality and adding new capabilities. + +:::note + +- If you need to continue using the old `ZeebeClient`, you can use the version 8.6 artifact without any issues with newer cluster versions as the client is forward-compatible. +- The Zeebe Java client will not be developed further and only receives bug fixes while version 8.6 is officially supported. 
+ +::: + +### Key changes + +| Change | Description | +| :---------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| New package structure | Package `io.camunda.client`: Contains the new `CamundaClient` and all 8.7 features. | +| Refactored properties and environment variables |

    • All old Java client property names are refactored to more general ones. For example, `zeebe.client.tenantId` to `camunda.client.tenantId`. • Similarly, environment variables are renamed following the same concept: `ZEEBE_REST_ADDRESS` to `CAMUNDA_REST_ADDRESS`.

    | +| Artifact ID change | The `artifactId` changes from `zeebe-client-java` to `camunda-client-java`. | + +## Southeast Asia region for SaaS customers SaaS + +SaaS customers can now create orchestration clusters in the [Singapore (asia-southeast1) region](/reference/regions.md), ensuring lower latency and improved processing speed for organizations operating in southeast Asian countries. diff --git a/docs/reference/contact.md b/docs/reference/contact.md new file mode 100644 index 00000000000..4e6c7fc3892 --- /dev/null +++ b/docs/reference/contact.md @@ -0,0 +1,45 @@ +--- +id: contact +title: Contact +description: Contact Camunda, submit feedback, find support using the Camunda community forum, note bug reports and feature requests, and review security notices. +keywords: + [ + support, + contact-us, + get-support, + help, + need-help, + bug, + bug-report, + feature-request, + issue, + enterprise-support, + ] +--- + +There are a few different channels you can reach us based on your needs: + +- We encourage everyone to participate in our **community** via the [Camunda community forum](https://forum.camunda.io/), where you can exchange ideas with other Camunda users, as well as Camunda employees. For all other Camunda community programs and resources, visit our [Camunda Developer Hub](https://camunda.com/developers). + +- We welcome your **bug** reports and **feature requests** through our community channels mentioned above. + +- For **security-related issues**, review our [security notices](/reference/notices.md) for the most up-to-date information on known issues and steps to report a vulnerability so we can solve the problem as quickly as possible. Do not use GitHub for security-related issues. + +- **Feedback and support** can be submitted or requested via JIRA by following our [Enterprise support process](https://camunda.com/services/enterprise-support-guide/). All users can also find feedback and support options in the Help Center or [Camunda community forum](https://forum.camunda.io/). + +- For sales inquiries, information about Camunda 8 performance and benchmarking, or anything not listed above, use our [Contact Us](https://camunda.com/contact/) form. + +## Locating Camunda 8 credentials + +Need assistance locating your Camunda 8 credentials? You can obtain these credentials from Camunda by submitting a **Help Request**. To do this, take the following steps: + +1. Log in to [Jira](https://jira.camunda.com/secure/Dashboard.jspa). +2. Click **Create** in the navigation bar at the top of the page. This launches a **Create Issue** pop-up. +3. In the **Issue Type** field, select **Help Request**. +4. In the **Help Request Type** field, click the option that reads **I need the credentials for downloading Camunda**. +5. In the **Summary** and **Description** fields, **I need the credentials for downloading Camunda** will populate by default. + ![completed help request example](./img/create-issue-request.png) +6. (Optional) Add more details, such as the priority level or authorized support contacts. +7. Click **Create** at the bottom of the pop-up **Create Issue** box. + +After completing these steps, your request is generated. Find additional details on submitting a self-service help request [here](https://camunda.com/services/enterprise-support-guide/). 
diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md index b659d040f8b..0ed65c96669 100644 --- a/docs/reference/glossary.md +++ b/docs/reference/glossary.md @@ -4,19 +4,23 @@ title: "Glossary" description: "This section defines common terminology referenced within the documentation." --- +### Automation cluster + +See [orchestration cluster](#orchestration-cluster). + ### Bridge Synonym to "[Connector](#connector)". ### Broker -A broker is an instance of a Zeebe installation which executes processes and manages process state. A single broker is installed on a single machine. +The [Zeebe Broker](#zeebe-broker) is the distributed workflow engine that tracks the state of active process instances. However, a Zeebe deployment often consists of more than one broker. Brokers can be partitioned for horizontal scalability and replicated for fault tolerance. -- [Architecture](/components/zeebe/technical-concepts/architecture.md#brokers) +- [Architecture](/components/zeebe/technical-concepts/architecture.md) ### Client -A client interacts with the Zeebe broker on behalf of the business application. Clients poll for work from the broker. +A client interacts with the Zeebe Broker on behalf of the business application. Clients poll for work from the broker. - [Architecture](/components/zeebe/technical-concepts/architecture.md#clients) @@ -82,11 +86,11 @@ In a clustered environment, a broker which is not a leader is a follower of a gi - [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) -### Gateway +### Human task -Clients communicate with the Zeebe cluster through a gateway. The gateway provides a REST and gRPC API and forwards client commands to the cluster. Depending on the setup, a gateway can be embedded in the broker or can be configured to be standalone. +Camunda 8 allows you to orchestrate processes with human tasks, which may be [user tasks](#user-task) or [manual tasks](#manual-task). -- [Architecture](/components/zeebe/technical-concepts/architecture.md#gateways) +- [Human task orchestration](/guides/getting-started-orchestrate-human-tasks.md) ### Hybrid mode @@ -149,19 +153,35 @@ The log is comprised of an ordered sequence of records written to persistent sto - [Partitions](/components/zeebe/technical-concepts/partitions.md#partition-data-layout) +### Manual task + +A manual task defines a task that requires human interaction but no external tooling or UI interface. For example, a user reviewing a document or completing a physical task. + +Manual tasks are part of [human task orchestration](/guides/getting-started-orchestrate-human-tasks.md), but differ from [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) which define an actionable task assisted by a business process execution engine or software application. + +- [Manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md) + ### Message A message contains information to be delivered to interested parties during execution of a process instance. Messages can be published via Kafka or Zeebe’s internal messaging system. Messages are associated with timestamp and other constraints such as time-to-live (TTL). - [Messages](/components/concepts/messages.md) +### Orchestration cluster + +An orchestration cluster includes Zeebe, Operate, Tasklist, Optimize, and Connectors. Previously [automation cluster](#automation-cluster). 
+ +### Orchestration core + +An orchestration core or orchestration cluster core includes Zeebe, Operate, Tasklist, Optimize, and Identity. + ### Outbound Connector  Outbound [Connectors](#connector) in Camunda 8 allow workflows to trigger with external systems or services, making it possible to integrate workflows with other parts of a business process or system architecture. ### Partition -A partition represents a logical grouping of data in a Zeebe broker. This data includes process instance variables stored in RocksDB, commands, and events generated by Zeebe stored in the log. The number of partitions is defined by configuration. +A partition represents a logical grouping of data in a Zeebe Broker. This data includes process instance variables stored in RocksDB, commands, and events generated by Zeebe stored in the log. The number of partitions is defined by configuration. - [Partitions](/components/zeebe/technical-concepts/partitions.md) @@ -250,6 +270,12 @@ An [inbound Connector](#inbound-connector) that subscribes to a message queue. This way, a Camunda workflow can receive messages from an external system or service (like Kafka or RabbitMQ) using message queuing technology. This type of inbound Connector is commonly used in distributed systems where different components of the system need to communicate with each other asynchronously. +### User task + +A user task is used to model work that needs to be done by a human and is assisted by a business process execution engine or software application. This differs from [manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md), which are not assisted by external tooling. + +- [User tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) + ### Webhook Connector  Webhooks are a subtype of [inbound Connector](#inbound-connector). @@ -273,3 +299,13 @@ See [process instance](#process-instance). ### Workflow instance variable  See [process instance variable](#process-instance-variable). + +### Zeebe Broker + +The [Zeebe Broker](/components/zeebe/technical-concepts/architecture.md#brokers) is the distributed workflow engine that tracks the state of active process instances. The Zeebe Broker is the main part of the Zeebe cluster and does the heavy work of processing, replicating, and exporting, all based on partitions. + +### Zeebe Gateway + +The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster, allowing Zeebe clients to communicate with Zeebe brokers inside the cluster.
+ +- [Zeebe Gateway overview](/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md) diff --git a/src/pages/img/create-issue-request.png b/docs/reference/img/create-issue-request.png similarity index 100% rename from src/pages/img/create-issue-request.png rename to docs/reference/img/create-issue-request.png diff --git a/docs/reference/img/deploy-diagram-modal.png b/docs/reference/img/deploy-diagram-modal.png new file mode 100644 index 00000000000..ec1c32d4128 Binary files /dev/null and b/docs/reference/img/deploy-diagram-modal.png differ diff --git a/docs/reference/img/doc-icon.png b/docs/reference/img/doc-icon.png new file mode 100644 index 00000000000..26cc92e5102 Binary files /dev/null and b/docs/reference/img/doc-icon.png differ diff --git a/docs/reference/img/harmonized-indices-schema.png b/docs/reference/img/harmonized-indices-schema.png new file mode 100644 index 00000000000..7cd8cfc9e0c Binary files /dev/null and b/docs/reference/img/harmonized-indices-schema.png differ diff --git a/docs/reference/img/target-camunda-exporter.png b/docs/reference/img/target-camunda-exporter.png new file mode 100644 index 00000000000..20395a6e901 Binary files /dev/null and b/docs/reference/img/target-camunda-exporter.png differ diff --git a/docs/reference/notices.md b/docs/reference/notices.md index 1773709bb43..60b0a1322ba 100644 --- a/docs/reference/notices.md +++ b/docs/reference/notices.md @@ -74,11 +74,11 @@ Tasklist The REST API functionality of Tasklist 8.2.0 and 8.2.1 allows unauthenticated access to the following methods/URLs: -- GET /v1/tasks/{taskId} +- GET /v1/tasks/\{taskId} - POST /v1/tasks/search -- POST /v1/tasks/{taskId}/variables/search -- POST /v1/forms/{formId} -- POST /v1/variables/{variableId} +- POST /v1/tasks/\{taskId}/variables/search +- POST /v1/forms/\{formId} +- POST /v1/variables/\{variableId} Find more information about the methods in our [Tasklist REST API documentation](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md). @@ -115,7 +115,7 @@ At this point, Camunda is not aware of any specific attack vector in Tasklist al #### How to determine if the installation is affected -You are Tasklist version (8.0.3 >= version <= 8.0.7) or <= 8.1.2 +You are Tasklist version (8.0.3 ≥ version ≤ 8.0.7) or ≤ 8.1.2 #### Solution @@ -142,7 +142,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.11 or <= 1.3.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.11 or ≤ 1.3.6 #### Solution @@ -168,7 +168,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.8 or <= 1.1.9 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.8 or ≤ 1.1.9 #### Solution @@ -194,7 +194,7 @@ Specifically, IAM bundles log4j-api and log4j-to-slf4j. 
However, IAM does not bu #### How to determine if the installation is affected -You are using IAM version <= 1.2.8 +You are using IAM version ≤ 1.2.8 #### Solution @@ -219,7 +219,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.7 or <= 1.1.8 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.7 or ≤ 1.1.8 #### Solution @@ -248,7 +248,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.7 +You are using IAM version ≤ 1.2.7 #### Solution @@ -273,7 +273,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.6 or <= 1.1.7 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.6 or ≤ 1.1.7 #### Solution @@ -302,7 +302,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.6 +You are using IAM version ≤ 1.2.6 #### Solution @@ -327,7 +327,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.5 or <= 1.1.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.5 or ≤ 1.1.6 #### Solution @@ -357,7 +357,7 @@ Still, Camunda recommends applying fixes as mentioned in the Solution section be #### How to determine if the installation is affected -You are using IAM version <= 1.2.5 +You are using IAM version ≤ 1.2.5 #### Solution diff --git a/docs/reference/overview.md b/docs/reference/overview.md index 1911f5150c1..301e4cc5409 100644 --- a/docs/reference/overview.md +++ b/docs/reference/overview.md @@ -15,7 +15,7 @@ This section contains general reference material for Camunda 8. - [Service status](status.md) - Camunda 8 SaaS is a hosted service for the Camunda 8 stack that runs on the Google Cloud Platform (GCP). When availability changes, Camunda provides you with a current service status. - [Supported environments](supported-environments.md) - Learn more about supported environments across your web browser, Desktop Modeler, clients, Camunda 8 Self-Managed, and the Camunda 7 and Optimize version matrix. - [Dependencies & third party libraries](dependencies.md) - A complete list of all dependencies and third-party libraries for all the components of Camunda 8, including Self-Managed. -- [Alpha features](alpha-features.md) - Use alpha features to learn about upcoming changes, try them out, and share feedback. +- [Alpha features](/components/early-access/alpha/alpha-features.md) - Use alpha features to learn about upcoming changes, try them out, and share feedback. ## Security and license information diff --git a/docs/reference/regions.md b/docs/reference/regions.md index 2ea6139f85b..b43f87b48ab 100644 --- a/docs/reference/regions.md +++ b/docs/reference/regions.md @@ -4,29 +4,39 @@ title: "Regions" description: "After creating a cluster, specify a region for that cluster. Read on for details of Google Cloud Platform regions currently supported in Camunda 8 SaaS." --- -When you create a cluster in Camunda 8 SaaS, you must specify a region for that cluster. 
+When you [create a cluster](/components/console/manage-clusters/create-cluster.md) in Camunda 8 SaaS, you must specify a region for that cluster. -Currently, we make these regions available for customers on the Trial, Starter, and Enterprise Plans. Enterprise customers can discuss custom regions with their Customer Success Manager. +The following regions are available for customers on Trial, Starter, and Enterprise Plans. Enterprise customers can also discuss custom regions with their Customer Success Manager. :::note -Our Console and Web Modeler components are currently hosted in the EU. [Contact us](https://camunda.com/contact/) if you have additional questions. -::: -Below, find a list of regions currently supported in Camunda 8 SaaS. +- Management cluster components (Console and Web Modeler) are currently hosted in GCP (EU). [Contact us](/reference/contact.md) if you have additional questions. +- Single-tenant clusters run on a dedicated GCP or AWS infrastructure. + +::: ## Available Google Cloud Platform (GCP) regions +The following GCP regions are currently supported in Camunda 8 SaaS. + | GCP region | Secondary backups region | -| ------------------------------------------------ | ------------------------------------------------- | +| :----------------------------------------------- | :------------------------------------------------ | | Belgium, Europe (europe-west1) | Germany, Europe (europe-west3) | | Iowa, North America (us-central1) | Salt Lake City, North America (us-west1) | | London, Europe (europe-west2) | _Not available_ | +| Singapore, Asia (asia-southeast1) | Changhua County, Taiwan (asia-east1) | | South Carolina, North America (us-east1) | Iowa, North America (us-central1) | | Sydney, Australia (australia-southeast1) | Melbourne, Australia (australia-southeast2) | | Toronto, North America (northamerica-northeast2) | Montréal, North America (northamerica-northeast1) | -You can find the locations behind the region codes [on the Google page](https://cloud.google.com/about/locations). +To learn more about each region code/location, refer to [Google cloud locations](https://cloud.google.com/about/locations). -:::note -Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](https://camunda.com/contact/) as we are able to make additional regions available on request. -::: +## Available Amazon Web Services (AWS) regions + +The following AWS regions are currently supported in Camunda 8 SaaS for Trial plan customers. + +| AWS region | Secondary backups region | +| :---------------------------------- | :----------------------- | +| North America, Virginia (us-east-1) | _Not available_ | + +To learn more about each region code/location, refer to [AWS regions and availability zones](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/). diff --git a/docs/reference/release-notes/850.md b/docs/reference/release-notes/850.md index 43f3806758c..d514f55adca 100644 --- a/docs/reference/release-notes/850.md +++ b/docs/reference/release-notes/850.md @@ -108,7 +108,7 @@ The first iteration of this feature brings back existing features from Camunda 7 -The first step to offer an intuitive and consistent experience via a single, [unified Camunda 8 REST API](https://camunda.com/blog/2024/03/streamlining-camunda-apis-zeebe-rest-api/) is to provide the Zeebe REST API. 
With this release, developers can use the Zeebe REST API to manage [Zeebe user tasks](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md), enabling immediate task state changes. The Zeebe REST API includes support for Identity authentication and multi-tenancy, ensuring parity to the Zeebe gRPC API. +The first step to offer an intuitive and consistent experience via a single, [unified Camunda 8 REST API](https://camunda.com/blog/2024/03/streamlining-camunda-apis-zeebe-rest-api/) is to provide the Zeebe REST API. With this release, developers can use the Zeebe REST API to manage [Zeebe user tasks](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md), enabling immediate task state changes. The Zeebe REST API includes support for Identity authentication and multi-tenancy, ensuring parity to the Zeebe gRPC API. ### Refactoring suggestions Modeler diff --git a/docs/reference/release-notes/860.md b/docs/reference/release-notes/860.md index cad16c2db31..2431729e07e 100644 --- a/docs/reference/release-notes/860.md +++ b/docs/reference/release-notes/860.md @@ -11,13 +11,13 @@ keywords: ] --- -These release notes identify the new features included in 8.6, including [alpha feature releases](/docs/reference/alpha-features.md). +These release notes identify the new features included in 8.6, including [alpha feature releases](/components/early-access/alpha/alpha-features.md). ## 8.6 minor -| Release date | End of maintenance | Changelog(s) | Release blog | Update guide | -| -------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | -| 8 October 2024 | 14 April 2026 | | [Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) | [Self-Managed update guide](self-managed/operational-guides/update-guide/850-to-860.md) | +| Release date | End of maintenance | Changelog(s) | Release blog | Update guide | +| -------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| 8 October 2024 | 14 April 2026 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0) | [Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) | [Self-Managed update guide](self-managed/operational-guides/update-guide/850-to-860.md) | ### Advanced SaaS offering SaaS Console @@ -66,6 +66,10 @@ Business Knowledge Models (BKM) can now be implemented in a decision model. - Users can extract and reuse expressions in their DMN diagrams. - When writing an expression in a decision, the BKM name autocompletes together with the required parameters. +:::note +Viewing a BKM in Operate is not supported yet. +::: + ### Deprecate zbctl and GO client Zeebe @@ -154,7 +158,7 @@ Auto-mapping simplifies the process of migrating complex and lengthy process def | Release date | Changelog(s) | | | ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| 10 September 2024 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0-alpha5)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0-alpha5) | [Release blog](https://camunda.com/blog/2024/08/camunda-alpha-release-september-2024/) | +| 10 September 2024 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0-alpha5)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0-alpha5) | [Release blog](https://camunda.com/blog/2024/09/camunda-alpha-release-september-2024/) | ### Auto-pause SaaS Console @@ -211,7 +215,7 @@ When creating a process for a local (non-English) region, you can design forms i -Real-time feedback is added for message correlation for messages with `ttl=o`, enabling external systems to immediately determine the success or failure of message correlation. This enhancement allows external systems to take prompt and appropriate actions based on the correlation result, improving overall efficiency and reducing response times. +The Camunda 8 REST API provides an [endpoint for synchronous message correlation](/apis-tools/camunda-api-rest/specifications/correlate-message.api.mdx), enabling external systems to immediately determine the success or failure of message correlation. This enhancement allows external systems to take prompt and appropriate actions based on the correlation result, improving overall efficiency and reducing response times. ### Public Marketplace blueprint support for HTO & DMN Web Modeler Marketplace @@ -320,7 +324,7 @@ You can now synchronize process applications with GitHub using a native integrat - After an admin approves and configures the basic integration, you can select a path to synchronize with in a GitHub repository. - You can pull changes from GitHub to integrate contributions from Desktop Modeler users, make changes, and begin the process to make a pull request so every change is properly reviewed and approved. -Do you use another tool such as GitLab or Bitbucket? [Contact us](/contact/) to make your request. Until then, you can use our Connectors system and the CI/CD blueprint on the Marketplace. +Do you use another tool such as GitLab or Bitbucket? [Contact us](/reference/contact.md) to make your request. Until then, you can use our Connectors system and the CI/CD blueprint on the Marketplace. ### Persist data across sessions @@ -389,7 +393,7 @@ New platform users interested in orchestrating API endpoints now have a high-lev "Cloud" has been removed from the URLs in SaaS versions of Modeler and Console for conciseness. -### Incident Copilot Alpha Play +### Incident copilot alpha Play @@ -423,7 +427,7 @@ Enhance BPMN workflow reliability with selective message acknowledgement, enabli Time-to-live (TTL) is now configurable for inbound Connectors via a property in all inbound intermediate element templates called `Message TTL`. The new default value for TTL is 0. Read more about [message buffering](/components/concepts/messages.md#message-buffering) and [message correlation](/components/concepts/messages.md#message-correlation-overview). -### Incident Copilot Alpha Play +### Incident copilot alpha Play diff --git a/docs/reference/release-notes/870.md b/docs/reference/release-notes/870.md new file mode 100644 index 00000000000..3771a030c74 --- /dev/null +++ b/docs/reference/release-notes/870.md @@ -0,0 +1,257 @@ +--- +id: 870 +title: "8.7 Release notes" +description: "Release notes for 8.7, including alphas" +keywords: + [ + "product development lifecycle", + "software development lifecycle", + "CI/CD", + "AI", + ] +--- + +These release notes identify the new features included in 8.7, including [alpha feature releases](/components/early-access/alpha/alpha-features.md). 
+ +## 8.7 minor + +| Scheduled release date | Scheduled end of maintenance | Changelog(s) | Release blog | Update guide | +| ---------------------- | ---------------------------- | ------------ | ------------ | ------------ | +| 11 February 2025 | 11 August 2026 | - | - | - | + +## 8.7.0-alpha2 + +| Release date | Changelog(s) | Blog | +| :--------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------ | +| 10 December 2024 |
    • [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.7.0-alpha2)
    • [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.7.0-alpha2.1)
    | [Release blog](https://camunda.com/blog/2024/12/camunda-alpha-release-december-2024/) | + + + +:::caution + +This [alpha release](/reference/release-policy.md) contains a known issue where Self-Managed customers using the 8.7.0-alpha2 Helm Chart cannot login to Operate. This issue is due to key architecture refactoring and improvements, and will be resolved in the next release. + +::: + + + +### Camunda 8 REST API Query API API + +You can now use a single Query API in the Camunda 8 REST API to find process and decision data instead of using multiple component APIs. + +For example, send a request to the [Query decision definitions](/apis-tools/camunda-api-rest/specifications/find-decision-definitions.api.mdx) endpoint to search for decision definitions. + +New Query API endpoints are added as follows: + +- Decision definitions +- Decision instances +- Decision requirements +- Flownode instances +- Incidents +- Process definitions +- Process instances +- User tasks +- Variables + +To learn more about these endpoints, see the [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md#query-api) documentation. + +### Connectors SaaSSelf-ManagedConnectors + +New Connectors and enhancements are included in this release. + +#### AWS Amazon Comprehend Connector + +The new Amazon Comprehend Connector allows you to integrate your BPMN service with Amazon Comprehend, a service which extracts insights about the content of documents, such as personal identifiable information (PII) and key phrases. + +To learn more about this Connector, see [Amazon Comprehend Connector](/components/connectors/out-of-the-box-connectors/amazon-comprehend.md). + +#### Email Connector attachments + +The Email connector is enhanced as follows: + +- Supports attachments stored in the document store. +- Supports custom headers. +- Messages can now be sent as plaintext, HTML, or in both formats. + +To learn more about this Connector, see [Email Connector](/components/connectors/out-of-the-box-connectors/email.md). + +#### Google Gemini Connector + +The new Google Gemini Connector allows you to access Gemini multimodal models from Google, capable of understanding virtually any input, and combining different types of information in your BPMN process. + +To learn more about this Connector, see [Google Gemini Connector](/components/connectors/out-of-the-box-connectors/google-gemini.md). + +#### Webhook Connector document upload + +Document upload is now supported by the Webhook Connector. Uploads can now be stored in the document store and are available for further processing for start and intermediate events. + +- Use the `documents` object to access created documents in both the response expression and the result expression. +- The `documents` object contains the references for created documents. + +To learn more about this feature, see [HTTP Webhook Connector](/components/connectors/protocol/http-webhook.md). + +### Connector Runtime SaaSSelf-ManagedConnectors + +#### Spring SDK and Camunda REST API Migration + +The Connectors experience is enhanced with the migration from the Spring Zeebe to the Camunda REST API, and the removal of dependency on the Operate client. + +#### Testing Support migration + +Connectors are supported in the Camunda Process Test (CPT) Java library you can use to test your BPMN processes and process application. + +To learn more about this feature, see [Camunda Process Test getting started](/apis-tools/testing/getting-started.md). 
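As a rough illustration of the Query API endpoints introduced above (a hedged sketch only — the exact request paths and payload shapes are defined by the linked Camunda 8 REST API specification, and the base URL, token, and filter below are placeholders), a decision definition search might look like this:

```bash
# Hypothetical sketch: search decision definitions via the Camunda 8 REST API Query API.
# Adjust the base URL, authentication, and filter to your own cluster; see the linked
# specification for the authoritative request and response schemas.
curl -s -X POST "http://localhost:8080/v2/decision-definitions/search" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  -d '{ "filter": {}, "page": { "limit": 10 } }'
```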
+ + + +### Cluster disk space cleared for paused trial clusters SaaS + +Cluster disk space is cleared when a trial cluster is paused. + +- You will need to redeploy processes to the cluster once it is resumed from a paused state. +- Cluster configuration settings (for example, API Clients, Connector secrets, and IP allowlists) are saved so you can easily resume a cluster. + + + +### Document handling SaaSSelf-Managed + +New features are available as part of the enhanced document handling being delivered with the 8.7 release. + +- A new Document API is available as part of the [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md). +- The [Java client](/apis-tools/java-client/index.md) is enhanced to support these new Document API methods. +- A document store concept is introduced and implemented as an in-memory and a GCP-based document store. +- A new Tasklist [Filepicker component](/components/modeler/forms/form-element-library/forms-element-library-filepicker.md) is added for uploading documents to the document store in a form. +- The [Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) is enhanced to provide document support in property/variable bindings. +- The [Webhook connector](/components/connectors/protocol/http-webhook.md) supports Documents via the `documents` object. + + + +### Export activity logs in Console SaaSConsole + +You can export activity logs as JSON or CSV files from the Console UI or API. + +- **UI:** On the Organization management **Activity** tab, click **Export activity**. +- **API:** Send a GET request to the Management API `GetJson` or `GetCsv` endpoint. + +To learn more about this feature, see [view organization activity](/components/console/manage-organization/view-organization-activity.md). + + + +### Process instance migration SaaSSelf-ManagedZeebe + +Enhanced process instance migration allows you to solve problems with process definitions and use the latest process improvements. + +You can now migrate the following: + +- Compensation boundary event subscriptions +- Escalation boundary events +- Escalation event subprocesses + +To learn more about migration, see [process instance migration](/components/concepts/process-instance-migration.md). + + + +### Singapore region available for SaaS SaaS + +A new Singapore (asia-southeast1) region is available for SaaS clusters. Use this region to: + +- Improve overall processing speed and reduce latency if you operate in Singapore and Southeast Asian (SEA) countries. +- Keep cluster data within Singapore to support your local data residency and compliance needs. + +To learn more about supported SaaS regions, see [regions](/reference/regions.md). + + + +### Tags and properties in Self-Managed Console Self-ManagedConsole + +Use custom tags and properties in Self-Managed Console to improve your orchestration cluster management. + +- Administrators can now assign tags such as `prod`, `dev`, or `test` to clusters for clear identification across environments. +- Tags are shown in the Console UI, and accessible via the Administration API to streamline usage reporting and cost allocation. +- Custom properties provide contextual information about each cluster. Administrators can add detailed descriptions, team names, and include links to resources such as Grafana dashboards or internal portals, shown in the Console **Cluster Details**. 
+ +This feature allows you to differentiate clusters, ensure configurations align with production standards (for example, check TLS is enabled, correct partition counts), and improve operational efficiency by making key information more visible. + + + +### Unified deployment experience from Web Modeler Self-ManagedModeler + +The deployment experience is further simplified for Enterprise customers running Web Modeler Self-Managed. + +- User tokens are used for deployments instead of machine-to-machine (M2M) tokens generated from a client ID and secret. +- You no longer need to enter a client ID and secret in the deploy modal. Instead, simply choose a cluster (or stage for process applications) and deploy. + +## 8.7.0-alpha1 + +| Release date | Changelog(s) | Blog | +| :--------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------ | +| 12 November 2024 |
    • [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.7.0-alpha1)
    • [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.7.0-alpha1)
    | [Release blog](https://camunda.com/blog/2024/11/camunda-alpha-release-november-2024/) | + +### Activity log information in Console Console + + + +Console activity logs now contain information about changes made to secrets (add, update, remove), and Console user removals (unregistered organization users). + +### Email Connector Connectors + + + +The new Email Connector allows you to: + +- Integrate your BPMN service with any email server using POP3, IMAP, or SMTP. +- Automate the retrieval, deletion, search, and organization of emails directly within your processes. + +To learn more about this Connector, see [Email Connector](/components/connectors/out-of-the-box-connectors/email.md). + +### Generate Connector templates (OpenAPI + Postman) SaaSConnectors + + + +You can now configure and automatically generate a custom Connector template in Web Modeler. This feature simplifies creating consistent, deployable templates, making Connector setup quicker and more flexible. + +- You can start from a blank template or import an existing API definition such as an OpenAPI specification, Swagger specification, or a Postman collection. +- For example, download a Postman collection as a YAML file, import this into the generator, and choose which methods to include in the generated template. + +To learn more about generating Connector templates, see [generate a Connector template](/components/connectors/custom-built-connectors/connector-template-generator.md). + +### Monorepo Git sync Modeler + + + +When configuring Git sync in Web Modeler, define the optional `/path` option to unlock new use cases. + +- This option allows you to specify the path to the folder containing your process application files. +- Sync with your main branch to perform visual diffing, collaboration, and manual testing in Web Modeler. Remember not to make any changes in this branch. +- Edit the `/path` for multiple process applications to integrate Web Modeler with your existing monorepo and code assets. + +To learn more about configuring Git sync, see [Git sync](/components/modeler/web-modeler/git-sync.md). + +### Resize clusters on SaaS SaaSConsole + + + +Enterprise customers can flexibly resize their clusters to adjust capacity and performance. + +- Increase or decrease the cluster size at any time by adding or removing hosting packages. +- For example, increase the cluster size to improve performance and add capacity, or decrease the cluster size to free up reservations for another cluster. + +To learn more about this feature, see [resize a cluster](/components/console/manage-clusters/manage-cluster.md#resize-a-cluster). + +### Unified deployment experience for Web Modeler Self-ManagedModeler + + + +#### Deployment stages + +Predefined deployment stages for process applications are now also available in Web Modeler Self-Managed. + +- Select your dev, test, stage, and prod clusters to ensure process applications flow easily and predictably through your deployment pipeline. +- For added control, do not assign a stage or prod environment to enable rapid iteration in Web Modeler while still ensuring deployments run through your approved pipeline. + +#### Simplified deployment + +The deployment experience for Enterprise customers running Web Modeler Self-Managed is simplified. + +- During Camunda installation, you can configure your Helm chart to decide which clusters are available from Web Modeler by default, and save their connection information. 
+- With this setup, you only need to select a cluster, and add secrets and a tenant ID as required. diff --git a/docs/reference/release-notes/release-notes.md b/docs/reference/release-notes/release-notes.md index a256125a9b3..8f3d74e103e 100644 --- a/docs/reference/release-notes/release-notes.md +++ b/docs/reference/release-notes/release-notes.md @@ -6,25 +6,28 @@ description: "Release notes for Camunda 8 and its components." Camunda 8 release notes include notable new and improved features, enhancements, and bug fixes. Release notes are separated by minor release pages and include alphas released during the development cycle. -| Version | Release date | Scheduled end of maintenance | Changelogs | Release blog | -| -------------------------------------- | --------------- | ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------- | -| [8.6](/reference/release-notes/860.md) | 8 October 2024 | 14 April 2026 | | TBD | -| [8.5](/reference/release-notes/850.md) | 9 April 2024 | 14 October 2025 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.5.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.5.0) | [Release blog](https://camunda.com/blog/2024/04/camunda-8-5-release/) | -| 8.4 | 9 January 2024 | 9 July 2025 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.4.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.4.0) | [Release blog](https://camunda.com/blog/2024/01/camunda-8-4-simplifying-installation-enhancing-user-experience/) | -| 8.3 | 10 October 2023 | 9 April 2025 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.3.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.3.0) | [Release blog](https://camunda.com/blog/2023/10/camunda-8-3-scaling-automation-maximize-value/) | -| 8.2 | 11 April 2023 | 8 October 2024 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.2.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.2.0) | [Release blog](https://camunda.com/blog/2023/04/camunda-platform-8-2-key-to-scaling-automation/) | - -_ While Camunda Cloud 1.3 is available in the site dropdown, it is unsupported. However, the corresponding Optimize version for Camunda Cloud 1.3, Optimize 3.7, remains supported at this time. _ +| Version | Release date | Scheduled end of maintenance | Changelogs | Release blog | +| -------------------------------------- | ---------------- | ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------- | +| [8.7](/reference/release-notes/870.md) | 11 February 2025 | 11 August 2026 | - | - | +| [8.6](/reference/release-notes/860.md) | 8 October 2024 | 14 April 2026 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0) | [Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) | +| [8.5](/reference/release-notes/850.md) | 9 April 2024 | 14 October 2025 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.5.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.5.0) | [Release blog](https://camunda.com/blog/2024/04/camunda-8-5-release/) | +| 8.4 | 9 January 2024 | 9 July 2025 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.4.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.4.0) | [Release blog](https://camunda.com/blog/2024/01/camunda-8-4-simplifying-installation-enhancing-user-experience/) | +| 8.3 | 10 October 2023 | 9 April 2025 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.3.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.3.0) | [Release blog](https://camunda.com/blog/2023/10/camunda-8-3-scaling-automation-maximize-value/) | +| 8.2 | 11 April 2023 | 8 October 2024 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.2.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.2.0) | [Release blog](https://camunda.com/blog/2023/04/camunda-platform-8-2-key-to-scaling-automation/) | + +:::note +Camunda Cloud 1.3 is available in the site dropdown, but is unsupported. However, the corresponding Optimize version for Camunda Cloud 1.3, Optimize 3.7, remains supported at this time. +::: ## Changelogs -Technical changelogs for Camunda 8 are available under on GitHub: +Technical changelogs for Camunda 8 are available on GitHub: - [Camunda 8 Core](https://github.com/camunda/camunda/releases) - [Connectors](https://github.com/camunda/connectors/releases) - [Camunda Helm Chart](https://github.com/camunda/camunda-platform-helm/releases) -Most releases and changelogs are available through the [Camunda Platform repo](https://github.com/camunda/camunda-platform), however, the following component release notes are available as linked below: +Most releases and changelogs are available through the [Camunda Platform repo](https://github.com/camunda/camunda-platform). The following component release notes are available as linked below: - [Desktop Modeler](https://github.com/camunda/camunda-modeler/releases) - [Connectors](https://github.com/camunda/connectors/releases) diff --git a/docs/reference/release-policy.md b/docs/reference/release-policy.md index 14cfba06f53..627c0d8670d 100644 --- a/docs/reference/release-policy.md +++ b/docs/reference/release-policy.md @@ -18,7 +18,7 @@ It is important to understand the different ways the term "alpha" is used in the ### Alpha feature -Refers to a feature or component released as an alpha version, in an early state for you to test and participate in development by sharing your feedback before the feature reaches [general availability (GA)](alpha-features.md#general-availability-ga). Some alpha features require turning on for your cluster before you can use them. See [alpha features](alpha-features.md). +Refers to a feature or component released as an alpha version, in an early state for you to test and participate in development by sharing your feedback before the feature reaches [general availability](#general-availability-ga). Some alpha features require turning on for your cluster before you can use them. See [alpha features](/components/early-access/alpha/alpha-features.md). ### Alpha release @@ -26,11 +26,23 @@ Refers to a release made available between minor versions that allows you to pre :::note -- An alpha release can also be made available where the entire version is an alpha with [alpha limitations](alpha-features.md#alpha). +- An alpha release can also be made available where the entire version is an alpha with [alpha limitations](/components/early-access/alpha/alpha-features.md#alpha). - Additionally, "Alpha channel" refers to the channel you can use when provisioning a SaaS cluster. See [alpha channel](#alpha-channel). ::: +## General availability (GA) + +Once features and components are released and considered stable, they become generally available. + +Stable features and components are: + +- Ready for production use for most users with minimal risk. +- Supported by [L1 Priority-level support](https://camunda.com/services/enterprise-support-guide/) for production use. +- Fully documented. + +A release or component is considered stable if it has passed all verification and test stages and can be released to production. 
+ ## SaaS provisioning In Camunda 8 SaaS we differentiate between components that are part of a Camunda 8 cluster (cluster components), and components outside the cluster (non-cluster components). @@ -50,7 +62,7 @@ You can provision cluster components using one of two channels, following the [C #### Stable channel -You can use the **Stable** channel to access [general availability](alpha-features.md#general-availability-ga) features for cluster components. +You can use the **Stable** channel to access [general availability](#general-availability-ga) features for cluster components. - Provides the latest feature and patch releases ready for most users at minimal risk. - Releases follow semantic versioning and can be updated to the next minor or patch release without data loss. @@ -58,7 +70,7 @@ You can use the **Stable** channel to access [general availability](alpha-featur #### Alpha channel -You can use the **Alpha** channel to access [alpha features](alpha-features.md) and patch releases for cluster components. +You can use the **Alpha** channel to access [alpha features](/components/early-access/alpha/alpha-features.md) and patch releases for cluster components. - Provides alpha releases to preview and prepare for the next stable release. - Alpha releases provide a short-term stability point to test new features and give feedback before they are released to the stable channel. Use an alpha release to test the upcoming minor release with your infrastructure. diff --git a/docs/reference/status.md b/docs/reference/status.md index c8de779c5d2..ecb840a23ab 100644 --- a/docs/reference/status.md +++ b/docs/reference/status.md @@ -21,4 +21,4 @@ To receive service status updates: ## Support -Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/contact). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). +Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/reference/contact.md). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). diff --git a/docs/reference/supported-environments.md b/docs/reference/supported-environments.md index bebc2dc47bf..6b5b2bddc9a 100644 --- a/docs/reference/supported-environments.md +++ b/docs/reference/supported-environments.md @@ -8,7 +8,7 @@ The supported environments page lists browsers, operating systems, clients, depl **If the particular technology is not listed, we cannot resolve issues caused by the usage of that unlisted technology.** -You may [raise a feature request](/contact) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/contact) to work with Consulting services. +You may [raise a feature request](/reference/contact.md) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/reference/contact.md) to work with Consulting services. Recommendations are denoted with [recommended], however, other listed options are supported as well. 
@@ -30,15 +30,13 @@ For example, 1.2+ means support for the minor version 2, and any higher minors ( ## Desktop Modeler - Windows 10 / 11 -- Mac OS 12 / 13 / 14 +- Mac OS 12 / 13 / 14 / 15 - Ubuntu LTS (latest) ## Clients - **Zeebe Java Client**: OpenJDK 8+ - **Zeebe Spring SDK**: OpenJDK 17+ -- **Zeebe Go Client**: Go 1.13+ -- **zbctl**: Windows, macOS, and Linux (latest) - **Connector SDK**: OpenJDK 17+ - **Spring SDK**: Spring Boot 3.3.x (for the exact version, check the [version matrix](/apis-tools/spring-zeebe-sdk/getting-started.md#version-compatibility).) - **Helm CLI**: 3.14.x (for the exact version, check the [version matrix](https://helm.camunda.io/camunda-platform/version-matrix/).) @@ -112,6 +110,7 @@ This matrix shows which component versions work together: | Design | Automate | | Improve | | --------------------- | ----------- | -------------------------------------------------------------------------- | --------------- | +| Desktop Modeler 5.28+ | Zeebe 8.6.x | Operate 8.6.x Tasklist 8.6.x Identity 8.6.x Connectors 8.6.x Console 8.6.x | Optimize 8.6.x | | Desktop Modeler 5.22+ | Zeebe 8.5.x | Operate 8.5.x Tasklist 8.5.x Identity 8.5.x Connectors 8.5.x Console 8.5.x | Optimize 8.5.x | | Desktop Modeler 5.19+ | Zeebe 8.4.x | Operate 8.4.x Tasklist 8.4.x Identity 8.4.x Connectors 8.4.x | Optimize 8.4.x | | Desktop Modeler 5.16+ | Zeebe 8.3.x | Operate 8.3.x Tasklist 8.3.x Identity 8.3.x Connectors 8.3.x | Optimize 8.3.x | diff --git a/docs/self-managed/about-self-managed.md b/docs/self-managed/about-self-managed.md index 1ee25e43bf4..6eb3059e818 100644 --- a/docs/self-managed/about-self-managed.md +++ b/docs/self-managed/about-self-managed.md @@ -1,7 +1,7 @@ --- id: about-self-managed title: "Camunda 8 Self-Managed" -description: "Camunda 8 Self-Managed is a self-hosted alternative to using Camunda 8 SaaS." +description: "Step through everything you need to download, configure, and work with components of Camunda 8 Self-Managed, a self-hosted alternative to using Camunda 8 SaaS." --- import Components from './react-components/components.md' @@ -38,4 +38,4 @@ In this configuration, Camunda 8 Self-Managed can be accessed as follows: - Identity, Operate, Optimize, Tasklist, Modeler: `https://camunda.example.com/[identity|operate|optimize|tasklist|modeler]` - Web Modeler also exposes a WebSocket endpoint on `https://camunda.example.com/modeler-ws`. This is only used by the application itself and should not be accessed by users directly. - Keycloak authentication: `https://camunda.example.com/auth` -- Zeebe gateway: `grpc://zeebe.camunda.example.com` +- Zeebe Gateway: `grpc://zeebe.camunda.example.com` diff --git a/docs/self-managed/concepts/exporters.md b/docs/self-managed/concepts/exporters.md index a6dc88b2ae7..be9af05b892 100644 --- a/docs/self-managed/concepts/exporters.md +++ b/docs/self-managed/concepts/exporters.md @@ -66,7 +66,24 @@ heavy work during instantiation/configuration. ### Metrics -The exporter is provided with a Micrometer [MeterRegistry](https://docs.micrometer.io/micrometer/reference/concepts/registry.html) in the `Exporter#configure(Context)` method through the configuration. Any metrics to be exported should interact with the registry. +The exporter is provided with a Micrometer [MeterRegistry](https://docs.micrometer.io/micrometer/reference/concepts/registry.html) in the `Exporter#configure(Context)` method through the configuration. 
Any metrics to be exported should interact with the registry, for example: + +```java +public class SomeExporter implements Exporter { + @Override + public void configure(final Context context) { + // ... + registry = context.getMeterRegistry(); + // ... + } + + public void flush() { + try (final var ignored = Timer.resource(registry, "meter.name")) { + exportBulk(); + } + } +} +``` When an exporter is validated, it is only provided with an in-memory register which is then discarded. diff --git a/docs/self-managed/concepts/multi-region/dual-region.md b/docs/self-managed/concepts/multi-region/dual-region.md index 7d44cadefb3..899224c7499 100644 --- a/docs/self-managed/concepts/multi-region/dual-region.md +++ b/docs/self-managed/concepts/multi-region/dual-region.md @@ -98,11 +98,11 @@ Amazon OpenSearch is **not supported** in dual-region configurations. - Required open ports between the two regions: - **9200** for Elasticsearch (for cross-region data pushed by Zeebe). - **26500** for communication to the Zeebe Gateway from clients/workers. - - **26501** and **26502** for communication between Zeebe brokers and Zeebe Gateway. + - **26501** and **26502** for communication between Zeebe brokers and the Zeebe Gateway. ### Zeebe cluster configuration -The following Zeebe brokers and replication configuration is supported: +The following Zeebe brokers and replication configuration are supported: - `clusterSize` must be a multiple of **2** and at least **4** to evenly distribute brokers across the two regions. - `replicationFactor` must be **4** to ensure even partition distribution across regions. @@ -120,7 +120,7 @@ The following Zeebe brokers and replication configuration is supported: | Connectors Deployment | Connectors can be deployed in a dual-region setup, but attention to [idempotency](../../../components/connectors/use-connectors/inbound.md#creating-the-connector-event) is required to avoid event duplication. In a dual-region setup, you'll have two connector deployments and using message idempotency is of importance to not duplicate events. | | Connectors | If you are running Connectors and have a process with an inbound connector deployed in a dual-region setup, consider the following:
    • when you want to delete the process deployment, delete it via Operate (not zbctl), otherwise the inbound connector won't deregister.
    • if you have multiple Operate instances running, then perform the delete operation in both instances. This is a [known limitation](https://github.com/camunda/camunda/issues/17762).
    | | Zeebe Cluster Scaling | Not supported. | -| Web Modeler | Web Modeler is a standalone component that is not covered in this guide. Modelling applications can operate independently outside of the automation clusters. | +| Web Modeler | Web Modeler is a standalone component that is not covered in this guide. Modelling applications can operate independently outside of the orchestration clusters. | ### Infrastructure and deployment platform considerations @@ -149,9 +149,9 @@ This means the Zeebe stretch cluster will not have a quorum when half of its bro The [operational procedure](./../../operational-guides/multi-region/dual-region-ops.md) looks in detail at a recovery from a region loss and how to long-term fully re-establish the lost region. -::caution +:::caution Customers are expected to proactively monitor for regional failures and take ownership of executing the necessary [operational procedures](./../../operational-guides/multi-region/dual-region-ops.md) to ensure smooth recovery and failover. -:: +::: ### Active region loss diff --git a/docs/self-managed/connectors-deployment/connectors-configuration.md b/docs/self-managed/connectors-deployment/connectors-configuration.md index ddc999ba271..4b4af44da09 100644 --- a/docs/self-managed/connectors-deployment/connectors-configuration.md +++ b/docs/self-managed/connectors-deployment/connectors-configuration.md @@ -1,6 +1,7 @@ --- id: connectors-configuration title: Configuration +description: "Configure the Connector runtime environment based on the Zeebe instance to connect to, the Connector functions to run, and secrets available to the Connectors." --- import Tabs from "@theme/Tabs"; @@ -14,8 +15,6 @@ You can configure the Connector runtime environment in the following ways: ## Connecting to Zeebe -In general, the Connector Runtime will respect all properties known to [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe). - -Disabling Operate polling will lead to inability to use inbound (e.g., webhook) capabilities. -However, if you still wish to do so, you need to start your Connector runtime with the following environment variables: +Disabling Operate polling will lead to inability to use inbound capabilities like webhooks. If you still wish to do so, start your Connector runtime with the following environment variables: ```bash CAMUNDA_CONNECTOR_POLLING_ENABLED=false @@ -102,7 +100,7 @@ OPERATE_CLIENT_ENABLED=false ## Manual discovery of Connectors By default, the Connector runtime picks up outbound Connectors available on the classpath automatically. -To disable this behavior, use the following environment variables to configure Connectors and their configuration explicitly: +To disable this behavior, use the following environment variables to configure Connectors explicitly: | Environment variable | Purpose | | :-------------------------------------------- | :------------------------------------------------------------ | @@ -111,9 +109,9 @@ To disable this behavior, use the following environment variables to configure C | `CONNECTOR_{NAME}_INPUT_VARIABLES` (optional) | Variables to fetch for worker with `NAME` | | `CONNECTOR_{NAME}_TIMEOUT` (optional) | Timeout in milliseconds for worker with `NAME` | -Through that configuration, you define all job workers to run. +Through this configuration, you define all job workers to run. -Specifying optional values allow you to override `@OutboundConnector`-provided Connector configuration. 
+Specifying optional values allows you to override `@OutboundConnector`-provided Connector configuration. ```bash CONNECTOR_HTTPJSON_FUNCTION=io.camunda.connector.http.rest.HttpJsonFunction @@ -196,7 +194,7 @@ Reference the secret in the Connector's input in the prefixed style `{{secrets.M Create your own implementation of the `io.camunda.connector.api.secret.SecretProvider` interface that [comes with the SDK](https://github.com/camunda/connectors/blob/main/connector-sdk/core/src/main/java/io/camunda/connector/api/secret/SecretProvider.java). -Package this class and all its dependencies as a JAR, e.g. `my-secret-provider-with-dependencies.jar`. This needs to include a file +Package this class and all its dependencies as a JAR, for example `my-secret-provider-with-dependencies.jar`. This needs to include a file `META-INF/services/io.camunda.connector.api.secret.SecretProvider` that contains the fully qualified class name of your secret provider implementation. Add this JAR to the runtime environment, depending on your deployment setup. Your secret provider will serve secrets as implemented. @@ -223,75 +221,71 @@ java -cp 'connector-runtime-application-VERSION-with-dependencies.jar:...:my-sec ## Multi-tenancy -The Connector Runtime supports multiple tenants for inbound and outbound Connectors. +The Connector Runtime supports multiple tenants for inbound and outbound Connectors. These are configurable in [Identity](/self-managed/identity/user-guide/tenants/managing-tenants.md). + A single Connector Runtime can serve a single tenant or can be configured to serve -multiple tenants. By default, the runtime uses the `` tenant id for all -Zeebe related operations like handling Jobs and publishing Messages. +multiple tenants. By default, the runtime uses the tenant ID `` for all +Zeebe-related operations like handling jobs and publishing messages. :::info Support for **outbound Connectors** with multiple tenants requires a dedicated -tenant job worker config (described below). **Inbound Connectors** will automatically work for all tenants -the configured Connector Runtime client has access to. This can be configured in Identity via -the application assignment. +tenant job worker config (described below). **Inbound Connectors** automatically work for all tenants the configured Connector Runtime client has access to. This can be configured in Identity via the application assignment. ::: ### Environment variables -The following environment variables are used by the Connector Runtime -for the configuration of multi-tenancy. +The Connector Runtime uses the following environment variables to configure multi-tenancy: -| Name | Description | Default value | -| ------------------------------------------ | --------------------------------------------------------------- | ------------- | -| ZEEBE_CLIENT_DEFAULT-TENANT-ID | The default tenant id used to communicate with Zeebe | `` | -| ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS | The default tenants ids (comma separated) used to activate jobs | `` | +| Name | Description | Default value | +| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| ZEEBE_CLIENT_DEFAULT-TENANT-ID | The default tenant ID used to communicate with Zeebe. Changing this value will set a new default tenant ID used for fetching jobs and publishing messages. 
    | `<default>` | +| ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS | The default tenant IDs (comma separated) used to activate jobs. To run the Connector Runtime in a setup where a single runtime serves multiple tenants, add each tenant ID to this list. | `<default>` | -If you are using an embedded version of the Connector Runtime you can specify the tenant information -in your Spring configuration like in this example `application.properties` file: +If you are using an embedded version of the Connector Runtime, you can specify the tenant information in your Spring configuration like in this example `application.properties` file: ```bash -zeebe.client.default-tenant-id= -zeebe.client.default-job-worker-tenant-ids=t1, +zeebe.client.default-tenant-id=myTenant +zeebe.client.default-job-worker-tenant-ids=myTenant ``` ### Outbound Connector config -The Connector Runtime uses the `` tenant for outbound Connector related features. +The Connector Runtime uses the default tenant for outbound Connector-related features. If support for a different tenant or multiple tenants should be enabled, the tenants need to be configured individually using the following environment variables. If you want to use outbound Connectors for a single tenant that is different -from the `` tenant you can specify a different default tenant id using: +from the default tenant, you can specify a different default tenant ID using: ```bash -ZEEBE_CLIENT_DEFAULT-TENANT-ID=tenant1 +ZEEBE_CLIENT_DEFAULT-TENANT-ID=myTenant ``` -This will change the default tenant id used for fetching jobs and publishing messages -to the tenant id `tenant1`. +This will change the default tenant ID used for fetching jobs and publishing messages +to the tenant ID `myTenant`. :::note -Please keep in mind that inbound Connectors will still be enabled for -all tenants that the Connector Runtime client has access to. +Inbound Connectors will still be enabled for +all tenants the Connector Runtime client has access to. ::: -If you want to run the Connector Runtime in a setup where a single runtime -serves multiple tenants you have to add each tenant id to the list of the default job workers: +To run the Connector Runtime in a setup where a single runtime +serves multiple tenants, add each tenant ID to the list of the default job workers: ```bash -ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS=tenant1, tenant2 +ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS=myTenant, otherTenant ``` -In this case the `ZEEBE_CLIENT_DEFAULT-TENANT-ID` will **not** be used for the +In this case, the `ZEEBE_CLIENT_DEFAULT-TENANT-ID` will **not** be used for the configuration of job workers. -### Inbound Connector config +### Inbound Connector configuration -The Connector Runtime will fetch and execute all inbound Connectors it receives from +The Connector Runtime fetches and executes all inbound Connectors it receives from Operate independently of the outbound Connector configuration without any additional configuration required from the user. -If you want to restrict the Connector Runtime inbound Connector feature to a single tenant or multiple tenants -you have to use Identity and assign the tenants the Connector application should have access to. +To restrict the Connector Runtime inbound Connector feature to a single tenant or multiple tenants, use Identity and assign the tenants the Connector application should have access to.
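Putting the multi-tenancy settings above together, a single runtime serving two tenants could be configured with an environment along these lines (a sketch; `myTenant` and `otherTenant` are placeholder tenant IDs that must exist in Identity and be assigned to the Connector application):

```bash
# Hypothetical sketch: one Connector Runtime serving two tenants.
# Default tenant ID, used for publishing messages (not for job workers once the list below is set):
ZEEBE_CLIENT_DEFAULT-TENANT-ID=myTenant
# All tenant IDs (comma separated) the runtime should activate jobs for:
ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS=myTenant, otherTenant
```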
### Troubleshooting diff --git a/docs/self-managed/console-deployment/configuration.md b/docs/self-managed/console-deployment/configuration.md deleted file mode 100644 index 874312d78c7..00000000000 --- a/docs/self-managed/console-deployment/configuration.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: configuration -title: "Configuration" -sidebar_label: "Configuration" -description: "Read details on the configuration variables of Console Self-Managed." ---- - -:::note -Console Self-Managed is available only to [Enterprise customers](/reference/licenses.md#console). -::: - -Console Self-Managed can be configured using environment variables and configuration parameters. - -:::note -Underscores in environment variables correspond to configuration file key levels. -::: - -## Environment variables - -| Environment variable | Description | Example value | -| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------- | -| `KEYCLOAK_BASE_URL` | Base URL for Keycloak | https://example.com/auth | -| `KEYCLOAK_INTERNAL_BASE_URL` | Internal Base URL for Keycloak | http://camunda-platform-keycloak:80/auth | -| `KEYCLOAK_REALM` | Realm for Keycloak | camunda-platform | -| `CAMUNDA_IDENTITY_AUDIENCE` | Audience for Console client | console | -| `CAMUNDA_IDENTITY_CLIENT_ID` | Client Id for Console client | console | -| `CAMUNDA_CONSOLE_CONTEXT_PATH` | Context path for Console | console | -| `CAMUNDA_CONSOLE_CUSTOMERID` | Unique identifier of the customer | `customer-id` | -| `CAMUNDA_CONSOLE_INSTALLATIONID` | Unique installation id of the current customer installation | `installation-id` | -| `CAMUNDA_CONSOLE_TELEMETRY` | Telemetry config for Console Self-Managed: `disabled`, `online`, or `download` | `online` | -| `CAMUNDA_CONSOLE_DISABLE_AUTH` | Disables authentication for Console. With this option, set users don't have to log in to use Console and API requests can be executed without an Authorization header.
    By disabling authentication all `CAMUNDA_IDENTITY`, variables won't be used. | `true` | -| `CAMUNDA_LICENSE_KEY` | Your Camunda 8 license key, if your installation requires a license. For Helm installations, license keys can be configured globally in your `values.yaml` file. See the [Helm installation documentation](/self-managed/setup/install.md#configure-license-key) for more details. | N/A | - -Console environment variables could be set in Helm via the `console.env` key. For more details, check [Console Helm values](https://artifacthub.io/packages/helm/camunda/camunda-platform#console-parameters). - -:::note -Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on Console startup or functionality. To obtain a license, visit the [Camunda Enterprise page](https://camunda.com/platform/camunda-platform-enterprise-contact/). -::: - -## Telemetry - -You can enable telemetry and usage collection to help us improve our product by sending several telemetry metrics to Camunda. The information we collect will contribute to continuous product enhancement and help us understand how Camunda is used. We do not collect sensitive information and limit data points to several metrics. For more information, you can download collected data set metrics from the telemetry page at anytime. - -By enabling data collection and reporting, you can get a new page to introspect Camunda 8 component metrics. Usually accessible via monitoring tools like Prometheus, you can now access these metrics directly in Console. By default, telemetry collection is disabled and no data is collected. -When `CAMUNDA_CONSOLE_TELEMETRY` env var or `telemetry` parameter is set to `online`, the telemetry feature is activated and the collected data is sent once every 24 hours via HTTPS. -When `CAMUNDA_CONSOLE_TELEMETRY` env var or `telemetry` parameter is set to `download`, the telemetry feature is activated. Data collected **will not** be sent to Camuda automatically, but could be downloaded from Console and shared with us on request. - -To enable usage collection, configure the parameters described in the next section. - -## Configuration parameters - -To enable telemetry, the following parameters need to be configured. Camunda will provide you with the customer ID (Camunda Docker username) needed to send telemetry data to Camunda. - -| Parameter | Description | Example value | -| ---------------- | ----------------------------------------------------------------------------------- | --------------- | -| `customerId` | Unique identifier of the customer. This is also a Camunda Docker registry user name | `customername` | -| `installationId` | Unique installation id of the current customer installation | `my-deployment` | -| `telemetry` | Telemetry config for Console Self-Managed: `disabled`, `online` or `download` | `online` | - -Console environment variables could be set in Helm. For more details, check [Console Helm values](https://artifacthub.io/packages/helm/camunda/camunda-platform#console-parameters). -For example: - -```yaml -console: - env: - - name: CAMUNDA_CONSOLE_CUSTOMERID - values: customername - - name: CAMUNDA_CONSOLE_INSTALLATIONID - values: my-deployment - - name: CAMUNDA_CONSOLE_TELEMETRY - value: online -``` - -## Using a different OpenID Connect (OIDC) authentication provider than Keycloak - -By default, Console uses Keycloak to provide authentication. 
-You can use a different OIDC provider by following the steps described in the [OIDC connection guide](/self-managed/setup/guides/connect-to-an-oidc-provider.md). - -## Monitoring - -To help understand how Console operates, we expose the following endpoints by default: - -| Endpoint | Port | Path | -| ------------------------------------------------ | ------ | ------------------- | -| Metrics endpoint with default Prometheus metrics | `9100` | `/prometheus` | -| Readiness probe | `9100` | `/health/readiness` | -| Liveness probe | `9100` | `/health/liveness` | - -## Troubleshooting - -| Problem | Solution | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| Invalid parameter: redirect_uri | Ensure the correct redirect URL is configured for the application Console in Identity. The redirect URL must match the Console URL. | -| JWKS for authentication is not reachable | To verify a user's access token the JWKS needs to be reachable. Make sure the environment variable `KEYCLOAK_INTERNAL_BASE_URL` is set correctly. | -| Console shows error 401 | Make sure the logged-in user has the role `Console` assigned in the Identity service. | diff --git a/docs/self-managed/console-deployment/configuration/configuration.md b/docs/self-managed/console-deployment/configuration/configuration.md new file mode 100644 index 00000000000..1c09cea48e1 --- /dev/null +++ b/docs/self-managed/console-deployment/configuration/configuration.md @@ -0,0 +1,207 @@ +--- +id: configuration +title: "Console configuration" +sidebar_label: "Overview" +description: "Read details on the configuration variables of Console Self-Managed." +--- + +Console Self-Managed can be configured using environment variables and configuration parameters. + +:::note +Underscores in environment variables correspond to configuration file key levels. +::: + +## Environment variables + +| Environment variable | Description | Example value | +| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------- | +| `KEYCLOAK_BASE_URL` | Base URL for Keycloak | https://example.com/auth | +| `KEYCLOAK_INTERNAL_BASE_URL` | Internal base URL for Keycloak | http://camunda-platform-keycloak:80/auth | +| `KEYCLOAK_REALM` | Realm for Keycloak | camunda-platform | +| `CAMUNDA_IDENTITY_AUDIENCE` | Audience for Console client | console | +| `CAMUNDA_IDENTITY_CLIENT_ID` | Client ID for Console client | console | +| `CAMUNDA_CONSOLE_CONTEXT_PATH` | Context path for Console | console | +| `CAMUNDA_CONSOLE_CUSTOMERID` | Unique identifier of the customer | `customer-id` | +| `CAMUNDA_CONSOLE_INSTALLATIONID` | Unique installation ID of the current customer installation | `installation-id` | +| `CAMUNDA_CONSOLE_TELEMETRY` | Telemetry config for Console Self-Managed: `disabled`, `online`, or `download` | `online` | +| `CAMUNDA_CONSOLE_DISABLE_AUTH` | Disables authentication for Console. With this option, set users don't have to log in to use Console and API requests can be executed without an authorization header.
    By disabling authentication, all `CAMUNDA_IDENTITY` variables won't be used. | `true` | +| `CAMUNDA_LICENSE_KEY` | Your Camunda 8 license key, if your installation requires a license. For Helm installations, license keys can be configured globally in your `values.yaml` file. See the [Helm installation documentation](/self-managed/setup/install.md#configure-license-key) for more details. | N/A | +| `SERVER_SSL_ENABLED` | [optional]
    Whether to enable SSL support.
    Default: `false` | `true` | +| `SERVER_SSL_CERTIFICATE` | [optional]
    Path to a PEM-encoded SSL certificate file. | `file:/full/path/to/certificate.pem` | +| `SERVER_SSL_CERTIFICATE_PRIVATE_KEY` | [optional]
    Path to a PEM-encoded private key file for the SSL certificate. | `file:/full/path/to/key.pem` | +| `SERVER_SSL_PASSPHRASE` | [optional]
    Passphrase for the key. | `passphrase` | +| `MANAGEMENT_SERVER_SSL_ENABLED` | [optional]
    Whether to enable SSL support for the management server routes.
    Default: `false` | `true` | +| `MANAGEMENT_SERVER_SSL_CERTIFICATE` | [optional]
    Path to a PEM-encoded SSL certificate file. | `file:/full/path/to/certificate.pem` | +| `MANAGEMENT_SERVER_SSL_CERTIFICATE_PRIVATE_KEY` | [optional]
    Path to a PEM-encoded private key file for the SSL certificate. | `file:/full/path/to/key.pem` | +| `MANAGEMENT_SERVER_SSL_PASSPHRASE` | [optional]
    Passphrase for the key. | `passphrase` | + +Console environment variables can be set in Helm via the `console.env` key. For more details, check [Console Helm values](https://artifacthub.io/packages/helm/camunda/camunda-platform#console-parameters). + +:::note +Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on Console startup or functionality. To obtain a license, visit the [Camunda Enterprise page](https://camunda.com/platform/camunda-platform-enterprise-contact/). +::: + +## Telemetry + +You can enable telemetry and usage collection to help us improve our product by sending several telemetry metrics to Camunda. The information we collect will contribute to continuous product enhancement and help us understand how Camunda is used. We do not collect sensitive information and limit data points to several metrics. For more information, you can download the collected data set metrics from the telemetry page at any time. + +By enabling data collection and reporting, you can get a new page to introspect Camunda 8 component metrics. Usually accessible via monitoring tools like Prometheus, you can now access these metrics directly in Console. By default, telemetry collection is disabled and no data is collected. +When the `CAMUNDA_CONSOLE_TELEMETRY` env var or `telemetry` parameter is set to `online`, the telemetry feature is activated and the collected data is sent once every 24 hours via HTTPS. +When the `CAMUNDA_CONSOLE_TELEMETRY` env var or `telemetry` parameter is set to `download`, the telemetry feature is activated. Data collected **will not** be sent to Camunda automatically, but can be downloaded from Console and shared with us on request. + +To enable usage collection, configure the parameters described in the next section. + +## Configuration parameters + +To enable telemetry, the following parameters need to be configured. Camunda will provide you with the customer ID (Camunda Docker username) needed to send telemetry data to Camunda. + +| Parameter | Description | Example value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------- | +| `customerId` | Unique identifier of the customer. This is also a Camunda Docker registry username. | `customername` | +| `installationId` | Unique installation ID of the current customer installation. | `my-deployment` | +| `telemetry` | Telemetry config for Console Self-Managed: `disabled`, `online`, or `download`. | `online` | +| `managed.releases.tags` | Assign cluster tags to indicate what type of cluster it is. Default tags are `dev`, `stage`, `test`, or `prod`, but users can assign any custom tag. | `- dev` (list of strings) | +| `managed.releases.custom-properties` | List of custom properties users can add to their cluster with custom descriptions and custom links on the cluster details page. | See custom properties section | + +Console environment variables can be set in Helm. For more details, check [Console Helm values](https://artifacthub.io/packages/helm/camunda/camunda-platform#console-parameters). 
+For example:
+
+```yaml
+console:
+  env:
+    - name: CAMUNDA_CONSOLE_CUSTOMERID
+      value: customername
+    - name: CAMUNDA_CONSOLE_INSTALLATIONID
+      value: my-deployment
+    - name: CAMUNDA_CONSOLE_TELEMETRY
+      value: online
+```
+
+### Override configuration parameters
+
+Previously, supplying configuration parameters replaced the complete configuration: even if you only changed the `customerId`, the complete configuration still had to be provided.
+
+With override parameters, this is no longer necessary. You can set a subset of parameters and adjust individual values. If a parameter must be changed for a specific cluster, the `name` and `namespace` fields must be set to the exact values of that cluster so the override can be correlated correctly.
+
+#### Example
+
+Given the following configuration provided by Helm:
+
+```yaml
+camunda:
+  console:
+    customerId: customer-id
+    installationId: camunda-platform-id-dev-console-sm-main
+    telemetry: disabled
+    managed:
+      method: plain
+      releases:
+        - name: camunda-platform
+          namespace: camunda-platform-namespace
+          version: 9.1.2
+          components:
+            - name: Console
+              id: console
+              version: ...
+              url: https://...
+              readiness: https://...
+              metrics: https://...
+            - name: Keycloak
+              id: keycloak
+              version: ...
+              url: https://...
+            - name: Identity
+              id: identity
+              version: ...
+              url: https://...
+              readiness: https://...
+              metrics: https://...
+            - name: WebModeler WebApp
+              id: webModelerWebApp
+              version: ...
+              url: https://...
+            - name: Zeebe Gateway
+              id: zeebeGateway
+              version: ...
+              urls:
+                grpc: grpc://...
+                http: https://...
+              readiness: https://...
+              metrics: https://...
+            - name: Zeebe
+              id: zeebe
+              version: ...
+```
+
+The following example of an `overrideConfiguration` changes the `customerId` and adds `tags` and `custom-properties` for the cluster with name `camunda-platform` in namespace `camunda-platform-namespace`:
+
+```yaml
+console:
+  overrideConfiguration:
+    camunda:
+      console:
+        customerId: "new-customer-id"
+        managed:
+          releases:
+            - name: camunda-platform
+              namespace: camunda-platform-namespace
+              tags:
+                - production
+              custom-properties:
+                - description: "This is a custom description of the cluster."
+                  links:
+                    - name: "Camunda"
+                      url: "https://camunda.com"
+                    - name: "Camunda Docs"
+                      url: "https://docs.camunda.io"
+                    - name: "Grafana"
+                      url: "https://..."
+```
+
+### Custom properties
+
+Custom properties let you add custom information to the **Cluster details** page in Console. A custom property contains a description and multiple links.
+
+The following example shows one custom property for a cluster:
+
+```yaml
+console:
+  overrideConfiguration:
+    camunda:
+      console:
+        customerId: "new-customer-id"
+        managed:
+          releases:
+            - name: camunda-platform
+              namespace: camunda-platform
+              custom-properties:
+                - description: "Useful links to Camunda resources."
+                  links:
+                    - name: "Camunda Blog"
+                      url: "https://camunda.com/blog/"
+                    - name: "Camunda Docs"
+                      url: "https://docs.camunda.io"
+```
+
+## Using a different OpenID Connect (OIDC) authentication provider than Keycloak
+
+By default, Console uses Keycloak to provide authentication.
+You can use a different OIDC provider by following the steps described in the [OIDC connection guide](/self-managed/setup/guides/connect-to-an-oidc-provider.md).
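One practical note on the override configuration above: the `name` and `namespace` you provide must match the deployed Helm release exactly, or the override cannot be correlated with the cluster. Assuming you have Helm CLI access to the Kubernetes cluster, a quick way to look up the exact values is:

```shell
# List all Helm releases together with their namespaces, so the override's
# `name` and `namespace` fields can be copied exactly as deployed.
helm list --all-namespaces
```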
+ +## Monitoring + +To help understand how Console operates, we expose the following endpoints by default: + +| Endpoint | Port | Path | +| ------------------------------------------------ | ------ | ------------------- | +| Metrics endpoint with default Prometheus metrics | `9100` | `/prometheus` | +| Readiness probe | `9100` | `/health/readiness` | +| Liveness probe | `9100` | `/health/liveness` | + +## Troubleshooting + +| Problem | Solution | +| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Invalid parameter: redirect_uri | Ensure the correct redirect URL is configured for the application Console in Identity. The redirect URL must match the Console URL. | +| JWKS for authentication is not reachable | To verify a user's access token the JWKS needs to be reachable. Make sure the environment variable `KEYCLOAK_INTERNAL_BASE_URL` is set correctly. | +| Console shows error 401 | Make sure the logged-in user has the role `Console` assigned in the Identity service. | diff --git a/docs/self-managed/console-deployment/configuration/ssl.md b/docs/self-managed/console-deployment/configuration/ssl.md new file mode 100644 index 00000000000..e74aa635e3f --- /dev/null +++ b/docs/self-managed/console-deployment/configuration/ssl.md @@ -0,0 +1,53 @@ +--- +id: ssl +title: "Console SSL configuration" +sidebar_label: "SSL" +description: "Read details on additional SSL configuration for Console." +--- + +By default, communication between Console, Identity, and other components is not encrypted, as it usually occurs backend-to-backend within the same [Docker](/self-managed/setup/deploy/other/docker.md) network or [Kubernetes](/self-managed/setup/install.md) cluster. + +TLS-encrypted communication can be enabled by following the steps below (for example, if backend-to-backend communication is not possible in a custom Camunda 8 installation setup). + +## Configure Console for secure connections + +Console can be configured using [environment variables](/self-managed/console-deployment/configuration/configuration.md#environment-variables) to enable secure connections to both Console and Identity. + +### Configure the Identity base URL + +Set the base URL (starting with `https://`) of your Identity instance using the following properties: + +| Environment variable | Example value | +| ---------------------------- | ------------------------------ | +| `KEYCLOAK_BASE_URL` | `https://identity.example.com` | +| `KEYCLOAK_INTERNAL_BASE_URL` | `https://identity.example.com` | + +### Configure SSL certificate + +Enable and configure SSL by setting the following properties: + +| Environment variable | Description | Example value | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------ | +| `SERVER_SSL_ENABLED` | To enable SSL, set to `true`. | `true` | +| `SERVER_SSL_CERTIFICATE` | The path to a PEM-encoded SSL certificate file. Ensure the provided path is accessible from the container (for example, via a mounted volume). | `file:/full/path/to/certificate.pem` | +| `SERVER_SSL_CERTIFICATE_PRIVATE_KEY` | The path to a PEM-encoded private key file for the SSL certificate. Ensure the provided path is accessible from the container (for example, via a mounted volume). 
| `file:/full/path/to/key.pem` | +| `SERVER_SSL_PASSPHRASE` | _Optional_ A passphrase for the private key. | `passphrase` | + +SSL can be configured separately for the management routes using the `MANAGEMENT_` properties: + +| Environment variable | Description | Example value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------ | +| `MANAGEMENT_SERVER_SSL_ENABLED` | To enable SSL, set to `true`. | `true` | +| `MANAGEMENT_SERVER_SSL_CERTIFICATE` | The path to a PEM-encoded SSL certificate file. Ensure the provided path is accessible from the container (for example, via a mounted volume). | `file:/full/path/to/certificate.pem` | +| `MANAGEMENT_SERVER_SSL_CERTIFICATE_PRIVATE_KEY` | The path to a PEM-encoded private key file for the SSL certificate. Ensure the provided path is accessible from the container (for example, via a mounted volume). | `file:/full/path/to/key.pem` | +| `MANAGEMENT_SERVER_SSL_PASSPHRASE` | _Optional_ A passphrase for the private key. | `passphrase` | + +## (Optional) Provide a custom certificate + +If you are using a custom (self-signed) TLS certificate in Console or Identity, configure Console to accept the certificate. + +Provide the path to the certificate file via the environment variable `NODE_EXTRA_CA_CERTS`: + +| Environment variable | Description | Example value | +| --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `NODE_EXTRA_CA_CERTS` | The path to your self-signed TLS certificate. Ensure the provided path is accessible from the container (for example, via a mounted volume). | `/path/to/certificate.crt` | diff --git a/docs/self-managed/console-deployment/overview.md b/docs/self-managed/console-deployment/overview.md index c8a37467b5b..825e5fc5787 100644 --- a/docs/self-managed/console-deployment/overview.md +++ b/docs/self-managed/console-deployment/overview.md @@ -2,9 +2,9 @@ id: overview title: "Console (Self-Managed)" sidebar_label: "Overview" -description: "Console Self-Managed provides key insights into automation cluster deployments, process orchestration usage, and streamlining usage tracking." +description: "Console Self-Managed provides key insights into orchestration cluster deployments, process orchestration usage, and streamlining usage tracking." --- -Camunda Console Self-Managed offers a centralized overview for Camunda 8 installations, designed to enhance operational efficiency and oversight within Enterprise environments. Console Self-Managed provides key insights into automation cluster deployments, process orchestration usage, and streamlining usage tracking. +Camunda Console Self-Managed offers a centralized overview for Camunda 8 installations, designed to enhance operational efficiency and oversight within Enterprise environments. Console Self-Managed provides key insights into orchestration cluster deployments, process orchestration usage, and streamlining usage tracking. Camunda Console Self-Managed provided is available as a container image. Refer to the [installation guide](/self-managed/setup/overview.md) for details on how to install this component. 
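Tying the SSL options from the new `ssl.md` page above together, the following is a minimal sketch of how these variables could be supplied through the Helm chart's `console.env` key. The certificate paths and the idea of mounting the files via a volume are illustrative assumptions, not documented defaults:

```yaml
console:
  env:
    - name: SERVER_SSL_ENABLED
      value: "true"
    # Paths assume the certificate files are mounted into the container,
    # for example via an extra volume at /usr/local/console/certs.
    - name: SERVER_SSL_CERTIFICATE
      value: "file:/usr/local/console/certs/certificate.pem"
    - name: SERVER_SSL_CERTIFICATE_PRIVATE_KEY
      value: "file:/usr/local/console/certs/key.pem"
    # Only needed if Console must trust a self-signed certificate
    # presented by Identity or another component.
    - name: NODE_EXTRA_CA_CERTS
      value: "/usr/local/console/certs/ca.crt"
```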
diff --git a/docs/self-managed/console-deployment/telemetry.md b/docs/self-managed/console-deployment/telemetry.md index 08b19f22140..fda37c67c13 100644 --- a/docs/self-managed/console-deployment/telemetry.md +++ b/docs/self-managed/console-deployment/telemetry.md @@ -24,7 +24,7 @@ The telemetry feature categorizes the collected data into general Camunda data a ### How to enable telemetry -By default, the telemetry configuration is set to `disabled`. Telemetry can be activated by setting the appropriate configuration in the Camunda 8 Helm chart or setting the appropriate environment variables in the Console configuration. Refer to [Console configuration](./configuration.md) for more information. +By default, the telemetry configuration is set to `disabled`. Telemetry can be activated by setting the appropriate configuration in the Camunda 8 Helm chart or setting the appropriate environment variables in the Console configuration. Refer to [Console configuration](./configuration/configuration.md) for more information. ### Telemetry configuration options explained diff --git a/docs/self-managed/identity/deployment/configuration-variables.md b/docs/self-managed/identity/deployment/configuration-variables.md index 2f37d61ef18..09066025e1f 100644 --- a/docs/self-managed/identity/deployment/configuration-variables.md +++ b/docs/self-managed/identity/deployment/configuration-variables.md @@ -35,10 +35,16 @@ import Licensing from '../../../self-managed/react-components/licensing.md' ## OIDC configuration -| Evnironment variable | Description | Default value | -| ---------------------------- | --------------------------------------------------- | ------------- | -| IDENTITY_INITIAL_CLAIM_NAME | The name of the claim to use for the initial user. | oid | -| IDENTITY_INITIAL_CLAIM_VALUE | The value of the claim to use for the initial user. | | +Claims are name/value pairs used to represent an individual identity. Configure your initial claim and value to match the claim used with your OIDC provider. For example, to use your Microsoft Entra unique account ID, set `IDENTITY_INITIAL_CLAIM_NAME` to `oid`, and `IDENTITY_INITIAL_CLAIM_VALUE` to the ID. + +:::note +Once set, you cannot update your initial claim name and value using environment or Helm values. You must change these values directly in the database. +::: + +| Environment variable | Description | Default value | +| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `IDENTITY_INITIAL_CLAIM_NAME` | The type of claim to use for the initial user. Examples can include `oid`, `name` or `email`. | `oid` | +| `IDENTITY_INITIAL_CLAIM_VALUE` | The value of the claim to use for the initial user. For the default `oid`, the value usually corresponds to the unique ID of your user account. | | ## Component configuration diff --git a/docs/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md b/docs/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md index 835be739ecc..1ccd5bc75c1 100644 --- a/docs/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md +++ b/docs/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md @@ -5,12 +5,15 @@ sidebar_label: "Connect to an existing Keycloak instance" description: "Learn how to connect Identity to your existing Keycloak instance." 
--- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + In this guide, we'll demonstrate how to connect Identity to your existing Keycloak instance. ## Prerequisites -- Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/23.0.1/server_admin/#using-the-admin-console) -- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/23.0.1/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak. +- Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/latest/server_admin/#using-the-admin-console) +- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/latest/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak :::note Clients in Camunda 8 SaaS and applications in Camunda 8 Self-Managed provide a similar purpose. One key difference is that for Camunda 8 SaaS, you can set up specific [client connection credentials](/guides/setup-client-connection-credentials.md), whereas in Identity, an application is created with credentials automatically assigned. @@ -24,7 +27,15 @@ As of the 8.5.3 release, Identity uses the Keycloak frontend URL instead of the To avoid connectivity issues, ensure your Keycloak frontend URL is accessible by adjusting your network, firewall, or security settings as needed. This adjustment is crucial to maintain the integration with Keycloak and ensure compatibility. ::: -To connect Identity to an existing Keycloak instance, take the following steps: +To connect Identity to an existing Keycloak instance, take the following steps for your Camunda installation: + + + + 1. Log in to your Keycloak Admin Console. 2. Select the realm you would like to connect Identity to. In our example, this is **camunda-platform**. @@ -56,6 +67,18 @@ To connect Identity to an existing Keycloak instance, take the following steps: ::: 13. Start Identity. + + + +1. Log in to your Keycloak Admin Console. +2. Verify the name of the realm you would like to connect Identity to. In our example, this is **camunda-platform**. + ![keycloak-admin-realm-select](../img/keycloak-admin-realm-select.png) +3. Set the `KEYCLOAK_REALM` [environment variable](/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. +4. Start Identity. + + + + :::note What does Identity create when starting? Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/self-managed/identity/deployment/starting-configuration.md). ::: diff --git a/docs/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md b/docs/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md index f39ada6f300..2f6d9bd6f54 100644 --- a/docs/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md +++ b/docs/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md @@ -4,46 +4,69 @@ title: Deploy diagram description: "Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed. Follow the steps below to deploy a diagram: 1. Click the rocket-shaped deployment icon: -![deployment icon](./img/deploy-icon.png) + ![deployment icon](./img/deploy-icon.png) 2. 
Click **Camunda 8 Self-Managed**: -![deployment configuration](./img/deploy-empty.png) + ![deployment configuration](./img/deploy-empty.png) 3. Input the `Cluster endpoint`: -:::note -You can connect to Camunda 8 both securely and insecurely through the `https` and `http` protocols. + :::note + You can connect to Camunda 8 both securely and insecurely through the `https` and `http` protocols. + + Secured connections to a remote endpoint will only be established if the remote server certificate is trusted by the app. Ensure that signing trusted roots and intermediate certificates [are known to the app](/components/modeler/desktop-modeler/flags/flags.md#zeebe-ssl-certificate). + ::: + + :::caution + + Multi-tenancy is only available with authentication enabled [through Identity](/self-managed/identity/what-is-identity.md), and [enabled in all required components](/self-managed/concepts/multi-tenancy.md). + + ::: + + ![deployment via Camunda 8](./img/deploy-endpoint.png) + +4. Select your authentication method, and input the required credentials: -Secured connections to a remote endpoint will only be established if the remote server certificate is trusted by the app. Ensure that signing trusted roots and intermediate certificates [are known to the app](/components/modeler/desktop-modeler/flags/flags.md#zeebe-ssl-certificate). -::: + -:::caution + -Multi-tenancy is only available with authentication enabled [through Identity](/self-managed/identity/what-is-identity.md), and [enabled in all required components](/self-managed/concepts/multi-tenancy.md). + For **basic authentication**, input your username and password: -::: + ![basic auth configuration](./img/deploy-with-basic-auth.png) -![deployment via Camunda 8](./img/deploy-endpoint.png) + -4. Select **Basic**, and input your username and password in case your gateway requires basic authentication: + -![basic auth configuration](./img/deploy-with-basic-auth.png) + For **OAuth**, input the credentials for your OAuth provider. These are configured as part of the default [Helm installation](/self-managed/setup/install.md) and can be discovered in [Identity](/self-managed/identity/what-is-identity.md), or are set by Zeebe [environment variables](/self-managed/zeebe-deployment/security/client-authorization.md#environment-variables). -5. Select **OAuth**, and input the credentials in case your gateway requires authentication with OAuth: + ![oauth configuration](./img/deploy-with-oauth.png) -:::note -The OAuth URL needs to contain the full path to the token endpoint, i.e. `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. -::: + | Name | Description | Example value | + | --------------- | ------------------------------------ | ----------------------------------------------------------------------------------------- | + | Client ID | The name of your Zeebe client. | `zeebe` | + | Client secret | The password of your Zeebe client. | `zecret` | + | OAuth token url | The full path to the token endpoint. | `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. | + | OAuth audience | The permission name for Zeebe. | `zeebe-api` | -![oauth configuration](./img/deploy-with-oauth.png) + + -6. Select the **Remember** checkbox if you want to locally store the connection information. +5. Select the **Remember** checkbox if you want to locally store the connection information. -7. Click **Deploy** to perform the deployment. +6. Click **Deploy** to perform the deployment. 
![deployment successful](./img/deploy-success.png) diff --git a/docs/self-managed/modeler/desktop-modeler/img/deploy-empty.png b/docs/self-managed/modeler/desktop-modeler/img/deploy-empty.png index 319eb95bc07..ec6fb799074 100644 Binary files a/docs/self-managed/modeler/desktop-modeler/img/deploy-empty.png and b/docs/self-managed/modeler/desktop-modeler/img/deploy-empty.png differ diff --git a/docs/self-managed/modeler/desktop-modeler/img/deploy-endpoint.png b/docs/self-managed/modeler/desktop-modeler/img/deploy-endpoint.png index 08b9bfc6223..aad956e1866 100644 Binary files a/docs/self-managed/modeler/desktop-modeler/img/deploy-endpoint.png and b/docs/self-managed/modeler/desktop-modeler/img/deploy-endpoint.png differ diff --git a/docs/self-managed/modeler/desktop-modeler/img/deploy-icon.png b/docs/self-managed/modeler/desktop-modeler/img/deploy-icon.png index f322ad01657..d7d6c03f478 100644 Binary files a/docs/self-managed/modeler/desktop-modeler/img/deploy-icon.png and b/docs/self-managed/modeler/desktop-modeler/img/deploy-icon.png differ diff --git a/docs/self-managed/modeler/desktop-modeler/img/deploy-success.png b/docs/self-managed/modeler/desktop-modeler/img/deploy-success.png index e3384864cf7..20ead7045d8 100644 Binary files a/docs/self-managed/modeler/desktop-modeler/img/deploy-success.png and b/docs/self-managed/modeler/desktop-modeler/img/deploy-success.png differ diff --git a/docs/self-managed/modeler/desktop-modeler/img/deploy-with-basic-auth.png b/docs/self-managed/modeler/desktop-modeler/img/deploy-with-basic-auth.png index 9a765d21543..c4d7679bda1 100644 Binary files a/docs/self-managed/modeler/desktop-modeler/img/deploy-with-basic-auth.png and b/docs/self-managed/modeler/desktop-modeler/img/deploy-with-basic-auth.png differ diff --git a/docs/self-managed/modeler/desktop-modeler/img/deploy-with-oauth.png b/docs/self-managed/modeler/desktop-modeler/img/deploy-with-oauth.png index e34977a5b16..a103da93ff5 100644 Binary files a/docs/self-managed/modeler/desktop-modeler/img/deploy-with-oauth.png and b/docs/self-managed/modeler/desktop-modeler/img/deploy-with-oauth.png differ diff --git a/docs/self-managed/modeler/web-modeler/configuration/configuration.md b/docs/self-managed/modeler/web-modeler/configuration/configuration.md index 9b989e42c79..5585791bf52 100644 --- a/docs/self-managed/modeler/web-modeler/configuration/configuration.md +++ b/docs/self-managed/modeler/web-modeler/configuration/configuration.md @@ -20,12 +20,15 @@ import Licensing from '../../../../self-managed/react-components/licensing.md' ### Clusters -Clusters configured using the following options can be selected when deploying from Web Modeler. If no clusters are configured, your cluster information can be provided at the time of the deployment. The Camunda 8 [Docker Compose distribution](/self-managed/setup/deploy/local/docker-compose.md) provides a local Zeebe cluster configured by default. +Clusters must be configured using the following options to access the cluster from within Web Modeler. If no clusters are configured, you will not be able to perform any actions that require a cluster (for example, deploy, start an instance, or Play a process). + +The Camunda 8 [Helm](/self-managed/setup/install.md) and [Docker Compose](/self-managed/setup/deploy/local/docker-compose.md) distributions provide a local Zeebe cluster configured by default. To add additional clusters, increment the `0` value for each variable (`CAMUNDA_MODELER_CLUSTERS_1_NAME`). 
| Environment variable | Description | Example value | | ---------------------------------------------------- | -------------------------------------------------------------- | ----------------------------------- | +| `CAMUNDA_MODELER_CLUSTERS_0_ID` | A unique identifier to use for your cluster. | `test-cluster-1` | | `CAMUNDA_MODELER_CLUSTERS_0_NAME` | The name of your cluster. | `test cluster 1` | | `CAMUNDA_MODELER_CLUSTERS_0_VERSION` | The Camunda version used by this cluster. | `8.6.0` | | `CAMUNDA_MODELER_CLUSTERS_0_AUTHENTICATION` | The authentication to use with your cluster. | `OAUTH`, `NONE` | @@ -83,11 +86,13 @@ The `restapi` component sends certain events (e.g. "file updated", "comment adde Web Modeler integrates with Identity and Keycloak for authentication and authorization (using OAuth 2.0 + OpenID Connect) as well as user management. -| Environment variable | Description | Example value | -| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | -| `CAMUNDA_IDENTITY_BASEURL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | -| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | -| `RESTAPI_OAUTH2_TOKEN_ISSUER_BACKEND_URL` | [optional]
    [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's [OpenID Provider Configuration](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig); if not set, `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` is used. | `http://keycloak:8080/auth/realms/camunda-platform` | +| Environment variable | Description | Example value | +| ---------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `CAMUNDA_IDENTITY_BASEURL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | +| `RESTAPI_OAUTH2_TOKEN_ISSUER_BACKEND_URL` | [optional]
    [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's [OpenID Provider Configuration](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig); if not set, `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` is used. | `http://keycloak:8080/auth/realms/camunda-platform` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI` | [optional] URL of the JWK Set endpoint (used for JWT validation). Only necessary if URL cannot be derived from the OIDC configuration endpoint. | `https://keycloak.example.com/auth/realms/camunda-platform/protocol/openid-connect/certs` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWS_ALGORITHMS` | [optional] List of trusted JWS algorithms used for JWT validation. Only necessary if the algorithms cannot be derived from the JWK Set response. | `ES256` | Refer to the [advanced Identity configuration guide](./identity.md) for additional details on how to connect a custom OpenID Connect (OIDC) authentication provider. @@ -140,6 +145,20 @@ Refer to the [advanced SSL configuration guide](./ssl.md) for additional details | `RESTAPI_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the regular API endpoints. | `8081` | `8081` | | `RESTAPI_MANAGEMENT_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the management API endpoints. | `8091` | `8091` | +### Proxy + +These settings are useful when the application needs to make outgoing network requests in environments that require traffic to pass through a proxy server. + +| Environment variable | Description | Example value | Default value | +| -------------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------- | ------------- | +| `http_proxy` | Specifies the proxy server to be used for outgoing HTTP requests. | `http://proxy.example.com:8080` | - | +| `https_proxy` | Specifies the proxy server to be used for outgoing HTTPS requests. | `https://secureproxy.example.com:443` | - | +| `no_proxy` | A comma-separated list of domain names or IP addresses for which the proxy should be bypassed. | `localhost,127.0.0.1,.example.com` | - | + +:::note +The proxy-related environment variables are lowercase because they follow a widely accepted convention used in many system environments and tools. +::: + ### Feature Flags | Environment variable | Description | Example value | Default value | @@ -181,10 +200,13 @@ The `webapp` component sends certain events (e.g. "user opened diagram", "user l ### Logging -| Environment variable | Description | Example value | -| -------------------- | -------------------------------------- | ---------------------------- | -| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| Environment variable | Description | Example value | +| -------------------- | ----------------------------------------------- | ---------------------------- | +| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| `LOG_LEVEL_CLIENT` | [optional]
    Log level for the client | `DEBUG` | +| `LOG_LEVEL_WEBAPP` | [optional]
    Log level for the Node.js server | `DEBUG` | +The `LOG_LEVEL_*` options can be found [here](../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). Refer to the [Advanced Logging Configuration Guide](./logging.md#logging-configuration-for-the-webapp-component) for additional details on how to customize the `webapp` logging output. ### SSL diff --git a/docs/self-managed/modeler/web-modeler/configuration/logging.md b/docs/self-managed/modeler/web-modeler/configuration/logging.md index 307a2ea773b..ab86be8bc30 100644 --- a/docs/self-managed/modeler/web-modeler/configuration/logging.md +++ b/docs/self-managed/modeler/web-modeler/configuration/logging.md @@ -38,6 +38,16 @@ To enable additional log output to a file, adjust the following environment vari LOG_FILE_PATH=/full/path/to/log/file.log ``` +### Configuring log levels + +To control the verbosity of the logs, adjust the environment variables `LOG_LEVEL_CLIENT` (browser client) and `LOG_LEVEL_WEBAPP` (Node.js server). + +```properties +LOG_LEVEL_CLIENT=DEBUG +``` + +The `LOG_LEVEL_*` options can be found [here](../../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). + ## Logging configuration for the `websocket` component By default, the `websocket` component logs to the Docker container's standard output. diff --git a/docs/self-managed/modeler/web-modeler/installation.md b/docs/self-managed/modeler/web-modeler/installation.md index 9cc2ac07094..4162f8482f7 100644 --- a/docs/self-managed/modeler/web-modeler/installation.md +++ b/docs/self-managed/modeler/web-modeler/installation.md @@ -4,4 +4,4 @@ title: Installation description: "Details on installation of Web Modeler Self-Managed." --- -Refer to the [installation guide](/self-managed/setup/overview.md) for details on how to install Web Modeler, and the [contact page](/contact) for guidance on obtaining Camunda 8 credentials. +Refer to the [installation guide](/self-managed/setup/overview.md) for details on how to install Web Modeler, and the [contact page](/reference/contact.md) for guidance on obtaining Camunda 8 credentials. diff --git a/docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md b/docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md new file mode 100644 index 00000000000..6059d9bed8c --- /dev/null +++ b/docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md @@ -0,0 +1,36 @@ +--- +id: troubleshoot-proxy-configuration +title: "Troubleshoot proxy configuration issues" +sidebar_label: "Proxy configuration" +description: "Troubleshooting guide for issues caused by incorrect proxy configuration in Web Modeler." +--- + +Troubleshoot and resolve issues in Web Modeler caused by incorrect or incomplete proxy configuration. + +## Issue + +Users experience a variety of failures when Web Modeler attempts to communicate with external services. These issues can manifest as: + +- Failed authentication due to the inability to access the JWKS (JSON Web Key Set) endpoint. Error message: "Expected 200 OK from the JSON Web Key Set HTTP response." +- Failure to reach other external services, such as the Camunda Marketplace. + +## Cause + +Proxy settings must be correctly configured for Web Modeler to route outgoing requests through a network proxy. Common issues occur when: + +- The proxy server is not properly configured or unreachable. +- Requests to external services are being blocked by the proxy configuration. 
+- Authentication requests, such as those to the OIDC provider, fail when the JWKS endpoint is unreachable via the proxy. + +## Resolution + +Ensure correct proxy configuration for both `webapp` and `restapi` components. + +- For the `webapp` component, proxy configuration is handled via the environment variables `http_proxy`, `https_proxy` and `no_proxy`. + ```properties + http_proxy=http://proxy.example.com:8080 https_proxy=https://secureproxy.example.com:443 no_proxy=localhost,127.0.0.1,.example.com + ``` +- For the `restapi` component, the proxy configuration is handled via JVM settings passed as the value of the environment variable `JAVA_OPTS`. + ```properties + JAVA_OPTS=-Dhttp.proxyHost= -Dhttps.proxyPort= + ``` diff --git a/docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md b/docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md index a06f1b6898b..0e6313d149c 100644 --- a/docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md +++ b/docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md @@ -21,7 +21,7 @@ Depending on your infrastructure, the default timeouts configured may be too sho You can pass custom timeouts in milliseconds for Web Modeler's Zeebe client to `modeler-restapi` via three individual environment variables: ```shell -ZEEBE_CLIENT_REQUESTTIMEOUT=30000 # limit the time to wait for a response from the Zeebe gateway +ZEEBE_CLIENT_REQUESTTIMEOUT=30000 # limit the time to wait for a response from the Zeebe Gateway ZEEBE_AUTH_CONNECT_TIMEOUT=60000 # limit the time to wait for a connection to the OAuth server ZEEBE_AUTH_READ_TIMEOUT=60000 # limits the time to wait for a response from the OAuth server ``` diff --git a/docs/self-managed/operate-deployment/importer-and-archiver.md b/docs/self-managed/operate-deployment/importer-and-archiver.md index 04cad62c780..17a48be690f 100644 --- a/docs/self-managed/operate-deployment/importer-and-archiver.md +++ b/docs/self-managed/operate-deployment/importer-and-archiver.md @@ -32,7 +32,7 @@ Each single importer/archiver node must be configured using the following config | ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------- | | camunda.operate.clusterNode.partitionIds | Array of Zeebe partition ids this Importer (or Archiver) node must be responsible for. | Empty array, meaning all partitions data is loaded. | | camunda.operate.clusterNode.nodeCount | Total amount of Importer (or Archiver) nodes in the cluster. | 1 | -| camunda.operate.clusterNode.currentNodeId | Id of current Importer (or Archiver) node, starting from 0. | 0 | +| camunda.operate.clusterNode.currentNodeId | ID of current Importer (or Archiver) node, starting from 0. | 0 | It's enough to configure either `partitionIds` or a pair of `nodeCount` and `currentNodeId`. If you provide `nodeCount` and `currentNodeId`, each node will automatically guess the Zeebe partitions they're responsible for. @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.operate.importer.threadsCount | Number of threads in which data will be imported. 
| 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/docs/self-managed/operate-deployment/operate-configuration.md b/docs/self-managed/operate-deployment/operate-configuration.md index 59bde307f9b..ef22cf4c196 100644 --- a/docs/self-managed/operate-deployment/operate-configuration.md +++ b/docs/self-managed/operate-deployment/operate-configuration.md @@ -73,7 +73,7 @@ in terms of tenant assignment, Operate - Zeebe connection must be secured. Check ### Troubleshooting multi-tenancy in Operate -If users can view data from the `` tenant only and no data from other tenants (and you have not [configured multi-tenancy using Helm](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-latest#global-parameters)), multi-tenancy is not enabled in Operate. Refer to the [configuration instructions above](#multi-tenancy). +If users can view data from the `` tenant only and no data from other tenants (and you have not [configured multi-tenancy using Helm](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-8.6#global-parameters)), multi-tenancy is not enabled in Operate. Refer to the [configuration instructions above](#multi-tenancy). If multi-tenancy is enabled in Operate but disabled in [Identity](/self-managed/identity/what-is-identity.md), users will not have any tenant authorizations in Operate and will not be able to access the data of any tenants in Operate. @@ -209,9 +209,9 @@ camunda.operate: selfSigned: true ``` -## Zeebe broker connection +## Zeebe Broker connection -Operate needs a connection to the Zeebe broker to start the import and execute user operations. +Operate needs a connection to the Zeebe Broker to start the import and execute user operations. 
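A minimal sketch of what this connection can look like in the Operate configuration, using the `brokerContactPoint` setting that also appears in the Helm-generated configuration shown later in this document (host and port are example values):

```yaml
camunda.operate:
  zeebe:
    # Address of the Zeebe Gateway that Operate connects to via gRPC.
    brokerContactPoint: "zeebe-gateway:26500"
```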
### Settings to connect diff --git a/docs/self-managed/operational-guides/application-configs.md b/docs/self-managed/operational-guides/application-configs.md index 47bfcee3638..b021fb22c0c 100644 --- a/docs/self-managed/operational-guides/application-configs.md +++ b/docs/self-managed/operational-guides/application-configs.md @@ -63,20 +63,20 @@ operate: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 numberOfShards: 3 # Zeebe instance zeebe: # Broker contact point - brokerContactPoint: "cpt-zeebe-gateway:26500" + brokerContactPoint: "-zeebe-gateway:26500" # ELS instance to export Zeebe data to zeebeElasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 # Index prefix, configured in Zeebe Elasticsearch exporter @@ -122,37 +122,34 @@ operate: ## Default properties set by the helm chart -Before you supply a configuration, it's helpful to know what the default configuration is so you can start from a working configuration and then update the values you want: +The `helm template` command generates the application's default configuration, allowing you to only update the values required by your setup. Use the following command to generate the default configuration, substituting in the name of your release: ```bash -helm template \ +helm template \ -f values.yaml \ camunda/camunda-platform \ --show-only templates/operate/configmap.yaml ``` -`--show-only` will allow you to print out the `configmap` to the console: +The `--show-only` flag prints out the `configmap` to the console: ```yaml # Source: camunda-platform/templates/operate/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: cpt-operate + name: -operate-configuration labels: app: camunda-platform app.kubernetes.io/name: camunda-platform - app.kubernetes.io/instance: cpt + app.kubernetes.io/instance: app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: camunda-platform - helm.sh/chart: camunda-platform-9.3.1 - app.kubernetes.io/version: "8.4.5" + helm.sh/chart: camunda-platform-10.3.2 app.kubernetes.io/component: operate + app.kubernetes.io/version: "8.5.5" data: application.yml: | - server: - servlet: - context-path: "/operate" spring: profiles: active: "identity-auth" @@ -160,41 +157,46 @@ data: oauth2: resourceserver: jwt: - issuer-uri: "http://cpt-keycloak:80/auth/realms/camunda-platform" - jwk-set-uri: "http://cpt-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/certs" + issuer-uri: "http://-keycloak:80/auth/realms/camunda-platform" + jwk-set-uri: "http://-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/certs" camunda: identity: clientId: "operate" audience: "operate-api" + baseUrl: "http://-identity:80" # Operate configuration file camunda.operate: identity: - redirectRootUrl: "https://dev.jlscode.com" + redirectRootUrl: "http://localhost:8081" # ELS instance to store Operate data elasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 - # Zeebe instance - zeebe: - # Broker contact point - brokerContactPoint: "cpt-zeebe-gateway:26500" + # Elasticsearch full url + url: "http://-elasticsearch:9200" # ELS instance to export Zeebe data to zeebeElasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 # Index prefix, configured in Zeebe Elasticsearch exporter 
prefix: zeebe-record + # Elasticsearch full url + url: "http://-elasticsearch:9200" + # Zeebe instance + zeebe: + # Broker contact point + brokerContactPoint: "-zeebe-gateway:26500" logging: level: ROOT: INFO @@ -207,14 +209,14 @@ Then, take the contents under `application.yml` and put it under the `operate.co ## Where to search for configuration options -- [Zeebe Broker](docs/self-managed/zeebe-deployment/configuration/broker.md) -- [Zeebe Gateway](docs/self-managed/zeebe-deployment/configuration/gateway.md) -- [Operate](docs/self-managed/operate-deployment/operate-configuration.md) -- [Tasklist](docs/self-managed/tasklist-deployment/tasklist-configuration.md) -- [Web Modeler](docs/self-managed/modeler/web-modeler/configuration/configuration.md) -- [Console](docs/self-managed/console-deployment/configuration.md) -- [Connectors](docs/self-managed/connectors-deployment/connectors-configuration.md) -- [Identity](docs/self-managed/identity/deployment/configuration-variables.md) +- [Zeebe Broker](/self-managed/zeebe-deployment/configuration/broker.md) +- [Zeebe Gateway](/self-managed/zeebe-deployment/configuration/gateway.md) +- [Operate](/self-managed/operate-deployment/operate-configuration.md) +- [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md) +- [Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md) +- [Console](/self-managed/console-deployment/configuration/configuration.md) +- [Connectors](/self-managed/connectors-deployment/connectors-configuration.md) +- [Identity](/self-managed/identity/deployment/configuration-variables.md) - [Optimize]($optimize$/self-managed/optimize-deployment/configuration/system-configuration) ## Limitations diff --git a/docs/self-managed/operational-guides/backup-restore/backup-and-restore.md b/docs/self-managed/operational-guides/backup-restore/backup-and-restore.md index 045a13485d0..c5d68a21004 100644 --- a/docs/self-managed/operational-guides/backup-restore/backup-and-restore.md +++ b/docs/self-managed/operational-guides/backup-restore/backup-and-restore.md @@ -24,7 +24,7 @@ Zeebe stores its backup to an external storage and must be configured before the ### Backup process -The backup of each component and the backup of a Camunda 8 cluster is identified by an id. This means a backup `x` of Camunda 8 consists of backup `x` of Zeebe, backup `x` of Optimize, backup `x` of Operate, and backup `x` of Tasklist. The backup id must be an integer and greater than the previous backups. +The backup of each component and the backup of a Camunda 8 cluster is identified by an id. This means a backup `x` of Camunda 8 consists of backup `x` of Zeebe, backup `x` of Optimize, backup `x` of Operate, and backup `x` of Tasklist. The backup ID must be an integer and greater than the previous backups. :::note We recommend using the timestamp as the backup id. diff --git a/docs/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md b/docs/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md index bae5f4f7250..eb3d1a568f0 100644 --- a/docs/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md +++ b/docs/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md @@ -5,17 +5,15 @@ description: "How to perform a backup and restore of Operate and Tasklist data." keywords: ["backup", "backups"] --- -:::note -This release introduces breaking changes, including: - -- The [get backup state API and response codes](#get-backup-state-api). 
-- The utilized URL has changed. For example, `curl 'http://localhost:8080/actuator/backups'` rather than the previously used `backup`. -- `backupId` must be of integer type now instead of string, which is in sync with Zeebe `backupId` requirements. +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +:::warning breaking changes! +As of the Camunda 8.6 release, the `/actuator` endpoints (including `/backups`) now default to port 9600. Ensure your `management.server.port` configuration parameter is correctly set before continuing. ::: Operate stores its data over multiple indices in Elasticsearch. Backup of Operate data includes several -Elasticsearch snapshots containing sets of Operate indices. Each backup is identified by `backupId`. For example, a backup with an id of `123` may contain the following Elasticsearch snapshots: +Elasticsearch snapshots containing sets of Operate indices. Each backup is identified by `backupId`. For example, a backup with an ID of `123` may contain the following Elasticsearch snapshots: ``` camunda_operate_123_8.1.0_part_1_of_6 @@ -29,7 +27,7 @@ camunda_operate_123_8.1.0_part_6_of_6 Operate provides an API to perform a backup and manage backups (list, check state, delete). Restore a backup using the standard Elasticsearch API. :::note -The backup API can be reached via the Actuator management port, which by default is the same as application HTTP port (and in turn defaults to 8080). The port may be reconfigured with the help of `management.server.port` configuration parameter. +The backup API can be reached via the Actuator management port, which by default is the same as application HTTP port (and in turn defaults to 9600). The port may be reconfigured with the help of `management.server.port` configuration parameter. ::: ## Prerequisites @@ -37,33 +35,68 @@ The backup API can be reached via the Actuator management port, which by default Before you can use the backup and restore feature: 1. The [Elasticsearch snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html) must be configured. -2. Operate and Tasklist must be configured with the repository name using the following configuration parameters: +2. Operate and Tasklist must be configured with the repository name using one of the following configuration options: + + + + + +#### Operate ```yaml -for Operate: camunda: operate: backup: repositoryName: +``` + + + + + +#### Operate + +``` +CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= +``` + + + + +#### Tasklist -for Tasklist: + + + + +```yaml camunda: tasklist: backup: repositoryName: ``` -or with environmental variables: + -``` -for Operate: -CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= + -for Tasklist: +``` CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME= - ``` + + + ## Create backup API During backup creation Operate can continue running. To create the backup, call the following endpoint: @@ -79,15 +112,15 @@ Response: | Code | Description | | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 200 OK | Backup was successfully started, snapshots will be created asynchronously. List of snapshots is returned in the response body (see example below). This list must be persisted together with the backup id to be able to restore it later. | -| 400 Bad Request | In case something is wrong with `backupId`, e.g. 
the same backup id already exists. | +| 200 OK | Backup was successfully started, snapshots will be created asynchronously. List of snapshots is returned in the response body (see example below). This list must be persisted together with the backup ID to be able to restore it later. | +| 400 Bad Request | In case something is wrong with `backupId`, e.g. the same backup ID already exists. | | 500 Server Error | All other errors, e.g. ES returned error response when attempting to create a snapshot. | | 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. | Example request: -``` -curl --request POST 'http://localhost:8080/actuator/backups' \ +```shell +curl --request POST 'http://localhost:9600/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": 123 }' ``` @@ -120,14 +153,14 @@ Response: | Code | Description | | ---------------- | --------------------------------------------------------------------------------------- | | 200 OK | Backup state could be determined and is returned in the response body. | -| 404 Not Found | Backup with given id does not exist. | +| 404 Not Found | Backup with given ID does not exist. | | 500 Server Error | All other errors, e.g. ES returned error response when attempting to execute the query. | | 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. | For example, the request could look like this: -``` -curl 'http://localhost:8080/actuator/backups/123' +```shell +curl 'http://localhost:9600/actuator/backups/123' ``` Example response: @@ -179,8 +212,8 @@ Response: For example, the request could look like this: -``` -curl 'http://localhost:8080/actuator/backups' +```shell +curl 'http://localhost:9600/actuator/backups' ``` Response will contain JSON with array of objects representing state of each backup (see [get backup state API endpoint](#get-backup-state-api)). @@ -190,7 +223,9 @@ Response will contain JSON with array of objects representing state of each back To delete all the Elasticsearch snapshots associated with the specific backup id, the following endpoint may be used: ``` + DELETE actuator/backups/123 + ``` Response: @@ -214,7 +249,7 @@ To restore the backup with a known backup id, you must restore all the snapshots Example of Elasticsearch query: -``` +```shell curl --request POST `http://localhost:9200/_snapshot/test/camunda_operate_123_8.1.0-snapshot_part_1_of_6/_restore?wait_for_completion=true` ``` diff --git a/docs/self-managed/operational-guides/backup-restore/optimize-backup.md b/docs/self-managed/operational-guides/backup-restore/optimize-backup.md index c694ff53341..2215873da0d 100644 --- a/docs/self-managed/operational-guides/backup-restore/optimize-backup.md +++ b/docs/self-managed/operational-guides/backup-restore/optimize-backup.md @@ -11,21 +11,21 @@ This release introduces breaking changes, including the utilized URL. For example, `curl 'http://localhost:8092/actuator/backups'` rather than the previously used `backup`. ::: -Optimize stores its data over multiple indices in Elasticsearch. To ensure data integrity across indices, a backup of Optimize data consists of two Elasticsearch snapshots, each containing a different set of Optimize indices. Each backup is identified by a positive integer backup ID. For example, a backup with ID `123456` consists of the following Elasticsearch snapshots: +Optimize stores its data over multiple indices in the database. 
To ensure data integrity across indices, a backup of Optimize data consists of two ElasticSearch/OpenSearch snapshots, each containing a different set of Optimize indices. Each backup is identified by a positive integer backup ID. For example, a backup with ID `123456` consists of the following snapshots: ``` camunda_optimize_123456_3.9.0_part_1_of_2 camunda_optimize_123456_3.9.0_part_2_of_2 ``` -Optimize provides an API to trigger a backup and retrieve information about a given backup's state. During backup creation Optimize can continue running. The backed up data can later be restored using the standard Elasticsearch snapshot restore API. +Optimize provides an API to trigger a backup and retrieve information about a given backup's state. During backup creation Optimize can continue running. The backed up data can later be restored using the standard ElasticSearch/OpenSearch snapshot restore API. ## Prerequisites The following prerequisites must be set up before using the backup API: -1. A snapshot repository of your choice must be registered with Elasticsearch. -2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize configuration: +1. A snapshot repository of your choice must be registered with ElasticSearch/OpenSearch. +2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable, or by adding it to your Optimize [`environment-config.yaml`]($optimize$/self-managed/optimize-deployment/configuration/system-configuration/): ```yaml backup: @@ -48,17 +48,17 @@ POST actuator/backups ### Response -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -| 202 Accepted | Backup process was successfully initiated. To determine whether backup process was completed refer to the GET API. | -| 400 Bad Request | Indicates issues with the request, for example when the `backupId` contains invalid characters. | -| 409 Conflict | Indicates that a backup with the same `backupId` already exists. | -| 500 Server Error | All other errors, e.g. issues communicating with Elasticsearch for snapshot creation. Refer to the returned error message for more details. | -| 502 Bad Gateway | Optimize has encountered issues while trying to connect to Elasticsearch. | +| Code | Description | +| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| 202 Accepted | Backup process was successfully initiated. To determine whether backup process was completed refer to the GET API. | +| 400 Bad Request | Indicates issues with the request, for example when the `backupId` contains invalid characters. | +| 409 Conflict | Indicates that a backup with the same `backupId` already exists. | +| 500 Server Error | All other errors, e.g. issues communicating with the database for snapshot creation. Refer to the returned error message for more details. | +| 502 Bad Gateway | Optimize has encountered issues while trying to connect to the database. | ### Example request -``` +```shell curl --request POST 'http://localhost:8092/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": 123456 }' @@ -96,13 +96,13 @@ GET actuator/backup | 200 OK | Backup state could be determined and is returned in the response body (see example below). 
| | 400 Bad Request | There is an issue with the request, for example the repository name specified in the Optimize configuration does not exist. Refer to returned error message for details. | | 404 Not Found | If a backup ID was specified, no backup with that ID exists. | -| 500 Server Error | All other errors, e.g. issues communicating with Elasticsearch for snapshot state retrieval. Refer to the returned error message for more details. | -| 502 Bad Gateway | Optimize has encountered issues while trying to connect to Elasticsearch. | +| 500 Server Error | All other errors, e.g. issues communicating with the database for snapshot state retrieval. Refer to the returned error message for more details. | +| 502 Bad Gateway | Optimize has encountered issues while trying to connect to the database. | ### Example request -``` -curl ---request GET 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request GET 'http://localhost:8092/actuator/backups/123456' ``` ### Example response @@ -135,8 +135,8 @@ Possible states of the backup: - `COMPLETE`: The backup can be used for restoring data. - `IN_PROGRESS`: The backup process for this backup ID is still in progress. -- `FAILED`: Something went wrong when creating this backup. To find out the exact problem, use the [Elasticsearch get snapshot status API](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/get-snapshot-status-api.html) for each of the snapshots included in the given backup. -- `INCOMPATIBLE`: The backup is incompatible with the current Elasticsearch version. +- `FAILED`: Something went wrong when creating this backup. To find out the exact problem, use the [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-status-api.html) / [OpenSearch](https://opensearch.org/docs/latest/api-reference/snapshots/get-snapshot-status/) get snapshot status API for each of the snapshots included in the given backup. +- `INCOMPATIBLE`: The backup is incompatible with the current ElasticSearch/OpenSearch version. - `INCOMPLETE`: The backup is incomplete (this could occur when the backup process was interrupted or individual snapshots were deleted). ## Delete backup API @@ -154,36 +154,36 @@ DELETE actuator/backups/{backupId} | Code | Description | | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 204 No Content | The delete request for the associated snapshots was submitted to Elasticsearch successfully. | +| 204 No Content | The delete request for the associated snapshots was submitted to the database successfully. | | 400 Bad Request | There is an issue with the request, for example the repository name specified in the Optimize configuration does not exist. Refer to returned error message for details. | | 500 Server Error | An error occurred, for example the snapshot repository does not exist. Refer to the returned error message for details. | -| 502 Bad Gateway | Optimize has encountered issues while trying to connect to Elasticsearch. | +| 502 Bad Gateway | Optimize has encountered issues while trying to connect to ElasticSearch/OpenSearch. | ### Example request -``` -curl ---request DELETE 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request DELETE 'http://localhost:8092/actuator/backups/123456' ``` ## Restore backup -There is no Optimize API to perform the backup restore. 
Instead, the standard [Elasticsearch restore snapshot API](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/restore-snapshot-api.html) can be used. Note that the Optimize versions of your backup snapshots must match the currently running version of Optimize. You can identify the version at which the backup was taken by the version tag included in respective snapshot names; for example, a snapshot with the name`camunda_optimize_123456_3.9.0_part_1_of_2` was taken of Optimize version `3.9.0`. +There is no Optimize API to perform the backup restore. Instead, the standard [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/restore-snapshot-api.html) / [OpenSearch](https://opensearch.org/docs/latest/api-reference/snapshots/restore-snapshot) restore snapshot API can be used. Note that the Optimize versions of your backup snapshots must match the currently running version of Optimize. You can identify the version at which the backup was taken by the version tag included in respective snapshot names; for example, a snapshot with the name`camunda_optimize_123456_3.9.0_part_1_of_2` was taken of Optimize version `3.9.0`. :::note Optimize must NOT be running while a backup is being restored. ::: -To restore an existing backup, all the snapshots this backup contains (as listed in the response of the [create backup API request](#example-response)) must be restored using the Elasticsearch API. +To restore an existing backup, all the snapshots this backup contains (as listed in the response of the [create backup API request](#example-response)) must be restored using the restore API. To restore a given backup, the following steps must be performed: 1. Stop Optimize. -2. Ensure no Optimize indices are present in Elasticsearch (or the restore process will fail). -3. Iterate over all Elasticsearch snapshots included in the desired backup and restore them using the Elasticsearch restore snapshot API. +2. Ensure no Optimize indices are present in the database (or the restore process will fail). +3. Iterate over all ElasticSearch/OpenSearch snapshots included in the desired backup and restore them using the restore snapshot API mentioned above. 4. Start Optimize. -Example Elasticsearch request: +Example request: -``` +```shell curl --request POST `http://localhost:9200/_snapshot/repository_name/camunda_optimize_123456_3.9.0_part_1_of_2/_restore?wait_for_completion=true` ``` diff --git a/docs/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md b/docs/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md index fc3a1fc1ab6..e59e1691536 100644 --- a/docs/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md +++ b/docs/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md @@ -26,7 +26,7 @@ Even when the underlying storage bucket is the same, backups from one are not co ### S3 backup store -To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket: +To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket. 
This configuration can be set in your Zeebe [`config/application.yaml`](/docs/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -87,7 +87,7 @@ zeebe.broker.data.backup.s3.compression: zstd # or use environment variable ZEEB The GCS backup strategy utilizes the [Google Cloud Storage REST API](https://cloud.google.com/storage/docs/request-endpoints). ::: -To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use: +To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -101,7 +101,7 @@ zeebe: ``` The bucket specified with `bucketName` **must already exist**, Zeebe will not try to create one for you. -To prevent misconfiguration, Zeebe will check at startup that the specified bucket exists and can be accessed. +To prevent misconfiguration, Zeebe will check at startup that the specified bucket exists and can be accessed, and log at WARN level if the bucket does not exist. Setting a `basePath` is not required but useful if you want to use the same bucket for multiple Zeebe clusters. When `basePath` is set, Zeebe will only create and access objects under this path. @@ -124,7 +124,7 @@ There are multiple [data encryption options](https://cloud.google.com/storage/do ### Azure backup store -To store your backups in Azure Storage, choose the `AZURE` backup store and specify how to connect with the Azure container: +To store your backups in Azure Storage, choose the `AZURE` backup store and specify how to connect with the Azure container. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -170,14 +170,14 @@ POST actuator/backups } ``` -A `backupId` is an integer and must be greater than the id of previous backups that are completed, failed, or deleted. +A `backupId` is an integer and must be greater than the ID of previous backups that are completed, failed, or deleted. Zeebe does not take two backups with the same ids. If a backup fails, a new `backupId` must be provided to trigger a new backup. -The `backupId` cannot be reused, even if the backup corresponding to the backup id is deleted. +The `backupId` cannot be reused, even if the backup corresponding to the backup ID is deleted.
    Example request -``` +```shell curl --request POST 'http://localhost:9600/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": "100" }' @@ -191,7 +191,7 @@ curl --request POST 'http://localhost:9600/actuator/backups' \ | ---------------- | ------------------------------------------------------------------------------------------------------------------------ | | 202 Accepted | A Backup has been successfully scheduled. To determine if the backup process was completed, refer to the GET API. | | 400 Bad Request | Indicates issues with the request, for example when the `backupId` is not valid or backup is not enabled on the cluster. | -| 409 Conflict | Indicates a backup with the same `backupId` or a higher id already exists. | +| 409 Conflict | Indicates a backup with the same `backupId` or a higher ID already exists. | | 500 Server Error | All other errors. Refer to the returned error message for more details. | | 502 Bad Gateway | Zeebe has encountered issues while communicating to different brokers. | | 504 Timeout | Zeebe failed to process the request within a pre-determined timeout. | @@ -220,7 +220,7 @@ GET actuator/backups/{backupId}
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups/100' ``` @@ -239,7 +239,7 @@ curl --request GET 'http://localhost:9600/actuator/backups/100' When the response is 200 OK, the response body consists of a JSON object describing the state of the backup. -- `backupId`: Id in the request. +- `backupId`: ID in the request. - `state`: Gives the overall status of the backup. The state can be one of the following: - `COMPLETED` if all partitions have completed the backup. - `FAILED` if at least one partition has failed. In this case, `failureReason` contains a string describing the reason for failure. @@ -293,7 +293,7 @@ GET actuator/backups
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups' ``` @@ -371,7 +371,7 @@ DELETE actuator/backups/{backupId}
    Example request -``` +```shell curl --request DELETE 'http://localhost:9600/actuator/backups/100' ``` diff --git a/docs/self-managed/operational-guides/configure-flow-control/configure-flow-control.md b/docs/self-managed/operational-guides/configure-flow-control/configure-flow-control.md index 08cbf734eae..22514755159 100644 --- a/docs/self-managed/operational-guides/configure-flow-control/configure-flow-control.md +++ b/docs/self-managed/operational-guides/configure-flow-control/configure-flow-control.md @@ -21,7 +21,7 @@ A static write rate limit can prevent throughput peaks, and write rate throttlin These write limits are enabled by default in SaaS and disabled in Self-Managed. For most use cases, write rate limits can be enabled as needed if an issue arises. ::: -Flow control is configured in your Zeebe broker's `application.yaml` file. The default values can be found in the `# flowControl` section of the Zeebe broker [configuration](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) and [standalone](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) templates. +Flow control is configured in your Zeebe Broker's `application.yaml` file. The default values can be found in the `# flowControl` section of the Zeebe Broker [configuration](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) and [standalone](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) templates. ```yaml zeebe: diff --git a/docs/self-managed/operational-guides/configure-multi-tenancy.md b/docs/self-managed/operational-guides/configure-multi-tenancy.md index e74efe332b4..7b09cd1715b 100644 --- a/docs/self-managed/operational-guides/configure-multi-tenancy.md +++ b/docs/self-managed/operational-guides/configure-multi-tenancy.md @@ -19,7 +19,7 @@ Multi-tenancy must be enabled for each required component. Using the single glob ## Helm charts When using Helm charts, you can enable multi-tenancy globally with the flag `global.multitenancy.enabled`. -Visit [the Helm chart configuration](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-latest#global-parameters) for additional details. +Visit [the Helm chart configuration](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-8.6#global-parameters) for additional details. 
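+
+For illustration only, enabling this flag with Helm might look like the following sketch; the release name `camunda`, the namespace, and the use of `--reuse-values` are assumptions — keep whatever values files and flags you already apply:
+
+```shell
+# Sketch: toggle the global multi-tenancy flag on an existing Helm release.
+# --reuse-values keeps the values from the previous release revision.
+helm upgrade --install camunda camunda-platform \
+  --repo https://helm.camunda.io \
+  --namespace camunda \
+  --reuse-values \
+  --set global.multitenancy.enabled=true
+```
+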
## Environment variables diff --git a/docs/self-managed/operational-guides/multi-region/dual-region-ops.md b/docs/self-managed/operational-guides/multi-region/dual-region-ops.md index 722de513fbe..ce7e32aa3b7 100644 --- a/docs/self-managed/operational-guides/multi-region/dual-region-ops.md +++ b/docs/self-managed/operational-guides/multi-region/dual-region-ops.md @@ -147,7 +147,7 @@ Start with creating a port-forward to the `Zeebe Gateway` in the surviving regio The following alternatives to port-forwarding are possible: -- if Zeebe Gateway is exposed to the outside of the Kubernetes cluster, you can skip port-forwarding and use the URL directly +- If the Zeebe Gateway is exposed to the outside of the Kubernetes cluster, you can skip port-forwarding and use the URL directly - [`exec`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_exec/) into an existing pod (such as Elasticsearch), and execute `curl` commands from inside of the pod - [`run`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_run/) an Ubuntu pod in the cluster to execute `curl` commands from inside the Kubernetes cluster diff --git a/docs/self-managed/operational-guides/troubleshooting/log-levels.md b/docs/self-managed/operational-guides/troubleshooting/log-levels.md index f5423bb4a8b..365aa71fd33 100644 --- a/docs/self-managed/operational-guides/troubleshooting/log-levels.md +++ b/docs/self-managed/operational-guides/troubleshooting/log-levels.md @@ -24,3 +24,4 @@ Enable logging for each component of Camunda 8 using the following instructions: - [Operate](/self-managed/operate-deployment/operate-configuration.md#logging) - [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#logging) - [Web Modeler](/self-managed/modeler/web-modeler/configuration/logging.md) +- [Identity](/self-managed/identity/user-guide/configuration/configure-logging.md) diff --git a/docs/self-managed/operational-guides/troubleshooting/troubleshooting.md b/docs/self-managed/operational-guides/troubleshooting/troubleshooting.md index 53c0cc54a0c..b68331309e0 100644 --- a/docs/self-managed/operational-guides/troubleshooting/troubleshooting.md +++ b/docs/self-managed/operational-guides/troubleshooting/troubleshooting.md @@ -47,9 +47,9 @@ Therefore, if you are not using the [ingress-nginx controller](https://github.co ## Identity `contextPath` -Camunda 8 Self-Managed can be accessed externally via different methods. One such method is the [combined Ingress setup](self-managed/setup/guides/ingress-setup.md#combined-ingress-setup). In that configuration, Camunda Identity is accessed using a specific path, configured by setting the `contextPath` variable, for example `https://camunda.example.com/identity`. +Camunda 8 Self-Managed can be accessed externally via the [combined Ingress setup](self-managed/setup/guides/ingress-setup.md#combined-ingress-setup). In that configuration, Camunda Identity is accessed using a specific path, configured by setting the `contextPath` variable, for example `https://camunda.example.com/identity`. -For security reasons, Camunda Identity requires secure access (HTTPS) when a `contextPath` is configured. If you want to use Camunda Identity with HTTP, use a [separate Ingress setup](self-managed/setup/guides/ingress-setup.md#separated-ingress-setup) (applications such as Operate, Optimize, etc, can still be accessed in a combined setup). +For security reasons, Camunda Identity requires secure access (HTTPS) when a `contextPath` is configured. 
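+
+For orientation, a combined Ingress that serves Identity under a context path over HTTPS is typically expressed through Helm values along these lines; the release name, domain, and TLS secret name below are assumptions — substitute the values used in your environment:
+
+```shell
+# Sketch: expose Identity at https://camunda.example.com/identity behind a TLS-terminated combined Ingress.
+helm upgrade --install camunda camunda-platform \
+  --repo https://helm.camunda.io \
+  --namespace camunda \
+  --set global.ingress.enabled=true \
+  --set global.ingress.host=camunda.example.com \
+  --set global.ingress.tls.enabled=true \
+  --set global.ingress.tls.secretName=camunda-c8-tls \
+  --set identity.contextPath="/identity" \
+  --set identity.fullURL="https://camunda.example.com/identity"
+```
+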
 :::note
 Due to limitations, the Identity `contextPath` approach is unavailable when using a browser in Incognito mode.

@@ -67,6 +67,25 @@ A gateway timeout can occur if the headers of a response are too big (for exampl

 If you encounter errors during Helm chart installation, such as type mismatches or other template rendering issues, you may be using an outdated version of the Helm CLI. Helm's handling of data types and template syntax can vary significantly between versions. Ensure you use the Helm CLI version `3.13` or higher.

+## DNS disruption issue for Zeebe in Kubernetes clusters (1.29-1.31)
+
+Kubernetes clusters running versions 1.29 to 1.31 may experience DNS disruptions during complete node restarts, such as during upgrades or evictions, particularly if the cluster's DNS resolver pods are affected.
+
+This issue is specifically noticeable for Zeebe (Netty), as it will no longer be able to form a cluster because of improper DNS responses. This occurs because Zeebe continues to communicate with a non-existent DNS resolver, caused by improper cleanup of conntrack entries for UDP connections.
+
+This issue is described in [this Kubernetes issue](https://github.com/kubernetes/kubernetes/issues/125467) and has been resolved in the following patch releases:
+
+- Kubernetes 1.29.10
+- Kubernetes 1.30.6
+- Kubernetes 1.31.2
+
+Kubernetes versions 1.32 and versions before 1.29 are not affected.
+
+If an immediate cluster upgrade to a fixed version is not possible, the following temporary workarounds can be applied if you encounter DNS issues:
+
+- Restart the `kube-proxy` pod(s)
+- Delete the affected Zeebe pod
+
 ## Anomaly detection scripts

 The [c8-sm-checks](https://github.com/camunda/c8-sm-checks) project introduces a set of scripts to aid detection of Camunda deployment anomalies.
@@ -133,6 +152,12 @@ This script verifies connectivity to a Zeebe instance using HTTP/2 and gRPC prot

 Find more information on [how to register your application on Identity](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/payment-example-process-application/kube/README.md#4-generating-an-m2m-token-for-our-application).

+### IRSA configuration check
+
+The AWS EKS IRSA configuration scripts verify the correct setup of IAM Roles for Service Accounts (IRSA) within your Kubernetes deployment on AWS. These scripts ensure that your Kubernetes service accounts are correctly associated with IAM roles, allowing components like PostgreSQL, OpenSearch, and others in your deployment to securely interact with AWS resources.
+
+For detailed usage instructions and setup information, please refer to the [IRSA guide](/self-managed/setup/deploy/amazon/amazon-eks/irsa.md#irsa-check-script).
+
 ### Interpretation of the results

 Each script produces an output indicating the status of individual checks, which can be either `[OK]`, which signals a healthy status, or `[FAIL]`, which signals an unhealthy status.
diff --git a/docs/self-managed/operational-guides/update-guide/840-to-850.md b/docs/self-managed/operational-guides/update-guide/840-to-850.md
index b94830b187a..0a36d850327 100644
--- a/docs/self-managed/operational-guides/update-guide/840-to-850.md
+++ b/docs/self-managed/operational-guides/update-guide/840-to-850.md
@@ -31,11 +31,11 @@ Note that there is **no** actual corruption or data loss, however.

 The broker health check routes have moved, and the old routes are now deprecated.
 Specifically, the following routes will return [a status code of 301](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/301) and redirect you. See the table below about the new mappings:

-| Old route                                | **New route**                                                  |
-| ---------------------------------------- | -------------------------------------------------------------- |
-| http://{zeebe-broker-host}:9600/health   | **http://{zeebe-broker-host}:9600/actuator/health/status**     |
-| http://{zeebe-broker-host}:9600/ready    | **http://{zeebe-broker-host}:9600/actuator/health/readiness**  |
-| http://{zeebe-broker-host}:9600/startup  | **http://{zeebe-broker-host}:9600/actuator/health/startup**    |
+| Old route                                 | **New route**                                                   |
+| ----------------------------------------- | ---------------------------------------------------------------- |
+| http://\{zeebe-broker-host}:9600/health   | **http://\{zeebe-broker-host}:9600/actuator/health/status**      |
+| http://\{zeebe-broker-host}:9600/ready    | **http://\{zeebe-broker-host}:9600/actuator/health/readiness**   |
+| http://\{zeebe-broker-host}:9600/startup  | **http://\{zeebe-broker-host}:9600/actuator/health/startup**     |

 Please migrate to the new routes in your deployments. **If you're using the official Helm charts, then you don't have to do anything here.**
diff --git a/docs/self-managed/operational-guides/update-guide/860-to-870.md b/docs/self-managed/operational-guides/update-guide/860-to-870.md
new file mode 100644
index 00000000000..205beda3075
--- /dev/null
+++ b/docs/self-managed/operational-guides/update-guide/860-to-870.md
@@ -0,0 +1,14 @@
+---
+id: 860-to-870
+title: Update 8.6 to 8.7
+description: "Review which adjustments must be made to migrate from Camunda 8.6.x to Camunda 8.7.0."
+---
+
+The following sections explain which adjustments must be made to migrate from Camunda 8.6.x to 8.7.x for each component.
+
+## Backup
+
+### Google Cloud Storage
+
+Configuring a non-existent bucket for backups no longer prevents Zeebe from starting up; it only results
+in WARN-level log messages during the startup phase.
diff --git a/docs/self-managed/operational-guides/update-guide/introduction.md b/docs/self-managed/operational-guides/update-guide/introduction.md
index 3a0ddf4236b..1760f265bb7 100644
--- a/docs/self-managed/operational-guides/update-guide/introduction.md
+++ b/docs/self-managed/operational-guides/update-guide/introduction.md
@@ -12,12 +12,21 @@ When updating from one minor version to the next, you do not need to update to e

 Depending on your amount of data, run a minor version for at least 24 hours before updating to the next version.

-:::note
-Versions prior to Camunda 8 are listed below and identified as Camunda Cloud versions.
-::: - There is a dedicated update guide for each version: +### [Camunda 8.6 to Camunda 8.7](../860-to-870) + +Update from 8.6.x to 8.7.0 + +[Release notes](/reference/release-notes/870.md) + +### [Camunda 8.5 to Camunda 8.6](../850-to-860) + +Update from 8.5.x to 8.6.0 + +[Release notes](/reference/release-notes/860.md) | +[Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) + ### [Camunda 8.4 to Camunda 8.5](../840-to-850) Update from 8.4.x to 8.5.0 diff --git a/docs/self-managed/react-components/components.md b/docs/self-managed/react-components/components.md index d934e5a1deb..3f7f96ad8e7 100644 --- a/docs/self-managed/react-components/components.md +++ b/docs/self-managed/react-components/components.md @@ -16,6 +16,6 @@ Camunda 8 Self-Managed users may also use [Desktop Modeler](../../components/mod :::note -To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/contact). +To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/reference/contact.md). ::: diff --git a/docs/self-managed/setup/assets/operate-dashboard-no-processes.png b/docs/self-managed/setup/assets/operate-dashboard-no-processes.png index d00ff42eb40..fbcdb3efc52 100644 Binary files a/docs/self-managed/setup/assets/operate-dashboard-no-processes.png and b/docs/self-managed/setup/assets/operate-dashboard-no-processes.png differ diff --git a/docs/self-managed/setup/assets/operate-introduction.png b/docs/self-managed/setup/assets/operate-introduction.png index 6935f5092d9..ab58ba88927 100644 Binary files a/docs/self-managed/setup/assets/operate-introduction.png and b/docs/self-managed/setup/assets/operate-introduction.png differ diff --git a/docs/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md b/docs/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md index cebaa707b73..137adb9efe3 100644 --- a/docs/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md +++ b/docs/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md @@ -95,13 +95,13 @@ https://github.com/camunda/c8-multi-region/blob/main/aws/dual-region/scripts/exp #### config.tf -This file contains the [backend](https://developer.hashicorp.com/terraform/language/settings/backends/configuration) and [provider](https://developer.hashicorp.com/terraform/language/providers/configuration) configuration, meaning where to store the [Terraform state](https://developer.hashicorp.com/terraform/language/state) and which providers to use, their versions, and potential credentials. +This file contains the [backend](https://developer.hashicorp.com/terraform/language/backend) and [provider](https://developer.hashicorp.com/terraform/language/providers/configuration) configuration, meaning where to store the [Terraform state](https://developer.hashicorp.com/terraform/language/state) and which providers to use, their versions, and potential credentials. The important part of `config.tf` is the initialization of two AWS providers, as you need one per region and this is a limitation by AWS given everything is scoped to a region. :::note -It's recommended to use a different backend than `local`. Find more information in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/settings/backends/configuration). +It's recommended to use a different backend than `local`. Find more information in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/backend). 
::: @@ -233,7 +233,8 @@ kubectl --context $CLUSTER_1 apply -f https://raw.githubusercontent.com/camunda/ ``` 3. The script will retrieve the IPs of the load balancer via the AWS CLI and return the required config change. -4. As the script suggests, copy the statement between the placeholders to edit the CoreDNS configmap in cluster 0 and cluster 1, depending on the placeholder. +4. The script prints the `kubectl edit` commands to change the DNS settings of each cluster inline. Copy the statement between the placeholders to edit the CoreDNS configmap in cluster 0 and cluster 1, depending on the placeholder. + An alternative to inline editing is to create two copies of the file `kubernetes/coredns.yml`, one for each cluster. Add the section generated by the script to each file. Apply the changes to each cluster with e.g. `kubectl --context cluster-london -n kube-system apply -f file.yml`. Replace the `context` parameter with your current values.
    Example output diff --git a/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md b/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md index d0d28f778da..d196001567e 100644 --- a/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md +++ b/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md @@ -1,30 +1,40 @@ --- id: eks-helm title: "Install Camunda 8 on an EKS cluster" -description: "Set up the Camunda 8 environment with Helm and an optional DNS setup on Amazon EKS." +description: "Set up the Camunda 8 environment with Helm and an optional Ingress setup on Amazon EKS." --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -This guide offers a comprehensive guide for installing the Camunda 8 Helm chart on your pre-existing AWS Kubernetes EKS cluster. Additionally, it includes instructions for setting up an optional DNS configuration. -Lastly you'll verify that the connection to your Self-Managed Camunda 8 environment is working. +This guide provides a comprehensive walkthrough for installing the Camunda 8 Helm chart on your existing AWS Kubernetes EKS cluster. It also includes instructions for setting up optional DNS configurations and other optional AWS-managed services, such as OpenSearch and PostgreSQL. -## Prerequisites +Lastly you'll verify that the connection to your Self-Managed Camunda 8 environment is working. -- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [terraform](./terraform-setup.md) guide. +## Requirements +- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [Terraform](./terraform-setup.md) guide. - [Helm (3.16+)](https://helm.sh/docs/intro/install/) - [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. +- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some variables. +- [GNU envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html) to generate manifests. - (optional) Domain name/[hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html) in Route53. This allows you to expose Camunda 8 and connect via [zbctl](/apis-tools/community-clients/cli-client/index.md) or [Camunda Modeler](https://camunda.com/download/modeler/). +- A namespace to host the Camunda Platform, in this guide we will reference `camunda` as the target namespace. -## Considerations +### Considerations While this guide is primarily tailored for UNIX systems, it can also be run under Windows by utilizing the [Windows Subsystem for Linux](https://learn.microsoft.com/windows/wsl/about). Multi-tenancy is disabled by default and is not covered further in this guide. If you decide to enable it, you may use the same PostgreSQL instance and add an extra database for multi-tenancy purposes. -### Architecture +:::caution Optimize compatibility with OpenSearch + +**Migration:** The migration step will be disabled during the installation. For more information, refer to [using Amazon OpenSearch Service](/self-managed/setup/guides/using-existing-opensearch.md). +::: + +## Architecture + + Note the [existing architecture](../../../../about-self-managed.md#architecture) extended by deploying a Network Load Balancer with TLS termination within the [ingress](https://kubernetes.github.io/ingress-nginx/user-guide/tls/) below. 
@@ -32,51 +42,85 @@ Additionally, two components ([external-dns](https://github.com/kubernetes-sigs/ ![Camunda 8 Self-Managed AWS Architecture Diagram](./assets/camunda-8-self-managed-architecture-aws.png) -## Usage - -In the following, we're using `helm upgrade --install` as it runs install on initial deployment and upgrades future usage. This may make it easier for future [Camunda 8 Helm upgrades](/self-managed/setup/upgrade.md) or any other component upgrades. - -### Environment prerequisites +## Export environment variables To streamline the execution of the subsequent commands, it is recommended to export multiple environment variables. +### Export the AWS region and Helm chart version + The following are the required environment variables with some example values: +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/chart-env.sh +``` + +### Export database values + +When using either standard authentication (network based or username and password) or IRSA authentication, specific environment variables must be set with valid values. Follow the guide for either [eksctl](./eksctl.md#configuration-1) or [Terraform](./terraform-setup.md#export-values-for-the-helm-chart) to set them correctly. + +Verify the configuration of your environment variables by running the following loop: + + + + + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/check-env-variables.sh +``` + + + + + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/procedure/check-env-variables.sh +``` + + + + + +## (Optional) Ingress Setup + +:::info Domain or domainless installation + +If you do not have a domain name, external access to Camunda 8 web endpoints from outside the AWS VPC will not be possible. In this case, you may skip the DNS setup and proceed directly to [deploying Camunda 8 via Helm charts](#deploy-camunda-8-via-helm-charts). + +Alternatively, you can use `kubectl port-forward` to access the Camunda platform without a domain or Ingress configuration. For more information, see the [kubectl port-forward documentation](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_port-forward/). + +Throughout the rest of this installation guide, we will refer to configurations as **"With domain"** or **"Without domain"** depending on whether the application is exposed via a domain. +::: + +In this section, we provide an optional setup guide for configuring an Ingress with TLS and DNS management, allowing you to access your application through a specified domain. If you haven't set up an Ingress, refer to the [Kubernetes Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for more details. In Kubernetes, an Ingress is an API object that manages external access to services in a cluster, typically over HTTP, and can also handle TLS encryption for secure connections. + +To monitor your Ingress setup using Amazon CloudWatch, you may also find the official AWS guide on [monitoring nginx workloads with CloudWatch Container Insights and Prometheus](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights-Prometheus-Sample-Workloads-nginx.html) helpful. 
Additionally, for detailed steps on exposing Kubernetes applications with the nginx ingress controller, refer to the [official AWS tutorial](https://aws.amazon.com/fr/blogs/containers/exposing-kubernetes-applications-part-3-nginx-ingress-controller/). + +### Export Values + +Set the following values for your Ingress configuration: + ```shell -# Your standard region that you host AWS resources in -export REGION=eu-central-1 -# Following two environment variables can be skipped if you don't have a domain -# The domain name that you intend to use +# The domain name you intend to use export DOMAIN_NAME=camunda.example.com -# The e-mail to register with Let's Encrypt +# The email address for Let's Encrypt registration export MAIL=admin@camunda.example.com -# The Ingress-Nginx Helm Chart version +# Helm chart versions for Ingress components export INGRESS_HELM_CHART_VERSION="4.11.2" -# The External DNS Helm Chart version export EXTERNAL_DNS_HELM_CHART_VERSION="1.15.0" -# The Cert-Manager Helm Chart version export CERT_MANAGER_HELM_CHART_VERSION="1.15.3" -# The Camunda 8 Helm Chart version -export CAMUNDA_HELM_CHART_VERSION="11.0.0" ``` -Additionally, follow the guide from either [eksctl](./eks-helm.md) or [Terraform](./terraform-setup.md) to retrieve the following values, which will be required for subsequent steps: - -- EXTERNAL_DNS_IRSA_ARN -- CERT_MANAGER_IRSA_ARN -- DB_HOST -- PG_USERNAME -- PG_PASSWORD -- DEFAULT_DB_NAME -- REGION +Additionally, obtain these values by following the guide for either [eksctl](./eks-helm.md) or [Terraform](./terraform-setup.md), as they will be needed in later steps: -### DNS set up +- `EXTERNAL_DNS_IRSA_ARN` +- `CERT_MANAGER_IRSA_ARN` +- `REGION` -:::info -If you don't have a domain name, you cannot access Camunda 8 web endpoints from outside the AWS VPC. Therefore, you can skip the DNS set up and continue with deploying [Camunda 8](#deploy-camunda-8-via-helm-charts). -::: - -#### ingress-nginx +### ingress-nginx [Ingress-nginx](https://github.com/kubernetes/ingress-nginx) is an open-source Kubernetes Ingress controller that provides a way to manage external access to services within a Kubernetes cluster. It acts as a reverse proxy and load balancer, routing incoming traffic to the appropriate services based on rules defined in the Ingress resource. @@ -94,7 +138,7 @@ helm upgrade --install \ --create-namespace ``` -#### external-dns +### external-dns [External-dns](https://github.com/kubernetes-sigs/external-dns) is a Kubernetes add-on that automates the management of DNS records for external resources, such as load balancers or Ingress controllers. It monitors the Kubernetes resources and dynamically updates the DNS provider with the appropriate DNS records. @@ -106,7 +150,8 @@ Consider setting `domainFilters` via `--set` to restrict access to certain hoste Make sure to have `EXTERNAL_DNS_IRSA_ARN` exported prior by either having followed the [eksctl](./eksctl.md#policy-for-external-dns) or [Terraform](./terraform-setup.md#outputs) guide. ::: -:::warning +:::warning Uniqueness of txtOwnerId for DNS + If you are already running `external-dns` in a different cluster, ensure each instance has a **unique** `txtOwnerId` for the TXT record. Without unique identifiers, the `external-dns` instances will conflict and inadvertently delete existing DNS records. In the example below, it's set to `external-dns` and should be changed if this identifier is already in use. 
Consult the [documentation](https://kubernetes-sigs.github.io/external-dns/v0.15.0/#note) to learn more about DNS record ownership. @@ -126,7 +171,7 @@ helm upgrade --install \ --create-namespace ``` -#### cert-manager +### cert-manager [Cert-manager](https://cert-manager.io/) is an open-source Kubernetes add-on that automates the management and issuance of TLS certificates. It integrates with various certificate authorities (CAs) and provides a straightforward way to obtain, renew, and manage SSL/TLS certificates for your Kubernetes applications. @@ -181,120 +226,434 @@ spec: EOF ``` -### Deploy Camunda 8 via Helm charts +## Deploy Camunda 8 via Helm charts For more configuration options, refer to the [Helm chart documentation](https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters). Additionally, explore our existing resources on the [Camunda 8 Helm chart](/self-managed/setup/install.md) and [guides](/self-managed/setup/guides/guides.md). - - +Depending of your installation path, you may use different settings. +For easy and reproducible installations, we will use yaml files to configure the chart. + +### 1. Create the `values.yml` file + +Start by creating a `values.yml` file to store the configuration for your environment. This file will contain key-value pairs that will be substituted using `envsubst`. You can find a reference example of this file here: + + + The following makes use of the [combined Ingress setup](/self-managed/setup/guides/ingress-setup.md#combined-ingress-setup) by deploying a single Ingress for all HTTP components and a separate Ingress for the gRPC endpoint. -:::warning +:::info Cert-manager annotation for domain installation +The annotation `kubernetes.io/tls-acme=true` will be [interpreted by cert-manager](https://cert-manager.io/docs/usage/ingress/) and automatically results in the creation of the required certificate request, easing the setup. +::: + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/helm-values/values-domain.yml +``` + +:::warning Exposure of the Zeebe Gateway + +Publicly exposing the Zeebe Gateway without proper authorization can pose significant security risks. To avoid this, consider disabling the Ingress for the Zeebe Gateway by setting the following values to `false` in your configuration file: -Publicly exposing the Zeebe Gateway without authorization enabled can lead to severe security risks. Consider disabling the Ingress for the Zeebe Gateway by setting the `zeebeGateway.ingress.grpc.enabled` and `zeebeGateway.ingress.rest.enabled` to `false`. +- `zeebeGateway.ingress.grpc.enabled` +- `zeebeGateway.ingress.rest.enabled` -By default, authorization is enabled to ensure secure access to Zeebe. Typically, only internal components need direct access, making it unnecessary to expose Zeebe externally. +By default, authorization is enabled to ensure secure access to Zeebe. Typically, only internal components need direct access to Zeebe, making it unnecessary to expose the gateway externally. 
::: -```shell -helm upgrade --install \ - camunda camunda-platform \ - --repo https://helm.camunda.io \ - --version $CAMUNDA_HELM_CHART_VERSION \ - --namespace camunda \ - --create-namespace \ - --set identityKeycloak.postgresql.enabled=false \ - --set identityKeycloak.externalDatabase.host=$DB_HOST \ - --set identityKeycloak.externalDatabase.user=$PG_USERNAME \ - --set identityKeycloak.externalDatabase.password=$PG_PASSWORD \ - --set identityKeycloak.externalDatabase.database=$DEFAULT_DB_NAME \ - --set global.ingress.enabled=true \ - --set global.ingress.host=$DOMAIN_NAME \ - --set global.ingress.tls.enabled=true \ - --set global.ingress.tls.secretName=camunda-c8-tls \ - --set-string 'global.ingress.annotations.kubernetes\.io\/tls-acme=true' \ - --set global.identity.auth.publicIssuerUrl="https://$DOMAIN_NAME/auth/realms/camunda-platform" \ - --set global.identity.auth.operate.redirectUrl="https://$DOMAIN_NAME/operate" \ - --set global.identity.auth.tasklist.redirectUrl="https://$DOMAIN_NAME/tasklist" \ - --set global.identity.auth.optimize.redirectUrl="https://$DOMAIN_NAME/optimize" \ - --set identity.contextPath="/identity" \ - --set identity.fullURL="https://$DOMAIN_NAME/identity" \ - --set operate.contextPath="/operate" \ - --set tasklist.contextPath="/tasklist" \ - --set optimize.contextPath="/optimize" \ - --set zeebeGateway.ingress.grpc.enabled=true \ - --set zeebeGateway.ingress.grpc.host=zeebe.$DOMAIN_NAME \ - --set zeebeGateway.ingress.grpc.tls.enabled=true \ - --set zeebeGateway.ingress.grpc.tls.secretName=zeebe-c8-tls-grpc \ - --set-string 'zeebeGateway.ingress.grpc.annotations.kubernetes\.io\/tls-acme=true' \ - --set zeebeGateway.contextPath="/zeebe" -``` - -The annotation `kubernetes.io/tls-acme=true` is [interpreted by cert-manager](https://cert-manager.io/docs/usage/ingress/) and automatically results in the creation of the required certificate request, easing the setup. +#### Reference the credentials in secrets + +Before installing the Helm chart, create Kubernetes secrets to store the Keycloak database authentication credentials and the OpenSearch authentication credentials. + +To create the secrets, run the following commands: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/create-external-db-secrets.sh +``` + + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/helm-values/values-no-domain.yml +``` + +#### Reference the credentials in secrets + +Before installing the Helm chart, create Kubernetes secrets to store the Keycloak database authentication credentials and the OpenSearch authentication credentials. 
+ +To create the secrets, run the following commands: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/create-external-db-secrets.sh +``` - -```shell -helm upgrade --install \ - camunda camunda-platform \ - --repo https://helm.camunda.io \ - --version $CAMUNDA_HELM_CHART_VERSION \ - --namespace camunda \ - --create-namespace \ - --set identityKeycloak.postgresql.enabled=false \ - --set identityKeycloak.externalDatabase.host=$DB_HOST \ - --set identityKeycloak.externalDatabase.user=$PG_USERNAME \ - --set identityKeycloak.externalDatabase.password=$PG_PASSWORD \ - --set identityKeycloak.externalDatabase.database=$DEFAULT_DB_NAME + + +The following makes use of the [combined Ingress setup](/self-managed/setup/guides/ingress-setup.md#combined-ingress-setup) by deploying a single Ingress for all HTTP components and a separate Ingress for the gRPC endpoint. + +:::info Cert-manager annotation for domain installation +The annotation `kubernetes.io/tls-acme=true` will be [interpreted by cert-manager](https://cert-manager.io/docs/usage/ingress/) and automatically results in the creation of the required certificate request, easing the setup. +::: + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/helm-values/values-domain.yml +``` + +:::warning Exposure of the Zeebe Gateway + +Publicly exposing the Zeebe Gateway without proper authorization can pose significant security risks. To avoid this, consider disabling the Ingress for the Zeebe Gateway by setting the following values to `false` in your configuration file: + +- `zeebeGateway.ingress.grpc.enabled` +- `zeebeGateway.ingress.rest.enabled` + +By default, authorization is enabled to ensure secure access to Zeebe. Typically, only internal components need direct access to Zeebe, making it unnecessary to expose the gateway externally. + +::: + + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/helm-values/values-no-domain.yml ``` + -### Verify connectivity to Camunda 8 +### 2. Configure your deployment + +#### Enable Enterprise components + +Some components are not enabled by default in this deployment. For more information on how to configure and enable these components, refer to [configuring Web Modeler, Console, and Connectors](../../../install.md#configuring-web-modeler-console-and-connectors). + +#### Use internal Elasticsearch instead of the managed OpenSearch + +If you do not wish to use a managed OpenSearch service, you can opt to use the internal Elasticsearch deployment. This configuration disables OpenSearch and enables the internal Kubernetes Elasticsearch deployment: + +
    +Show configuration changes to disable external OpenSearch usage + +```yaml +global: + elasticsearch: + enabled: true + opensearch: + enabled: false + +elasticsearch: + enabled: true +``` + +
    + +#### Use internal PostgreSQL instead of the managed Aurora + +If you prefer not to use an external PostgreSQL service, you can switch to the internal PostgreSQL deployment. In this case, you will need to configure the Helm chart as follows and remove certain configurations related to the external database and service account: + +
    +Show configuration changes to disable external database usage + +```yaml +identityKeycloak: + postgresql: + enabled: true + + # Remove external database configuration + # externalDatabase: + # ... + + # Remove service account and annotations + # serviceAccount: + # ... + + # Remove extra environment variables for external database driver + # extraEnvVars: + # ... + +webModeler: + # Remove this part + + # restapi: + # externalDatabase: + # url: jdbc:aws-wrapper:postgresql://${DB_HOST}:5432/${DB_WEBMODELER_NAME} + # user: ${DB_WEBMODELER_USERNAME} + # existingSecret: webmodeler-postgres-secret + # existingSecretPasswordKey: password + +identity: + # Remove this part + + # externalDatabase: + # enabled: true + # host: ${DB_HOST} + # port: 5432 + # username: ${DB_IDENTITY_USERNAME} + # database: ${DB_IDENTITY_NAME} + # existingSecret: identity-postgres-secret + # existingSecretPasswordKey: password +``` + +
    + +#### Fill your deployment with actual values + +Once you've prepared the `values.yml` file, run the following `envsubst` command to substitute the environment variables with their actual values: + +```bash +# generate the final values +envsubst < values.yml > generated-values.yml + +# print the result +cat generated-values.yml +``` + +:::info Camunda Helm chart no longer automatically generates passwords + +Starting from **Camunda 8.6**, the Helm chart deprecated the automatic generation of secrets, and this feature has been fully removed in **Camunda 8.7**. + +::: + +Next, store various passwords in a Kubernetes secret, which will be used by the Helm chart. Below is an example of how to set up the required secret. You can use `openssl` to generate random secrets and store them in environment variables: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/generate-passwords.sh +``` + +Use these environment variables in the `kubectl` command to create the secret. + +- The values for `postgres-password` and `password` are not required if you are using an external database. If you choose not to use an external database, you must provide those values. +- The `smtp-password` should be replaced with the appropriate external value ([see how it's used by Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#smtp--email)). + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/create-identity-secret.sh +``` + +### 3. Install Camunda 8 using Helm + +Now that the `generated-values.yml` is ready, you can install Camunda 8 using Helm. Run the following command: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/install-chart.sh +``` + +This command: + +- Installs (or upgrades) the Camunda platform using the Helm chart. +- Substitutes the appropriate version using the `$CAMUNDA_HELM_CHART_VERSION` environment variable. +- Applies the configuration from `generated-values.yml`. + +:::note + +This guide uses `helm upgrade --install` as it runs install on initial deployment and upgrades future usage. This may make it easier for future [Camunda 8 Helm upgrades](/self-managed/setup/upgrade.md) or any other component upgrades. + +::: + +You can track the progress of the installation using the following command: + +```bash +watch -n 5 ' + kubectl get pods -n camunda --output=wide; + if [ $(kubectl get pods -n camunda --field-selector=status.phase!=Running -o name | wc -l) -eq 0 ] && + [ $(kubectl get pods -n camunda -o json | jq -r ".items[] | select(.status.containerStatuses[]?.ready == false)" | wc -l) -eq 0 ]; + then + echo "All pods are Running and Healthy - Installation completed!"; + else + echo "Some pods are not Running or Healthy"; + fi +' +``` + +
    +Understand how each component interacts with IRSA + + +#### Web Modeler + +As the Web Modeler REST API uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. +Web Modeler already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. + +#### Keycloak + +:::caution Only available from v21+ + +IAM Roles for Service Accounts can only be implemented with Keycloak 21+. This may require you to adjust the version used in the Camunda Helm chart. + +::: + +From Keycloak versions 21+, the default JDBC driver can be overwritten, allowing use of a custom wrapper like the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) to utilize the features of IRSA. This is a wrapper around the default JDBC driver, but takes care of signing the requests. + +The [official Keycloak documentation](https://www.keycloak.org/server/db#preparing-keycloak-for-amazon-aurora-postgresql) also provides detailed instructions for utilizing Amazon Aurora PostgreSQL. + +A custom Keycloak container image containing necessary configurations is accessible on Docker Hub at [camunda/keycloak](https://hub.docker.com/r/camunda/keycloak). This image, built upon the base image [bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak), incorporates the required wrapper for seamless integration. + +#### Container image sources + +The sources of the [Camunda Keycloak images](https://hub.docker.com/r/camunda/keycloak) can be found on [GitHub](https://github.com/camunda/keycloak). In this repository, the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) is assembled in the `Dockerfile`. + +Maintenance of these images is based on the upstream [Bitnami Keycloak images](https://hub.docker.com/r/bitnami/keycloak), ensuring they are always up-to-date with the latest Keycloak releases. The lifecycle details for Keycloak can be found on [endoflife.date](https://endoflife.date/keycloak). + +##### Keycloak image configuration + +Bitnami Keycloak container image configuration is available at [hub.docker.com/bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak). + +##### Identity + +Identity uses PostgreSQL, and `identity` is configured to use IRSA with Amazon Aurora PostgreSQL. Check the [Identity database configuration](../../../../identity/deployment/configuration-variables.md#running-identity-on-amazon-aurora-postgresql) for more details. Identity includes the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. + +#### Amazon OpenSearch Service + +##### Internal database configuration + +The default setup is sufficient for Amazon OpenSearch Service clusters without **fine-grained access control**. + +Fine-grained access control adds another layer of security to OpenSearch, requiring you to add a mapping between the IAM role and the internal OpenSearch role. Visit the [AWS documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html) on fine-grained access control. + +There are different ways to configure the mapping within Amazon OpenSearch Service: + +- Via a [Terraform module](https://registry.terraform.io/modules/idealo/opensearch/aws/latest) in case your OpenSearch instance is exposed. 
+- Via the [OpenSearch dashboard](https://opensearch.org/docs/latest/security/access-control/users-roles/). +- Via the **REST API**. To authorize the IAM role in OpenSearch for access, follow these steps: + + Use the following `curl` command to update the OpenSearch internal database and authorize the IAM role for access. Replace placeholders with your specific values: + + ```bash + curl -sS -u ":" \ + -X PATCH \ + "https:///_opendistro/_security/api/rolesmapping/all_access?pretty" \ + -H 'Content-Type: application/json' \ + -d' + [ + { + "op": "add", + "path": "/backend_roles", + "value": [""] + } + ] + ' + ``` + + - Replace `` and `` with your OpenSearch domain admin credentials. + - Replace `` with your OpenSearch endpoint URL. + - Replace `` with the IAM role name created by Terraform, which is output by the `opensearch_role` module. + + :::note Security of basic auth usage + + **This example uses basic authentication (username and password), which may not be the best practice for all scenarios, especially if fine-grained access control is enabled.** The endpoint used in this example is not exposed by default, so consult your OpenSearch documentation for specifics on enabling and securing this endpoint. + + ::: + +Ensure that the `iam_role_arn` of the previously created `opensearch_role` is assigned to an internal role within Amazon OpenSearch Service. For example, `all_access` on the Amazon OpenSearch Service side is a good candidate, or if required, extra roles can be created with more restrictive access. + + +
    + +## Verify connectivity to Camunda 8 First, we need an OAuth client to be able to connect to the Camunda 8 cluster. -This can be done by following the [Identity getting started guide](/self-managed/identity/getting-started/install-identity.md) followed by the [incorporating applications documentation](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). -Instead of creating a confidential application, a machine-to-machine (M2M) application is required to be created. -This reveals a `client-id` and `client-secret` that can be used to connect to the Camunda 8 cluster. +### Generate an M2M token using Identity + +Generate an M2M token by following the steps outlined in the [Identity getting started guide](/self-managed/identity/getting-started/install-identity.md), along with the [incorporating applications documentation](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). + +Below is a summary of the necessary instructions: + + + + +1. Open Identity in your browser at `https://${DOMAIN_NAME}/identity`. You will be redirected to Keycloak and prompted to log in with a username and password. +2. Use `demo` as both the username and password. +3. Select **Add application** and select **M2M** as the type. Assign a name like "test." +4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Zeebe API** with "write" permission. +5. Retrieve the `client-id` and `client-secret` values from the application details + +```shell +export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application +export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +``` + + + + + +Identity and Keycloak must be port-forwarded to be able to connect to the cluster. + +```shell +kubectl port-forward services/camunda-identity 8080:80 --namespace camunda +kubectl port-forward services/camunda-keycloak 18080:80 --namespace camunda +``` + +1. Open Identity in your browser at `http://localhost:8080`. You will be redirected to Keycloak and prompted to log in with a username and password. +2. Use `demo` as both the username and password. +3. Select **Add application** and select **M2M** as the type. Assign a name like "test." +4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Zeebe API** with "write" permission. +5. Retrieve the `client-id` and `client-secret` values from the application details + +```shell +export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application +export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +``` + +
    +To access the other services and their UIs, port-forward those Components as well: + + +```shell +Operate: +> kubectl port-forward svc/camunda-operate 8081:80 --namespace camunda +Tasklist: +> kubectl port-forward svc/camunda-tasklist 8082:80 --namespace camunda +Optimize: +> kubectl port-forward svc/camunda-optimize 8083:80 --namespace camunda +Connectors: +> kubectl port-forward svc/camunda-connectors 8086:8080 --namespace camunda +WebModeler: +> kubectl port-forward svc/camunda-web-modeler-webapp 8084:80 --namespace camunda +Console: +> kubectl port-forward svc/camunda-console 8085:80 --namespace camunda +``` + + +
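If you prefer to start all of these port-forwards in one go, a minimal sketch (assuming the same service names and the `camunda` namespace) runs them as background jobs of the current shell:

```bash
# Start each port-forward in the background; stop them later with `kill %1 %2 ...` or by closing the shell
kubectl port-forward svc/camunda-operate 8081:80 --namespace camunda &
kubectl port-forward svc/camunda-tasklist 8082:80 --namespace camunda &
kubectl port-forward svc/camunda-optimize 8083:80 --namespace camunda &
kubectl port-forward svc/camunda-web-modeler-webapp 8084:80 --namespace camunda &
kubectl port-forward svc/camunda-console 8085:80 --namespace camunda &
kubectl port-forward svc/camunda-connectors 8086:8080 --namespace camunda &

# Keep the shell attached until you interrupt with Ctrl+C
wait
```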
    + +
    +
    + +### Use the token - + For a detailed guide on generating and using a token, please conduct the relevant documentation on [authenticating with the REST API](./../../../../../apis-tools/camunda-api-rest/camunda-api-rest-authentication.md?environment=self-managed). - + Export the following environment variables: ```shell -export ZEEBE_ADDRESS=zeebe-rest.$DOMAIN_NAME -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +export ZEEBE_ADDRESS_REST=https://$DOMAIN_NAME/zeebe export ZEEBE_AUTHORIZATION_SERVER_URL=https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token ``` - + -This requires to port-forward the Zeebe Gateway and Keycloak to be able to connect to the cluster. +This requires to port-forward the Zeebe Gateway to be able to connect to the cluster. ```shell -kubectl port-forward services/camunda-zeebe-gateway 8080:8080 -kubectl port-forward services/camunda-keycloak 18080:80 +kubectl port-forward services/camunda-zeebe-gateway 8080:8080 --namespace camunda ``` Export the following environment variables: ```shell -export ZEEBE_ADDRESS=localhost:8080 -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +export ZEEBE_ADDRESS_REST=http://localhost:8080 export ZEEBE_AUTHORIZATION_SERVER_URL=http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token ``` @@ -302,22 +661,20 @@ export ZEEBE_AUTHORIZATION_SERVER_URL=http://localhost:18080/auth/realms/camunda -Generate a temporary token to access the REST API: +Generate a temporary token to access the REST API, then capture the value of the `access_token` property and store it as your token. ```shell -curl --location --request POST "${ZEEBE_AUTHORIZATION_SERVER_URL}" \ +export TOKEN=$(curl --location --request POST "${ZEEBE_AUTHORIZATION_SERVER_URL}" \ --header "Content-Type: application/x-www-form-urlencoded" \ --data-urlencode "client_id=${ZEEBE_CLIENT_ID}" \ --data-urlencode "client_secret=${ZEEBE_CLIENT_SECRET}" \ ---data-urlencode "grant_type=client_credentials" +--data-urlencode "grant_type=client_credentials" | jq '.access_token' -r) ``` -Capture the value of the `access_token` property and store it as your token. - Use the stored token, in our case `TOKEN`, to use the REST API to print the cluster topology. ```shell -curl --header "Authorization: Bearer ${TOKEN}" "${ZEEBE_ADDRESS}/v2/topology" +curl --header "Authorization: Bearer ${TOKEN}" "${ZEEBE_ADDRESS_REST}/v2/topology" ``` ...and results in the following output: @@ -415,41 +772,36 @@ curl --header "Authorization: Bearer ${TOKEN}" "${ZEEBE_ADDRESS}/v2/topology" After following the installation instructions in the [zbctl docs](/apis-tools/community-clients/cli-client/index.md), we can configure the required connectivity to check that the Zeebe cluster is reachable. 
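Before configuring connectivity, it can be worth confirming that the CLI is actually available on your `PATH`; a quick sketch of such a check is:

```bash
# Print the zbctl client version to confirm the installation
zbctl version
```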
- + Export the following environment variables: ```shell export ZEEBE_ADDRESS=zeebe.$DOMAIN_NAME:443 -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application export ZEEBE_AUTHORIZATION_SERVER_URL=https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token export ZEEBE_TOKEN_AUDIENCE='zeebe-api' export ZEEBE_TOKEN_SCOPE='camunda-identity' ``` - - + + -This requires to port-forward the Zeebe Gateway and Keycloak to be able to connect to the cluster. +This requires to port-forward the Zeebe Gateway to be able to connect to the cluster. ```shell -kubectl port-forward services/camunda-zeebe-gateway 26500:26500 -kubectl port-forward services/camunda-keycloak 18080:80 +kubectl port-forward services/camunda-zeebe-gateway 26500:26500 --namespace camunda ``` Export the following environment variables: ```shell export ZEEBE_ADDRESS=localhost:26500 -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application export ZEEBE_AUTHORIZATION_SERVER_URL=http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token export ZEEBE_TOKEN_AUDIENCE='zeebe-api' export ZEEBE_TOKEN_SCOPE='camunda-identity' ``` - + @@ -495,103 +847,56 @@ Brokers: For more advanced topics, like deploying a process or registering a worker, consult the [zbctl docs](/apis-tools/community-clients/cli-client/cli-get-started.md). -If you want to access the other services and their UI, you can port-forward those as well: - -```shell -Identity: -> kubectl port-forward svc/camunda-identity 8080:80 -Operate: -> kubectl port-forward svc/camunda-operate 8081:80 -Tasklist: -> kubectl port-forward svc/camunda-tasklist 8082:80 -Optimize: -> kubectl port-forward svc/camunda-optimize 8083:80 -Connectors: -> kubectl port-forward svc/camunda-connectors 8088:8080 -``` - -:::note -Keycloak must be port-forwarded at all times as it is required to authenticate. -::: - -```shell -kubectl port-forward services/camunda-keycloak 18080:80 -``` - - + Follow our existing [Modeler guide on deploying a diagram](/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md). 
Below are the helper values required to be filled in Modeler: - - + + + The following values are required for the OAuth authentication: -```shell -# Make sure to manually replace #DOMAIN_NAME with your actual domain since Modeler can't access the shell context -Cluster endpoint=https://zeebe.$DOMAIN_NAME -Client ID='client-id' # retrieve the value from the identity page of your created m2m application -Client Secret='client-secret' # retrieve the value from the identity page of your created m2m application -OAuth Token URL=https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token -Audience=zeebe-api # the default for Camunda 8 Self-Managed -``` +- **Cluster endpoint:** `https://zeebe.$DOMAIN_NAME`, replacing `$DOMAIN_NAME` with your domain +- **Client ID:** Retrieve the client ID value from the identity page of your created M2M application +- **Client Secret:** Retrieve the client secret value from the Identity page of your created M2M application +- **OAuth Token URL:** `https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token`, replacing `$DOMAIN_NAME` with your domain +- **Audience:** `zeebe-api`, the default for Camunda 8 Self-Managed - - + -This requires to port-forward the Zeebe Gateway and Keycloak to be able to connect to the cluster. + -```shell -kubectl port-forward services/camunda-zeebe-gateway 26500:26500 -kubectl port-forward services/camunda-keycloak 18080:80 -``` - -The following values are required for the OAuth authentication: +This requires port-forwarding the Zeebe Gateway to be able to connect to the cluster: ```shell -# Make sure to manually replace #DOMAIN_NAME with your actual domain since Modeler can't access the shell context -Cluster endpoint=http://localhost:26500 -Client ID='client-id' # retrieve the value from the identity page of your created m2m application -Client Secret='client-secret' # retrieve the value from the identity page of your created m2m application -OAuth Token URL=http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token -Audience=zeebe-api # the default for Camunda 8 Self-Managed +kubectl port-forward services/camunda-zeebe-gateway 26500:26500 --namespace camunda ``` -If you want to access the other services and their UI, you can port-forward those as well: +The following values are required for OAuth authentication: -```shell -Identity: -> kubectl port-forward svc/camunda-identity 8080:80 -Operate: -> kubectl port-forward svc/camunda-operate 8081:80 -Tasklist: -> kubectl port-forward svc/camunda-tasklist 8082:80 -Optimize: -> kubectl port-forward svc/camunda-optimize 8083:80 -Connectors: -> kubectl port-forward svc/camunda-connectors 8088:8080 -``` +- **Cluster endpoint:** `http://localhost:26500` +- **Client ID:** Retrieve the client ID value from the identity page of your created M2M application +- **Client Secret:** Retrieve the client secret value from the Identity page of your created M2M application +- **OAuth Token URL:** `http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token` +- **Audience:** `zeebe-api`, the default for Camunda 8 Self-Managed -:::note -Keycloak must be port-forwarded at all times as it is required to authenticate. 
-::: - -```shell -kubectl port-forward services/camunda-keycloak 18080:80 -``` - - + - + -### Testing installation with payment example application +## Test the installation with payment example application To test your installation with the deployment of a sample application, refer to the [installing payment example guide](../../../guides/installing-payment-example.md). -### Advanced topics +## Advanced topics The following are some advanced configuration topics to consider for your cluster: diff --git a/docs/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md b/docs/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md index 146e779ad8e..612ce508f68 100644 --- a/docs/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md +++ b/docs/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md @@ -1,6 +1,6 @@ --- id: eks-eksctl -title: "Deploy an EKS cluster with eksctl" +title: "Deploy an EKS cluster with eksctl (quickstart)" description: "Deploy an Amazon Kubernetes cluster (EKS) with eksctl with step-by-step guidance." --- @@ -8,79 +8,103 @@ This guide explores the streamlined process of deploying Camunda 8 Self-Managed [Eksctl](https://eksctl.io/) is a common CLI tool for quickly creating and managing your Amazon EKS clusters and is [officially endorsed](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html) by Amazon. +While this guide is suitable for testing purposes, building a robust, scalable, and reproducible infrastructure is better achieved using Infrastructure as Code (IaC) tools like those described in the [Terraform guide](./terraform-setup.md), which offers more flexibility and control over your cloud environment. + This guide provides a user-friendly approach for setting up and managing Amazon EKS clusters. It covers everything from the prerequisites, such as AWS IAM role configuration, to creating a fully functional Amazon EKS cluster and a managed Aurora PostgreSQL instance. Ideal for those seeking a practical and efficient method to deploy Camunda 8 on AWS, this guide provides detailed instructions for setting up the necessary environment and AWS IAM configurations. ## Prerequisites - An [AWS account](https://docs.aws.amazon.com/accounts/latest/reference/accounts-welcome.html) is required to create resources within AWS. -- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources. -- [eksctl (0.191+)](https://eksctl.io/getting-started/), a CLI tool for creating and managing Amazon EKS clusters. - [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl), a CLI tool to interact with the cluster. +- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources. +- [eksctl (0.193+)](https://eksctl.io/getting-started/), a CLI tool for creating and managing Amazon EKS clusters. +- This guide uses GNU/Bash for all the shell commands listed. -## Considerations +### Considerations This is a basic setup to get started with Camunda 8 but does not reflect a high performance setup. For a better starting point towards production, we recommend utilizing [Infrastructure as Code tooling](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code) and following our [Terraform guide](./terraform-setup.md). 
+We refer to this architecture as the **standard installation**, which can be set up with or without a **domain** ([Ingress](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html)). +The standard installation utilizes a username and password connection for the Camunda components (or simply relies on network isolation for certain components). This option is straightforward and easier to implement, making it ideal for environments where simplicity and rapid deployment are priorities, or where network isolation provides sufficient security. + To try out Camunda 8 or develop against it, consider signing up for our [SaaS offering](https://camunda.com/platform/), or if you already have an Amazon EKS cluster, consider skipping to the [Helm guide](./eks-helm.md). While the guide is primarily tailored for UNIX systems, it can also be run under Windows by utilizing the [Windows Subsystem for Linux](https://learn.microsoft.com/windows/wsl/about). -:::warning +:::warning Cost management + Following this guide will incur costs on your Cloud provider account, namely for the managed Kubernetes service, running Kubernetes nodes in EC2, Elastic Block Storage (EBS), and Route53. More information can be found on [AWS](https://aws.amazon.com/eks/pricing/) and their [pricing calculator](https://calculator.aws/#/) as the total cost varies per region. + ::: -## Outcome +### Outcome + + -Following this guide results in the following: +This guide results in the following: -- An Amazon EKS 1.30 Kubernetes cluster with four nodes. +- An Amazon EKS Kubernetes cluster running the latest Kubernetes version with four nodes ready for Camunda 8 installation. - Installed and configured [EBS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html), which is used by the Camunda 8 Helm chart to create [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). - A [managed Aurora PostgreSQL 15.x](https://aws.amazon.com/rds/aurora/) instance that will be used by the Camunda 8 components. -- [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured. +- A [managed OpenSearch domain](https://aws.amazon.com/opensearch-service/) created and configured for use with the Camunda platform.. +- [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured and [Pod Identities](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html). - This simplifies the setup by not relying on explicit credentials, but instead allows creating a mapping between IAM roles and Kubernetes service accounts based on a trust relationship. A [blog post](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/) by AWS visualizes this on a technical level. - This allows a Kubernetes service account to temporarily impersonate an AWS IAM role to interact with AWS services like S3, RDS, or Route53 without supplying explicit credentials. This basic cluster setup is required to continue with the Helm set up as described in our [AWS Helm guide](./eks-helm.md). -## Deploying Amazon EKS cluster with eksctl +## 1. Configure AWS and eksctl -The `eksctl` tool allows the creation of clusters via a single command, but this doesn't support all configuration options. Therefore, we're supplying a YAML file that can be used with the CLI to create the cluster preconfigured with various settings. 
+### Set up AWS authentication -### `eksctl` prerequisites +Use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) to run the following commands: -To configure access, set up authentication to allow interaction with AWS via the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html). +```bash +# set your region +export AWS_REGION="eu-central-1" -A user creating AWS resources will be the owner and will always be linked to them. This means that the user will always have admin access on Kubernetes unless you delete it. +aws configure +``` + +Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_REGION`, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). + +:::caution Ownership of the created resources -Therefore, it is a good practice to create a separate [IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) that will be solely used for the `eksctl` command. [Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl`. +A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) to ensure that the resources are managed and owned by that specific user. +This ensures that the user maintains admin access to Kubernetes and associated resources unless those resources are explicitly deleted. + +[Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl`. + +::: -### Environment prerequisites +### Set up eksctl -We recommended exporting multiple environment variables to streamline the execution of the subsequent commands. +[eksctl](https://eksctl.io/) is a tool that allows the creation of clusters via a single command, but does not support all configuration options. This setup supplies a YAML file that can be used with the CLI to create the cluster preconfigured with various settings. -The following are the required environment variables with some example values. Define your secure password for the Postgres database. +Review the [installation guide](https://eksctl.io/installation/) for additional details. + +### Configure your infrastructure + +In this guide, we will set up multiple environment variables to configure the components. +Each component starts with a section that configures the different variables according to your needs. + +## 2. 
EKS cluster + +### Configuration ```shell +##### Kubernetes parameters + # The name used for the Kubernetes cluster export CLUSTER_NAME=camunda-cluster # Your standard region that you host AWS resources in -export REGION=eu-central-1 +export REGION="$AWS_REGION" # Multi-region zones, derived from the region -export ZONES="eu-central-1a eu-central-1b eu-central-1c" +export ZONES="${REGION}a ${REGION}b ${REGION}c" # The AWS Account ID export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) # CIDR range used for the VPC subnets export CIDR=10.192.0.0/16 -# Name for the Postgres DB cluster and instance -export RDS_NAME=camunda-postgres -# Postgres DB admin username -export PG_USERNAME=camunda -# Postgres DB password of the admin user -export PG_PASSWORD=camundarocks123 -# The default database name created within Postgres. Can directly be consumed by the Helm chart -export DEFAULT_DB_NAME=camunda -# The PostgreSQL version -export POSTGRESQL_VERSION=15.8 # Optional # Default node type for the Kubernetes cluster @@ -108,7 +132,7 @@ The variable `KMS_ARN` contains the required output. It should look something li For more information concerning the KMS encryption, refer to the [eksctl documentation](https://eksctl.io/usage/kms-encryption/). -### eksctl cluster YAML +### Create the cluster using eksctl Execute the following script, which creates a file called `cluster.yaml` with the following contents: @@ -119,7 +143,7 @@ apiVersion: eksctl.io/v1alpha5 metadata: name: ${CLUSTER_NAME:-camunda-cluster} # e.g. camunda-cluster region: ${REGION:-eu-central-1} # e.g. eu-central-1 - version: "1.30" + version: "1.31" availabilityZones: - ${REGION:-eu-central-1}c # e.g. eu-central-1c, the minimal is two distinct Availability Zones (AZs) within the region - ${REGION:-eu-central-1}b @@ -128,20 +152,30 @@ cloudWatch: clusterLogging: {} iam: vpcResourceControllerPolicy: true - withOIDC: true # enables and configures OIDC for IAM Roles for Service Accounts (IRSA) addons: - name: vpc-cni resolveConflicts: overwrite version: latest + useDefaultPodIdentityAssociations: true + - name: kube-proxy resolveConflicts: overwrite version: latest - - name: aws-ebs-csi-driver # automatically configures IRSA + useDefaultPodIdentityAssociations: true + + - name: aws-ebs-csi-driver resolveConflicts: overwrite version: latest + useDefaultPodIdentityAssociations: true + - name: coredns resolveConflicts: overwrite version: latest + useDefaultPodIdentityAssociations: true + + - name: eks-pod-identity-agent + version: latest + kind: ClusterConfig kubernetesNetworkConfig: ipFamily: IPv4 @@ -149,7 +183,13 @@ managedNodeGroups: - amiFamily: AmazonLinux2 desiredCapacity: ${NODE_COUNT:-4} # number of default nodes spawned if no cluster autoscaler is used disableIMDSv1: true - disablePodIMDS: true + iam: + withAddonPolicies: + albIngress: true + autoScaler: true + cloudWatch: true + ebs: true + awsLoadBalancerController: true instanceSelector: {} instanceTypes: - ${NODE_TYPE:-m6i.xlarge} # node type that is selected as default @@ -194,25 +234,27 @@ EOF With eksctl you can execute the previously created file as follows and takes 25-30 minutes. ```shell +cat cluster.yaml + eksctl create cluster --config-file cluster.yaml ``` ### (Optional) IAM access management -The access concerning Kubernetes is split into two layers. One being the IAM permissions allowing general Amazon EKS usage, like accessing the Amazon EKS UI, generating the Amazon EKS access via the AWS CLI, etc. 
The other being the cluster access itself determining which access the user should have within the Kubernetes cluster. +Kubernetes access is divided into two distinct layers. The **first layer** involves **AWS IAM permissions**, which enable basic Amazon EKS functionalities such as using the Amazon EKS UI and generating Amazon EKS access through the AWS CLI. The **second layer** provides **cluster access**, determining the user's permissions within the Kubernetes cluster. -Therefore, we first have to supply the user with the sufficient IAM permissions and afterward assign the user a role within the Kubernetes cluster. +As a result, we must initially grant the user adequate AWS IAM permissions and subsequently assign them a specific role within the Kubernetes cluster for proper access management.
    -

    IAM Permissions

    + First Layer: IAM Permissions

A minimum set of permissions is required to gain access to an Amazon EKS cluster. These two permissions allow a user to execute `aws eks update-kubeconfig` to update the local `kubeconfig` with cluster access to the Amazon EKS cluster. The policy should look as follows and can be restricted further to specific Amazon EKS clusters if required: -```shell +```json cat <<EOF >./policy-eks.json { "Version": "2012-10-17", @@ -233,7 +275,7 @@ EOF Via the AWS CLI, you can run the following to create the policy above in IAM. ```shell - aws iam create-policy --policy-name "BasicEKSPermissions" --policy-document file://policy-eks.json +aws iam create-policy --policy-name "BasicEKSPermissions" --policy-document file://policy-eks.json ``` The created policy `BasicEKSPermissions` has to be assigned to a group, a role, or a user to work. Consult the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policy-cli) to find the correct approach for you. @@ -243,19 +285,19 @@ The created policy `BasicEKSPermissions` has to be assigned to a group, a role,
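For illustration, attaching the policy to an existing IAM user could look like the sketch below; `ops-admin` is only a placeholder user name (mirroring the example further down), and attaching to a group or role works the same way with `attach-group-policy` or `attach-role-policy`:

```bash
# Attach the BasicEKSPermissions policy to a placeholder IAM user
aws iam attach-user-policy \
  --user-name ops-admin \
  --policy-arn "arn:aws:iam::${AWS_ACCOUNT_ID}:policy/BasicEKSPermissions"
```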

    -

    Cluster Access

    + Second Layer: Cluster Access

    By default, the user creating the Amazon EKS cluster has admin access. To allow other users to access it, we have to adjust the `aws-auth` configmap. This can either be done manually via `kubectl` or via `eksctl`. In the following sections, we explain how to do this. -##### eksctl +#### eksctl With `eksctl`, you can create an AWS IAM user to Kubernetes role mapping with the following command: ```shell eksctl create iamidentitymapping \ --cluster=$CLUSTER_NAME \ - --region=eu-central-1 \ + --region=$REGION \ --arn arn:aws:iam::0123456789:user/ops-admin \ --group system:masters \ --username admin @@ -270,7 +312,7 @@ Example: ```shell eksctl create iamidentitymapping \ --cluster=$CLUSTER_NAME \ - --region=eu-central-1 \ + --region=$REGION \ --arn arn:aws:iam::0123456789:user/ops-admin \ --group system:masters \ --username admin @@ -278,7 +320,7 @@ eksctl create iamidentitymapping \ More information about usage and other configuration options can be found in the [eksctl documentation](https://eksctl.io/usage/iam-identity-mappings/). -##### kubectl +#### kubectl The same can also be achieved by using `kubectl` and manually adding the mapping as part of the `mapRoles` or `mapUsers` section. @@ -291,152 +333,107 @@ For detailed examples, review the [documentation provided by AWS](https://docs.a
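For the `kubectl` route, a minimal sketch of what such an entry can look like is shown below; the account ID and user name mirror the placeholder values of the `eksctl` example above:

```bash
# Open the aws-auth ConfigMap for editing
kubectl edit configmap aws-auth --namespace kube-system

# Example shape of a mapUsers entry to add under the `data` section:
#   mapUsers: |
#     - userarn: arn:aws:iam::0123456789:user/ops-admin
#       username: admin
#       groups:
#         - system:masters
```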

    -## PostgreSQL database - -Creating a Postgres database can be solved in various ways. For example, by using the UI or the AWS CLI. -In this guide, we provide you with a reproducible setup. Therefore, we use the CLI. For creating PostgreSQL with the UI, refer to [the AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_GettingStarted.CreatingConnecting.PostgreSQL.html). - -The resulting PostgreSQL instance and default database `camunda` is intended to be used with Keycloak. You may manually add extra databases after creation for Identity with multi-tenancy. -This will not be covered in this guide as the Identity default for multi-tenancy is to be disabled. +### Access the created EKS cluster -1. Identify the VPC associated with the Amazon EKS cluster: +Access the Amazon EKS cluster via the `AWS CLI` using the following command: ```shell -export VPC_ID=$(aws ec2 describe-vpcs \ - --query "Vpcs[?Tags[?Key=='alpha.eksctl.io/cluster-name']|[?Value=='$CLUSTER_NAME']].VpcId" \ - --output text) +aws eks --region "$REGION" update-kubeconfig --name "$CLUSTER_NAME" --alias "$CLUSTER_NAME" ``` -2. The variable `VPC_ID` contains the output value required for the next step (the value should look like this: `vpc-1234567890`). -3. Create a security group within the VPC to allow connection to the Aurora PostgreSQL instance: +After updating the kubeconfig, verify your connection to the cluster with `kubectl`: ```shell -export GROUP_ID=$(aws ec2 create-security-group \ - --group-name aurora-postgres-sg \ - --description "Security Group to allow the Amazon EKS cluster to connect to Aurora PostgreSQL" \ - --vpc-id $VPC_ID \ - --output text) +kubectl get nodes ``` -4. The variable `GROUP_ID` contains the output (the value should look like this: `sg-1234567890`). -5. Create a security Ingress rule to allow access to PostgreSQL. +Create a namespace for Camunda: ```shell -aws ec2 authorize-security-group-ingress \ - --group-id $GROUP_ID \ - --protocol tcp \ - --port 5432 \ - --cidr $CIDR - # the CIDR range should be exactly the same value as in the `cluster.yaml` +kubectl create namespace camunda ``` -6. Retrieve subnets of the VPC to create a database subnet group: +In the remainder of the guide, we reference the `camunda` namespace to create some required resources in the Kubernetes cluster, such as secrets or one-time setup jobs. -```shell -export SUBNET_IDS=$(aws ec2 describe-subnets \ - --filter Name=vpc-id,Values=$VPC_ID \ - --query "Subnets[?Tags[?Key=='aws:cloudformation:logical-id']|[?contains(Value, 'Private')]].SubnetId" \ - --output text | expand -t 1) -``` +### Check existing StorageClasses -7. The variable `SUBNET_IDS` contains the output values of the private subnets (the value should look like this: `subnet-0123456789 subnet-1234567890 subnet-9876543210`). +We recommend using **gp3** volumes with Camunda 8 (see [volume performance](./amazon-eks.md#volume-performance)). It may be necessary to create the `gp3` StorageClass, as the default configuration only includes **gp2**. For detailed information, refer to the [AWS documentation](https://aws.amazon.com/ebs/general-purpose/). -8. 
Create a database subnet group to associate PostgreSQL within the existing VPC: +To see the available StorageClasses in your Kubernetes cluster, including which one is set as default, use the following command: -```shell -aws rds create-db-subnet-group \ - --db-subnet-group-name camunda-postgres \ - --db-subnet-group-description "Subnet for Camunda PostgreSQL" \ - --subnet-ids $(echo $SUBNET_IDS) +```bash +kubectl describe storageclass ``` -9. Create a PostgreSQL cluster within a private subnet of the VPC. +To check if `gp3` is set as the default StorageClass, look for the annotation `storageclass.kubernetes.io/is-default-class: "true"` in the output of the previous command. -For the latest Camunda-supported PostgreSQL engine version, check our [documentation](../../../../../reference/supported-environments.md#camunda-8-self-managed). +If `gp3` is not installed, or is not set as the default StorageClass, complete the following steps to install it and set it as default: -```shell -aws rds create-db-cluster \ - --db-cluster-identifier $RDS_NAME \ - --engine aurora-postgresql \ - --engine-version $POSTGRESQL_VERSION \ - --master-username $PG_USERNAME \ - --master-user-password $PG_PASSWORD \ - --vpc-security-group-ids $GROUP_ID \ - --availability-zones $(echo $ZONES) \ - --database-name $DEFAULT_DB_NAME \ - --db-subnet-group-name camunda-postgres -``` +1. Create the `gp3` StorageClass: -More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-cluster.html). + ```shell + cat << EOF | kubectl apply -f - + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: ebs-sc + annotations: + storageclass.kubernetes.io/is-default-class: "true" + provisioner: ebs.csi.aws.com + parameters: + type: gp3 + reclaimPolicy: Retain + volumeBindingMode: WaitForFirstConsumer + EOF + ``` -10. Wait for the PostgreSQL cluster to be ready: + This manifest defines an `ebs-sc` StorageClass to be created. This StorageClass uses the `ebs.csi.aws.com` provisioner, which is supplied by the **aws-ebs-csi-driver** addon installed during cluster creation. For more information, refer to the [official AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html). -```shell -aws rds wait db-cluster-available \ - --db-cluster-identifier $RDS_NAME -``` +2. Modify the `gp2` StorageClass to mark it as a non-default StorageClass: -11. Create a database instance within the DB cluster. + ```shell + kubectl patch storageclass gp2 -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' + ``` -The `engine-version` must be the same as the previously created PostgreSQL cluster. +3. Verify the changes by running the `kubectl get storageclass` command. -```shell -aws rds create-db-instance \ - --db-instance-identifier $RDS_NAME \ - --db-cluster-identifier $RDS_NAME \ - --engine aurora-postgresql \ - --engine-version $POSTGRESQL_VERSION \ - --no-publicly-accessible \ - --db-instance-class db.t3.medium -``` +After executing these commands, you will have a `gp3` StorageClass set as the default and the `gp2` StorageClass marked as non-default, provided that **gp2** was already present. -More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-instance.html). +### Domain deployment requirements -12. 
Wait for changes to be applied: +If you plan to deploy Camunda using an external domain associated with an external certificate, you will need to set up some IAM policies to allow both **external-dns** and **cert-manager** to interact with Route 53, which controls the DNS. -```shell -aws rds wait db-instance-available \ - --db-instance-identifier $RDS_NAME -``` +By default, the cluster uses **Pod Identity** to manage IAM roles for your applications. This means that service accounts are associated with IAM roles, allowing your pods to securely access AWS resources without hardcoding credentials. For more information on configuring Pod Identity, refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html). -### Verifying connectivity between the Amazon EKS cluster and the PostgreSQL database +#### Enable OIDC and IAM roles for Service Accounts (IRSA) -1. Retrieve the writer endpoint of the DB cluster. +To [enable OpenID Connect (OIDC) and IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) on your cluster, complete the following steps: -```shell -export DB_HOST=$(aws rds describe-db-cluster-endpoints \ - --db-cluster-identifier $RDS_NAME \ - --query "DBClusterEndpoints[?EndpointType=='WRITER'].Endpoint" \ - --output text) -``` - -2. Start Ubuntu container in interactive mode within the Amazon EKS cluster. +1. Determine the OIDC issuer ID for your cluster. -```shell -kubectl run ubuntu --rm -i --tty --image ubuntu --env="DB_HOST=$DB_HOST" --env="PG_USERNAME=$PG_USERNAME" -- bash -``` + First, ensure that your EKS cluster is set up with an OIDC provider. The following command should show you the OIDC issuer: -3. Install required dependencies: + ```bash + export oidc_id=$(aws eks describe-cluster --name "$CLUSTER_NAME" --query "cluster.identity.oidc.issuer" --output text | cut -d '/' -f 5) + echo "$oidc_id" + ``` -```shell -apt update && apt install -y postgresql-client -``` + Determine whether an IAM OIDC provider with your cluster’s issuer ID is already in your account: -4. Connect to PostgreSQL database: + ```bash + aws iam list-open-id-connect-providers | grep $oidc_id | cut -d "/" -f4 + ``` -```shell -psql \ - --host=$DB_HOST \ - --username=$PG_USERNAME \ - --port=5432 \ - --dbname=postgres -``` + If output is returned, an IAM OIDC provider is already set up for your cluster, so you can skip the next step. If no output is returned, you will need to set up an IAM OIDC provider for your cluster. -Verify that the connection is successful. +1. Create an IAM OIDC identity provider for your cluster with the following command: -## Prerequisites for Camunda 8 installation + ```bash + eksctl utils associate-iam-oidc-provider --region "$REGION" --cluster "$CLUSTER_NAME" --approve + ``` -### Policy for external-dns +#### Policy for external-dns The following instructions are based on the [external-dns](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md) guide concerning the AWS setup and only covers the required IAM setup. The Helm chart will be installed in the [follow-up guide](./eks-helm.md). 
@@ -479,11 +476,13 @@ aws iam create-policy --policy-name "AllowExternalDNSUpdates" --policy-document export EXTERNAL_DNS_POLICY_ARN=$(aws iam list-policies \ --query 'Policies[?PolicyName==`AllowExternalDNSUpdates`].Arn' \ --output text) + +echo "EXTERNAL_DNS_POLICY_ARN=$EXTERNAL_DNS_POLICY_ARN" ``` The `EXTERNAL_DNS_POLICY_ARN` will be used in the next step to create a role mapping between the Kubernetes Service Account and AWS IAM Service Account. -Using `eksctl` allows us to create the required role mapping for external-dns. +Use `eksctl` to create the required role mapping for external-dns: ```shell eksctl create iamserviceaccount \ @@ -500,13 +499,15 @@ eksctl create iamserviceaccount \ export EXTERNAL_DNS_IRSA_ARN=$(aws iam list-roles \ --query "Roles[?RoleName=='external-dns-irsa'].Arn" \ --output text) + +echo "EXTERNAL_DNS_IRSA_ARN=$EXTERNAL_DNS_IRSA_ARN" ``` The variable `EXTERNAL_DNS_IRSA_ARN` contains the `arn` (it should look like this: `arn:aws:iam::XXXXXXXXXXXX:role/external-dns-irsa`). Alternatively, you can deploy the Helm chart first and then use `eksctl` with the option `--override-existing-serviceaccounts` instead of `--role-only` to reconfigure the created service account. -### Policy for cert-manager +#### Policy for cert-manager The following instructions are taken from the [cert-manager](https://cert-manager.io/docs/configuration/acme/dns01/route53/) guide concerning the AWS setup and only covers the required IAM setup. The Helm chart will be installed in the [follow-up guide](./eks-helm.md). @@ -553,11 +554,13 @@ aws iam create-policy --policy-name "AllowCertManagerUpdates" --policy-document export CERT_MANAGER_POLICY_ARN=$(aws iam list-policies \ --query 'Policies[?PolicyName==`AllowCertManagerUpdates`].Arn' \ --output text) + +echo "CERT_MANAGER_POLICY_ARN=$CERT_MANAGER_POLICY_ARN" ``` The `CERT_MANAGER_POLICY_ARN` is used in the next step to create a role mapping between the Amazon EKS Service Account and the AWS IAM Service Account. -Using `eksctl` allows us to create the required role mapping for cert-manager. +Use `eksctl` to create the required role mapping for cert-manager: ```shell eksctl create iamserviceaccount \ @@ -574,39 +577,419 @@ eksctl create iamserviceaccount \ export CERT_MANAGER_IRSA_ARN=$(aws iam list-roles \ --query "Roles[?RoleName=='cert-manager-irsa'].Arn" \ --output text) + +echo "CERT_MANAGER_IRSA_ARN=$CERT_MANAGER_IRSA_ARN" ``` The variable `CERT_MANAGER_IRSA_ARN` will contain the `arn` (it should look like this: `arn:aws:iam::XXXXXXXXXXXX:role/cert-manager-irsa`). Alternatively, you can deploy the Helm chart first and then use `eksctl` with the option `--override-existing-serviceaccounts` instead of `--role-only` to reconfigure the created service account. -### StorageClass +## 3. PostgreSQL database + +Creating a PostgreSQL database can be accomplished through various methods, such as using the AWS Management Console or the AWS CLI. This guide focuses on providing a reproducible setup using the CLI. For information on creating PostgreSQL using the UI, refer to the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_GettingStarted.CreatingConnecting.PostgreSQL.html). + +:::info Optional service + +If you don't want to use the Amazon RDS Aurora managed service for PostgreSQL, you can skip this section. +However, note that you may need to adjust the following instructions to remove references to it. 
+ +If you choose not to use this service, you'll need to either provide a managed PostgreSQL service or use the internal deployment by the Camunda Helm chart in Kubernetes. + +::: -We recommend using gp3 volumes with Camunda 8 (see [volume performance](./amazon-eks.md#volume-performance)). It is necessary to create the StorageClass as the default configuration only includes `gp2`. For detailed information, refer to the [AWS documentation](https://aws.amazon.com/ebs/general-purpose/). +The following components use the PostgreSQL database: -The following steps create the `gp3` StorageClass: +- Keycloak +- Identity +- Web Modeler -1. Create `gp3` StorageClass. +### Configuration ```shell -cat << EOF | kubectl apply -f - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ebs-sc - annotations: - storageclass.kubernetes.io/is-default-class: "true" -provisioner: ebs.csi.aws.com -parameters: - type: gp3 -reclaimPolicy: Retain -volumeBindingMode: WaitForFirstConsumer -EOF +##### Postgres parameters + +# Name for the Postgres DB cluster and instance +export RDS_NAME=camunda-postgres +# Postgres DB admin username +export AURORA_USERNAME=secret_user +# Postgres DB password of the admin user +export AURORA_PASSWORD=camundarocks123 +# The PostgreSQL version +export POSTGRESQL_VERSION=15.8 + +# For each database, we need to generate a username, password and database name +export DB_KEYCLOAK_NAME="keycloak_db" +export DB_KEYCLOAK_USERNAME="keycloak-pg" +export DB_KEYCLOAK_PASSWORD="CHANGE-ME-PLEASE" + +export DB_IDENTITY_NAME="identity_db" +export DB_IDENTITY_USERNAME="identity-pg" +export DB_IDENTITY_PASSWORD="CHANGE-ME-PLEASE" + +export DB_WEBMODELER_NAME="webmodeler_db" +export DB_WEBMODELER_USERNAME="webmodeler-pg" +export DB_WEBMODELER_PASSWORD="CHANGE-ME-PLEASE" ``` -2. Modify the `gp2` storage class to mark it as a non-default storage class: +### Step-by-step setup + +1. Identify the VPC associated with the Amazon EKS cluster: + + ```shell + export VPC_ID=$(aws ec2 describe-vpcs \ + --query "Vpcs[?Tags[?Key=='alpha.eksctl.io/cluster-name']|[?Value=='$CLUSTER_NAME']].VpcId" \ + --output text) + + echo "VPC_ID=$VPC_ID" + ``` + + The variable `VPC_ID` contains the output value required for the next step (the value should look like this: `vpc-1234567890`). + +2. Create a security group within the VPC to allow connections to the Aurora PostgreSQL instance: + + ```shell + export GROUP_ID_AURORA=$(aws ec2 create-security-group \ + --group-name aurora-postgres-sg \ + --description "Security Group to allow the Amazon EKS cluster $CLUSTER_NAME to connect to Aurora PostgreSQL $RDS_NAME" \ + --vpc-id $VPC_ID \ + --output text) + + echo "GROUP_ID_AURORA=$GROUP_ID_AURORA" + ``` + + The variable `GROUP_ID_AURORA` contains the output (the value should look like this: `sg-1234567890`). + +3. Create a security ingress rule to allow access to PostgreSQL: + + ```shell + aws ec2 authorize-security-group-ingress \ + --group-id $GROUP_ID_AURORA \ + --protocol tcp \ + --port 5432 \ + --cidr $CIDR + # The CIDR range should match the value in the `cluster.yaml` + ``` + +4. 
Retrieve subnets of the VPC to create a database subnet group: + + ```shell + export SUBNET_IDS=$(aws ec2 describe-subnets \ + --filter Name=vpc-id,Values=$VPC_ID \ + --query "Subnets[?Tags[?Key=='aws:cloudformation:logical-id']|[?contains(Value, 'Private')]].SubnetId" \ + --output text | expand -t 1) + + echo "SUBNET_IDS=$SUBNET_IDS" + ``` + + The variable `SUBNET_IDS` contains the output values of the private subnets (the value should look like this: `subnet-0123456789 subnet-1234567890 subnet-9876543210`). + +5. Create a database subnet group to associate PostgreSQL within the existing VPC: + + ```shell + aws rds create-db-subnet-group \ + --db-subnet-group-name camunda-postgres \ + --db-subnet-group-description "Subnet for Camunda PostgreSQL $RDS_NAME" \ + --subnet-ids $(echo "$SUBNET_IDS") + ``` + +6. Create a PostgreSQL cluster within a private subnet of the VPC: + + For the latest Camunda-supported PostgreSQL engine version, check our [documentation](../../../../../reference/supported-environments.md#camunda-8-self-managed). + + ```shell + aws rds create-db-cluster \ + --db-cluster-identifier $RDS_NAME \ + --engine aurora-postgresql \ + --engine-version $POSTGRESQL_VERSION \ + --master-username $AURORA_USERNAME \ + --master-user-password $AURORA_PASSWORD \ + --vpc-security-group-ids $GROUP_ID_AURORA \ + --availability-zones $(echo $ZONES) \ + --db-subnet-group-name camunda-postgres + ``` + + More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-cluster.html). + +7. Wait for the PostgreSQL cluster to be ready: + + ```shell + aws rds wait db-cluster-available \ + --db-cluster-identifier $RDS_NAME + ``` + +8. Create a database instance within the DB cluster: + + Ensure that the `engine-version` matches the previously created PostgreSQL cluster. + + ```shell + aws rds create-db-instance \ + --db-instance-identifier $RDS_NAME \ + --db-cluster-identifier $RDS_NAME \ + --engine aurora-postgresql \ + --engine-version $POSTGRESQL_VERSION \ + --no-publicly-accessible \ + --db-instance-class db.t3.medium + ``` + + More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-instance.html). + +9. Wait for changes to be applied: + + ```shell + aws rds wait db-instance-available \ + --db-instance-identifier $RDS_NAME + ``` + + This command will wait until the instance is ready. + +### Create the databases + +Now that you have a database, you need to create dedicated databases for each Camunda component along with associated users that have configured access. + +We will also use this step to verify connectivity to the database from the created EKS cluster. The creation of the databases will be performed by spawning a pod job in the Kubernetes cluster, using the main user to create the different databases. + +1. Retrieve the writer endpoint of the DB cluster: + + ```shell + export DB_HOST=$(aws rds describe-db-cluster-endpoints \ + --db-cluster-identifier $RDS_NAME \ + --query "DBClusterEndpoints[?EndpointType=='WRITER'].Endpoint" \ + --output text) + + echo "DB_HOST=$DB_HOST" + ``` + +2. 
Create a secret that references the environment variables: + + ```bash + kubectl create secret generic setup-db-secret --namespace camunda \ + --from-literal=AURORA_ENDPOINT="$DB_HOST" \ + --from-literal=AURORA_PORT="5432" \ + --from-literal=AURORA_DB_NAME="postgres" \ + --from-literal=AURORA_USERNAME="$AURORA_USERNAME" \ + --from-literal=AURORA_PASSWORD="$AURORA_PASSWORD" \ + --from-literal=DB_KEYCLOAK_NAME="$DB_KEYCLOAK_NAME" \ + --from-literal=DB_KEYCLOAK_USERNAME="$DB_KEYCLOAK_USERNAME" \ + --from-literal=DB_KEYCLOAK_PASSWORD="$DB_KEYCLOAK_PASSWORD" \ + --from-literal=DB_IDENTITY_NAME="$DB_IDENTITY_NAME" \ + --from-literal=DB_IDENTITY_USERNAME="$DB_IDENTITY_USERNAME" \ + --from-literal=DB_IDENTITY_PASSWORD="$DB_IDENTITY_PASSWORD" \ + --from-literal=DB_WEBMODELER_NAME="$DB_WEBMODELER_NAME" \ + --from-literal=DB_WEBMODELER_USERNAME="$DB_WEBMODELER_USERNAME" \ + --from-literal=DB_WEBMODELER_PASSWORD="$DB_WEBMODELER_PASSWORD" + ``` + + This command creates a secret named `setup-db-secret` and dynamically populates it with the values from your environment variables. + + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-db-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. + +3. Save the following manifest to a file, for example, `setup-postgres-create-db.yml`: + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/setup-postgres-create-db.yml + ``` + +4. Apply the manifest: + + ```bash + kubectl apply -f setup-postgres-create-db.yml --namespace camunda + ``` + + Once the secret is created, the **Job** manifest from the previous step can consume this secret to securely access the database credentials. + +5. Once the job is created, monitor its progress using: + + ```bash + kubectl get job/create-setup-user-db --namespace camunda --watch + ``` + + Once the job shows as `Completed`, the users and databases will have been successfully created. + +6. View the logs of the job to confirm that the users were created and privileges were granted successfully: + + ```bash + kubectl logs job/create-setup-user-db --namespace camunda + ``` + +7. Cleanup the resources: + + ```bash + kubectl delete job create-setup-user-db --namespace camunda + kubectl delete secret setup-db-secret --namespace camunda + ``` + + Running these commands will clean up both the job and the secret, ensuring that no unnecessary resources remain in the cluster. + +## 4. OpenSearch domain + +Creating an OpenSearch domain can be accomplished through various methods, such as using the AWS Management Console or the AWS CLI. This guide focuses on providing a reproducible setup using the CLI. For information on creating an OpenSearch domain using the UI, refer to the [AWS OpenSearch documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/create-managed-domain.html). + +The resulting OpenSearch domain is intended for use with the Camunda platform, the following components utilize OpenSearch: + +- Operate +- Optimize +- Tasklist +- Zeebe + +:::info Optional service + +If you don't want to use the Amazon OpenSearch managed service for OpenSearch, you can skip this section. +However, note that you may need to adjust the following instructions to remove references to it. 
+ +If you choose not to use this service, you'll need to either provide a managed OpenSearch or Elasticsearch service or use the internal deployment by the Camunda Helm chart in Kubernetes. + +::: + +:::note Migration to OpenSearch is not supported + +Using Amazon OpenSearch Service requires [setting up a new Camunda installation](/self-managed/setup/overview.md). Migration from previous Camunda versions or Elasticsearch environments is currently not supported. Switching between Elasticsearch and OpenSearch, in either direction, is also not supported. + +::: + +### Configuration ```shell -kubectl patch storageclass gp2 -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' +##### OpenSearch parameters + +# Name for the OpenSearch domain +export OPENSEARCH_NAME=camunda-opensearch ``` + +:::caution Network based security + +The standard deployment for OpenSearch relies on the first layer of security, which is the Network. +While this setup allows easy access, it may expose sensitive data. To enhance security, consider implementing IAM Roles for Service Accounts (IRSA) to restrict access to the OpenSearch cluster, providing a more secure environment. +For more information, see the [Amazon OpenSearch Service fine-grained access control documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html#fgac-access-policies). + +::: + +### Step-by-step setup + +1. Identify the VPC associated with the Amazon EKS cluster: + + ```shell + export VPC_ID=$(aws ec2 describe-vpcs \ + --query "Vpcs[?Tags[?Key=='alpha.eksctl.io/cluster-name']|[?Value=='$CLUSTER_NAME']].VpcId" \ + --output text) + + echo "VPC_ID=$VPC_ID" + ``` + + The variable `VPC_ID` contains the output value required for the next steps (the value should look like this: `vpc-1234567890`). + +2. Create a security group within the VPC to allow connections to the OpenSearch domain: + + ```shell + export GROUP_ID_OPENSEARCH=$(aws ec2 create-security-group \ + --group-name opensearch-sg \ + --description "Security Group to allow internal connections From EKS $CLUSTER_NAME to OpenSearch $OPENSEARCH_NAME" \ + --vpc-id $VPC_ID \ + --output text) + + echo "GROUP_ID_OPENSEARCH=$GROUP_ID_OPENSEARCH" + ``` + + The variable `GROUP_ID_OPENSEARCH` contains the output (the value should look like this: `sg-1234567890`). + +3. Create a security ingress rule to allow access to OpenSearch over HTTPS (port 443) from within the VPC: + + ```shell + aws ec2 authorize-security-group-ingress \ + --group-id $GROUP_ID_OPENSEARCH \ + --protocol tcp \ + --port 443 \ + --cidr $CIDR # Replace with the CIDR range of your EKS cluster, e.g., + ``` + + Ensure that the CIDR range is appropriate for your environment. OpenSearch uses `443` as the https transport port. + +4. Retrieve the private subnets of the VPC: + + ```shell + export SUBNET_IDS=$(aws ec2 describe-subnets \ + --filter Name=vpc-id,Values=$VPC_ID \ + --query "Subnets[?Tags[?Key=='aws:cloudformation:logical-id']|[?contains(Value, 'Private')]].SubnetId" \ + --output text | expand -t 1) + + # format it with coma + export SUBNET_IDS=$(echo "$SUBNET_IDS" | sed 's/ /,/g') + + echo "SUBNET_IDS=$SUBNET_IDS" + ``` + + The variable `SUBNET_IDS` now contains the output values of the private subnets (the value should look like this: `subnet-0123456789 subnet-1234567890`). + +5. 
Create the OpenSearch domain: + + ```shell + aws opensearch create-domain --domain-name $OPENSEARCH_NAME \ + --engine-version OpenSearch_2.15 \ + --cluster-config "InstanceType=t3.medium.search,InstanceCount=3,ZoneAwarenessEnabled=true,ZoneAwarenessConfig={AvailabilityZoneCount=3}" \ + --node-to-node-encryption-options Enabled=true \ + --ebs-options "EBSEnabled=true,VolumeType=gp3,VolumeSize=50,Iops=3000,Throughput=125" \ + --encryption-at-rest-options Enabled=true \ + --access-policies "{ \"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"*\" }, \"Action\": \"es:*\", \"Resource\": \"arn:aws:es:$REGION:*:domain/$OPENSEARCH_NAME/*\" }]}" \ + --vpc-options "SubnetIds=${SUBNET_IDS},SecurityGroupIds=${GROUP_ID_OPENSEARCH}" + ``` + + - **Domain Name**: `$OPENSEARCH_NAME` is the name of the OpenSearch domain being created. + - **Engine Version**: Uses OpenSearch version `2.15`. + - **Cluster Configuration**: + - `InstanceType=t3.medium.search` specifies the instance type for the domain. + - `InstanceCount=3` creates a cluster with 3 instances. + - `ZoneAwarenessEnabled=true` and `ZoneAwarenessConfig={AvailabilityZoneCount=3}` enable zone awareness and spread the instances across 3 availability zones to improve fault tolerance. + - **Node-to-Node Encryption**: Encryption for traffic between nodes in the OpenSearch cluster is enabled (`Enabled=true`). + - **EBS Options**: + - `EBSEnabled=true` enables Elastic Block Store (EBS) for storage. + - `VolumeType=gp3` specifies the volume type as `gp3` with 50 GiB of storage. + - `Iops=3000` and `Throughput=125` set the IOPS and throughput for the storage. + - **Encryption at Rest**: Data stored in the domain is encrypted at rest (`Enabled=true`). + - **Access Policies**: The default access policy allows all actions (`es:*`) on resources within the domain for any AWS account (`"Principal": { "AWS": "*" }`). This is scoped to the OpenSearch domain resources using the `arn:aws:es:$REGION:*:domain/$OPENSEARCH_NAME/*` resource ARN. + - **VPC Options**: The domain is deployed within the specified VPC, restricted to the provided subnets (`SubnetIds=${SUBNET_IDS}`) and associated security group (`SecurityGroupIds=${GROUP_ID_OPENSEARCH}`). + + This configuration creates a secure OpenSearch domain with encryption both in transit (between nodes) and at rest, zonal fault tolerance, and sufficient storage performance using `gp3` volumes. The access is restricted to resources in the VPC of the EKS cluster and is governed by the specified security group. + +6. Wait for the OpenSearch domain to be active: + + ```shell + while [ "$(aws opensearch describe-domain --domain-name $OPENSEARCH_NAME --query 'DomainStatus.Processing' --output text)" != "False" ]; do echo "Waiting for OpenSearch domain to become availablen this can up to take 20-30 minutes..."; sleep 30; done && echo "OpenSearch domain is now available\!" + ``` + +7. Retrieve the endpoint of the OpenSearch domain: + + ```shell + export OPENSEARCH_HOST=$(aws opensearch describe-domains --domain-names $OPENSEARCH_NAME --query "DomainStatusList[0].Endpoints.vpc" --output text) + + echo "OPENSEARCH_HOST=$OPENSEARCH_HOST" + ``` + + This endpoint will be used to connect to your OpenSearch domain. + +### Verify connectivity from within the EKS cluster + +To verify that the OpenSearch domain is accessible from within your Amazon EKS cluster, follow these steps: + +1. 
Deploy a temporary pod to test connectivity: + + Create a temporary pod using the `amazonlinux` image in the `camunda` namespace, install `curl`, and test the connection to OpenSearch—all in a single command: + + ```bash + kubectl run amazonlinux-opensearch -n camunda --rm -i --tty --image amazonlinux -- sh -c "curl -XGET https://$OPENSEARCH_HOST/_cluster/health" + ``` + +2. Verify the response: + + If everything is set up correctly, you should receive a response from the OpenSearch service indicating its health status. + +You have successfully set up an OpenSearch domain that is accessible from within your Amazon EKS cluster. For further details, refer to the [OpenSearch documentation](https://opensearch.org/docs/latest/index/). + +## 5. Install Camunda 8 using the Helm chart + +Now that you've exported the necessary values, you can proceed with installing Camunda 8 using Helm charts. Follow the guide [Camunda 8 on Kubernetes](./eks-helm.md) for detailed instructions on deploying the platform to your Kubernetes cluster. diff --git a/docs/self-managed/setup/deploy/amazon/amazon-eks/irsa.md b/docs/self-managed/setup/deploy/amazon/amazon-eks/irsa.md index bee98384ac6..bd12fb27a28 100644 --- a/docs/self-managed/setup/deploy/amazon/amazon-eks/irsa.md +++ b/docs/self-managed/setup/deploy/amazon/amazon-eks/irsa.md @@ -1,606 +1,177 @@ --- id: irsa -title: "IAM roles for service accounts" +title: "Troubleshooting IAM Roles for Service Accounts (IRSA)" description: "Learn how to configure IAM roles for service accounts (IRSA) within AWS to authenticate workloads." --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -IAM roles for service accounts (IRSA) is a way within AWS to authenticate workloads in Amazon EKS (Kubernetes), for example, to execute signed requests against AWS services. This is a replacement for basic auth and is generally considered a [best practice by AWS](https://aws.github.io/aws-eks-best-practices/security/docs/iam/). +## IRSA configuration validation of a Camunda 8 helm deployment -The following considers the managed services by AWS and provided examples are in Terraform syntax. +The [c8-sm-checks](/self-managed/operational-guides/troubleshooting/troubleshooting.md#anomaly-detection-scripts) utility is designed to validate IAM Roles for Service Accounts ([IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html)) configuration in EKS Kubernetes clusters on AWS. It ensures that key components in a Camunda 8 deployment, such as PostgreSQL and OpenSearch, are properly configured to securely interact with AWS resources via the appropriate IAM roles. -## Aurora PostgreSQL +### IRSA check script -[Aurora PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.AuroraPostgreSQL.html) is a managed AWS PostgreSQL–compatible service. +The `/checks/kube/aws-irsa.sh` script verifies IRSA setup in your AWS Kubernetes environment by performing two types of checks: -### Setup +1. **Configuration Verification**: Ensures key IRSA configurations are correctly set, using specific checks on IAM roles, policies, and mappings to service accounts. +2. **Namespace Commands and Job Execution**: Runs commands within the specified namespace using Kubernetes jobs (if necessary) to verify network and access configurations. 
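As a quick illustration of a typical invocation (the `camunda` namespace is an assumption here; the full option reference follows below):

```bash
# Clone the checks repository and validate the IRSA setup of a deployment in the `camunda` namespace
git clone https://github.com/camunda/c8-sm-checks.git
cd c8-sm-checks
./checks/kube/aws-irsa.sh -n camunda
```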
-When using the Terraform provider of [AWS](https://registry.terraform.io/providers/hashicorp/aws/latest) with the resource [aws_rds_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/rds_cluster) to create a new rational database (RDS) or Aurora cluster, supply the argument `iam_database_authentication_enabled = true` to enable the IAM roles functionality. See the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) for availability and limitations. +This utility is non-intrusive and will not alter any deployment settings. +If the `-s` flag is provided, the script skips spawning debugging pods for network flow verification, which can be helpful if pod creation is restricted or not required for troubleshooting. -#### AWS policy +:::info Compatibility with Helm Deployments -An AWS policy (later assigned to a role) is required to allow assuming a database user within a managed database. See the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html) for policy details. +The script relies on Helm chart values and is compatible only with deployments installed or updated through standard Helm commands. It will not work with other deployment methods, such as those using `helm template` (e.g., [ArgoCD](https://argo-cd.readthedocs.io/en/latest/faq/#after-deploying-my-helm-application-with-argo-cd-i-cannot-see-it-with-helm-ls-and-other-helm-commands)). - - +Compatibility is confirmed for [Camunda Helm chart releases version 11 and above](https://artifacthub.io/packages/helm/camunda/camunda-platform). -To create the AWS policy using Terraform, you can define it with the [aws_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) resource. Here’s an example configuration: - -```json -resource "aws_iam_policy" "rds_policy" { - name = "rds-policy" - - policy = jsonencode({ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "rds-db:connect" - ], - "Resource": [ - "arn:aws:rds-db:::dbuser:/" - ] - } - ] - }) -} -``` - -Replace ``, ``, ``, and `` with the appropriate values for your AWS environment. - - - - - -To create the AWS policy using the AWS CLI, use the `aws iam create-policy` command: - -```bash -aws iam create-policy \ - --policy-name rds-policy \ - --policy-document '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "rds-db:connect" - ], - "Resource": [ - "arn:aws:rds-db:::dbuser:/" - ] - } - ] - }' -``` - -Replace ``, ``, ``, and `` with the appropriate values for your AWS environment. - - - - -#### IAM to Kubernetes mapping - - - - -To assign the policy to a role for IAM role to service account mapping in Amazon EKS, use a Terraform module like [iam-role-for-service-accounts-eks](https://registry.terraform.io/modules/terraform-aws-modules/iam/aws/latest/submodules/iam-role-for-service-accounts-eks): - -```json -module "aurora_role" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - role_name = "aurora-role" - - role_policy_arns = { - policy = aws_iam_policy.rds_policy.arn - } - - oidc_providers = { - main = { - provider_arn = "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - namespace_service_accounts = [":"] - } - } -} -``` - -This Terraform snippet creates a role that allows the service account `` within the `` to assume the user `` within the database ``. 
The output of the `aurora_role` module includes the `iam_role_arn`, which you need to annotate the service account. +::: - +#### Key features - +- **Helm values retrieval**: Extracts deployment values using Helm to ensure all required configurations are set. +- **EKS and OIDC configuration check**: Confirms that EKS is configured with IAM and OIDC, matching the minimum required version for IRSA compatibility. +- **Service account role validation**: For each specified component, verifies that the service account exists and has the correct IAM role annotations. +- **Network access verification**: Ensures that PostgreSQL (Aurora) or OpenSearch instances are accessible from within the cluster. This step involves an `nmap` scan through a Kubernetes job. Use the `-s` option to skip this step if network flow verification is unnecessary. +- **IRSA value check**: Validates that the Helm deployment values are correctly configured to use IRSA for secure service interactions with AWS. +- **Aurora PostgreSQL and OpenSearch IAM configuration**: Confirms that these services support IAM login, ensuring secure access configurations. +- **Access and Trust Policy verification**: Checks that access and trust policies are correctly set. Note that the script performs basic checks; if issues arise with these policies, further manual verification may be needed. +- **Service Account Role association test**: Tests that the IAM role association with the service account is functioning as expected by spawning a job with the specified service account and validating the resulting ARN. This step can also be skipped using the `-s` option. +- **OpenSearch Access Policy check**: Validates that the OpenSearch access policy is configured correctly to support secure connections from the cluster. -To assign the policy to a role using the AWS CLI, follow these steps: +#### Example usage -1. **Create the IAM role**: +You can find the complete usage details in the [c8-sm-checks repository](https://github.com/camunda/c8-sm-checks). Below is a quick reference for common usage options: ```bash -aws iam create-role \ - --role-name aurora-role \ - --assume-role-policy-document '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::" - } - } - } - ] - }' +Usage: ./checks/kube/aws-irsa.sh [-h] [-n NAMESPACE] [-e EXCLUDE_COMPONENTS] [-p] [-l] [-s] +Options: + -h Display this help message + -n NAMESPACE Specify the namespace to use (required) + -e EXCLUDE_COMPONENTS Comma-separated list of Components to exclude from the check (reference of the component is the root key used in the chart) + -p Comma-separated list of Components to check IRSA for PostgreSQL (overrides default list: identityKeycloak,identity,webModeler) + -l Comma-separated list of Components to check IRSA for OpenSearch (overrides default list: zeebe,operate,tasklist,optimize) + -s Disable pod spawn for IRSA and connectivity verification. + By default, the script spawns jobs in the specified namespace to perform + IRSA checks and network connectivity tests. These jobs use the amazonlinux:latest + image and scan with nmap to verify connectivity. ``` -2. 
**Attach the policy to the role**: +**Example Command:** ```bash -aws iam attach-role-policy \ - --role-name aurora-role \ - --policy-arn arn:aws:iam:::policy/rds-policy -``` - - - - -Annotate the service account with the `iam_role_arn`: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - eks.amazonaws.com/role-arn: arn:aws:iam:::role/aurora-role - name: - namespace: -``` - -Replace ``, ``, ``, ``, ``, and `` with the appropriate values for your AWS environment. - -#### Database configuration - -The setup required on the Aurora PostgreSQL side is to create the user and assign the required permissions to it. The following is an example when connected to the PostgreSQL database, and can also be realized by using a [Terraform PostgreSQL Provider](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs). See the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html#UsingWithRDS.IAMDBAuth.DBAccounts.PostgreSQL) for reference concerning Aurora specific configurations. - -```SQL -# create user and grant rds_iam role, which requires the user to login via IAM authentication over password -CREATE USER ""; -GRANT rds_iam TO ""; - -# create some database and grant the user all privileges to it -CREATE DATABASE "some-db"; -GRANT ALL privileges on database "some-db" to ""; -``` - -### Keycloak - -:::caution -IAM Roles for Service Accounts can only be implemented with Keycloak 21 onwards. This may require you to adjust the version used in the Camunda Helm Chart. -::: - -From Keycloak versions 21+, the default JDBC driver can be overwritten, allowing use of a custom wrapper like the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) to utilize the features of IRSA. This is a wrapper around the default JDBC driver, but takes care of signing the requests. - -Furthermore, the [official Keycloak documentation](https://www.keycloak.org/server/db#preparing-keycloak-for-amazon-aurora-postgresql) also provides detailed instructions for utilizing Amazon Aurora PostgreSQL. - -A custom Keycloak container image containing necessary configurations is conveniently accessible on Docker Hub at [camunda/keycloak](https://hub.docker.com/r/camunda/keycloak). This image, built upon the base image [bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak), incorporates the required wrapper for seamless integration. - -#### Container image sources - -The sources of the [Camunda Keycloak images](https://hub.docker.com/r/camunda/keycloak) can be found on [GitHub](https://github.com/camunda/keycloak). In this repository, the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) is assembled in the `Dockerfile`. - -Maintenance of these images is based on the upstream [Bitnami Keycloak images](https://hub.docker.com/r/bitnami/keycloak), ensuring they are always up-to-date with the latest Keycloak releases. The lifecycle details for Keycloak can be found on [endoflife.date](https://endoflife.date/keycloak). - -#### Keycloak image configuration - -Bitnami Keycloak container image configuration is available at [hub.docker.com/bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak). 
- -#### Kubernetes configuration - -As an example, configure the following environment variables to enable IRSA: - -```yaml -# The AWS wrapper is not capable of XA transactions -- name: KEYCLOAK_EXTRA_ARGS - value: "--db-driver=software.amazon.jdbc.Driver --transaction-xa-enabled=false --log-level=INFO,software.amazon.jdbc:INFO" - -# Enable the AWS IAM plugin -- name: KEYCLOAK_JDBC_PARAMS - value: "wrapperPlugins=iam" -- name: KEYCLOAK_JDBC_DRIVER - value: "aws-wrapper:postgresql" - -# Configure database -- name: KEYCLOAK_DATABASE_USER - value: db-user-name -- name: KEYCLOAK_DATABASE_NAME - value: db-name -- name: KEYCLOAK_DATABASE_HOST - value: db-host -- name: KEYCLOAK_DATABASE_PORT - value: 5432 - -# Ref: https://www.keycloak.org/server/configuration-metrics -- name: KEYCLOAK_ENABLE_STATISTICS - value: "true" - -# Needed to see if Keycloak is healthy: https://www.keycloak.org/server/health -- name: KEYCLOAK_ENABLE_HEALTH_ENDPOINTS - value: "true" -``` - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -##### Helm chart - -For a Helm-based deployment, you can directly configure these settings using Helm values. Below is an example of how you can incorporate these settings into your Helm chart deployment: - -```yaml -identityKeycloak: - postgresql: - enabled: false - image: docker.io/camunda/keycloak:25 # use a supported and updated version listed at https://hub.docker.com/r/camunda/keycloak/tags - extraEnvVars: - - name: KEYCLOAK_EXTRA_ARGS - value: "--db-driver=software.amazon.jdbc.Driver --transaction-xa-enabled=false --log-level=INFO,software.amazon.jdbc:INFO" - - name: KEYCLOAK_JDBC_PARAMS - value: "wrapperPlugins=iam" - - name: KEYCLOAK_JDBC_DRIVER - value: "aws-wrapper:postgresql" - externalDatabase: - host: "aurora.rds.your.domain" - port: 5432 - user: keycloak - database: keycloak -``` - -:::note -For additional details, refer to the [Camunda 8 Helm deployment documentation](/self-managed/setup/install.md). -::: - -### Web Modeler - -Since Web Modeler RestAPI uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. -Web Modeler already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. - -#### Kubernetes configuration - -As an example, configure the following environment variables - -```yaml -- name: SPRING_DATASOURCE_DRIVER_CLASS_NAME - value: software.amazon.jdbc.Driver -- name: SPRING_DATASOURCE_URL - value: jdbc:aws-wrapper:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?wrapperPlugins=iam -- name: SPRING_DATASOURCE_USERNAME - value: db-user-name -``` - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -### Identity - -Since Identity uses PostgreSQL, configure `identity` to use IRSA with Amazon Aurora PostgreSQL. Check the [Identity database configuration](../../../../identity/deployment/configuration-variables.md#running-identity-on-amazon-aurora-postgresql) for more details. -Identity already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. 
- -#### Kubernetes configuration - -As an example, configure the following environment variables - -```yaml -- name: SPRING_DATASOURCE_DRIVER_CLASS_NAME - value: software.amazon.jdbc.Driver -- name: SPRING_DATASOURCE_URL - value: jdbc:aws-wrapper:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?wrapperPlugins=iam -- name: SPRING_DATASOURCE_USERNAME - value: db-user-name -``` - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -## Amazon OpenSearch Service - -[Amazon OpenSearch Service](https://aws.amazon.com/opensearch-service/) is a managed OpenSearch service provided by AWS, which is a distributed search and analytics engine built on Apache Lucene. - -:::note -As of the 8.4 release, Zeebe, Operate, and Tasklist are now compatible with [Amazon OpenSearch Service](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch Service requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. -::: - -:::caution - -Optimize is not supported using the IRSA method. However, Optimize can be utilized by supplying a username and password. The migration step must also be disabled. For more information, refer to [using Amazon OpenSearch Service](/self-managed/setup/guides/using-existing-opensearch.md). - -::: - -### Setup - -For Amazon OpenSearch Service, the most common use case is the use of `fine-grained access control`. - -When using the Terraform provider of [AWS](https://registry.terraform.io/providers/hashicorp/aws/latest) with the resource [opensearch_domain](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/opensearch_domain) to create a new Amazon OpenSearch Service cluster, supply the arguments: - -- `advanced_security_options.enabled = true` -- `advanced_security_options.anonymous_auth_enabled = false` to activate `fine-grained access control`. - -Without `fine-grained access control`, anonymous access is enabled and would be sufficient to supply an IAM role with the right policy to allow access. In our case, we'll have a look at `fine-grained access control` and the use without it can be derived from this more complex example. - -#### AWS Policy - -An AWS policy, which later is assigned to a role, is required to allow general access to Amazon OpenSearch Service. See the [AWS documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ac.html) for the explanation of the policy. - - - - -To create an AWS policy for Amazon OpenSearch Service using Terraform, you can use the [aws_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) resource. 
Here’s an example configuration: - -```json -resource "aws_iam_policy" "opensearch_policy" { - name = "opensearch_policy" - - policy = jsonencode({ - "Version" : "2012-10-17", - "Statement" : [ - { - "Effect" : "Allow", - "Action" : [ - "es:DescribeElasticsearchDomains", - "es:DescribeElasticsearchInstanceTypeLimits", - "es:DescribeReservedElasticsearchInstanceOfferings", - "es:DescribeReservedElasticsearchInstances", - "es:GetCompatibleElasticsearchVersions", - "es:ListDomainNames", - "es:ListElasticsearchInstanceTypes", - "es:ListElasticsearchVersions", - "es:DescribeElasticsearchDomain", - "es:DescribeElasticsearchDomainConfig", - "es:ESHttpGet", - "es:ESHttpHead", - "es:GetUpgradeHistory", - "es:GetUpgradeStatus", - "es:ListTags", - "es:AddTags", - "es:RemoveTags", - "es:ESHttpDelete", - "es:ESHttpPost", - "es:ESHttpPut" - ], - "Resource" : [ - "arn:aws:es:::domain//*" - ] - } - ] - }) -} +./checks/kube/aws-irsa.sh -n camunda-primary -p "identity,webModeler" -l "zeebe,operate" ``` -Replace ``, ``, and `` with the appropriate values for your Amazon OpenSearch Service domain. +In this example, the script will check **`identity`** and **`webModeler`** components (references of the component name in the helm chart) for Aurora PostgreSQL access and **`zeebe`** and **`operate`** components for OpenSearch access in the `camunda-primary` namespace. - +#### Script output overview - +The script offers detailed output to confirm that each component is properly configured for IRSA. Below is an outline of the checks it performs and the expected output format: -To create an AWS policy for Amazon OpenSearch Service using the AWS CLI, you use the `aws iam create-policy` command: +**Example Output:** -```bash -aws iam create-policy \ - --policy-name opensearch_policy \ - --policy-document '{ - "Version" : "2012-10-17", - "Statement" : [ - { - "Effect" : "Allow", - "Action" : [ - "es:DescribeElasticsearchDomains", - "es:DescribeElasticsearchInstanceTypeLimits", - "es:DescribeReservedElasticsearchInstanceOfferings", - "es:DescribeReservedElasticsearchInstances", - "es:GetCompatibleElasticsearchVersions", - "es:ListDomainNames", - "es:ListElasticsearchInstanceTypes", - "es:ListElasticsearchVersions", - "es:DescribeElasticsearchDomain", - "es:DescribeElasticsearchDomainConfig", - "es:ESHttpGet", - "es:ESHttpHead", - "es:GetUpgradeHistory", - "es:GetUpgradeStatus", - "es:ListTags", - "es:AddTags", - "es:RemoveTags", - "es:ESHttpDelete", - "es:ESHttpPost", - "es:ESHttpPut" - ], - "Resource" : [ - "arn:aws:es:::domain//*" - ] - } - ] - }' ``` - -Replace ``, ``, and `` with the appropriate values for your Amazon OpenSearch Service domain. - - - - -#### IAM to Kubernetes mapping - -To assign the policy to a role for the IAM role to service account mapping in Amazon EKS: - - - - -You can use a Terraform module like [iam-role-for-service-accounts-eks](https://registry.terraform.io/modules/terraform-aws-modules/iam/aws/latest/submodules/iam-role-for-service-accounts-eks): - -```json -module "opensearch_role" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - role_name = "opensearch-role" - - role_policy_arns = { - policy = aws_iam_policy.opensearch_policy.arn - } - - oidc_providers = { - main = { - provider_arn = "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - namespace_service_accounts = [":"] - } - } -} +[OK] AWS CLI version 2.15.20 is compatible and user is logged in. +[OK] AWS environment detected. Proceeding with the script. 
+[INFO] Chart camunda-platform is deployed in namespace camunda-primary. +[INFO] Retrieved values for Helm deployment: camunda-platform-11.0.1. +[FAIL] The service account keycloak-sa does not have a valid eks.amazonaws.com/role-arn annotation. You must add it in the chart, see https://docs.camunda.io/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm/ +[FAIL] RoleArn name for component 'identityKeycloak' is empty. Skipping verification. ``` -This Terraform configuration allows the service account `` within the namespace `` to access the Amazon OpenSearch Service for the cluster ``. The output of the `opensearch_role` module includes the `iam_role_arn` needed to annotate the service account. +The script highlights errors with the `[FAIL]` prefix, and these are directed to `stderr` for easier filtering. We recommend capturing `stderr` output to quickly identify failed configurations. -Annotate the service account with the `iam_role_arn` output. +If the script returns a false positive—indicating success when issues are actually present—manually review each output line to ensure reported configuration details (like Role ARNs or annotations) are accurate. For example, ensure that each service account has the correct Role ARN and associated permissions to avoid undetected issues. - +### Advanced troubleshooting for IRSA configuration - +The troubleshooting script provides essential checks but may not capture all potential issues, particularly those related to IAM policies and configurations. If IRSA is not functioning as expected and no errors are flagged by the script, follow the steps below for deeper troubleshooting. -To assign the policy to a role using the AWS CLI, follow these steps: +#### Spawn a debug pod to simulate the pod environment -1. **Create the IAM role**: +To troubleshoot in an environment identical to your pod, deploy a debug pod with the necessary service account. Here are examples of debug manifests you can customize for your needs: -```bash -aws iam create-role \ - --role-name opensearch-role \ - --assume-role-policy-document '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::" - } - } - } - ] - }' -``` +- [OpenSearch client pod](https://github.com/camunda/camunda-tf-eks-module/blob/main/modules/fixtures/opensearch-client.yml) +- [PostgreSQL client pod](https://github.com/camunda/camunda-tf-eks-module/blob/main/modules/fixtures/postgres-client.yml) -2. **Attach the policy to the role**: +1. Adapt the manifests to use the specific `serviceAccountName` (e.g., `aurora-access-sa`) you want to test. +2. Insert a sleep timer in the command to allow time to exec into the pod for live debugging. +3. Create the pod with the `kubectl apply` command: + ```bash + kubectl apply -f debug-client.yaml + ``` +4. Once the pod is running, connect to it with a bash shell (make sure to adjust the app label with your value): + ```bash + kubectl exec -it $(kubectl get pods -l app=REPLACE-WITH-LABEL -o jsonpath='{.items[0].metadata.name}') -- /bin/bash + ``` +5. Inside the pod, display all environment variables to check for IAM and AWS configurations: + ```bash + env + ``` + This command will print out all environment variables, including those related to IRSA. 
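+   Since the full output can be long, you can optionally narrow it down to the AWS/IRSA-related entries (a convenience only; `grep` availability depends on the image you chose for the debug pod):
+
+   ```bash
+   env | grep -E '^AWS_'
+   ```
+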
+ Inside the pod, validate that key environment variables are correctly injected: + - `AWS_WEB_IDENTITY_TOKEN_FILE`: Path to the token (JWT) file for WebIdentity. + - `AWS_ROLE_ARN`: ARN of the associated IAM role. + - `AWS_REGION`, `AWS_STS_REGIONAL_ENDPOINTS`, and other AWS configuration variables. -```bash -aws iam attach-role-policy \ - --role-name opensearch-role \ - --policy-arn arn:aws:iam:::policy/opensearch_policy -``` +To ensure that IRSA and role associations are functioning: - - +- Check that the expected `AWS_ROLE_ARN` and token are present. +- Decode the JWT token to validate the correct trust relationship with the service account and namespace. -Annotate the service account with the `iam_role_arn`: +#### Verify OpenSearch fine-grained access control (fgac) configuration -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - eks.amazonaws.com/role-arn: arn:aws:iam:::role/opensearch-role - name: - namespace: -``` - -Replace ``, ``, ``, and `` with the appropriate values for your Amazon OpenSearch Service and EKS setup. +For OpenSearch clusters, ensure [fine-grained access control](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html) is set up to allow the role’s access to the cluster. If you deployed OpenSearch with the [terraform reference architecture implementation for EKS](terraform-setup.md), fgac should already be configured. For manual deployments, follow the process outlined in the [OpenSearch configuration guide](terraform-setup.md#configure-opensearch-fine-grained-access-control) to apply similar controls. -This step is required to be repeated for Tasklist and Zeebe, to grant their service accounts access to OpenSearch. +#### Confirm PostgreSQL IAM role access -#### Database configuration +Verify that PostgreSQL roles are correctly configured to support IAM-based authentication. The database user should have the `rds_iam` role to allow IAM authentication. If the setup was automated with the [terraform reference architecture implementation for EKS](terraform-setup.md), the necessary access configuration should already be in place. For manual configurations, refer to [PostgreSQL configuration instructions](terraform-setup.md#configure-the-database-and-associated-access). -This setup is sufficient for Amazon OpenSearch Service clusters without `fine-grained access control`. +To test connectivity: -`Fine-grained access control` adds another layer of security to OpenSearch, requiring you to add a mapping between the IAM role and the internal OpenSearch role. Visit the [AWS documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html) on `fine-grained access control`. +- Run a manual connection test using the [PostgreSQL client manifest](https://raw.githubusercontent.com/camunda/camunda-tf-eks-module/refs/heads/main/modules/fixtures/postgres-client.yml). +- Use `psql` within the pod to verify the correct roles are assigned. Run: + ```bash + SELECT * FROM pg_roles WHERE rolname=''; + ``` + Confirm that `rds_iam` is listed among the assigned roles. -There are different ways to configure the mapping within Amazon OpenSearch Service: +#### Validate IAM Policies for each role -- Via a [Terraform module](https://registry.terraform.io/modules/idealo/opensearch/aws/latest) in case your OpenSearch instance is exposed. -- Via the [OpenSearch dashboard](https://opensearch.org/docs/latest/security/access-control/users-roles/). 
+Both trust and permission policies are crucial in configuring IAM Roles for Service Accounts (IRSA) in AWS. Each IAM role should have policies that precisely permit necessary actions and correctly trust the relevant Kubernetes service accounts associated with your components. -
    +##### AssumeRole policies -Via the REST API +In AWS, [AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) allows a user or service to assume a role and temporarily gain permissions to execute specific actions. Each role needs an **AssumeRole policy** that precisely matches AWS requirements for the specific services and actions your components perform. -To authorize the IAM role in OpenSearch for access, follow these steps: +For each IAM role, ensure the **trust policy** includes: -**_Note that this example uses basic authentication (username and password), which may not be the best practice for all scenarios, especially if fine-grained access control is enabled._** The endpoint used in this example is not exposed by default, so consult your OpenSearch documentation for specifics on enabling and securing this endpoint. +1. The correct `Service` field, allowing the pod’s service account to assume the role. +2. An `Action` for `sts:AssumeRoleWithWebIdentity`, as IRSA uses WebIdentity to enable IAM role assumption. -Use the following `curl` command to update the OpenSearch internal database and authorize the IAM role for access. Replace placeholders with your specific values: - -```bash -curl -sS -u ":" \ - -X PATCH \ - "https:///_opendistro/_security/api/rolesmapping/all_access?pretty" \ - -H 'Content-Type: application/json' \ - -d' -[ - { - "op": "add", - "path": "/backend_roles", - "value": [""] - } -] -' -``` +Verify that the policy is configured according to [AWS’s role trust policy guidelines](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) for Kubernetes IRSA. -- Replace `` and `` with your OpenSearch domain admin credentials. -- Replace `` with your OpenSearch endpoint URL. -- Replace `` with the IAM role name created by Terraform, which is output by the `opensearch_role` module. +##### Trust policies -
    +For each role, verify that the [trust policy syntax is correct](https://aws.amazon.com/fr/blogs/security/how-to-use-trust-policies-with-iam-roles/), allowing the appropriate service accounts to assume the role. Refer to AWS’s [trust policy validation tool](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_policy-validator.html) for [accurate syntax and configuration](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-reference-policy-checks.html). -The important part is assigning the `iam_role_arn` of the previously created `opensearch_role` to an internal role within Amazon OpenSearch Service. For example, `all_access` on the Amazon OpenSearch Service side is a good candidate, or if required, extra roles can be created with more restrictive access. +##### Permission policies -### Camunda 8 Self-Managed Helm chart configuration +Each IAM role should also have appropriate permission policies attached. These policies define what actions the role can perform on AWS resources. Verify that permission policies: -The following is an example configuration that can be used to configure the Camunda 8 Self-Managed Helm chart to use the feature set of IRSA for the Amazon OpenSearch Service Exporter: +- Are configured correctly to allow the necessary operations for your resources (e.g., read and write access to S3 buckets or access to RDS). +- Align with your security model by only granting the minimum required permissions. -```yaml -global: - elasticsearch: - enabled: false - opensearch: - enabled: true - aws: - enabled: true - url: - protocol: https - host: aws.opensearch.example.com - port: 443 +The AWS’s [policy simulator](https://policysim.aws.amazon.com/) is a valuable tool for testing how permissions are applied and for spotting misconfigurations. -elasticsearch: - enabled: false +#### If issues persist -optimize: - enabled: false -``` +If issues remain unresolved, compare your configuration with Camunda’s [reference architecture](terraform-setup.md) deployed with Terraform. This setup has been validated to work with IRSA and contains the correct permissions. By comparing it to your setup, you may identify discrepancies that are causing your issues. -:::note -Amazon OpenSearch Service listens on port 443 opposed to the usual port 9200. -::: - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -## Troubleshooting - -### Instance Metadata Service (IMDS) +## Instance Metadata Service (IMDS) [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html) is a default fallback for the AWS SDK due to the [default credentials provider chain](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/credentials-chain.html). Within the context of Amazon EKS, it means a pod will automatically assume the role of a node. This can hide many problems, including whether IRSA was set up correctly or not, since it will fall back to IMDS in case of failure and hide the actual error. @@ -617,3 +188,9 @@ eks_managed_node_group_defaults { ``` Overall, this will disable the role assumption of the node for the Kubernetes pod. Depending on the resulting error within Operate, Zeebe, and Web-Modeler, you'll get a clearer error, which is helpful to debug the error more easily. 
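+
+If you want to confirm that pods can no longer fall back to the node role, you can try to request an IMDSv2 token from a temporary pod. This is an optional sanity check; the pod name, namespace, and image below are illustrative. Depending on how IMDS access was restricted, the request should fail or time out:
+
+```bash
+kubectl run imds-check -n camunda --rm -i --tty --image amazonlinux -- \
+  sh -c "curl -sS --max-time 5 -X PUT http://169.254.169.254/latest/api/token -H 'X-aws-ec2-metadata-token-ttl-seconds: 60' || echo 'IMDS not reachable from this pod'"
+```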
+ +:::note Enabled by default in the terraform reference architecture of EKS + +In the [reference architecture with terraform](terraform-setup.md), this setting is configured like that by default. + +::: diff --git a/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md b/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md index 28c2429c2ee..7a440c0e3b0 100644 --- a/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md +++ b/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md @@ -1,12 +1,17 @@ --- id: eks-terraform -title: "Deploy an EKS cluster with Terraform" +title: "Deploy an EKS cluster with Terraform (advanced)" description: "Deploy an Amazon Kubernetes Cluster (EKS) with a Terraform module for a quick Camunda 8 setup." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + This guide offers a detailed tutorial for deploying an Amazon Web Services (AWS) Elastic Kubernetes Service (EKS) cluster, tailored explicitly for deploying Camunda 8 and using Terraform, a popular Infrastructure as Code (IaC) tool. -This is designed to help leverage the power of IaC to streamline and reproduce a Cloud infrastructure setup. By walking through the essentials of setting up an Amazon EKS cluster, configuring AWS IAM permissions, and integrating a PostgreSQL database, this guide explains the process of using Terraform with AWS, making it accessible even to those new to Terraform or IaC concepts. +It is recommended to use this guide for building a robust and sustainable infrastructure over time. However, for a quicker trial or proof of concept, using the [eksctl](./eksctl.md) method may suffice. + +This guide is designed to help leverage the power of Infrastructure as Code (IaC) to streamline and reproduce a cloud infrastructure setup. By walking through the essentials of setting up an Amazon EKS cluster, configuring AWS IAM permissions, and integrating a PostgreSQL database and an OpenSearch domain (as an alternative to Elasticsearch), this guide explains how to use Terraform with AWS, making it accessible even to those new to Terraform or IaC concepts. It utilizes AWS-managed services when available, providing these as an optional convenience that you can choose to use or not. :::tip @@ -14,16 +19,20 @@ If you are completely new to Terraform and the idea of IaC, read through the [Te ::: -## Prerequisites +## Requirements - An [AWS account](https://docs.aws.amazon.com/accounts/latest/reference/accounts-welcome.html) to create any resources within AWS. +- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources. - [Terraform (1.9+)](https://developer.hashicorp.com/terraform/downloads) -- [Kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. +- [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. +- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some Terraform variables. - [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured. - This simplifies the setup by not relying on explicit credentials and instead creating a mapping between IAM roles and Kubernetes service account based on a trust relationship. A [blog post](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/) by AWS visualizes this on a technical level. 
- This allows a Kubernetes service account to temporarily impersonate an AWS IAM role to interact with AWS services like S3, RDS, or Route53 without having to supply explicit credentials. + - IRSA is recommended as an [EKS best practice](https://aws.github.io/aws-eks-best-practices/security/docs/iam/). +- This guide uses GNU/Bash for all the shell commands listed. -## Considerations +### Considerations This setup provides an essential foundation for beginning with Camunda 8, though it's not tailored for optimal performance. It's a good initial step for preparing a production environment by incorporating [IaC tooling](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code). @@ -33,282 +42,677 @@ To try out Camunda 8 or develop against it, consider signing up for our [SaaS of For the simplicity of this guide, certain best practices will be provided with links to additional documents, enabling you to explore the topic in more detail. -:::warning +:::info Module update notice (November 2024) + +Modules referenced in this guide have been updated recently from **v2** to **v3**. For more information, refer to our [migration guide from v2 to v3](https://github.com/camunda/camunda-tf-eks-module/blob/main/guides/MIGRATION_GUIDE_v2_to_v3.md). + +::: + +:::warning Cost management + Following this guide will incur costs on your Cloud provider account, namely for the managed Kubernetes service, running Kubernetes nodes in EC2, Elastic Block Storage (EBS), and Route53. More information can be found on [AWS](https://aws.amazon.com/eks/pricing/) and their [pricing calculator](https://calculator.aws/#/) as the total cost varies per region. + ::: -## Outcome +### Variants + +We support two variants of this architecture: + +- The first, **standard installation**, utilizes a username and password connection for the Camunda components (or simply relies on network isolation for certain components). This option is straightforward and easier to implement, making it ideal for environments where simplicity and rapid deployment are priorities, or where network isolation provides sufficient security. + +- The second variant, **IRSA** (IAM Roles for Service Accounts), uses service accounts to perform authentication with IAM policies. This approach offers stronger security and better integration with AWS services, as it eliminates the need to manage credentials manually. It is especially beneficial in environments with strict security requirements, where fine-grained access control and dynamic role-based access are essential. + +#### How to choose + +- If you prefer a simpler setup with basic authentication or network isolation, and your security needs are moderate, the **standard installation** is a suitable choice. +- If you require enhanced security, dynamic role-based access management, and want to leverage AWS’s identity services for fine-grained control, the **IRSA** variant is the better option. + +Both can be set up with or without a **Domain** ([ingress](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html)). + +### Outcome + + Following this tutorial and steps will result in: - An Amazon EKS Kubernetes cluster running the latest Kubernetes version with four nodes ready for Camunda 8 installation. - The [EBS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) is installed and configured, which is used by the Camunda 8 Helm chart to create [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). 
-- A [managed Aurora PostgreSQL 15.8](https://aws.amazon.com/rds/postgresql/) instance to be used by the Camunda 8 components. +- A [managed Aurora PostgreSQL 15.x](https://aws.amazon.com/rds/postgresql/) instance to be used by the Camunda platform. +- A [managed OpenSearch domain](https://aws.amazon.com/opensearch-service/) created and configured for use with the Camunda platform. +- (optional) [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured. + - This simplifies the setup by not relying on explicit credentials, but instead allows creating a mapping between IAM roles and Kubernetes service accounts based on a trust relationship. A [blog post](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/) by AWS visualizes this on a technical level. + - This allows a Kubernetes service account to temporarily impersonate an AWS IAM role to interact with AWS services like S3, RDS, or Route53 without supplying explicit credentials. + +## 1. Configure AWS and initialize Terraform + +### Terraform prerequisites + +To manage the infrastructure for Camunda 8 on AWS using Terraform, we need to set up Terraform's backend to store the state file remotely in an S3 bucket. This ensures secure and persistent storage of the state file. -## Installing Amazon EKS cluster with Terraform +:::note +Advanced users may want to handle this part differently and use a different backend. The backend setup provided is an example for new users. +::: -### Terraform prerequsites +#### Set up AWS authentication -1. Create an empty folder to place your Terraform files in. -2. Create a `config.tf` with the following setup: +The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. Before you can use the provider, you must authenticate it using your AWS credentials. +You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods. -```hcl -terraform { - backend "local" { - path = "terraform.tfstate" - } +We recommend using the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials. - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 5.69" - } - } -} +To configure the AWS CLI: -provider "aws" { - region = "eu-central-1" -} +```bash +aws configure ``` -3. Set up the authentication for the `AWS` provider. +Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). -:::note +:::caution Ownership of the created resources -It's recommended to use a different backend than `local`. More information can be found in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/settings/backends/configuration). +A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user. 
+ +[Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl` ::: -:::note +#### Create an S3 bucket for Terraform state management + +Before setting up Terraform, you need to create an S3 bucket that will store the state file. This is important for collaboration and to prevent issues like state file corruption. + +To start, set the region as an environment variable upfront to avoid repeating it in each command: + +```bash +export AWS_REGION= +``` + +Replace `` with your chosen AWS region (for example, `eu-central-1`). + +Now, follow these steps to create the S3 bucket with versioning enabled: + +1. Open your terminal and ensure the AWS CLI is installed and configured. + +2. Run the following command to create an S3 bucket for storing your Terraform state. Make sure to use a unique bucket name and set the `AWS_REGION` environment variable beforehand: + + ```bash + # Replace "my-eks-tf-state" with your unique bucket name + export S3_TF_BUCKET_NAME="my-eks-tf-state" + + aws s3api create-bucket --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION" \ + --create-bucket-configuration LocationConstraint="$AWS_REGION" + ``` + +3. Enable versioning on the S3 bucket to track changes and protect the state file from accidental deletions or overwrites: + + ```bash + aws s3api put-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --versioning-configuration Status=Enabled --region "$AWS_REGION" + ``` + +4. Secure the bucket by blocking public access: + + ```bash + aws s3api put-public-access-block --bucket "$S3_TF_BUCKET_NAME" --public-access-block-configuration \ + "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" --region "$AWS_REGION" + ``` + +5. Verify versioning is enabled on the bucket: + + ```bash + aws s3api get-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION" + ``` + +This S3 bucket will now securely store your Terraform state files with versioning enabled. + +#### Create a `config.tf` with the following setup + +Once the S3 bucket is created, configure your `config.tf` file to use the S3 backend for managing the Terraform state: + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/config.tf +``` + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/config.tf +``` -The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. You must configure the provider with the proper credentials before using it. You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods. + + -There are several ways to authenticate the `AWS` provider. +#### Initialize Terraform -- (Recommended) Use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) to configure access. Terraform will automatically default to AWS CLI configuration when present. -- Set environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`, which can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). 
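+
+Before initializing Terraform, you can optionally confirm which identity the AWS CLI (and therefore Terraform) will use. This is only a sanity check, not a required step:
+
+```bash
+aws sts get-caller-identity
+```
+
+The returned account ID and ARN should correspond to the dedicated Terraform IAM user described above.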
+Once your `config.tf` and authentication are set up, you can initialize your Terraform project. The previous steps configured a dedicated S3 Bucket (`S3_TF_BUCKET_NAME`) to store your state, and the following creates a bucket key that will be used by your configuration. +Configure the backend and download the necessary provider plugins: + +```bash +export S3_TF_BUCKET_KEY="camunda-terraform/terraform.tfstate" + +echo "Storing terraform state in s3://$S3_TF_BUCKET_NAME/$S3_TF_BUCKET_KEY" + +terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" +``` + +Terraform will connect to the S3 bucket to manage the state file, ensuring remote and persistent storage. + +### EKS cluster module setup + +This module establishes the foundational configuration for AWS access and Terraform. + +We will utilize [Terraform modules](https://developer.hashicorp.com/terraform/language/modules), which allow us to abstract resources into reusable components, streamlining our infrastructure management. + +The [Camunda-provided module](https://github.com/camunda/camunda-tf-eks-module) is publicly available and offers a robust starting point for deploying an EKS cluster. It is highly recommended to review this module prior to implementation to understand its structure and capabilities. + +#### Set up the EKS cluster module + +1. Create a `cluster.tf` file in the same directory as your `config.tf` file. +2. Add the following content to your newly created `cluster.tf` file to utilize the provided module: + + + + + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/cluster.tf + ``` + + + + + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/cluster.tf + ``` + + + + +3. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command: + + ```bash + terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" + ``` + +4. Configure user access to the cluster. By default, the user who creates the Amazon EKS cluster has administrative access. + +
    + Grant cluster access to other users +

    + + If you want to grant access to other users, you can configure this by using the `access_entries` input. + + Amazon EKS access management is divided into two distinct layers: + + - The **first layer** involves **AWS IAM permissions**, which allow basic Amazon EKS functionalities such as interacting with the Amazon EKS UI and generating EKS access through the AWS CLI. The module handles this part for you by creating the necessary IAM roles and policies. + + - The **second layer** controls **cluster access** within Kubernetes, defining the user's permissions inside the cluster (for example, policy association). This can be configured directly through the module's `access_entries` input. + + To manage user access, use the `access_entries` configuration, introduced in module version [2.0.0](https://github.com/camunda/camunda-tf-eks-module/releases/tag/2.0.0): + + ```hcl + access_entries = { + example = { + kubernetes_groups = [] + principal_arn = "" + + policy_associations = { + example = { + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" + access_scope = { + namespaces = ["default"] + type = "namespace" + } + } + } + } + } + ``` + + In this configuration: + + - Replace `principal_arn` with the ARN of the IAM user or role. + - Use `policy_associations` to define policies for fine-grained access control. + + For a full list of available policies, refer to the [AWS EKS Access Policies documentation](https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html). + + :::info Module deprecation notice + Starting from version 2.x.x of this module, direct mappings through `aws_auth_roles` and `aws_auth_users` are no longer supported. If you are upgrading from version [1.x.x](https://github.com/camunda/camunda-tf-eks-module/releases/tag/1.0.3), you will need to fork the module and follow AWS's official instructions for managing the `aws-auth` ConfigMap. + + For more details, refer to the [official upgrade guide](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md). + ::: + +

    +
    + +5. Customize the cluster setup. The module offers various input options that allow you to further customize the cluster configuration. For a comprehensive list of available options and detailed usage instructions, refer to the [EKS module documentation](https://github.com/camunda/camunda-tf-eks-module/blob/2.6.0/modules/eks-cluster/README.md). + +### PostgreSQL module setup + +:::info Optional module + +If you don't want to use this module, you can skip this section. However, you may need to adjust the remaining instructions to remove references to this module. + +If you choose not to use this module, you must either provide a managed PostgreSQL service or use the internal deployment by the Camunda Helm chart in Kubernetes. ::: -:::warning +We separated the cluster and PostgreSQL modules to offer you more customization options. + +#### Set up the Aurora PostgreSQL module + +1. Create a `db.tf` file in the same directory as your `config.tf` file. +2. Add the following content to your newly created `db.tf` file to utilize the provided module: + + + + + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/db.tf + ``` + + + + + In addition to using standard username and password authentication, you can opt to use [**IRSA (IAM Roles for Service Accounts)**](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) for secure, role-based access to your Aurora database. This method allows your EKS workloads to assume IAM roles without needing to manage AWS credentials directly. + + :::note + Using IRSA is optional. If preferred, you can continue using traditional password-based authentication for database access. + ::: + + If you choose to use IRSA, you’ll need to take note of the **IAM role** created for Aurora and the **AWS Account ID**, as these will be used later to annotate the Kubernetes service account. + + ##### Aurora IRSA role and policy + + The Aurora module uses outputs from the EKS cluster module to configure the IRSA role and policy. Below are the required parameters: -Do not store sensitive information (credentials) in your Terraform files. + Here’s how to define the IAM role trust policy and access policy for Aurora: + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/db.tf + ``` + + Once the IRSA configuration is complete, ensure you **record the IAM role name** (from the `iam_aurora_role_name` configuration), it is required to annotate the Kubernetes service account in the next step. + + + + +3. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command: + + ```bash + terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" + ``` + +4. Customize the Aurora cluster setup through various input options. Refer to the [Aurora module documentation](https://github.com/camunda/camunda-tf-eks-module/blob/2.6.0/modules/aurora/README.md) for more details on other customization options. + +### OpenSearch module setup + +:::info Optional module + +If you don't want to use this module, you can skip this section. However, you may need to adjust the remaining instructions to remove references to this module. + +If you choose not to use this module, you'll need to either provide a managed Elasticsearch or OpenSearch service or use the internal deployment by the Camunda Helm chart in Kubernetes. 
::: -:::warning +The OpenSearch module creates an OpenSearch domain intended for Camunda platform. OpenSearch is a powerful alternative to Elasticsearch. For more information on using OpenSearch with Camunda, refer to the [Camunda documentation](/self-managed/setup/guides/using-existing-opensearch.md). -A user who creates resources in AWS will therefore own these resources. In this particular case, the user will always have admin access to the Kubernetes cluster until the cluster is deleted. +:::note Migration to OpenSearch is not supported -Therefore, it can make sense to create an extra [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) which credentials are used for Terraform purposes. +Using Amazon OpenSearch Service requires [setting up a new Camunda installation](/self-managed/setup/overview.md). Migration from previous Camunda versions or Elasticsearch environments is currently not supported. Switching between Elasticsearch and OpenSearch, in either direction, is also not supported. ::: -### Cluster module +#### Set up the OpenSearch domain module -This module creates the basic layout that configures AWS access and Terraform. +1. Create a `opensearch.tf` file in the same directory as your `config.tf` file. +1. Add the following content to your newly created `opensearch.tf` file to utilize the provided module: -The following will use [Terraform modules](https://developer.hashicorp.com/terraform/language/modules), which allows abstracting resources into reusable components. + + -The [Camunda provided module](https://github.com/camunda/camunda-tf-eks-module/tree/2.5.0/modules/eks-cluster) is publicly available. It's advisable to review this module before usage. + :::caution Network based security + The standard deployment for OpenSearch relies on the first layer of security, which is the Network. + While this setup allows easy access, it may expose sensitive data. To enhance security, consider implementing IAM Roles for Service Accounts (IRSA) to restrict access to the OpenSearch cluster, providing a more secure environment. + For more information, see the [Amazon OpenSearch Service Fine-Grained Access Control documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html#fgac-access-policies). + ::: -1. In the folder where your `config.tf` resides, create an additional `cluster.tf`. -2. Paste the following content into the newly created `cluster.tf` file to make use of the provided module: + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/opensearch.tf + ``` -```hcl -module "eks_cluster" { - source = "git::https://github.com/camunda/camunda-tf-eks-module//modules/eks-cluster?ref=2.5.0" + + + - region = "eu-central-1" # change to your AWS region - name = "cluster-name" # change to name of your choosing + In addition to standard authentication, which uses anonymous users and relies on the network for access control, you can also use [**IRSA (IAM Roles for Service Accounts)**](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) to securely connect to OpenSearch. IRSA enables your Kubernetes workloads to assume IAM roles without managing AWS credentials directly. - # Set CIDR ranges or use the defaults - cluster_service_ipv4_cidr = "10.190.0.0/16" - cluster_node_ipv4_cidr = "10.192.0.0/16" -} -``` + :::note + Using IRSA is optional. If you prefer, you can continue using password-based access to your OpenSearch domain. 
+ ::: -There are various other input options to customize the cluster setup further; see the [module documentation](https://github.com/camunda/camunda-tf-eks-module/tree/2.5.0/modules/eks-cluster). + If you choose to use IRSA, you’ll need to take note of the **IAM role name** created for OpenSearch and the **AWS Account ID**, as these will be required later to annotate the Kubernetes service account. -### PostgreSQL module + ##### OpenSearch IRSA role and policy -The resulting PostgreSQL instance and default database `camunda` is intended to be used with Keycloak. You may manually add extra databases after creation for Identity with multi-tenancy. -This will not be covered in this guide as the Identity default for multi-tenancy is to be disabled. + To configure IRSA for OpenSearch, the OpenSearch module uses outputs from the EKS cluster module to define the necessary IAM role and policies. -We separated the cluster and PostgreSQL modules from each other to allow more customization options to the user. + Here's an example of how to define the IAM role trust policy and access policy for OpenSearch. This configuration deploys an OpenSearch domain with advanced security enabled: -1. In the folder where your `config.tf` resides, create an additional `db.tf` file. -2. Paste the following contents into `db.tf` to make use of the provided module: + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/opensearch.tf + ``` -```hcl -module "postgresql" { - source = "git::https://github.com/camunda/camunda-tf-eks-module//modules/aurora?ref=2.5.0" - engine_version = "15.8" - auto_minor_version_upgrade = false - cluster_name = "cluster-name-postgresql" # change "cluster-name" to your name - default_database_name = "camunda" + Once the IRSA configuration is complete, ensure you **record the IAM role name** (from the `iam_opensearch_role_name` configuration), as it is required to annotate the Kubernetes service account in the next step; a sketch of such an annotation is shown at the end of this section. - # Please supply your own secret values - username = "secret_user" - password = "secretvalue%23" - vpc_id = module.eks_cluster.vpc_id - subnet_ids = module.eks_cluster.private_subnet_ids - cidr_blocks = concat(module.eks_cluster.private_vpc_cidr_blocks, module.eks_cluster.public_vpc_cidr_blocks) - instance_class = "db.t3.medium" - iam_auth_enabled = true + As the OpenSearch domain has advanced security enabled and [fine-grained access control](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html), we will later use your provided master username (`advanced_security_master_user_name`) and password (`advanced_security_master_user_password`) to perform the initial setup of the security component, allowing the created IRSA role to access the domain. - depends_on = [module.eks_cluster] -} -``` + + + +1. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command: + + ```bash + terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" + ``` -To manage secrets in Terraform, we recommend [injecting those via Vault](https://developer.hashicorp.com/terraform/tutorials/secrets/secrets-vault). +1. Customize the cluster setup using various input options. For a full list of available parameters, see the [OpenSearch module documentation](https://github.com/camunda/camunda-tf-eks-module/blob/2.6.0/modules/opensearch/README.md). 
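+
+For reference, annotating a Kubernetes service account with an IRSA role follows the same pattern for the Aurora and the OpenSearch roles. The following is only a sketch: the service account name is illustrative, the `camunda` namespace is created later in this guide, and the annotation is typically applied through the Camunda 8 Helm chart values in the follow-up guide rather than by hand. Replace the account ID and role name with the values you recorded above.
+
+```bash
+# Sketch: attach a recorded IRSA role to a hypothetical service account named "camunda-app".
+# Adjust the service account name to the one your deployment actually uses.
+kubectl annotate serviceaccount camunda-app \
+  --namespace camunda \
+  eks.amazonaws.com/role-arn="arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>"
+```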
+ +### Define outputs + +**Terraform** allows you to define outputs, which make it easier to retrieve important values generated during execution, such as database endpoints and other necessary configurations for Helm setup. + +Each module that you have previously set up contains an output definition at the end of the file. You can adjust them to your needs. + +Outputs allow you to easily reference the **cert-manager** ARN, **external-dns** ARN, and the endpoints for both **PostgreSQL** and **OpenSearch** in subsequent steps or scripts, streamlining your deployment process. ### Execution -1. Open a terminal in the created Terraform folder where `config.tf` and `cluster.tf` are. -2. Initialize the working directory: +:::note Secret management -```hcl -terraform init -``` +We strongly recommend managing sensitive information such as the OpenSearch, Aurora username and password using a secure secrets management solution like HashiCorp Vault. For details on how to inject secrets directly into Terraform via Vault, see the [Terraform Vault Secrets Injection Guide](https://developer.hashicorp.com/terraform/tutorials/secrets/secrets-vault). + +::: -3. Apply the configuration files: +1. Open a terminal in the created Terraform folder where `config.tf` and other `.tf` files are. -```hcl -terraform apply -``` +2. Plan the configuration files: -4. After reviewing the plan, you can type `yes` to confirm and apply the changes. + ```bash + terraform plan -out cluster.plan # describe what will be created + ``` -At this point, Terraform will create the Amazon EKS cluster with all the necessary configurations. The completion of this process may require approximately 20-30 minutes. +3. After reviewing the plan, you can confirm and apply the changes. -## (Optional) AWS IAM access management + ```bash + terraform apply cluster.plan # apply the creation + ``` -Kubernetes access is divided into two distinct layers. The first involves AWS IAM permissions, which enable basic Amazon EKS functionalities such as using the Amazon EKS UI and generating Amazon EKS access through the AWS CLI. The second layer provides access within the cluster itself, determining the user's permissions within the Kubernetes cluster. +Terraform will now create the Amazon EKS cluster with all the necessary configurations. The completion of this process may require approximately 20-30 minutes for each component. -As a result, we must initially grant the user adequate AWS IAM permissions and subsequently assign them a specific role within the Kubernetes cluster for proper access management. +### Reference files -### AWS IAM permissions +Depending on the installation path you have chosen, you can find the reference files used on this page: -A minimum set of permissions is required to access an Amazon EKS cluster to allow a user to execute `aws eks update-kubeconfig` to update the local `kubeconfig` with cluster access to the Amazon EKS cluster. +- **Standard installation:** [Reference Files](https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/) +- **IRSA Installation:** [Reference Files](https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/) -The policy should look as follows and can be restricted to specific Amazon EKS clusters if required: +## 2. 
Preparation for Camunda 8 installation -```json -cat <./policy-eks.json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "eks:DescribeCluster", - "eks:ListClusters" - ], - "Resource": "*" - } - ] -} -EOF -``` +### Access the created EKS cluster -Via the AWS CLI, you can run the following to create the above policy in AWS IAM. +You can gain access to the Amazon EKS cluster via the `AWS CLI` using the following command: ```shell -aws iam create-policy --policy-name "BasicEKSPermissions" --policy-document file://policy-eks.json +export CLUSTER_NAME="$(terraform console << update-kubeconfig --name +kubectl create namespace camunda ``` -### Terraform AWS IAM permissions - -The user creating the Amazon EKS cluster has admin access by default. -To manage user access use the `access_entries` configuration introduced in module version [2.0.0](https://github.com/camunda/camunda-tf-eks-module/releases/tag/2.0.0): - -```hcl -access_entries = { - example = { - kubernetes_groups = [] - principal_arn = "" - - policy_associations = { - example = { - policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" - access_scope = { - namespaces = ["default"] - type = "namespace" - } - } - } - } -} +In the remainder of the guide, we reference the `camunda` namespace to create some required resources in the Kubernetes cluster, such as secrets or one-time setup jobs. + +### Export values for the Helm chart + +After configuring and deploying your infrastructure with Terraform, follow these instructions to export key values for use in Helm charts to deploy [Camunda 8 on Kubernetes](./eks-helm.md). + +The following commands will export the required outputs as environment variables. You may need to omit some if you have chosen not to use certain modules. These values will be necessary for deploying Camunda 8 with Helm charts: + + + + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/export-helm-values.sh ``` -In this updated configuration: + + + -- `principal_arn` should be replaced with the ARN of the IAM user or role. -- `policy_associations` allow you to associate policies for fine-grained access control. +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/procedure/export-helm-values.sh +``` -For a list of policies, please visit the [AWS EKS Access Policies documentation](https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html). +:::note IRSA users -:::info +To authenticate and authorize access to PostgreSQL and OpenSearch, **you do not need to export the PostgreSQL or OpenSearch passwords**, IRSA will handle the authentication. -Please note that the version 2.x.x of this module no longer supports direct mappings via `aws_auth_roles` and `aws_auth_users`. If you are upgrading from version [1.x.x](https://github.com/camunda/camunda-tf-eks-module/releases/tag/1.0.3), fork the module repository and follow the official AWS instructions for managing the `aws-auth` ConfigMap. -For more details, refer to the [official upgrade guide](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md). +**However**, you will still need to export the relevant usernames and other settings to Helm. ::: -## Outputs + + -Terraform can define outputs to make the retrieval of values generated as part of the execution easier; for example, DB endpoints or values required for the Helm setup. 
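+
+The referenced `export-helm-values.sh` scripts follow the usual pattern of reading Terraform outputs with `terraform output -raw` and exporting them as environment variables for the Helm chart. A minimal sketch of that pattern is shown below; the output and variable names are illustrative and must match the outputs you defined in your modules.
+
+```bash
+# Sketch: map Terraform outputs to environment variables consumed later by the Helm values.
+export CERT_MANAGER_IRSA_ARN="$(terraform output -raw cert_manager_arn)"
+export EXTERNAL_DNS_IRSA_ARN="$(terraform output -raw external_dns_arn)"
+export DB_HOST="$(terraform output -raw postgres_endpoint)"
+```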
+Ensure that you use the actual values you passed to the Terraform module during the setup of PostgreSQL and OpenSearch. -1. In the folder where your `config.tf` resides, create an additional `output.tf`. -2. Paste the following content to expose those variables: +### Configure the database and associated access -```hcl -output "cert_manager_arn" { - value = module.eks_cluster.cert_manager_arn - description = "The Amazon Resource Name (ARN) of the AWS IAM Roles for Service Account mapping for the cert-manager" -} +As you now have a database, you need to create dedicated databases for each Camunda component and an associated user that have a configured access. Follow these steps to create the database users and configure access. -output "external_dns_arn" { - value = module.eks_cluster.external_dns_arn - description = "The Amazon Resource Name (ARN) of the AWS IAM Roles for Service Account mapping for the external-dns" -} +You can access the created database in two ways: -output "postgres_endpoint" { - value = module.postgresql.aurora_endpoint - description = "The Postgres endpoint URL" -} -``` +1. **Bastion host:** Set up a bastion host within the same network to securely access the database. +2. **Pod within the EKS cluster:** Deploy a pod in your EKS cluster equipped with the necessary tools to connect to the database. -3. Run `terraform apply` again to print the outputs in the terraform state. +The choice depends on your infrastructure setup and security preferences. In this guide, we'll use a pod within the EKS cluster to configure the database. -We can now export those values to environment variables to be used by Helm charts: +1. In your terminal, set the necessary environment variables that will be substituted in the setup manifest: -```shell -export CERT_MANAGER_IRSA_ARN=$(terraform output -raw cert_manager_arn) + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/vars-create-db.sh + ``` -export EXTERNAL_DNS_IRSA_ARN=$(terraform output -raw external_dns_arn) + A **Kubernetes job** will connect to the database and create the necessary users with the required privileges. The script installs the necessary dependencies and runs SQL commands to create the IRSA user and assign it the correct roles and privileges. -export DB_HOST=$(terraform output -raw postgres_endpoint) -``` +2. Create a secret that references the environment variables: -4. Export required values for the [Camunda 8 on Kubernetes](./eks-helm.md) guide. The values will likely differ based on your definitions in the [PostgreSQL setup](#postgresql-module), so ensure you use the values passed to the Terraform module. + + -```shell -# Example guide values, ensure you use the values you pass to the Terraform module -export PG_USERNAME="secret_user" -export PG_PASSWORD="secretvalue%23" -export DEFAULT_DB_NAME="camunda" -``` + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/procedure/create-setup-db-secret.sh + ``` + + This command creates a secret named `setup-db-secret` and dynamically populates it with the values from your environment variables. + + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-db-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. 
+ + + + + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/procedure/create-setup-db-secret.sh + ``` + + This command creates a secret named `setup-db-secret` and dynamically populates it with the values from your environment variables. + + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-db-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. + + + + +3. Save the following manifest to a file, for example, `setup-postgres-create-db.yml`. + + + + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/setup-postgres-create-db.yml + ``` + + + + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/setup-postgres-create-db.yml + ``` + + + + +4. Apply the manifest: + + ```bash + kubectl apply -f setup-postgres-create-db.yml --namespace camunda + ``` + + Once the secret is created, the **Job** manifest from the previous step can consume this secret to securely access the database credentials. + +5. Once the job is created, monitor its progress using: + + ```bash + kubectl get job/create-setup-user-db --namespace camunda --watch + ``` + + Once the job shows as `Completed`, the users and databases will have been successfully created. + +6. View the logs of the job to confirm that the users were created and privileges were granted successfully: + + ```bash + kubectl logs job/create-setup-user-db --namespace camunda + ``` + +7. Clean up the resources: + + ```bash + kubectl delete job create-setup-user-db --namespace camunda + kubectl delete secret setup-db-secret --namespace camunda + ``` + +Running these commands cleans up both the job and the secret, ensuring that no unnecessary resources remain in the cluster. + +### Configure OpenSearch fine-grained access control + +As you now have an OpenSearch domain, you need to configure the related access for each Camunda component. + +You can access the created OpenSearch domain in two ways: + +1. **Bastion host:** Set up a bastion host within the same network to securely access the OpenSearch domain. +2. **Pod within the EKS cluster:** Alternatively, deploy a pod in your EKS cluster equipped with the necessary tools to connect to the OpenSearch domain. + +The choice depends on your infrastructure setup and security preferences. In this tutorial, we'll use a pod within the EKS cluster to configure the domain. + + + + +The standard installation comes already pre-configured, and no additional steps are required. + + + + +1. In your terminal, set the necessary environment variables that will be substituted in the setup manifest: + + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/procedure/vars-create-os.sh + ``` + + A **Kubernetes job** will connect to the OpenSearch domain and configure it. + +1. Create a secret that references the environment variables: + + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/procedure/create-setup-os-secret.sh + ``` + + This command creates a secret named `setup-os-secret` and dynamically populates it with the values from your environment variables. 
+ + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-os-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. + +1. Save the following manifest to a file, for example, `setup-opensearch-fgac.yml`. + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7-irsa/setup-opensearch-fgac.yml + ``` + +1. Apply the manifest: + + ```bash + kubectl apply -f setup-opensearch-fgac.yml --namespace camunda + ``` + + Once the secret is created, the **Job** manifest from the previous step can consume this secret to securely access the OpenSearch domain credentials. + +1. Once the job is created, monitor its progress using: + + ```bash + kubectl get job/setup-opensearch-fgac --namespace camunda --watch + ``` + + Once the job shows as `Completed`, the OpenSearch domain is configured correctly for fine grained access control. + +1. View the logs of the job to confirm that the privileges were granted successfully: + + ```bash + kubectl logs job/setup-opensearch-fgac --namespace camunda + ``` + +1. Clean up the resources: + + ```bash + kubectl delete job setup-opensearch-fgac --namespace camunda + kubectl delete secret setup-os-secret --namespace camunda + ``` + +Running these commands will clean up both the job and the secret, ensuring that no unnecessary resources remain in the cluster. + + + -## Next steps +## 3. Install Camunda 8 using the Helm chart -Install Camunda 8 using Helm charts by following our installation guide [Camunda 8 on Kubernetes](./eks-helm.md). +Now that you've exported the necessary values, you can proceed with installing Camunda 8 using Helm charts. Follow the guide [Camunda 8 on Kubernetes](./eks-helm.md) for detailed instructions on deploying the platform to your Kubernetes cluster. diff --git a/docs/self-managed/setup/deploy/local/c8run.md b/docs/self-managed/setup/deploy/local/c8run.md index 13f06b5603f..881836191e4 100644 --- a/docs/self-managed/setup/deploy/local/c8run.md +++ b/docs/self-managed/setup/deploy/local/c8run.md @@ -36,7 +36,7 @@ If no version of Java is found, follow your chosen installation's instructions f ## Install and start Camunda 8 Run -1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/c8run-8.6.2) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. +1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/8.7.0-alpha2) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. 2. Navigate to the new `c8run` directory. 3. Start Camunda 8 Run by running `./start.sh` (or `.\c8run.exe start` on Windows) in your terminal. @@ -130,7 +130,7 @@ curl --request POST 'http://localhost:8080/v1/process-definitions/search' \ :::note -Some endpoints in the [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) are considered [alpha features](/reference/alpha-features.md), and are still in development. +Some endpoints in the [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) are considered [alpha features](/components/early-access/alpha/alpha-features.md), and are still in development. ::: 1. 
Log in as user 'demo' and store the cookie in the file `cookie.txt`: diff --git a/docs/self-managed/setup/deploy/local/local-kubernetes-cluster.md b/docs/self-managed/setup/deploy/local/local-kubernetes-cluster.md index 158da8cc6f8..6f0332a1c2e 100644 --- a/docs/self-managed/setup/deploy/local/local-kubernetes-cluster.md +++ b/docs/self-managed/setup/deploy/local/local-kubernetes-cluster.md @@ -54,9 +54,7 @@ helm repo update If you are deploying Camunda 8 with Ingress configuration, make sure to add additional values to the file you just downloaded `camunda-platform-core-kind-values.yaml` as described in [connecting to Camunda 8 components](#connecting-to-camunda-8-components). ::: -3. Install Camunda 8 using the `camunda-platform-core-kind-values.yaml` file you downloaded previously. This file might contain additional values if you are adding Ingress, TLS, or using a variety of other configuration properties. See [Camunda Helm chart parameters](https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters). - -4. Execute the following command: +3. Install Camunda 8 using the `camunda-platform-core-kind-values.yaml` file you downloaded previously. This file might contain additional values if you are adding Ingress, TLS, or using a variety of other configuration properties. See [Camunda Helm chart parameters](https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters). Execute the following command: ```sh helm install camunda-platform camunda/camunda-platform \ @@ -67,7 +65,7 @@ This will deploy Camunda 8 components (Optimize, Connectors, and Zeebe), but wit Depending on your machine hardware and internet connection speed, the services might take some time to get started as it will download the Docker images of all Camunda 8 components to your local kind cluster. -5. Check that each pod is running and ready with `kubectl get pods`. If one or more of your pods are pending for long time, it means it cannot be scheduled onto a node. Usually, this happens because there are insufficient resources that prevent it. Use the `kubectl describe ` command to check its status. +4. Check that each pod is running and ready with `kubectl get pods`. If one or more of your pods are pending for long time, it means it cannot be scheduled onto a node. Usually, this happens because there are insufficient resources that prevent it. Use the `kubectl describe ` command to check its status. ## Connecting to Camunda 8 components @@ -91,7 +89,7 @@ First, port-forward each of the components. Use a separate terminal for each com ## Connecting to the workflow engine -To interact with the Camunda workflow engine via Zeebe Gateway using [zbctl](/apis-tools/community-clients/cli-client/cli-get-started.md) or a local client/worker from outside the Kubernetes cluster, run `kubectl port-forward` to the Zeebe gateway as follows: +To interact with the Camunda workflow engine via Zeebe Gateway using [zbctl](/apis-tools/community-clients/cli-client/cli-get-started.md) or a local client/worker from outside the Kubernetes cluster, run `kubectl port-forward` to the Zeebe Gateway as follows: ```sh kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 @@ -109,9 +107,7 @@ To get a full list of the deployed Camunda components and their network properti -Camunda 8 Self-Managed has multiple web applications and gRPC services. Both can be accessed using Kubernetes Ingress. - -In this example, we will use a combined Ingress configuration. 
For more information, refer to [combined and separated Ingress setup](/self-managed/setup/guides/ingress-setup.md). +Camunda 8 Self-Managed has multiple web applications and gRPC services. These can be accessed using Kubernetes Ingress. For more information, refer to the [Ingress setup guide](/self-managed/setup/guides/ingress-setup.md). ## Prerequisites diff --git a/docs/self-managed/setup/deploy/local/manual.md b/docs/self-managed/setup/deploy/local/manual.md index 053d9b8454b..d03b18b592c 100644 --- a/docs/self-managed/setup/deploy/local/manual.md +++ b/docs/self-managed/setup/deploy/local/manual.md @@ -70,7 +70,7 @@ For **Windows users**, take the following steps: 4. Navigate to the `bin` folder. 5. Start the broker by double-clicking on the `broker.bat` file. -Once the Zeebe broker has started, it should produce the following output: +Once the Zeebe Broker has started, it should produce the following output: ```log 23:39:13.246 [] [main] INFO io.camunda.zeebe.broker.system - Scheduler configuration: Threads{cpu-bound: 2, io-bound: 2}. diff --git a/docs/self-managed/setup/deploy/openshift/redhat-openshift.md b/docs/self-managed/setup/deploy/openshift/redhat-openshift.md index b512b1636a3..2dc2a19d437 100644 --- a/docs/self-managed/setup/deploy/openshift/redhat-openshift.md +++ b/docs/self-managed/setup/deploy/openshift/redhat-openshift.md @@ -33,7 +33,7 @@ We conduct testing and ensure compatibility against the following OpenShift vers | 4.13.x | November 17, 2024 | :::caution -Compatibility is not guaranteed for OpenShift versions no longer supported by Red Hat, as per the End of Support Date. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift). +Camunda 8 supports OpenShift versions in the Red Hat General Availability, Full Support, and Maintenance Support life cycle phases. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift). ::: ## Deploying Camunda 8 in OpenShift diff --git a/docs/self-managed/setup/guides/accessing-components-without-ingress.md b/docs/self-managed/setup/guides/accessing-components-without-ingress.md index 706b4293756..07615294c12 100644 --- a/docs/self-managed/setup/guides/accessing-components-without-ingress.md +++ b/docs/self-managed/setup/guides/accessing-components-without-ingress.md @@ -18,7 +18,7 @@ To interact with Camunda workflow engine via [Zeebe Gateway](/self-managed/zeebe kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 ``` -Now, you can connect and execute operations against your new Zeebe cluster. This allows you to use `zbctl` as a command line interface to read and create resources inside the Zeebe broker. +Now, you can connect and execute operations against your new Zeebe cluster. This allows you to use `zbctl` as a command line interface to read and create resources inside the Zeebe Broker. :::note Accessing the Zeebe cluster directly using `kubectl port-forward` is recommended for development purposes. 
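+
+For example, with the port-forward from above still running, you can verify the connection by querying the cluster topology with `zbctl`. This is a sketch and assumes plaintext (non-TLS) communication on the forwarded port:
+
+```bash
+# Sketch: check the Zeebe cluster topology through the forwarded gateway port.
+zbctl status --address localhost:26500 --insecure
+```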
diff --git a/docs/self-managed/setup/guides/configure-db-custom-headers.md b/docs/self-managed/setup/guides/configure-db-custom-headers.md index 53ad5ef04ca..b0571223ec9 100644 --- a/docs/self-managed/setup/guides/configure-db-custom-headers.md +++ b/docs/self-managed/setup/guides/configure-db-custom-headers.md @@ -89,47 +89,26 @@ Include the plugin parameters in each component's `application.yaml`, or pass th The following examples add the new `my-plugin` JAR to the `application.yaml` for Zeebe, Operate, and Tasklist: -#### Configure Zeebe Exporter - +#### Configure Zeebe Exporter + ```yaml - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_INTERCEPTORPLUGINS_0_ID=my-plugin - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar ``` - - - - -```yaml -- ZEEBE_BROKER_EXPORTERS_OPENSEARCH_ARGS_INTERCEPTORPLUGINS_0_ID=my-plugin -- ZEEBE_BROKER_EXPORTERS_OPENSEARCH_ARGS_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin -- ZEEBE_BROKER_EXPORTERS_OPENSEARCH_ARGS_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar -``` - - - - #### Configure Operate Importer - - - - ```yaml - CAMUNDA_OPERATE_ZEEBEELASTICSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin - CAMUNDA_OPERATE_ZEEBEELASTICSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin @@ -139,33 +118,8 @@ The following examples add the new `my-plugin` JAR to the `application.yaml` for - CAMUNDA_OPERATE_ELASTICSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar ``` - - - - -```yaml -- CAMUNDA_OPERATE_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin -- CAMUNDA_OPERATE_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin -- CAMUNDA_OPERATE_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar -- CAMUNDA_OPERATE_OPENSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin -- CAMUNDA_OPERATE_OPENSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin -- CAMUNDA_OPERATE_OPENSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar -``` - - - - #### Configure Tasklist Importer - - - - ```yaml - CAMUNDA_TASKLIST_ZEEBEELASTICSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin - CAMUNDA_TASKLIST_ZEEBEELASTICSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin @@ -175,10 +129,43 @@ The following examples add the new `my-plugin` JAR to the `application.yaml` for - CAMUNDA_TASKLIST_ELASTICSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar ``` +#### Configure Optimize Importer + +:::note +Due to technical limitations, Optimize currently allows registering up to 5 plugins. 
+::: + +```yaml +- CAMUNDA_OPTIMIZE_ELASTICSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin +- CAMUNDA_OPTIMIZE_ELASTICSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin +- CAMUNDA_OPTIMIZE_ELASTICSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar +``` + +#### Configure Zeebe Exporter + +```yaml +- ZEEBE_BROKER_EXPORTERS_OPENSEARCH_ARGS_INTERCEPTORPLUGINS_0_ID=my-plugin +- ZEEBE_BROKER_EXPORTERS_OPENSEARCH_ARGS_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin +- ZEEBE_BROKER_EXPORTERS_OPENSEARCH_ARGS_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar +``` + +#### Configure Operate Importer + +```yaml +- CAMUNDA_OPERATE_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin +- CAMUNDA_OPERATE_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin +- CAMUNDA_OPERATE_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar +- CAMUNDA_OPERATE_OPENSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin +- CAMUNDA_OPERATE_OPENSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin +- CAMUNDA_OPERATE_OPENSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar +``` + +#### Configure Tasklist Importer + ```yaml - CAMUNDA_TASKLIST_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin - CAMUNDA_TASKLIST_ZEEBEOPENSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin @@ -188,38 +175,32 @@ The following examples add the new `my-plugin` JAR to the `application.yaml` for - CAMUNDA_TASKLIST_OPENSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar ``` - - - #### Configure Optimize Importer :::note Due to technical limitations, Optimize currently allows registering up to 5 plugins. ::: - - - - ```yaml -- CAMUNDA_OPTIMIZE_ELASTICSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin -- CAMUNDA_OPTIMIZE_ELASTICSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin -- CAMUNDA_OPTIMIZE_ELASTICSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar +- CAMUNDA_OPTIMIZE_OPENSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin +- CAMUNDA_OPTIMIZE_OPENSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin +- CAMUNDA_OPTIMIZE_OPENSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar ``` - + + +#### Configure Zeebe Exporter + +:::note +The following configuration uses the default name `camundaExporter`. To use a custom name, update `CAMUNDAEXPORTER` in the provided environment variables to match the name defined in your exporter [configuration](/self-managed/zeebe-deployment/exporters/camunda-exporter.md). +::: ```yaml -- CAMUNDA_OPTIMIZE_OPENSEARCH_INTERCEPTORPLUGINS_0_ID=my-plugin -- CAMUNDA_OPTIMIZE_OPENSEARCH_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin -- CAMUNDA_OPTIMIZE_OPENSEARCH_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar +- ZEEBE_BROKER_EXPORTERS_CAMUNDAEXPORTER_ARGS_CONNECT_INTERCEPTORPLUGINS_0_ID=my-plugin +- ZEEBE_BROKER_EXPORTERS_CAMUNDAEXPORTER_ARGS_CONNECT_INTERCEPTORPLUGINS_0_CLASSNAME=com.myplugin.MyCustomHeaderPlugin +- ZEEBE_BROKER_EXPORTERS_CAMUNDAEXPORTER_ARGS_CONNECT_INTERCEPTORPLUGINS_0_JARPATH=/usr/local/plugin/plg.jar ``` diff --git a/docs/self-managed/setup/guides/connect-to-an-oidc-provider.md b/docs/self-managed/setup/guides/connect-to-an-oidc-provider.md index f51e1f47b3a..77122f018bb 100644 --- a/docs/self-managed/setup/guides/connect-to-an-oidc-provider.md +++ b/docs/self-managed/setup/guides/connect-to-an-oidc-provider.md @@ -34,11 +34,14 @@ configuration](#component-specific-configuration) to ensure the components are c

    Steps

    1. In your OIDC provider, create an application for each of the components you want to connect. The expected redirect URI of the component you are configuring an app for can be found in [component-specific configuration](#component-specific-configuration). -2. Make a note of the following values for each application you create: +2. For all Components, ensure the appropriate application type is used: + - **Operate, Tasklist, Optimize, Identity:** Web applications requiring confidential access/a confidential client + - **Web Modeler, Console:** Single-page applications requiring public access/a public client +3. Make a note of the following values for each application you create: - Client ID - Client secret - Audience -3. Set the following environment variables for the component you are configuring an app for: +4. Set the following environment variables for the component you are configuring an app for: @@ -103,7 +106,7 @@ global: -:::warning +:::note Once set, you cannot update your initial claim name and value using environment or Helm values. You must change these values directly in the database. ::: @@ -124,7 +127,7 @@ Ensure you register a new application for each component. 2. Navigate to the new application's **Overview** page, and make note of the **Client ID**. 3. Within your new application, [configure a platform](https://learn.microsoft.com/en-gb/entra/identity-platform/quickstart-register-app#configure-platform-settings) for the appropriate component: - **Web**: Operate, Tasklist, Optimize, Identity - - **Single-page application**: Modeler + - **Single-page application**: Modeler, Console 4. Add your component's **Microsoft Entra ID** redirect URI, found under [Component-specific configuration](#component-specific-configuration). 5. [Create a new client secret](https://learn.microsoft.com/en-gb/entra/identity-platform/quickstart-register-app?tabs=client-secret#add-credentials), and note the new secret's value for later use. 6. Set the following environment variables for the component you are configuring an app for: @@ -135,8 +138,8 @@ Ensure you register a new application for each component. ``` CAMUNDA_IDENTITY_TYPE=MICROSOFT CAMUNDA_IDENTITY_BASE_URL= - CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 - CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 CAMUNDA_IDENTITY_CLIENT_ID= CAMUNDA_IDENTITY_CLIENT_SECRET= CAMUNDA_IDENTITY_AUDIENCE= @@ -152,13 +155,13 @@ Ensure you register a new application for each component. 
global: identity: auth: - issuer: https://login.microsoftonline.com//v2.0 + issuer: https://login.microsoftonline.com//v2.0 # this is used for container to container communication - issuerBackendUrl: https://login.microsoftonline.com//v2.0 - tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token - jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys + issuerBackendUrl: https://login.microsoftonline.com//v2.0 + tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token + jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys type: "MICROSOFT" - publicIssuerUrl: https://login.microsoftonline.com//v2.0 + publicIssuerUrl: https://login.microsoftonline.com//v2.0 identity: clientId: existingSecret: @@ -184,7 +187,7 @@ global: zeebe: clientId: audience: - existingSecret: + existingSecret: tokenScope: "/.default" webModeler: clientId: diff --git a/docs/self-managed/setup/guides/ingress-setup.md b/docs/self-managed/setup/guides/ingress-setup.md index 4c67751e418..f95f7dcb840 100644 --- a/docs/self-managed/setup/guides/ingress-setup.md +++ b/docs/self-managed/setup/guides/ingress-setup.md @@ -1,22 +1,17 @@ --- id: ingress-setup -title: "Combined and separated Ingress setup" -description: "Camunda 8 Self-Managed combined and separated Ingress setup" +title: "Ingress setup" +description: "Camunda 8 Self-Managed Ingress setup and example configuration." --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; :::caution -The separated Ingress configuration has been deprecated in version 8.6. To ensure a smooth upgrade experience for new installations, we recommend using the **combined Ingress setup**. +The separated Ingress configuration has been removed in Camunda version 8.7. This guide covers a **combined Ingress setup**. ::: -Camunda 8 Self-Managed has multiple web applications and gRPC services. Both can be accessed externally using Ingress. There are two ways to do this: - -1. **Combined setup:** In this setup, there are two Ingress objects: one Ingress object for all Camunda 8 web applications using a single domain. Each application has a sub-path e.g. `camunda.example.com/operate`, and `camunda.example.com/optimize` and another Ingress which uses gRPC protocol for Zeebe Gateway e.g. `zeebe.camunda.example.com`. -2. **Separated setup:** In this setup, each component has its own Ingress/host e.g. `operate.camunda.example.com`, `optimize.camunda.example.com`, `zeebe.camunda.example.com`, etc. - -There are no significant differences between the two setups. Rather, they both offer flexibility for different workflows. +Camunda 8 Self-Managed has multiple web applications and gRPC services. Both can be accessed externally using Ingress with a **combined setup.** In this configuration, there are two Ingress objects: one Ingress object for all Camunda 8 web applications using a single domain. Each application has a sub-path, for example `camunda.example.com/operate`, and `camunda.example.com/optimize`, and another Ingress that uses gRPC protocol for Zeebe Gateway, for example `zeebe.camunda.example.com`. :::note Camunda 8 Helm chart doesn't manage or deploy Ingress controllers, it only deploys Ingress resources. Hence, this Ingress setup will not work without an Ingress controller running in your cluster. @@ -25,19 +20,10 @@ Camunda 8 Helm chart doesn't manage or deploy Ingress controllers, it only deplo ## Preparation - An Ingress controller should be deployed in advance. 
The examples below use the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), but any Ingress controller could be used by setting `ingress.className`. -- TLS configuration is not handled in the examples because it varies between different workflows. It could be configured directly using `ingress.tls` options or via an external tool like [Cert-Manager](https://github.com/cert-manager/cert-manager) using `ingress.annotations`. For more details, check available [configuration options](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-latest#configuration). +- TLS configuration is not handled in the examples because it varies between different workflows. It could be configured directly using `ingress.tls` options or via an external tool like [Cert-Manager](https://github.com/cert-manager/cert-manager) using `ingress.annotations`. For more details, check available [configuration options](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-8.6#configuration). ## Configuration - - - - In this setup, a single Ingress/domain is used to access Camunda 8 web applications, and another for Zeebe Gateway. By default, all web applications use `/` as a base, so we just need to set the context path, Ingress configuration, and authentication redirect URLs. ![Camunda 8 Self-Managed Architecture Diagram - Combined Ingress](../../assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png) @@ -124,124 +110,6 @@ Once deployed, you can access the Camunda 8 components on: - **Keycloak authentication:** `https://camunda.example.com/auth` - **Zeebe Gateway:** `grpc://zeebe.camunda.example.com` - - - - -In this configuration, every Camunda 8 component is assigned its own Ingress and Domain. The use of a context path is unnecessary because the default base path `/` is used for each Ingress/Domain. In this setup, you only need to provide the Ingress settings and specify the Identity authentication redirect URLs. - -![Camunda 8 Self-Managed Architecture Diagram - Separated Ingress](../../assets/camunda-platform-8-self-managed-architecture-diagram-separated-ingress.png) - -```yaml -# Chart values for the Camunda 8 Helm chart in combined Ingress setup. - -# This file deliberately contains only the values that differ from the defaults. -# For changes and documentation, use your favorite diff tool to compare it with: -# https://artifacthub.io/packages/helm/camunda/camunda-platform - -# IMPORTANT: Make sure to change "camunda.example.com" to your domain. 
- -global: - identity: - auth: - publicIssuerUrl: "https://keycloak.camunda.example.com/auth/realms/camunda-platform" - operate: - redirectUrl: "https://operate.camunda.example.com" - tasklist: - redirectUrl: "https://tasklist.camunda.example.com" - optimize: - redirectUrl: "https://optimize.camunda.example.com" - webModeler: - redirectUrl: "https://modeler.camunda.example.com" - console: - redirectUrl: "https://console.camunda.example.com" - connectors: - redirectUrl: "https://connectors.camunda.example.com" - -identity: - ingress: - enabled: true - className: nginx - host: "identity.camunda.example.com" - fullURL: "https://identity.camunda.example.com" - -identityKeycloak: - ingress: - enabled: true - ingressClassName: nginx - hostname: "keycloak.camunda.example.com" - -operate: - ingress: - enabled: true - className: nginx - host: "operate.camunda.example.com" - -optimize: - ingress: - enabled: true - className: nginx - host: "optimize.camunda.example.com" - -tasklist: - ingress: - enabled: true - className: nginx - host: "tasklist.camunda.example.com" - -zeebeGateway: - ingress: - rest: - enabled: true - className: nginx - host: "zeebe.camunda.example.com" - grpc: - enabled: true - className: nginx - host: "zeebe-grpc.camunda.example.com" - -webModeler: - ingress: - enabled: true - className: nginx - webapp: - host: "modeler.camunda.example.com" - websockets: - host: "modeler-ws.camunda.example.com" - -console: - ingress: - enabled: true - className: nginx - host: "console.camunda.example.com" - -connectors: - ingress: - enabled: true - className: nginx - host: "connectors.camunda.example.com" -``` - -:::note Web Modeler -The configuration above only contains the Ingress-related values under `webModeler`. Review the additional [installation instructions and configuration hints](/self-managed/setup/install.md#installing-web-modeler). -::: - -Incorporate the custom values mentioned in the example above into the value file you're using to deploy Camunda as outlined in [deploying Camunda 8](/self-managed/setup/install.md): - -```shell -helm install demo camunda/camunda-platform -f values-separated-ingress.yaml -``` - -Once deployed, you can access the Camunda 8 components on: - -- **Applications:** `https://[identity|operate|optimize|tasklist|modeler|console|zeebe].camunda.example.com` -- **Keycloak authentication:** `https://keycloak.camunda.example.com` -- **Zeebe Gateway:** `grpc://zeebe-grpc.camunda.example.com` - - - - - ## Ingress controllers Ingress resources require the cluster to have an [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) running. There are many options for configuring your Ingress Controller. If you are using a cloud provider such as AWS or GCP, follow their Ingress setup guides if an Ingress Controller is not already pre-installed. Ingress configuration for AWS EKS can be found in [install Camunda 8 on an EKS cluster](/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md). diff --git a/docs/self-managed/setup/guides/multi-namespace-deployment.md b/docs/self-managed/setup/guides/multi-namespace-deployment.md index e8d861e1aa7..ee34ec31a8d 100644 --- a/docs/self-managed/setup/guides/multi-namespace-deployment.md +++ b/docs/self-managed/setup/guides/multi-namespace-deployment.md @@ -4,11 +4,11 @@ title: "Multi-namespace deployment" description: "Deploy Camunda 8 Self-Managed across several namespaces for better resource management and environment separation." 
--- -Camunda 8 Self-Managed offers flexible deployment options that allow it to span multiple namespaces. This setup consists of a management cluster, which includes the Console, Identity, and Web Modeler components, along with several automation clusters (including Zeebe, Operate, Tasklist, and Optimize). +Camunda 8 Self-Managed offers flexible deployment options that allow it to span multiple namespaces. This setup consists of a management cluster, which includes the Console, Identity, and Web Modeler components, along with several orchestration clusters (including Zeebe, Operate, Tasklist, and Optimize). For this configuration, each namespace is set up independently through Helm, with deployments classified into two types: management and automation. Each type has a specific values file designed for its deployment requirements. -Below, we illustrate multi-namespace Camunda deployment: one namespace will be dedicated to the management cluster, and the other two will be used for the automation cluster. +Below, we illustrate multi-namespace Camunda deployment: one namespace will be dedicated to the management cluster, and the other two will be used for the orchestration cluster. ## Management deployment @@ -63,7 +63,7 @@ helm install camunda camunda/camunda-platform \ ## Team One deployment -Let's create a Camunda automation cluster that can be owned and managed by Team One and will be deployed into namespace `camunda-team01`. This deployment includes Zeebe, Operate, Tasklist, and Optimize, and authenticates against Keycloak in the Management deployment: +Let's create a Camunda orchestration cluster that can be owned and managed by Team One and will be deployed into namespace `camunda-team01`. This deployment includes Zeebe, Operate, Tasklist, and Optimize, and authenticates against Keycloak in the Management deployment: ```yaml # File: camunda-team01.yaml diff --git a/docs/self-managed/setup/guides/using-existing-opensearch.md b/docs/self-managed/setup/guides/using-existing-opensearch.md index c8c7f96d0d1..bb0869093a0 100644 --- a/docs/self-managed/setup/guides/using-existing-opensearch.md +++ b/docs/self-managed/setup/guides/using-existing-opensearch.md @@ -12,18 +12,12 @@ This guide steps through using an existing Amazon OpenSearch Service instance. B ### Authentication -There are two layers of permissions with OpenSearch: AWS IAM and OpenSearch internal. If you would like to connect to OpenSearch using AWS IAM roles for service accounts (IRSA) then please also refer to the [IAM roles for service accounts documentation](/self-managed/setup/deploy/amazon/amazon-eks/irsa.md#OpenSearch). +There are two layers of permissions with OpenSearch: AWS IAM and OpenSearch internal. If you would like to connect to OpenSearch using AWS IAM roles for service accounts (IRSA) then please also refer to the [IAM roles for service accounts documentation](/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md#opensearch-module-setup). Otherwise, if it is intended to connect to Amazon OpenSearch Service with basic auth, then the example below can be followed: ## Values file -:::caution - -The migration step within Optimize is currently not supported with OpenSearch. Disable the migration as shown in the example below. 
- -::: - The following values can be configured in the Camunda 8 Self-Managed Helm chart in order to use Amazon OpenSearch Service: ### Connecting to Amazon OpenSearch Service with basic auth @@ -42,10 +36,6 @@ global: host: opensearch.example.com port: 443 -optimize: - migration: - enabled: false - elasticsearch: enabled: false ``` @@ -56,7 +46,7 @@ If you do not wish to specify the username and password in plaintext within the ```yaml global: - opensearcn: + opensearch: auth: existingSecret: secretName existingSecretKey: secretKey diff --git a/docs/self-managed/setup/install.md b/docs/self-managed/setup/install.md index b3a083fd18e..c8349617535 100644 --- a/docs/self-managed/setup/install.md +++ b/docs/self-managed/setup/install.md @@ -34,18 +34,18 @@ The following charts will be installed as part of Camunda 8 Self-Managed: - **Web Modeler**: Deploys the Web Modeler component that allows you to model BPMN processes in a collaborative way. - _Note_: The chart is disabled by default and needs to be [enabled explicitly](#install-web-modeler). - **Console**: Deploys Camunda Console Self-Managed. - - _Note_: The chart is disabled by default and needs to be [enabled explicitly](#install-console) as the Console is only available to enterprise customers. + - _Note_: The chart is disabled by default and needs to be [enabled explicitly](#install-console). :::note Amazon OpenSearch Helm support The existing Helm charts use the Elasticsearch configurations by default. The Helm charts can still be used to connect to Amazon OpenSearch Service. Refer to [using Amazon OpenSearch Service](/self-managed/setup/guides/using-existing-opensearch.md). **Zeebe**: Configure the [OpenSearch exporter](/self-managed/zeebe-deployment/exporters/opensearch-exporter.md). -**Operate** & **Tasklist**: These components use the same parameters for both Elasticsearch and OpenSearch. Replace the `elasticsearch` part of the relevant configuration key with `opensearch`, together with its appropriate value. +**Operate**, **Tasklist**, and **Optimize**: These components use the same parameters for both Elasticsearch and OpenSearch. Replace the `elasticsearch` part of the relevant configuration key with `opensearch`, together with its appropriate value. -For example, `CAMUNDA_OPERATE_ELASTICSEARCH_URL` becomes `CAMUNDA_OPERATE_OPENSEARCH_URL`. +For example, `CAMUNDA_OPERATE_ELASTICSEARCH_URL` becomes `CAMUNDA_OPERATE_OPENSEARCH_URL`. In the case of Optimize, please make sure all variables have the proper `CAMUNDA_OPTIMIZE` prefix, i.e. `OPTIMIZE_ELASTICSEARCH_HTTP_PORT` becomes `CAMUNDA_OPTIMIZE_OPENSEARCH_HTTP_PORT`. -Refer to the [Operate](/self-managed/operate-deployment/operate-configuration.md#settings-for-opensearch) and [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#elasticsearch-or-opensearch) configuration documentation for additional component configuration parameters to update. +Refer to the [Operate](/self-managed/operate-deployment/operate-configuration.md#settings-for-opensearch), [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#elasticsearch-or-opensearch) and [Optimize]($optimize$/self-managed/optimize-deployment/configuration/system-configuration/#opensearch) configuration documentation for additional component configuration parameters to update. 
::: ![Camunda 8 Self-Managed Architecture Diagram](../assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png) @@ -220,7 +220,7 @@ helm install camunda camunda/camunda-platform --version 8.1 \ By default, Camunda services deployed in a cluster are not accessible from outside the cluster. However, you can choose from several methods to connect to these services: - **Port forwarding:** This method allows you to direct traffic from your local machine to the cluster, making it possible to access Camunda services directly. For detailed instructions, refer to [accessing components without Ingress](/self-managed/setup/guides/accessing-components-without-ingress.md). -- **Ingress configuration:** You can set up the NGINX Ingress controller to manage external service access. This can be done by combining components Ingress in a single domain or configuring separate Ingress for each component. For detailed instructions, refer to [combined and separated Ingress setup](/self-managed/setup/guides/ingress-setup.md). +- **Ingress configuration:** You can set up the NGINX Ingress controller to manage external service access. For detailed instructions, refer to the [Ingress setup guide](/self-managed/setup/guides/ingress-setup.md). - **EKS cluster installation:** For those deploying Camunda 8 on an Amazon EKS cluster, refer to [installing Camunda 8 on an EKS cluster](/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md). ## Configure license key @@ -280,11 +280,11 @@ global: Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on startup or functionality, with the exception that Web Modeler has a limitation of five users. ::: -## Configuring Enterprise components and Connectors +## Configuring Web Modeler, Console, and Connectors -### Enterprise components secret +### Web Modeler and Console secrets -Enterprise components such as Console are published in Camunda's private Docker registry (registry.camunda.cloud) and are exclusive to enterprise customers. These components are not available in public repositories. +The Console and Web Modeler Components are published in Camunda's private Docker registry (registry.camunda.cloud) and are under a [proprietary license](/reference/licenses.md#web-modeler-and-console). These components are not available in public repositories. To enable Kubernetes to pull the images from this registry, first [create an image pull secret](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) using the credentials you received from Camunda: @@ -339,7 +339,7 @@ To set up Web Modeler, you need to provide the following required configuration - Configure the database connection - Web Modeler requires a PostgreSQL database as persistent data storage (other database systems are currently not supported). - _Option 1_: Set `postgresql.enabled: true`. This will install a new PostgreSQL instance as part of the Helm release (using the [PostgreSQL Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) by Bitnami as a dependency). - - _Option 2_: Set `postgresql.enabled: false` and configure a [connection to an external database](#optional-configure-external-database). + - _Option 2_: Set `postgresql.enabled: false` and configure a connection to an external database (see the second example below). We recommend specifying these values in a YAML file that you pass to the `helm install` command. 
A minimum configuration file would look as follows: @@ -376,11 +376,11 @@ For more details, check [Web Modeler Helm values](https://artifacthub.io/package ### Install Console -Console Self-Managed is an [Enterprise component](/reference/licenses.md#console), which means it is disabled by default in the Camunda 8 Helm chart since it requires an Enterprise license to access the Camunda container registry. +Console Self-Managed is disabled by default in the Camunda 8 Helm chart, as it requires a [proprietary license](/reference/licenses.md#web-modeler-and-console) to access the Camunda container registry. To install Console, two steps are required: -1. [Create a secret with Camunda registry credentials](#enterprise-components-secret). +1. [Create a secret with Camunda registry credentials](#web-modeler-and-console-secrets). 2. Enable Console, and reference the created Kubernetes secret object via Helm values. ```yaml @@ -394,7 +394,7 @@ console: For more details, check [Console Helm values](https://artifacthub.io/packages/helm/camunda/camunda-platform#console-parameters). :::note -Console Self-Managed requires the Identity component to authenticate. Camunda Helm Chart installs Identity by default. When logging in to Console when using port-forward, port-forward Keycloak service `kubectl port-forward svc/-keycloak 18080:80` or configure Identity with Ingress as described in [combined and separated Ingress setup](/self-managed/setup/guides/ingress-setup.md). +Console Self-Managed requires the Identity component to authenticate. Camunda Helm Chart installs Identity by default. When logging in to Console when using port-forward, port-forward the Keycloak service `kubectl port-forward svc/-keycloak 18080:80` or configure Identity with Ingress as described in the [Ingress setup guide](/self-managed/setup/guides/ingress-setup.md). ::: @@ -418,5 +418,5 @@ For upgrading the Camunda Helm chart from one release to another, perform a [Hel ## General notes -- **Zeebe gateway** is deployed as a stateless service. We support [Kubernetes startup and liveness probes](/self-managed/zeebe-deployment/configuration/gateway-health-probes.md) for Zeebe. +- **Zeebe Gateway** is deployed as a stateless service. We support [Kubernetes startup and liveness probes](/self-managed/zeebe-deployment/configuration/gateway-health-probes.md) for Zeebe. - **Zeebe broker nodes** need to be deployed as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) to preserve the identity of cluster nodes. StatefulSets require persistent storage, which must be allocated in advance. Depending on your cloud provider, the persistent storage differs as it is provider-specific. diff --git a/docs/self-managed/setup/upgrade.md b/docs/self-managed/setup/upgrade.md index 6a67f9de5e4..70505ec5227 100644 --- a/docs/self-managed/setup/upgrade.md +++ b/docs/self-managed/setup/upgrade.md @@ -9,7 +9,9 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; :::note -When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the **next major version**. +When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the next **major** version of the chart. + +For example, if the current Helm chart version is 10.x.x, and the latest next major version is 11.0.1, the recommended upgrade is to 11.0.1 (not 11.0.0). 
::: Upgrading between minor versions of the Camunda Helm chart may require [configuration changes](#update-your-configuration). To upgrade between patch versions or when no configuration changes are required, see the [`helm upgrade`](#identity-disabled) instructions. @@ -114,9 +116,7 @@ Ensure to use Helm CLI with version `3.14.3` or more. The upgrade could fail to #### Deprecation notes -The following keys in the values file have been changed in Camunda Helm chart v10.0.2. For compatibility, the keys are deprecated in the Camunda release cycle 8.5 and will be removed in the Camunda 8.6 release (October 2024). - -We highly recommend updating the keys in your values file rather than waiting until the 8.6 release. +The following keys were deprecated in 8.5, and their removal has been delayed until the release of Camunda 8.7 (January 2025). We highly recommend updating the keys in your values file rather than waiting until the 8.7 release. | Component | Old Key | New Key | | ------------- | ---------------------------------- | ----------------------------------- | @@ -136,8 +136,6 @@ We highly recommend updating the keys in your values file rather than waiting un | | `global.elasticsearch.host` | `global.elasticsearch.url.host` | | | `global.elasticsearch.port` | `global.elasticsearch.url.port` | -Also, the Web Modeler PostgreSQL key will be changed in the 8.6 release (the new key `webModelerPostgresql` will not work in any chart using Camunda 8.5). - | Component | Old Key | New Key | | ----------- | ------------ | ---------------------- | | Web Modeler | diff --git a/docs/self-managed/tasklist-deployment/data-retention.md b/docs/self-managed/tasklist-deployment/data-retention.md index ce9da9ca7c4..94c86bdd23a 100644 --- a/docs/self-managed/tasklist-deployment/data-retention.md +++ b/docs/self-managed/tasklist-deployment/data-retention.md @@ -56,6 +56,4 @@ Only indices containing dates in their suffix may be deleted. OpenSearch does not support the Index Lifecycle Management (ILM) Policy, and instead uses Index State Management (ISM). The same environment variables that are used to activate ILM on Elasticsearch can be used to activate ISM on OpenSearch. -```yaml -As of the 8.4 release, Tasklist is now compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. -``` +As of the 8.4 release, Tasklist is compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. 
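As a concrete illustration of the note above, activating ILM (or ISM on OpenSearch) is purely a configuration step. A hedged sketch of the corresponding container environment variables follows; the variable names are assumptions based on the Tasklist archiver settings and should be verified against the data retention configuration:

```yaml
# Sketch only: enable index lifecycle management (ILM) for Tasklist indices.
# The variable names are assumptions from the Tasklist archiver configuration;
# the same variables activate ISM when the backend is OpenSearch.
env:
  - name: CAMUNDA_TASKLIST_ARCHIVER_ILM_ENABLED
    value: "true"
  - name: CAMUNDA_TASKLIST_ARCHIVER_ILM_MIN_AGE_FOR_DELETE_ARCHIVED_INDICES
    value: "30d"
```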
diff --git a/docs/self-managed/tasklist-deployment/importer-and-archiver.md b/docs/self-managed/tasklist-deployment/importer-and-archiver.md index c75e1fe9655..3950abebce1 100644 --- a/docs/self-managed/tasklist-deployment/importer-and-archiver.md +++ b/docs/self-managed/tasklist-deployment/importer-and-archiver.md @@ -32,7 +32,7 @@ Each single importer/archiver node must be configured using the following config | ------------------------------------------ | -------------------------------------------------------------------------------------- | --------------------------------------------------- | | camunda.tasklist.clusterNode.partitionIds | Array of Zeebe partition ids this importer (or archiver) node must be responsible for. | Empty array, meaning all partitions data is loaded. | | camunda.tasklist.clusterNode.nodeCount | Total amount of Importer (or archiver) nodes in the cluster. | 1 | -| camunda.tasklist.clusterNode.currentNodeId | Id of current Importer (or archiver) node, starting from 0. | 0 | +| camunda.tasklist.clusterNode.currentNodeId | ID of current Importer (or archiver) node, starting from 0. | 0 | It's enough to configure either `partitionIds` or a pair of `nodeCount` and `currentNodeId`. If you provide `nodeCount` and `currentNodeId`, each node will automatically guess the Zeebe partitions they're responsible for. @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.tasklist.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/docs/self-managed/tasklist-deployment/tasklist-authentication.md b/docs/self-managed/tasklist-deployment/tasklist-authentication.md index 81be5e1415d..da8765da0a5 100644 --- a/docs/self-managed/tasklist-deployment/tasklist-authentication.md +++ b/docs/self-managed/tasklist-deployment/tasklist-authentication.md @@ -117,7 +117,7 @@ For more information, visit the [Identity documentation](/self-managed/concepts/ ## Use Identity JWT token to access Tasklist API -Tasklist provides a [GraphQL API](/apis-tools/tasklist-api/tasklist-api-overview.md) under the endpoint `/graphql`. Clients can access this API using a JWT access token in an authorization header `Authorization: Bearer `. +Tasklist provides a [REST API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) under the endpoint `/v1`. Clients can access this API using a JWT access token in an authorization header `Authorization: Bearer `. :::note Be aware a JWT token is intended to be used for M2M communication and is therefore issued for the relevant application, not for the user. @@ -127,7 +127,7 @@ Be aware a JWT token is intended to be used for M2M communication and is therefo 1. [Add an application in Identity](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). 2. 
[Add permissions to an application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for Tasklist API. -3. Obtain a token to access the GraphQL API. +3. Obtain a token to access the REST API. You will need: - `client_id` and `client_secret` from Identity application you created. - URL of the authorization server will look like: `http://:/auth/realms/camunda-platform/protocol/openid-connect/token`, where host and port reference Keycloak URL (e.g. `localhost:18080`). @@ -157,7 +157,7 @@ Take the `access_token` value from the response object and store it as your toke 4. Send the token as an authorization header in each request. In this case, request all tasks. ```shell -curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer " -d '{"query": "{tasks(query:{}){id name}}"}' http://localhost:8080/graphql +curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer " http://localhost:8080/v1/tasks/search ``` ### User task access restrictions diff --git a/docs/self-managed/tasklist-deployment/tasklist-configuration.md b/docs/self-managed/tasklist-deployment/tasklist-configuration.md index c2c7dd029af..ffb2b6b4f35 100644 --- a/docs/self-managed/tasklist-deployment/tasklist-configuration.md +++ b/docs/self-managed/tasklist-deployment/tasklist-configuration.md @@ -136,9 +136,9 @@ camunda.tasklist: selfSigned: true ``` -## Zeebe broker connection +## Zeebe Broker connection -Tasklist needs a connection to Zeebe broker to start the import. +Tasklist needs a connection to the Zeebe Broker to start the import. ### Settings to connect diff --git a/docs/self-managed/zeebe-deployment/configuration/broker.md b/docs/self-managed/zeebe-deployment/configuration/broker.md index 2c44bdd2466..92beedd381b 100644 --- a/docs/self-managed/zeebe-deployment/configuration/broker.md +++ b/docs/self-managed/zeebe-deployment/configuration/broker.md @@ -2,7 +2,7 @@ id: broker-config title: "Broker configuration" sidebar_label: "Broker configuration" -description: "Let's analyze how to configure the Zeebe broker" +description: "Let's analyze how to configure the Zeebe Broker" --- A complete broker configuration template is available in the [Zeebe repo](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template). @@ -386,7 +386,7 @@ This section contains all cluster related configurations, to setup a zeebe clust | Field | Description | Example Value | | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | -| nodeId | Specifies the unique id of this broker node in a cluster. The id should be between 0 and number of nodes in the cluster (exclusive). This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_NODEID`. | 0 | +| nodeId | Specifies the unique ID of this broker node in a cluster. 
The ID should be between 0 and number of nodes in the cluster (exclusive). This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_NODEID`. | 0 | | partitionsCount | Controls the number of partitions, which should exist in the cluster. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT`. | 1 | | replicationFactor | Controls the replication factor, which defines the count of replicas per partition. The replication factor cannot be greater than the number of nodes in the cluster. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR`. | 1 | | clusterSize | Specifies the zeebe cluster size. This value is used to determine which broker is responsible for which partition. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_CLUSTERSIZE`. | 1 | @@ -473,6 +473,26 @@ membership: syncInterval: 10s ``` +### zeebe.broker.cluster.configManager.gossip + +Configure the parameters used to propagate the dynamic cluster configuration across brokers and gateways. + +| Field | Description | ExampleValue | +| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | +| syncDelay | Sets the interval between two synchronization requests to other members of the cluster. This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_CONFIGMANAGER_GOSSIP_SYNCDELAY | 10s | +| syncRequestTimeout | Sets the timeout for the synchronization requests. This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_CONFIGMANAGER_GOSSIP_SYNCREQUESTTIMEOUT | 2s | +| gossipFanout | Sets the number of cluster members the configuration is gossiped to. This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_CONFIGMANAGER_GOSSIP_GOSSIPFANOUT | 2 | + +#### YAML snippet + +```yaml +configManager: + gossip: + syncDelay: 10s + syncRequestTimeout: 2s + gossipFanout: 2 +``` + ### zeebe.broker.cluster.messageCompression This feature is useful when the network latency between the nodes is very high (for example when nodes are deployed in different data centers). @@ -591,11 +611,11 @@ request: ### zeebe.broker.flowControl.request.gradient -| Field | Description | Example Value | -| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. 
This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_RTTTOLERANCE` | 2.0 | +| Field | Description | Example Value | +| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_RTTTOLERANCE` | 2.0 | #### YAML snippet @@ -610,12 +630,12 @@ request: ### zeebe.broker.flowControl.request.gradient2 -| Field | Description | Example Value | -| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_LONGWINDOW`. | 600 | +| Field | Description | Example Value | +| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_MINLIMIT`. 
| 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_RTTTOLERANCE`. | 2.0 | +| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_LONGWINDOW`. | 600 | #### YAML snippet @@ -742,11 +762,11 @@ backpressure: ### zeebe.broker.backpressure.gradient -| Field | Description | Example Value | -| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | +| Field | Description | Example Value | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. 
This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | #### YAML snippet @@ -761,12 +781,12 @@ backpressure: ### zeebe.broker.backpressure.gradient2 -| Field | Description | Example Value | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | +| Field | Description | Example Value | +| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | +| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | #### YAML snippet @@ -806,6 +826,10 @@ An example configuration for the Elasticsearch exporter can be found [here](../e An example configuration for the OpenSearch exporter can be found [here](../exporters/opensearch-exporter.md#example). 
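Before the exporter-specific sections below, it may help to see the general shape of an exporter entry. The following is a minimal sketch of a broker `application.yaml` enabling the packaged Elasticsearch exporter; the URL is illustrative, and the linked example pages document the full argument set:

```yaml
# Sketch only: minimal exporter entry in the broker application.yaml.
# The Elasticsearch exporter ships with Zeebe, so no jarPath is required;
# the URL is an illustrative local default, not a recommendation.
zeebe:
  broker:
    exporters:
      elasticsearch:
        className: io.camunda.zeebe.exporter.ElasticsearchExporter
        args:
          url: http://localhost:9200
```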
+### zeebe.broker.exporters.camundaExporter (Camunda Exporter) + +An example configuration for the Camunda exporter can be found [here](../exporters/camunda-exporter.md#example). + ### zeebe.broker.processing | Field | Description | Example Value | diff --git a/docs/self-managed/zeebe-deployment/configuration/gateway.md b/docs/self-managed/zeebe-deployment/configuration/gateway.md index 2c43c323087..b0b31c27b81 100644 --- a/docs/self-managed/zeebe-deployment/configuration/gateway.md +++ b/docs/self-managed/zeebe-deployment/configuration/gateway.md @@ -2,7 +2,7 @@ id: gateway-config title: "Gateway configuration" sidebar_label: "Gateway configuration" -description: "Analyze how to configure the Zeebe gateway, including byte sizes, time units, paths, and sample YAML snippets." +description: "Analyze how to configure the Zeebe Gateway, including byte sizes, time units, paths, and sample YAML snippets." --- The Zeebe Gateway can be configured similarly to the broker via the `application.yaml` file or environment variables. A complete gateway configuration template is available in the [Zeebe repository](https://github.com/camunda/camunda/blob/main/dist/src/main/config/gateway.yaml.template). @@ -154,7 +154,7 @@ If you use the Helm charts, both properties are configured for you already. | contactPoint | WARNING: This setting is deprecated! Use initialContactPoints instead. Sets the broker the gateway should initial contact. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_CONTACTPOINT`. | 127.0.0.1:26502 | | requestTimeout | Sets the timeout of requests sent to the broker cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_REQUESTTIMEOUT`. | 15s | | clusterName | Sets name of the Zeebe cluster to connect to. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_CLUSTERNAME`. | zeebe-cluster | -| memberId | Sets the member id of the gateway in the cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERID`. | gateway | +| memberId | Sets the member ID of the gateway in the cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERID`. | gateway | | host | Sets the host the gateway node binds to for internal cluster communication. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_HOST`. | 0.0.0.0 | | port | Sets the port the gateway node binds to for internal cluster communication. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_PORT`. | 26502 | | advertisedHost | Controls the advertised host; if omitted defaults to the host. This is particularly useful if your gateway stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_ADVERTISEDHOST`. | 0.0.0.0 | @@ -208,6 +208,26 @@ membership: syncInterval: 10s ``` +### zeebe.gateway.cluster.configManager.gossip + +Configure the parameters used to propagate the dynamic cluster configuration across brokers and gateways. 
+ +| Field | Description | ExampleValue | +| ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | +| syncDelay | Sets the interval between two synchronization requests to other members of the cluster. This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_CONFIGMANAGER_GOSSIP_SYNCDELAY | 10s | +| syncRequestTimeout | Sets the timeout for the synchronization requests. This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_CONFIGMANAGER_GOSSIP_SYNCREQUESTTIMEOUT | 2s | +| gossipFanout | Sets the number of cluster members the configuration is gossiped to. This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_CONFIGMANAGER_GOSSIP_GOSSIPFANOUT | 2 | + +#### YAML snippet + +```yaml +configManager: + gossip: + syncDelay: 10s + syncRequestTimeout: 2s + gossipFanout: 2 +``` + ### zeebe.gateway.cluster.security The cluster security configuration options allow securing communication between the gateway and other nodes in the cluster. @@ -390,7 +410,8 @@ Each interceptor should be configured with the values described below:
    - - @@ -168,14 +169,17 @@ Only if the increased latency does not work for your use case, for example, beca

    Evaluate an expression using JUEL.

    - -
    IntentElement idElement ID Element type
    classNameEntry point of the interceptor, a class which must: + + Entry point of the interceptor, a class which must:
  • implement io.grpc.ServerInterceptor
  • have public visibility
  • have a public default constructor (i.e. no-arg constructor)
  • @@ -437,7 +458,8 @@ Each filter should be configured with the values described below:
    classNameEntry point of the filter, a class which must: + + Entry point of the filter, a class which must:
  • implement jakarta.servlet.Filter
  • have public visibility
  • have a public default constructor (i.e. no-arg constructor)
  • diff --git a/docs/self-managed/zeebe-deployment/configuration/priority-election.md b/docs/self-managed/zeebe-deployment/configuration/priority-election.md index 4e466a0640a..d7fc45c0413 100644 --- a/docs/self-managed/zeebe-deployment/configuration/priority-election.md +++ b/docs/self-managed/zeebe-deployment/configuration/priority-election.md @@ -10,8 +10,8 @@ It aims to achieve a more uniform leader distribution by assigning each node a p ## Configuration -Enable priority election by setting `zeebe.broker.raft.enablePriorityElection = "true"` in your config or -by setting the equivalent environment variable `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`. +Enable priority election by setting `zeebe.broker.cluster.raft.enablePriorityElection=true` in your config or +by setting the equivalent environment variable `ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION=true`. If you are using the fixed partitioning scheme (experimental), you may need [additional configuration](fixed-partitioning.md#priority-election). @@ -19,7 +19,7 @@ If you are using the fixed partitioning scheme (experimental), you may need [add With priority election enabled, election latency and thus failover time increases. -The result of leader election is not deterministic and priority election can only increase the chance of having a +The result of a leader election is not deterministic, and priority election can only increase the chance of having a uniform leader distribution, not guarantee it. -Factors such as high load can prevent high priority nodes from becoming the leader. +Factors such as high load can prevent high-priority nodes from becoming the leader. diff --git a/docs/self-managed/zeebe-deployment/exporters/camunda-exporter.md b/docs/self-managed/zeebe-deployment/exporters/camunda-exporter.md new file mode 100644 index 00000000000..dfccf1ad3bb --- /dev/null +++ b/docs/self-managed/zeebe-deployment/exporters/camunda-exporter.md @@ -0,0 +1,187 @@ +--- +id: camunda-exporter +title: "Camunda Exporter" +sidebar_label: "Camunda Exporter" +description: "Use the Camunda Exporter to export Zeebe records to Elasticsearch/OpenSearch without additional importers or data transformations." +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +The Camunda Exporter exports Zeebe records directly to Elasticsearch/OpenSearch. Unlike the Elasticsearch and OpenSearch exporters, records are exported in the format required by Operate and Tasklist, and configuring additional importers or data transformations is not required. + +Using the Camunda Exporter can increase process instance throughput, and can reduce the latency of changes appearing in Operate and Tasklist. + +:::note +When exporting, indexes are created as required, and will not be created twice if they already exist. However, once disabled, they +will not be deleted (that is up to the administrator.) A [retention](./camunda-exporter.md?configuration=retention#options) policy can be configured to automatically delete data after a certain number of days. +::: + +## Configuration + +Enable the exporter by configuring the `className` in your [broker configuration](/docs/self-managed/zeebe-deployment/configuration/broker.md#zeebebrokerexporters): + +```yaml +exporters: + camundaExporter: + className: io.camunda.exporter.CamundaExporter + args: + # Refer to the table below for the available args options +``` + +:::note +As the exporter is packaged with Zeebe, it is not necessary to specify a `jarPath`. 
+::: + +Configure the exporter by providing `args`. See the tables below for configuration options and default values, or review the [example YAML configuration](#example). + +| Option | Description | Default | +| ------------ | ------------------------------------------------------------------------------------------------------------------- | ------- | +| connect | Refer to [Connect](./camunda-exporter.md?configuration=connect#options) for the connection configuration options. | | +| index | Refer to [Index](./camunda-exporter.md?configuration=index#options) for the index configuration options. | | +| bulk | Refer to [Bulk](./camunda-exporter.md?configuration=bulk#options) for the bulk configuration options. | | +| retention | Refer to [Retention](./camunda-exporter.md?configuration=retention#options) for the retention configuration options | | +| createSchema | If `true` missing indexes will be created automatically. | true | + +### Options + + + + + +:::note +Please refer to [supported environments](/reference/supported-environments.md#camunda-8-self-managed) to find out which +versions of Elasticsearch and/or OpenSearch are supported in a Camunda 8 Self-Managed setup. +::: + +| Option | Description | Default | +| -------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------------- | +| type | the type of the underlying search engine to export to. Accepted values are `elasticsearch` or `opensearch`. | elasticsearch | +| clusterName | The name of the Elasticsearch/OpenSearch cluster to export to. | elasticsearch | +| dateFormat | Defines a custom date format that should be used for fetching date data from the engine (should be the same as in the engine) | yyyy-MM-dd'T'HH:mm:ss.SSSZZ | +| socketTimeout | Defines the socket timeout in milliseconds, which is the timeout for waiting for data. | | +| connectTimeout | Determines the timeout in milliseconds until a connection is established. | | +| username | Username used to authenticate. | | +| password | Password used to authenticate. | | +| security | Refer to [Security](./camunda-exporter.md?configuration=security#options) for security configuration. | | + +:::note +If you are using `opensearch` on AWS, the AWS SDK's [DefaultCredentialsProvider](https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.html) is used for authentication. For more details on configuring credentials, refer to the [AWS SDK documentation](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/credentials-chain.html#credentials-default). +::: + + + + + +| Option | Description | Default | +| --------------- | ------------------------------------------------------------------------------------------------- | ------- | +| enabled | If `true`, enables the security (ssl) features for the exporter. | false | +| certificatePath | The file path to the SSL certificate used for secure communication with Elasticsearch/OpenSearch. | | +| verifyHostname | If `true`, the hostname of the SSL certificate will be validated. | true | +| selfSigned | If `true`, allows the use of self-signed SSL certificates. 
| false | + + + + + +| Option | Description | Default | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| prefix | This prefix will be appended to every index created by the exporter; must not contain `_` (underscore). | | +| numberOfShards | The number of [shards](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#_static_index_settings) used for each created index. | 3 | +| numberOfReplicas | The number of shard [replicas](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-settings) used for created index. | 0 | +| variableSizeThreshold | Defines a threshold for variable size. Variables exceeding this threshold are split into two properties: `FULL_VALUE` (full content, not indexed) and `VALUE` (truncated content, indexed). | 8191 | +| shardsByIndexName | A map where the key is the index name and the value is the number of shards, allowing you to override the default `numberOfShards` setting for specific indices. | | +| replicasByIndexName | A map where the key is the index name and the value is the number of replicas, allowing you to override the default `numberOfReplicas` setting for specific indices. | | + + + + + +To avoid too many expensive requests to the Elasticsearch/OpenSearch cluster, the exporter performs batch +updates by default. The size of the batch, along with how often it should be flushed (regardless of +size) can be controlled by configuration. + +| Option | Description | Default | +| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| delay | Delay, in seconds, before force flush of the current batch. This ensures that even when we have low traffic of records, we still export every once in a while. | `5` | +| size | The amount of records a batch should have before we flush the batch. | `1000` | + +With the default configuration, the exporter will aggregate records and flush them to Elasticsearch/OpenSearch: + +1. When it has aggregated 1000 records. +2. Five seconds have elapsed since the last flush (regardless of how many + records were aggregated). + + + + + +A retention policy can be set up to delete old data. +When enabled, this creates an Index Lifecycle Management (ILM) Policy that deletes the data after the specified +`minimumAge`. +All index templates created by this exporter apply the created ILM Policy. + +| Option | Description | Default | +| ---------- | ----------------------------------------------------------------------------- | ------- | +| enabled | If `true` the ILM Policy is created and applied to the index templates. | `false` | +| minimumAge | Specifies how old the data must be, before the data is deleted as a duration. | `30d` | +| policyName | The name of the created and applied ILM policy. | | + +:::note +The duration can be specified in days `d`, hours `h`, minutes `m`, seconds `s`, milliseconds `ms`, and/or nanoseconds +`nanos`. +::: + + + + +## Example + +Here is an example configuration of the exporter: + +```yaml +--- +exporters: + # Camunda Exporter ---------- + # An example configuration for the camunda exporter: + # + # These setting can also be overridden using the environment variables "ZEEBE_BROKER_EXPORTERS_CAMUNDAEXPORTER_..." 
+ # To convert a YAML formatted variable to an environment variable, start with the top-level property and separate every nested property with an underscore (_). + # For example, the property "zeebe.broker.exporters.camundaExporter.args.index.numberOfShards" would be converted to "ZEEBE_BROKER_EXPORTERS_CAMUNDAEXPORTER_ARGS_INDEX_NUMBEROFSHARDS". + # + camundaExporter: + className: io.camunda.exporter.CamundaExporter + + args: + connect: + type: elasticsearch + url: http://localhost:9200 + clusterName: elasticsearch + dateFormat: yyyy-MM-dd'T'HH:mm:ss.SSSZZ + socketTimeout: 1000 + connectTimeout: 1000 + username: elastic + password: changeme + security: + enabled: false + certificatePath: /path/to/certificate + verifyHostname: true + selfSigned: false + + bulk: + delay: 5 + size: 1000 + + index: + prefix: + numberOfShards: 3 + numberOfReplicas: 0 + + retention: + enabled: false + minimumAge: 30d + policyName: camunda-retention-policy + + createSchema: true +``` diff --git a/docs/self-managed/zeebe-deployment/exporters/exporters.md b/docs/self-managed/zeebe-deployment/exporters/exporters.md index 1ac12a4c1f1..993551de632 100644 --- a/docs/self-managed/zeebe-deployment/exporters/exporters.md +++ b/docs/self-managed/zeebe-deployment/exporters/exporters.md @@ -4,10 +4,11 @@ title: "Exporters" sidebar_label: "Overview" --- -Zeebe comes packaged with two exporters: +Zeebe comes packaged with three exporters: - [Elasticsearch](elasticsearch-exporter.md) - [OpenSearch](opensearch-exporter.md) +- [Camunda Exporter](camunda-exporter.md) This section of the docs explains how these exporters can be [installed](install-zeebe-exporters.md) and configured. diff --git a/docs/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md b/docs/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md index fc621c0a837..52a2ab7516c 100644 --- a/docs/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md +++ b/docs/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md @@ -40,7 +40,7 @@ env: ``` This example is downloading the exporters' JAR from a URL and adding the JAR to the `exporters` directory, -which will be scanned for JARs and added to the Zeebe broker classpath. Then, with `environment variables`, +which will be scanned for JARs and added to the Zeebe Broker classpath. Then, with `environment variables`, you can configure the exporter parameters. :::note diff --git a/docs/self-managed/zeebe-deployment/operations/backpressure.md b/docs/self-managed/zeebe-deployment/operations/backpressure.md index 377d12a7492..d32366d5f96 100644 --- a/docs/self-managed/zeebe-deployment/operations/backpressure.md +++ b/docs/self-managed/zeebe-deployment/operations/backpressure.md @@ -34,7 +34,7 @@ The limit and inflight count are calculated per partition. Zeebe uses adaptive algorithms from [concurrency-limits](https://github.com/Netflix/concurrency-limits) to dynamically calculate the limit. Configure Zeebe with one of the backpressure algorithms in the following sections. -The default values can be found in the [Zeebe broker standalone configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) or in the [Zeebe broker configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) in the `# backpressure` section. 
+The default values can be found in the [Zeebe Broker standalone configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) or in the [Zeebe Broker configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) in the `# backpressure` section. #### Fixed limit diff --git a/docs/self-managed/zeebe-deployment/operations/cluster-scaling.md b/docs/self-managed/zeebe-deployment/operations/cluster-scaling.md index 379359c94dc..d7a6bb569b0 100644 --- a/docs/self-managed/zeebe-deployment/operations/cluster-scaling.md +++ b/docs/self-managed/zeebe-deployment/operations/cluster-scaling.md @@ -418,7 +418,7 @@ The response is a JSON object. See detailed specs [here](https://github.com/camu } ``` -- `changeId`: Id of the changes initiated to scale the cluster. This can be used to monitor the progress of the scaling operation. The id typically increases so new requests get a higher id than the previous one. +- `changeId`: The ID of the changes initiated to scale the cluster. This can be used to monitor the progress of the scaling operation. The ID typically increases so new requests get a higher ID than the previous one. - `currentTopology`: A list of current brokers and the partition distribution. - `plannedChanges`: A sequence of operations that has to be executed to achieve scaling. - `expectedToplogy`: The expected list of brokers and the partition distribution once the scaling is completed. diff --git a/docs/self-managed/zeebe-deployment/operations/disk-space.md b/docs/self-managed/zeebe-deployment/operations/disk-space.md index 12cdff52389..d3496d8ce9e 100644 --- a/docs/self-managed/zeebe-deployment/operations/disk-space.md +++ b/docs/self-managed/zeebe-deployment/operations/disk-space.md @@ -4,7 +4,7 @@ title: "Disk space" description: "Understand how Zeebe uses the local disk for storage of its persistent data, and configuring Zeebe settings for the disk usage watermarks." --- -Zeebe uses the local disk for storage of its persistent data. Therefore, if the Zeebe broker runs out of disk space, the system is in an invalid state as the broker cannot update its state. +Zeebe uses the local disk for storage of its persistent data. Therefore, if the Zeebe Broker runs out of disk space, the system is in an invalid state as the broker cannot update its state. To prevent the system from reaching an unrecoverable state, Zeebe expects a minimum size of free disk space available. If this limit is violated, the broker rejects new requests to allow the operations team to free more disk space, and allows the broker to continue to update its state. diff --git a/docs/self-managed/zeebe-deployment/operations/health.md b/docs/self-managed/zeebe-deployment/operations/health.md index b550b8e4325..9ad212e1cdf 100644 --- a/docs/self-managed/zeebe-deployment/operations/health.md +++ b/docs/self-managed/zeebe-deployment/operations/health.md @@ -6,7 +6,7 @@ description: "This document analyzes health status checks and responses." 
## Broker -Zeebe broker exposes three HTTP endpoints to query its health status: +The Zeebe Broker exposes three HTTP endpoints to query its health status: - Startup check - Ready check @@ -62,7 +62,7 @@ When a broker becomes unhealthy, it's recommended to check the logs to see what ## Gateway -Zeebe gateway exposes three HTTP endpoints to query its health status: +The Zeebe Gateway exposes three HTTP endpoints to query its health status: - Health status - `http://{zeebe-gateway}:9600/actuator/health` - Startup probe - `http://{zeebe-gateway}:9600/actuator/health/startup` diff --git a/docs/self-managed/zeebe-deployment/operations/management-api.md b/docs/self-managed/zeebe-deployment/operations/management-api.md index 66a49949710..d35d167131d 100644 --- a/docs/self-managed/zeebe-deployment/operations/management-api.md +++ b/docs/self-managed/zeebe-deployment/operations/management-api.md @@ -1,13 +1,13 @@ --- id: management-api title: "Management API" -description: "Zeebe Gateway also exposes an HTTP endpoint for cluster management operations." +description: "The Zeebe Gateway also exposes an HTTP endpoint for cluster management operations." --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -Besides the [REST](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md) and [gRPC API](/apis-tools/zeebe-api/grpc.md) for process instance execution, Zeebe Gateway also exposes an HTTP endpoint for cluster management operations. This API is not expected to be used by a typical user, but by a privileged user such as a cluster administrator. It is exposed via a different port and configured using configuration `management.server.port` (or via environment variable `MANAGEMENT_SERVER_PORT`). By default, this is set to `9600`. +Besides the [REST](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) and [gRPC API](/apis-tools/zeebe-api/grpc.md) for process instance execution, the Zeebe Gateway also exposes an HTTP endpoint for cluster management operations. This API is not expected to be used by a typical user, but by a privileged user such as a cluster administrator. It is exposed via a different port and configured using configuration `management.server.port` (or via environment variable `MANAGEMENT_SERVER_PORT`). By default, this is set to `9600`. The API is a custom endpoint available via [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/actuator.html#actuator.endpoints). For additional configurations such as security, refer to the Spring Boot documentation. diff --git a/docs/self-managed/zeebe-deployment/operations/metrics.md b/docs/self-managed/zeebe-deployment/operations/metrics.md index 52cabef81b7..d2825ef4e86 100644 --- a/docs/self-managed/zeebe-deployment/operations/metrics.md +++ b/docs/self-managed/zeebe-deployment/operations/metrics.md @@ -51,7 +51,7 @@ All Zeebe-related metrics have a `zeebe_`-prefix. Most metrics have the following common label: -- `partition`: Cluster-unique id of the partition +- `partition`: Cluster-unique ID of the partition :::note Both brokers and gateways expose their respective metrics. The brokers have an optional metrics exporter that can be enabled for maximum insight. 
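Putting the management endpoints and metric naming above together, a hedged Prometheus scrape sketch might look as follows. The job name and target host are placeholders, and the `/actuator/prometheus` path is an assumption consistent with the actuator endpoints shown above:

```yaml
# Sketch only: scrape Zeebe metrics from the management port (9600 by default).
# "zeebe-broker" is a placeholder target; gateways expose metrics the same way.
scrape_configs:
  - job_name: zeebe
    metrics_path: /actuator/prometheus
    static_configs:
      - targets: ["zeebe-broker:9600"]
```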
diff --git a/docs/self-managed/zeebe-deployment/operations/network-ports.md b/docs/self-managed/zeebe-deployment/operations/network-ports.md index 45512e7e298..3b3946c2ace 100644 --- a/docs/self-managed/zeebe-deployment/operations/network-ports.md +++ b/docs/self-managed/zeebe-deployment/operations/network-ports.md @@ -19,7 +19,7 @@ Additionally, it will need to communicate with other nodes (mostly brokers) in t To join the cluster, it will also need at least one initial contact point, typically a broker, configured via `zeebe.gateway.cluster.initialContactPoints: [127.0.0.1:26502]`. :::note -You can use all broker connections instead of one to make the startup process of the Zeebe gateway more resilient. +You can use all broker connections instead of one to make the startup process of the Zeebe Gateway more resilient. ::: The relevant [configuration](../configuration/configuration.md) settings are: diff --git a/docs/self-managed/zeebe-deployment/operations/resource-planning.md b/docs/self-managed/zeebe-deployment/operations/resource-planning.md index 7a488c310f9..0f12ee6028f 100644 --- a/docs/self-managed/zeebe-deployment/operations/resource-planning.md +++ b/docs/self-managed/zeebe-deployment/operations/resource-planning.md @@ -148,7 +148,7 @@ Only the leader of a partition exports events. Only committed events (events tha When a partition fails over to a new leader, the new leader is able to construct the current partition state by projecting the event log from the point of the last snapshot. The position of exporters cannot be reconstructed from the event log, so it is set to the last snapshot. This means an exporter can see the same events twice in the event of a fail-over. -You should assign idempotent ids to events in your exporter if this is an issue for your system. The combination of record position and partition id is reliable as a unique id for an event. +You should assign idempotent ids to events in your exporter if this is an issue for your system. The combination of record position and partition ID is reliable as a unique ID for an event. ### Effect of quorum loss diff --git a/docs/self-managed/zeebe-deployment/operations/update-zeebe.md b/docs/self-managed/zeebe-deployment/operations/update-zeebe.md index 5df604da86e..a8d66b7481b 100644 --- a/docs/self-managed/zeebe-deployment/operations/update-zeebe.md +++ b/docs/self-managed/zeebe-deployment/operations/update-zeebe.md @@ -19,7 +19,7 @@ Refer to the [update guide](/self-managed/operational-guides/update-guide/introd A **rolling update** ensures the Zeebe cluster stays available by updating brokers and gateways one by one instead of all at once. -There are three parties to a rolling update: the Zeebe brokers, Zeebe gateways, and the clients. +There are three parts to a rolling update: the Zeebe Broker, Zeebe Gateway, and clients. We recommend updating brokers first, then gateways, and finally clients. This ensures clients don't use new APIs that are not yet supported by the brokers or gateways. @@ -29,7 +29,7 @@ While updating brokers, leadership for partitions will rotate which may cause br The procedure to do a rolling update of Zeebe brokers is the following: -1. Pick the broker with the highest id that runs the old version. +1. Pick the broker with the highest ID that runs the old version. 2. Shut down the broker. 3. Update the broker software to the new version. 4. Start the broker and wait for it to become ready and healthy. 
@@ -49,7 +49,7 @@ The snapshot period is five minutes by default but is [configurable via `snapsho If your Zeebe deployment is managed by our [Helm charts](/self-managed/setup/install.md), the rolling update procedure is already automated. :::note -Zeebe brokers are managed by a [`StatefulSet`](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies). Zeebe gateways are managed by a []`Deployment`](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment). +Zeebe brokers are managed by a [`StatefulSet`](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies). Zeebe Gateways are managed by a [`Deployment`](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment). ::: #### Updating brokers diff --git a/docs/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md b/docs/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md index 9671048eb13..58cc4817d6b 100644 --- a/docs/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md +++ b/docs/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md @@ -155,7 +155,7 @@ When compiling your class, you need to make sure all compile-time dependencies are provided. In the example above, that means we need the `grpc-api` and `slf4j-api` libraries available when compiling. -Since the interceptor will be running inside the Zeebe gateway, the language +Since the interceptor will be running inside the Zeebe Gateway, the language level of the compiled code must be the same as Zeebe's (i.e. currently JDK 21) or lower. This example thus assumes you're using version 21 of `javac`. ```sh diff --git a/docs/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md b/docs/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md index b3175754810..f67d92f1467 100644 --- a/docs/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md +++ b/docs/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md @@ -8,11 +8,11 @@ description: "Learn about this component and contact point of the Zeebe cluster import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster which allows Zeebe clients to communicate with Zeebe brokers inside a Zeebe cluster. For more information about the Zeebe broker, visit our [additional documentation](../../../components/zeebe/technical-concepts/architecture.md#brokers). +The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster which allows Zeebe clients to communicate with Zeebe brokers inside a Zeebe cluster. For more information about the Zeebe Broker, visit our [additional documentation](../../../components/zeebe/technical-concepts/architecture.md#brokers). -To summarize, the Zeebe broker is the main part of the Zeebe cluster, which does all the heavy work like processing, replicating, exporting, and everything based on partitions. The Zeebe Gateway acts as a load balancer and router between Zeebe’s processing partitions. +To summarize, the Zeebe Broker is the main part of the Zeebe cluster, which does all the heavy work like processing, replicating, exporting, and everything based on partitions. The Zeebe Gateway acts as a load balancer and router between Zeebe’s processing partitions. 
-![Zeebe gateway overview](assets/zeebe-gateway-overview.png) +![Zeebe Gateway overview](assets/zeebe-gateway-overview.png) To interact with the Zeebe cluster, the Zeebe client sends a command to the gateway either as a gRPC message (to port `26500` by default), or a plain HTTP request to its REST API (to port `8080` by default). Given the gateway supports gRPC as well as an OpenAPI spec, the user can use several clients in different languages to interact with the Zeebe cluster. For more information, read our [overview](../../../apis-tools/working-with-apis-tools.md). @@ -42,7 +42,7 @@ The Zeebe Gateway can be run in two different ways: embedded and standalone. -Running the gateway in embedded mode means it will run as part of the Zeebe broker. The broker will accept gRPC client messages via the embedded gateway and distribute the translated requests inside the cluster. This means the request accepted by the embedded gateway does not necessarily go to the same broker, where the embedded gateway is running. +Running the gateway in embedded mode means it will run as part of the Zeebe Broker. The broker will accept gRPC client messages via the embedded gateway and distribute the translated requests inside the cluster. This means the request accepted by the embedded gateway does not necessarily go to the same broker, where the embedded gateway is running. The embedded gateway is useful for development and testing purposes, and to reduce the burden of deploying and running multiple applications. For example, in [zeebe-process-test](https://github.com/camunda/zeebe-process-test) an embedded gateway is used to accept the client commands and write directly to the engine. diff --git a/docusaurus.config.js b/docusaurus.config.js index bf6b8614050..a90cf5de490 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -3,16 +3,18 @@ const { unsupportedVersions } = require("./src/versions"); const latestVersion = require("./src/versions").versionMappings[0].docsVersion; +const docsSiteUrl = process.env.DOCS_SITE_URL || "https://docs.camunda.io"; + module.exports = { title: "Camunda 8 Docs", tagline: "Start orchestrating your processes with Camunda 8 SaaS or Self-Managed.", // url: "https://camunda-cloud.github.io", - url: process.env.DOCS_SITE_URL || "https://docs.camunda.io", + url: docsSiteUrl, // baseUrl: "/camunda-cloud-documentation/", baseUrl: process.env.DOCS_SITE_BASE_URL || "/", customFields: { - canonicalUrlRoot: "https://docs.camunda.io", + canonicalUrlRoot: docsSiteUrl, }, onBrokenLinks: "throw", onBrokenMarkdownLinks: "throw", @@ -98,24 +100,6 @@ module.exports = { }, }, ], - [ - // Zeebe REST API docs generation - "docusaurus-plugin-openapi-docs", - { - id: "api-zeebe-openapi", - docsPluginId: "default", - config: { - zeebe: { - specPath: "api/zeebe/zeebe-openapi.yaml", - outputDir: "docs/apis-tools/zeebe-api-rest/specifications", - sidebarOptions: { - groupPathsBy: "tag", - }, - hideSendButton: true, - }, - }, - }, - ], [ // Zeebe REST API docs generation "docusaurus-plugin-openapi-docs", @@ -269,7 +253,7 @@ module.exports = { }, { label: "Contact", - to: "contact", + to: "docs/reference/contact", }, ], }, @@ -438,13 +422,9 @@ module.exports = { "/docs/**/assets/**", "/docs/**/tags/**", "/docs/next/**", - "/docs/1.3/**", - "/docs/8.2/**", "/docs/8.3/**", "/docs/8.4/**", "/docs/8.5/**", - "/optimize/3.7.0/**", - "/optimize/3.10.0/**", "/optimize/3.11.0/**", "/optimize/3.12.0/**", "/optimize/3.13.0/**", diff --git a/hacks/isolateVersion/6-updateCIWorkflows.sh 
b/hacks/isolateVersion/6-updateCIWorkflows.sh index c8c707a7f08..1d52eefee3f 100644 --- a/hacks/isolateVersion/6-updateCIWorkflows.sh +++ b/hacks/isolateVersion/6-updateCIWorkflows.sh @@ -13,24 +13,29 @@ sed -i '' "/tags:/a\\ - \"$ARCHIVED_VERSION.[0-9]+\" " .github/workflows/publish-prod.yaml -# c. replace the main docs remote_path with this isolated version's remote_path. +# c. add `unsupported.` to docs URLs +sed -i '' 's/https:\/\/docs.camunda.io/https:\/\/unsupported.docs.camunda.io/' .github/workflows/publish-prod.yaml + +# d. replace the main docs remote_path with this isolated version's remote_path. sed -i '' "s/remote_path: \${{ secrets.AWS_PROD_PUBLISH_PATH }}/remote_path: \${{ secrets.AWS_PROD_PUBLISH_PATH_UNSUPPORTED }}\/$ARCHIVED_VERSION/g" .github/workflows/publish-prod.yaml +# e. update `DOCS_SITE_BASE_URL` to specify isolated version +sed -i '' "s/DOCS_SITE_BASE_URL: \//DOCS_SITE_BASE_URL: \/$ARCHIVED_VERSION\//" .github/workflows/publish-prod.yaml + # 3. publish-stage: -sed -i '' '/Disable Indexing/{N; d;}' .github/workflows/publish-stage.yaml # a. replace `branches: - main` with `branches: - unsupported/{version}` sed -i '' "s/- \"main\"/- \"unsupported\/$ARCHIVED_VERSION\"/" .github/workflows/publish-stage.yaml -# b. remove `disable indexing` step - -# c. add `unsupported.` to docs URLs +# b. add `unsupported.` to docs URLs sed -i '' 's/https:\/\/docs.camunda.io/https:\/\/unsupported.docs.camunda.io/' .github/workflows/publish-stage.yaml sed -i '' 's/https:\/\/stage.docs.camunda.io/https:\/\/stage.unsupported.docs.camunda.io/' .github/workflows/publish-stage.yaml -# d. replace `${{ secrets.AWS_STAGE_PUBLISH_PATH }}` with `${{ secrets.AWS_STAGE_PUBLISH_PATH_UNSUPPORTED }}/{version}` +# c. replace `${{ secrets.AWS_STAGE_PUBLISH_PATH }}` with `${{ secrets.AWS_STAGE_PUBLISH_PATH_UNSUPPORTED }}/{version}` sed -i '' "s/remote_path: \${{ secrets.AWS_STAGE_PUBLISH_PATH }}/remote_path: \${{ secrets.AWS_STAGE_PUBLISH_PATH_UNSUPPORTED }}\/$ARCHIVED_VERSION/g" .github/workflows/publish-stage.yaml +# d. update `DOCS_SITE_BASE_URL` to specify isolated version +sed -i '' "s/DOCS_SITE_BASE_URL: \//DOCS_SITE_BASE_URL: \/$ARCHIVED_VERSION\//" .github/workflows/publish-stage.yaml git add .github/workflows git commit -m "archiving($ARCHIVED_VERSION): update CI workflows" \ No newline at end of file diff --git a/hacks/isolateVersion/7-updateDocusaurusConfig.sh b/hacks/isolateVersion/7-updateDocusaurusConfig.sh index d4450a5acca..78d7a816e43 100644 --- a/hacks/isolateVersion/7-updateDocusaurusConfig.sh +++ b/hacks/isolateVersion/7-updateDocusaurusConfig.sh @@ -3,9 +3,6 @@ notify "Updating docusaurus.config.js..." # Update `url` to include `unsupported` sed -i '' "s/docs.camunda.io/unsupported.docs.camunda.io/" docusaurus.config.js -# Update `baseUrl` to specify isolated version -sed -i '' "s/baseUrl: \"\\/\"/baseUrl: \"\/$ARCHIVED_VERSION\/\"/" docusaurus.config.js - # Update footer social icons based on the new baseUrl sed -i '' "s/src= \"\/img\//src=\"\/$ARCHIVED_VERSION\/img\//g" docusaurus.config.js diff --git a/howtos/documentation-guidelines.md b/howtos/documentation-guidelines.md index 3ab3d88d6da..bbed6214fbc 100644 --- a/howtos/documentation-guidelines.md +++ b/howtos/documentation-guidelines.md @@ -167,6 +167,10 @@ In an effort to automate screenshots across Camunda 8 documentation, the followi Given the following procedures, teams will respond to screenshot updates and suggestions from community members by manually adjusting appropriate screenshots. 
::: +:::note +When generating screenshots, remove any personal identifiable information. If a username must be included, ensure this is "My organization". +::: + **Modeler** Visit the [Modeler screenshot automation repo](https://github.com/camunda/camunda-docs-modeler-screenshots/blob/main/README.md) for details on updating screenshots and scripting new screenshots. diff --git a/howtos/technical-writing-cheatsheet.md b/howtos/technical-writing-cheatsheet.md index 12174562eae..1e70d17aab8 100644 --- a/howtos/technical-writing-cheatsheet.md +++ b/howtos/technical-writing-cheatsheet.md @@ -10,40 +10,40 @@ Our primary goal in documentation is to achieve organization, clarity, and direc ## Grammar -| Subject | Practice | Avoid | Example/Use | -| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ----------------------------------------------------------------- | -| Bolding | Bold when referring to button names or items to select. | Click "Create New Diagram." | Click **Create New Diagram** under the **Diagrams** tab. | -| Italics | Use when applying emphasis to a word. | Click _Create New Diagram_. | Click **Create New Diagram**. | -| Numbers | Write whole numbers one through nine in full. Write whole numbers 10 and upwards as numerals. | In this example, we will create 1 diagram. | In this example, we will create one diagram. | -| Spelling | Default to American spelling and US English. Visit the [Oxford American Dictionary](https://www.oxfordreference.com/view/10.1093/acref/9780195392883.001.0001/acref-9780195392883) for details. | Analyse

    Bernd Rücker | Analyze

    Bernd Ruecker | -| Voice/Tense | Second person, [active voice](https://www.grammarly.com/blog/active-vs-passive-voice/#:~:text=Active%20voice%20means%20that%20a,it%20isn't%20that%20simple.). | I, me, my.

    The computer is turned on by pressing the power button. | You, your.

    Press the power button to turn on the computer. | +| Subject | Practice | Avoid | Example/Use | +| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------- | ------------------------------------------------------------------- | +| Bolding | Bold when referring to button names or items to select. | Click "Create New Diagram." | Click **Create New Diagram** under the **Diagrams** tab. | +| Italics | Use when applying emphasis to a word. | Click _Create New Diagram_. | Click **Create New Diagram**. | +| Numbers | Write whole numbers one through nine in full. Write whole numbers 10 and upwards as numerals. | In this example, we will create 1 diagram. | In this example, we will create one diagram. | +| Spelling | Default to American spelling and US English. Visit the [Oxford American Dictionary](https://www.oxfordreference.com/view/10.1093/acref/9780195392883.001.0001/acref-9780195392883) for details. | Analyse

    Bernd Rücker | Analyze

    Bernd Ruecker | +| Voice/Tense | Second person, [active voice](https://www.grammarly.com/blog/active-vs-passive-voice/#:~:text=Active%20voice%20means%20that%20a,it%20isn't%20that%20simple.). | I, me, my.

    The computer is turned on by pressing the power button. | You, your.

    Press the power button to turn on the computer. | ## Punctuation -| Subject | Practice | Avoid | Example/Use | -| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | -| Commas | Camunda uses the [Oxford comma](https://www.grammarly.com/blog/what-is-the-oxford-comma-and-why-do-people-care-so-much-about-it/).

    Use a comma to separate independent clauses when they are joined by [coordinating conjunctions](https://www.grammarly.com/blog/coordinating-conjunctions/) like and, but, for, so.

    Use a comma to separate a sentence introduction from the remainder of the sentence content (Therefore, Thus, As a result, So, Henceforth,) | Camunda loves its products, GitHub and Google Analytics.

    We want to automate a process so let’s start by creating an account. | Camunda loves its products, GitHub, and Google Analytics.

    We want to automate a process, so let’s start by creating an account. | -| Hyphens | Use the hyphen (-) to create a compound adjective (two describing words together). | User friendly interface. | User-friendly interface. | -| Quotation marks | Only use double quotations to illustrate the words spoken by another individual. | Navigate to the "Decisions" section. | Navigate to the **Decisions** section. | +| Subject | Practice | Avoid | Example/Use | +| --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| Commas | Camunda uses the [Oxford comma](https://www.grammarly.com/blog/what-is-the-oxford-comma-and-why-do-people-care-so-much-about-it/).

    Use a comma to separate independent clauses when they are joined by [coordinating conjunctions](https://www.grammarly.com/blog/coordinating-conjunctions/) like and, but, for, so.

    Use a comma to separate a sentence introduction from the remainder of the sentence content (Therefore, Thus, As a result, So, Henceforth). | Camunda loves its products, GitHub and Google Analytics.

    We want to automate a process so let’s start by creating an account. | Camunda loves its products, GitHub, and Google Analytics.

    We want to automate a process, so let’s start by creating an account. | +| Hyphens | Use the hyphen (-) to create a compound adjective (two describing words together). | User friendly interface. | User-friendly interface. | +| Quotation marks | Only use double quotations to illustrate the words spoken by another individual. | Navigate to the "Decisions" section. | Navigate to the **Decisions** section. | ## Formatting, organization and structure for conceptual pieces and implementation steps -| Subject | Practice | Avoid | Example/Use | -| ----------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------ | -| [Admonitions](https://docusaurus.io/docs/markdown-features/admonitions) | Utilize the note [admonition](https://docusaurus.io/docs/markdown-features/admonitions) to separate important notes in documents according to [Docusaurus’ guidance](https://docusaurus.io/docs/markdown-features/admonitions). | Note: This is the `bpmnProcessId`, you'll need to create a new instance. | :::note
    This is the `bpmnProcessId`, you'll need to create a new instance.
    ::: | -| Breaking changes | If you are documenting a breaking change, please ensure this is noted in appropriate/relevant docs outside of solely update guides and announcements. | N/A | N/A | -| Button names | Click **Next**.

    Use the arrow icon > to list out a series of buttons the user needs to press. | Italics and quotes.

    Click "Next" and then select "Open" and press "Enter". | Click **Next > Open > Enter** | -| Filenames | Place filenames within a code block. | Avoid bolding or italicizing filenames. | Open `codeStuff.txt`
    In the **Name** box enter `project1`. | -| Images and gifs | Ensure your images are appropriate in size and clarity.
    All images should include alt text.
    Crop the user bar and any personal information out of your photo or screenshot.
    Gifs are strongly discouraged in place of text for maintainability and accessibility purposes. | Avoid blurry screenshots.
    Avoid including any personal information in your images.
    Avoid images that are unnecessarily large or bulky to keep the page clean and concise. | N/A | -| Titles and headers | Sentence case spelling in titles and headers.

    For sentence case spelling, only capitalize the first word and any proper nouns. | How To Open A File

    Our travel guide to berlin, germany | How to open a file

    Our travel guide to Berlin, Germany | +| Subject | Practice | Avoid | Example/Use | +| ----------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| [Admonitions](https://docusaurus.io/docs/markdown-features/admonitions) | Utilize the note [admonition](https://docusaurus.io/docs/markdown-features/admonitions) to separate important notes in documents according to [Docusaurus’ guidance](https://docusaurus.io/docs/markdown-features/admonitions). | Note: This is the `bpmnProcessId`, you'll need to create a new instance. | :::note
    This is the `bpmnProcessId`, you'll need to create a new instance.
    ::: | +| Breaking changes | If you are documenting a breaking change, please ensure this is noted in appropriate/relevant docs outside of solely update guides and announcements. | N/A | N/A | +| Button names | Click **Next**.

    Use the arrow icon > to list out a series of buttons the user needs to press. | Italics and quotes.

    Click "Next" and then select "Open" and press "Enter". | Click **Next > Open > Enter** | +| Filenames | Place filenames within a code block. | Avoid bolding or italicizing filenames. | Open `codeStuff.txt`
    In the **Name** box enter `project1`. | +| Images and gifs | Ensure your images are appropriate in size and clarity.
    All images should include alt text.
    Crop the user bar and any personal information out of your photo or screenshot.
    Gifs are strongly discouraged in place of text for maintainability and accessibility purposes. | Avoid blurry screenshots.
    Avoid including any personal information in your images. If a username must be included, use "My organization".
    Avoid images that are unnecessarily large or bulky to keep the page clean and concise. | N/A | +| Titles and headers | Sentence case spelling in titles and headers.

    For sentence case spelling, only capitalize the first word and any proper nouns. | How To Open A File

    Our travel guide to berlin, germany | How to open a file

    Our travel guide to Berlin, Germany | ## Product names and other terminology **NOTE: This section is an overview of a few commonly misunderstood Camunda terms. Refer to this summary of [OMG specifications](https://www.omg.org/spec/category/business-modeling/) when referring to acronyms within your documentation.** -| Term/Acronym | Meaning | Avoid | Use | -| ------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | -| [Cluster](https://docs.camunda.io/docs/product-manuals/zeebe/technical-concepts/clustering) | A cluster represents a configuration of one or more brokers collaborating to execute processes. | Avoid using capitalized "Cluster" when it is not the first word in a sentence.

    This also applies to terms like process instance and task. | "Zeebe implements the Gossip protocol to know which brokers are currently part of the cluster." | -| [Elasticsearch](https://github.com/camunda-community-hub/camunda-bpm-elasticsearch) | A free, open, and multitenant-capable search engine. | Elastic search, ElasticSearch | Elasticsearch | -| GitHub | A provider of internet hosting for software development. | Github | GitHub | -| OpenSearch | OpenSearch is the flexible, scalable, open-source way to build solutions for data-intensive applications. | N/A | N/A | +| Term/Acronym | Meaning | Avoid | Use | +| ------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | +| [Cluster](https://docs.camunda.io/docs/product-manuals/zeebe/technical-concepts/clustering) | A cluster represents a configuration of one or more brokers collaborating to execute processes. | Avoid using capitalized "Cluster" when it is not the first word in a sentence.

    This also applies to terms like process instance and task. | "Zeebe implements the Gossip protocol to know which brokers are currently part of the cluster." | +| [Elasticsearch](https://github.com/camunda-community-hub/camunda-bpm-elasticsearch) | A free, open, and multitenant-capable search engine. | Elastic search, ElasticSearch | Elasticsearch | +| GitHub | A provider of internet hosting for software development. | Github | GitHub | +| OpenSearch | OpenSearch is the flexible, scalable, open-source way to build solutions for data-intensive applications. | N/A | N/A | diff --git a/howtos/technical-writing-styleguide.md b/howtos/technical-writing-styleguide.md index c36ef8f6685..295b3dd4b3e 100644 --- a/howtos/technical-writing-styleguide.md +++ b/howtos/technical-writing-styleguide.md @@ -35,25 +35,25 @@ We encourage document authors and technical writers to keep in mind grammar, pun ## Grammar -| Subject | Practice | Avoid | Example/Use | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Bolding | Bold when referring to button names or items to select.
    Bold to refer to another section within a document. If you are referencing another document entirely, link to it. | Click "Create New Diagram." | Click **Create New Diagram** under the **Diagrams** tab. | -| Capitalization | Refer to [Purdue's American capitalization guide](https://owl.purdue.edu/owl/general_writing/mechanics/help_with_capitals.html). | i like Camunda 7. | I like Camunda 7. | -| Currency | Use the currency symbol first, followed by the amount. Where appropriate, round to a whole number. | I have two hundred dollars. | €300
    $22,600
    €22.6 million | -| Dates | -| Avoid expressing a month as a number to avoid common confusion around US/EU conventions. If there is no way around it, use the US-English “/” as separators for all content (such as marketing materials) with the exception of technical documentation (guides, manuals, etc.)
    In technical documentation, use "-" as separators as it mirrors most timestamps in various programming languages. Also note that it may be best to remove the day of the week as it may not be as relevant as the day and month. | 10 December, 2018 | December 10, 2018 | -| Date ranges | Avoid repeating the same month twice in the same date range.
    If you are using numerical values, enter as follows: Year, month, date. | Camunda Community Summit April 27 - April 28 | Camunda Community Summit April 27-28 | -| Genders | Our default is to optimize for gender neutral writing in most cases, unless a person has specified their pronouns in advance. | He/she | A group of people/a person: They
    A business: It
    A user: The user, they | -| [In to vs. into](https://www.grammarly.com/blog/into-vs-in-to/) | Always think of "into" as a single preposition (within or outside of something.)
    Typically, "into" refers to physically going inside.
    Think of "in to" as two independent prepositions that happen to end up next to one another, and typically aren't physically entering or exiting something.
    Oftentimes, you can tell if you should use "in to" because you could place a comma after "in," and the sentence would still make sense. | Type your name in to the text box to sign up for Camunda 8.
    "Check which folder the file is into ensure you are in the correct location." | Type your name into the text box to sign up for Camunda 8.
    "Check which folder the file is in to ensure you are in the correct location." | -| Italics | Use when applying emphasis to a word. | Avoid overuse, using for button names.
    Click _Create New Diagram_. | See the **Bolding** section in this style guide above.
    Click **Create New Diagram** under the **Diagrams** tab.
    "You _must_ ensure your environment is configured correctly before moving forward through the steps below." | -| Numbers | Write whole numbers one through nine in full.
    Whole numbers 10 and upwards are written as numerals.
    Large numbers may be written as numerals with definition (million, billion).
    Currency does not follow the same rules. See details in the sub-section titled **Currency** above this table. | In this example, we will create 1 diagram and eleven processes to help 1,000,000 customers. | In this example, we will create one diagram and 11 processes to help 1 million customers. | -| Percentages | Written as % and always in numerals. | 10 percent | 10% | -| Pronoun references | Unclear pronoun reference occurs when a pronoun (often "it," "this," "that," or "they") could refer to more than one subject in a sentence, according to practice by [English Composition](https://englishcomposition.org/essential-writing/unclear-pronoun-reference/). Practice clarifying these pronouns, and removing them where appropriate. | After you execute `npm start`, you can run it.
    In the example above, what exactly are we running? | After you execute `npm start`, you can run the program.
    In the revised example above, we removed the unclear pronoun. | -| Spacing | Following a period, it is standard to have _one_ space before beginning a new sentence. | Avoid using two spaces after a period: "Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously." | "Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously." | -| Spelling | Default to American spelling and US English.
    See details in the sub-section titled **Spelling** below this table. | Analyse
    Colour
    Capitalise
    Humour
    Bernd Rücker | Analyze
    Color
    Capitalize
    Humor
    Bernd Ruecker | -| Times | Use the 12-hour clock, preferably UTC, and uppercase AM or PM where necessary. Always use the corresponding time zone if necessary. See this useful list of [standard abbreviations](https://www.timeanddate.com/time/zones/) for time zones.
    Be mindful of time zones when writing about events that occur across multiple borders. We aren't imposing any hard and fast rules, because we're dealing with global time zones and hundreds of conventions as a remote-first company.
    Standardizing on timezones, we typically communicate in or default to CET/CEST, EST/EDT, and PST/PDT.
    CET or CEST is used depending on daylight savings time, CEST indicating Central European Summer Time.
    EST/EDT and PST/ PDT are used depending on daylight savings time, EDT/PDT indicating daylight savings. | Avoid using hyphens to display time periods. Instead, use an en dash.
    To type an en dash on your Mac, type Option+Minus (-). To type an en dash on Windows, hold down Alt and type 0150 on the numeric keyboard; the en dash will appear upon releasing the Alt key. | 1 p.m. CET
    10 a.m. PST | -| Voice/Tense | Second person, [active voice](https://www.grammarly.com/blog/active-vs-passive-voice/). | I, me, my.
    The computer is turned on by pressing the power button. | You, your.
    Press the power button to turn on the computer. | -| Years | Don’t use an apostrophe for years. | 1960's | 1960s | +| Subject | Practice | Avoid | Example/Use | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Bolding | Bold when referring to button names or items to select.
    Bold to refer to another section within a document. If you are referencing another document entirely, link to it. | Click "Create New Diagram." | Click **Create New Diagram** under the **Diagrams** tab. | +| Capitalization | Refer to [Purdue's American capitalization guide](https://owl.purdue.edu/owl/general_writing/mechanics/help_with_capitals.html). | i like Camunda 7. | I like Camunda 7. | +| Currency | Use the currency symbol first, followed by the amount. Where appropriate, round to a whole number. | I have two hundred dollars. | €300
    $22,600
    €22.6 million | +| Dates | +| Avoid expressing a month as a number to avoid common confusion around US/EU conventions. If there is no way around it, use the US-English “/” as separators for all content (such as marketing materials) with the exception of technical documentation (guides, manuals, etc.)
    In technical documentation, use "-" as separators as it mirrors most timestamps in various programming languages. Also note that it may be best to remove the day of the week as it may not be as relevant as the day and month. | 10 December, 2018 | December 10, 2018 | +| Date ranges | Avoid repeating the same month twice in the same date range.
    If you are using numerical values, enter as follows: Year, month, date. | Camunda Community Summit April 27 - April 28 | Camunda Community Summit April 27-28 | +| Genders | Our default is to optimize for gender-neutral writing in most cases, unless a person has specified their pronouns in advance. | He/she | A group of people/a person: They
    A business: It
    A user: The user, they | +| [In to vs. into](https://www.grammarly.com/blog/into-vs-in-to/) | Always think of "into" as a single preposition (within or outside of something.)
    Typically, "into" refers to physically going inside.
    Think of "in to" as two independent prepositions that happen to end up next to one another, and typically aren't physically entering or exiting something.
    Oftentimes, you can tell if you should use "in to" because you could place a comma after "in," and the sentence would still make sense. | Type your name in to the text box to sign up for Camunda 8.
    "Check which folder the file is into ensure you are in the correct location." | Type your name into the text box to sign up for Camunda 8.
    "Check which folder the file is in to ensure you are in the correct location." | +| Italics | Use when applying emphasis to a word. | Avoid overuse, using for button names.
    Click _Create New Diagram_. | See the **Bolding** section in this style guide above.
    Click **Create New Diagram** under the **Diagrams** tab.
    "You _must_ ensure your environment is configured correctly before moving forward through the steps below." | +| Numbers | Write whole numbers one through nine in full.
    Whole numbers 10 and upwards are written as numerals.
    Large numbers may be written as numerals with definition (million, billion).
    Currency does not follow the same rules. See details in the sub-section titled **Currency** above this table. | In this example, we will create 1 diagram and eleven processes to help 1,000,000 customers. | In this example, we will create one diagram and 11 processes to help 1 million customers. | +| Percentages | Written as % and always in numerals. | 10 percent | 10% | +| Pronoun references | Unclear pronoun reference occurs when a pronoun (often "it," "this," "that," or "they") could refer to more than one subject in a sentence, according to practice by [English Composition](https://englishcomposition.org/essential-writing/unclear-pronoun-reference/). Practice clarifying these pronouns, and removing them where appropriate. | After you execute `npm start`, you can run it.
    In the example above, what exactly are we running? | After you execute `npm start`, you can run the program.
    In the revised example above, we removed the unclear pronoun. | +| Spacing | Following a period, it is standard to have _one_ space before beginning a new sentence. | Avoid using two spaces after a period: "Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously." | "Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously." | +| Spelling | Default to American spelling and US English.
    See details in the sub-section titled **Spelling** below this table. | Analyse
    Colour
    Capitalise
    Humour
    Bernd Rücker | Analyze
    Color
    Capitalize
    Humor
    Bernd Ruecker | +| Times | Use the 12-hour clock, preferably UTC, and uppercase AM or PM where necessary. Always use the corresponding time zone if necessary. See this useful list of [standard abbreviations](https://www.timeanddate.com/time/zones/) for time zones.
    Be mindful of time zones when writing about events that occur across multiple borders. We aren't imposing any hard and fast rules, because we're dealing with global time zones and hundreds of conventions as a remote-first company.
    Standardizing on timezones, we typically communicate in or default to CET/CEST, EST/EDT, and PST/PDT.
    CET or CEST is used depending on daylight saving time, CEST indicating Central European Summer Time.
    EST/EDT and PST/PDT are used depending on daylight saving time, EDT/PDT indicating daylight saving time. | Avoid using hyphens to display time periods. Instead, use an en dash.
    To type an en dash on your Mac, type Option+Minus (-). To type an en dash on Windows, hold down Alt and type 0150 on the numeric keyboard; the en dash will appear upon releasing the Alt key. | 1 p.m. CET
    10 a.m. PST | +| Voice/Tense | Second person, [active voice](https://www.grammarly.com/blog/active-vs-passive-voice/). | I, me, my.
    The computer is turned on by pressing the power button. | You, your.
    Press the power button to turn on the computer. | +| Years | Don’t use an apostrophe for years. | 1960's | 1960s | ### Spelling @@ -72,16 +72,16 @@ We encourage document authors and technical writers to keep in mind grammar, pun Default to [American punctuation](https://www.unr.edu/writing-speaking-center/student-resources/writing-speaking-resources/british-american-english). -| Subject | Practice | Avoid | Use | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Apostrophes](https://www.thepunctuationguide.com/apostrophe.html) | There are three key uses for apostrophes: contractions, plurals, and possessives.
    Apostrophes are not used for time periods.
    If the noun ends in an 's', place the apostrophe after the 's'. | 1980's
    Let us run the command below.
    We work to assist the businesses's microservices. | Let us = Let's
    Do not = Don't
    It is = It's
    1980s
    Let's run the command below.
    We work to assist the businesses' microservices. | -| [Commas](https://www.grammarly.com/blog/comma/) | Camunda uses the [Oxford comma](https://www.grammarly.com/blog/what-is-the-oxford-comma-and-why-do-people-care-so-much-about-it/). See details in the sub-section titled "Oxford comma" below this table.
    Always use a comma to separate independent clauses when they are joined by any of these seven [coordinating conjunctions](https://www.grammarly.com/blog/coordinating-conjunctions/): and, but, for, or, nor, so, yet.
    Avoid using too many commas and initiating a [run-on sentence](https://owl.purdue.edu/owl/general_writing/punctuation/independent_and_dependent_clauses/runonsentences.html), however. When possible, use short, separate sentences as opposed to several pieces of information in one sentence.
    Always use a comma to separate a sentence introduction from the remainder of the sentence content (Therefore, Thus, As a result, So, Henceforth,)
    In some specific cases, primarily on social media and where copy is exceptionally brief and clear, the Oxford comma is omitted. It should be used consistently in web and long-form content. | Camunda loves its products, GitHub and Google Analytics.
    We want to automate a process so let's start by creating an account with Camunda 8.
    Therefore we needed to wait for the program to load. | Camunda loves its products, GitHub, and Google Analytics.
    We want to automate a process, so let's start by creating an account with Camunda 8.
    Therefore, we needed to wait for the program to load. | -| [Em dash](https://www.grammarly.com/blog/why-you-should-love-the-em-dash/) | —
    As you can see, em dashes are slightly longer than en dashes.
    On a Mac, execute Shift+Option+Minus (-); on Windows use Ctrl+Alt+Minus (-).
    The em dash is used to set apart additional, descriptive notes also defined as "parenthetical information," or information you might put inside parentheses.
    In many programs, you will be unable to create an em dash with a single line. In these instances, please use two hyphens (--) with no white space between to create an em dash.
    If you find yourself pondering if you're using this correctly, cover up the sentence you've written after the em dash. If the first sentence makes perfect sense without it, then you're onto a winner. | - –
    In the true spirit of the city Camunda was founded, Berlin, Germany, Camunda is a diverse, distributed and global organization with "Camundos" around the world.
    At Camunda, we have made it our mission to enable organizations to design, automate and improve these processes, no matter where they are and what they entail.
    Camunda was founded in 2008, at a time when established industry players were advocating a low-code approach, but we believed in a developer-first approach. | In the true spirit of the city Camunda was founded—that is, Berlin, Germany— Camunda is a diverse, distributed, and global organization with "Camundos" around the world.
    At Camunda, we have made it our mission to enable organizations to design, automate and improve these processes—no matter where they are and what they entail.
    Camunda was founded in 2008, at a time when established industry players were advocating a low-code approach—but we believed in a developer-first approach. | -| [En dash](https://www.grammarly.com/blog/dash/) | –
    The en dash is shorter than an em dash, and is not the same as a hyphen.
    To type an en dash on your Mac, type Option+Minus (-). To type an en dash on Windows, hold down Alt and type 0150 on the numeric keyboard; the en dash will appear upon releasing the Alt key.
    The en dash should predominantly be used for date and time ranges. | -
    The scheduled window for the installation is 1-3pm. | The scheduled window for the installation is 1–3 p.m. | -| Hyphens | Use the hyphen (-) to create a compound adjective (where you squash two describing words together, just like German, but easier to read.) | User friendly interface.
    Biggest ever release. | User-friendly interface.
    Biggest-ever release. | -| [Prefixes](https://dictionary.cambridge.org/us/grammar/british-grammar/prefixes) | Prefixes are a stumbling block for many individuals with English as their first language. There's no right or wrong way to use them, because it genuinely depends on the dictionary you use, the style guide you follow, or just how confusing we want to make the language.
    Prefixes are basically a few letters tagged at the front of a word to create a different meaning.
    Ensure you omit the hyphen!
    If at any point you feel a word doesn't look quite right, just send DevRel a quick Slack, because, unfortunately, there are a ton of exceptions. | Un happy
    De activate
    Re-activate
    Un-do | Unhappy
    Deactivate
    Reactivate
    Undo | -| Quotation marks | We only use double quotations to illustrate the words spoken by another individual.
    We do not use quotations when referring to buttons or programs, nor sections of a document.
    See the **Bolding** section in the table above. | One of the greatest benefits of this project has been the close cooperation between business and IT. -Michael Voeller, Head of Project and Demand Management
    Navigate to the "**Decisions**" section of this manual. | "One of the greatest benefits of this project has been the close cooperation between business and IT," said Michael Voeller, Head of Project and Demand Management
    Navigate to the **Decisions** section of this page.
    Preferably, we would link the **Decisions** section in the example above so the user doesn't have to go out of their way to find it. | -| Semicolons | Semicolons are a wondrous use of punctuation when understood! Semicolons are stronger than a comma, but not as divisive as a period.
    Use semicolons to connect two related but independent clauses (the two pieces of information are related to one another, but they can also stand alone.)
    Do not capitalize the first word of the second clause following the semicolon unless it is a proper noun or acronym.
    Semicolons are also used to replace conjunctions (and, or, etc.) | We've automated several processes for our partners, we love to see them succeed. | We've automated several processes for our partners; we love to see them succeed. | +| Subject | Practice | Avoid | Use | +| -------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [Apostrophes](https://www.thepunctuationguide.com/apostrophe.html) | There are three key uses for apostrophes: contractions, plurals, and possessives.
    Apostrophes are not used for time periods.
    If the noun ends in an 's', place the apostrophe after the 's'. | 1980's
    Let us run the command below.
    We work to assist the businesses's microservices. | Let us = Let's
    Do not = Don't
    It is = It's
    1980s
    Let's run the command below.
    We work to assist the businesses' microservices. | +| [Commas](https://www.grammarly.com/blog/comma/) | Camunda uses the [Oxford comma](https://www.grammarly.com/blog/what-is-the-oxford-comma-and-why-do-people-care-so-much-about-it/). See details in the sub-section titled "Oxford comma" below this table.
    Always use a comma to separate independent clauses when they are joined by any of these seven [coordinating conjunctions](https://www.grammarly.com/blog/coordinating-conjunctions/): and, but, for, or, nor, so, yet.
    Avoid using too many commas and initiating a [run-on sentence](https://owl.purdue.edu/owl/general_writing/punctuation/independent_and_dependent_clauses/runonsentences.html), however. When possible, use short, separate sentences as opposed to several pieces of information in one sentence.
    Always use a comma to separate a sentence introduction from the remainder of the sentence content (Therefore, Thus, As a result, So, Henceforth).
    In some specific cases, primarily on social media and where copy is exceptionally brief and clear, the Oxford comma is omitted. It should be used consistently in web and long-form content. | Camunda loves its products, GitHub and Google Analytics.
    We want to automate a process so let's start by creating an account with Camunda 8.
    Therefore we needed to wait for the program to load. | Camunda loves its products, GitHub, and Google Analytics.
    We want to automate a process, so let's start by creating an account with Camunda 8.
    Therefore, we needed to wait for the program to load. | +| [Em dash](https://www.grammarly.com/blog/why-you-should-love-the-em-dash/) | —
    As you can see, em dashes are slightly longer than en dashes.
    On a Mac, execute Shift+Option+Minus (-); on Windows use Ctrl+Alt+Minus (-).
    The em dash is used to set apart additional, descriptive notes also defined as "parenthetical information," or information you might put inside parentheses.
    In many programs, you will be unable to create an em dash with a single line. In these instances, please use two hyphens (--) with no white space between to create an em dash.
    If you find yourself pondering if you're using this correctly, cover up the sentence you've written after the em dash. If the first sentence makes perfect sense without it, then you're onto a winner. | - –
    In the true spirit of the city Camunda was founded, Berlin, Germany, Camunda is a diverse, distributed and global organization with "Camundos" around the world.
    At Camunda, we have made it our mission to enable organizations to design, automate and improve these processes, no matter where they are and what they entail.
    Camunda was founded in 2008, at a time when established industry players were advocating a low-code approach, but we believed in a developer-first approach. | In the true spirit of the city Camunda was founded—that is, Berlin, Germany— Camunda is a diverse, distributed, and global organization with "Camundos" around the world.
    At Camunda, we have made it our mission to enable organizations to design, automate and improve these processes—no matter where they are and what they entail.
    Camunda was founded in 2008, at a time when established industry players were advocating a low-code approach—but we believed in a developer-first approach. | +| [En dash](https://www.grammarly.com/blog/dash/) | –
    The en dash is shorter than an em dash, and is not the same as a hyphen.
    To type an en dash on your Mac, type Option+Minus (-). To type an en dash on Windows, hold down Alt and type 0150 on the numeric keyboard; the en dash will appear upon releasing the Alt key.
    The en dash should predominantly be used for date and time ranges. | -
    The scheduled window for the installation is 1-3pm. | The scheduled window for the installation is 1–3 p.m. | +| Hyphens | Use the hyphen (-) to create a compound adjective (where you squash two describing words together, just like German, but easier to read.) | User friendly interface.
    Biggest ever release. | User-friendly interface.
    Biggest-ever release. | +| [Prefixes](https://dictionary.cambridge.org/us/grammar/british-grammar/prefixes) | Prefixes are a stumbling block for many individuals with English as their first language. There's no right or wrong way to use them, because it genuinely depends on the dictionary you use, the style guide you follow, or just how confusing we want to make the language.
    Prefixes are basically a few letters tagged at the front of a word to create a different meaning.
    Ensure you omit the hyphen!
    If at any point you feel a word doesn't look quite right, just send DevRel a quick Slack, because, unfortunately, there are a ton of exceptions. | Un happy
    De activate
    Re-activate
    Un-do | Unhappy
    Deactivate
    Reactivate
    Undo | +| Quotation marks | We only use double quotations to illustrate the words spoken by another individual.
    We do not use quotations when referring to buttons or programs, nor sections of a document.
    See the **Bolding** section in the table above. | One of the greatest benefits of this project has been the close cooperation between business and IT. -Michael Voeller, Head of Project and Demand Management
    Navigate to the "**Decisions**" section of this manual. | "One of the greatest benefits of this project has been the close cooperation between business and IT," said Michael Voeller, Head of Project and Demand Management
    Navigate to the **Decisions** section of this page.
    Preferably, we would link the **Decisions** section in the example above so the user doesn't have to go out of their way to find it. | +| Semicolons | Semicolons are a wondrous use of punctuation when understood! Semicolons are stronger than a comma, but not as divisive as a period.
    Use semicolons to connect two related but independent clauses (the two pieces of information are related to one another, but they can also stand alone.)
    Do not capitalize the first word of the second clause following the semicolon unless it is a proper noun or acronym.
    Semicolons are also used to replace conjunctions (and, or, etc.) | We've automated several processes for our partners, we love to see them succeed. | We've automated several processes for our partners; we love to see them succeed. | ### Oxford comma @@ -107,14 +107,14 @@ In the example above, one might assume Rachel Ray finds inspiration in cooking h The following table outlines best practices for conceptual pieces of information in the document. These pieces usually introduce the reader to a topic with a goal of teaching the reader about that topic before introducing further details or steps for implementation. (For example, an opening summary or overview of the document subject.) | Subject | Practice| Avoid | Use | | -- | -- | -- | -- | -| Concise writing | One of the most important techniques in technical writing is keeping your text short, clear, clean, and concise.
    As a result, work to eliminate unnecessary words or phrases to reduce the amount of text the user must read.
    To help test how readable and user-friendly your text is, review these [readability metrics](https://medium.com/technical-writing-is-easy/readability-metrics-and-technical-writing-b776422eaba) you can use.
    Review this additional [guide to Hemingway](https://medium.com/technical-writing-is-easy/hemingway-app-for-technical-writing-f994c8b2412a), a great tool to test the readability and user experience of your document. | Camunda 8 is powered by Zeebe, a new class of BPMN workflow engine that delivers true horizontal scalability and enables high-performance use cases that were once beyond the realm of workflow automation.
    Camunda 8 is architected for the cloud from the ground up. It is ideal for cloud application use cases such as microservices-based applications and integrates seamlessly with best-in-class cloud components. | Camunda 8 is powered by Zeebe, a new class of BPMN workflow engine that delivers horizontal scalability and high-performance use cases for workflow automation.
    Ideal for microservice-based applications, Camunda 8 easily integrates with industry-leading cloud components. | +| Concise writing | One of the most important techniques in technical writing is keeping your text short, clear, clean, and concise.
    As a result, work to eliminate unnecessary words or phrases to reduce the amount of text the user must read.
    To help test how readable and user-friendly your text is, review these [readability metrics](https://medium.com/technical-writing-is-easy/readability-metrics-and-technical-writing-b776422eaba) you can use.
    Review this additional [guide to Hemingway](https://medium.com/technical-writing-is-easy/hemingway-app-for-technical-writing-f994c8b2412a), a great tool to test the readability and user experience of your document. | Camunda 8 is powered by Zeebe, a new class of BPMN workflow engine that delivers true horizontal scalability and enables high-performance use cases that were once beyond the realm of workflow automation.
    Camunda 8 is architected for the cloud from the ground up. It is ideal for cloud application use cases such as microservices-based applications and integrates seamlessly with best-in-class cloud components. | Camunda 8 is powered by Zeebe, a new class of BPMN workflow engine that delivers horizontal scalability and high-performance use cases for workflow automation.
    Ideal for microservice-based applications, Camunda 8 easily integrates with industry-leading cloud components. | | Icons | You may utilize `` and `` icons in the documentation for clarity. | N/A | N/A | -| Separated paragraphs | A user-friendly experience is a clean, concise one with as few words as possible.
    A user-friendly experience separates these chunks of information into separate sections for easy reading and organization.
    Avoid large, lengthy paragraphs. Instead, try to keep your paragraphs to a maximum of four or five sentences, and then begin a new paragraph introducing more information. | Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously. This tutorial uses the Node.js client, but it serves to illustrate message correlation concepts that are applicable to all language clients. We will use Simple Monitor to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production - however, it is useful during development. | Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously.
    This tutorial uses the Node.js client, but it serves to illustrate message correlation concepts that are applicable to all language clients.
    We will use Simple Monitor to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production - however, it is useful during development. | +| Separated paragraphs | A user-friendly experience is a clean, concise one with as few words as possible.
    A user-friendly experience separates these chunks of information into separate sections for easy reading and organization.
    Avoid large, lengthy paragraphs. Instead, try to keep your paragraphs to a maximum of four or five sentences, and then begin a new paragraph introducing more information. | Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously. This tutorial uses the Node.js client, but it serves to illustrate message correlation concepts that are applicable to all language clients. We will use Simple Monitor to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production - however, it is useful during development. | Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously.
    This tutorial uses the Node.js client, but it serves to illustrate message correlation concepts that are applicable to all language clients.
We will use Simple Monitor to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production - however, it is useful during development. | | Short sentences | Avoid long, lengthy sentences and practice short, separate sentences when describing various processes and technologies. This will help the user take a step-by-step approach, piece by piece, through a larger conceptual item without getting lost or feeling overwhelmed. | At Camunda we have made it our mission to provide developers with the best experience because our platform and tools are easy to get started and use in your environment right away, with full public access to all our docs, open APIs to integrate with just about anything, and a vibrant community of 100,000 developers. | At Camunda we have made it our mission to provide developers with the best experience. Our platform and tools are easy to get started and use in your environment right away. We offer full public access to all our docs and open APIs. We strive to integrate with just about anything, and we have a vibrant community of 100,000 developers. | -| That | Avoid overuse of the term "that." More often than not, the term is repetitive or unnecessary.<br/>
    To practice, double check your sentences by typing Ctrl+F and searching the term "that." Read your sentences without the term to see if the sentences still make sense. If they do, chances are you can remove the term.
    Typically, you can remove "that" before most nouns, though you may want to keep "that" before an adjective. | To confirm that the first gateway works correctly, complete the steps below: | To confirm the first gateway works correctly, complete the steps below: | -| Titles, headers, and sidebars | Sentence case spelling in titles and headers.
    For sentence case spelling, only capitalize the first word and any proper nouns.
    Ensure titles and headers are descriptive enough so Camunda doesn't have a large surplus of mere "Overview" pages.
    In Markdown, do not include a colon in your headers.
    Note that for clean, short sidebar labels, you may remove excess wording that would usually align with the style guide. | How To Open A File
    Our travel guide to berlin, germany
    Camunda 8 overview
    Readiness probe as yaml config:
    Process instance modification | How to open a file
    Our travel guide to Berlin, Germany
    What is Camunda 8?
    Readiness probe as yaml config
    Modifying process instances | -| Whether or not | "Whether X produces the expected value or not" can seem a bit repetitive.
    In most cases, it is appropriate to remove the "or not" at the end of the sentence to avoid repetition and unnecessary text.
    In a picturesque world, we should lean on "If X produces the expected value," entirely eliminating the need to use the "whether or not" terminology. | This specifies whether host language resources like classes and their methods are accessible or not. | This specifies if host language resources like classes and their methods are accessible. | +| That | Avoid overuse of the term "that." More often than not, the term is repetitive or unnecessary.
    To practice, double check your sentences by typing Ctrl+F and searching the term "that." Read your sentences without the term to see if the sentences still make sense. If they do, chances are you can remove the term.
    Typically, you can remove "that" before most nouns, though you may want to keep "that" before an adjective. | To confirm that the first gateway works correctly, complete the steps below: | To confirm the first gateway works correctly, complete the steps below: | +| Titles, headers, and sidebars | Sentence case spelling in titles and headers.
    For sentence case spelling, only capitalize the first word and any proper nouns.
    Ensure titles and headers are descriptive enough so Camunda doesn't have a large surplus of mere "Overview" pages.
    In Markdown, do not include a colon in your headers.
    Note that for clean, short sidebar labels, you may remove excess wording that would usually align with the style guide. | How To Open A File
    Our travel guide to berlin, germany
    Camunda 8 overview
    Readiness probe as yaml config:
    Process instance modification | How to open a file
    Our travel guide to Berlin, Germany
    What is Camunda 8?
    Readiness probe as yaml config
    Modifying process instances | +| Whether or not | "Whether X produces the expected value or not" can seem a bit repetitive.
    In most cases, it is appropriate to remove the "or not" at the end of the sentence to avoid repetition and unnecessary text.
    In a picturesque world, we should lean on "If X produces the expected value," entirely eliminating the need to use the "whether or not" terminology. | This specifies whether host language resources like classes and their methods are accessible or not. | This specifies if host language resources like classes and their methods are accessible. | ### Titles and headers (sentence case): @@ -129,28 +129,28 @@ You may utilize `` and `` The following table outlines best practices for the implementation section of the document. This section usually offers a distinct thing or things for the reader to do (for example, a list of steps). -| Subject | Practice | Avoid | Use | -| ----------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Admonitions](https://docusaurus.io/docs/markdown-features/admonitions) | Currently within Docusaurus, we have the opportunity to utilize [admonitions](https://docusaurus.io/docs/markdown-features/admonitions) to separate important notes in our documents. Please utilize these admonitions appropriately according to [Docusaurus' guidance](https://docusaurus.io/docs/markdown-features/admonitions). This will add a significant boost to our UX! | Note: This is the `bpmnProcessId`, you'll need to create a new instance. | :::note
    This is the `bpmnProcessId`, you'll need to create a new instance.
    ::: | -| Breaking changes | If you are documenting a breaking change, please ensure this is noted in appropriate/relevant docs outside of solely update guides and announcements. | N/A | N/A | -| Bulleted lists | Use bulleted lists for a list of three or more items.
    You may use complete sentences in bulleted lists (followed by a period), or you may avoid using periods in your bulleted lists if the items are fragmented or short (apples, bananas, grapes, for example).
    Always capitalize the first word of the item in the bullet. | Do not use bullets for a series of steps or instructions. Instead, use numerical lists. See **Numerical lists/steps** in the table below.
    Avoid using commas and/or semicolons in bulleted lists as this can cause confusion between listed items.
    Do not lowercase the first word following each bullet. Ensure capitalization. | Camunda 8 can be used for several purposes, including:
• To automate a process<br/>
• To avoid bottlenecks in business<br/>
• To create an organized framework | -| Button names | Click **Next**.<br/>
    Use the arrow icon > to list out a series of buttons the user needs to press.
    See **Menu bar traversal** for details in the table below. | Italics and quotes.
    Click "Next" and then select "Open" and press "Enter" at the bottom of the page. | Verb + **Button Name**
    Can use screenshot or icon in instructions.
    Click **Next** > **Open** > **Enter** | -| Button verbs | Use common terms like **Click** or **Select**. | Hit, press
    Hit the **Next** button. | Click, select
    Select **Next**. | -| Code blocks | Use code blocks when you are specifically referring to components within code or filenames.
    Use code block highlighting, if available. This will not apply to inline code, but instead for larger blocks of code. | Do not use code blocks for anything outside of code or filenames, including buttons, titles, etc. | Ensure the `taskID` on your JavaScript page is the same.
    Execute the following command:
    `npm start`
    `javascript var = 1;` | -| Filenames | Place filenames within a code block, as noted in the component **Code blocks** in the table above. | Avoid bolding or italicizing filenames. | Open `codeStuff.txt` | -| Gifs | Gifs are strongly discouraged in place of text for maintainability and accessibility purposes.
If possible, refrain from implementing these in documentation. If you must include them, ensure sufficient text outlines what is taking place in the gif for accessibility purposes. | N/A | N/A | -| Images | Ensure your images are appropriate in size and clarity.<br/>
    All images should include alt text for accessibility purposes.
    If using a screenshot to show steps to fill out a UI, include text above or below the screenshot that includes input text.
    Crop the user bar and any personal information out of your photo or screenshot. This may include names, passwords, usernames, etc. | Avoid blurry screenshots. Avoid including any personal information in your images. Avoid images that are unnecessarily large or bulky to keep the page clean and concise. | N/A | -| Latin abbreviations | Do not use Latin abbreviations. Instead, use "for example." | e.g. or i.e. | For example, | -| Links | Link text whenever it refers to a separate section of our documentation or website. No section reference should go unlinked.
    Ensure links are externally linked, meaning when clicked, the link will open in a separate tab and not remove the position the user is in within the documentation in their current tab.
    Please also make sure any repo links are linked to the anchor link on the repo instead of the main/{branch name} link.
    Links should use descriptive wording, rather than just "click here".
    Deep link to specific sections of a document where appropriate. | Visit our Getting Started Guide for more details.
    Click here for more details.
    Learn more about...
    To read more...
    "For more information, see the `[deploying](LINK)` page." | Visit [Get started with Camunda](https://docs.camunda.org/get-started/) for more details.
    To learn more about migrating from Camunda 7 to Camunda 8, visit our migration guide.
    To (do X), visit `[X](LINK)`.
    For more information, see `[merge request](LINK)`. | -| Menu bar traversal | When listing out a series of buttons as steps, use the arrow key to break between buttons. | In the "File" menu, click "Save as." | In the **File** menu, click **Save as**.
    Go to **File > New File > BPMN Diagram**. | -| Notes | When using an admonition to create a note (see the row titled **Admonitions** above) do not place several notes in a row.
    Either remove the information in the sequential notes and leave them as paragraphs/independent sentences, or spread the notes out directly alongside the content the note is referring to. | Admonition, with another admonition immediately following it. | “According to XYZ, it’s important to note...
    Additionally, note that...” | -| Numerical lists/steps | When possible, replace a loaded or long sentence with a series of steps to keep things clear and concise.
    See details in the sub-section titled **Numerical lists/steps** below this table. | Use the Camunda Modeler to open the Payment Retrieval process then click on the Approve Payment Task. Change the activity type to Business Rule Task in the wrench button menu. | 1. Use Camunda Modeler to open the **Payment Retrieval** process.
    2. Click the **Approve Payment** task.
    3. Click the wrench icon, revealing a menu, to change the **activity type** to **Business Rule Task**. | -| Optional steps | Steps may be listed as optional where appropriate. | `1. Optional. Check this out.` | `(Optional) Check this out.` | -| Unordered lists | Do not use numerical lists for lists of items without a set order of actions.
    Additionally, use dashes (minus) instead of asterisks (star). | You can do the following with Optimize: `1. Create reports 2. Create dashboards 3. Analyze heat maps` | You can do the following with Optimize: `- Create reports - Create dashboards - Analyze heatmaps` | -| Please and thank you | In technical writing, give direct, clear instructions. You do not need to ask the user to "please" do something.
    Do not use "please" in a numerical or bulleted list.
    This may seem rather blunt, but our goal is to create clean, direct instructions and documentation. | Please open the link. | Open the link. | -| Semantic versioning | **X** is used when applying a topic to all subsequent patch releases since the minor release.
    0 or another number representing a specific patch release (8, 9, etc.) means you are specifying the minor release, or a particular patch release.
    **+**, therefore, should only be used alongside a specific number specifying a release, and should not follow an X. | Check out the feature in version 8.4.x+. | This feature is available with 8.4.10+. | -| Tabs | When listing several different command options across operating systems, ensure these different references are separated into their own tabs for a clean, clear UX. | See this [documentation](https://docs.camunda.io/docs/components/zeebe/deployment-guide/getting-started/create-process-instance/) example. | See this [GitHub](https://github.com/camunda-cloud/camunda-cloud-documentation/pull/345) example. | -| Visuals | Keep visuals in mind as you create a document to avoid large, lengthy paragraphs. Consider the following:
    Would this series of information be more visually-appealing in a table?
    Should I add a brief video, gif, or image to show the user the more complex steps I've described? | Avoid several paragraphs of information contained in large bodies of text. | Practice clean, clear, and brief chunks of text. Consider a table or image to display the information you've outlined. | +| Subject | Practice | Avoid | Use | +| ----------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Admonitions](https://docusaurus.io/docs/markdown-features/admonitions) | Currently within Docusaurus, we have the opportunity to utilize [admonitions](https://docusaurus.io/docs/markdown-features/admonitions) to separate important notes in our documents. Please utilize these admonitions appropriately according to [Docusaurus' guidance](https://docusaurus.io/docs/markdown-features/admonitions). This will add a significant boost to our UX! | Note: This is the `bpmnProcessId`, you'll need to create a new instance. | :::note
    This is the `bpmnProcessId`, you'll need to create a new instance.
    ::: | +| Breaking changes | If you are documenting a breaking change, please ensure this is noted in appropriate/relevant docs outside of solely update guides and announcements. | N/A | N/A | +| Bulleted lists | Use bulleted lists for a list of three or more items.
    You may use complete sentences in bulleted lists (followed by a period), or you may avoid using periods in your bulleted lists if the items are fragmented or short (apples, bananas, grapes, for example).
    Always capitalize the first word of the item in the bullet. | Do not use bullets for a series of steps or instructions. Instead, use numerical lists. See **Numerical lists/steps** in the table below.
    Avoid using commas and/or semicolons in bulleted lists as this can cause confusion between listed items.
    Do not lowercase the first word following each bullet. Ensure capitalization. | Camunda 8 can be used for several purposes, including:
• To automate a process<br/>
• To avoid bottlenecks in business<br/>
• To create an organized framework | +| Button names | Click **Next**.<br/>
    Use the arrow icon > to list out a series of buttons the user needs to press.
    See **Menu bar traversal** for details in the table below. | Italics and quotes.
    Click "Next" and then select "Open" and press "Enter" at the bottom of the page. | Verb + **Button Name**
    Can use screenshot or icon in instructions.
    Click **Next** > **Open** > **Enter** | +| Button verbs | Use common terms like **Click** or **Select**. | Hit, press
    Hit the **Next** button. | Click, select
    Select **Next**. | +| Code blocks | Use code blocks when you are specifically referring to components within code or filenames.
    Use code block highlighting, if available. This will not apply to inline code, but instead for larger blocks of code. | Do not use code blocks for anything outside of code or filenames, including buttons, titles, etc. | Ensure the `taskID` on your JavaScript page is the same.
    Execute the following command:
    `npm start`
    `javascript var = 1;` | +| Filenames | Place filenames within a code block, as noted in the component **Code blocks** in the table above. | Avoid bolding or italicizing filenames. | Open `codeStuff.txt` | +| Gifs | Gifs are strongly discouraged in place of text for maintainability and accessibility purposes.
If possible, refrain from implementing these in documentation. If you must include them, ensure sufficient text outlines what is taking place in the gif for accessibility purposes. | N/A | N/A | +| Images | Ensure your images are appropriate in size and clarity.<br/>
    All images should include alt text for accessibility purposes.
    If using a screenshot to show steps to fill out a UI, include text above or below the screenshot that includes input text.
    Crop the user bar and any personal information out of your photo or screenshot. This may include names, passwords, usernames, etc. | Avoid blurry screenshots. Avoid including any personal information in your images. If you must use a username, use "My organization". Avoid images that are unnecessarily large or bulky to keep the page clean and concise. | N/A | +| Latin abbreviations | Do not use Latin abbreviations. Instead, use "for example." | e.g. or i.e. | For example, | +| Links | Link text whenever it refers to a separate section of our documentation or website. No section reference should go unlinked.
Ensure links open externally, meaning the link opens in a new tab when clicked and the reader does not lose their place in the documentation.<br/>
    Please also make sure any repo links are linked to the anchor link on the repo instead of the main/`{branch name}` link.
    Links should use descriptive wording, rather than just "click here".
    Deep link to specific sections of a document where appropriate. | Visit our Getting Started Guide for more details.
    Click here for more details.
    Learn more about...
    To read more...
    "For more information, see the `[deploying](LINK)` page." | Visit [Get started with Camunda](https://docs.camunda.org/get-started/) for more details.
    To learn more about migrating from Camunda 7 to Camunda 8, visit our migration guide.
    To (do X), visit `[X](LINK)`.
    For more information, see `[merge request](LINK)`. | +| Menu bar traversal | When listing out a series of buttons as steps, use the arrow key to break between buttons. | In the "File" menu, click "Save as." | In the **File** menu, click **Save as**.
    Go to **File > New File > BPMN Diagram**. | +| Notes | When using an admonition to create a note (see the row titled **Admonitions** above) do not place several notes in a row.
    Either remove the information in the sequential notes and leave them as paragraphs/independent sentences, or spread the notes out directly alongside the content the note is referring to. | Admonition, with another admonition immediately following it. | “According to XYZ, it’s important to note...
    Additionally, note that...” | +| Numerical lists/steps | When possible, replace a loaded or long sentence with a series of steps to keep things clear and concise.
    See details in the sub-section titled **Numerical lists/steps** below this table. | Use the Camunda Modeler to open the Payment Retrieval process then click on the Approve Payment Task. Change the activity type to Business Rule Task in the wrench button menu. | 1. Use Camunda Modeler to open the **Payment Retrieval** process.
    2. Click the **Approve Payment** task.
    3. Click the wrench icon, revealing a menu, to change the **activity type** to **Business Rule Task**. | +| Optional steps | Steps may be listed as optional where appropriate. | `1. Optional. Check this out.` | `(Optional) Check this out.` | +| Unordered lists | Do not use numerical lists for lists of items without a set order of actions.
    Additionally, use dashes (minus) instead of asterisks (star). | You can do the following with Optimize: `1. Create reports 2. Create dashboards 3. Analyze heat maps` | You can do the following with Optimize: `- Create reports - Create dashboards - Analyze heatmaps` | +| Please and thank you | In technical writing, give direct, clear instructions. You do not need to ask the user to "please" do something.
    Do not use "please" in a numerical or bulleted list.
    This may seem rather blunt, but our goal is to create clean, direct instructions and documentation. | Please open the link. | Open the link. | +| Semantic versioning | **X** is used when applying a topic to all subsequent patch releases since the minor release.
A specific number in the patch position (0, 8, 9, etc.) means you are referring to one particular patch release of that minor version.<br/>
    **+**, therefore, should only be used alongside a specific number specifying a release, and should not follow an X. | Check out the feature in version 8.4.x+. | This feature is available with 8.4.10+. | +| Tabs | When listing several different command options across operating systems, ensure these different references are separated into their own tabs for a clean, clear UX. | See this [documentation](https://docs.camunda.io/docs/components/zeebe/deployment-guide/getting-started/create-process-instance/) example. | See this [GitHub](https://github.com/camunda-cloud/camunda-cloud-documentation/pull/345) example. | +| Visuals | Keep visuals in mind as you create a document to avoid large, lengthy paragraphs. Consider the following:
Would this series of information be more visually appealing in a table?<br/>
    Should I add a brief video, gif, or image to show the user the more complex steps I've described? | Avoid several paragraphs of information contained in large bodies of text. | Practice clean, clear, and brief chunks of text. Consider a table or image to display the information you've outlined. | ### Numerical lists/steps: @@ -182,14 +182,14 @@ Create a new Maven project in your IDE. If you're using Eclipse, follow these st **NOTE: To avoid overuse of company jargon or confusion, please refer to [this summary of OMG specifications](https://www.omg.org/spec/category/business-modeling/) when referring to acronyms within your documentation.** -| Subject | Practice | Avoid | Use | -| ------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Acronyms | BPMN, DMN, TTFN – the use of acronyms should be judged by the level of audience knowledge.
    For a technical audience, use industry standard acronyms from the start. However, for new concepts or emerging acronyms, write the process in full first, and then follow with the acronym at the next use.
    For a non-technical audience, always write an acronym in full the first time you use it in any new piece of content. Afterwards, it can be abbreviated. | Avoid abbreviating the term on its first use if the documentation is for an audience which may be non-technical. | Most often, you should spell out the acronym on first reference and abbreviate thereafter depending on the level of audience knowledge. | -| And/or | Either or both of two stated possibilities. | You can further parallelize archiver and(or) importer within one node using the following configuration parameters | You can further parallelize archiver and/or importer within one node using the following configuration parameters | -| File extensions | Do not capitalize file extensions like .pdf, .doc, etc. | Uppercase | Lowercase | -| [Job and professional titles](https://grammar.yourdictionary.com/capitalization/capitalization-of-job-titles.html) | Do not capitalize a job title if listed after a name.
    Do capitalize a job title if listed before a name.
    An exception may be within a list of people, such as a conference speakers list, where capitalizing titles may be appropriate. | Charley Mann, Content Strategist | Charley Mann, content strategist | -| Process | See details in the sub-section titled **Process vs. workflow** below this table. | Avoid "workflow automation" and "workflow instance" where "process automation" and "process instance" is preferred. | We prefer process automation and process instance over workflow automation or workflow instance.
    See details in the sub-section titled **Process vs. workflow** below this table. | -| Workflow | See details in the sub-section titled **Process vs. workflow** below this table. | Avoid "workflow automation" and "workflow instance" where "process automation" and "process instance" is preferred. | We prefer process automation and process instance over workflow automation or workflow instance.
    See details in the sub-section titled **Process vs. workflow** below this table. | +| Subject | Practice | Avoid | Use | +| ------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Acronyms | BPMN, DMN, TTFN – the use of acronyms should be judged by the level of audience knowledge.
For a technical audience, use industry-standard acronyms from the start. However, for new concepts or emerging acronyms, write the term out in full first, and then follow with the acronym at the next use.<br/>
    For a non-technical audience, always write an acronym in full the first time you use it in any new piece of content. Afterwards, it can be abbreviated. | Avoid abbreviating the term on its first use if the documentation is for an audience which may be non-technical. | Most often, you should spell out the acronym on first reference and abbreviate thereafter depending on the level of audience knowledge. | +| And/or | Either or both of two stated possibilities. | You can further parallelize archiver and(or) importer within one node using the following configuration parameters | You can further parallelize archiver and/or importer within one node using the following configuration parameters | +| File extensions | Do not capitalize file extensions like .pdf, .doc, etc. | Uppercase | Lowercase | +| [Job and professional titles](https://grammar.yourdictionary.com/capitalization/capitalization-of-job-titles.html) | Do not capitalize a job title if listed after a name.
    Do capitalize a job title if listed before a name.
    An exception may be within a list of people, such as a conference speakers list, where capitalizing titles may be appropriate. | Charley Mann, Content Strategist | Charley Mann, content strategist | +| Process | See details in the sub-section titled **Process vs. workflow** below this table. | Avoid "workflow automation" and "workflow instance" where "process automation" and "process instance" is preferred. | We prefer process automation and process instance over workflow automation or workflow instance.
    See details in the sub-section titled **Process vs. workflow** below this table. | +| Workflow | See details in the sub-section titled **Process vs. workflow** below this table. | Avoid "workflow automation" and "workflow instance" where "process automation" and "process instance" is preferred. | We prefer process automation and process instance over workflow automation or workflow instance.
    See details in the sub-section titled **Process vs. workflow** below this table. | ### Process vs. workflow: diff --git a/optimize/apis-tools/optimize-api/event-ingestion.md b/optimize/apis-tools/optimize-api/event-ingestion.md index 28f1b05a9f6..6e43d73fd6c 100644 --- a/optimize/apis-tools/optimize-api/event-ingestion.md +++ b/optimize/apis-tools/optimize-api/event-ingestion.md @@ -48,7 +48,7 @@ The following request headers have to be provided with every ingest request: | Name | Type | Constraints | Description | | -------------------------------------------------------------------------------- | ------------------------------------------------------------------------------ | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion) | String | REQUIRED | The version of the CloudEvents specification, which the event uses, must be `1.0`. See [CloudEvents - Version 1.0 - specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion). | -| [id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id) | String | REQUIRED | Uniquely identifies an event, see [CloudEvents - Version 1.0 - id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id). | +| [ID](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id) | String | REQUIRED | Uniquely identifies an event, see [CloudEvents - Version 1.0 - ID](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id). | | [source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1) | String | REQUIRED | Identifies the context in which an event happened, see [CloudEvents - Version 1.0 - source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1). A use-case could be if you have conflicting types across different sources. For example, a `type:OrderProcessed` originating from both `order-service` and `shipping-service`. In this case, the `source` field provides means to clearly separate between the origins of a particular event. Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. | | [type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | String | REQUIRED | This attribute contains a value describing the type of event related to the originating occurrence, see [CloudEvents - Version 1.0 - type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type). Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. The value `camunda` cannot be used for this field. 
| | [time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | [Timestamp](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type-system) | OPTIONAL | Timestamp of when the occurrence happened, see [CloudEvents - Version 1.0 - time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#time). String encoding: [RFC 3339](https://tools.ietf.org/html/rfc3339). If not present, a default value of the time the event was received will be created. | @@ -108,43 +108,45 @@ POST `/api/ingestion/event/batch` ##### Request body - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", - "source": "order-service", - "type": "orderValidated", - "time": "2020-01-01T10:00:10.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", - "source": "shipping-service", - "type": "packageShipped", - "traceid": "id1", - "group": "shop", - "time": "2020-01-01T10:00:20.000Z" - } - ] +```json +[ + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", + "source": "order-service", + "type": "orderCreated", + "time": "2020-01-01T10:00:00.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", + "source": "order-service", + "type": "orderValidated", + "time": "2020-01-01T10:00:10.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", + "source": "shipping-service", + "type": "packageShipped", + "traceid": "id1", + "group": "shop", + "time": "2020-01-01T10:00:20.000Z" + } +] +``` #### Response @@ -156,7 +158,7 @@ The API allows you to update any previously ingested cloud event by ingesting an The following request would update the first cloud event that got ingested in the [ingest three cloud events sample](#ingest-cloud-events). Note that on an update, the cloud event needs to be provided as a whole; it's not possible to perform partial updates through this API. -In this example, an additional field `newField` is added to the data block of the cloud event with the id `1edc4160-74e5-4ffc-af59-2d281cf5aca341`. +In this example, an additional field `newField` is added to the data block of the cloud event with the ID `1edc4160-74e5-4ffc-af59-2d281cf5aca341`. 
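For illustration only, here is a minimal sketch of how such an update request might be issued from the command line. The host, port, token handling, accepted `Content-Type`, and the value given to `newField` are assumptions, not part of the documented sample; verify them against your own Optimize configuration before use.

```shell
# Hypothetical invocation: host, port, and token are placeholders.
# The cloud event is re-ingested as a whole, now carrying "newField" in its data block.
curl -X POST "http://localhost:8090/api/ingestion/event/batch" \
  -H "Content-Type: application/cloudevents-batch+json" \
  -H "Authorization: Bearer <your-ingestion-access-token>" \
  -d '[
    {
      "specversion": "1.0",
      "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341",
      "source": "order-service",
      "type": "orderCreated",
      "time": "2020-01-01T10:00:00.000Z",
      "traceid": "id1",
      "group": "shop",
      "data": {
        "numberField": 1,
        "stringField": "example",
        "newField": "newValue"
      }
    }
  ]'
```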
#### Request @@ -168,6 +170,7 @@ POST `/api/ingestion/event/batch` ##### Request Body: +``` [ { "specversion": "1.0", @@ -184,6 +187,7 @@ POST `/api/ingestion/event/batch` } } ] +``` #### Response diff --git a/optimize/apis-tools/optimize-api/external-variable-ingestion.md b/optimize/apis-tools/optimize-api/external-variable-ingestion.md index fcbe096af6c..34d377e01e2 100644 --- a/optimize/apis-tools/optimize-api/external-variable-ingestion.md +++ b/optimize/apis-tools/optimize-api/external-variable-ingestion.md @@ -92,6 +92,7 @@ POST `/api/ingestion/variable` Request Body: +``` [ { "id": "7689fced-2639-4408-9de1-cf8f72769f43", @@ -110,6 +111,7 @@ Request Body: "processDefinitionKey": "orderProcess" } ] +``` ### Response diff --git a/optimize/apis-tools/optimize-api/report/get-data-export.md b/optimize/apis-tools/optimize-api/report/get-data-export.md index 10b5e04e9ac..b38bbc867d3 100644 --- a/optimize/apis-tools/optimize-api/report/get-data-export.md +++ b/optimize/apis-tools/optimize-api/report/get-data-export.md @@ -81,6 +81,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -113,6 +114,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? } ] } +``` ##### Response @@ -130,6 +132,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -162,6 +165,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p } ] } +``` ##### Response diff --git a/optimize/apis-tools/optimize-api/tutorial.md b/optimize/apis-tools/optimize-api/tutorial.md index 6f7e9c69177..19f2a08559e 100644 --- a/optimize/apis-tools/optimize-api/tutorial.md +++ b/optimize/apis-tools/optimize-api/tutorial.md @@ -25,12 +25,16 @@ Make sure you keep the generated client credentials in a safe place. The **Clien ## Set up authentication -If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client id and client secret. Then, we return the actual token that can be passed as an authorization header in each request. +If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client ID and client secret. Then, we return the actual token that can be passed as an authorization header in each request. To set up your credentials, create an `.env` file which will be protected by the `.gitignore` file. You will need to add your `OPTIMIZE_CLIENT_ID`, `OPTIMIZE_CLIENT_SECRET`, `OPTIMIZE_BASE_URL`, and `OPTIMIZE_AUDIENCE`, which is `optimize.camunda.io` in a Camunda 8 SaaS environment. For example, your audience may be defined as `OPTIMIZE_AUDIENCE=optimize.camunda.io`. These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? 
+When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CLIENT_ID`, `CAMUNDA_CLIENT_SECRET`, and `CAMUNDA_OPTIMIZE_BASE_URL`. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/optimize/self-managed/optimize-deployment/advanced-features/import-guide.md b/optimize/self-managed/optimize-deployment/advanced-features/import-guide.md index f23cda4b3b0..451ba66436d 100644 --- a/optimize/self-managed/optimize-deployment/advanced-features/import-guide.md +++ b/optimize/self-managed/optimize-deployment/advanced-features/import-guide.md @@ -14,17 +14,17 @@ In general, the import assumes the following setup: - A Camunda engine from which Optimize imports the data. - The Optimize backend, where the data is transformed into an appropriate format for efficient data analysis. -- [Elasticsearch](https://www.elastic.co/guide/index.html), which is the database Optimize persists all formatted data to. +- [Elasticsearch (ES)](https://www.elastic.co/guide/index.html) or [OpenSearch (OS)](https://opensearch.org/), which serves as the database that Optimize uses to persist all of its formatted data. The following depicts the setup and how the components communicate with each other: ![Optimize Import Structure](img/Optimize-Structure.png) -Optimize queries the engine data using a dedicated Optimize REST-API within the engine, transforms the data, and stores it in its own Elasticsearch database such that it can be quickly and easily queried by Optimize when evaluating reports or performing analyses. The reason for having a dedicated REST endpoint for Optimize is performance: the default REST-API adds a lot of complexity to retrieve the data from the engine database, which can result in low performance for large data sets. +Optimize queries the engine data using a dedicated Optimize REST-API within the engine, transforms the data, and stores it in its own database such that it can be quickly and easily queried by Optimize when evaluating reports or performing analyses. The reason for having a dedicated REST endpoint for Optimize is performance: the default REST-API adds a lot of complexity to retrieve the data from the engine database, which can result in low performance for large data sets. Note the following limitations regarding the data in Optimize's database: -- The data is only a near real-time representation of the engine database. This means Elasticsearch may not contain the data of the most recent time frame, e.g. the last two minutes, but all the previous data should be synchronized. +- The data is only a near real-time representation of the engine database. This means the database may not contain the data of the most recent time frame, e.g. the last two minutes, but all the previous data should be synchronized. - Optimize only imports the data it needs for its analysis. The rest is omitted and won't be available for further investigation. Currently, Optimize imports: - The history of the activity instances - The history of the process instances @@ -47,7 +47,7 @@ This section gives an overview of how fast Optimize imports certain data sets. T It is very likely that these metrics change for different data sets because the speed of the import depends on how the data is distributed. 
-The import is also affected by how the involved components are set up. For instance, if you deploy the Camunda engine on a different machine than Optimize and Elasticsearch to provide both applications with more computation resources, the process is likely to speed up. If the Camunda engine and Optimize are physically far away from each other, the network latency might slow down the import. +The import is also affected by how the involved components are set up. For instance, if you deploy the Camunda engine on a different machine than Optimize and Elasticsearch/OpenSearch to provide both applications with more computation resources, the process is likely to speed up. If the Camunda engine and Optimize are physically far away from each other, the network latency might slow down the import. ### Setup @@ -135,7 +135,7 @@ During execution, the following steps are performed: 2. Map entities and add an import job 3. [Execute the import](#execute-the-import). 1. Poll a job - 2. Persist the new entities to Elasticsearch + 2. Persist the new entities to the database ### Start an import round @@ -175,33 +175,37 @@ First, the `ImportScheduler` retrieves the newest index, which identifies the la #### Map entities and add an import job -All fetched entities are mapped to a representation that allows Optimize to query the data very quickly. Subsequently, an import job is created and added to the queue to persist the data in Elasticsearch. +All fetched entities are mapped to a representation that allows Optimize to query the data very quickly. Subsequently, an import job is created and added to the queue to persist the data in the database. ### Execute the import Full aggregation of the data is performed by a dedicated `ImportJobExecutor` for each entity type, which waits for `ImportJob` instances to be added to the execution queue. As soon as a job is in the queue, the executor: - Polls the job with the new Optimize entities -- Persists the new entities to Elasticsearch +- Persists the new entities to the database The data from the engine and Optimize do not have a one-to-one relationship, i.e., one entity type in Optimize may consist of data aggregated from different data types of the engine. For example, the historic process instance is first mapped to an Optimize `ProcessInstance`. However, for the heatmap analysis it is also necessary for `ProcessInstance` to contain all activities that were executed in the process instance. -Therefore, the Optimize `ProcessInstance` is an aggregation of the engine's historic process instance and other related data: historic activity instance data, user task data, and variable data are all [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) within Optimize's `ProcessInstance` representation. +Therefore, the Optimize `ProcessInstance` is an aggregation of the engine's historic process instance and other related data: historic activity instance data, user task data, and variable data are all nested documents ([ES](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) / [OS](https://opensearch.org/docs/latest/field-types/supported-field-types/nested/)) within Optimize's `ProcessInstance` representation. :::note -Optimize uses [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html), the above mentioned data is an example of documents that are nested within Optimize's `ProcessInstance` index. 
+Optimize uses nested documents ([ES](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) / [OS](https://opensearch.org/docs/latest/field-types/supported-field-types/nested/)), the above mentioned data is an example of documents that are nested within Optimize's `ProcessInstance` index. -Elasticsearch applies restrictions regarding how many objects can be nested within one document. If your data includes too many nested documents, you may experience import failures. To avoid this, you can temporarily increase the nested object limit in Optimize's [index configuration](./../configuration/system-configuration.md#index-settings). Note that this might cause memory errors. +Elasticsearch and OpenSearch apply restrictions regarding how many objects can be nested within one document. If your data includes too many nested documents, you may experience import failures. To avoid this, you can temporarily increase the nested object limit in Optimize's [index configuration](./../configuration/system-configuration.md#index-settings). Note that this might cause memory errors. ::: Import executions per engine entity are actually independent from another. Each follows a [producer-consumer-pattern](https://dzone.com/articles/producer-consumer-pattern), where the type specific `ImportService` is the single producer and a dedicated single `ImportJobExecutor` is the consumer of its import jobs, decoupled by a queue. So, both are executed in different threads. To adjust the processing speed of the executor, the queue size and the number of threads that process the import jobs can be configured: +:::note +Although the parameters below include `ElasticSearch` in their name, they apply to both ElasticSearch and OpenSearch installations. For backward compatibility reasons, the parameters have not been renamed. +::: + ```yaml import: # Number of threads being used to process the import jobs per data type that are writing - # data to elasticsearch. + # data to the database. elasticsearchJobExecutorThreadCount: 1 - # Adjust the queue size of the import jobs per data type that store data to elasticsearch. + # Adjust the queue size of the import jobs per data type that store data to the database. # A too large value might cause memory problems. elasticsearchJobExecutorQueueSize: 5 ``` diff --git a/optimize/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster.md b/optimize/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster.md index 70253979927..bb01105678e 100644 --- a/optimize/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster.md +++ b/optimize/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster.md @@ -18,10 +18,6 @@ The following illustration demonstrates this use case with two Optimize instance Changing the value of `*.settings.index.prefix` after an instance was already running results in new indexes being created with the new prefix value. There is no support in migrating data between indexes based on different prefixes. ::: -:::note -Not all Optimize features are supported when using OpenSearch as a database. For a full list of the features that are currently supported, please refer to the [Camunda 7](https://github.com/camunda/issues/issues/705) and [Camunda 8](https://github.com/camunda/issues/issues/635) OpenSearch features. -::: - \* Elasticsearch index prefix settings path: `es.settings.index.prefix`
    \* OpenSearch index prefix settings path: `opensearch.settings.index.prefix` ![Shared Elasticsearch Cluster Setup](img/shared-elasticsearch-cluster.png) diff --git a/optimize/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md b/optimize/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md index e5491a96aea..2144aa97aaf 100644 --- a/optimize/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md +++ b/optimize/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md @@ -18,20 +18,20 @@ in Optimize. Using any other history level will result in less data and/or funct history in a connected engine should be configured for long enough for Optimize to import it. If data is removed from an engine before Optimize has imported it, that data will not be available in Optimize. -| YAML path | Default value | Description | -| ---------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| engines.${engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | -| engines.${engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | -| engines.${engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | -| engines.${engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | -| engines.${engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | -| engines.${engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | -| engines.${engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | -| engines.${engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | -| engines.${engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | -| engines.${engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | -| engines.${engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | -| engines.${engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | +| YAML path | Default value | Description | +| ----------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| engines.$\{engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | +| engines.$\{engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | +| engines.$\{engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | +| engines.$\{engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | +| engines.$\{engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | +| engines.$\{engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | +| engines.$\{engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | +| engines.$\{engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | +| engines.$\{engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | +| engines.$\{engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | +| engines.$\{engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | +| engines.$\{engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | ## Camunda 7 common import settings @@ -64,16 +64,18 @@ REST API endpoint locations, timeouts, etc. | import.data.user-task-worker.metadata.maxPageSize | 10000 | The max page size when multiple users or groups are iterated during the metadata refresh. | | import.data.user-task-worker.metadata.maxEntryLimit | 100000 | The entry limit of the cache that holds the metadata, if you need more entries you can increase that limit. When increasing the limit, keep in mind to account for that by increasing the JVM heap memory as well. Please refer to the "Adjust Optimize heap size" documentation. | | import.skipDataAfterNestedDocLimitReached | false | Some data can no longer be imported to a given document if its number of nested documents has reached the configured limit. Enable this setting to skip this data during import if the nested document limit has been reached. | -| import.elasticsearchJobExecutorThreadCount | 1 | Number of threads being used to process the import jobs per data type that are writing data to elasticsearch. | -| import.elasticsearchJobExecutorQueueSize | 5 | Adjust the queue size of the import jobs per data type that store data to elasticsearch. If the value is too large it might cause memory problems. | +| import.elasticsearchJobExecutorThreadCount\* | 1 | Number of threads being used to process the import jobs per data type that are writing data to the database. | +| import.elasticsearchJobExecutorQueueSize\* | 5 | Adjust the queue size of the import jobs per data type that store data to the database. If the value is too large it might cause memory problems. | | import.handler.backoff.interval | 5000 | Interval in milliseconds which is used for the backoff time calculation. | | import.handler.backoff.max | 15 | Once all pages are consumed, the import scheduler component will start scheduling fetching tasks in increasing periods of time, controlled by "backoff" counter. | | import.handler.backoff.isEnabled | true | Tells if the backoff is enabled of not. | | import.indexType | import-index | The name of the import index type. | -| import.importIndexStorageIntervalInSec | 10 | States how often the import index should be stored to Elasticsearch. | +| import.importIndexStorageIntervalInSec | 10 | States how often the import index should be stored to the database. | | import.currentTimeBackoffMilliseconds | 300000 | This is the time interval the import backs off from the current tip of the time during the ongoing import cycle. This ensures that potentially missed concurrent writes in the engine are reread going back by the amount of this time interval. | | import.identitySync.includeUserMetaData | true | Whether to include metaData (firstName, lastName, email) when synchronizing users. If disabled only user IDs will be shown on user search and in collection permissions. 
| | import.identitySync.collectionRoleCleanupEnabled | false | Whether collection role cleanup should be performed. If enabled, users that no longer exist in the identity provider will be automatically removed from collection permissions. | | import.identitySync.cronTrigger | `0 */2 * * *` | Cron expression for when the identity sync should run, defaults to every second hour. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here.

For details on the format, please refer to:
    • [Cron Expression Description](https://en.wikipedia.org/wiki/Cron)
    • [Spring Cron Expression Documentation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronSequenceGenerator.html)
    | | import.identitySync.maxPageSize | 10000 | The max page size when multiple users or groups are iterated during the import. | | import.identitySync.maxEntryLimit | 100000 | The entry limit of the user/group search cache. When increasing the limit, keep in mind to account for this by increasing the JVM heap memory as well. Please refer to the "Adjust Optimize heap size" documentation on how to configure the heap size. | + +\* Although this parameter includes `ElasticSearch` in its name, it applies to both ElasticSearch and OpenSearch installations. For backward compatibility reasons, the parameter has not been renamed. diff --git a/optimize/self-managed/optimize-deployment/configuration/system-configuration.md b/optimize/self-managed/optimize-deployment/configuration/system-configuration.md index 4f4fbe64dc5..e09bcdaac7b 100644 --- a/optimize/self-managed/optimize-deployment/configuration/system-configuration.md +++ b/optimize/self-managed/optimize-deployment/configuration/system-configuration.md @@ -6,10 +6,6 @@ description: "An overview of all possible configuration options in Optimize." All distributions of Camunda Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config` which contains a file called `environment-config.yaml` with values that override the default Optimize properties. -:::note -When converting configuration properties to environment variables, ensure the `CAMUNDA_OPTIMIZE_` prefix is used (for example, `CAMUNDA_OPTIMIZE_API_ACCESSTOKEN`). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. -::: - You can see a sample configuration file with all possible configuration fields and their default values [here](service-config.yaml). @@ -190,10 +186,6 @@ Define a secured connection to be able to communicate with a secured Elasticsear These settings are only relevant when operating Optimize with OpenSearch. -:::note -Not all Optimize features are supported when using OpenSearch as a database. For a full list of the features that are currently supported, please refer to the [Camunda 7](https://github.com/camunda/issues/issues/705) and [Camunda 8](https://github.com/camunda/issues/issues/635) OpenSearch features. -::: - #### Connection settings This section details everything related to building the connection to OpenSearch. @@ -202,25 +194,20 @@ This section details everything related to building the connection to OpenSearch You can define a number of connection points in a cluster. Therefore, everything under `opensearch.connection.nodes` is a list of nodes Optimize can connect to. If you have built an OpenSearch cluster with several nodes, it is recommended to define several connection points so if one node fails, Optimize is still able to talk to the cluster. ::: -| YAML path | Default value | Description | -| ----------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| opensearch.connection.timeout | 10000 | Maximum time without connection to OpenSearch that Optimize should wait until a timeout triggers. | -| opensearch.connection.responseConsumerBufferLimitInMb | 100 | Maximum size of the OpenSearch response consumer heap buffer. 
This can be increased to resolve errors from OpenSearch relating to the entity content being too long. | -| opensearch.connection.pathPrefix | | The path prefix under which OpenSearch is available. | -| opensearch.connection.nodes[*].host | localhost | The address/hostname under which the OpenSearch node is available. | -| opensearch.connection.nodes[*].httpPort | 9205 | A port number used by OpenSearch to accept HTTP connections. | -| opensearch.connection.proxy.enabled | false | Whether an HTTP proxy should be used for requests to OpenSearch. | -| opensearch.connection.proxy.host | null | The proxy host to use, must be set if `opensearch.connection.proxy.enabled = true`. | -| opensearch.connection.proxy.port | null | The proxy port to use, must be set if `opensearch.connection.proxy.enabled = true`. | -| opensearch.connection.proxy.sslEnabled | false | Whether this proxy is using a secured connection (HTTPS). | -| opensearch.connection.skipHostnameVerification | false | Determines whether the hostname verification should be skipped. | -| opensearch.connection.awsEnabled | false | Determines if AWS credentials shall be used for authentication | +| YAML path | Default value | Description | +| ---------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------- | +| opensearch.connection.timeout | 10000 | Maximum time without connection to OpenSearch that Optimize should wait until a timeout triggers. | +| opensearch.connection.pathPrefix | | The path prefix under which OpenSearch is available. | +| opensearch.connection.nodes[*].host | localhost | The address/hostname under which the OpenSearch node is available. | +| opensearch.connection.nodes[*].httpPort | 9205 | A port number used by OpenSearch to accept HTTP connections. | +| opensearch.connection.skipHostnameVerification | false | Determines whether the hostname verification should be skipped. | +| opensearch.connection.awsEnabled | false | Determines if AWS credentials shall be used for authentication | #### Index settings | YAML path | Default value | Description | | ------------------------------------------------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| opensearch.settings.index.prefix | optimize | The prefix prepended to all Optimize index and alias `namopensearch`. Custom values allow you to operate multiple isolated Optimize instances on one OpenSearch cluster.

    NOTE: Changing this after Optimize has already run will create new empty indexes. | +| opensearch.settings.index.prefix | optimize | The prefix prepended to all Optimize index and alias names. Custom values allow you to operate multiple isolated Optimize instances on one OpenSearch cluster.

    NOTE: Changing this after Optimize has already run will create new empty indexes. | | opensearch.settings.index.number_of_replicas | 1 | How often data should be replicated to handle node failures. | | opensearch.settings.index.number_of_shards | 1 | How many shards should be used in the cluster for process instance and decision instance indices. All other indices will be made up of a single shard.

    NOTE: This property only applies the first time Optimize is started and the schema/mapping is deployed on OpenSearch. If you want this property to take effect again, you need to delete all indices (and with that all data) and restart Optimize. | | opensearch.settings.index.refresh_interval | 2s | How long OpenSearch waits until the documents are available for search. A positive value defines the duration in seconds. A value of -1 means a refresh needs to be done manually. | @@ -232,8 +219,8 @@ Define a secured connection to be able to communicate with a secured OpenSearch | YAML path | Default value | Description | | ----------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| opensearch.security.username | | The basic authentication (x-pack) username. | -| opensearch.security.password | | The basic authentication (x-pack) password. | +| opensearch.security.username | | The basic authentication username. | +| opensearch.security.password | | The basic authentication password. | | opensearch.security.ssl.enabled | false | Used to enable or disable TLS/SSL for the HTTP connection. | | opensearch.security.ssl.certificate | | The path to a PEM encoded file containing the certificate (or certificate chain) that will be presented to clients when they connect. | | opensearch.security.ssl.certificate_authorities | [ ] | A list of paths to PEM encoded CA certificate files that should be trusted, for example ['/path/to/ca.crt'].

    NOTE: if you are using a public CA that is already trusted by the Java runtime, you do not need to set the certificate_authorities. | @@ -245,10 +232,6 @@ Define a secured connection to be able to communicate with a secured OpenSearch | -------------------------------- | ------------- | ------------------------------------------------------------------------ | | opensearch.backup.repositoryName | "" | The name of the snapshot repository to be used to back up Optimize data. | -:::note -The backup functionality is not yet supported for OpenSearch. -::: - ### Email Settings for the email server to send email notifications, e.g. when an alert is triggered. @@ -279,16 +262,16 @@ Settings influencing the process digest feature. Settings for webhooks which can receive custom alert notifications. You can configure multiple webhooks which will be available to select from when creating or editing alerts. Each webhook configuration should have a unique human readable name which will appear in the Optimize UI. -| YAML path | Default value | Description | -| -------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| webhookAlerting.webhooks.${webhookName}.url | | The URL of the webhook. | -| webhookAlerting.webhooks.${webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | -| webhookAlerting.webhooks.${webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | -| webhookAlerting.webhooks.${webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | +| YAML path | Default value | Description | +| --------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| webhookAlerting.webhooks.$\{webhookName}.url | | The URL of the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. 
See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | ### History cleanup settings @@ -298,20 +281,20 @@ Settings for automatic cleanup of historic process/decision instances based on t Two types of history cleanup are available for Camunda 8 users at this time - process data cleanup and external variable cleanup. For more information, see [History cleanup](/optimize/self-managed/optimize-deployment/configuration/history-cleanup.md). ::: -| YAML path | Default value | Description | -| -------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | -| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | -| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | -| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. 
| -| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | -| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.ttl | | Time to live to use for process instances of the process definition with the ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the ${key}. | -| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | -| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its ${key}. | -| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.${key}.ttl | | Time to live to use for decision instances of the decision definition with the ${key}. | -| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | +| YAML path | Default value | Description | +| --------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | +| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | +| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. 
\[true/false\] | +| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | +| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | +| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.ttl | | Time to live to use for process instances of the process definition with the $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the $\{key}. | +| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | +| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its $\{key}. | +| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.$\{key}.ttl | | Time to live to use for decision instances of the decision definition with the $\{key}. | +| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | ### Localization diff --git a/optimize/self-managed/optimize-deployment/install-and-start.md b/optimize/self-managed/optimize-deployment/install-and-start.md index 2c99ebde0ef..a514fe0b8bf 100644 --- a/optimize/self-managed/optimize-deployment/install-and-start.md +++ b/optimize/self-managed/optimize-deployment/install-and-start.md @@ -89,10 +89,6 @@ After that, [configure the database connection](./configuration/getting-started. #### Getting started with the Optimize Docker image -:::note -Not all Optimize features are supported when using OpenSearch as a database. For a full list of the features that are currently supported, please refer to the [Camunda 7](https://github.com/camunda/issues/issues/705) and [Camunda 8](https://github.com/camunda/issues/issues/635) OpenSearch features. -::: - ##### Full local setup To start the Optimize Docker image and connect to an already locally running Camunda 7 as well as Elasticsearch instance you could run the following command: diff --git a/optimize/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md b/optimize/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md index 34b2b2b0421..c7f9665f08a 100644 --- a/optimize/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md +++ b/optimize/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md @@ -21,7 +21,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. 
This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}). -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}). +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) -To enable this feature for your old data, follow the steps in the [engine data reimport guide]({{< ref "/technical-guide/reimport/_index.md" >}}). +To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../../reimport.md). diff --git a/optimize/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md b/optimize/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md index 69a8362bd1a..d042f1c64cb 100644 --- a/optimize/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md +++ b/optimize/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md @@ -22,7 +22,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}) -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}) +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../../reimport.md). diff --git a/optimize/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md b/optimize/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md index a4a22d70fc2..ebf210b86f1 100644 --- a/optimize/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md +++ b/optimize/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md @@ -60,7 +60,7 @@ This approach requires you to manually execute the update script. You can perfor - Open up a terminal, change to the root directory of your **new** Optimize version and run the following command: `./upgrade/upgrade.sh` on Linux or `./upgrade/upgrade.bat` on Windows. For OpenSearch installations, please make sure to set the environment variable `CAMUNDA_OPTIMIZE_DATABASE=opensearch` before executing the update script. - During the execution the executable will output a warning to ask you to back-up your database data. Type `yes` to confirm that you have backed up the data. -- Feel free to [file a support case](https://docs.camunda.org/enterprise/support/) if any errors occur during the migration process. +- Feel free to [file a support case](https://camunda.com/services/enterprise-support-guide/) if any errors occur during the migration process. 
- To get more verbose information about the update, you can adjust the logging level as it is described in the [configuration documentation](./../../configuration/logging.md). #### 3.2 Automatic update execution (Optimize >3.2.0) diff --git a/optimize/self-managed/optimize-deployment/reimport.md b/optimize/self-managed/optimize-deployment/reimport.md index 20f459d8828..5b1007df00e 100644 --- a/optimize/self-managed/optimize-deployment/reimport.md +++ b/optimize/self-managed/optimize-deployment/reimport.md @@ -16,6 +16,9 @@ When triggering a reimport, all existing event-based processes get unpublished a You then have to manually publish event-based processes after you have restarted Optimize. ::: +:::note +Engine data reimport is only available when using Optimize with ElasticSearch as a database. +::: To reimport engine data, perform the following steps: diff --git a/optimize_sidebars.js b/optimize_sidebars.js index 03f10d2df8c..fc2214ed09e 100644 --- a/optimize_sidebars.js +++ b/optimize_sidebars.js @@ -964,6 +964,10 @@ module.exports = { "Google Sheets Connector", "components/connectors/out-of-the-box-connectors/google-sheets/" ), + docsLink( + "Google Gemini Connector", + "components/connectors/out-of-the-box-connectors/google-gemini/" + ), ], }, @@ -1068,57 +1072,9 @@ module.exports = { ), docsLink( "Connector templates", + "components/connectors/custom-built-connectors/connector-template-generator", "components/connectors/custom-built-connectors/connector-templates/" ), - - { - "Update guide": [ - docsLink( - "Connector SDK updates", - "components/connectors/custom-built-connectors/update-guide/introduction/" - ), - docsLink( - "Update 0.10 to 0.11", - "components/connectors/custom-built-connectors/update-guide/0100-to-0110/" - ), - docsLink( - "Update 0.9 to 0.10", - "components/connectors/custom-built-connectors/update-guide/090-to-0100/" - ), - docsLink( - "Update 0.8 to 0.9", - "components/connectors/custom-built-connectors/update-guide/080-to-090/" - ), - docsLink( - "Update 0.7 to 0.8", - "components/connectors/custom-built-connectors/update-guide/070-to-080/" - ), - docsLink( - "Update 0.6 to 0.7", - "components/connectors/custom-built-connectors/update-guide/060-to-070/" - ), - docsLink( - "Update 0.5 to 0.6", - "components/connectors/custom-built-connectors/update-guide/050-to-060/" - ), - docsLink( - "Update 0.4 to 0.5", - "components/connectors/custom-built-connectors/update-guide/040-to-050/" - ), - docsLink( - "Update 0.3 to 0.4", - "components/connectors/custom-built-connectors/update-guide/030-to-040/" - ), - docsLink( - "Update 0.2 to 0.3", - "components/connectors/custom-built-connectors/update-guide/020-to-030/" - ), - docsLink( - "Update 0.1 to 0.2", - "components/connectors/custom-built-connectors/update-guide/010-to-020/" - ), - ], - }, ], }, ], @@ -1595,7 +1551,7 @@ module.exports = { Cluster: [ docsLink( "Get cluster topology", - "apis-tools/camunda-api-rest/specifications/get-cluster-topology/" + "apis-tools/camunda-api-rest/specifications/get-topology/" ), ], }, @@ -2091,11 +2047,6 @@ module.exports = { }, ], }, - - docsLink( - "Migrate to Zeebe user tasks", - "apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks/" - ), ], }, @@ -2276,184 +2227,127 @@ module.exports = { }, ], }, - - { - "Zeebe API (REST)": [ - docsLink( - "Overview", - "apis-tools/zeebe-api-rest/zeebe-api-rest-overview/" - ), - docsLink( - "Authentication", - "apis-tools/zeebe-api-rest/zeebe-api-rest-authentication/" - ), - docsLink( - "Tutorial", - 
"apis-tools/zeebe-api-rest/zeebe-api-tutorial/" - ), - - { - Specifications: [ - docsLink( - "Introduction", - "apis-tools/zeebe-api-rest/specifications/zeebe-rest-api/" - ), - - { - Cluster: [ - docsLink( - "Get cluster topology", - "apis-tools/zeebe-api-rest/specifications/get-cluster-topology/" - ), - ], - }, - - { - "User task": [ - docsLink( - "Complete a user task", - "apis-tools/zeebe-api-rest/specifications/complete-a-user-task/" - ), - docsLink( - "Assign a user task", - "apis-tools/zeebe-api-rest/specifications/assign-a-user-task/" - ), - docsLink( - "Update a user task", - "apis-tools/zeebe-api-rest/specifications/update-a-user-task/" - ), - docsLink( - "Unassign a user task", - "apis-tools/zeebe-api-rest/specifications/unassign-a-user-task/" - ), - ], - }, - ], - }, - ], - }, ], }, ], }, { - Clients: [ + "Clients & SDKs": [ { - "Java client": [ - docsLink("Quick reference", "apis-tools/java-client/"), - docsLink("Job worker", "apis-tools/java-client/job-worker/"), - docsLink("Logging", "apis-tools/java-client/logging/"), - docsLink( - "Zeebe Process Test", - "apis-tools/java-client/zeebe-process-test/" - ), + SDKs: [ + docsLink("Node.js", "apis-tools/node-js-sdk/"), { - Examples: [ - docsLink("Overview", "apis-tools/java-client-examples/"), - docsLink( - "Deploy a process", - "apis-tools/java-client-examples/process-deploy/" - ), - docsLink( - "Create a process instance", - "apis-tools/java-client-examples/process-instance-create/" - ), - docsLink( - "Create non-blocking process instances", - "apis-tools/java-client-examples/process-instance-create-nonblocking/" - ), + "Spring Zeebe": [ docsLink( - "Create a process instance with results", - "apis-tools/java-client-examples/process-instance-create-with-result/" + "Getting started", + "apis-tools/spring-zeebe-sdk/getting-started/" ), docsLink( - "Evaluate a decision", - "apis-tools/java-client-examples/decision-evaluate/" - ), - docsLink( - "Open a job worker", - "apis-tools/java-client-examples/job-worker-open/" - ), - docsLink( - "Handle variables as POJO", - "apis-tools/java-client-examples/data-pojo/" - ), - docsLink( - "Request cluster topology", - "apis-tools/java-client-examples/cluster-topology-request/" + "Configuration", + "apis-tools/spring-zeebe-sdk/configuration/" ), ], }, ], }, - { - "Community clients": [ - docsLink("Component clients", "apis-tools/community-clients/"), - + Clients: [ { - "Zeebe clients": [ - docsLink("C#", "apis-tools/community-clients/c-sharp/"), + "Java client": [ + docsLink("Quick reference", "apis-tools/java-client/"), + docsLink("Job worker", "apis-tools/java-client/job-worker/"), + docsLink("Logging", "apis-tools/java-client/logging/"), docsLink( - "JavaScript/Node.js", - "apis-tools/community-clients/javascript/" + "Zeebe Process Test", + "apis-tools/java-client/zeebe-process-test/" ), - docsLink( - "Micronaut", - "apis-tools/community-clients/micronaut/" - ), - docsLink("Python", "apis-tools/community-clients/python/"), - docsLink("Ruby", "apis-tools/community-clients/ruby/"), - docsLink("Rust", "apis-tools/community-clients/rust/"), - docsLink("Spring", "apis-tools/community-clients/spring/"), - docsLink("Quarkus", "apis-tools/community-clients/quarkus/"), + { - "CLI client": [ - docsLink("Quick reference", "apis-tools/cli-client/"), + Examples: [ + docsLink("Overview", "apis-tools/java-client-examples/"), + docsLink( + "Deploy a process", + "apis-tools/java-client-examples/process-deploy/" + ), + docsLink( + "Create a process instance", + 
"apis-tools/java-client-examples/process-instance-create/" + ), + docsLink( + "Create non-blocking process instances", + "apis-tools/java-client-examples/process-instance-create-nonblocking/" + ), + docsLink( + "Create a process instance with results", + "apis-tools/java-client-examples/process-instance-create-with-result/" + ), + docsLink( + "Evaluate a decision", + "apis-tools/java-client-examples/decision-evaluate/" + ), + docsLink( + "Open a job worker", + "apis-tools/java-client-examples/job-worker-open/" + ), + docsLink( + "Handle variables as POJO", + "apis-tools/java-client-examples/data-pojo/" + ), docsLink( - "Getting started with the CLI client", - "apis-tools/cli-client/cli-get-started/" + "Request cluster topology", + "apis-tools/java-client-examples/cluster-topology-request/" ), ], }, + ], + }, + + { + "Community clients": [ + docsLink("Component clients", "apis-tools/community-clients/"), { - "Go client": [ - docsLink("Quick reference", "apis-tools/go-client/"), + "Zeebe clients": [ docsLink( - "Getting started with the Go client", - "apis-tools/go-client/go-get-started/" + "JavaScript/Node.js", + "apis-tools/community-clients/javascript/" ), - docsLink("Job worker", "apis-tools/go-client/job-worker/"), + docsLink("Spring", "apis-tools/community-clients/spring/"), + { + "CLI client": [ + docsLink("Quick reference", "apis-tools/cli-client/"), + docsLink( + "Getting started with the CLI client", + "apis-tools/cli-client/cli-get-started/" + ), + ], + }, + + { + "Go client": [ + docsLink("Quick reference", "apis-tools/go-client/"), + docsLink( + "Getting started with the Go client", + "apis-tools/go-client/go-get-started/" + ), + docsLink( + "Job worker", + "apis-tools/go-client/job-worker/" + ), + ], + }, ], }, + docsLink( + "Build your own client", + "apis-tools/build-your-own-client/" + ), ], }, ], }, - - docsLink("Build your own client", "apis-tools/build-your-own-client/"), - ], - }, - - { - SDKs: [ - docsLink("Node.js", "apis-tools/node-js-sdk/"), - - { - "Spring Zeebe": [ - docsLink( - "Getting started", - "apis-tools/spring-zeebe-sdk/getting-started/" - ), - docsLink( - "Configuration", - "apis-tools/spring-zeebe-sdk/configuration/" - ), - ], - }, ], }, @@ -2624,7 +2518,7 @@ module.exports = { "self-managed/setup/guides/accessing-components-without-ingress/" ), docsLink( - "Combined and separated Ingress setup", + "Ingress setup", "self-managed/setup/guides/ingress-setup/" ), docsLink( diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/configuration/disable-sharing.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/configuration/disable-sharing.md deleted file mode 100644 index 15861396ed6..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/configuration/disable-sharing.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: disable-sharing -title: "Disable sharing" -description: "The REST API to disable sharing" ---- - -This API allows users to disable the sharing functionality for all reports and dashboards in Optimize. Note that this setting will be permanently persisted in memory and will take precedence over any other previous configurations (e.g. configuration files). - -When sharing is disabled, previously shared URLs will no longer be accessible. Upon re-enabling sharing, the previously shared URLs will work once again under the same address as before. Calling this endpoint when sharing is already disabled will have no effect. 
- -## Method & HTTP target resource - -POST `api/public/share/disable` - -## Request headers - -The following request headers must be provided with every request: - -| Header | Constraints | Value | -| ------------- | ----------- | ----------------------------------------------------- | -| Authorization | REQUIRED | See [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -No query parameters necessary. - -## Request body - -An empty request body should be sent. - -## Response codes - -Possible HTTP Response Status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 500 | Some error occurred while processing the request, best check the Optimize log. | - -## Example - -### Disable sharing - -POST `api/public/share/disable` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Response - -Status 204 (Successful) - -#### Response content - -``` -no content -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/configuration/enable-sharing.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/configuration/enable-sharing.md deleted file mode 100644 index eb4910e9132..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/configuration/enable-sharing.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: enable-sharing -title: "Enable sharing" -description: "The REST API to enable sharing" ---- - -This API allows users to enable the sharing functionality for all reports and dashboards in Optimize. Note that this setting will be permanently persisted in memory and will take precedence over any other previous configurations (e.g. configuration files). - -If sharing had been previously enabled and then disabled, re-enabling sharing will allow users to access previously shared URLs under the same address as before. Calling this endpoint when sharing is already enabled will have no effect. - -## Method & HTTP target resource - -POST `api/public/share/enable` - -## Request headers - -The following request headers must be provided with every request: - -| Header | Constraints | Value | -| ------------- | ----------- | ----------------------------------------------------- | -| Authorization | REQUIRED | See [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -No query parameters necessary. - -## Request body - -An empty request body should be sent. - -## Response codes - -Possible HTTP Response Status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 500 | Some error occurred while processing the request, best check the Optimize log. 
| - -## Example - -### Enable sharing - -POST `api/public/share/enable` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Response - -Status 204 (Successful) - -#### Response content - -``` -no content -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/delete-dashboard.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/delete-dashboard.md deleted file mode 100644 index fbdf2073b91..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/delete-dashboard.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -id: delete-dashboard -title: "Delete dashboards" -description: "The REST API to delete dashboards from Optimize." ---- - -The dashboards deletion API allows you to delete dashboards by ID from Optimize. - -:::note Heads up! -The deletion of a dashboard does not affect the referenced reports. -::: - -## Method & HTTP target resource - -DELETE `/api/public/dashboard/{dashboard-ID}` - -Where `dashboard-ID` is the ID of the dashboard you wish to delete. - -## Request headers - -The following request headers have to be provided with every delete request: - -| Header | Constraints | Value | -| ------------- | ----------- | ----------------------------------------------------- | -| Authorization | REQUIRED | See [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -No query parameters available. - -## Request body - -No request body is required. - -## Result - -No response body. - -## Response codes - -Possible HTTP Response status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 404 | The requested dashboard was not found, please check the provided dashboard-ID. | -| 500 | Some error occurred while processing the request, best check the Optimize log. | - -## Example - -### Delete a dashboard - -Let's assume you want to delete a dashboard with the ID `e6c5abb1-6a18-44e7-8480-d562d511ba62`, this is what it would look like: - -DELETE `/api/public/dashboard/e6c5aaa1-6a18-44e7-8480-d562d511ba62` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Response - -Status 204. diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/export-dashboard-definitions.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/export-dashboard-definitions.md deleted file mode 100644 index 43dec7cd627..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/export-dashboard-definitions.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -id: export-dashboard-definitions -title: "Export dashboard definitions" -description: "The REST API to export dashboard definitions." ---- - -This API allows users to export dashboard definitions which can later be imported into another Optimize system. Note that exporting a dashboard also exports all reports contained within the dashboard. The dashboards to be exported may be within a Collection or private entities, the API has access to both. - -The obtained list of entity exports can be imported into other Optimize systems either using the dedicated [import API](../import-entities.md) or [via UI](components/userguide/additional-features/export-import.md#importing-entities). 
- -## Method & HTTP target resource - -POST `/api/public/export/dashboard/definition/json` - -## Request headers - -The following request headers have to be provided with every request: - -| Header | Constraints | Value | -| ------------- | ----------- | ------------------------------------------------- | -| Authorization | REQUIRED | [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -No query parameters available. - -## Request body - -The request body should contain a JSON array of dashboard IDs to be exported. - -## Result - -The response contains a list of exported dashboard definitions as well as all report definitions contained within the dashboards. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 404 | At least one of the given dashboard IDs does not exist. | -| 500 | Some error occurred while processing the request, best check the Optimize log. | - -## Example - -### Export two dashboards - -Assuming you want to export the two dashboards with IDs `123` and `456` and have configured the accessToken `mySecret`, this is what it would look like: - -POST `/api/public/export/dashboard/definition/json` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Request body - -``` -[ "123", "456" ] -``` - -#### Response - -Status 200. - -#### Response content - -The response contains the two exported dashboard definitions as well as all three process reports contained within the two dashboards. - -``` -[ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - "exportEntityType": "single_process_report", - "name": "Number: Process instance duration", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "625c2411-b95f-4442-936b-1976b9511d4a", - "exportEntityType": "single_process_report", - "name": "Heatmap: Flownode count", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "94a7252e-d5c3-45ea-9906-75271cc0cac2", - "exportEntityType": "single_process_report", - "name": "Data Table: User task count", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "123", - "exportEntityType": "dashboard", - "name": "Dashboard 1", - "sourceIndexVersion": 5, - "reports": [ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - ... - }, - { - "id": "625c2411-b95f-4442-936b-1976b9511d4a", - ... - } - ], - "availableFilters": [...], - "collectionId": null - }, - { - "id": "456", - "exportEntityType": "dashboard", - "name": "Dashboard 2", - "sourceIndexVersion": 5, - "reports": [ - { - "id": "94a7252e-d5c3-45ea-9906-75271cc0cac2", - ... - } - ], - "availableFilters": [...], - "collectionId": null - } -] -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/get-dashboard-ids.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/get-dashboard-ids.md deleted file mode 100644 index 5555be55246..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/dashboard/get-dashboard-ids.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -id: get-dashboard-ids -title: "Get dashboard IDs" -description: "The REST API to retrieve all dashboard IDs in a given collection." 
---- - -This API allows users to retrieve all dashboard IDs from a given collection. - -## Method & HTTP target resource - -GET `/api/public/dashboard` - -## Request headers - -The following request headers have to be provided with every request: - -| Header | Constraints | Value | -| ------------- | ----------- | ------------------------------------------------- | -| Authorization | REQUIRED | [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -The following query parameters have to be provided with every request: - -| Parameter | Constraints | Value | -| ------------ | ----------- | ----------------------------------------------------------------- | -| collectionId | REQUIRED | The ID of the collection for which to retrieve the dashboard IDs. | - -## Request body - -No request body is required. - -## Result - -The response contains a list of IDs of the dashboards existing in the collection with the given collection ID. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 200 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 500 | Some error occurred while processing the request, best check the Optimize log. | - -## Example - -### Retrieve all dashboard IDs from a collection - -Assuming you want to retrieve all dashboard IDs in the collection with the ID `1234` and have configured the accessToken `mySecret`, this is what it would look like: - -GET `/api/public/dashboard?collectionId=1234` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Response - -Status 200. - -#### Response content - -``` -[ - { - "id": "9b0eb845-e8ed-4824-bd85-8cd69038f2f5" - }, - { - "id": "1a866c7c-563e-4f6b-adf1-c4648531f7d4" - } -] -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/event-ingestion.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/event-ingestion.md deleted file mode 100644 index 996539c0037..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/event-ingestion.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -id: event-ingestion -title: "Event ingestion" -description: "The REST API to ingest external events into Optimize." ---- - -Camunda 7 only - -The Event Ingestion REST API ingests business process related event data from any third-party system to Camunda Optimize. These events can then be correlated into an [event-based process](components/userguide/additional-features/event-based-processes.md) in Optimize to get business insights into business processes that are not yet fully modeled nor automated using Camunda 7. - -## Functionality - -The Event Ingestion REST API has the following functionality: - -1. Ingest new event data in batches, see the example on [ingesting three cloud events](#ingest-cloud-events). -2. Reingest/override previously ingested events, see the example on [reingesting cloud events](#reingest-cloud-events). - -## CloudEvents compliance - -To provide the best interoperability possible, the Optimize Event Ingestion REST API implements the [CloudEvents Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/spec.md) specification, which is hosted by the [Cloud Native Computing Foundation (CNCF)](https://www.cncf.io/). 
- -In particular, the Optimize Event Ingestion REST API is a CloudEvents consumer implemented as an HTTP Web Hook, as defined by the [CloudEvents HTTP 1.1 Web Hooks for Event Delivery - Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md) specification. Following the [Structured Content Mode](https://github.com/cloudevents/spec/blob/v1.0/http-protocol-binding.md#32-structured-content-mode) of the [HTTP Protocol Binding for CloudEvents - Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/http-protocol-binding.md), event context attributes and event data is encoded in the [JSON Batch Format](https://github.com/cloudevents/spec/blob/v1.0/json-format.md#4-json-batch-format) of the [CloudEvents JSON Event Format Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/json-format.md). - -## Authorization - -As required by the [CloudEvents HTTP 1.1 Web Hooks for Event Delivery - Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#3-authorization) specification, every [Event Ingestion REST API Request](#method-and-http-target-resource) needs to include an authorization token as an [`Authorization`](https://tools.ietf.org/html/rfc7235#section-4.2) request header. - -Details on how to configure and pass this token can be found [here](./optimize-api-authorization.md). - -## Method and HTTP target resource - -POST `/api/ingestion/event/batch` - -## Request headers - -The following request headers have to be provided with every ingest request: - -| Header | Constraints | Value | -| -------------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------- | -| Authorization | REQUIRED | See [Authorization](./optimize-api-authorization.md) | -| Content-Length | REQUIRED | Size in bytes of the entity-body, also see [Content-Length](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Length). 
| -| Content-Type | REQUIRED | Must be one of: `application/cloudevents-batch+json` or `application/json` | - -## Request body - -[JSON Batch Format](https://github.com/cloudevents/spec/blob/v1.0/json-format.md#4-json-batch-format) compliant JSON Array of CloudEvent JSON Objects: - -| Name | Type | Constraints | Description | -| -------------------------------------------------------------------------------- | ------------------------------------------------------------------------------ | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion) | String | REQUIRED | The version of the CloudEvents specification, which the event uses, must be `1.0`. See [CloudEvents - Version 1.0 - specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion). | -| [id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id) | String | REQUIRED | Uniquely identifies an event, see [CloudEvents - Version 1.0 - id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id). | -| [source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1) | String | REQUIRED | Identifies the context in which an event happened, see [CloudEvents - Version 1.0 - source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1). A use-case could be if you have conflicting types across different sources. For example, a `type:OrderProcessed` originating from both `order-service` and `shipping-service`. In this case, the `source` field provides means to clearly separate between the origins of a particular event. Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. | -| [type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | String | REQUIRED | This attribute contains a value describing the type of event related to the originating occurrence, see [CloudEvents - Version 1.0 - type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type). Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. The value `camunda` cannot be used for this field. | -| [time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | [Timestamp](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type-system) | OPTIONAL | Timestamp of when the occurrence happened, see [CloudEvents - Version 1.0 - time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#time). String encoding: [RFC 3339](https://tools.ietf.org/html/rfc3339). If not present, a default value of the time the event was received will be created. 
| -| [data](https://github.com/cloudevents/spec/blob/v1.0/spec.md#event-data) | Object | OPTIONAL | Event payload data that is part of the event, see [CloudEvents - Version 1.0 - Event Data](https://github.com/cloudevents/spec/blob/v1.0/spec.md#event-data). This CloudEvents Consumer API only accepts data encoded as `application/json`, the optional attribute [CloudEvents - Version 1.0 - datacontenttype](https://github.com/cloudevents/spec/blob/v1.0/spec.md#datacontenttype) is thus not required to be provided by the producer. Furthermore, there are no schema restrictions on the `data` attribute and thus the attribute [CloudEvents - Version 1.0 - dataschema](https://github.com/cloudevents/spec/blob/v1.0/spec.md#datacontenttype) is also not required to be provided. Producer may provide any valid JSON object, but only simple properties of that object will get converted to variables of a process instances of an [event-based process](self-managed/optimize-deployment/configuration/setup-event-based-processes.md) instance later on. | -| group | String | OPTIONAL | This is an OPTIONAL [CloudEvents Extension Context Attribute](https://github.com/cloudevents/spec/blob/v1.0/spec.md#extension-context-attributes) that is specific to this API. A group identifier that may allow to easier identify a group of related events for a user at the stage of mapping events to a process model. An example could be a domain of events that are most likely related to each other; for example, `billing`. When this field is provided, it will be used to allow adding events that belong to a group to the [mapping table](components/userguide/additional-features/event-based-processes.md#external-events). Optimize handles groups case-sensitively. Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. | -| traceid | String | REQUIRED | This is a REQUIRED [CloudEvents Extension Context Attribute](https://github.com/cloudevents/spec/blob/v1.0/spec.md#extension-context-attributes) that is specific to this API. A traceid is a correlation key that relates multiple events to a single business transaction or process instance in BPMN terms. Events with the same traceid will get correlated into one process instance of an Event Based Process. | - -The following is an example of a valid propertie's `data` value. Each of those properties would be available as a variable in any [event-based process](self-managed/optimize-deployment/configuration/setup-event-based-processes.md) where an event containing this as `data` was mapped: - -``` - { - "reviewSuccessful": true, - "amount": 10.5, - "customerId": "lovelyCustomer1" - } -``` - -Nested objects, such as `customer` in this example, would not be available as a variable in event-based processes where an event containing this as `data` value was mapped: - -``` - { - "customer": { - "firstName":"John", - "lasTName":"Doe" - } - } -``` - -## Result - -This method returns no content. 
- -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 204 | Request successful | -| 400 | Returned if some of the properties in the request body are invalid or missing. | -| 401 | Secret incorrect or missing in HTTP Header `Authorization`. See [Authorization](#authorization) on how to authenticate. | -| 403 | The Event Based Process feature is not enabled. | -| 429 | The maximum number of requests that can be serviced at any time has been reached. The response will include a `Retry-After` HTTP header specifying the recommended number of seconds before the request should be retried. See [Configuration](self-managed/optimize-deployment/configuration/event-based-processes.md#event-ingestion-rest-api-configuration) for information on how to configure this limit. | -| 500 | Some error occurred while processing the ingested event, best check the Optimize log. | - -## Example - -### Ingest cloud events - -#### Request - -POST `/api/ingestion/event/batch` - -##### Request header - -`Authorization: Bearer mySecret` - -##### Request body - - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", - "source": "order-service", - "type": "orderValidated", - "time": "2020-01-01T10:00:10.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", - "source": "shipping-service", - "type": "packageShipped", - "traceid": "id1", - "group": "shop", - "time": "2020-01-01T10:00:20.000Z" - } - ] - -#### Response - -Status 204. - -### Reingest cloud events - -The API allows you to update any previously ingested cloud event by ingesting an event using the same event `id`. - -The following request would update the first cloud event that got ingested in the [ingest three cloud events sample](#ingest-cloud-events). Note that on an update, the cloud event needs to be provided as a whole; it's not possible to perform partial updates through this API. - -In this example, an additional field `newField` is added to the data block of the cloud event with the id `1edc4160-74e5-4ffc-af59-2d281cf5aca341`. - -#### Request - -POST `/api/ingestion/event/batch` - -##### Request header - -`Authorization: Bearer mySecret` - -##### Request Body: - - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example", - "newField": "allNew" - } - } - ] - -#### Response - -Status 204. 
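For orientation, a batch like the ones in the examples above can be submitted with any HTTP client. The following curl call is only an illustrative sketch: the host and port (`localhost:8090`), the token `mySecret`, and the file name `events.json` are assumptions rather than values defined by this API.

```shell
# Illustrative sketch only: host/port, token, and file name are assumptions.
# events.json contains a JSON array of CloudEvents, as shown in the examples above.
curl -X POST "http://localhost:8090/api/ingestion/event/batch" \
  -H "Authorization: Bearer mySecret" \
  -H "Content-Type: application/cloudevents-batch+json" \
  --data @events.json
```

A successful call returns status 204 with an empty body, as listed in the response codes above.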
diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/external-variable-ingestion.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/external-variable-ingestion.md deleted file mode 100644 index 7038767ee5a..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/external-variable-ingestion.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -id: external-variable-ingestion -title: "External variable ingestion" -description: "The REST API to ingest external variable data into Optimize." ---- - -With the external variable ingestion API, variable data held in external systems can be ingested into Optimize directly, -without the need for these variables to be present in your Camunda platform data. This can be useful when external -business data, which is relevant for process analysis in Optimize, is to be associated with specific process instances. - -Especially if this data changes over time, it is advisable to use this REST API to persist external variable updates to Optimize, as otherwise Optimize may not be aware of data changes in the external system. - -## Functionality - -The external variable ingestion API allows users to ingest batches of variable data which Optimize stores in a dedicated -index. All variable data includes a reference to the process instance each variable belongs to, this reference then -enables Optimize to import external variable data from the dedicated index to their respective process instances at -regular intervals. Once Optimize has updated the process instance data, the external variables are available for report -evaluations in Optimize. - -## Limitations - -Note that external variables should be treated as separate from engine variables. If you ingest variables that are already present in the engine, engine imports may override the ingested data and vice versa, leading to unreliable report results. - -Similarly, if the same ingested batch contains variables with duplicate IDs, you may experience unexpected report results because Optimize will assume only one of the updates per ID and batch to be the most up to date one. - -Additionally, ensure the reference information (process instance ID and process definition key) is accurate, as otherwise Optimize will not be able to correctly associate variables with instance data and may create new instance indices, resulting in data which will not be usable in reports. External variables can only be ingested for process instances and will not be affected by any configured variable plugin. - -## Configuration - -Refer to -the [configuration section](../../self-managed/optimize-deployment/configuration/system-configuration.md) to learn more -about how to set up external variable ingestion. - -## Method & HTTP target resource - -POST `/api/ingestion/variable` - -## Request headers - -The following request headers have to be provided with every variable ingestion request: - -| Header | Constraints | Value | -| ------------- | ----------- | --------------------------------------------------- | -| Authorization | REQUIRED\* | See [Authorization](../optimize-api-authorization). 
| -| Content-Type | REQUIRED | `application/json` | - -- Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every delete request: - -| Parameter | Constraints | Value | -| ------------ | ----------- | -------------------------------------------------- | -| access_token | REQUIRED\* | See [Authorization](../optimize-api-authorization) | - -- Only required if not set as a request header - -## Request body - -The request body contains an array of variable JSON Objects: - -| Name | Type | Constraints | Description | -| -------------------- | ------ | ----------- | ------------------------------------------------------------------------------------------------- | -| id | String | REQUIRED | The unique identifier of this variable. | -| name | String | REQUIRED | The name of the variable. | -| type | String | REQUIRED | The type of the variable. Must be one of: String, Short, Long, Double, Integer, Boolean, or Date. | -| value | String | REQUIRED | The current value of the variable. | -| processInstanceId | String | REQUIRED | The ID of the process instance this variable is to be associated with. | -| processDefinitionKey | String | REQUIRED | The definition key of the process instance this variable is to be associated with. | - -## Result - -This method returns no content. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | ---------------------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 400 | Returned if some properties in the request body are invalid or missing. | -| 401 | Secret incorrect or missing. See [Authorization](../optimize-api-authorization) on how to authorize. | - -## Example - -### Request - -POST `/api/ingestion/variable` - -Request Body: - - [ - { - "id": "7689fced-2639-4408-9de1-cf8f72769f43", - "name": "address", - "type": "string", - "value": "Main Street 1", - "processInstanceId": "c6393461-02bb-4f62-a4b7-f2f8d9bbbac1", - "processDefinitionKey": "shippingProcess" - }, - { - "id": "993f4e73-7f6a-46a6-bd45-f4f8e3470ba1", - "name": "amount", - "type": "integer", - "value": "500", - "processInstanceId": "8282ed49-2243-44df-be5e-1bf893755d8f", - "processDefinitionKey": "orderProcess" - } - ] - -### Response - -Status 204. diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/health-readiness.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/health-readiness.md deleted file mode 100644 index f5c2d823ea0..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/health-readiness.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: health-readiness -title: "Health readiness" -description: "The REST API to check the readiness of Optimize." ---- - -The purpose of Health-Readiness REST API is to return information indicating whether Optimize is ready to be used. - -:::note -The Health-Readiness REST API does not require an [`Authorization` header](./optimize-api-authorization.md), and rejects requests that include one. -::: - -## Method & HTTP target resource - -GET `/api/readyz` - -## Response - -The response is an empty body with the status code indicating the readiness of Optimize. The following responses are available: - -- `200`: This indicates that Optimize is ready to use. It is connected to both Elasticsearch and at least one of its configured engines. -- `503`: This indicates that Optimize is not ready to use. 
It cannot connect to either Elasticsearch or any of its configured engines. diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/import-entities.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/import-entities.md deleted file mode 100644 index 77f0d2b2e3e..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/import-entities.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -id: import-entities -title: "Import entities" -description: "The REST API to import entity definitions." ---- - -This API allows users to import entity definitions such as reports and dashboards into existing collections. These entity definitions may be obtained either using the [report](../report/export-report-definitions/) or [dashboard](../dashboard/export-dashboard-definitions) export API or [via the UI](components/userguide/additional-features/export-import.md#exporting-entities). - -## Prerequisites - -For importing via API, the following prerequisites must be met: - -- All definitions the entities require exist in the target Optimize. -- The target collection, identified using the `collectionId` query parameter, must exist in the target system. -- The collection data sources must include all relevant definitions for the entities. -- The entity data structures match. To ensure matching data structures, confirm that the Optimize version of the source is the same as the version of the target Optimize. - -If any of the above conditions are not met, the import will fail with an error response; refer to the error message in the response for more information. - -## Method & HTTP target resource - -POST `/api/public/import` - -## Request headers - -The following request headers have to be provided with every request: - -| Header | Constraints | Value | -| ------------- | ----------- | ------------------------------------------------ | -| Authorization | REQUIRED | [Authorization](./optimize-api-authorization.md) | - -## Query parameters - -The following query parameters have to be provided with every request: - -| Parameter | Constraints | Value | -| ------------ | ----------- | -------------------------------------------------------------- | -| collectionId | REQUIRED | The ID of the collection into which the entities will be imported. | - -## Request body - -The request body should contain a JSON array of entity definitions to be imported. These entity definitions may be obtained by using the [report](../report/export-report-definitions) or [dashboard](../dashboard/export-dashboard-definitions) export APIs or by [manually exporting entities](components/userguide/additional-features/export-import.md#exporting-entities) via the Optimize UI. - -## Result - -The response contains a list of DTOs that specify the ID and entity type (`report` or `dashboard`) of each newly created entity in the target system. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 200 | Request successful. | -| 400 | The provided list of entities is invalid. This can occur if any of the above listed [prerequisites](#prerequisites) are not met. Check the `detailedMessage` of the error response for more information. | -| 401 | Secret incorrect or missing in HTTP header. See [Authorization](./optimize-api-authorization.md) on how to authenticate.
| -| 404 | The given target collection ID does not exist. | -| 500 | Some error occurred while processing the request, best check the Optimize log. | - -## Example - -### Import two entities - -Assuming you want to import a report and a dashboard into the collection with ID `123`, this is what it would look like: - -POST `/api/public/import?collectionId=123` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Request body - -``` -[ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - "exportEntityType": "single_process_report", - "name": "Number: Process instance duration", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "b0eb845-e8ed-4824-bd85-8cd69038f2f5", - "exportEntityType": "dashboard", - "name": "Dashboard 1", - "sourceIndexVersion": 5, - "reports": [ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - ... - } - ], - "availableFilters": [...], - "collectionId": null - } -] -``` - -#### Response - -Status 200. - -#### Response Content - -``` -[ - { - "id": "e8ca18b9-e637-45c8-87da-0a2b08b34d6e", - "entityType": "dashboard" - }, - { - "id": "290b3425-ba33-4fbb-b20b-a4f236036847", - "entityType": "report" - } -] -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/optimize-api-authorization.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/optimize-api-authorization.md deleted file mode 100644 index bffbb6da987..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/optimize-api-authorization.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: optimize-api-authorization -title: "Authorization" -description: "Connect business process-related event data and variable data held in external systems from third-party systems to Optimize, and more." ---- - -Most requests of the Public REST API need to include an authorization token -as an [`Authorization`](https://tools.ietf.org/html/rfc7235#section-4.2) request header. - -Given a valid token `mySecret`, the header would need to be set as follows: - -``` -Authorization: Bearer mySecret -``` - -The token used to access the Optimize API can be a configurable shared secret (except in Camunda 8 SaaS mode) or a JWT compliant with the OAuth2 Protocol (all modes). - -Refer to [Public API Configuration](../../self-managed/optimize-deployment/configuration/system-configuration.md#public-api) for the particular configuration to access the public API using a token. - -### How to obtain the access token for C8 SaaS (Cloud) usage - -You must obtain a token to use the Optimize API. When you create an Optimize [client]($docs$/guides/setup-client-connection-credentials/), you get all the information needed to connect to Optimize. - -See our guide on [building your own client]($docs$/apis-tools/build-your-own-client/). 
- -The following settings are needed: - -| Name | Description | Default value | -| ------------------------ | ----------------------------------------------- | --------------------- | -| client id | Name of your registered client | - | -| client secret | Password for your registered client | - | -| audience | Permission name; if not given use default value | `optimize.camunda.io` | -| authorization server url | Token issuer server | - | - -Send a token issue _POST_ request to the authorization server with the following content: - -```json -{ - "client_id": "", - "client_secret": "", - "audience": "", - "grant_type": "client_credentials" -} -``` - -See the following example with _curl_: - -```shell -curl -X POST --header 'content-type: application/json' --data '{"client_id": "", "client_secret":"","audience":"","grant_type":"client_credentials"}' https:// -``` - -If the authorization is successful, the authorization server sends back the access token, when it expires, scope, and type: - -```json -{ - "access_token": "ey...", - "scope": "...", - "expires_in": 86400, - "token_type": "Bearer" -} -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/delete-report.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/delete-report.md deleted file mode 100644 index 534ad5e80dd..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/delete-report.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -id: delete-report -title: "Delete reports" -description: "The REST API to delete reports from Optimize." ---- - -The report deletion API allows you to delete reports by ID from Optimize. - -:::note Heads up! -During deletion a report will get removed from any dashboard or combined process report it is referenced by. In case a report is referenced by an alert, the corresponding alert will get deleted too. -::: - -## Method & HTTP target resource - -DELETE `/api/public/report/{report-ID}` - -Where `report-ID` is the ID of the report you wish to delete. - -## Request headers - -The following request headers have to be provided with every delete request: - -| Header | Constraints | Value | -| ------------- | ----------- | ----------------------------------------------------- | -| Authorization | REQUIRED | See [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -No query parameters available. - -## Request body - -No request body is required. - -## Result - -No response body. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 404 | The requested report was not found, please check the provided report-ID. | -| 500 | Some error occurred while processing the request, best check the Optimize log. | - -## Example - -### Delete a report - -Let's assume you want to delete a report with the ID `e6c5abb1-6a18-44e7-8480-d562d511ba62`, this is what it would look like: - -DELETE `/api/public/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Response - -Status 204. 
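For reference, the delete call from the example above can also be issued from the command line. This curl sketch is illustrative only: the host and port (`localhost:8090`) are assumptions, and `mySecret` is the example token used throughout these docs.

```shell
# Illustrative sketch only: the host and port are assumptions.
curl -X DELETE "http://localhost:8090/api/public/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62" \
  -H "Authorization: Bearer mySecret"
```

On success, the API responds with status 204 and no response body.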
diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/export-report-definitions.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/export-report-definitions.md deleted file mode 100644 index 88fdaf5221b..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/export-report-definitions.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: export-report-definitions -title: "Export report definitions" -description: "The REST API to export report definitions." ---- - -This API allows users to export report definitions which can later be imported into another Optimize system. The reports to be exported may be within a collection or private entities, the API has access to both. - -The obtained list of entity exports can be imported into other Optimize systems either using the dedicated [import API](../import-entities.md) or [via UI](components/userguide/additional-features/export-import.md#importing-entities). - -## Method & HTTP target resource - -POST `/api/public/export/report/definition/json` - -## Request headers - -The following request headers have to be provided with every request: - -| Header | Constraints | Value | -| ------------- | ----------- | ------------------------------------------------- | -| Authorization | REQUIRED | [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -No query parameters available. - -## Request body - -The request body should contain a JSON array of report IDs to be exported. - -## Result - -The response contains a list of exported report definitions. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 404 | At least one of the given report IDs does not exist. | -| 500 | Some error occurred while processing the request, best check the Optimize log. | - -## Example - -### Export two reports - -Assuming you want to export the two reports with IDs `123` and `456` and have configured the accessToken `mySecret`, this is what it would look like: - -POST `/api/public/export/report/definition/json` - -#### Request header - -`Authorization: Bearer mySecret` - -#### Request body - -``` -[ "123", "456" ] -``` - -#### Response - -Status 200. - -#### Response content - -``` -[ - { - "id": "123", - "exportEntityType": "single_process_report", - "name": "Number: Process instance duration", - "sourceIndexVersion": 8, - "collectionId": "40cb3657-bdcb-459d-93ce-06877ac7244a", - "data": {...} - }, - { - "id": "456", - "exportEntityType": "single_process_report", - "name": "Heatmap: Flownode count", - "sourceIndexVersion": 8, - "collectionId": "40cb3657-bdcb-459d-93ce-06877ac7244a", - "data": {...} - } -] -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/get-data-export.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/get-data-export.md deleted file mode 100644 index 3ec3cb9469d..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/get-data-export.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -id: get-data-export -title: "Export report result data" -description: "The REST API to export report result data from Optimize." 
---- - -The data export API allows users to export large amounts of data in a machine-readable format (JSON) from Optimize. - -## Functionality - -Users can export all report types (except combined process reports) from `Optimize` using the Data Export API. Moreover, raw data reports can be exported in a paginated fashion, so that large amounts of data can be consumed in chunks by the client. - -### Pagination - -The simplest way to paginate through the results is to perform a search request with all the `REQUIRED` header/query parameters as described in the sections below (but without `searchRequestId`), then pass the `searchRequestId` returned in each response to the next request, until no more documents are returned. Note that it's often the case, but not guaranteed, that the `searchRequestId` remains stable through the entire pagination, so always use the `searchRequestId` from the most current response to make your next request. - -## Method & HTTP target resource - -GET `/api/public/export/report/{report-ID}/result/json` - -Where `report-ID` is the ID of the report you wish to export. - -## Request headers - -The following request headers have to be provided with every data export request: - -| Header | Constraints | Value | -| ------------- | ----------- | ------------------------------------------------- | -| Authorization | REQUIRED | [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -The following query parameters have to be provided with every data export request: - -| Parameter | Constraints | Value | -| ----------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| limit | REQUIRED | Maximum number of records per page. Please note that the limit will only be considered when performing the request for the first page of a raw data report. The following requests for a given searchRequestId will have the same page size as the first request. | -| paginationTimeout | REQUIRED | The amount of time (in seconds) for which a search context will be held in memory, so that the remaining pages of the result can be retrieved. For more information on how to paginate through the results, please refer to the section [Pagination](#pagination). | -| searchRequestId | Optional | The ID of a previous search for which you wish to retrieve the next page of results. For more information on how to get and use a searchRequestId please refer to the section [Pagination](#pagination). | - -## Request body - -No request body is required. - -## Result - -| Content | Value | -| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| searchRequestId | The ID of the performed search. The following pages from this search can be retrieved by using this ID. For more information please refer to the section [Pagination](#pagination). | -| numberOfRecordsInResponse | Number of records in the JSON Response. 
This is a number between [0, limit] | -| totalNumberOfRecords | The total number of records (from all pages) for this report export | -| reportId | The ID of the exported report | -| message | In case there is additional information relevant to this request, this field will contain a message describing it. The response will only contain this field if there is a message to be shown | -| data [Array] | An array containing numberOfRecordsInResponse report data records in JSON Format | - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 200 | Request successful. | -| 400 | Returned if some of the properties from the request are invalid or missing. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 404 | The requested report was not found, please check the provided report-ID. | -| 500 | Some error occurred while processing the export request, best check the Optimize log. | - -## Example - -### Export a raw data report - -Let's assume you want to export a report with the ID `e6c5abb1-6a18-44e7-8480-d562d511ba62`, with a maximum of two records per page, an access token `mySecret` and a pagination timeout of 60s, this is what it would look like - -#### Initial API call - -GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? paginationTimeout=60&limit=2` - -##### Request header - -`Authorization: Bearer mySecret` - -##### Response content - - { - "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", - "numberOfRecordsInResponse": 2, - "totalNumberOfRecords": 11, - "reportId": "e6c5abb1-6a18-44e7-8480-d562d511ba62", - "data": [ - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1801", - "processInstanceId": "1809", - "businessKey": "aBusinessKey", - "startDate": "2021-12-02T17:21:49.330+0200", - "endDate": "2021-12-02T17:21:49.330+0200", - "duration": 0, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - }, - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1801", - "processInstanceId": "1804", - "businessKey": "aBusinessKey", - "startDate": "2021-12-02T17:21:49.297+0200", - "endDate": "2021-12-02T17:21:49.298+0200", - "duration": 1, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - } - ] - } - -##### Response - -Status 200. - -#### Subsequent API calls - -Note here the use of the query parameter `searchRequestId` to retrieve further pages from the initial search. 
- -`GET /api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json?paginationTimeout=60&searchRequestId=FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ&limit=2` - -##### Request header - -`Authorization: Bearer mySecret` - -##### Response content - - { - "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", - "numberOfRecordsInResponse": 2, - "totalNumberOfRecords": 11, - "reportId": "e6c5abb1-6a18-44e7-8480-d562d511ba62", - "data": [ - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1bc9474d-5762-11ec-8b2c-0242ac120003", - "processInstanceId": "1bdafab8-5762-11ec-8b2c-0242ac120003", - "businessKey": "aBusinessKey", - "startDate": "2021-12-07T15:32:22.739+0200", - "endDate": "2021-12-07T15:32:22.740+0200", - "duration": 1, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - }, - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1bc9474d-5762-11ec-8b2c-0242ac120003", - "processInstanceId": "1bda3763-5762-11ec-8b2c-0242ac120003", - "businessKey": "aBusinessKey", - "startDate": "2021-12-07T15:32:22.735+0200", - "endDate": "2021-12-07T15:32:22.735+0200", - "duration": 0, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - } - ] - } - -##### Response - -Status 200. diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/get-report-ids.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/get-report-ids.md deleted file mode 100644 index b6b94dae0b7..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/report/get-report-ids.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -id: get-report-ids -title: "Get report IDs" -description: "The REST API to retrieve all report IDs in a given collection." ---- - -This API allows users to retrieve all report IDs from a given collection. - -## Method & HTTP target resource - -GET `/api/public/report` - -## Request headers - -The following request headers have to be provided with every request: - -| Header | Constraints | Value | -| ------------- | ----------- | ------------------------------------------------- | -| Authorization | REQUIRED | [Authorization](../optimize-api-authorization.md) | - -## Query parameters - -The following query parameters have to be provided with every request: - -| Parameter | Constraints | Value | -| ------------ | ----------- | -------------------------------------------------------------- | -| collectionId | REQUIRED | The ID of the Collection for which to retrieve the report IDs. | - -## Request body - -No request body is required. - -## Result - -The response contains a list of IDs of the reports existing in the collection with the given collection ID. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------------------------------------------- | -| 200 | Request successful. | -| 401 | Secret incorrect or missing in HTTP Header. See [Authorization](../optimize-api-authorization.md) on how to authenticate. | -| 500 | Some error occurred while processing the request, best check the Optimize log. 
| - -## Example - -### Retrieve all report IDs from a collection - -Assuming you want to retrieve all report IDs in the collection with the ID `1234` and have configured the accessToken `mySecret`, this is what it would look like: - -GET `/api/public/report?collectionId=1234` - -#### Request header - -`Authorization: Bearer mySecret` - -##### Response - -Status 200. - -##### Response content - -``` -[ - { - "id": "9b0eb845-e8ed-4824-bd85-8cd69038f2f5" - }, - { - "id": "1a866c7c-563e-4f6b-adf1-c4648531f7d4" - } -] -``` diff --git a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/variable-labeling.md b/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/variable-labeling.md deleted file mode 100644 index 0718e952f76..00000000000 --- a/optimize_versioned_docs/version-3.10.0/apis-tools/optimize-api/variable-labeling.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -id: variable-labeling -title: "Variable labeling" -description: "The REST API to create, update, and delete variable labels in Optimize." ---- - -With the variable labeling endpoint, variable labels can be added, updated, and deleted from Optimize. - -## Functionality - -The variable labeling API allows users to add, update, and delete batches of variable label data, which Optimize stores in a dedicated -index. All variable label data includes a reference to the process definition each variable belongs to, which allows Optimize to display a variable's label instead of its original name anywhere the given process definition is being used. Some examples of that would be in reports, configuring filters, report grouping, dashboard filters, and event-based processes. - -## Limitations - -Note that this feature is currently not supported in outlier analysis. This means that during outlier analysis, the original name of a variable will be displayed. - -## Authorization - -Every request requires [Authorization](./optimize-api-authorization.md). - -## Method & HTTP target resource - -POST `/api/public/variables/labels` - -## Request headers - -The following request headers must be provided with every variable labeling request: - -| Header | Constraints | Value | -| ------------- | ----------- | ------------------------------------------------ | -| Authorization | REQUIRED\* | [Authorization](./optimize-api-authorization.md) | - -## Request body - -The request body should contain a reference to the process definition using its key, as well as an array of variable labels. Each variable label object in the array must specify the name and type of the variable for which a label is being added, as well as the value of the label itself. - -## Result - -This method returns no content. - -## Response codes - -Possible HTTP Response Status codes: - -| Code | Description | -| ---- | ------------------------------------------------------------------------------------- | -| 204 | Request successful. | -| 400 | Returned if some of the properties in the request body are invalid or missing. | -| 401 | Secret incorrect or missing. See [Authorization](#authorization) on how to authorize. | -| 404 | The process definition with the given definition key doesn't exist. | - -## Example 1 - -Insert three labels for three variable for a given process definition - -:::note -If the label exists already in the index, its value will be overridden. 
-::: - -### Request - -POST `/api/public/variables/labels` - -Request Body: - -``` - { - "definitionKey": "bookrequest-1-tenant", - "labels" : [ - { - "variableName": "bookAvailable", - "variableType": "Boolean", - "variableLabel": "book availability" - }, - { - "variableName": "person.name", - "variableType": "String", - "variableLabel": "first and last name" - }, - { - "variableName": "person.hobbies._listSize", - "variableType": "Long", - "variableLabel": "amount of hobbies" - } - ] - } -``` - -### Response - -Status 204. - -## Example 2 - -Delete a label for a variable belonging to a given process definition by inputting an empty -string for its value. If there is no label for the given variable in Elasticsearch, no operation is being conducted. - -### Request - -POST `/api/public/variables/labels` - -Request Body: - -``` - { - "definitionKey": "bookrequest-1-tenant", - "labels" : [ - { - "variableName": "bookAvailable", - "variableType": "Boolean", - "variableLabel": "" - } - ] - } -``` - -### Response - -Status 204. - -## Example 3 - -Insert and delete labels for two variables belonging to a given process definition. The following example adds a label for the variable with name **bookAvailable** and deletes a label for the variable with name **person.name**. - -### Request - -POST `/api/public/variables/labels` - -Request Body: - -``` - { - "definitionKey": "bookrequest-1-tenant", - "labels" : [ - { - "variableName": "bookAvailable", - "variableType": "Boolean", - "variableLabel": "book availability" - }, - { - "variableName": "person.name", - "variableType": "String", - "variableLabel": "" - }, - ] - } -``` - -### Response - -Status 204. - -## Example 4 - -Attempting to insert multiple labels for the same variable will result to a 400 response code. - -### Request - -POST `/api/public/variables/labels` - -Request Body: - -``` - { - "definitionKey": "someProcessDefinitionKey", - "labels" : [ - { - "variableName": "bookAvailable", - "variableType": "Boolean", - "variableLabel": "book availability" - }, - { - "variableName": "bookAvailable", - "variableType": "Boolean", - "variableLabel": "is book available" - }, - ] - } -``` - -### Response - -Status 400. diff --git a/optimize_versioned_docs/version-3.10.0/components/img/dashboard-sharingPopover.png b/optimize_versioned_docs/version-3.10.0/components/img/dashboard-sharingPopover.png deleted file mode 100644 index dc14b7fe776..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/img/dashboard-sharingPopover.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/alerts.md b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/alerts.md deleted file mode 100644 index 26cc2ba521c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/alerts.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: alerts -title: Alerts -description: Get a notification as soon as your system is behaving in an unexpected manner. ---- - -:::note -Alerting via email not available in Camunda 8 Self-Managed. -::: - -Optimize's alerting functionality can be used to notify you when your report hits a predefined critical value. You can create alerts for any number reports that exist within a collection. - -To configure an alert, take the following steps: - -1. Inside a collection, navigate to the **Alerts** tab to create and view all alerts defined for reports in this collection. 
You can manage an alert by moving the mouse over the alert entry and clicking the **Edit** or **Delete** buttons in the context menu on the right side of the page. - -![Alert overview](./img/alerts-overview.png) - -2. Click **Create New Alert** to create a new alert. You will then see the following modal: - -![Alert modal overview](./img/alert-modal-description.png) - -3. To give the alert a name, select the report and define a target webhook or email address of the person who will receive the alert. - -:::note -In Camunda 7 and Camunda 8 Self-Managed, you must configure the email service to receive notifications. See the [technical guide](/self-managed/optimize-deployment/configuration/system-configuration.md#email) for which properties need to be defined. -::: - -Note that alerts can only be created for reports which are visualized as a single number and are in the same collection as the alert. Visit the [reports section](../creating-reports.md) on how to define single-number reports. - -4. Set a threshold which defines when an alert should be triggered. A notification is sent to the configured email address or webhook as soon as a report value hits the threshold. If reminder notifications are enabled, the alert will continue to send notifications for as long as the value is above (or below, as defined) the threshold. - -Finally, you'll get a resolve notification as soon as the report value is within a typical range. For example, say you defined an alert which should be triggered when the report value becomes greater than 50. You also enabled reminder notifications to be sent each hour. Here's what that would look like: - -![Notifications graph](./img/alert-notifications-graph.png) - -## Send alerts to external systems - -:::note -Alerting with webhooks is only available for Camunda 7. -::: - -It's possible to configure Optimize to send alerts to an external system when needed. For details on how to configure and add target systems, visit the [technical guide](/self-managed/optimize-deployment/configuration/system-configuration.md#alert-notification-webhooks). Once at least one target system is configured, alerts will have a new input option to select one of the configured systems. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/event-based-processes.md b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/event-based-processes.md deleted file mode 100644 index f8198a5fd61..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/event-based-processes.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -id: event-based-processes -title: Event-based processes -description: Create and analyze reports backed by ingested events. ---- - -Camunda 7 only - -## Overview - -Event-based processes are BPMN processes that can be created inside Optimize and based on events. These events can be loaded from an external system or created from internal BPMN processes. They are particularly useful to create reports and dashboards based on a process that is not fully automated with Camunda 7 yet. - -Once the event-based process feature is correctly configured, you will see a new link in the navigation to go to the event-based process list. From there, you can see, create, or edit your event-based processes. - -:::note -When Camunda activity events are used in event-based processes, Camunda admin authorizations are not inherited for the event-based process. 
The authorization to use an event-based process is solely managed via the access management of event-based processes when [publishing an event-based process](#publishing-an-event-based-process) or at any time via the [edit access option](#event-based-process-list---edit-access) in the event-based process list. - -Visit our [technical guide](/self-managed/optimize-deployment/configuration/setup-event-based-processes.md) on authorization management and event-based processes for the reasoning behind this behavior. -::: - -## Set up - -You need to set up the event-based processes feature to make use of this feature. See the [technical guide](/self-managed/optimize-deployment/configuration/setup-event-based-processes.md) for more information. - -## Event-based process list - -All currently available event-based processes are listed under the main navigation item **Event-based processes**. From there, it is possible to see their state, which can be one of the following: - -- `Unmapped` - The process model is created, but no single event is mapped to a flow node. -- `Mapped` - The process model contains at least one mapping of an event to a flow node. -- `Published` - The event-based process is published and can be used in reports by users that are authorized to access it. -- `Unpublished Changes` - The process model contains changes that are not reflected in the currently published state of the event-based process; it needs to get republished manually. - -![Process List](./img/processList.png) - -### Event-based process list - edit access - -To manage authorizations for a published event-based process, the **Edit Access** option in the dropdown menu of each event-based process list entry allows you to authorize users or groups to create reports for these processes in Optimize. - -![Process List - Edit Access](./img/editAccess.png) - -## Creating an event-based process - -There are three ways to create an event-based process: - -### Auto-generate - -:::note -The process auto-generation feature is currently in early beta stage. -::: - -The first way to create an event-based process is to allow Optimize to auto-generate the model based on provided configuration. Using this option, you can specify which event sources should be used for the process, including both Camunda and external events. - -Note that for external events, it is currently only possible to select all the external events. - -![Autogenerate a process](./img/auto-generation.png) - -Optimize will attempt to generate an overall model based on these sources, determining the order of events in the model by sampling stored instances. After auto-generation is complete, you will see the process in [view mode](#view-mode), with the model's nodes fully mapped to their corresponding events. - -To make changes to the autogenerated process, modify either the model itself, the process name, or the process mappings in the same way as any other event-based process by entering [edit mode](#edit-mode). - -### Model a process - -The second way to create an event-based process is to model it manually using the integrated BPMN modeler. - -### Upload BPMN model - -Finally, you can create an event-based process by uploading a `.bpmn` file directly into Optimize. - -## Edit mode - -![Edit Mode](./img/editMode.png) - -The edit mode allows you to build and map your event-based process. Using this mode, you can perform all kinds of operations, such as: - -- Rename the process. -- Model the process using the integrated BPMN modeler. 
- -Map your diagram nodes to an event from the event table. -Edit event sources for the events to display in the event table. -Save the current state with your applied changes. -Cancel changes you already applied to the process. - -### Modeling - -Modeling can be done using the integrated modeler shown in the screenshot above. To maximize the modeling area, collapse the table during the modeling by clicking on the **Collapse** button in the top right of the table. - -### Event sources - -To map BPMN nodes to events, add event sources to the process first by clicking the **Add Event Sources** button available at the top of the table. - -In this view, it is possible to add two types of events to the events list: - -#### External events - -Events that were ingested into Optimize from an external system. These events can be imported into Optimize using the event ingestion API Optimize provides. - -Defining the `group` property when ingesting the events will allow selecting events that belong to a group. If the group property is not defined or left empty during ingestion of an event, Optimize will consider it `ungrouped`. - -![Selecting External Events](./img/externalEvents.png) - -#### Camunda events - -![Add Source Modal](./img/sourceModal.png) - -These are events generated from an existing Camunda BPMN process. Only processes for which Optimize has imported at least one event will be visible for selection. This means the process has to have at least one instance and Optimize has to have been configured to import data from that process. - -See the [technical guide](/self-managed/optimize-deployment/configuration/setup-event-based-processes.md#use-camunda-activity-event-sources-for-event-based-processes) for more information on how this is configured. - -To add such events, provide the following details: - -- The target process definition that you would like to generate the events from - -- The trace ID location: A trace ID uniquely identifies a process instance across system boundaries. One example would be an invoice number for an invoice handling process. For a Camunda process, it is possible to select a trace ID that exists either in a variable or in the process business key. - -- Which events to display in the table: - -Adding events for every flow node might not be necessary for the event-based process. Therefore, we provide the ability to only add the events that are necessary. There are three options available: - -- Process start and end: This will add only two events in the table: one event is triggered when the process starts and one when it ends. - -- Start and end flow node events: The number of events added to the table will depend on how many start and end events are in the process. For example, if there is one start event and two end events, three events will be added. - -- All flow node events: This option will add events for every flow node in the process. - -Once this information is defined and the sources are added, the events will appear in the table as shown below. - -![Events Table](./img/eventsTable.png) - -#### Events table - -Each event in the table will have the following properties: - -- Mapped as (start/end): Defines whether the event indicates the start or the end of a BPMN node. - -- Event name - -- Group - - - For external events, this corresponds to the group of the ingested event. - - For Camunda process events, this corresponds to the name of the process definition. - -- Source: External system or Camunda process event.
- -- Count: How many times this event was triggered. See [additional notes](#event-counts) for more information. - -To assist during event mapping, the events table offers suggestions of potential events to be mapped based on the selected node. This is indicated by a blue strap near the suggested event. The event suggestion only works when adding all external events as a source with no Camunda events. - -### Mapping events - -Mapping is the process of linking BPMN flow nodes to events. - -To start mapping, take the following steps: - -1. Select the node that you would like to map from the diagram. -2. To link the selected node to an event, enable the checkbox of that event in the table. Afterwards, a checkmark will be shown on top of the node to indicate that the event has been mapped successfully. - -:::note -Not all BPMN nodes can be mapped. Only events and activities can be mapped to events. -::: - -Once all the necessary nodes are mapped, you can save your diagram to go to the view mode. - -## View mode - -The view mode gives you a quick overview of which flow nodes have been mapped to events and allows you to enter the edit mode, publish, or delete the current event-based process. - -![View Mode of event-based processes](./img/processView.png) - -### Publishing an event-based process - -Once you have built and mapped your event-based process, you need to publish it to make it available for reports and dashboards. To publish your process, click the **Publish** button in the view mode of your event-based process. - -![Publish modal](./img/publishModal.png) - -In the shown modal, you can see who will have access to use the event-based process. By default, the process is only available to the user who created it. If you would like to allow other users to use the process in reports, click **Change...** to open the permissions options. - -![permissions modal](./img/usersModal.png) - -In this modal, it is possible to search for users and groups and add them to the list of users who have access to the process. Once that is done, you can save the changes and publish your process. - -Publishing the process will take some time, as Optimize needs to correlate all events and generate your event-based process. Once publishing is done, a notification will appear indicating this. - -Now the process is ready and can be used like any other process to create reports and dashboards. - -## External ingested events - -After ingesting events into Optimize from an external system, each individual event will appear in the external events table. - -![External Events](./img/external-events.png) - -By default, the table shows all ingested events sorted by timestamp from newest to oldest. However, it is also possible to search for events or sort the results by event name, source, group, or trace ID. - -### Deleting ingested events - -One or multiple events can be selected and deleted as shown in the figure below: - -![Deleting External Events](./img/deleting-events.png) - -:::note -When deleting an event mapped to a published event-based process, only the corresponding flow node instance will be removed from the process and no change will happen on the process instance level until the process is republished. - -For example, if you delete an ingested event that was mapped to the only end event within a process, the corresponding process instance will still be considered complete until the process is republished.
-::: - -## Additional notes - -### Event-based process auto-generation - -Event-based process auto-generation attempts to determine the order of events based on a sample of stored instances. Due to the nature of sampling, the generated model may not always appear as you might expect. - -In some cases, some sequence flows may be hidden by overlapping elements on the generated model. - -If both an event source and an embedded subprocess contained within that source are included for auto-generation, they will appear in the auto-generated model as independent processes. - -In the case where external events are configured as an event source, it is possible that Optimize will not be able to determine a model containing all external events. In this scenario, -Optimize will auto-generate a model containing only the external events that it could determine the order of. - -In any of the above scenarios, you can correct the model to suit your needs using the editor. Like any other event-based process, an auto-generated model can be edited so you can make any necessary corrections after auto-generation is complete. - -### Published event-based processes - -In some scenarios, reports created using event-based processes might not show all the information expected. - -To prevent this, we encourage you to avoid including the following elements when modeling your event-based processes: - -- Inclusive gateways: These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. - -![Inclusive Gateway](./img/inclusive_gateway.png) - -- Complex gateways: These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. - -![Complex Gateway](./img/complex_gateway.png) - -- Mixed gateway directions: Mixed gateways are gateways which have no clear direction, instead being a combination of opening and closing gateways. These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. - -![Mixed Direction Gateway](./img/mixed_direction_gateway.png) - -- Chained gateways: A chained gateway is one that occurs as part of a sequence of consecutive gateways. These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. - -![Chained Gateway](./img/chained_gateway.png) - -### Event counts - -Event counts in the table may not match the values you expect. There are three possible explanations for this: - -- If you have enabled history cleanup, the counts will still include events from process instances that have since been cleaned up. -- For events from Camunda processes, the count value represents the number of times that event has occurred across all versions and tenants of that process, regardless of how the event source is configured. -- The counts for external events will still include ingested events that have since been deleted using the [event inspection feature](#deleting-ingested-events).
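The [external events](#external-events) section above refers to the event ingestion API. As a rough orientation, the sketch below shows what an ingestion request could look like; the endpoint path, the access-token handling, and the exact field names are assumptions based on the Optimize event ingestion API documentation and should be verified against the API reference for your Optimize version.

```python
# Hedged sketch: ingest two external events so they can be mapped in an
# event-based process. URL, token, and event names are placeholders; the
# field names follow the CloudEvents-based format the event ingestion API
# expects (verify against your Optimize version).
import requests

OPTIMIZE_URL = "http://localhost:8090"  # assumption: local Optimize instance
ACCESS_TOKEN = "my-ingestion-token"     # assumption: configured ingestion access token

events = [
    {
        "specversion": "1.0",
        "id": "order-1-received",            # unique ID per event
        "source": "order-service",           # system that emitted the event
        "type": "OrderReceived",             # event name shown in the events table
        "group": "shop",                     # optional; empty or missing means "ungrouped"
        "traceid": "order-1",                # correlates events of one instance
        "time": "2024-01-01T10:00:00.000Z",
    },
    {
        "specversion": "1.0",
        "id": "order-1-shipped",
        "source": "order-service",
        "type": "OrderShipped",
        "group": "shop",
        "traceid": "order-1",
        "time": "2024-01-01T12:30:00.000Z",
    },
]

response = requests.post(
    f"{OPTIMIZE_URL}/api/ingestion/event/batch",
    json=events,
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
response.raise_for_status()  # any 2xx status means the batch was accepted
```

Because both events share the same `traceid`, they are treated as belonging to one instance, and the shared `group` value makes them selectable together when adding external event sources.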
diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/export-import.md b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/export-import.md deleted file mode 100644 index 092ee01fb61..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/export-import.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -id: export-import -title: Export and import -description: Export and import Optimize entities. ---- - -## Exporting entities - -Users can export entities by accessing the **Export** option in the entity menu. This downloads a JSON file which -includes all relevant information that defines the selected entity. This file can later be used to import the exported entity into -a different Optimize installation. - -![Exporting a Process Report](./img/export.png) - -## Importing entities - -### Prerequisites - -Exported entities can be imported both as private entities and into a selected collection, provided the following prerequisites are met: - -- All definitions the entity requires exist in the target Optimize. -- When importing into a collection, the collection data sources must include all relevant definitions for the entity. -- The importing user is authorized to access the relevant definitions. -- The entity data structures match. To ensure matching data structures, confirm the Optimize version of the source is the same as the version of the target Optimize. - -If any of the above conditions are not met, the import will fail. Optimize will display an error message explaining why the import was not successful to enable you to fix the issue and retry. - -### Importing private entities - -To import an entity as a private entity, use the **Import Report/Dashboard** option from the **Create New** menu on the welcome page. The entity will appear in the entity list once the import is finished and can be interacted with as usual. - -![Importing a private entity](./img/private-import.png) - -### Importing entities into a collection - -To add the entity to an existing collection, use the same **Import Report/Dashboard** option from the **Create New** menu from within the selected collection. This will import the entity into the collection. Any user that has access to this collection can now also access the imported entity. - -:::note -The collection must have all data sources required by the imported entity or the import will fail. -::: - -![Importing an entity into a Collection](./img/collection-import.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/footer.md b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/footer.md deleted file mode 100644 index f29afdf5ea5..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/footer.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: footer -title: Footer -description: Take a closer look at engine connection status, Elasticsearch connection, import progress, and more. ---- - -## Footer - -In the **Footer** section of Optimize, you can see some important information, such as: - -- Engine connections status: For each engine that is connected to Optimize, the connection status is shown. This enables you to be aware of any engine connection problems Optimize may be experiencing. -- Status of the connection to Elasticsearch -- Import progress: Indicates if Optimize is currently importing data from the engine. 
Analysis can only be performed in Optimize on data that has already been imported. -- Timezone: The timezone used to display all date and time information -- Optimize version - -![footer overview](./img/footer-overview.png) - -There are three possible states of engine connection: - -- Connected and import finished (green circle) -- Connected and import is not completed (spinner) -- Not connected (red circle) - -Elasticsearch can be either connected or not (green and red circles respectively). - -![footer engine connections](./img/footer-engine-connections.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alert-modal-description.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alert-modal-description.png deleted file mode 100644 index b98abd565e6..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alert-modal-description.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alert-notifications-graph.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alert-notifications-graph.png deleted file mode 100644 index 9ddf9464e98..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alert-notifications-graph.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alerts-overview.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alerts-overview.png deleted file mode 100644 index c48966f6451..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/alerts-overview.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/auto-generation.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/auto-generation.png deleted file mode 100644 index 0f494b7c5c2..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/auto-generation.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/chained_gateway.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/chained_gateway.png deleted file mode 100644 index 769571c2eaf..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/chained_gateway.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/collection-import.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/collection-import.png deleted file mode 100644 index 2cd6c15acb5..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/collection-import.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/complex_gateway.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/complex_gateway.png deleted file mode 100644 index 5b89d866384..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/complex_gateway.png and /dev/null 
differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/deleting-events.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/deleting-events.png deleted file mode 100644 index 5485aec7846..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/deleting-events.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editAccess.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editAccess.png deleted file mode 100644 index f1da83045a7..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editAccess.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editModal.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editModal.png deleted file mode 100644 index a503ade438c..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editMode.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editMode.png deleted file mode 100644 index e78092a3a07..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/editMode.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/eventsTable.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/eventsTable.png deleted file mode 100644 index 14dead16e3b..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/eventsTable.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/export.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/export.png deleted file mode 100644 index c558b83390a..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/export.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/external-events.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/external-events.png deleted file mode 100644 index 8c1fb25e03d..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/external-events.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/externalEvents.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/externalEvents.png deleted file mode 100644 index 991e989396f..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/externalEvents.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/fixed-start-date-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/fixed-start-date-filter.png deleted file mode 100644 index 
f2c6b62bd5d..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/fixed-start-date-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/footer-engine-connections.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/footer-engine-connections.png deleted file mode 100644 index bd3ec59fe34..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/footer-engine-connections.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/footer-overview.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/footer-overview.png deleted file mode 100644 index 801fa4a52df..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/footer-overview.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/inclusive_gateway.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/inclusive_gateway.png deleted file mode 100644 index cc38685dc4f..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/inclusive_gateway.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/mixed_direction_gateway.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/mixed_direction_gateway.png deleted file mode 100644 index 3285e113a79..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/mixed_direction_gateway.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/private-import.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/private-import.png deleted file mode 100644 index a90d60fa204..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/private-import.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/process-version-selection.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/process-version-selection.png deleted file mode 100644 index 2690e232e40..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/process-version-selection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/processList.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/processList.png deleted file mode 100644 index 1d1826a280b..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/processList.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/processView.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/processView.png deleted file mode 100644 index 385f391003e..00000000000 Binary files 
a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/processView.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/publishModal.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/publishModal.png deleted file mode 100644 index c7802339d7e..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/publishModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/relative-start-date-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/relative-start-date-filter.png deleted file mode 100644 index 60551f00eda..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/relative-start-date-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-filterlist-open.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-filterlist-open.png deleted file mode 100644 index 017daeebf99..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-filterlist-open.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-flownode-filterlist-open.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-flownode-filterlist-open.png deleted file mode 100644 index 5a5273d4919..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-flownode-filterlist-open.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-process-variants.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-process-variants.png deleted file mode 100644 index 893f808fae8..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/report-with-process-variants.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/sourceModal.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/sourceModal.png deleted file mode 100644 index c773d5d973a..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/sourceModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/usersModal.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/usersModal.png deleted file mode 100644 index 187b9cee13c..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/usersModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-boolean.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-boolean.png deleted file mode 100644 index 34386854e84..00000000000 Binary files 
a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-boolean.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-date.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-date.png deleted file mode 100644 index 5ea830d1dcc..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-date.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-numeric.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-numeric.png deleted file mode 100644 index 03d24e962e7..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-numeric.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-string.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-string.png deleted file mode 100644 index 53ed3907191..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-filter-string.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-labeling-panel.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-labeling-panel.png deleted file mode 100644 index abe0c6f9555..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/variable-labeling-panel.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/zoom-in.png b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/zoom-in.png deleted file mode 100644 index caa1dc51064..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/img/zoom-in.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/ml-dataset.md b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/ml-dataset.md deleted file mode 100644 index a154f104d96..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/ml-dataset.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: ml-dataset -title: Machine learning-ready data set -description: Export a raw data report in JSON format, which can be used for training a machine learning model. ---- - -The machine learning-ready data set feature allows the export of data into a single data set, easing performance of advanced analysis with Optimize. The data set generated can contain process information and be assembled by generating a raw data report. - -The data contained in the raw data reports is already organized and pre-processed in such a way that it would allow a trained model to make predictions for future instances based on existing instances for a given definition. - -In addition to the previously existing columns in the raw data reports, we added columns for improved machine learning capabilities. 
These columns allow a user to access information such as the total number of incidents per process instance, the number of open incidents, the number of user tasks, and the total duration of an event. - -For example, this allows you to predict how long an instance will take to complete based on the number of incidents or user tasks. - -After navigating to a raw data report, note the added columns are now displayed: - -![Raw Data Report](../process-analysis/img/raw-data-report-ml-ready-dataset.png) - -In most cases, when training a machine learning model the data can be fed to common libraries, such as pandas or scikit-learn in CSV or JSON format. To export all data contained in a raw data report and use it as input for model training, export the raw data reports in JSON format. - -This can be done after saving the report and utilizing the external Optimize endpoint provided to export it to JSON. More information on how to use the JSON export endpoint can be found [here](../../../apis-tools/optimize-api/report/get-data-export.md). diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/process-variants-comparison.md b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/process-variants-comparison.md deleted file mode 100644 index c960c5fcd64..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/process-variants-comparison.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: process-variants-comparison -title: Process variants comparison -description: Using copy data source tool to compare different variants of the process ---- - -When creating reports, it is possible to copy the data source and set different versions and/or tenants for each copy, as well as give it a custom title. This allows you to compare different variants of the process. - -To use this feature, navigate to the definition edit window from inside a report. Click the copy icon while hovering over the data source. This will create a copy of the data source with all its parameters. - -Now click the pencil icon while hovering over the data source you want to edit and select a different version and/or tenant. This way there are two variants of the same process which can be now compared. - -![Process version selection](./img/process-version-selection.png) - -Here is an example of a bar chart report comparing the average process instance duration of two versions of the same process. - -![Report with two versions of the same process](./img/report-with-process-variants.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/variable-labeling.md b/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/variable-labeling.md deleted file mode 100644 index 5deb4671dfe..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/additional-features/variable-labeling.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: variable-labeling -title: Variable labeling -description: Add a label for a variable in a process definition. ---- - -The variable labeling functionality allows users to add, update, and delete batches of variable labels so your data is more understandable by business users. This allows Optimize to display a variable's label instead of its original name anywhere the given process definition is being used. Some examples of that would be -when viewing and configuring reports, dashboards, or event-based processes. 
- -To use this feature, navigate to the definition edit window from inside a report. Click the pencil icon while hovering over the data source and click **Rename Variables** to access the label edit panel. You will then see the following panel: - -![Label Edit panel](./img/variable-labeling-panel.png) - -To delete a label, leave its value field empty. - -## Limitations - -:::note -This feature is currently not supported in outlier analysis and CSV export. This means that during outlier analysis, the original name of a variable is displayed. -::: - -Keep in mind that when applying variable filters in multi-definition reports and multi-definition dashboards, the filters are applied to all variables across definitions which have the same name and type. This happens even if the variables are labeled differently across definitions. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/collections-dashboards-reports.md b/optimize_versioned_docs/version-3.10.0/components/userguide/collections-dashboards-reports.md deleted file mode 100644 index 9d61719752c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/collections-dashboards-reports.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -id: collections-dashboards-reports -title: Collections, dashboards, and reports -description: "Let's take a closer look at the structure of Optimize and its permissions." ---- - -Within Optimize, **reports** are based on a _single_ visualization, similar to a single chart or graph. **Dashboards** are aggregations of these visualizations, similar to a full spreadsheet of data collections, or a combination of several comparative charts and graphs. **Collections** are groups of these data sets, similar to project folders in which you can nest a series of dashboards and/or reports. - -The Optimize landing page shows a list of all existing dashboards and reports created, as well as collections an individual user or a group has access to. Click on a collection to view its accompanying reports and dashboards. - -![home page](./img/home.png) - -In addition to the name of the dashboard, report, or collection, you can also see the date it was last modified. Alongside collections, dashboards, and combined process reports, you can also see how many entities are contained within (e.g. how many reports are on a dashboard). You can also see how many users and groups have access to a collection. - -:::note -Optimize offers collaborative capabilities, too. Click the **Share** tab to share a created [dashboard](./creating-dashboards.md). Toggle to **Enable sharing**, and copy or embed the provided link. Colleagues without access to Optimize can still view your report with the shared link. Learn more about [user permissions](./user-permissions.md). -::: - -Clicking on a report, dashboard, or collection takes you to its corresponding details page. When moving the mouse over one of these entities, you can access a context menu that allows you to edit, copy, or delete the entity. Multiple entities can be selected and deleted at once using the bulk menu which appears after selecting at least one entity. When copying an entity, you also have the option to move that copy into a collection. - -![copy sales dashboard](./img/copy.png) - -To find a collection, report, or dashboard, use the search field on the top of the page to filter the list by the name of the entity.
- -To [create a dashboard](./creating-dashboards.md) or [report](./creating-reports.md), use the **Create New** button available in the top right corner of the page. - -## User permissions - -:::note -Adding user groups to collections is currently only available in Camunda 7. -::: - -By default, if you create a collection, only you can access the collection and the contents within. To share a collection with other users, add them to the collection. - -![users and user groups](./img/users.png) - -You are automatically assigned the manager role when creating a new collection. There can be multiple managers for a collection. However, there must be at least one manager for every collection. Managers can do the following: - -- Add, edit, and remove dashboards and reports to the collection. -- Edit the collection name and delete the collection using the context menu in the header. -- Add, edit, and remove other users and user groups to collections via the collection's **Users** tab. - -A manager can add a new user or group to the collection using the **Add** button. Use the ID of the user/group to add them. Every user/group has a role assigned to them that specifies their access rights to the collection. - -![add user or user group](./img/addUser.png) - -An editor may edit, delete, and create new dashboards or reports in the collection. Editors may not edit the name of the collection, delete the collection, or change anything in the **Users** tab. - -Those with read-only access to the collection may only view the components contained within, as well as copy them. Viewers cannot create, edit, or delete components in a collection. They are also not allowed to rename or delete the collection itself, or change anything in the **Users** tab. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/combined-process-reports.md b/optimize_versioned_docs/version-3.10.0/components/userguide/combined-process-reports.md deleted file mode 100644 index a4840410da0..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/combined-process-reports.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: combined-process-reports -title: Combined process reports -description: Occasionally, it is necessary to compare multiple reports or visualize them together in one diagram. ---- - -Camunda 7 only - -## Creating a combined process report - -Occasionally, it is necessary to compare multiple reports or visualize them together in one diagram. This can be achieved by creating a special type of report called a **combined process report**. To create a new combined process report, visit the homepage and click **Create New > New Report > Combined Process Report**. - -![Creating a Combined process report](./img/combined-report-create.png) - -Then, you are redirected to the combined process report builder. There, view the selection panel on the right to select multiple reports to combine. - -:::note -If the combined process report is inside a collection, only reports in the same collection can be combined. If the combined process report is not in a collection, it can only combine reports that are also not in a collection. -::: - -A preview of the selected reports will be displayed in the panel on the left. 
- -![combined process report builder](./img/combined-report.png) - -For example, combining two reports with a table visualization results in the following view: - -![Combining two reports with a table visualization](./img/table-report.png) - -And combining two reports with a line chart visualization results in the following view: - -![Combining two reports with line chart visualization](./img/area-chart-report.png) - -You can change the color of chart reports by clicking on the color box near the name of the report. - -You can also drag items in the list of selected reports to change their order in the report view. - -:::note -Not all reports can be combined with each other; differences in their configurations, e.g. a different visualization, may make them incompatible. Therefore, when selecting a report, only the other reports that are combinable with the selected one will appear. -::: - -Only reports that match the following criteria can be combined: - -- Same group by -- Same visualization -- Same view but combining user task duration (work, idle, and total). Flow node duration reports are also possible. -- Distributed reports cannot be combined -- Multi-measure reports including reports containing multiple aggregations or multiple user task duration times cannot be combined. -- Process definition can be different. -- Furthermore, it is possible to combine reports grouped by start date with reports grouped by end date under the condition that the date interval is the same. - -Moreover, only the following visualizations are possible to combine and will show up in the combined selection list: - -- Bar chart -- Line chart -- Table -- Number - -Currently, it is not possible to combine decision reports. - -It is also possible to update the name of the report, save it, and add it to a dashboard exactly like a normal report. The combined process reports will also show up in the reports list along with the normal reports. - -### Configure combined process reports - -You can configure the combined process report using the cog wheel button available on the top right side of the screen. - -For example, in all chart reports, you can change what to show in the tooltips, change the axis names, and set a goal line as shown in the figure below. - -![Configurations available for combined process reports](./img/combined-config.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/creating-dashboards.md b/optimize_versioned_docs/version-3.10.0/components/userguide/creating-dashboards.md deleted file mode 100644 index a55561cb1f4..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/creating-dashboards.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: creating-dashboards -title: Creating dashboards -description: "Define reports in Camunda Optimize according to Key Performance Indicators relevant to your business objectives." ---- - -Often, you want to get a quick overview of business performance by monitoring the underlying processes. To achieve that, you can define reports in Camunda Optimize according to KPIs (Key Performance Indicators) relevant to your business objectives. A dashboard shows multiple reports, so you can get an up-to-date view of the productivity of your system. - -The dashboard consists of the [**edit mode**](./edit-mode.md) and [**view mode**](./view-mode.md). - -To create a new dashboard, click the **Create New** button on the homepage or collection page and select the **New dashboard** option.
This opens a dialog where you can set the dashboard name and select one of multiple dashboard templates. When not creating a blank dashboard, select a process definition. This process definition is used to create new reports for the dashboard. - -Creating a dashboard from a template also creates new reports which are saved as soon as the dashboard is saved. - -![create new dashboard](./img/dashboardTemplate.png) - -:::note -Optimize offers collaborative capabilities, too. Click the **Share** tab to share a created dashboard. Toggle to **Enable sharing**, and copy or embed the provided link. Colleagues without access to Optimize can still view your report with the shared link. Learn more about [user permissions](./user-permissions.md). -::: diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/creating-reports.md b/optimize_versioned_docs/version-3.10.0/components/userguide/creating-reports.md deleted file mode 100644 index d261b6c21a9..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/creating-reports.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: creating-reports -title: Creating reports -description: "View your data from different angles and capture all aspects that influence your processes, show new trends, or depict your current business state." ---- - -## Overview - -Reports offer the ability to view your data from different angles, and thus capture all aspects that influence your processes, show new trends, or depict your current business state. - -Each report consists of the [edit mode](./process-analysis/report-analysis/edit-mode.md) and [view mode](./process-analysis/report-analysis/view-mode.md) to perform different kinds of actions on it. - -## Creating a single report - -To create a custom report based on a key performance indicator (KPI) you’d like to analyze, and to incorporate this report into a dashboard, follow the steps below: - -1. On the right side of the **Home** page, select **Create New > New Report**. Here we’ll take a look at a single process, though you can also view data from multiple processes. -2. Click the text box under **Select Process** and select the process you’d like to analyze. -3. Select the type of report you’d like to use on the right side of the **Create new Report** box. As with dashboards, Optimize offers preconfigured templates such as heatmaps and tables. We’ll begin with a heatmap. -4. Click **Create Report**. - ![heatmap example](./img/report-reportEditActions.png) -5. Set up and customize your report. Begin by naming your report in the text box at the top of the page. -6. In the gray text box to the right, confirm your data source, and select what you’d like to review from the process. You can also group by topics such as duration or start date. -7. If you’d like, filter the process instance or flow nodes. For example, you can filter by duration, only viewing process instances running for more than seven days. -8. Finally, you have the option to view particular sets of data from the instance, like instance count or absolute value, by selecting the gear icon to the left of your data customization. You can also choose how you’d like to visualize your data in the box beneath **Visualization** (i.e. bar chart, pie chart, etc.). Once you’ve made your selections, click **Save**. - -## Share your report - -Optimize offers collaborative capabilities, too. Click the **Share** tab to share a created report. Toggle to **Enable sharing**, and copy or embed the provided link. 
Colleagues without access to Optimize can still view your report with the shared link. - -Learn more about [user permissions](./user-permissions.md). diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/data-sources.md b/optimize_versioned_docs/version-3.10.0/components/userguide/data-sources.md deleted file mode 100644 index 59322e29245..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/data-sources.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: data-sources -title: Data sources -description: "If you create a collection, you can add data sources that can be used to create reports. See the existing data sources or add additional ones." ---- - -If you create a collection, you can add data sources that can be used to create reports. To see the existing data sources or add additional ones, go to the **Data Sources** tab of the collection. - -![add source by definition](./img/sourceByDefinition.png) - -Using the **Add** button, a manager can add one or more sources to the collection by selecting the definitions that need to be added. - -![add source by tenant](./img/sourceByTenant.png) - -The added sources will appear in the process/decision selection list inside the report builder where they can be used to create reports. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-analysis-overview.md b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-analysis-overview.md deleted file mode 100644 index f34bf50f4ca..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-analysis-overview.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: decision-analysis-overview -title: Overview -description: Explore, discover and get insights into your decisions that otherwise would be hidden. ---- - -Camunda 7 only - -Decision reports provide you with the ability to view your data from different angles and thus capture all aspects that influence your decisions, show new trends, or depict your current business state. - -You can also define filters which help you narrow down your view to what you are interested in. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-filter.md b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-filter.md deleted file mode 100644 index 82390242e8e..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-filter.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: decision-filter -title: Filters -description: Narrow down your view on the decision by creating reports based on a subset of all decision evaluations. ---- - -Camunda 7 only - -Similar to [filters for process analysis](../process-analysis/filters.md), you can define filters for your decision reports. - -You can filter by the [evaluation date](#evaluation-date-filter) of the decision, or by [input and output variables](../process-analysis/variable-filters.md). This screenshot shows how to add a filter to your decision report: - -![Decision Report with open filter list in Camunda Optimize](./img/report-with-filterlist-open.png) - -## Evaluation date filter - -Applying an evaluation date filter will result in the report considering only decision evaluations which occurred within the defined date range. Only one evaluation date filter can be defined for any report. 
- -Like the [process instance date filters](../process-analysis/metadata-filters.md#date-filters), you can define a fixed or relative filter. Read the appropriate section in the process filter guide for details about the differences. - -As an alternative way to create an evaluation date filter, if your report is visualized as a bar or line chart, you can use your mouse to select the area you want to create the filter for. - -![Zooming into a section of the chart](./img/zoom-in.png) - -## Variable filter - -Using the input or output variable filter retrieves only those decisions where the evaluation had certain variable values as either input or output. For example, assume you want to analyze only those decision evaluations where the output variable **Classification** had the value **budget**. You can achieve this by creating an output variable filter, selecting the **Classification** variable, and checking the **budget** option. - -Depending on the variable type, different ways to specify the value range are available. Read the [variable filter section](../process-analysis/variable-filters.md) in the filter guide to see all possible options. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-report.md b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-report.md deleted file mode 100644 index 9e3339ddb2a..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/decision-report.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: decision-report -title: Single report -description: Explore, discover, and get insights into your decision evaluations. ---- - -Camunda 7 only - -Decision reports are very similar to process reports, but allow you to gain insights into your decision definitions, rather than process definitions. - -To create a decision report, click on the **Decision Report** option using the **Create New** dropdown button available on the homepage. - -![Create a new Decision Report from the Report list page](./img/dmn_report_create.png) - -There are a number of different reports you can create based on decisions: - -## Raw data - -Similar to the raw data process report, this allows you to view a table listing all available decision data. This can come in handy if you found interesting insights in certain decision evaluations and need detailed information about those evaluations, or you are exploring a decision definition with a limited number of evaluations. - -You can reorder the columns and click on any column header to sort the table by this column. Using the configuration dialog, you can also define which columns to show and whether to include the evaluation count number in the report. These settings are only available in the edit mode of the report. - -To create a raw data report, select **Raw Data** from the view dropdown. The other fields are filled automatically. - -![Decision Raw Data Table in Camunda Optimize](./img/dmn_raw_data_report.png) - -## Evaluation count - -This view option allows you to create reports that show how often the decision was evaluated. Depending on the group by selection, this could be either the total number of evaluations, a chart displaying how this number of evaluations developed over time, or how they were distributed across variables or rules. As always, you can define [filters](../process-analysis/filters.md) to specify which decision evaluations to include in the report.
- -#### Group by: None - -This shows a single number indicating the total number of evaluations for this decision definition and version in the current filter. Using the configuration dialog, you can limit the precision of the number and define a goal to create a progress bar. Details of both options are described in the [process report configuration section](../process-analysis/report-analysis/configure-reports.md#number). - -![Progress Bar visualization for Decision Evaluation Count](./img/dmn_progress_bar.png) - -#### Group by: Rules - -This report shows the decision table with an additional column to the right. This column contains information on how often each rule matched an evaluation. It also shows a bar indicating how frequently a single rule was matched. You can turn off the numbers or the bar in the configuration dialog. - -![Decision Table with evaluation count information](./img/dmn_decision_table.png) - -#### Group by: Evaluation date - -Using this group by option allows you to see the development of evaluations over time. The result can be visualized as a table or chart. In combination with filters, this allows you to create powerful reports. For example, you can show during which time period the decision resulted in a certain output variable value. If you visualize such a report as a chart, you have access to all the [chart visualization options](../process-analysis/report-analysis/configure-reports.md#charts-line-bar-pie) process reports have, too. - -![Line Chart showing decision evaluations by date](./img/dmn_date_chart.png) - -#### Group by: Input or output variable - -This option allows you to choose a variable from the decision definition to group the results by. In the report, you will see which values this variable had over all evaluations in the filter and how often each value was encountered when evaluating the decision. This type of report can be visualized as a table or chart.
- -![Pie Chart depicting distribution of output variable values](./img/dmn_pie_chart.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_date_chart.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_date_chart.png deleted file mode 100644 index e7222d33ee0..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_date_chart.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_decision_table.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_decision_table.png deleted file mode 100644 index d14109fe919..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_decision_table.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_pie_chart.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_pie_chart.png deleted file mode 100644 index 471797df5ea..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_pie_chart.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_progress_bar.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_progress_bar.png deleted file mode 100644 index 0122be490c3..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_progress_bar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_raw_data_report.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_raw_data_report.png deleted file mode 100644 index 4dda7309c2d..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_raw_data_report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_report_create.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_report_create.png deleted file mode 100644 index f05df857a19..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/dmn_report_create.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/report-with-filterlist-open.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/report-with-filterlist-open.png deleted file mode 100644 index 9548dcdeab7..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/report-with-filterlist-open.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/zoom-in.png b/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/zoom-in.png deleted file mode 100644 index a67069eae51..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/decision-analysis/img/zoom-in.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/edit-mode.md 
b/optimize_versioned_docs/version-3.10.0/components/userguide/edit-mode.md deleted file mode 100644 index acbb53334f9..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/edit-mode.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: edit-mode -title: Edit mode -description: "The edit mode allows you to configure the dashboard and adjust it to your needs." ---- - -The edit mode allows you to configure the dashboard and adjust it to your needs, including the following operations: - -- Rename your dashboard -- Add/remove a tile -- Save the current state with your applied changes -- Cancel changes you already applied to the dashboard -- Set filters available on the dashboard -- Set a default auto refresh rate to periodically update the dashboard in [view mode](./view-mode.md) - -![edit mode](./img/dashboard-dashboardEditActions.png) - -Once you have prepared all your reports, you can now start to assemble them into a dashboard. Above the dashboard grid, click **Add a Tile** to open a modal where you can select one of your defined reports and add it to the dashboard. The **Add a Tile** modal allows you to create dashboards that combine Optimize reports with data from other services as external website tiles and text tiles. - -### Optimize reports - -In the **Add a Tile** modal, use the **Optimize Report** tab to add reports to the dashboard. Use the **Select a Report** field to select one of the previously created reports or click **+ New Report from a template** to create a new report. - -![add a report modal](./img/dashboard-addAReportModal.png) - -:::note -If the dashboard is inside a collection, only reports that are in the same collection can be added. If the dashboard is not in a collection, it can only contain reports that are also not in a collection. -::: - -### External websites - -In the **Add a Tile** modal, click **External Website** to enter the URL of an external data source which should be added to the dashboard. Such external websites are added as iframes to the dashboard. - -![external website editor](./img/dashboard-addAReportModal-externalReport.png) - -### Text tiles - -In the **Add a Tile** modal, click **Text** to open the text editor. This allows you to create a document which can contain formatted text, links and images. - -![text editor](./img/dashboard-addAReportModal-textReport.png) - -### Placing tile on the dashboard - -To move the tile to your desired location, drag it in any direction. As soon as you release the dragged tile, it snaps to the closest grid position. Dragging the handle on the lower right corner of each tile will resize it. Delete the tile from your dashboard by clicking the **x** button on the top right corner of each tile. - -![edit actions](./img/dashboard-reportEditActions.png) - -## Adding filters in edit mode - -In the dashboard edit mode, there is an **Add a Filter** button which shows a **Filters** panel. This panel allows you to specify filters which will become available for the dashboard. 
The following filters are available: - -- Start date: Allows filtering by process instance start date -- End date: Allows filtering by process instance end date -- Instance state: Allows filtering by process instance state, such as running, completed, or canceled -- Variable: Allows filtering by process instance variable value -- Assignee: Allows filtering flow node data by their assignee -- Candidate Group: Allows filtering flow node data by their candidate group - -![filter edits](./img/filter-editMode.png) - -For Variable Filters, specify which variable the filter should apply to. For string and number variables, provide a list of values which should be allowed to be filtered by. - -Additionally, it is possible to allow dashboard users to filter by their own values by checking the **Allow viewer to add filter values** box. In contrast to report filters, adding a value in the modal will not immediately filter by this value, it will only make this value available to filter by in the dashboard. - -For **Assignee** and **Candidate Group** filters, the dashboard editor can specify which assignees and candidate groups are available to filter by. In contrast to report filters, adding an assignee or candidate group to the filter will not immediately filter by this value, it will only make this value available to filter by in the dashboard. Additionally, it is possible to allow dashboard users to filter by their own values by checking the **Allow viewer to add filter values** box. - -The list of variable names, variable values, assignees, and candidate groups is compiled from all reports on the dashboard. - -### Setting a default dashboard filter - -After specifying available filters in the dashboard edit mode, editors of the dashboard can also set a default filter. A default filter is always applied when a user initially opens the dashboard. Viewers can still remove filter values to see unfiltered reports, but if a user does not perform any steps to change the filter manually, they will see the reports with the defined default filter. - -To set a default filter, dashboard editors can use the added filter options in the filter area. Whatever filter configuration is set there when the dashboard is saved becomes the default filter for the dashboard. 
diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/addUser.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/addUser.png deleted file mode 100644 index 0498752b136..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/addUser.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/area-chart-report.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/area-chart-report.png deleted file mode 100644 index 75b4fc7b18e..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/area-chart-report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/collection.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/collection.png deleted file mode 100644 index 7b618e709b8..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/collection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-config.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-config.png deleted file mode 100644 index c039612a644..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-config.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-report-create.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-report-create.png deleted file mode 100644 index 3849a161540..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-report-create.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-report.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-report.png deleted file mode 100644 index 4d6735600f0..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/combined-report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/configureProcess.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/configureProcess.png deleted file mode 100644 index 7a975450d2e..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/configureProcess.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/copy.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/copy.png deleted file mode 100644 index 19c338d3c8e..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/copy.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal-externalReport.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal-externalReport.png deleted file mode 100644 index 175aa85e116..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal-externalReport.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal-textReport.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal-textReport.png deleted file mode 100644 index 50a298ffc69..00000000000 Binary 
files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal-textReport.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal.png deleted file mode 100644 index 09ff9d48dd5..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-addAReportModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-dashboardEditActions.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-dashboardEditActions.png deleted file mode 100644 index 1455bd65f18..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-dashboardEditActions.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-reportEditActions.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-reportEditActions.png deleted file mode 100644 index 6913d66befb..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-reportEditActions.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-sharingPopover.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-sharingPopover.png deleted file mode 100644 index dc14b7fe776..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-sharingPopover.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-viewMode-monitorFeatures.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-viewMode-monitorFeatures.png deleted file mode 100644 index 84c178cab93..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboard-viewMode-monitorFeatures.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboardTemplate.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboardTemplate.png deleted file mode 100644 index feae00b34f4..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/dashboardTemplate.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/filter-editMode.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/filter-editMode.png deleted file mode 100644 index 5fbf05722fe..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/filter-editMode.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/filter-viewMode.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/filter-viewMode.png deleted file mode 100644 index 9ecfd8c8cce..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/filter-viewMode.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/home.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/home.png deleted file mode 100644 index 68f49378a19..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/home.png and /dev/null differ diff --git 
a/optimize_versioned_docs/version-3.10.0/components/userguide/img/kpiConfiguration.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/kpiConfiguration.png deleted file mode 100644 index 4010df81313..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/kpiConfiguration.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/pieFormat.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/pieFormat.png deleted file mode 100644 index 502ea2bdf64..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/pieFormat.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/processOverview.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/processOverview.png deleted file mode 100644 index 8c67f499f12..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/processOverview.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/processes.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/processes.png deleted file mode 100644 index a52d8fbeaee..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/processes.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/report-reportEditActions.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/report-reportEditActions.png deleted file mode 100644 index 9a5e7e4fb57..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/report-reportEditActions.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/reportTemplate.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/reportTemplate.png deleted file mode 100644 index 80013d42cf3..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/reportTemplate.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/sourceByDefinition.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/sourceByDefinition.png deleted file mode 100644 index 4d947751b11..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/sourceByDefinition.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/sourceByTenant.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/sourceByTenant.png deleted file mode 100644 index ee4b8f96795..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/sourceByTenant.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/sources.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/sources.png deleted file mode 100644 index 14aee190b45..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/sources.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/table-report.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/table-report.png deleted file mode 100644 index 8e13bc77816..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/table-report.png and /dev/null differ diff --git 
a/optimize_versioned_docs/version-3.10.0/components/userguide/img/timeGoals.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/timeGoals.png deleted file mode 100644 index d8e149d355d..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/timeGoals.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/img/users.png b/optimize_versioned_docs/version-3.10.0/components/userguide/img/users.png deleted file mode 100644 index cd3784a638c..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/img/users.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/instant-preview-dashboards.md b/optimize_versioned_docs/version-3.10.0/components/userguide/instant-preview-dashboards.md deleted file mode 100644 index 7eeea944806..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/instant-preview-dashboards.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: instant-preview-dashboards -title: Instant preview dashboards -description: "Use automatically generated dashboards and reports from Optimize to find insights for your processes." ---- - -Camunda Optimize is a comprehensive process optimization tool that helps businesses streamline their operations -and improve efficiency. One of the standout features of Optimize is its ability to automatically generate -dashboards for each process, providing users with clear insights into process performance. - -Optimize imports each process deployed to Camunda and automatically creates a dashboard. -The dashboards are designed to be intuitive and easy to use and can be accessed from the [process dashboards page](./process-dashboards.md) by clicking on the desired process. - -The data displayed in this dashboard (and its corresponding reports) is coupled with the user's permissions to that -process definition, meaning the dashboard will include data from all tenants that the user is authorized to see. -Moreover, the dashboards and reports display data from all versions of the process definition in question. - -The dashboard has a predictable URL so it can also be embedded into other tools and web pages. The URL has the format -_https://<OPTIMIZE_URL>/dashboard/instant/<BPMN-PROCESS-ID>/_. This URL is stable across Optimize versions, -so there is no need to change it when updating Optimize. - -:::note -Instant preview dashboards cannot be shared like standard dashboards. To share it, share the URL. The recipient will need to sign in to Optimize to see the dashboard. -::: diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/branch-analysis.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/branch-analysis.md deleted file mode 100644 index 8bfe088b20c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/branch-analysis.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: branch-analysis -title: Branch analysis -description: Optimize provides you with a statistical analysis for a given end event and a gateway. ---- - -## Overview - -If a process contains more than one end event, it is useful to know which path tokens took to reach a specific end event. Optimize provides you with a statistical analysis for a given end event and a gateway. This analysis includes how tokens were split at the gateway in question, and how many of the tokens of each branch reached the end event. 
- -![branch analysis](./img/analysis-1.png) - -## Branch analysis in Optimize - -Select a process definition using the **Select Process** option in the top left of the page. After selecting a process definition and version, the diagram of the process is displayed on the page. - -By default, all process instances for the selected process definition are included in the analysis. You can reduce this set of process instances by applying filters. - -To perform a statistical analysis on the selected process, specify a gateway and an end event. Moving your mouse cursor over the end event and gateway inputs at the top of the screen highlights available elements in the diagram. Likewise, mouse over an element to see whether it is an end event or gateway. - -Additionally, if you move your mouse over an end event, you see detailed information about this end event, like how many instances reached this end event. Click on an element to select or deselect it. You can also clear the selection using the **x** button in the control panel on top. Changing the process definition also clears the selection. - -After selecting an end event and gateway, a statistical analysis is shown next to the diagram. The analysis consists of two bar charts and a statistical summary. Both charts contain a bar for every sequence flow leaving the selected gateway. - -![branch analysis second example](./img/analysis-2.png) - -The first chart shows the distribution of process instances over the various sequence flows, showing how often each sequence flow has been executed, independently of whether the process instance then reached the selected end event. - -The second chart displays the relative amount of process instances that reached the selected end event after taking the respective outgoing sequence flow. - -Process instances which have taken more than one path (e.g. by looping back to a flow node before the gateway and then taking a different sequence flow) are not considered in the statistical analysis. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/filters.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/filters.md deleted file mode 100644 index 48f290c61d6..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/filters.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: filters -title: Overview -description: Define filters in Optimize to narrow down your view to only a subset of process instances. ---- - -Locating flaws in your process models can be a huge challenge when you have millions of process instances to sift through. Define filters in Optimize to narrow down your view to only a subset of process instances. - -Camunda Optimize offers various ways of filtering your data, such as filter by: - -- [Metadata](./metadata-filters.md) (date, duration, assignee, etc.) -- [Instance state](./instance-state-filters.md) (running or canceled instances) -- [Flow node](./flow-node-filters.md) (flow node date, flow node duration, etc.) -- [Process instance](./process-instance-filters.md) (process instance date, process instance duration) -- [Variables](./variable-filters.md) (boolean, string, etc.) - -## Filter behavior - -There are two ways to filter data in Optimize: - -1. Instance filters: All filters can be used to filter instances in single reports and during branch analysis. -2. 
Flow node data filters: These filters can be used if you not only want to filter instances, but you additionally need to filter the content of instances (for example, flow nodes). Since not all filters can be applied on flow nodes, only compatible ones can be used as a flow node data filter. Flow node filters also exclude all instances from the result which do not contain at least one flow node that matches the filter. - -To summarize, instance filters remove rows, while flow node data filters remove columns. - -Additionally, if the report contains multiple processes, filters need to specify which definition they apply to. Some filters can apply to multiple definitions at once, while other filters are specific to a certain process definition. For example, because they rely on the flow nodes present in the definition. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/flow-node-filters.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/flow-node-filters.md deleted file mode 100644 index 5e5a25ab9b8..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/flow-node-filters.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: flow-node-filters -title: Flow node filters -description: Take a closer look at flow node status filters, flow node date filters, flow node duration filters, and more. ---- - -## Flow node filter - -Retrieve only those process instances that executed certain flow nodes within your process by using the `Flow Node Filter`. Selecting several values at once means that all the selected flow nodes need to have been executed at least once in the process instance lifetime. At the top of the flow node filter modal you can see a preview of the filter you are about to create. You can also filter process instances where certain flow nodes were not executed. - -![Flow node filter in Camunda Optimize](./img/flownode-filter.png) - -## Flow node selection - -In flow node and user tasks reports, all flow nodes are included in the result by default. This could result in many table rows or chart entries which makes the visualization hard to read. This filter allows you to specify which flow nodes are included and deselect the ones that are not relevant to the report. - -![Specifying which nodes are included in the report](./img/flowNodeSelection.png) - -## Flow node status filter - -Some flow nodes can take a relatively long time to complete (e.g. user tasks or long-running service tasks). By default, a report includes all flow nodes in the calculations, whether they are currently running, canceled, or already completed. You can change this behavior by adding a flow node status filter as a [flow node data filter](./filters.md#filter-behavior). - -Adding one of the flow node status options will filter both instances and flow nodes according to the selected status: - -- For instance reports: The filter will only include instances that have at least one flow node matching the filter criteria. This behavior can be seen if you are in variable, incident, or raw data reports. -- For flow node reports: Flow nodes that do not match the filter criteria will be excluded from the results. - -This behavior can be seen if you are in flow nodes or user task reports. - -Here are the possible options for this filter: - -- Running flow nodes only: Your report will only collect information from flow nodes that are currently running. 
-- Completed flow nodes only: Considers only successfully completed flow nodes. -- Canceled flow nodes only: Considers only canceled flow nodes. -- Completed or canceled flow nodes only: Considers all completed flow nodes regardless of whether they were canceled or not. - -:::note -For incident reports, flow node status filters always behave as instance filters and do not filter flow nodes. -::: - -## Flow node date filter - -Similar to process instance date filters, flow node date filters allow you to filter the report based on flow node start or end dates. - -:::note -Reports with a flow node end date filter will only consider data from completed flow nodes. -::: - -This filter type can be applied either as a [process instance](./filters.md#filter-behavior) or as a [flow node](./filters.md#filter-behavior) filter: - -- When applied as a process instance filter, you are required to select the flow nodes that are to be relevant to the filter, yielding a report which will only consider those process instances where one or more of the selected flow nodes match the configured filter. - -![Flow Node date filter](./img/flowNode-date-filter.png) - -- When added as a flow node filter, there is no flow node selection. The resulting report automatically only includes data from those flow nodes which match the given filter. - -## Flow node duration filter - -If the **Flow Node Duration Filter** is applied as an instance filter, it will only regard process instances where one or more flow nodes took a certain amount of time for their execution. For instance, you can filter process instances where a flow node took more than three days or less than five seconds. - -If applied as a flow node filter, it will filter flow nodes and only show the flow nodes that were selected in the filter. - -![Flow Node duration filter in Camunda Optimize](./img/flowNode-duration-filter.png) - -:::note -For incident reports, flow node duration filters always behave as instance filters regardless of where they were defined. 
-::: diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/analysis-1.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/analysis-1.png deleted file mode 100644 index 0132bf75241..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/analysis-1.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/analysis-2.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/analysis-2.png deleted file mode 100644 index 687db1c4edb..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/analysis-2.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/assignee-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/assignee-filter.png deleted file mode 100644 index bbffc0dc0ff..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/assignee-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/combined-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/combined-filter.png deleted file mode 100644 index fbfea8786ab..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/combined-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/duration-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/duration-filter.png deleted file mode 100644 index 314cc6cb440..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/duration-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNode-date-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNode-date-filter.png deleted file mode 100644 index 07bb077c4da..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNode-date-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNode-duration-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNode-duration-filter.png deleted file mode 100644 index ef99a06bbab..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNode-duration-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNodeSelection.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNodeSelection.png deleted file mode 100644 index 0e43ac375ff..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flowNodeSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flownode-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flownode-filter.png deleted file mode 100644 index 
134b3737d7f..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/flownode-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_1_heatMap.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_1_heatMap.png deleted file mode 100644 index d2e4102d922..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_1_heatMap.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_2_distribution.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_2_distribution.png deleted file mode 100644 index c5faa30d363..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_2_distribution.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_3_Variables.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_3_Variables.png deleted file mode 100644 index 32272f540a5..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/outlierExample_3_Variables.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/raw-data-report-ml-ready-dataset.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/raw-data-report-ml-ready-dataset.png deleted file mode 100644 index 022556f3cce..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/raw-data-report-ml-ready-dataset.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/variable-filter.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/variable-filter.png deleted file mode 100644 index c67e74c66c7..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/img/variable-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/instance-state-filters.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/instance-state-filters.md deleted file mode 100644 index 95de8883404..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/instance-state-filters.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: instance-state-filters -title: Instance state filters -description: By default, a report considers all process instances, regardless of whether they are still running. This behavior can be adjusted with the Running Instances Only and Completed Instances Only filters. ---- - -## Running completed instances only filter - -By default, a report considers all process instances, regardless of whether they are still running. This behavior can be adjusted with the **Running Instances Only** and **Completed Instances Only** filters. Be aware that setting one of those filters (e.g. **Running Instances Only**) while the other one is already set (e.g. 
**Completed Instances Only**), will show a warning message since these two filters are incompatible with each other and will not show any data. - -## Canceled instances only filter - -If the **Canceled Instances Only Filter** is applied, the report will only consider those instances which were terminated before completion, either -internally or externally. Be aware that adding this filter along with the **Running Instances Only** filter will show a warning message since these filters are incompatible and will not show any data. - -## Non canceled instances only filter - -As opposed to the **Canceled Instances Only Filter**, applying the **Non Canceled Instances Only** filter will make Optimize query only those instances which were _not_ canceled during -their execution. This means only active and completed instances are considered. Externally or internally terminated instances are not included in the report. - -## Suspended and non suspended instances only filter - -By default, a report considers all process instances, regardless of whether they are [suspended]($docs$/components/best-practices/operations/operating-camunda-c7#suspending-specific-service-calls) or not. Adding this filter makes it possible to only evaluate process instances that are in the suspension state. Note that if you have enabled history cleanup, this might affect the accuracy of this filter given the suspension state is imported from historic data. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/metadata-filters.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/metadata-filters.md deleted file mode 100644 index a6db0fa391b..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/metadata-filters.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -id: metadata-filters -title: Metadata filters -description: Learn more about metadata filters, like date filters, assignee and candidate group filters, and more. ---- - -## Date filters - -In Optimize, there are two kinds of date filters: the start and the end date filter. Each of these filters can be applied on [process instance](./process-instance-filters.md#process-instance-date-filter) and on [flow node](./flow-node-filters.md#flow-node-date-filter) dates. - -There are multiple ways in which you can define your date filters: - -- Set the filter to a current amount of time. For example, today, this week, this month, etc. In such cases, the filter does not remain static, but moves with time to deliver a subset of the data according to the selected time interval. - -:::note -Within date filters, weeks begin on Monday, not Sunday. This is not configurable in Optimize. -::: - -- Set it to a previous amount of time. For example, yesterday, last week, last month, etc. This filter also moves with time and is automatically adjusted to cover completed periods of time. - -Take the following example: Today is Wednesday, March 11. If you set a process instance start date filter to `Last... + week`, you get all process instances that were started from Monday, March 2 to Sunday, March 8. A week passes, and we now have Wednesday, March 18. Applying the same filter now filters the process instances which were started from Monday, March 9 to Sunday, March 15. - -- To cover previous time periods up to the current moment in time, you can use the 'Rolling' option. - -Take the following example: today is March 28.
If you set a process instance start date filter to the last three days, you get all process instances that were started from March 26 to March 28. A day passes, and we now have March 29. Applying the same filter now filters the process instances which were started from March 27 to March 29. - -- If you do not want the filter to be completely dynamic, you can also select `Between`, `Before`, or `After`. -- The `Between` option only considers process instances started or ended within a fixed date range (e.g. filter all process instances between 2018-01-01 and 2018-01-26). This range is fixed and does not change. -- In the same way, you can select `After` or `Before` options to only consider process instances that started or ended after/before a fixed date. - -The start and the end date filters are independent and can be applied to a report simultaneously. However, be aware that each of these filters can only exist once. If, for example, you define a new start date filter when another one already exists, the second one will replace the first one. - -## Assignee and candidate group filters - -These filters allow you to include or exclude instances based on the assignee or the candidate group of at least one user task of a particular process instance. - -![Assignee/Candidate group filter modal](./img/assignee-filter.png) - -As shown in the example, it is possible to select one or more assignees or even filter for unassigned instances. - -This filter has different behavior depending on where it was [defined](./filters.md#filter-behavior): - -- As a `Flow Node data filter` applied on a user task report: This filter only includes user task instances that satisfy _all_ assignee/candidateGroup filters defined in the report at once. Mutually exclusive filters like having both an inclusive and an exclusive filter on the same assignee do not yield any results in user task reports. - -- As an `instance filter`: This filter includes all process instances where _at least one_ user task satisfies one particular assignee/candidateGroup criterion. This means multiple mutually exclusive assignee/candidateGroup filter entries might still yield results for these reports (e.g. if the process definition contains multiple user tasks). - -## Incident filter - -This filter has a different behavior depending on where it was [defined](./filters.md#filter-behavior): - -- As an `instance filter`: This filter will retrieve only those process instances that contain open, resolved, or no incidents (depending on your selection). Here are some examples where this filter can be useful: - - - Creating reports that contain no incidents since the instances that have incidents have very long durations and are influencing your data. - - - To monitor all the instances from multiple engines that have open incidents. - - On the other hand, this filter is not useful for counting the number of incidents because instances with an open or resolved instance filter might still contain instances from the other type. - -- As a `Flow Node data filter`: This filter will additionally filter the instance incident states to only include incidents of the same type (open or resolved). As an example, This filter can be used to count the number of open or resolved incidents since it considers the incidents of that type exclusively. This filter is currently only useful if you are in an incident view report. - -:::note -The incident filter does not currently filter flow nodes regardless of where it was defined. 
-::: - -## Combined filters - -All the previously mentioned filters can be combined. Only those process instances which match all the configured filters -are considered in the report or analysis. The [duration filter](./process-instance-filters.md#process-instance-duration-filter), [flow node filter](./flow-node-filters.md), and [variable filter](./variable-filters.md) can be defined several times. See the following screenshot for a possible combination of filters: - -![Combined filter in Camunda Optimize](./img/combined-filter.png) - -Everyone who has access to the report can add their own filters. For example, by creating a dashboard that contains that report and using dashboard filters. Note that filters can apply to all processes or a subset of processes. - -Filters added in such a way are always combined with the filters set in the report edit mode. That way, users can reduce the set of process instances that are considered when evaluating the report, but not increase the number of instances evaluated above the set the report author specified. - -In essence, if two copies of the same process are present, Optimize combines them with OR logic, and their filters or variables can be combined with the same logic. Therefore, it's possible to compare two differently filtered slices of the same process on the same report (with the group by process feature) or combine them (without group by process). - -Users can get access to a report via the sharing functionality or if the report is in a shared collection. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/outlier-analysis.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/outlier-analysis.md deleted file mode 100644 index 3829e467d91..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/outlier-analysis.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: outlier-analysis -title: Outlier analysis -description: Outlier analysis allows you to easily identify process instances that took significantly longer than others to complete a flow node. ---- - -## Overview - -Outlier analysis allows you to easily identify process instances that took significantly longer than others to complete a flow node, and subsequently slow down your process. - -## Outlier analysis in action - -Select a process definition that you would like to analyze. Once a definition is selected, a **heatmap** is displayed which highlights the flow nodes where Optimize identified many duration outliers. In our example, the **Approve Invoice** task has duration outliers. When hovering over the task, you can see how many instances were identified and how much longer they took than the average duration. - -![outlier analysis example 1](./img/outlierExample_1_heatMap.png) - -Click on **View Details** to directly see a duration distribution chart for the specific flow node. The duration distribution chart contains information about how long the identified outliers took, also in comparison to the other flow node instance durations. - -![outlier analysis example 2](./img/outlierExample_2_distribution.png) - -## Significant variable values - -When looking at the duration outlier instances, you can analyze the data further to find the root cause of why these instances took so long. Click on the significant variables tab to view a table that lists significant variable values in the outlier instances. 
- -It also allows you to see how many times this variable value occurred in the outlier instances compared to the rest of the process instances. This can give you a good idea of whether there is a correlation between a variable value and a flow node taking more time than expected. In our example, we can see that for most of our duration outliers, the delay variable was set to `true`. - -![outlier analysis example 3](./img/outlierExample_3_Variables.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/process-analysis-overview.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/process-analysis-overview.md deleted file mode 100644 index 161cc840bf7..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/process-analysis-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: process-analysis-overview -title: Overview -description: Further analyze your reports, and learn more about branch and outlier analysis. ---- - -The following documentation provides an opportunity to further analyze your reports through several methods: - -- [Outlier analysis](./outlier-analysis.md): Outlier analysis allows you to easily identify process instances where certain flow node instances took significantly longer than others and subsequently slow down your process. -- [Branch analysis](./branch-analysis.md): If a process contains more than one end event, it is useful to know which path tokens took to reach a specific end event. Optimize provides you with a statistical analysis for a given end event and a gateway. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/process-instance-filters.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/process-instance-filters.md deleted file mode 100644 index 84a90e1851a..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/process-instance-filters.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: process-instance-filters -title: Process instance filters -description: Learn more about process instance date and duration filters. ---- - -## Process instance date filter - -Applying a process instance start or end date filter will result in a report considering only process instances that started or ended within the defined range of dates. - -:::note -Reports with a process instance end date filter applied will only consider completed process instances. -::: - -As an alternative way to create a process instance start date filter, you can directly select the desired filter interval in the chart itself if your report is visualized as bar or line chart. - -## Process instance duration filter - -The **Process Instance Duration Filter** allows you to only regard process instances whose execution from start to end took a certain amount of time. For instance, you can filter process instances that took more than three days or less than five seconds. - -:::note -This filter shows only completed process instances, since the total duration of running process instances is not yet known. 
-::: - -![Process instance duration filter in Camunda Optimize](./img/duration-filter.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/compare-target-values.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/compare-target-values.md deleted file mode 100644 index 500a455105a..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/compare-target-values.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: compare-target-values -title: Compare target values -description: Based on flow node duration heatmaps, Optimize allows you to specify a target value for every task. ---- - -Based on flow node duration heatmaps, Optimize allows you to specify a target value for every task. For example, if a user task has to be completed within one day, you can set the target value to one day. If the time it takes to complete the user task exceeds this target value, the task is highlighted in the heatmap. - -To set target values and create a target value comparison heatmap, you need to be in the edit mode of a report which has the following configuration: - -| View | Flow node duration/user task duration | -| ------------ | ------------------------------------- | -| Group by | Flow nodes/user tasks | -| Visualize as | Heatmap | - -If your report has this configuration, a target value button is visible. Clicking on the **Target Value** button for the first time opens an overlay containing the process diagram and a table with all flow nodes. You can also see the actual duration value for every flow node. - -To set a target value for a flow node, use the number and unit fields in the last column. If the target value number field for a flow node is empty, this flow node has no target value set (the selected time unit is ignored in that case). - -![Setting Target Values](./img/targetvalue-2.png) - -If you set a target value for a flow node, this target value is represented as a badge on the flow node in the diagram in the upper part of the overlay. You can click on any flow node in the diagram to jump to the target value input field in the table. - -If you have a user task report, you can only select user tasks here, as only those are included in the report result. When selecting a target value input field in the table, the corresponding diagram element is highlighted. To save the target value configuration, click **Apply**. - -After you save the target values, the normal duration heatmap is replaced with a target value visualization. In this new visualization, flow nodes with an average duration larger than the specified target value are highlighted in red. - -If you mouse over one of the nodes, the tooltip shows the following: - -- The target duration value -- The actual duration -- The relative percentage the actual value is of the target value -- A button to download a list of process instance IDs that exceed the target value - -You can also see the target value as a badge on the diagram. - -![Target Value Comparison](./img/targetvalue-1.png) - -After the initial target values for a report are set, you can use the target value button to toggle between the target value and the regular duration view mode. If you want to change target values, use the gear button to open the overlay again. 
- -As with any change to a report configuration, to persist target values and show them in the report view mode and on dashboards, you need to save the report using the **Save** button in the upper right corner. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/configure-reports.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/configure-reports.md deleted file mode 100644 index b05cc28c306..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/configure-reports.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -id: configure-reports -title: Configure reports -description: The configuration panel groups all the settings that can be applied to a report in one place. ---- - -The configuration panel groups all the settings that can be applied to a report in one place. To see the panel, click on the cog button available in the edit mode of any report. Every visualization has different settings that can be configured in the panel. - -When you save changes to the report configuration, they apply to the report view mode and any dashboard this report is displayed on. - -## Number - -Number reports are any reports that are visualized as a single number (e.g. `Process Instance: Count` grouped by `None` or `Process Instance: Duration` grouped by `None`). - -In number reports, the following configurations are possible: - -## Number precision - -Number precision can be configured from the panel to limit the most significant units to be shown. - -For example, we have a report that calculates the total process instances duration. When the precision limit is not set, you will see all possible units, e.g.: `1y 5m 2wk 5d 3h 16min 3s 170ms`. In case you are only interested in certain units - e.g. months - you can omit all insignificant units by limiting the precision as shown in the figure below: - -![Number report configurations](./img/NumberConfiguration.png) - -## Number goal value (progress bar) - -Number reports appear as a progress bar when the goal option is enabled from the panel as shown. The baseline and the target value of the progress bar can also be set using the panel. - -![Progress Bar Visualization](./img/progressbar.png) - -You can toggle between the progress bar and the single number visualization using the same goal line switch. - -A red line indicator appears on the progress bar when its value exceeds the goal value. On the right side of the indicator, the bar turns into a darker color to clearly show the exceeded amount. - -![Progress Bar Visualization](./img/progressbarExceeded.png) - -## Table settings - -In table reports, the following configurations are possible: - -## Show instance count - -Displays the total instance count on the right side of the visualization. If you save the report while this option is enabled, the number will also be shown on any dashboard this report is added to and when the report is shared. - -## Hide, show, and reorder table columns - -The table settings allow you to hide specific columns using the configuration menu as shown in the figure below: - -![raw data configuration](./img/rawdata.png) - -When working with raw data table reports, you can also re-order the table columns using drag-and-drop on the header of the respective column. - -## Sorting by table column - -To sort a table by a specific column, click on the header of that column.
Doing that will show a small caret icon in the header of the column indicating which column the table is currently sorted by and the direction of this sorting (ascending or descending) as shown: - -![Sorting a table in Optimize](./img/sorting.png) - -Clicking again on the same column header will reverse the direction of sorting. - -Saving the reports will also preserve the applied sorting. - -The sorting currently works for all table reports except for: - -- Combined table reports -- Reports grouped by integer type variables - -## Absolute and relative values - -When configuring a count report, you have the opportunity to configure which columns are included in the table. You can hide or show the corresponding columns using the switches for absolute and relative value. - -## Custom bucket size for date variables - -When evaluating a report which is grouped by a date variable and displayed as a table, Optimize offers you the option to select your preferred unit specifying the custom result bucket size from the report configuration menu. The available units are year, month, week, day, and automatic. - -The default unit is automatic, which will create evenly spaced result buckets based on the values of the date variable. This configuration option is also available for charts. - -## Custom bucket size and baseline - -When evaluating a report which is grouped by duration or a number variable, Optimize offers you the option to specify your preferred result bucket size as well as a custom baseline in the report configuration menu. The bucket size determines the width of one bucket, and the baseline specifies the start of the first bucket. - -For example, say a report contains the variable values 0.3, 6, and 13, and you set a bucket size of 5. By default, Optimize would now return a bucket for the values 0.3 to 5.3, one for 5.3 to 10.3, and one for 10.3 to 15.3. You may prefer your bucket start and end points to be a round number, in which case you should set your baseline to 0. With a baseline of 0 and bucket size 5, the result buckets now span 0 to 5, 5 to 10, and 10 to 15. - -If these configuration fields are not set, by default Optimize will create evenly spaced result buckets with a range based on the minimum and maximum values of the number variable. - -This configuration option is also available for charts. - -## Charts (line, bar, pie) - -In bar chart and line chart reports, it is possible to select the color of the graph, add names to the x-axis and y-axis, and edit many other settings as shown in the figure below: - -![chart visualization configurations](./img/chartConfiguration.png) - -In charts, you can hide/show absolute and relative values that appear in the tooltips. - -## Show instance count - -Displays the total instance count on the right side of the visualization. If you save the report while this option is enabled, the number will also be shown on any dashboard this report is added to and when the report is shared. - -## Chart goal line - -Optimize allows you to set a goal line in bar chart and line chart visualizations. Using this feature, it is possible to highlight anything above or below a certain value. 
- -A good use case for such functionality is the following example: - -First, go to the edit mode of a report and choose the following configuration: - -| View | Count frequency of process instance | -| --- | --- | -| Group by | Start date of process instance: Month | -| Visualize as | Bar chart | - -Let us say that the number of completed process instances should always be above six. A goal line can be used as follows: - -Set the target value input field to six and select the above button. If the number of process instances is below six, it will be highlighted in red as shown: - -![Bar charts goal line](./img/targetValue.png) - -This feature can also be used with every other bar chart and line chart visualization. Here is another example where the target value is used with line chart visualization: - -![Line chart goal line](./img/targetline.png) - -## Custom bucket size for date variables - -When evaluating a report which is grouped by a date variable and displayed as a chart, Optimize offers you the option to select your preferred unit specifying the custom result bucket size in the report configuration menu. - -The available units are year, month, week, day, and automatic. The default unit is automatic, which will create evenly spaced result buckets based on the values of the date variable. This configuration option is also available for tables. - -## Custom bucket size and baseline - -When evaluating a report which is grouped by duration or a number variable, Optimize offers you the option to specify your preferred result bucket size as well as a custom baseline in the report configuration menu. The bucket size determines the width of one bucket, and the baseline specifies the start of the first bucket. - -For example, say a report contains the variable values 0.3, 6, and 13 and you set a bucket size of 5. By default, Optimize would now return a bucket for the values 0.3 to 5.3, one for 5.3 to 10.3, and one for 10.3 to 15.3. You may prefer your bucket start and end points to be a round number, in which case you should set your baseline to 0. With a baseline of 0 and bucket size 5, the result buckets now span 0 to 5, 5 to 10, and 10 to 15. - -If these configuration fields are not set, Optimize will create evenly spaced result buckets with a range based on the minimum and maximum values of the number variable by default. - -This configuration option is also available for tables. - -## Stacked bar chart - -When evaluating a report which has a second "Group by", Optimize offers you the option to stack the bar chart bars instead of displaying them next to each other. Stacking bars is useful when the focus of the chart is to compare the totals (e.g. flow node count, process instance count, etc.) and one part of the totals (e.g. flow node, variable value, etc.). - -This configuration option is also available for bar/line charts. - -![Stacked bar chart report](./img/stackedBar.png) - -## Switch bar chart orientation - -When evaluating a report, Optimize will automatically set the bar chart orientation according to the nature of the data being displayed. You can also switch the orientation manually using the configuration option shown. - -![Horizontal bar chart report](./img/horizontalBar.png) - -## Bar/line chart - -When evaluating a report which has both count and duration measures, Optimize offers you the option to display one of the measures as bars and the other measure as a line.
This would help to differentiate between duration and count values displayed in the visualization. By default, the count measure is displayed as bars and the duration as a line. You can also switch between them by using the configuration option shown. - -![Bar/Line chart report](./img/barLine.png) - -## Heatmaps - -When enabling absolute or relative values switches, all tooltips for all flow nodes stay visible. This is also possible when you have defined target values. If you save the report in this state, the tooltips will also be shown on any dashboard this report is added to. - -![Heatmap tooltips](./img/heatmap.png) - -As for charts and table reports, it is possible to display the total instance count on the right-hand side of the visualization. If you save the report while this option is enabled, the number will also be shown on any dashboard this report is added to and when the report is shared. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/define-reports.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/define-reports.md deleted file mode 100644 index 0af89487aee..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/define-reports.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -id: define-reports -title: Define reports -description: In this section of the report builder, you are characterizing the output of the report. ---- - -In this section of the report builder, you are characterizing the output of the report. Basically, you are defining "I want to view ... grouped by ... visualized as ...". To understand better what "View" and "Group by" mean, you can use the analogy of a graph, where "View" is the y-axis and "Group by" is the x-axis. - -First, you need to select which part of the data you want to view. Optimize differentiates between the view (e.g. process instance or flow node) and the measure (e.g. count or duration): - -1. Raw Data: View just a table with the actual data listed as rows. This can come in handy if you found interesting insights in certain process instances and need detailed information about those instances, or you are exploring a process definition with a limited number of instances. This report type also allows you to inspect raw [object variable values](/self-managed/optimize-deployment/configuration/object-variables.md). - -2. Process instance - -- Count: View how many process instances were executed. -- Duration: View how long the process instances took to complete. - -3. Incident - -- Count: View how many incidents occurred on the process. -- Resolution duration: View how long the incident took to get resolved. - -4. Flow node - -- Count: View how often the flow nodes (e.g. tasks) have been executed. -- Duration: View how long each flow node took to complete. - -5. User task - -- Count: View how often each user task has been executed. -- Duration: View how long each user task took to complete. - -6. Variable: View an aggregation of values for a specific numeric variable of the process definition. - -It is possible to display both count and duration measures for a single view in the same report. - -Subsequently, you need to define how to group the data. Think of it as applying a metric to your input, where you break up the data by date, flow nodes, variable values, or other properties. For that, you have different options: - -- **None**: Do not split up the data. 
-- **Flow nodes**: Cluster the data by flow nodes. -- **User tasks**: Cluster the data by user tasks. -- **Duration**: Cluster the data by duration. Depending on the selected view, this can be the duration of process instances, flow nodes, or user tasks. -- **Start date**: Group instances together that were started during the same date period or time, e.g. hour, day, or month. Depending on the selected view, this can be the start date of process instances, flow nodes, or user tasks. -- **End date**: Group instances together that were finished during the same date period or time, e.g. hour, day, or month. Depending on the selected view, this can be the end date of process instances, flow nodes, or user tasks. -- **Running date of the process instance**: Group process instances together that were running during the same date period or time, e.g. hour, day, or month. -- **Variable**: Process instances with the same value for the selected variable are grouped together. -- **Assignee**: Only available for user task views. Tasks are grouped together according to their current assignee. -- **Candidate group**: Only available for user task views. Tasks with the same candidate group are grouped together. -- **Process**: Only available for process instance reports with multiple definitions. Data from the same process is grouped together. - -Finally, define how you want the data to be visualized. Examples are heatmap, table, bar, or line chart. - -Not all of the above view, group by, and visualization options can be combined. For instance, if you choose `Flow Node: Count` as view, the data is automatically grouped by flow nodes as no other combination would be valid. - -All possible combinations can also be found in the following table: - -| View | Group by | Visualize as | -| --- | --- | --- | -| Raw Data | None | Table | -| Process instance: Count, Process instance: Duration | None | Number | -| Process instance: Count | Start Date, End Date, Running Date, Variable, Duration, Process | Table, Chart | -| Process instance: Duration | Start Date, End Date, Variable, Process | Table, Chart | -| Incident: Count, Incident: Duration | None | Number | -| Incident: Count, Incident: Duration | Flow Nodes | Table, Chart, Heatmap | -| Flow Node: Count, Flow Node: Duration | Flow Nodes | Table, Chart, Heatmap | -| Flow Node: Count | Start Date, End Date, Duration, Variable | Table, Chart | -| Flow Node: Duration | Start Date, End Date, Variable | Table, Chart | -| User Task: Count, User Task: Duration | User Tasks | Table, Chart, Heatmap | -| User Task: Count, User Task: Duration | Start Date, End Date, Assignee, Candidate Group | Table, Chart | -| User Task: Count | Duration | Table, Chart | -| Variable | None | Number | - -:::note -You might sometimes see a warning message indicating that the data is limited to a certain number of points. This happens because the available stored data in this case is very large, and it is not possible to display all of it in the selected visualization. -::: - -## Reports with a second "Group by" option - -Using the second "Group by" option, it is possible to apply another grouping to your data to display extra details such as dates, variable values, or assignees. This option will be shown below the first "Group by" option if the current report combination supports it.
Here is an overview of the reports that support a second "Group by": - -## Flow node reports - -Flow node names can be applied as a second "Group by". If the report contains multiple process definitions, the data can also be grouped by process as a second "Group by". - -## User task reports - -User task names, assignees, and candidate groups can be applied as a second "Group by". - -For example, if your report is grouped by assignee/candidate group, it is possible to add another grouping by user task to see which user task your users/groups are working on or have completed in the past. If the report contains multiple process definitions, the data can also be grouped by process as a second "Group by". - -:::note -Reports using assignee/candidate groups are only available in Camunda 7. -::: - -![Distributed User Task report](./img/distributed-report.png) - -Refer to the table below for an overview of all report combinations that support a second "Group by": - -| View | Group by | Second group by | -| --- | --- | --- | -| User Task Count, Duration | User Tasks | Assignee, Candidate Group, Process (only for multi-definition reports) | -| User Task Count, Duration | Start Date, End Date | Assignee, Candidate Group, User Tasks, Process (only for multi-definition reports) | -| User Task Count, Duration | Assignee, Candidate Group | User Tasks, Process (only for multi-definition reports) | -| User Task Count | Duration | User Tasks, Process (only for multi-definition reports) | - -## Process instance reports - -Refer to the table below for the process instance count and duration reports that support a second "Group by": - -| View | Group by | Second group by | -| --- | --- | --- | -| Process Instance Count, Duration | Start Date, End Date | Variable, Process (only for multi-definition reports) | -| Process Instance Count, Duration | Variable | Start Date, End Date, Process (only for multi-definition reports) | -| Process Instance Count | Running Date, Duration | Process (only for multi-definition reports) | - -The diagram below shows a report grouped by `Start Date` and a boolean variable: - -![Distributed process instance report](./img/distributedByVar.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/edit-mode.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/edit-mode.md deleted file mode 100644 index f19a7456e3a..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/edit-mode.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: edit-mode -title: Overview -description: The edit mode allows you to configure the report and adjust it to your needs. ---- - -Edit mode allows you to configure the report and adjust it to your needs. The following operations are possible within edit mode: - -- Rename your report -- Build a report -- Configure your report -- Save the current state with your applied changes -- Cancel changes you already applied to the report - -Building a report is the crux of the report edit mode. The building process itself is composed of several steps, which happen in the control panel.
- -:::note -In edit mode, you can toggle the automatic preview update of the report by toggling the **Update Preview Automatically** switch. You can also use the **Run** button to run the update manually. -By default, the automatic preview update is **disabled**. -::: - -In this section, learn how to: - -- [Select process definitions](./select-process-definitions.md) -- [Define reports](./define-reports.md) -- [Set durations and variable report aggregation](./measures.md) -- [Compare target values](./compare-target-values.md) -- [Select process instance parts](./process-instance-parts.md) -- [Configure reports](./configure-reports.md) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/NumberConfiguration.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/NumberConfiguration.png deleted file mode 100644 index fab237fc95b..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/NumberConfiguration.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/barLine.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/barLine.png deleted file mode 100644 index fa621c9683c..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/barLine.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/chartConfiguration.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/chartConfiguration.png deleted file mode 100644 index 53acc458782..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/chartConfiguration.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/distributed-report.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/distributed-report.png deleted file mode 100644 index a93d781f0a9..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/distributed-report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/distributedByVar.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/distributedByVar.png deleted file mode 100644 index 4f13f9d2197..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/distributedByVar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/durationAggregation.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/durationAggregation.png deleted file mode 100644 index aacabc0e369..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/durationAggregation.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/heatmap.png 
b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/heatmap.png deleted file mode 100644 index c8b21df22f5..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/heatmap.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/horizontalBar.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/horizontalBar.png deleted file mode 100644 index 48ced2264ce..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/horizontalBar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/process-part.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/process-part.png deleted file mode 100644 index 52207324172..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/process-part.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/progressbar.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/progressbar.png deleted file mode 100644 index 323e90e6bde..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/progressbar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/progressbarExceeded.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/progressbarExceeded.png deleted file mode 100644 index 75844710c80..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/progressbarExceeded.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/rawdata.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/rawdata.png deleted file mode 100644 index 7b82c8caead..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/rawdata.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-processDefinitionSelection.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-processDefinitionSelection.png deleted file mode 100644 index 161ffe3e1ac..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-processDefinitionSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-sharingPopover.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-sharingPopover.png deleted file mode 100644 index 04f51dce620..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-sharingPopover.png and /dev/null differ 
diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-versionSelection.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-versionSelection.png deleted file mode 100644 index 918805d3fd1..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/report-versionSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/sorting.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/sorting.png deleted file mode 100644 index a2f793a83bc..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/sorting.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/stackedBar.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/stackedBar.png deleted file mode 100644 index d8917aebe0d..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/stackedBar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetValue.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetValue.png deleted file mode 100644 index 8187c3acd69..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetValue.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetline.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetline.png deleted file mode 100644 index fe3bab78575..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetline.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetvalue-1.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetvalue-1.png deleted file mode 100644 index a65c7977f63..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetvalue-1.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetvalue-2.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetvalue-2.png deleted file mode 100644 index 96bbe786fdd..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/targetvalue-2.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/tenantSelection.png b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/tenantSelection.png deleted file mode 100644 index 804a0fd71b1..00000000000 Binary files 
a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/img/tenantSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/measures.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/measures.md deleted file mode 100644 index d63b098d91e..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/measures.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: measures -title: Measures -description: You can add and change different aggregations like minimum, maximum, and median in the report configuration panel. ---- - -## Duration and variable report aggregation - -For duration and variable views, the default aggregation type is the average. You can add and change different aggregations like minimum, maximum, and median in the report configuration panel. Note that the median is an estimate and the other operations are exact values. - -![Duration Aggregation Selection](./img/durationAggregation.png) - -Reports with multiple aggregations that have a [second "Group by"](./define-reports.md#reports-with-a-second-group-by-option) can only be visualized as table. - -## User task duration time - -:::note -The following information regarding idle versus work is currently applicable only to Camunda 7. -::: - -In user task duration reports, you have the opportunity to select which part of the user task's lifecycle you want to see in the report: - -- Idle: View how long each user task was considered idle (not claimed by an assignee/user) during its execution. -- Work: View how long each user task was considered to be worked on by assignees/users (claimed by an assignee/user) during its execution. -- Total: View how long each user task took to complete. - -It is possible to display and compare multiple user task duration times in the same report. Reports with multiple user task duration times that have a [second "Group by"](./define-reports.md#reports-with-a-second-group-by-option) can only be visualized as table. - -:::note -User tasks which have been completed yet have no claim operations are evaluated as follows: if the user task was canceled, the task is considered to have been idle whereas user tasks which were completed are considered to have been worked on programmatically or via a custom UI, meaning the time between start and end is considered work time. -::: diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/overview.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/overview.md deleted file mode 100644 index 5cf7cceccf1..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/overview.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: overview -title: Report process analysis -description: After creating a report, utilize process analysis for a closer look at your process instances. ---- - -## Edit mode - -The [edit mode](./edit-mode.md) allows you to configure the report and adjust it to your needs. 
The following operations are possible within edit mode: - -- Rename your report -- Build a report -- Configure your report -- Save the current state with your applied changes -- Cancel changes you already applied to the report - -## View mode - -Once you have defined what your report should look like, the [view mode](./view-mode.md) gives you a full view of the report visualization. To see more details about the report, you can interact with it, e.g. by moving your mouse over individual data points in diagrams or zooming in or out of heatmaps. The kind of interaction always depends on the report itself. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/process-instance-parts.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/process-instance-parts.md deleted file mode 100644 index 5915bb6713e..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/process-instance-parts.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: process-instance-parts -title: Process instance parts -description: In some cases, you may not be interested in the duration of the whole process instance, but only a certain part of it. ---- - -In some cases, you may not be interested in the duration of the whole process instance, but only a certain part of it. For that scenario, there is an additional button called **Process Instance Part** available for every process instance duration view that only shows data for a single process definition. - -Clicking this button opens an overlay letting you select the start and end of the part of the process instance you are interested in. After confirming the selection, the displayed duration refers to the selected part only instead of the whole instance. - -In some cases it can happen that the same task is executed multiple times in the same process instance, e.g. if the process contains loops or parallel gateways. In such cases, Optimize considers only the part between the start date of the first instance of the start node and the end date of the first instance of the end node. - -![Process Instance Part Modal](./img/process-part.png) diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/select-process-definitions.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/select-process-definitions.md deleted file mode 100644 index b0a9a696fb7..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/select-process-definitions.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: select-process-definitions -title: Select process definitions -description: Every report relates to one or multiple process definitions, versions, and tenants. ---- - -Every report relates to one or multiple process definitions, versions, and tenants. You must choose at least one process definition you want to create a report for. - -To add a process definition to the report, click **Add** at the top of the **Data Source** section of the report control panel. This opens a dialog showing all process definitions you can use in the report. You can select up to 10 definitions to add to the report. If there are many process definitions, you can use the input field to search for the definition you are looking for. 
- -![Process definition selection in the report builder in Camunda Optimize](./img/report-processDefinitionSelection.png) - -For every added process definition, you can set a display name and a specific version or version range. To do so, click the **Edit** button in the process definition card. There are also buttons to remove the definition from the report or add a copy of the definition. - -When editing a process definition, using the version dropdown, you can choose between all versions, the latest version, or a specific set of versions. - -![Process Version selection in the report builder in Camunda Optimize](./img/report-versionSelection.png) - -- **All** option: Every process instance across all versions of the process definition will be considered in your report. -- **Always display latest** option: Makes your report always refer to the latest version. Keep in mind that if a new version of the process is deployed, the report will automatically consider process instances of this new version only. -- **Specific version** option: Specify one or multiple existing versions of the process. - -Data from older versions is mapped to the most recent version in the selection. Therefore, the report data can seem to be inconsistent, which is due to changes that occurred within the diagram through the different versions. For example, the old versions do not contain newly added tasks or a part of the diagram was removed because it was considered to be obsolete. - -![Process definition selection for a multi-tenancy scenario](./img/tenantSelection.png) - -By default, all process instances for the selected process definitions are included in a report. You can reduce this set of process instances by applying a [filter](../../process-analysis/filters.md). diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/view-mode.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/view-mode.md deleted file mode 100644 index ad0e109bb75..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/report-analysis/view-mode.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: view-mode -title: View mode -description: Once you have defined what your report should look like, the view mode gives you a full view of the report visualization. ---- - -## View mode - -Once you have defined what your report should look like, the view mode gives you a full view of the report visualization. To see more details about the report, you can interact with it, e.g. by moving your mouse over individual data points in diagrams or zooming in or out of heatmaps. The kind of interaction always depends on the report itself. - -The view mode also provides you with different kinds of actions, such as: - -![report sharing popover in Camunda Optimize](./img/report-sharingPopover.png) - -- Download CSV: In case you want to download the data of the report, you can click the **Download CSV** button. The downloaded file will include the report information in a table format. - -- Sharing: In case you want to share the report with other people or want to embed it in a webpage, you can use the sharing feature of the report. Just click on the **Share** button, which opens up a popover. After enabling the **Enable sharing** switch, a link is generated which you can send to people who do not have access to Camunda Optimize and thus enable them to see the report. 
- - You can also use the **Embed Link** button if you wish to insert the report into your webpage. Everyone who views the webpage can then see the content of the report. The shared version of the report only allows you to view the report itself. There is no possibility to alter it or interact with any other features of Optimize. You can revoke the sharing any time by disabling the share switch. - - If you prefer to hide the header of the shared report or a specific part of it, you can do that by adding the following parameter to the share URL: - - ``` - header : titleOnly / linkOnly / hidden - ``` - - For example, to completely hide the header from the shared report, you can add `header=hidden` as shown: - - ``` - http://?header=hidden - ``` - -- Alerts: If the created report is inside a collection, you can use the **Alert** dropdown to create and manage alerts for that report. Since alerts can only be created on reports that have a number visualization, the **Alerts** dropdown will only be visible for such reports. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/variable-filters.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/variable-filters.md deleted file mode 100644 index e23946b105c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-analysis/variable-filters.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: variable-filters -title: Variable filters -description: Learn more about variable filters with booleans, strings, and more. ---- - -Use the `Variable Filter` to retrieve only those process instances which hold the specified variable value for the selected variable. - -:::note -Variable filters can only filter for the final value of the variable. - -For instance, assume you want to analyze only those process instances which have the variable `department` with the value `marketing`. Say you also have some instances where this variable had the value `marketing` at the start of the execution, yet this was later reassigned to the value `sales`. These instances will not be included in the filter. -::: - -To use complex types like object, use the **Variable Import Customization** feature to transform your object variables into primitive type variables. - -Start creating a variable filter by searching for and selecting a variable from the suggested list of variable names. - -![Searching through the variables in variable filter](./img/variable-filter.png) - -There are four types of variables that you can filter for: - -## Boolean variables - -They can have the state `true`, `false`, `null`, or `undefined`. - -## String variables - -Two types of operators are available for variables of type `String`. You can either filter by an exact variable value (`is` and `is not`) or filter by a substring (`contains` and `does not contain`). - -For the operators `is` and `is not`, the first 10 values are loaded and displayed. If the variable has more than 10 values, a `Load More` button is shown so you can extend the list as much as you need. You can also search through the whole list of values using the search input field. The list only contains variable values that already appeared in one of the instances of the process. - -To filter by a variable value that is not in the list of available values, click the **+ Value** button and add a custom variable value.
- -In case the `is` option of the toggle button is selected, checking one or more values means that you want to see only those process instances where the variable value equals one of the checked values (this corresponds to the `or` operator in boolean logic.) - -In case the `is not` option of the toggle button is selected, checking one or more values means that you want to see only those process instances where the variable value does not equal any of the checked values (this corresponds to the `and` operator in the boolean logic.) - -For the operators `contains` and `does not contain`, you can add one or multiple values that should match a substring of the variable value. For the `contains` operator, adding one or more values means that you want to see only those process instances where the variable value contains one of the entered values (this corresponds to the `or` operator in boolean logic). - -In case the `does not contain` operator is selected, adding one or more values means that you want to see only those process instances where the variable value does not contain any of the entered values (this corresponds to the `and` operator in boolean logic.) - -There is an option to include the null or undefined values of the selected variable in the result. By using the same option, it is also possible to show all the values except the null or undefined by selecting the `is not` option of the toggle button. - -## Numeric variables - -Here you have an input field to define whether the variable value in the process instance should be equal, not equal, less than, or greater than a certain value. You can even add more input fields and apply the same operation several times at once. - -If the `is` option of the toggle button is selected, adding one or more values means that you want to see only those process instances where the variable value equals one of the checked values (this corresponds to the `or` operator in boolean logic.) - -If the `is not` option of the toggle button is selected, adding one or more values means that you want to see only those process instances where the variable value does not equal any of the checked values (this corresponds to the `and` operator in boolean logic.) - -In case the `is less than` or `is greater than` option is selected, only one value can be entered. - -Null or undefined options can be included or excluded from the results in a way similar to string variables. - -## Date variables - -This filters all instances where the selected date variable has a value within a specified date range. All the options that are available to configure [date filters](./metadata-filters.md#date-filters) are also available for date variables. - -Similar to the other variables, there are two input switches that allow you to exclude or include process instances where a particular date variable is either `null` or `undefined`. - -## List variable filters - -To filter based on the value of a [list variable](/self-managed/optimize-deployment/configuration/object-variables.md#list-variables), the applied filter will depend on the primitive type of items within the list. For example, you will be creating a numeric variable filter for a variable which is a list of numbers, a string variable filter for a list of strings, and so on. It is important to note here that filters are applied on each individual item within the list variable and not the list itself. 
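The following sketch is a rough illustration of the operator semantics described in this section (it is not Optimize's internal implementation, and the function and parameter names are assumptions): a single value is matched against the selected filter values, and for a list variable the check is applied to each individual item.

```python
def value_matches(value, operator, filter_values):
    """Illustrative boolean semantics of the variable filter operators."""
    if operator == "is":                # matches if the value equals any selected value (OR)
        return any(value == v for v in filter_values)
    if operator == "is not":            # matches only if the value equals none of them (AND)
        return all(value != v for v in filter_values)
    if operator == "contains":          # matches if any entered term is a substring
        return any(term in value for term in filter_values)
    if operator == "does not contain":  # matches only if no entered term is a substring
        return all(term not in value for term in filter_values)
    raise ValueError(f"unsupported operator: {operator}")


def list_variable_matches(items, operator, filter_values):
    """For list variables, a positive filter ("is", "contains") matches the
    process instance if at least one individual item matches."""
    return any(value_matches(item, operator, filter_values) for item in items)
```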
- -For example, an "is" filter on a list of string values filters for those instances where any individual list item is equal to the given term. In other words, these are instances whose list variable "contains" the selected value. - -Similarly, the "contains" filter matches process instances whose list variable contains at least one value which in turn contains the given substring. - -## Combine multiple variable filters with OR logic - -In addition to using variable filters individually, there is also the option of combining all the previously mentioned variable filters with OR logic. This means that process instances which fulfill the condition specified in at least one filter will be displayed. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/process-dashboards.md b/optimize_versioned_docs/version-3.10.0/components/userguide/process-dashboards.md deleted file mode 100644 index 536271fa3fd..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/process-dashboards.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: process-dashboards -title: Process dashboards -description: "The process dashboards section gives you an overview of all the processes and their status on a single page." ---- - -## Overview - -The **Process Default Dashboards and KPIs** section gives you an overview of all the processes and their status on a single page. -This section also allows you to set a process owner who takes responsibility for a process, and to view time and quality KPIs to track process performance. Additionally, an **Adoption Dashboard** can be found at the top of the page, which integrates data from all processes in one view. The optional date filter is applied to all the reports shown in this view. -![Processes page](./img/processOverview.png) - -Note the available views below: - -1. Process Instance Usage: Track Camunda adoption by visualizing how many process instances were started each month. - -2. Overall Incident-Free Rate: Check the overall technical health of your processes by visualizing the percentage of process instances that did not have an incident. - -3. Automation Rate (<1 hour): View the percentage of process instances completed within one hour. This is a good proxy for how automated your entire suite of processes is. - -4. Long-Running Process Instances: View how many process instances that started over a week ago are still running. In addition, you can combine that with their average duration to identify the least efficient processes and potential pain points. - -5. Automation Candidates: View your most expensive user tasks by seeing how often they run next to their average duration. - -6. Active Bottlenecks: Identify the worst bottlenecks across any process in real time by seeing the number of instances at any certain task and how long those instances have been there. - -7. Time KPIs: These are KPIs based on duration reports or percentage reports with a duration filter. - -8. Quality KPIs: These are KPIs based on other reports. - -9. KPI Results: You can see the KPI’s current and target values, plus a link to the report itself. - -10. Clicking on each process on the list opens a [default dashboard dedicated to this process](./instant-preview-dashboards.md). - -:::tip -For the pie chart reports in 4, 5, and 6, both time and count are displayed in the chart according to the following format: - -![Pie chart format](./img/pieFormat.png) -::: - -## Set time and quality KPIs - -KPIs are single number reports, which have a target set.
A report can be configured as a KPI report through the report configuration panel, and the classification as a time or quality KPI happens internally in Optimize based on the measure selected. -Once a report is configured as a KPI, its status can be seen on the **Adoption Dashboard**. Hovering over its status allows previewing more specific information with regard to the KPIs, such as the target set, the actual current value of the single number report, and a link to it. If a user accesses a report via this link and does not have authorization to view it, they will not be able to see any data in the report view. -![Set time and quality KPIs](./img/kpiConfiguration.png) - -## Configuring process owner and digests - -The process can be configured by clicking the **Configure** option in the three dots menu displayed on the right side when hovering over the process. From this modal, you can change the owner of the process, as well as enable/disable the process digest. The process digest is a scheduled email report summarizing the current and previous state of the KPI reports for that process. It will be emailed to the owner of that process at [globally configurable regular intervals](/self-managed/optimize-deployment/configuration/system-configuration.md#digest). Note that process digests are an alpha feature. -![Configure Process](./img/configureProcess.png) - -## KPI import scheduler - -Since users might be dealing with hundreds or even thousands of KPIs, a scheduler has been developed which updates the KPI values on a given interval. The default interval in which the KPIs get updated is 10 minutes. -To change this interval, modify the configuration value for **entity.kpiRefreshInterval**. For more information, visit the relevant [configuration section](/self-managed/optimize-deployment/configuration/system-configuration.md). - -## Limitations - -Since the updates on the KPIs will appear on the process overview page after the given KPI import scheduler interval has passed, changes such as creation, update, and deletion of KPIs will show with a delay. In case you wish to make these changes apparent more promptly, you can set the KPI scheduler interval to a lower value as described above. - -Additionally, it is worth mentioning that for the evaluation of the KPI reports, the default timezone of the machine on which Optimize is run will be used. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/user-permissions.md b/optimize_versioned_docs/version-3.10.0/components/userguide/user-permissions.md deleted file mode 100644 index 310bd206f89..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/user-permissions.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: user-permissions -title: User permissions -description: "To share a collection of reports and dashboards with other users, add them to the collection." ---- - -By default, if you create a collection, only you can access the collection and the contents within. To share a collection of reports and dashboards with other users, add them to the collection. - -![users and user groups](./img/users.png) - -You are automatically assigned the manager role when creating a new collection. There can be multiple managers for a collection. However, there must be at least one manager for every collection. Managers can do the following: - -- Add, edit, and remove dashboards and reports in the collection.
-- Edit the collection name and delete the collection using the context menu in the header. -- Add, edit, and remove other users and user groups in collections via the collection's **Users** tab. - -A manager can add a new user or group to the collection using the **Add** button. Use the ID of the user/group to add them. Every user/group has a role assigned to them that specifies their access rights to the collection. - -![add user or user group](./img/addUser.png) - -An editor may edit, delete, and create new dashboards or reports in the collection. Editors may not edit the name of the collection, delete the collection, or change anything in the **Users** tab. - -Those with read-only access to the collection may only view the components contained within, as well as copy them. Viewers cannot create, edit, or delete components in a collection. They are also not allowed to rename or delete the collection itself, or change anything in the **Users** tab. diff --git a/optimize_versioned_docs/version-3.10.0/components/userguide/view-mode.md b/optimize_versioned_docs/version-3.10.0/components/userguide/view-mode.md deleted file mode 100644 index 43e984c4a29..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/userguide/view-mode.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: view-mode -title: View mode -description: "The view mode provides you with all the features to monitor your process." ---- - -Once you have defined what your dashboard should look like, the view mode provides you with all the features to monitor your process, such as: - -- Full-screen: Display the dashboard in full-screen and only see the essential information of your dashboard - the reports - and hide the header, control panel, and footer. While in full-screen mode, you can click on the **Toggle Theme** button to switch between the default light theme and a dark theme. - -- Auto-refresh: This feature periodically updates the dashboard with the latest data. You can decide how often the update should be performed by setting a time span ranging from 1 to 60 minutes. An animation indicates when the next update is occurring. If you do not wish to use that feature anymore, you can disable it anytime. - -:::note -The refresh rate will not be saved unless it is selected in the [edit mode](./edit-mode.md) of the dashboard. -If it was selected in the view mode, the refresh rate will not be saved when refreshing the dashboard page manually or switching to another page in between. -::: - -- Alerts: If the created dashboard exists inside a collection, it is possible to create and manage alerts for the reports inside the dashboard. - -![process performance overview](./img/dashboard-viewMode-monitorFeatures.png) - -To share the dashboard with other people or embed it in a webpage, use the sharing feature of the dashboard. Click on the **Share** button, which opens up a popover. After turning the **Enable sharing** switch on, a link is generated which you can send to people who do not have access to Camunda Optimize, and thus enable them to see the dashboard. - -If you applied filters on the dashboard, you can include them in the shared version of the dashboard by enabling the **Share with current filters applied** checkbox. If the checkbox is not checked, the shared dashboard will include the default filters if any have been set. - -![sharing](./img/dashboard-sharingPopover.png) - -You can also click the **Embed Link** button to copy a code to paste into your webpage.
Everyone that views the webpage can then see the content of the dashboard. The shared versions of the dashboard allow only to view the dashboard itself. There is no possibility to alter it or interact with any other features of Optimize. Revoke the sharing anytime by disabling the share switch. - -To hide the header of the shared dashboard or specific part of it, add the following parameter to the share URL: - -`header : titleOnly / linkOnly / hidden` - -For example, to completely hide the header from the shared dashboard, add `header=hidden` as shown: - -`http://?header=hidden` - -## Interacting with reports - -To see more details about the report on the dashboard, interact with the reports. The kind of interaction always depends on the report itself. - -If the interactions do not suffice to get the desired information, or you want to edit the report, directly access the report by clicking on its title. - -## Adding filters in view mode - -In the dashboard view mode, there is a **Filters** button which opens a panel that shows all filters available for this dashboard. More filters can be made available in the dashboard edit mode. If the dashboard editor checked the **Allow viewer to add filter values** box for assignee, candidate group, or variable filters, dashboard viewers can add their own values to filter by. - -![filters in view mode](./img/filter-viewMode.png) - -Filters apply to all process reports on the dashboard. If a report already has filters set, they will be combined with the dashboard filter. For example, if a report has a filter to only show running instances and a dashboard filter for suspended instances is set, the report will only show instances that are both running and suspended. Dashboard filters are not applied to decision reports, external websites, or text tiles. - -Variable filters are only applied to reports whose process definition includes the variable. Otherwise, the filter is ignored for that report. Other dashboard filters and filters defined directly on the report are still applied. diff --git a/optimize_versioned_docs/version-3.10.0/components/what-is-optimize.md b/optimize_versioned_docs/version-3.10.0/components/what-is-optimize.md deleted file mode 100644 index 956fd65df64..00000000000 --- a/optimize_versioned_docs/version-3.10.0/components/what-is-optimize.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: what-is-optimize -title: What is Optimize? -description: "Leverage process data and analyze areas for improvement." ---- - -:::note -New to Optimize? Visit our introductory guide to [Optimize]($docs$/guides/improve-processes-with-optimize/) to get started. -::: - -Camunda 8 is built to handle three key aspects of process automation: - -- Design -- Automate -- Improve - -Users can design process flows through our [Modeler]($docs$/components/modeler/about-modeler/). In a production scenario, users can deploy through Desktop Modeler, Web Modeler, or programmatically. A user can use [Tasklist]($docs$/components/tasklist/introduction-to-tasklist/) to review and complete tasks, and [Operate]($docs$/components/operate/operate-introduction) to view and analyze process instances. - -Beyond these design and automate cornerstones lies an important component to leverage our process data and analyze areas for improvement: Optimize. - -Geared toward business stakeholders, Optimize offers business intelligence tooling for Camunda enterprise customers. 
By leveraging data collected during process execution, users can collaboratively access reports, share process intelligence, analyze bottlenecks, and examine areas in business processes for improvement. - -![process performance dashboard](./img/dashboard-sharingPopover.png) - -As users run process instances through the server, Optimize makes REST API calls into the Camunda server, takes new historical data generated since the previous call, and stores the data in its own Elasticsearch database. - -As a result, users can analyze reports and dashboards, and reap actionable insights independently of what is happening inside the Camunda server itself (meaning no effects on runtime). - -Review heatmap displays for a closer look at the number of instances that took longer than average, based on duration distribution. Users can also visualize a heatmap by counting the number of activity instances, comparing them to the total number of process instances, and obtaining a percentage. - -Unlike standard business intelligence tools, Optimize understands the user’s goals and leads them through continuous process improvement. Optimize is purpose-built to help rapidly identify the constraints of an individual's or organization's system. - -In the following sections, we’ll walk through using and analyzing Optimize. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/engine-data-deletion.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/engine-data-deletion.md deleted file mode 100644 index 2ccab405b21..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/engine-data-deletion.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: engine-data-deletion -title: "Engine data deletion" -description: "Explains how Optimize copes with the deletion of engine data." ---- - -The engine slows down if the historic data grows significantly over time, particularly in cases where the amount of data streaming in each day is large. One solution to this is to remove old data from the engine on a regular basis, while still importing the data into Optimize so it can be used for deeper analytics. - -To support the described use case, Optimize is unaffected if you delete any data on the engine side. Specifically, Optimize does not sync with the engine on data deletion. If you want to remove any data from Optimize, you can either erase the data from Elasticsearch or use the [Optimize History Cleanup Feature](./../../configuration/history-cleanup/). - -The subsections below describe the ways in which Optimize handles data deletion from the engine. - -## Deletion of historic data - -There are two possible ways of doing this: - -- **Historic Cleanup**: If you have enabled the [history cleanup](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup), historic data will be removed in the engine based on a configurable time-to-live. -- **Manual Deletion**: You trigger a [manual deletion](https://docs.camunda.org/manual/latest/reference/rest/history/process-instance/post-delete/), e.g. via the REST API. - -Optimize can handle this case as it imports the old data first, and once it has imported everything, it will only add new data streaming in to its database. - -## Manual deletion of deployments/definitions - -In most cases, the deletion of deployments and definitions will not cause any problems. If you have several versions of a definition deployed, e.g.
1-4, and you delete the definition/deployment with definition version 1, then this wouldn't cause any issues. The assumption here is that Optimize has imported the definitions and related historical data beforehand. Otherwise, the deleted definition is lost. - -However, there are two scenarios where Optimize will behave differently. The first is depicted as follows: - -1. You deploy a (process/decision) definition A for the first time. -2. Optimize imports definition A with version 1. -3. You delete the definition/deployment of the definition without having added another version of it. Definition A with version 1 is removed from the engine. -4. You deploy definition A with the same ID again. -5. Optimize imports another definition A with version 1. - -Optimize identifies the unique definitions by the combination of `definition key`, `version`, and `tenant`, so in this case it will have imported the same definition twice. Optimize handles this by marking the definition with the oldest deployment time as deleted. When selecting definitions in Optimize, definitions that are considered to be deleted will not be selectable for reporting. Any data that Optimize has imported related to the deleted definition will appear in reports that use the non-deleted definition. - -To prevent this from happening, avoid deleting and redeploying the same definition (same definition key, tenant, and version) to the engine. - -Secondly, when a definition is deleted in the engine before it has been imported by Optimize and the corresponding instance data is still present in the history tables of the engine, Optimize will attempt to import this instance data for both decision reports and for creating event-based processes. In this scenario, Optimize will simply skip the import of the instance data for definitions that it has not already imported and that have since been deleted in the engine. This data will not be available in Optimize. 
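As a rough illustration of the manual deletion route mentioned above, old historic process instances can be removed from the engine via its history REST API while Optimize keeps the copy it has already imported. The following is only a sketch — the host, instance IDs, and delete reason are placeholder assumptions; consult the engine REST documentation linked above for the exact request options:

```bash
# Sketch: asynchronously delete two finished historic process instances from the engine.
# Optimize is unaffected, because it has already imported this data into its own database.
curl -X POST "http://localhost:8080/engine-rest/history/process-instance/delete" \
  -H "Content-Type: application/json" \
  -d '{
        "historicProcessInstanceIds": ["instance-id-1", "instance-id-2"],
        "deleteReason": "engine storage cleanup after Optimize import"
      }'
```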
diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-Service-Polling.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-Service-Polling.png deleted file mode 100644 index 9ed62fdd2b7..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-Service-Polling.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-logistic_large.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-logistic_large.png deleted file mode 100644 index ef4dd3b1b28..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-logistic_large.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-logistic_medium.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-logistic_medium.png deleted file mode 100644 index 36a088439ee..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-logistic_medium.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-sales.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-sales.png deleted file mode 100644 index f6c4b23a6a2..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Import-performance-diagramms-sales.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Optimize-Import-Process.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Optimize-Import-Process.png deleted file mode 100644 index bf753ac8a72..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Optimize-Import-Process.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Optimize-Structure.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Optimize-Structure.png deleted file mode 100644 index 63c59d49899..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/img/Optimize-Structure.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/import-guide.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/import-guide.md deleted file mode 100644 index f23cda4b3b0..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/advanced-features/import-guide.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -id: import-guide -title: "Data import" -description: "Shows how the import generally works and an example of import performance." 
---- - -Camunda 7 only - -This document provides instructions on how the import of the engine data to Optimize works. - -## Architecture overview - -In general, the import assumes the following setup: - -- A Camunda engine from which Optimize imports the data. -- The Optimize backend, where the data is transformed into an appropriate format for efficient data analysis. -- [Elasticsearch](https://www.elastic.co/guide/index.html), which is the database Optimize persists all formatted data to. - -The following depicts the setup and how the components communicate with each other: - -![Optimize Import Structure](img/Optimize-Structure.png) - -Optimize queries the engine data using a dedicated Optimize REST-API within the engine, transforms the data, and stores it in its own Elasticsearch database such that it can be quickly and easily queried by Optimize when evaluating reports or performing analyses. The reason for having a dedicated REST endpoint for Optimize is performance: the default REST-API adds a lot of complexity to retrieve the data from the engine database, which can result in low performance for large data sets. - -Note the following limitations regarding the data in Optimize's database: - -- The data is only a near real-time representation of the engine database. This means Elasticsearch may not contain the data of the most recent time frame, e.g. the last two minutes, but all the previous data should be synchronized. -- Optimize only imports the data it needs for its analysis. The rest is omitted and won't be available for further investigation. Currently, Optimize imports: - - The history of the activity instances - - The history of the process instances - - The history of variables with the limitation that Optimize only imports primitive types and keeps only the latest version of the variable - - The history of user tasks belonging to process instances - - The history of incidents with the exception of incidents that occurred due to the history cleanup job or a timer start event job running out of retries - - Process definitions - - Process definition XMLs - - Decision definitions - - Definition deployment information - - Historic decision instances with input and output - - Tenants - - The historic identity link logs - -Refer to the [Import Procedure](#import-procedure) section for a more detailed description of how Optimize imports engine data. - -## Import performance overview - -This section gives an overview of how fast Optimize imports certain data sets. The purpose of these estimates is to help you evaluate whether Optimize's import performance meets your demands. - -It is very likely that these metrics change for different data sets because the speed of the import depends on how the data is distributed. - -The import is also affected by how the involved components are set up. For instance, if you deploy the Camunda engine on a different machine than Optimize and Elasticsearch to provide both applications with more computation resources, the process is likely to speed up. If the Camunda engine and Optimize are physically far away from each other, the network latency might slow down the import. 
- -### Setup - -The following components were used for these import tests: - -| Component | Version | -| ------------------ | --------------- | -| Camunda 7 | 7.10.3 | -| Camunda 7 Database | PostgreSQL 11.1 | -| Elasticsearch | 6.5.4 | -| Optimize | 2.4.0 | - -The Optimize configuration with the default settings was used, as described in detail in the [configuration overview](./../configuration/system-configuration.md). - -The following hardware specifications were used for each dedicated host - -- Elasticsearch: - - Processor: 8 vCPUs\* - - Working Memory: 8 GB - - Storage: local 120GB SSD -- Camunda 7: - - Processor: 4 vCPUs\* - - Working Memory: 4 GB -- Camunda 7 Database (PostgreSQL): - - Processor: 8 vCPUs\* - - Working Memory: 2 GB - - Storage: local 480GB SSD -- Optimize: - - Processor: 4 vCPUs\* - - Working Memory: 8 GB - -\*one vCPU equals one single hardware hyper-thread on an Intel Xeon E5 v2 CPU (Ivy Bridge) with a base frequency of 2.5 GHz. - -The time was measured from the start of Optimize until the entire data import to Optimize was finished. - -### Large size data set - -This data set contains the following amount of instances: - -| Number of Process Definitions | Number of Activity Instances | Number of Process Instances | Number of Variable Instances | Number of Decision Definitions | Number of Decision Instances | -| ----------------------------- | ---------------------------- | --------------------------- | ---------------------------- | ------------------------------ | ---------------------------- | -| 21 | 123 162 903 | 10 000 000 | 119 849 175 | 4 | 2 500 006 | - -Here, you can see how the data is distributed over the different process definitions: - -![Data Distribution](img/Import-performance-diagramms-logistic_large.png) - -Results: - -- **Duration of importing the whole data set:** ~120 minutes -- **Speed of the import:** ~1400 process instances per second during the import process - -### Medium size data set - -This data set contains the following amount of instances: - -| Number of Process Definitions | Number of Activity Instances | Number of Process Instances | Number of Variable Instances | -| ----------------------------- | ---------------------------- | --------------------------- | ---------------------------- | -| 20 | 21 932 786 | 2 000 000 | 6 913 889 | - -Here you can see how the data is distributed over the different process definitions: - -![Data Distribution](img/Import-performance-diagramms-logistic_medium.png) - -Results: - -- **Duration of importing the whole data set:** ~ 10 minutes -- **Speed of the import:** ~1500 process instances per second during the import process - -## Import procedure - -:::note Heads up! -Understanding the details of the import procedure is not necessary to make Optimize work. In addition, there is no guarantee that the following description is either complete or up-to-date. -::: - -The following image illustrates the components involved in the import process as well as basic interactions between them: - -![Optimize Procedure](img/Optimize-Import-Process.png) - -During execution, the following steps are performed: - -1. [Start an import round](#start-an-import-round). -2. [Prepare the import](#prepare-the-import). - 1. Poll a new page - 2. Map entities and add an import job -3. [Execute the import](#execute-the-import). - 1. Poll a job - 2. 
Persist the new entities to Elasticsearch - -### Start an import round - -The import process is automatically scheduled in rounds by the `Import Scheduler` after startup of Optimize. In each import round, multiple `Import Services` are scheduled to run, each fetches data of one specific entity type. For example, one service is responsible for importing the historic activity instances and another one for the process definitions. - -For each service, it is checked if new data is available. Once all entities for one import service have been imported, the service starts to back off. To be more precise, before it can be scheduled again it stays idle for a certain period of time, controlled by the "backoff" interval and a "backoff" counter. After the idle time has passed, the service can perform another try to import new data. Each round in which no new data could be imported, the counter is incremented. Thus, the backoff counter will act as a multiplier for the backoff time and increase the idle time between two import rounds. This mechanism is configurable using the following properties: - -```yaml -handler: - backoff: - # Interval which is used for the backoff time calculation. - initial: 1000 - # Once all pages are consumed, the import service component will - # start scheduling fetching tasks in increasing periods of time, - # controlled by 'backoff' counter. - # This property sets maximal backoff interval in seconds - max: 30 -``` - -If you would like to rapidly update data imported into Optimize, you have to reduce this value. However, this will cause additional strain on the engine and might influence the performance of the engine if you set a low value. - -More information about the import configuration can be found in the [configuration section](./../configuration/system-configuration-platform-7.md). - -### Prepare the import - -The preparation of the import is executed by the `ImportService`. Every `ImportService` implementation performs several steps: - -#### Poll a new page - -The whole polling/preparation workflow of the engine data is done in pages, meaning only a limited amount of entities is fetched on each execution. For example, say the engine has 1000 historic activity instances and the page size is 100. As a consequence, the engine would be polled 10 times. This prevents running out of memory and overloading the network. - -Polling a new page does not only consist of the `ImportService`, but the `IndexHandler`, and the `EntityFetcher` are also involved. The following image depicts how those components are connected with each other: - -![ImportService Polling Procedure](img/Import-Service-Polling.png) - -First, the `ImportScheduler` retrieves the newest index, which identifies the last imported page. This index is passed to the `ImportService` to order it to import a new page of data. With the index and the page size, the fetching of the engine data is delegated to the `EntityFetcher`. - -#### Map entities and add an import job - -All fetched entities are mapped to a representation that allows Optimize to query the data very quickly. Subsequently, an import job is created and added to the queue to persist the data in Elasticsearch. - -### Execute the import - -Full aggregation of the data is performed by a dedicated `ImportJobExecutor` for each entity type, which waits for `ImportJob` instances to be added to the execution queue. 
As soon as a job is in the queue, the executor: - -- Polls the job with the new Optimize entities -- Persists the new entities to Elasticsearch - -The data from the engine and Optimize do not have a one-to-one relationship, i.e., one entity type in Optimize may consist of data aggregated from different data types of the engine. For example, the historic process instance is first mapped to an Optimize `ProcessInstance`. However, for the heatmap analysis it is also necessary for `ProcessInstance` to contain all activities that were executed in the process instance. - -Therefore, the Optimize `ProcessInstance` is an aggregation of the engine's historic process instance and other related data: historic activity instance data, user task data, and variable data are all [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) within Optimize's `ProcessInstance` representation. - -:::note -Optimize uses [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html), the above mentioned data is an example of documents that are nested within Optimize's `ProcessInstance` index. - -Elasticsearch applies restrictions regarding how many objects can be nested within one document. If your data includes too many nested documents, you may experience import failures. To avoid this, you can temporarily increase the nested object limit in Optimize's [index configuration](./../configuration/system-configuration.md#index-settings). Note that this might cause memory errors. -::: - -Import executions per engine entity are actually independent from another. Each follows a [producer-consumer-pattern](https://dzone.com/articles/producer-consumer-pattern), where the type specific `ImportService` is the single producer and a dedicated single `ImportJobExecutor` is the consumer of its import jobs, decoupled by a queue. So, both are executed in different threads. To adjust the processing speed of the executor, the queue size and the number of threads that process the import jobs can be configured: - -```yaml -import: - # Number of threads being used to process the import jobs per data type that are writing - # data to elasticsearch. - elasticsearchJobExecutorThreadCount: 1 - # Adjust the queue size of the import jobs per data type that store data to elasticsearch. - # A too large value might cause memory problems. - elasticsearchJobExecutorQueueSize: 5 -``` diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/authorization-management.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/authorization-management.md deleted file mode 100644 index 7058a6ba28c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/authorization-management.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: authorization-management -title: "Authorization management" -description: "Define which data users are authorized to see." ---- - -Camunda 7 only - -User authorization management differs depending on whether the entities to manage the authorizations for are originating from adjacent systems like imported data from connected Camunda-BPM engines such as process instances, or whether the entities are fully managed by Camunda Optimize, such as [event-based processes and instances](components/userguide/additional-features/event-based-processes.md) or [collections](components/userguide/collections-dashboards-reports.md). 
For entities originating from adjacent systems, authorizations are managed in Camunda 7 via Camunda Admin; for the latter, the authorizations are managed in Camunda Optimize. - -## Camunda 7 data authorizations - -The authorization to process or decision data, as well as tenants and user data imported from any connected Camunda REST-API, is not managed in Optimize itself but needs to be configured in Camunda 7, where it can be achieved on different levels with different options. - -If you do not know how authorization in Camunda works, visit the [authorization service documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/). Reusing the engine's authorizations has the advantage that you don't need to define the authorizations several times. - -### Process or decision definition related authorizations - -You can specify which user has access to certain process or decision definitions, including data related to that definition. By that we mean the user can only see, create, edit, and delete reports for definitions they are authorized to access. - -When defining an authorization to grant or deny access to certain definitions, the most important aspect is that you grant access on the resource types "process definition" and "decision definition". You can then relate to a specific definition by providing the definition key as resource ID, or use "\*" as resource ID if you want to grant access to all definitions. To grant access to a definition, you need to set either `ALL` or `READ_HISTORY` as permission. Both permission settings are treated equally in Optimize, so there is no difference between them. - -As an example, have a look at how adding authorizations for process definitions could be done in Camunda Admin: - -![Grant Optimize Access in Admin](img/Admin-GrantDefinitionAuthorizations.png) - -1. The first option grants global read access for the process definition `invoice`. With this setting all users are allowed to see, update, create, and delete reports related to the process definition `invoice` in Optimize. -2. The second option defines an authorization for a single user. The user `Kermit` can now see, update, create, and delete reports related to the process definition `invoice` in Optimize. -3. The third option provides access on group level. All users belonging to the group `optimize-users` can see, update, create, and delete reports related to the process definition `invoice` in Optimize. - -It is also possible to revoke the definition authorization for specific users or groups. For instance, you can define access for all process definitions on a global scale, but exclude the `engineers` group from accessing reports related to the `invoice` process: - -![Revoke Optimize Access for group 'engineers' in Admin](img/Admin-RevokeDefinitionAuthorization.png) - -Decision definitions are managed in the same manner in the `Authorizations -> Decision Definition` section of the Authorizations Management of Camunda 7. - -### User and Group related Authorizations - -To allow logged-in users to see other users and groups in Optimize (for example, to add them to a collection), they have to be granted **read** permissions for the resource type **User** as well as the resource type **Group**. Access can be granted or denied either for all users/groups or for specific user/group IDs only. This can be done in Camunda Admin as illustrated in the definitions authorization example above. 
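The screenshots above use the Camunda Admin web application, but the same grants can also be scripted against the engine's REST API. The sketch below is an assumption-based illustration only — it presumes the engine REST API is reachable at `http://localhost:8080/engine-rest` and that the numeric resource type ids `1` (User) and `2` (Group) apply; verify both against your engine version before using it:

```bash
# Sketch: grant the group "optimize-users" READ access to all users and all groups,
# so its members can select users/groups when sharing Optimize collections.
for RESOURCE_TYPE in 1 2; do
  curl -X POST "http://localhost:8080/engine-rest/authorization/create" \
    -H "Content-Type: application/json" \
    -d "{
          \"type\": 1,
          \"permissions\": [\"READ\"],
          \"groupId\": \"optimize-users\",
          \"resourceType\": ${RESOURCE_TYPE},
          \"resourceId\": \"*\"
        }"
done
```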
- -## Optimize entity authorization - -There are entities that only exist in Camunda Optimize and authorizations to these are not managed via Camunda Admin but within Optimize. - -### Collections - -[Collections](components/userguide/collections-dashboards-reports.md) are the only way to share Camunda Optimize reports and dashboards with other users. Access to them is directly managed via the UI of collections; see the corresponding user guide section on [Collection - User Permissions](components/userguide/collections-dashboards-reports.md#user-permissions). - -### Event based processes - -Camunda 7 only - -Although [event-based processes](components/userguide/additional-features/event-based-processes.md) may include data originating from adjacent systems like the Camunda Engine when using [Camunda Activity Event Sources](components/userguide/additional-features/event-based-processes.md#event-sources), they do not enforce any authorizations from Camunda Admin. The reason for that is that multiple sources can get combined in a single [event-based process](components/userguide/additional-features/event-based-processes.md) that may contain conflicting authorizations. It is thus required to authorize users or groups to [event-based processes](components/userguide/additional-features/event-based-processes.md) either directly when [publishing](components/userguide/additional-features/event-based-processes.md#publishing-an-event-based-process) them or later on via the [event-based process - Edit Access](components/userguide/additional-features/event-based-processes.md#event-based-process-list---edit-access) option. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/clustering.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/clustering.md deleted file mode 100644 index de22429c75c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/clustering.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -id: clustering -title: "Clustering" -description: "Read about how to run Optimize in a cluster." ---- - -This document describes the set up of a Camunda Optimize cluster which is mainly useful in a failover scenario, but also provides means of load-balancing in terms of distributing import and user load. - -## Configuration - -There are two configuration requirements to address in order to operate Camunda Optimize successfully in a cluster scenario. -Both of these aspects are explained in detail in the following subsections. - -### 1. Import - define importing instance - -Camunda 7 only - -It is important to configure the cluster in the sense that only one instance at a time is actively importing from a particular Camunda 7 engine. - -:::note Warning -If more than one instance is importing data from one and the same Camunda 7 engine concurrently, inconsistencies can occur. -::: - -The configuration property [`engines.${engineAlias}.importEnabled`](./system-configuration-platform-7.md) allows to disable the import from a particular configured engine. - -Given a simple failover cluster consisting of two instances connected to one engine, the engine configurations in the `environment-config.yaml` would look like the following: - -Instance 1 (import from engine `default` enabled): - -``` -... 
-engines: - 'camunda-bpm': - name: default - rest: 'http://localhost:8080/engine-rest' - importEnabled: true - -historyCleanup: - processDataCleanup: - enabled: true - decisionDataCleanup: - enabled: true -... -``` - -Instance 2 (import from engine `camunda-bpm` disabled): - -``` -... -engines: - 'camunda-bpm': - name: default - rest: 'http://localhost:8080/engine-rest' - importEnabled: false -... -``` - -:::note -The importing instance has the [history cleanup enabled](./system-configuration.md#history-cleanup-settings). It is strongly recommended that all non-importing Optimize instances in the cluster do not enable history cleanup, to prevent any conflicts when the [history cleanup](../history-cleanup/) is performed. -::: - -### 1.1 Import - event based process import - -Camunda 7 only - -In the context of event-based process import and clustering, there are two additional configuration properties to consider carefully. - -One is specific to each configured Camunda engine, [`engines.${engineAlias}.eventImportEnabled`](./system-configuration-platform-7.md), and controls whether data from this engine is also imported as event source data for [event-based processes](components/userguide/additional-features/event-based-processes.md). You need to enable this on the same cluster node for which the [`engines.${engineAlias}.importEnabled`](./system-configuration-platform-7.md) configuration flag is set to `true`. - -[`eventBasedProcess.eventImport.enabled`](./setup-event-based-processes.md) controls whether the particular cluster node processes events to create event-based process instances. This allows you to run a dedicated node that performs this operation, while other nodes might just feed in Camunda activity events. - -### 2. Distributed user sessions - configure shared secret token - -If more than one Camunda Optimize instance is accessible by users, for example in a failover scenario, a shared secret token needs to be configured for all the instances. -This enables distributed sessions among all instances, so users do not lose their session when being routed to another instance. - -The relevant configuration property is [`auth.token.secret`](./system-configuration.md#security), which needs to be configured in the `environment-configuration.yaml` of each Camunda Optimize instance that is part of the cluster. - -It is recommended to use a secret token with a length of at least 64 characters, generated using a sufficiently good random number generator, for example the one provided by `/dev/urandom` on Linux systems. - -The following example command would generate a 64-character random string: - -``` -< /dev/urandom tr -dc A-Za-z0-9 | head -c64; echo -``` - -The corresponding `environment-config.yaml` entry would look the **same for all instances of the cluster**: - -``` -auth: - token: - secret: '' -``` - -## Example setup - -The smallest cluster setup, consisting of one instance importing from a given `default` engine and another instance where the import is disabled, would look like the following: - -![Two Optimize instances](./img/Optimize-Clustering.png) - -The HTTP/S Load-Balancer would route user requests to either of the two instances, while Optimize #1 also takes care of importing data from the engine to the shared Elasticsearch instance/cluster, and Optimize #2 only accesses the engine in order to authenticate and authorize users. 
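To tie these pieces together, the following is a minimal sketch of what the `environment-config.yaml` of the non-importing instance (Optimize #2) could look like, assuming the same single engine as above; the secret value is a placeholder that must be identical on every instance of the cluster:

```yaml
engines:
  'camunda-bpm':
    name: default
    rest: 'http://localhost:8080/engine-rest'
    importEnabled: false # only Optimize #1 imports from this engine

historyCleanup:
  processDataCleanup:
    enabled: false # keep history cleanup disabled on non-importing nodes
  decisionDataCleanup:
    enabled: false

auth:
  token:
    secret: '<same-64-character-random-secret-as-on-instance-1>'
```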
diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/common-problems.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/common-problems.md deleted file mode 100644 index 8fb78a97860..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/common-problems.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: common-problems -title: "Common problems" -description: "Information to help troubleshoot common problems." ---- - -This section aims to provide initial help to troubleshoot common issues. This guide is not intended to be a complete list of possible problems, nor does it provide detailed step-by-step solutions; its intention is merely to point you in the right direction when investigating what may be causing the issue you are experiencing. - -## Optimize is missing some or all definitions - -It is possible that the user you are logged in as does not have the relevant authorizations to view all definitions in Optimize. Refer to the [authorization management section](./authorization-management.md#process-or-decision-definition-related-authorizations) to confirm the user has all required authorizations. - -Another common cause for this type of problem is an issue with Optimize's data import, for example due to underlying problems with the engine data. In this case, the Optimize logs should contain more information on what is causing Optimize to not import the definition data correctly. If you are unsure how to interpret what you find in the logs, create a support ticket. - -## Report assignee, candidate group, variable or suspension state data is inaccurate or missing - -Optimize relies on specific engine logs to retrieve data about assignees, candidate groups, variables, and instance suspension state. If the engine history settings are not set correctly, these logs may be missing from the engine data Optimize imports. Refer to the [history level documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#choose-a-history-level) to ensure the history level is set correctly. - -Additionally, similar to the issue regarding missing definition data, it is possible that the Optimize import has encountered an issue. In this case, refer to your Optimize logs for more information. - -## Error message indicating that an index is set to read only - -This often occurs when Elasticsearch is running out of disk space. If this is the case, adjusting your Elasticsearch setup accordingly should resolve the issue. Note that you may need to manually unlock your indices afterwards; refer to [Elasticsearch's documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html) on how to do this. - -## Exception indicating an error while checking the engine version - -The most common cause for this issue is that the engine endpoint Optimize uses is not configured correctly. Check your [configuration](./system-configuration-platform-7.md) and ensure the engine REST URL is set correctly. - -## Server language results in UI/server errors - -When Optimize is running with its language set to one with characters that it can't recognize, such as Turkish, you may observe logged issues and unusable elements in the UI. We recommend running Optimize on a server with its language set to English. 
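Referring back to the read-only index error described above: after freeing disk space, the block usually has to be lifted manually. The following is only a sketch of the corresponding Elasticsearch call, assuming Elasticsearch is reachable locally on port 9200; see the linked Elasticsearch documentation for the authoritative procedure:

```bash
# Sketch: remove the read-only block from all indices once disk space is available again.
curl -X PUT "http://localhost:9200/_all/_settings" \
  -H "Content-Type: application/json" \
  -d '{ "index.blocks.read_only_allow_delete": null }'
```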
- -## Update issues - -Always check the [migration and update instructions](./../migration-update/instructions.md) for the versions you are migrating, often this section already documents the problem you are experiencing along with the solution. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/event-based-processes.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/event-based-processes.md deleted file mode 100644 index 884e804160f..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/event-based-processes.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: event-based-process-configuration -title: "Event-based process system configuration" -description: "How to configure event-based processes in Optimize." ---- - -Camunda 7 only - -Configuration of the Optimize event based process feature. - -| YAML Path | Default Value | Description | -| -------------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| eventBasedProcess.authorizedUserIds | [ ] | A list of userIds that are authorized to manage (Create, Update, Publish & Delete) event based processes. | -| eventBasedProcess.authorizedGroupIds | [ ] | A list of groupIds that are authorized to manage (Create, Update, Publish & Delete) event based processes. | -| eventBasedProcess.eventImport.enabled | false | Determines whether this Optimize instance performs event based process instance import. | -| eventBasedProcess.eventImport.maxPageSize | 5000 | The batch size of events being correlated to process instances of event based processes. | -| eventBasedProcess.eventIndexRollover.scheduleIntervalInMinutes | 10 | The interval in minutes at which to check whether the conditions for a rollover of eligible indices are met, triggering one if required. This value should be greater than 0. | -| eventBasedProcess.eventIndexRollover.maxIndexSizeGB | 50 | Specifies the maximum total index size for events (excluding replicas). When shards get too large, query performance can slow down and rolling over an index can bring an improvement. Using this configuration, a rollover will occur when triggered and the current event index size matches or exceeds the maxIndexSizeGB threshold. | - -## Event Ingestion REST API Configuration - -Camunda 7 only - -Configuration of the Optimize [Event Ingestion REST API](../../../apis-tools/optimize-api/event-ingestion.md) for [event-based processes](components/userguide/additional-features/event-based-processes.md). - -| YAML Path | Default Value | Description | -| ----------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| eventBasedProcess.eventIngestion.maxBatchRequestBytes | 10485760 | Content length limit for an ingestion REST API bulk request in bytes. Requests will be rejected when exceeding that limit. Defaults to 10MB. 
In case this limit is raised you should carefully tune the heap memory accordingly; see Adjust Optimize heap size on how to do that. | -| eventBasedProcess.eventIngestion.maxRequests | 5 | The maximum number of event ingestion requests that can be serviced at any given time. | diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/getting-started.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/getting-started.md deleted file mode 100644 index 85e41e89cdb..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/getting-started.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: getting-started -title: Getting started -description: "All distributions of Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements." ---- - -All distributions of Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config`. There are two files, one called `environment-config.yaml` with values that override the default Optimize properties, and another called `environment-logback.xml`, which sets the logging configuration. - -You can see all supported values and read about logging configuration [here](./system-configuration.md). - -## Optimize web container configuration - -Refer to the [configuration section on container settings](./system-configuration.md) for more information on how to adjust the Optimize web container configuration. - -## Elasticsearch configuration - -You can customize the [Elasticsearch connection settings](./system-configuration.md#connection-settings) as well as the [index settings](./system-configuration.md#index-settings). - -## Camunda 7 configuration - -Camunda 7 only - -To perform an import and provide the full set of features, Optimize requires a connection to the REST API of the Camunda engine. For details on how to configure the connection to Camunda 7, refer to the [Camunda 7 configuration section](./system-configuration-platform-7.md). - -## Camunda 8 specific configuration - -For Camunda 8, Optimize imports process data from exported Zeebe records, as created by the [Zeebe Elasticsearch Exporter](https://github.com/camunda/camunda/tree/main/zeebe/exporters/elasticsearch-exporter), from the same Elasticsearch cluster that Optimize uses to store its own data. For the relevant configuration options, refer to the [Camunda 8 import configuration](./system-configuration-platform-8.md). - -## Recommended additional configurations - -### Adjust engine heap size - -Sending huge process definition diagrams via REST API might cause the engine to crash if the engine heap size is inadequately limited. Thus, it is recommended to increase the heap size of the engine to at least 2 GB; for example, by adding the following Java command line property when starting the engine: - -```bash --Xmx2048m -``` - -For Camunda 7, it is also recommended to decrease the [deployment cache size](https://docs.camunda.org/manual/latest/user-guide/process-engine/deployment-cache/#customize-the-maximum-capacity-of-the-cache) to `500`, e.g. by: - -```bash - -``` - -### Adjust Optimize heap size - -By default, Optimize is configured with 1GB JVM heap memory. 
Depending on your setup and actual data, you might still encounter situations where you need more than this default for a seamless operation of Optimize. To increase the maximum heap size, you can set the environment variable `OPTIMIZE_JAVA_OPTS` and provide the desired JVM system properties; for example, for 2GB of Heap: - -```bash -OPTIMIZE_JAVA_OPTS=-Xmx2048m -``` - -### Maximum result limits for queries - -It's possible that engine queries [consume a lot of memory](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-api/#query-maximum-results-limit). To mitigate this risk, you can [limit the number of results](https://docs.camunda.org/manual/latest/reference/deployment-descriptors/tags/process-engine/#queryMaxResultsLimit) a query can return. If you do this, we recommend setting `queryMaxResultsLimit` to `10000` so the Optimize import works without any problems. This value should still be low enough so you don't run into any problems with the previously mentioned heap configurations. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/history-cleanup.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/history-cleanup.md deleted file mode 100644 index 7f7701780ff..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/history-cleanup.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -id: history-cleanup -title: "History cleanup" -description: "Make sure that old data is automatically removed from Optimize." ---- - -To satisfy data protection laws or just for general storage management purposes, Optimize provides an automated cleanup functionality. - -There are four types of history cleanup: - -- Process data cleanup -- Decision data cleanup -- External event cleanup -- External variable cleanup - -By default, all four types of history cleanup are disabled. They can be enabled individually by config and the cleanup is applied accordingly. - -:::note Note for Camunda 7 users -By default, the history cleanup is disabled in Optimize when running in Camunda 7. Before enabling it, you should consider the type of cleanup and time to live period that fits to your needs. Otherwise, historic data intended for analysis might get lost irreversibly. - -The default [engine history cleanup](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup) in Camunda 7 works differently than the one in Optimize due to the possible cleanup strategies. The current implementation in Optimize is equivalent to the [end time strategy](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#end-time-based-strategy) of the Engine. -::: - -## Setup - -The most important settings are `cronTrigger` and `ttl`; their global default configuration is the following: - -``` -historyCleanup: - cronTrigger: '0 1 * * *' - ttl: 'P2Y' -``` - -`cronTrigger` - defines at what interval and when the history cleanup should be performed in the format of a cron expression. The default is 1AM every day. To avoid any impact on daily business, it is recommended to schedule the cleanup outside of business hours. - -See the [Configuration Description](./system-configuration.md#history-cleanup-settings) for further insights into this property and its format. - -`ttl` - is the global time to live period of data contained in Optimize. The field that defines the age of a particular entity differs between process, decision, and event data. 
Refer to the corresponding subsection in regard to that. -The default value is `'P2Y'`, which means by default data older than _2 years_ at the point in time when the cleanup is executed gets cleaned up. -For details on the notation, see the [Configuration Description](./system-configuration.md#history-cleanup-settings) of the ttl property. - -All the remaining settings are entity type specific and will be explained in the following subsections. - -### Process data cleanup - -The age of process instance data is determined by the `endTime` field of each process instance. Running instances are never cleaned up. - -To enable the cleanup of process instance data, the `historyCleanup.processDataCleanup.enabled` property needs to be set to `true`. - -Another important configuration parameter for process instance cleanup is the `historyCleanup.processDataCleanup.cleanupMode`. It determines what in particular gets deleted when a process instance is cleaned up. The default value of `all` results in the whole process instance being deleted. -For other options, review the [configuration description](./system-configuration.md#history-cleanup-settings) of the `historyCleanup.processDataCleanup.cleanupMode` property. - -To set up a process definition-specific `ttl` or different `cleanupMode` you can also provide process specific settings using the `perProcessDefinitionConfig` list which overrides the global settings for the corresponding definition key. - -In this example, process instances of the key `MyProcessDefinitionKey` would be cleaned up after two months instead of two years, and when the cleanup is performed, only their associated variables would be deleted instead of the complete process instance. - -``` -historyCleanup: - ttl: 'P2Y' - processDataCleanup: - enabled: true - cleanupMode: 'all' - perProcessDefinitionConfig: - 'MyProcessDefinitionKey': - ttl: 'P2M' - cleanupMode: 'variables' -``` - -### Decision data cleanup - -The age of decision instance data is determined by the `evaluationTime` field of each decision instance. - -To enable the cleanup of decision instance data, the `historyCleanup.decisionDataCleanup.enabled` property needs to be set to `true`. - -Like for the [Process Data Cleanup](#process-data-cleanup), it is possible to configure a decision definition specific `ttl` using the `perDecisionDefinitionConfig` list. - -``` -historyCleanup: - ttl: 'P2Y' - decisionDataCleanup: - enabled: true - perDecisionDefinitionConfig: - 'myDecisionDefinitionKey': - ttl: 'P3M' -``` - -### Ingested event cleanup - -The age of ingested event data is determined by the [`time`](../../../apis-tools/optimize-api/event-ingestion.md#request-body) field provided for each event at the time of ingestion. - -To enable the cleanup of event data, the `historyCleanup.ingestedEventCleanup.enabled` property needs to be set to `true`. - -``` -historyCleanup: - ttl: 'P2Y' - ingestedEventCleanup: - enabled: true -``` - -:::note -The ingested event cleanup does not cascade down to potentially existing [event-based processes](components/userguide/additional-features/event-based-processes.md) that may contain data originating from ingested events. To make sure data of ingested events is also removed from event-based processes, you need to enable the [Process Data Cleanup](#process-data-cleanup) as well. 
-::: - -## Example - -Here is an example of what a complete cleanup configuration might look like: - -``` -historyCleanup: - cronTrigger: '0 1 * * 0' - ttl: 'P1Y' - processDataCleanup: - enabled: true - cleanupMode: 'variables' - perProcessDefinitionConfig: - 'VeryConfidentProcess': - ttl: 'P1M' - cleanupMode: 'all' - 'KeepTwoMonthsProcess': - ttl: 'P2M' - decisionDataCleanup: - enabled: true - perDecisionDefinitionConfig: - 'myDecisionDefinitionKey': - ttl: 'P3M' - ingestedEventCleanup: - enabled: true -``` - -The above configuration results in the following setup: - -- The cleanup is scheduled to run every Sunday at 1AM. -- The global `ttl` of any data is one year. -- The process data cleanup is enabled. -- The `cleanupMode` performed on all process instances that passed the `ttl` period is just clearing their variable data but keeping the overall instance data like activityInstances. -- There is a process specific setup for the process definition key `'VeryConfidentProcess'` that has a special `ttl` of one month and those will be deleted completely due the specific `cleanupMode: 'all'` configuration for them. -- There is another process specific setup for the process definition key `'KeepTwoMonthsProcess'` that has a special `ttl` of two months. -- The decision data cleanup is enabled. -- There is a decision definition specific setup for the definition key `myDecisionDefinitionKey` that has a special `ttl` of three months. -- The ingested event cleanup is enabled. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-GrantAccessAuthorizations.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-GrantAccessAuthorizations.png deleted file mode 100644 index bc16527ed3a..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-GrantAccessAuthorizations.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-GrantDefinitionAuthorizations.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-GrantDefinitionAuthorizations.png deleted file mode 100644 index e8f26b37477..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-GrantDefinitionAuthorizations.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-RevokeDefinitionAuthorization.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-RevokeDefinitionAuthorization.png deleted file mode 100644 index bb0d4fc94a7..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-RevokeDefinitionAuthorization.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-RevokeGroupAccess.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-RevokeGroupAccess.png deleted file mode 100644 index 78e4bbc0b04..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Admin-RevokeGroupAccess.png and /dev/null differ diff --git 
a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Clustered-Engine-Distributed-Database.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Clustered-Engine-Distributed-Database.png deleted file mode 100644 index abac7ae0c86..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Clustered-Engine-Distributed-Database.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Clustered-Engine-Shared-Database.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Clustered-Engine-Shared-Database.png deleted file mode 100644 index a748f087080..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Clustered-Engine-Shared-Database.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Multiple-Engine-Distributed-Database.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Multiple-Engine-Distributed-Database.png deleted file mode 100644 index 6912a0dc1ff..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Multiple-Engine-Distributed-Database.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Multiple-Engine-Shared-Database.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Multiple-Engine-Shared-Database.png deleted file mode 100644 index 632069a2338..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Multiple-Engine-Shared-Database.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Optimize-Clustering.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Optimize-Clustering.png deleted file mode 100644 index 868a649b5ac..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/Optimize-Clustering.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/admin-tenant-authorization.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/admin-tenant-authorization.png deleted file mode 100644 index abf4a4d12e9..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/admin-tenant-authorization.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/license-guide.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/license-guide.png deleted file mode 100644 index 0b34971b70e..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/license-guide.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/shared-elasticsearch-cluster.png 
b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/shared-elasticsearch-cluster.png deleted file mode 100644 index f430ee9fcf2..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/img/shared-elasticsearch-cluster.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/license.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/license.md deleted file mode 100644 index 8a54c8dd485..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/license.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: optimize-license -title: "Optimize license key" -description: "When you log in to Optimize for the first time, you are redirected to the license page where you can enter your license key." ---- - -Camunda 7 only - -When you log in to Optimize for the first time, you are redirected to the license page. Here, enter your license key to be able to use Camunda Optimize. - -![Optimize license page with no license key in the text field and submit button below](img/license-guide.png) - -Alternatively, you can add a file with the license key to the path `${optimize-root-folder}/config/OptimizeLicense.txt`; it will be automatically loaded to the database unless it already contains a license key. - -If you are using the Optimize Docker images and want Optimize to automatically recognize your license key, refer to the [installation guide](../../install-and-start#license-key-file) on how to achieve this. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/localization.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/localization.md deleted file mode 100644 index d3debcc1c42..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/localization.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: localization -title: "Localization" -description: "Localization of Optimize." ---- - -To present a localized version of Optimize to users corresponding to their default browser language, Optimize provides the possibility to configure localizations. - -## Default locale configuration - -The distributions of Optimize contain the default localization files under `./config/localization/`. - -The default localizations available are `en` for English and `de` for German. You can also find community maintained localizations in [this repository](https://github.com/camunda/camunda-optimize-translations). - -Additionally, English is configured as the default `fallbackLocale`. Fallback in this case means whenever a user has a browser configured with a language that is not present in the `availableLocales` list, Optimize will use the `fallbackLocale`. - -The default locale configuration in `./config/environment-config.yaml` looks like the following: - -``` -locales: - availableLocales: ['en', 'de'] - fallbackLocale: 'en' -``` - -For more details on the configuration keys, refer to the [localization configuration section](./system-configuration.md#localization). - -## Custom locale configuration - -Custom locales can be added by creating a locale file under `./config/localization/` and adding it to the `availableLocales` configuration. - -:::note -Configuring a custom locale means you have to maintain it yourself and update it in the context of an Optimize update. 
- -There is currently no changelog of new localization entries available, and it is required that each localization file contains an entry for each key used by Optimize. -::: - -As an example, a custom localization can be created by making a copy of the `./config/localization/en.json` named `/config/localization/es.json` and adding it to the available locales in `./config/environment-config.yaml` - -``` -locales: - availableLocales: ['en', 'de', 'es'] - fallbackLocale: 'en' -``` diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/logging.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/logging.md deleted file mode 100644 index 300ba2e7f36..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/logging.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: logging -title: "Logging" -description: "Camunda Optimize provides logging facilities that are preconfigured to use INFO logging level which provides minimal output of information in log files." ---- - -Camunda Optimize provides logging facilities that are preconfigured to use -_INFO_ logging level which provides minimal output of information in log files. -This level can be adjusted using the `environment-logback.xml` configuration file. - -## Google Stackdriver (JSON) logging - -To enable Google Stackdriver compatible JSON logging, set the environment variable `JSON_LOGGING=true` before starting Optimize. - -## Default logging configuration - -Although one could potentially configure logging levels for all packages, it -is recommended to set logging levels for the following three Optimize parts using only exact package -reference as follows: - -- Optimize runtime environment: - -```xml - -``` - -- Optimize update: - -```xml - - - -``` - -- Communication to Elasticsearch: - -```xml - -``` - -If you are running Optimize with Docker, use the following environment variables to configure its logging levels: - -- `OPTIMIZE_LOG_LEVEL`: Sets the logging level for the Optimize log. -- `UPGRADE_LOG_LEVEL`: Sets the logging level for the Optimize update log. -- `ES_LOG_LEVEL`: Sets the logging level for Elasticsearch. - -Whether using the configuration file or Docker environment variables, to define the granularity of the information shown in the log you can set one of the following log levels: - -- **error**: Shows errors only. -- **warn**: Like **error**, but displays warnings as well. -- **info**: Logs everything from **warn** and the most important information about state changes or actions in Optimize. -- **debug**: In addition to **info**, writes information about the scheduling process, alerting as well as the import of the engine data. -- **trace**: Like **debug**, but in addition, writes all requests sent to the Camunda engine as well as all queries towards Elasticsearch to the log output. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/multi-tenancy.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/multi-tenancy.md deleted file mode 100644 index 8aa78a49928..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/multi-tenancy.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: multi-tenancy -title: "Multi-tenancy" -description: "Learn about the supported multi-tenancy scenarios." ---- - -Camunda 7 only - -Learn how to set up multi-tenancy with Optimize. 
- -## Possible multi-tenancy scenarios - -As described in the [Camunda 7 documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/multi-tenancy/), there are two possible multi-tenant scenarios which are also supported by Optimize: - -- [Possible multi-tenancy scenarios](#possible-multi-tenancy-scenarios) - - [Single process engine with tenant-identifiers](#single-process-engine-with-tenant-identifiers) - - [One process engine per tenant](#one-process-engine-per-tenant) - -### Single process engine with tenant-identifiers - -Tenant-identifiers available in the Camunda 7 engine are automatically imported into Optimize and tenant-based access authorization is enforced based on the configured `Tenant Authorizations` within the Camunda 7. This means there is no additional setup required for Optimize in order to support this multi-tenancy scenario. - -Users granted tenant access via the Camunda 7 will be able to create and see reports for that particular tenant in Optimize. In the following screenshot, the user `demo` is granted access to data of the tenant with the id `firstTenant` and will be able to select that tenant in the report builder. Other users, without the particular firstTenant authorization, will not be able to select that tenant in the report builder nor be able to see results of reports that are based on that tenant. - -![Tenant Authorization](img/admin-tenant-authorization.png) - -### One process engine per tenant - -In the case of a multi-engine scenario where tenant-specific data is isolated by deploying to dedicated engines, there are no tenant identifiers present in the particular engines themselves. For a single Optimize instance that is configured to import from each of those engines to support this scenario, it is required to configure a `defaultTenant` for each of those engines. - -The effect of configuring a `defaultTenant` per engine is that all data records imported from the particular engine where no engine-side tenant identifier is present this `defaultTenant` will be added automatically. Optimize users will be authorized to those default tenants based on whether they are authorized to access the particular engine the data originates from. So in this scenario, it is not necessary to configure any `Tenant Authorizations` in the Camunda 7 itself. - -The following `environment-config.yaml` configuration snippet illustrates the configuration of this `defaultTenant` on two different engines. - -``` -... -engines: - "engineTenant1": - name: engineTenant1 - defaultTenant: - # the id used for this default tenant on persisted entities - id: tenant1 - # the name used for this tenant when displayed in the UI - name: First Tenant - ... - "engineTenant2": - name: engineTenant2 - defaultTenant: - # the id used for this default tenant on persisted entities - id: tenant2 - # the name used for this tenant when displayed in the UI - name: Second Tenant -... -``` - -Optimize users who have a `Optimize Application Authorization` on both engines will be able to distinguish between data of both engines by selecting the corresponding tenant in the report builder. - -:::note Heads up! -Once a `defaultTenant.id` is configured and data imported, you cannot change it any more without doing a [full reimport](./../migration-update/instructions.md#force-reimport-of-engine-data-in-optimize) as any changes to the configuration cannot be applied to already imported data records. 
-::: diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/multiple-engines.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/multiple-engines.md deleted file mode 100644 index f1ba7216737..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/multiple-engines.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: multiple-engines -title: "Multiple process engines" -description: "Learn how to set up multiple process engines with Optimize and which scenarios are supported." ---- - -Camunda 7 only - -Learn how to set up multiple process engines with Optimize and which scenarios are supported. - -## Possible multiple process engine scenarios - -There are two possible setups where multiple process engines can be used: - -- [Possible multiple process engine scenarios](#possible-multiple-process-engine-scenarios) - - [Multiple engines with distributed databases](#multiple-engines-with-distributed-databases) - - [Multiple engines with a shared database](#multiple-engines-with-a-shared-database) -- [Authentication and authorization in the multiple engine setup](#authentication-and-authorization-in-the-multiple-engine-setup) - -Check which scenario corresponds to your setup because the configuration of multiple engines to Optimize is not always suited for the best import performance. - -:::note Heads Up! - -There are two restrictions for the multiple engines feature: - -1. The process engines are assumed to have distinct process definitions, which means that one process definition (same key, tenant and version) is not deployed on two or more engines at the same time. - Alternatively, each engine could be configured with default tenant identifiers as described in the [One Tenant Per Engine Scenario](../multi-tenancy/#one-process-engine-per-tenant). -2. The engines are assumed to have distinct tenant identifiers, which means one particular tenantId is not deployed on two or more engines at the same time. - -::: - -### Multiple engines with distributed databases - -In this scenario, you have multiple process engines and each engine has its own database as illustrated in the following diagram: - -![Clustered Engine with distributed Database](img/Clustered-Engine-Distributed-Database.png) - -Now, you are able to connect each engine to Optimize. The data will then automatically be imported into Optimize. The following diagram depicts the setup: - -![Multiple Engines connected to Optimize, each having its own Database](img/Multiple-Engine-Distributed-Database.png) - -To set up the connections to the engines, you need to add the information to the [configuration file](./system-configuration-platform-7.md). For the sake of simplicity, let's assume we have two microservices, `Payment` and `Inventory`, each having their own engine with its own database and processes. Both are accessible in the local network. The `Payment` engine has the port `8080` and the `Inventory` engine the port `1234`. 
Now an excerpt of the configuration could look as follows: - -```yaml -engines: - payment: - name: default - rest: http://localhost:8080/engine-rest - authentication: - enabled: false - password: "" - user: "" - enabled: true - inventory: - name: default - rest: http://localhost:1234/engine-rest - authentication: - enabled: false - password: "" - user: "" - enabled: true -``` - -`payment` and `inventory` are custom names that were chosen to distinguish where the data was originally imported from later on. - -### Multiple engines with a shared database - -In this scenario you have multiple engines distributed in a cluster, where each engine instance is connected to a shared database. See the following diagram for an illustration: - -![Clustered Engine with shared Database](img/Clustered-Engine-Shared-Database.png) - -Now it could be possible to connect each engine to Optimize. Since every engine accesses the same data through the shared database, Optimize would import the engine data multiple times. There is also no guarantee that importing the same data multiple times will not cause any data corruption. For this reason, we do not recommend using the setup from [multiple engines with distributed databases](#multiple-engines-with-distributed-databases). - -In the scenario of multiple engines with a shared database, it might make sense to balance the work load on each engine during the import. You can place a load balancer between the engines and Optimize, which ensures that the data is imported only once and the load is distributed among all engines. Thus, Optimize would only communicate to the load balancer. The following diagram depicts the described setup: - -![Multiple Engines with shared Database connected to Optimize](img/Multiple-Engine-Shared-Database.png) - -In general, tests have shown that Optimize puts a very low strain on the engine and its impact on the engine's operations are in almost all cases neglectable. - -## Authentication and authorization in the multiple engine setup - -When you configure multiple engines in Optimize, each process engine can host different users with a different set of authorizations. If a user is logging in, Optimize will try to authenticate and authorize the user on each configured engine. In case you are not familiar with how -the authorization/authentication works for a single engine scenario, visit the [User Access Management](./user-management.md) and [Authorization Management](./authorization-management.md) documentation first. 
- -To determine if a user is allowed to log in and which resources they are allowed to access within the multiple engine scenario, Optimize uses the following algorithm: - -_Given the user X logs into Optimize, go through the list of configured engines and try to authenticate the user X, for each successful authentication fetch the permissions of X for applications and process definitions from that engine and allow X to access Optimize if authorized by at least one engine._ - -To give you a better understanding of how that works, let's take the following multiple engine scenario: - -``` -- Engine `payment`: - - User without Optimize Application Authorization: Scooter, Walter - - User with Optimize Application Authorization: Gonzo - - Authorized Definitions for Gonzo, Scooter, Walter: Payment Processing - -- Engine `inventory`: - - User with Optimize Application Authorization: Piggy, Scooter - - Authorized Definitions for Piggy, Scooter: Inventory Checkout - -- Engine `order`: - - User with Optimize Application Authorization: Gonzo - - Authorized Definitions for Gonzo: Order Handling - -``` - -Here are some examples that might help you to understand the authentication/authorization procedure: - -- If `Piggy` logged in to Optimize, she would be granted access to Optimize and can create reports for the definition `Inventory Checkout`. -- If `Rizzo` logged in to Optimize, he would be rejected because the user `Rizzo` is not known to any engine. -- If `Walter` logged in to Optimize, he would be rejected despite being authorized to access the definition `Payment Processing` on engine `payment` because `Walter` does not have the `Optimize Application Authorization` required to access Optimize. -- If `Scooter` logged in to Optimize, he would be granted access to Optimize and can create reports for the definition `Inventory Checkout`. He wouldn't - get permissions for the `Payment Processing` or the `Order Handling` definition, since he doesn't have Optimize permissions on the `payment` or `order` engine. -- If `Gonzo` logged in to Optimize, he would be granted access to Optimize and can create reports for the definition `Payment Processing` as well as the `Order Handling` definition, since definition authorizations are loaded from all engines the user could be authenticated with (in particular `payment` and `order`). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/object-variables.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/object-variables.md deleted file mode 100644 index 569659b7bb7..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/object-variables.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: object-variables -title: "Object and list variable support" -description: "Learn how Optimize imports and handles object and list variables." ---- - -## Object variables - -Complex object variables can be imported into Optimize and thereafter be used in reports and filters. During import, Optimize flattens the given object variable to create individual variables for each property of the object, resulting in multiple "sub variables" for each imported object variable. - -For example, an object variable called `user` with the properties `firstName` and `lastName` will result in two flattened variables: `user.firstName` and `user.lastName`. These variables can be used within reports and filters.
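For illustration — a minimal sketch reusing only the hypothetical `user` variable from the example above — the raw value and the flattened "sub variables" relate as follows:

```
# Raw object variable as ingested (JSON value of the variable "user")
user: {"firstName": "John", "lastName": "Smith"}

# Flattened "sub variables" created by Optimize during import
user.firstName: "John"
user.lastName: "Smith"
```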
- -In addition to the flattened properties, Optimize also imports the entire raw value of the object variable. In the above example, this would result in a variable called `user` with value `{"firstName": "John", "lastName": "Smith"}`. This raw object variable can be inspected in Raw Data Reports but is not supported in other report types or filters. - -## List variables - -Optimize also supports object variables which are JSON serialized lists of primitive types, for example a list of strings or numbers. Note that for Camunda 7 and external variables, the `type` of list variables must still be set to `Object`. During import, Optimize also evaluates how many entries are in a given list and persists this in an additional `_listSize` variable. - -For example, a list variable with the name `users` and the values `["John Smith", "Jane Smith"]` will result in two imported variables: one `users` variable with the two given values, and one variable called `users._listSize` with value `2`. Both can be used in reports and filters. - -However, filters are not yet fully optimized for list support, and some filter terms may be initially misleading. This is because filters currently apply to each list item individually rather than the entire list. For example, an "is" filter on a list of string values filters for those instances where any individual list item is equal to the given term, that is, instances whose list variable "contains" the selected value. - -Similarly, the "contains" filter matches process instances whose list variable contains at least one value which in turn contains the given substring. - -The value of list properties within objects as well as variables which are lists of objects rather than primitives can be inspected in the raw object variable value column accessible in raw data reports. - -## Variable plugins - -Any configured [variable plugins](../../plugins/variable-import-plugin) are applied _before_ Optimize creates the flattened property "sub variables", meaning the configured plugins have access to the raw JSON object variables only. Any modifications applied to the JSON object variables will then be persisted to the "sub variables" when Optimize flattens the resulting objects in the next step of the import cycle. - -## Optimize configuration - -The import of object variable values is enabled by default and can be disabled using the `import.data.variable.includeObjectVariableValue` [configuration](./system-configuration-platform-7.md). - -## Other system configurations - -Depending on where the imported object variables originate, the following configuration is required to ensure that your system produces object variable data that Optimize can import correctly: - -### Platform object variables - -Optimize supports both [object process variables serialized as JSON](https://docs.camunda.org/manual/latest/user-guide/data-formats/json/#serializing-process-variables) and [native JSON variables](https://docs.camunda.org/manual/latest/user-guide/data-formats/json/#native-json-variable-value) from Camunda 7. If you are importing object variables, it is required to configure the Platform's spin serialization so that process variables are by default **serialized as JSON**. Refer to the [Camunda 7 documentation](https://docs.camunda.org/manual/latest/user-guide/data-formats/json/#serializing-process-variables) for more information on how to set up JSON serialization.
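As a rough sketch — assuming a Spring Boot-managed Camunda 7 engine and the property name exposed by the Camunda Spring Boot starter (both assumptions, not taken from this guide) — the default serialization format could be switched to JSON like this:

```yaml
# application.yaml of the engine application (assumed Spring Boot setup);
# makes the engine serialize object process variables as JSON by default.
camunda:
  bpm:
    default-serialization-format: application/json
```

For other distributions, the equivalent engine property is typically `defaultSerializationFormat`; refer to the linked Camunda 7 documentation for the authoritative setup.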
- -Furthermore, to allow Optimize to correctly parse date properties within the object or native JSON variable, ensure date properties of objects are serialized using a common **date format** (for example `yyyy-MM-dd'T'HH:mm:ss.SSSZ`) other than unix timestamps. If date properties are serialized as unix timestamps, these properties cannot be identified and parsed as dates when importing into Optimize and will instead be persisted as number variables. - -### Zeebe object variables - -If you are creating object variables using a Zeebe process, ensure date properties within the JSON object are stored using a common **date format** (for example `yyyy-MM-dd'T'HH:mm:ss.SSSZ`) other than unix timestamps. If Optimize imports unix timestamp date properties, these properties cannot be identified and parsed as dates and will instead be persisted as number variables. - -### External object variables - -External variables of type object require an additional field called `serializationDataFormat` which specifies which data format was used to serialize the given object. - -Refer to the [external object variable API section](../../../apis-tools/optimize-api/external-variable-ingestion.md) for further details on how to ingest external variables. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/security-instructions.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/security-instructions.md deleted file mode 100644 index 14f07baa678..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/security-instructions.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -id: security-instructions -title: "Security instructions" -description: "Learn how to secure your Optimize distribution against potential attacks." ---- - -This page provides an overview of how to secure a Camunda Optimize installation. For Camunda's security policy, a list of security notices, and a guide on how to report vulnerabilities, visit the [general security documentation](https://docs.camunda.org/security/). - -This guide also identifies areas where we consider security issues to be relevant for the Camunda Optimize product and list those in the subsequent sections. Compliance for those areas is ensured based on common industry best practices and influenced by security requirements of standards like OWASP Top 10 and others. - -## Secure the Camunda 7 connection - -Camunda 7 only - -:::note Important! -Optimize does not operate on its own, but needs the Camunda 7 engine to import the data from and Elasticsearch to store the data. A detailed description of the setup can be found in the [architecture overview](../advanced-features/import-guide.md) guide. -::: - -The BPMN platform with its process engine is a full standalone application which has a dedicated [security](https://docs.camunda.org/manual/latest/user-guide/security/) guide. The sections that are of major importance for the communication with Optimize are: [enabling authentication for the REST API](https://docs.camunda.org/manual/latest/user-guide/security/#enabling-authentication-for-the-rest-api/#enabling-authentication-for-the-rest-api) and [enabling SSL/HTTPS](https://docs.camunda.org/manual/latest/user-guide/security/#enabling-authentication-for-the-rest-api). - -## Secure Optimize - -Optimize already comes with a myriad of settings and security mechanism by default. In the following you will find the parts that still need manual adjustments. 
- -### Disable HTTP - -For security reasons, we recommend using Optimize over HTTPS and disabling HTTP. You can disable HTTP by setting the HTTP property in the container settings to an empty/null value. Consult the respective section in the [configuration guide](./system-configuration.md#container) for the more details. - -### Fine tune Optimize security headers - -Over time, various client-side security mechanisms have been developed to protect web applications from various attacks. Some of these security mechanisms are only activated if the web application sends the corresponding HTTP headers in its server responses. - -Optimize adds several of these headers which can be fine-tuned in the [configuration](./system-configuration.md#security) to ensure appropriate security. - -### Authentication - -Camunda 7 only - -Authentication controls who can access Optimize. Read all about how to restrict the application access in the [user access management guide](./user-management.md). - -### Authorization - -Camunda 7 only - -Authorization controls what data a user can access and change in Optimize once authenticated. Authentication is a prerequisite to authorization. Read all about how to restrict the data access in the [authorization management guide](./authorization-management.md). - -## Secure Elasticsearch - -Optimize stores its data in Elasticsearch, which is a search engine that acts as a document-based datastore. To protect access to this data, Elasticsearch should be configured carefully as well. Refer to the official [Secure the Elastic Stack](https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-cluster.html#secure-cluster) documentation of Elasticsearch. - -Within the Optimize configuration, you can then enable SSL and/or the credentials to be used when Camunda Optimize connects to Elasticsearch. See [Elasticsearch Security](./system-configuration.md#elasticsearch-security) for details. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/service-config.yaml b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/service-config.yaml deleted file mode 100644 index 9882d71228b..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/service-config.yaml +++ /dev/null @@ -1,531 +0,0 @@ ---- -security: - # everything that's related to authentication - auth: - cookie: - same-site: - # decides if the optimize auth cookie has the same site cookie flag set - enabled: true - token: - # Optimize uses token-based authentication to keep track of which users are - # logged in. Define when a token is supposed to expire. - lifeMin: 60 - # Optional secret used to sign authentication tokens, it's recommended to use at least a 64 character secret. - # If set `null` a random secret will be generated with each startup of Optimize. - secret: null - # List of user ids that are granted full permission to all collections, reports & dashboards - # Note: For reports these users are still required to be granted access to the corresponding process/decision - # definitions in Camunda Platform Admin - superUserIds: [] - - # Here you can define HTTP response headers that Optimize can send in its responses - # to increase the security of your application. 
- # Find more information here: https://owasp.org/www-project-secure-headers/ - responseHeaders: - # HTTP Strict Transport Security (HSTS) is a web security policy mechanism which helps to protect websites - # against protocol downgrade attacks and cookie hijacking. - # More - HSTS: - # The time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. - # If you set the number to a negative value no HSTS header is sent. - max-age: 63072000 - # If this optional parameter is specified, this rule applies to all of the site’s subdomains as well. - includeSubDomains: true - # This header enables the cross-site scripting (XSS) filter in your browser. - # Can have one of the following options: - # * 0: Filter disabled. - # * 1: Filter enabled. If a cross-site scripting attack is detected, in order to stop the attack, - # the browser will sanitize the page. - # * 1; mode=block: Filter enabled. Rather than sanitize the page, when a XSS attack is detected, the browser will - # prevent rendering of the page. - # * 1; report=http://[YOURDOMAIN]/your_report_URI: Filter enabled. The browser will sanitize the page and - # report the violation. This is a Chromium function utilizing CSP - # violation reports to send details to a URI of your choice. - X-XSS-Protection: 1; mode=block - # Setting this header will prevent the browser from interpreting files as a different MIME type to - # what is specified in the Content-Type HTTP header (e.g. treating text/plain as text/css). - X-Content-Type-Options: true - # A Content Security Policy (CSP) has significant impact on the way browsers render pages. - # By default Optimize uses the base-uri directive which restricts the URLs that can be used to the Optimize pages. - # Find more details: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy - Content-Security-Policy: base-uri 'self' - -# A global access token used by all public APIs of Optimize -api: - # Authentication information to be provided for the public APIs of Optimize. - # Provide either a static access token OR a URL for a resource server. If both are provided, the static access - # token will be ignored and the resource server will be used for validation instead - accessToken: ${OPTIMIZE_API_ACCESS_TOKEN:null} - jwtSetUri: ${SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI:null} - -container: - # A host name or IP address, to identify a specific network interface on - # which to listen. - host: localhost - # Allows you to specify a custom context path. If set, must start with a leading '/' - contextPath: ${CAMUNDA_OPTIMIZE_CONTEXT_PATH:null} - ports: - # A port number that will be used by Optimize to process HTTP connections. - # If set to null, ~ or left empty, http connections won't be accepted. - http: 8090 - # A port number that will be used by Optimize to process - # secure HTTPS connections. - https: 8091 - # HTTPS requires an SSL Certificate. When you generate an SSL Certificate, - # you are creating a keystore file and a keystore password for use when the - # browser interface connects - keystore: - location: keystore.jks - password: optimize - # configuration of status reporting web socket - status: - # max number of threads\sessions that will be kept to report status - connections: - max: 10 - # Optional url to access Optimize (used for links to Optimize in e.g. alert emails) - accessUrl: null - # Enable use of HTTP/2 for Optimize - http2Enabled: false - -# Configuration for engines used to import data. 
Please note that you have to have at -# least one engine configured at all times. -engines: - # An alias of the engine, which will be used for internal purposes like - # logging and displaying which data belong to which engine. - "camunda-bpm": - # The process engines name on the platform, this is the unique engine identifier on the platforms REST API. - name: default - # A default tenant to the be injected on data from this engine where no tenant is configured in the engine itself. - # This property is only relevant in the context of a `One Process Engine Per Tenant`. - # For details consult the Multi-Tenancy documentation. - defaultTenant: - # the id used for this default tenant on persisted entities - id: null - # the name used for this tenant when displayed in the UI - name: null - #A base URL that will be used for connections to the Camunda Engine REST API. - rest: "http://localhost:8080/engine-rest" - # Determines whether this instance of Optimize should import definition & historical data from this engine. - importEnabled: true - # Determines whether this instance of Optimize should convert historical data to event data - # usable for event based processes. - eventImportEnabled: false - authentication: - # Toggles basic authentication on or off. When enabling basic - # authentication, please be aware that you also need to adjust the values - # of the user and password. - # Also note, when enabled, it is required that the user has - # * READ & READ_HISTORY permission on the Process and Decision Definition resources - # * READ permission on the Authorization, Group, User, Deployment & Tenant resources - # to enable users to log in and Optimize to import the engine data. - enabled: false - # When basic authentication is enabled, this password is used to - # authenticate against the engine. - password: "" - # When basic authentication is enabled, this user is used to authenticate - # against the engine. - user: "" - # The webapps configuration allows Optimize to directly link - # to the other Camunda Web Applications, e.g. to jump from - # Optimize directly to a dedicated process instance in Cockpit - webapps: - # Defines the endpoint where to find the camunda webapps for the given engine - endpoint: "http://localhost:8080/camunda" - # Enables/disables linking to other Camunda Web Applications - enabled: true - -engine-commons: - connection: - #Maximum time without connection to the engine, Optimize should wait - #until a time out is triggered. A value of zero means to wait an - # infinite amount of time. - timeout: 0 - read: - # Maximum time a request to the engine should last, - # before a timeout triggers. A value of zero means to wait an - # infinite amount of time. - timeout: 0 - -import: - data: - activity-instance: - # Determines the page size for historic activity instance fetching. - maxPageSize: 10000 - incident: - # Determines the page size for historic incident fetching. - maxPageSize: 10000 - process-definition-xml: - # Determines the page size for process definition xml model - # fetching. Should be a low value, as large models will lead to - # memory or timeout problems. - maxPageSize: 2 - process-definition: - # Determines the page size for process definition fetching. - maxPageSize: 10000 - process-instance: - # Determines the maximum page size for historic process instance fetching. - maxPageSize: 10000 - variable: - # Determines the page size for historic variable instance fetching. 
- maxPageSize: 10000 - # Controls whether Optimize fetches the serialized value of object variables from the Camunda Runtime REST API. - # By default this is active for backwards compatibility. If no variable plugin to handle object - # variables is installed, it can be turned off to reduce the overhead of the variable import. - includeObjectVariableValue: true - user-task-instance: - # Determines the page size for historic user task instance fetching - maxPageSize: 10000 - identity-link-log: - # Determines the page size for identity link log fetching. - maxPageSize: 10000 - decision-definition-xml: - # Determines the page size for decision definition xml model - # fetching. Should be a low value, as large models will lead to - # memory or timeout problems. - maxPageSize: 2 - decision-definition: - # Determines the page size for decision definition fetching. - maxPageSize: 10000 - decision-instance: - # Determines the page size for historic decision instance fetching. - maxPageSize: 10000 - tenant: - # Determines the page size for tenants fetching. - maxPageSize: 10000 - group: - # Determines the page size for groups fetching. - maxPageSize: 10000 - authorization: - # Determines the page size for authorizations fetching. - maxPageSize: 10000 - dmn: - # Determines if the DMN/decision data, such as decision definitions and instances - # should be imported. - enabled: true - user-task-worker: - # Determines if the user task worker data, such as assignee or candidate group of - # a user task, should be imported. - enabled: true - # This sub-section controls to what extent and how Optimize fetches and displays metadata of user task workers. - # The particular metadata is first-, last name and the email of the users or the names of the candidate groups. - # The data is displayed in the context of reports when grouping/distributing by assignees/candidateGroups or - # when filtering on them. - metadata: - # Determines whether Optimize imports and displays assignee user metadata, otherwise only the user id is shown. - includeUserMetaData: true - # Cron expression for when to fully refresh the internal metadata cache, it defaults to every third hour. - # Otherwise deleted assignees/candidateGroups or metadata changes are not reflected in Optimize. - cronTrigger: "0 */3 * * *" - # The max page size when multiple users or groups are iterated during the metadata refresh. - maxPageSize: 10000 - # The entry limit of the cache that holds the metadata, if you need more entries you can increase that limit. - # When increasing the limit, keep in mind to account for that by increasing the JVM heap memory as well. - # Please refer to the technical guide on how to configure the heap size. - maxEntryLimit: 100000 - # Some data can no longer be imported to a given document if its number of nested documents has reached the configured - # limit. Enable this setting to skip this data during import if the nested document limit has been reached. - skipDataAfterNestedDocLimitReached: false - # Number of threads being used to process the import jobs per data type that are writing data to elasticsearch. - elasticsearchJobExecutorThreadCount: 1 - # Adjust the queue size of the import jobs per data type that store data to elasticsearch. - # A too large value might cause memory problems. - elasticsearchJobExecutorQueueSize: 5 - handler: - backoff: - # Interval which is used for the backoff time calculation. 
- initial: 1000 - # Once all pages are consumed, the import service component will - # start scheduling fetching tasks in increasing periods of time, - # controlled by 'backoff' counter. - # This property sets maximal backoff interval in seconds - max: 30 - #States how often the import index should be stored to Elasticsearch. - importIndexStorageIntervalInSec: 10 - # the time interval the import backs off from the current tip of the time, to reread potentially missed concurrent writes - currentTimeBackoffMilliseconds: 300000 - # The identity sync enables Optimize to build up a in memory cache containing Optimize authorized users & groups. - # This data is used in the collection permissions to allow convenient search capabilities - # and to display member meta-data such as first name, last name or email. - identitySync: - # Whether to include metaData (firstName, lastName, email) when synchronizing users - includeUserMetaData: true - # Whether collection role cleanup should be performed - collectionRoleCleanupEnabled: true - # Cron expression for when the identity sync should run, defaults to every second hour. - cronTrigger: "0 */2 * * *" - # The max page size when multiple users or groups are iterated during the import. - maxPageSize: 10000 - # The entry limit of the cache, if you need more entries you can increase that limit. - # When increasing the limit, keep in mind to account for that by increasing the JVM heap memory as well. - # Please refer to the technical guide on how to configure the heap size. - maxEntryLimit: 100000 - -# everything that is related with configuring Elasticsearch or creating -# a connection to it. -es: - connection: - # Maximum time without connection to Elasticsearch, Optimize should - # wait until a time out triggers. - timeout: 10000 - # Maximum size of the Elasticsearch response consumer heap buffer. This can be increased to resolve errors - # from Elasticsearch relating to the entity content being too long - responseConsumerBufferLimitInMb: 100 - # The path prefix under which Elasticsearch is available - pathPrefix: "" - # a list of Elasticsearch nodes Optimize can connect to. If you have built - # an Elasticsearch cluster with several nodes it is recommended to define - # several connection points in case one node fails. - nodes: - # the address/hostname under which the Elasticsearch node is available. - - host: "localhost" - # A port number used by Elasticsearch to accept HTTP connections. - httpPort: 9200 - # HTTP forward proxy configuration - proxy: - # whether an HTTP proxy should be used for requests to elasticsearch - enabled: false - # the host of the proxy to use - host: null - # the port of the proxy to use - port: null - # whether this proxy is using a secured connection - sslEnabled: false - # Determines whether the hostname verification should be skipped - skipHostnameVerification: false - # Configuration relating to ES backup - backup: - # The repository name in which the backups should be stored - repositoryName: "" - - # Elasticsearch security settings - security: - # the basic auth (x-pack) username - username: null - # the basic auth (x-pack) password - password: null - # SSL/HTTPS secured connection settings - ssl: - # path to a PEM encoded file containing the certificate (or certificate chain) - # that will be presented to clients when they connect. - certificate: null - # A list of paths to PEM encoded CA certificate files that should be trusted, e.g. ['/path/to/ca.crt']. 
- # Note: if you are using a public CA that is already trusted by the Java runtime, - # you do not need to set the certificate_authorities. - certificate_authorities: [] - # used to enable or disable TLS/SSL for the HTTP connection - enabled: false - # used to specify that the certificate was self-signed - selfSigned: false - - # Maximum time a request to elasticsearch should last, before a timeout - # triggers. - scrollTimeout: 60000 - settings: - # the maximum number of buckets returned for an aggregation - aggregationBucketLimit: 1000 - index: - # the prefix prepended to all Optimize index and alias names - # NOTE: Changing this after Optimize was already run before, will create new empty indexes - prefix: "optimize" - # How often should the data replicated in case of node failure. - number_of_replicas: 1 - # How many shards should be used in the cluster for process instance and decision instance indices. - # All other indices will be made up of a single shard - # NOTE: this property only applies the first time Optimize is started and - # the schema/mapping is deployed on Elasticsearch. If you want to take - # this property to take effect again, you need to delete all indexes (with it all data) - # and restart Optimize. - number_of_shards: 1 - # How long Elasticsearch waits until the documents are available - # for search. A positive value defines the duration in seconds. - # A value of -1 means that a refresh needs to be done manually. - refresh_interval: 2s - # Optimize uses nested documents to store list information such as activities or variables belonging to a - # process instance. So this setting defines the maximum number of activities/variables/incidents that a single - # process instance can contain. This limit helps to prevent out of memory errors and should be used with care. - nested_documents_limit: 10000 - -plugin: - # Defines the directory path in the local Optimize file system which should be checked for plugins - directory: "./plugin" - variableImport: - # Look in the given base package list for variable import adaption plugins. - # If empty, the import is not influenced. - basePackages: [] - engineRestFilter: - # Look in the given base package list for engine rest filter plugins. - # If empty, the REST calls are not influenced. - basePackages: [] - authenticationExtractor: - # Looks in the given base package list for authentication extractor plugins. - # If empty, the standard Optimize authentication mechanism is used. - basePackages: [] - decisionInputImport: - # Look in the given base package list for Decision input import adaption plugins. - # If empty, the import is not influenced. - basePackages: [] - decisionOutputImport: - # Look in the given base package list for Decision output import adaption plugins. - # If empty, the import is not influenced. - basePackages: [] - elasticsearchCustomHeader: - # Look in the given base package list for Elasticsearch custom header fetching plugins. - # If empty, ES requests are not influenced. - basePackages: [] - -serialization: - # Define a custom date format that should be used for - # fetching date data from the engine(should be the same as in the engine) - engineDateFormat: yyyy-MM-dd'T'HH:mm:ss.SSSZ - -alerting: - quartz: - jobStore: "org.quartz.simpl.RAMJobStore" - -email: - # A switch to control email sending process. - enabled: false - # Email address that can be used to send alerts - address: "" - # The smtp server name - hostname: "" - # The smtp server port. 
This one is also used as SSL port for the security connection. - port: 587 - # Define configuration properties for the authentication of the email server - authentication: - # A switch to control whether the email server requires authentication - enabled: true - # Username of your smtp server - username: "" - # Corresponding password to the given user of your smtp server - password: "" - # States how the connection to the server should be secured. - # Possible values are 'NONE', 'STARTTLS' or 'SSL/TLS' - securityProtocol: "NONE" - -entity: - # which users are authorized to create/edit/delete Optimize entities outside of a collection. - # Available options: 'all', 'superuser', 'none' - authorizedEditors: "all" - # Specifies the frequency with which we want to refresh the KPI values on ES - # The given number is the interval in seconds - kpiRefreshInterval: 600 - -export: - csv: - # which users are authorized to download CSVs. Available options: 'all', 'superuser', 'none' - authorizedUsers: "all" - # Maximum number of records returned by CSV export - # Note: Increasing this value comes at a memory cost for the Optimize application that varies based on the actual data. - # As a rough guideline, an export of a 50000 records raw data report containing 8 variables on each instance - # can cause temporary heap memory peaks of up to ~200MB with the actual CSV file having a size of ~20MB. - # Please adjust the heap memory accordingly. - limit: 1000 - -sharing: - # decides if the sharing feature of Optimize can be used in the UI. - enabled: true - -historyCleanup: - # cron expression for when the cleanup should run - cronTrigger: "0 1 * * *" - # default time to live (ttl) for data, when reached the corresponding process/decision/event instances will get cleaned up - # Format is ISO_8601 duration https://en.wikipedia.org/wiki/ISO_8601#Durations - ttl: "P2Y" - processDataCleanup: - # switch for the camunda process data cleanup, defaults to false - enabled: false - # type of process data cleanup to perform, possible values: - # 'all' - delete everything related to the process instance - # 'variables' - only delete associated variables of a process instance - cleanupMode: "all" - # Defines the batch size in which camunda engine process instance data gets cleaned up - # may be reduced if requests fail due to request size constraints - batchSize: 10000 - # process definition specific configuration parameters that will overwrite the general parameters (ttl, processDataCleanupMode) - # for the specific processDefinition key - perProcessDefinitionConfig: - # 'myProcessDefinitionKey': - # ttl: 'P2M' - # cleanupMode: 'variables' - decisionDataCleanup: - # switch for the camunda decision data cleanup, defaults to false - enabled: false - # decision definition specific configuration parameters that will overwrite the general parameters (ttl) - # for the specific decisionDefinition key - perDecisionDefinitionConfig: - # 'myDecisionDefinitionKey': - # ttl: 'P2M' - ingestedEventCleanup: - # switch for the ingested event data cleanup, defaults to false - enabled: false - -locales: - # all locales available - # Note: for others than the default there must be a .json file available under ./config/localization. - availableLocales: ["en", "de"] - # the fallback locale is used if there is a locale requested that is not available in availableLocales - fallbackLocale: "en" - -ui: - header: - # determines the color theme of the text in the header. Currently 'dark' and 'light' are supported. 
- textColor: "dark" - # Path to the logo that is displayed in the header of Optimize. - # Path can be: - # * relative: starting from the config folder you can provide a relative path. - # * absolute: full path in the file system. - # - # Supported image formats can be found here: - # https://developer.mozilla.org/en-US/docs/Web/HTML/Element/img#Supported_image_formats - pathToLogoIcon: "logo/camunda_icon.svg" - # a hex encoded color that should be used as background color for the header. Default color is white. - backgroundColor: "#FFFFFF" - -eventBasedProcess: - # A list of userIds that are authorized to manage (Create, Update, Publish & Delete) event based processes. - authorizedUserIds: [] - # A list of groupIds that are authorized to manage (Create, Update, Publish & Delete) event based processes. - authorizedGroupIds: [] - eventImport: - # Determines whether this Optimize instance performs event based process instance import. - enabled: false - # The batch size of events being correlated to process instances of event based processes. - maxPageSize: 5000 - eventIngestion: - # Content length limit for an ingestion REST API Bulk request in bytes. - # Requests will be rejected when exceeding that limit. - # Defaults to 10MB. - maxBatchRequestBytes: 10485760 - # The maximum number of requests to the event ingestion endpoint that can be served at a time - maxRequests: 5 - eventIndexRollover: - # scheduleIntervalInMinutes specifies how frequently the rollover API should be called to see if a rollover of the - # event index is required (whether the rollover is triggered depends on the conditions specified by maxIndexSizeGB). - scheduleIntervalInMinutes: 10 - # A rollover is triggered when the size of the current event index matches or exceeds the maxIndexSizeGB threshold. - maxIndexSizeGB: 50 - -externalVariable: - import: - # Controls whether external ingested variable data is processed and imported into process instance data - enabled: false - # Determines the page size for the external variable import, that got ingested via the external variable API - maxPageSize: 10000 - variableIngestion: - # Content length limit for the external variable ingestion request in bytes. - # Requests will be rejected when exceeding that limit. Defaults to 10MB. - maxBatchRequestBytes: 10485760 - # The maximum number of requests to the external variable ingestion endpoint that can be served at a time. - maxRequests: 5 - variableIndexRollover: - # scheduleIntervalInMinutes specifies how frequently the rollover API should be called to see if a rollover of the - # external variable index is required (whether the rollover is triggered depends on the conditions specified by maxIndexSizeGB). - scheduleIntervalInMinutes: 10 - # A rollover is triggered when the size of the current external variable index matches or exceeds the maxIndexSizeGB threshold. - maxIndexSizeGB: 50 - -telemetry: - # Sets the initial property value of telemetry configuration once when it has never been enabled/disabled before. 
- # Telemetry can later be enabled/disabled in the UI by superusers - initializeTelemetry: false diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/setup-event-based-processes.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/setup-event-based-processes.md deleted file mode 100644 index fd4d98ac99f..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/setup-event-based-processes.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: setup-event-based-processes -title: "Event-based processes" -description: "Read everything about how to configure event-based processes in Optimize." ---- - -Camunda 7 only - -Event-based processes are BPMN processes that can be created inside Optimize which are based on events originating from external systems. - -Event ingestion is the process of sending event data from external systems to Camunda Optimize to support business processes that are not fully automated with Camunda 7 yet. -Based on this data, it is possible to create process models inside Optimize - called event-based processes - that can be used in reports. - -To enable this feature, refer to [event-based process configuration](#event-based-process-configuration). - -## Event based process configuration - -To make use of ingested events and create event-based process mappings for them, the event-based process feature needs to be enabled in the [Optimize configuration](./system-configuration.md). - -This also includes authorizing particular users by their userId or user groups by their groupId to be able to create so-called event-based processes that can be used by other users of Optimize once published. - -A full configuration example authorizing the user `demo` and all members of the `sales` user group to manage event-based processes, enabling the event-based process import as well as configuring a [Public API](./system-configuration.md#public-api) accessToken with the value `secret`, would look like the following: - - api: - accessToken: secret - - eventBasedProcess: - authorizedUserIds: ['demo'] - authorizedGroupIds: ['sales'] - eventImport: - enabled: true - -## Use Camunda activity event sources for event based processes - -:::note Authorization to event-based processes -When Camunda activity events are used in event-based processes, Camunda Admin Authorizations are not inherited for the event-based process. The authorization to use an event-based process is solely managed via the access management of event-based processes when [publishing an event-based process](components/userguide/additional-features/event-based-processes.md#publishing-an-event-based-process) or at any time via the [Edit Access Option](components/userguide/additional-features/event-based-processes.md#event-based-process-list---edit-access) in the event-based process List. - -Visit [Authorization Management - event-based process](./authorization-management.md#event-based-processes) for the reasoning behind this behavior. -::: - -To publish event-based processes that include [Camunda Event Sources](components/userguide/additional-features/event-based-processes.md#camunda-events), it is required to set [`engines.${engineAlias}.eventImportEnabled`](./system-configuration-platform-7.md) to `true` for the connected engine the Camunda process originates from. - -:::note Heads Up! 
-You need to [reimport data](./../migration-update/instructions.md#force-reimport-of-engine-data-in-optimize) from this engine to have all historic Camunda events available for event-based processes. Otherwise, only new events will be included. -::: - -As an example, in order to be able to create event processes based on Camunda events from the configured engine named `camunda-bpm`, the configuration of that engine needs to have the `importEnabled` configuration property as well as the `eventImportEnabled` set to `true`: - - engines: - 'camunda-bpm': - importEnabled: true - eventImportEnabled: true diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster.md deleted file mode 100644 index 27a569b66e0..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: shared-elasticsearch-cluster -title: "Shared Elasticsearch cluster" -description: "Operate multiple Optimize instances on a shared Elasticsearch cluster." ---- - -In case you have a large shared Elasticsearch cluster that you want to operate multiple Optimize instances on that are intended to run in complete isolation from each other, it is required to change the [`es.settings.index.prefix`](./system-configuration.md#index-settings) setting for each Optimize instance. - -:::note Heads Up! -Although a shared Elasticsearch cluster setup is possible, it's recommended to operate a dedicated Elasticsearch cluster per Optimize instance. - -This is due to the fact that a dedicated cluster provides the highest reliability (no resource sharing and no breaking side effects due to misconfiguration) and flexibility (e.g. Elasticsearch and/or Optimize updates can be performed independently between different Optimize setups). -::: - -The following illustration demonstrates this use case with two Optimize instances that connect to the same Elasticsearch cluster but are configured with different `es.settings.index.prefix` values. This results in different indexes and aliases created on the cluster, strictly isolating the data of both Optimize instances, so no instance accesses the data of the other instance. - -:::note Warning -Changing the value of `es.settings.index.prefix` after an instance was already running results in new indexes being created with the new prefix value. There is no support in migrating data between indexes based on different prefixes. -::: - -![Shared Elasticsearch Cluster Setup](img/shared-elasticsearch-cluster.png) diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md deleted file mode 100644 index 63368d28a7d..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: system-configuration-platform-7 -title: "Camunda 7 system configuration" -description: "Configuration for engines used to import data." ---- - -Configuration for engines used to import data. Note that you have to have -at least one engine configured at all times. You can configure multiple engines -to import data from. 
Each engine configuration should have a unique alias associated -with it and represented by `${engineAlias}`. - -Note that each connected engine must have its respective history level set to `FULL` in order to see all available data -in Optimize. Using any other history level will result in less data and/or functionality within Optimize. Furthermore, -history in a connected engine should be configured for long enough for Optimize to import it. If data is removed from an -engine before Optimize has imported it, that data will not be available in Optimize. - -| YAML Path | Default Value | Description | -| ---------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| engines.${engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | -| engines.${engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | -| engines.${engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | -| engines.${engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | -| engines.${engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | -| engines.${engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | -| engines.${engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | -| engines.${engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | -| engines.${engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | -| engines.${engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | -| engines.${engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | -| engines.${engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | - -## Camunda 7 common import settings - -Settings used by Optimize, which are common among all configured engines, such as -REST API endpoint locations, timeouts, etc. - -| YAML Path | Default Value | Description | -| --------------------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| engine-commons.connection.timeout | 0 | Maximum time in milliseconds without connection to the engine that Optimize should wait until a timeout is triggered. If set to zero, no timeout will be triggered. | -| engine-commons.read.timeout | 0 | Maximum time a request to the engine should last before a timeout triggers. A value of zero means to wait an infinite amount of time. | -| import.data.activity-instance.maxPageSize | 10000 | Determines the page size for historic activity instance fetching. | -| import.data.incident.maxPageSize | 10000 | Determines the page size for historic incident fetching. | -| import.data.process-definition-xml.maxPageSize | 2 | Determines the page size for process definition XML model fetching. Should be a low value, as large models will lead to memory or timeout problems. | -| import.data.process-definition.maxPageSize | 10000 | Determines the page size for process definition entities fetching. | -| import.data.process-instance.maxPageSize | 10000 | Determines the page size for historic decision instance fetching. | -| import.data.variable.maxPageSize | 10000 | Determines the page size for historic variable instance fetching. | -| import.data.variable.includeObjectVariableValue | true | Controls whether Optimize fetches the serialized value of object variables from the Camunda Runtime REST API. By default, this is active for backwards compatibility. If no variable plugin to handle object variables is installed, it can be turned off to reduce the overhead of the variable import.

    Note: Disabling the object variable value transmission is only effective with Camunda 7.15.0+. | -| import.data.user-task-instance.maxPageSize | 10000 | Determines the page size for historic User Task instance fetching. | -| import.data.identity-link-log.maxPageSize | 10000 | Determines the page size for historic identity link log fetching. | -| import.data.decision-definition-xml.maxPageSize | 2 | Determines the page size for decision definition xml model fetching. Should be a low value, as large models will lead to memory or timeout problems. | -| import.data.decision-definition.maxPageSize | 10000 | Determines the page size for decision definition entities fetching. | -| import.data.decision-instance.maxPageSize | 10000 | Overwrites the maximum page size for historic decision instance fetching. | -| import.data.tenant.maxPageSize | 10000 | Overwrites the maximum page size for tenant fetching. | -| import.data.group.maxPageSize | 10000 | Overwrites the maximum page size for groups fetching. | -| import.data.authorization.maxPageSize | 10000 | Overwrites the maximum page size for authorizations fetching. | -| import.data.dmn.enabled | true | Determines if the DMN/decision data, such as decision definitions and instances, should be imported. | -| import.data.user-task-worker.enabled | true | Determines if the User Task worker data, such as assignee or candidate group of a User Task, should be imported. | -| import.data.user-task-worker.metadata.includeUserMetaData | true | Determines whether Optimize imports and displays assignee user metadata, otherwise only the user id is shown. | -| import.data.user-task-worker.metadata.cronTrigger | `0 */3 * * *` | Cron expression for when to fully refresh the internal metadata cache, it defaults to every third hour. Otherwise deleted assignees/candidateGroups or metadata changes are not reflected in Optimize. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. For details on the format please refer to: Cron Expression Description Spring Cron Expression Documentation | -| import.data.user-task-worker.metadata.maxPageSize | 10000 | The max page size when multiple users or groups are iterated during the metadata refresh. | -| import.data.user-task-worker.metadata.maxEntryLimit | 100000 | The entry limit of the cache that holds the metadata, if you need more entries you can increase that limit. When increasing the limit, keep in mind to account for that by increasing the JVM heap memory as well. Please refer to the "Adjust Optimize heap size" documentation. | -| import.skipDataAfterNestedDocLimitReached | false | Some data can no longer be imported to a given document if its number of nested documents has reached the configured limit. Enable this setting to skip this data during import if the nested document limit has been reached. | -| import.elasticsearchJobExecutorThreadCount | 1 | Number of threads being used to process the import jobs per data type that are writing data to elasticsearch. | -| import.elasticsearchJobExecutorQueueSize | 5 | Adjust the queue size of the import jobs per data type that store data to elasticsearch. If the value is too large it might cause memory problems. | -| import.handler.backoff.interval | 5000 | Interval in milliseconds which is used for the backoff time calculation. | -| import.handler.backoff.max | 15 | Once all pages are consumed, the import scheduler component will start scheduling fetching tasks in increasing periods of time, controlled by "backoff" counter. 
| -| import.handler.backoff.isEnabled | true | Tells if the backoff is enabled or not. | -| import.indexType | import-index | The name of the import index type. | -| import.importIndexStorageIntervalInSec | 10 | States how often the import index should be stored to Elasticsearch. | -| import.currentTimeBackoffMilliseconds | 300000 | This is the time interval the import backs off from the current tip of the time during the ongoing import cycle. This ensures that potentially missed concurrent writes in the engine are reread going back by the amount of this time interval. | -| import.identitySync.includeUserMetaData | true | Whether to include metaData (firstName, lastName, email) when synchronizing users. If disabled only user IDs will be shown on user search and in collection permissions. | -| import.identitySync.collectionRoleCleanupEnabled | true | Whether collection role cleanup should be performed. If enabled, users that no longer exist in the identity provider will be automatically removed from collection permissions. | -| import.identitySync.cronTrigger | `0 */2 * * *` | Cron expression for when the identity sync should run, defaults to every second hour. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here.

    For details on the format please refer to:
    • [Cron Expression Description](https://en.wikipedia.org/wiki/Cron)
    • [Spring Cron Expression Documentation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronSequenceGenerator.html)
    | -| import.identitySync.maxPageSize | 10000 | The max page size when multiple users or groups are iterated during the import. | -| import.identitySync.maxEntryLimit | 100000 | The entry limit of the user/group search cache. When increasing the limit, keep in mind to account for this by increasing the JVM heap memory as well. Please refer to the "Adjust Optimize heap size" documentation on how to configure the heap size. | diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-8.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-8.md deleted file mode 100644 index 874789c2654..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-8.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -id: system-configuration-platform-8 -title: "Camunda 8 system configuration" -description: "Connection to Camunda 8." ---- - -| YAML Path | Default Value | Description | -| ----------------------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------- | -| zeebe.enabled | false | Toggles whether Optimize should attempt to import data from the connected Zeebe instance. | -| zeebe.name | zeebe-record | The name suffix of the exported Zeebe records. This must match the record-prefix configured in the exporter of the instance. | -| zeebe.partitionCount | 1 | The number of partitions configured for the Zeebe record source. | -| zeebe.maxImportPageSize | 200 | The max page size for importing Zeebe data. | diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration.md deleted file mode 100644 index c97c7ba91e7..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/system-configuration.md +++ /dev/null @@ -1,310 +0,0 @@ ---- -id: system-configuration -title: "Overview" -description: "An overview of all possible configuration options in Optimize." ---- - -All distributions of Camunda Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config` which contains a file called `environment-config.yaml` with values that override the default Optimize properties. - -:::note -When converting configuration properties to environment variables, ensure the `CAMUNDA_OPTIMIZE_` prefix is used (for example, `CAMUNDA_OPTIMIZE_API_ACCESSTOKEN`). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. -::: - -You can see a sample configuration file with all possible configuration fields -and their default values [here](service-config.yaml). - -In the following section, you will find descriptions and default values of the configuration fields with their respective YAML path. - -:::note Heads Up -For changes in the configuration to take effect, you need to restart Optimize! -::: - -### Java system properties & OS environment variable placeholders - -To externalize configuration properties from the `environment-config.yaml`, Optimize provides variable placeholder support. 
- -The order in which placeholders are resolved is the following: - -1. Java system properties -2. OS environment variables - -The placeholder format is `${VARIABLE_NAME}` and allows you to refer to a value of a Java system property or OS environment variable of your choice. -The `VARIABLE_NAME` is required to contain only lowercase or uppercase letters, digits and underscore `_` characters and shall not begin with a digit. The corresponding regular expression is `([a-zA-Z_]+[a-zA-Z0-9_]*)`. - -The following example illustrates the usage: - -``` -security: - auth: - token: - secret: ${AUTH_TOKEN_SECRET} -``` - -Given this variable is set before Optimize is started, for example on Unix systems with: - -``` -export AUTH_TOKEN_SECRET=sampleTokenValue -``` - -The value will be resolved at startup to `sampleTokenValue`. - -However, if the same variable is provided at the same time as a Java system property, for example via passing `-DAUTH_TOKEN_SECRET=othertokenValue` to the Optimize startup script: - -``` -./optimize-startup.sh -DAUTH_TOKEN_SECRET=othertokenValue -``` - -The value would be resolved to `othertokenValue` as Java system properties have precedence over OS environment variables. - -:::note -For Windows users, to pass Java system properties to the provided Windows Batch script `optimize-startup.bat`, you have to put them into double quotes when using the `cmd.exe` shell, as shown below. -::: - -``` -optimize-startup.bat "-DAUTH_TOKEN_SECRET=othertokenValue" -``` - -For the Windows Powershell in three double quotes: - -``` -./optimize-startup.bat """-DAUTH_TOKEN_SECRET=othertokenValue""" -``` - -#### Default values - -For variable placeholders it's also possible to provide default values using the following format: `${VARIABLE_NAME:DEFAULT_VALUE}`. The `DEFAULT_VALUE` can contain any character except `}`. - -The following example illustrates the usage: - -``` -security: - auth: - token: - secret: ${AUTH_TOKEN_SECRET:defaultSecret} -``` - -### Security - -These values control mechanisms of Optimize related security, e.g. security headers and authentication. - -| YAML Path | Default Value | Description | -| ------------------------------------------------ | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| | -| security.auth.token.lifeMin | 60 | Optimize uses token-based authentication to keep track of which users are logged in. Define the lifetime of the token in minutes. | -| security.auth.token.secret | null | Optional secret used to sign authentication tokens, it's recommended to use at least a 64-character secret. If set to `null` a random secret will be generated with each startup of Optimize. | -| security.auth.superUserIds | [ ] | List of user IDs that are granted full permission to all collections, reports, and dashboards.

    Note: For reports, these users are still required to be granted access to the corresponding process/decision definitions in Camunda 7 Admin. See [Authorization Management](./authorization-management.md). | -| security.auth.superGroupIds | [ ] | List of group IDs that are granted full permission to all collections, reports, and dashboards. All members of the groups specified will have superuser permissions in Optimize.

    Note: For reports, these groups are still required to be granted access to the corresponding process/decision definitions in Camunda 7 Admin. See [Authorization Management](./authorization-management.md). | -| security.responseHeaders.HSTS.max-age | 63072000 | HTTP Strict Transport Security (HSTS) is a web security policy mechanism which helps to protect websites against protocol downgrade attacks and cookie hijacking. This field defines the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. If you set the number to a negative value no HSTS header is sent. | -| security.responseHeaders.HSTS.includeSubDomains | true | HTTP Strict Transport Security (HSTS) is a web security policy mechanism which helps to protect websites against protocol downgrade attacks and cookie hijacking. If this optional parameter is specified, this rule applies to all the site’s subdomains as well. | -| security.responseHeaders.X-XSS-Protection | 1; mode=block | This header enables the cross-site scripting (XSS) filter in your browser. Can have one of the following options:
    • `0`: Filter disabled.
    • `1`: Filter enabled. If a cross-site scripting attack is detected, in order to stop the attack, the browser will sanitize the page.
    • `1; mode=block`: Filter enabled. Rather than sanitize the page, when an XSS attack is detected, the browser will prevent rendering of the page.
    • `1; report=http://[YOURDOMAIN]/your_report_URI`: Filter enabled. The browser will sanitize the page and report the violation. This is a Chromium function utilizing CSP violation reports to send details to a URI of your choice.
    | -| security.responseHeaders.X-Content-Type-Options | true | Setting this header will prevent the browser from interpreting files as a different MIME type to what is specified in the Content-Type HTTP header (e.g. treating text/plain as text/css). | -| security.responseHeaders.Content-Security-Policy | base-uri 'self' | A Content Security Policy (CSP) has significant impact on the way browsers render pages. By default Optimize uses the base-uri directive which restricts the URLs that can be used to the Optimize pages. Find more details in [Mozilla's Content Security Policy Guide](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy). | - -### Public API - -This section focuses on common properties related to the Public REST API of Optimize. It is -mandatory to configure one of the values below if the Public REST API is to be used. If neither is -configured an error will be thrown and all requests to the Public API will get rejected. If both are configured then -the `jwtSetUri` will take precedence and the `accessToken` will be ignored. - -| YAML Path | Default Value | Description | -| --------------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -| api.accessToken | null | Secret static shared token to be provided to the secured REST API in the authorization header. Will be ignored if `api.jwtSetUri` is also set. | -| api.jwtSetUri | null | Complete URI to get public keys for JWT validation, e.g. `https://weblogin.cloud.company.com/.well-known/jwks.json` | -| api.audience | optimize | Optimize tries to match this with the `aud` field contained in the JWT token. Only used when `jwtSetUri` is set. | - -### Container - -Settings related to embedded Jetty container, which serves the Optimize application. - -| YAML Path | Default Value | Description | -| -------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| container.host | localhost | A host name or IP address to identify a specific network interface on which to listen. | -| container.contextPath | null | Allows you to specify a custom context path. If set, must start with a leading '/' | -| container.ports.http | 8090 | A port number that will be used by Optimize to process HTTP connections. If set to null, or left empty, HTTP connections won't be accepted. | -| container.ports.https | 8091 | A port number that will be used by Optimize to process secure HTTPS connections. | -| container.ports.actuator | 8092 | A port number that will be used by Optimize's Actuator management server, defaults to 8092 | -| container.keystore.location | keystore.jks | HTTPS requires an SSL Certificate. When you generate an SSL Certificate, you are creating a keystore file and a keystore password for use when the browser interface connects. This field specifies the location of this keystore file. | -| container.keystore.password | optimize | Password of keystore file. | -| container.status.connections.max | 10 | Maximum number of web socket connections accepted for status report. | -| container.accessUrl | null | Optional URL to access Optimize (used for links to Optimize in e.g. alert emails). If no value specified the container host and port are used instead. 
| -| container.http2Enabled | false | Enable use of HTTP/2 for Optimize | -| container.enableSniCheck | true | Determines whether SNI checking should be enabled. | - -### Elasticsearch - -Settings related to Elasticsearch. - -#### Connection settings - -Everything that is related to building the connection to Elasticsearch. - -Please note that you can define a number of connection points -in a cluster. Therefore, everything that is under `es.connection.nodes` is a list of nodes Optimize can connect to. -If you have built an Elasticsearch cluster with several nodes it is recommended to define several connection points so that -if one node fails, Optimize is still able to talk to the cluster. - -| YAML Path | Default Value | Description | -| --------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| es.connection.timeout | 10000 | Maximum time without connection to Elasticsearch that Optimize should wait until a timeout triggers. | -| es.connection.responseConsumerBufferLimitInMb | 100 | Maximum size of the Elasticsearch response consumer heap buffer. This can be increased to resolve errors from Elasticsearch relating to the entity content being too long | -| es.connection.pathPrefix | | The path prefix under which Elasticsearch is available. | -| es.connection.nodes[*].host | localhost | The address/hostname under which the Elasticsearch node is available. | -| es.connection.nodes[*].httpPort | 9200 | A port number used by Elasticsearch to accept HTTP connections. | -| es.connection.proxy.enabled | false | Whether an HTTP proxy should be used for requests to Elasticsearch. | -| es.connection.proxy.host | null | The proxy host to use, must be set if es.connection.proxy.enabled = true. | -| es.connection.proxy.port | null | The proxy port to use, must be set if es.connection.proxy.enabled = true. | -| es.connection.proxy.sslEnabled | false | Whether this proxy is using a secured connection (HTTPS). | -| es.connection.skipHostnameVerification | false | Determines whether the hostname verification should be skipped. | - -#### Index settings - -| YAML Path | Default Value | Description | -| ---------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| es.settings.index.prefix | optimize | The prefix prepended to all Optimize index and alias names. Custom values allow to operate multiple isolated Optimize instances on one Elasticsearch cluster.

    NOTE: Changing this after Optimize has already been run will create new, empty indexes. | -| es.settings.index.number_of_replicas | 1 | How often data should be replicated to handle node failures. | -| es.settings.index.number_of_shards | 1 | How many shards should be used in the cluster for process instance and decision instance indices. All other indices will be made up of a single shard.

    Note: this property only applies the first time Optimize is started and the schema/mapping is deployed on Elasticsearch. If you want this property to take effect again, you need to delete all indices (and with that all data) and restart Optimize. | -| es.settings.index.refresh_interval | 2s | How long Elasticsearch waits until the documents are available for search. A positive value defines the duration in seconds. A value of -1 means that a refresh needs to be done manually. | -| es.settings.index.nested_documents_limit | 10000 | Optimize uses nested documents to store list information such as activities or variables belonging to a process instance. This setting defines the maximum number of activities/variables/incidents that a single process instance can contain. This limit helps to prevent out of memory errors and should be used with care. For more information, please refer to the Elasticsearch documentation on this topic. | - -#### Elasticsearch Security - -Define a secured connection to be able to communicate with a secured Elasticsearch instance. - -| YAML Path | Default Value | Description | -| --------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| es.security.username | | The basic authentication (x-pack) username. | -| es.security.password | | The basic authentication (x-pack) password. | -| es.security.ssl.enabled | false | Used to enable or disable TLS/SSL for the HTTP connection. | -| es.security.ssl.certificate | | The path to a PEM encoded file containing the certificate (or certificate chain) that will be presented to clients when they connect. | -| es.security.ssl.certificate_authorities | [ ] | A list of paths to PEM encoded CA certificate files that should be trusted, e.g. ['/path/to/ca.crt'].

    Note: if you are using a public CA that is already trusted by the Java runtime, you do not need to set the certificate_authorities. | -| es.security.ssl.selfSigned | false | Used to specify that the certificate was self-signed. | - -#### Elasticsearch backup settings - -| YAML path | Default value | Description | -| ------------------------ | ------------- | ------------------------------------------------------------------------ | -| es.backup.repositoryName | "" | The name of the snapshot repository to be used to back up Optimize data. | - -### Email - -Settings for the email server to send email notifications, e.g. when an alert is triggered. - -| YAML Path | Default Value | Description | -| ------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------- | -| email.enabled | false | A switch to enable the email sending functionality. | -| email.address | | Email address that can be used to send notifications. | -| email.hostname | | The smtp server name. | -| email.port | 587 | The smtp server port. This one is also used as SSL port for the security connection. | -| email.checkServerIdentity | false | A switch to control checking the identity of the email server. | -| email.authentication.enabled | | A switch to enable email server authentication. | -| email.authentication.username | | Username of your smtp server. | -| email.authentication.password | | Corresponding password to the given user of your smtp server. | -| email.authentication.securityProtocol | | States how the connection to the server should be secured. Possible values are 'NONE', 'STARTTLS' or 'SSL/TLS'. | - -### Digest - -Settings influencing the process digest feature. - -| YAML Path | Default value | Description | -| ------------------ | --------------- | -------------------------------------------------------------------- | -| digest.cronTrigger | 0 0 9 \* \* MON | Cron expression to define when enabled email digests are to be sent. | - -### Alert Notification Webhooks - -Camunda 7 only - -Settings for webhooks which can receive custom alert notifications. You can configure multiple webhooks which will be available to select from when creating or editing alerts. Each webhook configuration should have a unique human readable name which will appear in the Optimize UI. - -| YAML Path | Default Value | Description | -| -------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| webhookAlerting.webhooks.${webhookName}.url | | The URL of the webhook. | -| webhookAlerting.webhooks.${webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | -| webhookAlerting.webhooks.${webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. 
| -| webhookAlerting.webhooks.${webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | - -### History Cleanup Settings - -Settings for automatic cleanup of historic process/decision instances based on their end time. - -:::note -Two types of history cleanup are available for Camunda 8 users at this time - process data cleanup and external variable cleanup. For more information, see [History cleanup](/optimize/self-managed/optimize-deployment/configuration/history-cleanup.md). -::: - -| YAML Path | Default Value | Description | -| -------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying Elasticsearch database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | -| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | -| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | -| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | -| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. 
| -| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.ttl | | Time to live to use for process instances of the process definition with the ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the ${key}. | -| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | -| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its ${key}. | -| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.${key}.ttl | | Time to live to use for decision instances of the decision definition with the ${key}. | -| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | - -### Localization - -Define the languages that can be used by Optimize. - -| YAML Path | Default Value | Description | -| ----------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| localization.availableLocales | ['en','de'] | All locales available in the Optimize Frontend.

    Note: for languages other than the default there must be a `.json` file available under ./config/localization. | -| localization.fallbackLocale | 'en' | The fallback locale used if there is a locale requested that is not available in availableLocales. The fallbackLocale is required to be present in localization.availableLocales. | - -### UI Configuration - -Customize the Optimize UI e.g. by adjusting the logo, head background color etc. - -| YAML Path | Default Value | Description | -| --------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| ui.logoutHidden | false | Setting this property to true will hide the logout option from the user menu. This is useful if you are using single sign-on and it is not possible for users to logout. | - -### External Variable Ingestion REST API Configuration - -| YAML Path | Default Value | Description | -| ---------------------------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| externalVariable.import.enabled | false | Controls whether external ingested variable data is processed and imported to process instance data. | -| externalVariable.import.maxPageSize | 10000 | Determines the page size for the import of ingested external variable data to process instance data. | -| externalVariable.variableIndexRollover.maxIndexSizeGB | 50 | Specifies the maximum size for the external variable index. When shards get too large, query performance can slow down and rolling over an index can bring an improvement. Using this configuration, a rollover will occur when the current external variable index size matches or exceeds the maxIndexSizeGB threshold. | -| externalVariable.variableIndexRollover.scheduleIntervalInMinutes | 10 | The interval in minutes at which to check whether the conditions for a rollover of the external variable index are met, triggering one if required. This value should be greater than 0. | -| externalVariable.variableIngestion.maxBatchRequestBytes | 10485760 | Content length limit for a variable ingestion REST API bulk request in bytes. Requests will be rejected when exceeding that limit. Defaults to 10MB. In case this limit is raised you should carefully tune the heap memory accordingly, see Adjust Optimize heap size on how to do that. | -| externalVariable.variableIngestion.maxRequests | 5 | The maximum number of variable ingestion requests that can be serviced at any given time. | - -### Telemetry Configuration - -Camunda 7 only - -Configuration of initial telemetry settings. 
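As a quick reference, enabling telemetry from the first startup only requires the single flag listed in the table below. The following `environment-config.yaml` snippet is a minimal sketch that maps the YAML path `telemetry.initializeTelemetry` to its nested form:

```yaml
# environment-config.yaml - minimal sketch: enable telemetry at startup.
# Superusers can still toggle this later via the UI.
telemetry:
  initializeTelemetry: true
```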
- -| YAML Path | Default Value | Description | -| ----------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| telemetry.initializeTelemetry | false | Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Elasticsearch version.

    Legal note: Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. | - -### Other - -Settings of plugin subsystem serialization format, variable import, Camunda endpoint. - -| YAML Path | Default Value | Description | -| --------------------------------------------- | -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| plugin.directory | ./plugin | Defines the directory path in the local Optimize file system which should be checked for plugins. | -| plugin.variableImport.basePackages | | Look in the given base package list for variable import adaption plugins. If empty, the import is not influenced. | -| plugin.authenticationExtractor.basePackages | | Looks in the given base package list for authentication extractor plugins. If empty, the standard Optimize authentication mechanism is used. | -| plugin.engineRestFilter.basePackages | | Look in the given base package list for engine rest filter plugins. If empty, the REST calls are not influenced. | -| plugin.decisionInputImport.basePackages | | Look in the given base package list for Decision input import adaption plugins. If empty, the import is not influenced. | -| plugin.decisionOutputImport.basePackages | | Look in the given base package list for Decision output import adaption plugins. If empty, the import is not influenced. | -| plugin.elasticsearchCustomHeader.basePackages | | Look in the given base package list for Elasticsearch custom header plugins. If empty, Elasticsearch requests are not influenced. | -| serialization.engineDateFormat | yyyy-MM-dd'T'HH:mm:ss.SSSZ | Define a custom date format that should be used (should be the same as in the engine). | -| entity.authorizedEditors | 'all' | Define which users are authorized to Create, Edit, Copy and Delete Optimize entities outside of a collection. Available options: 'all', 'superuser', 'none'. | -| entity.kpiRefreshInterval | 600 | Define the interval in which the kpi import scheduler should run in seconds | -| export.csv.authorizedUsers | 'all' | Define which users are authorized to download CSVs. Available options: 'all', 'superuser', 'none'. | -| export.csv.limit | 1000 | Maximum number of records returned by CSV export.

    Note: Increasing this value comes at a memory cost for the Optimize application that varies based on the actual data. As a rough guideline, an export of a 50000 raw data report records containing 8 variables on each instance can cause temporary heap memory peaks of up to ~200MB with the actual CSV file having a size of ~20MB. Please adjust the heap memory accordingly, see [Adjust Optimize heap size](./getting-started.md#adjust-optimize-heap-size) on how to do that. | -| export.csv.delimiter | , | The delimiter used for the CSV export. The value defaults to a comma, however other common CSV delimiters such as semicolons (";") and tabs ("\\t") can also be used. | -| sharing.enabled | true | Enable/disable the possibility to share reports and dashboards. | diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/telemetry.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/telemetry.md deleted file mode 100644 index f9691209c35..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/telemetry.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -id: telemetry -title: "Telemetry" -description: "Learn about Optimize telemetry, what data is collected and why." ---- - -Camunda 7 only - -At Camunda, we strive to offer excellent user experience at a high and stable level. On a strict opt-in basis, we are looking to collect environment and usage data to further improve the user experience for you. These insights help us to understand typical environment setups and product usage patterns and will be used to inform product improvement decisions to your benefit. - -The telemetry reporting is disabled by default and only collects and sends data after you explicitly enable the telemetry configuration flag. The configuration can be changed by `superusers` at any time during runtime via a configuration menu option in the UI. - -The collected data will be sent once every 24 hours via HTTPS, and it is ensured that the performance of Optimize will not be negatively affected by the reporting, even if the telemetry reporter faces unexpected errors. Furthermore, no data will be collected and sent when you stop Optimize. - -## Collected data - -Below you find the full list of data we want to collect, followed by a real-world example. On a conceptual level, they can be categorized into general data and meta/environment data. - -### General data - -The general data category contains information about your Optimize installation: - -| Item | Explanation | -| --------------- | -------------------------------------------------------------------- | -| Installation | A unique installation ID stored in Optimize's Elasticsearch database | -| Product name | The name of the product (i.e. `Camunda Optimize`) | -| Product version | The version of Optimize you are running | -| Product edition | The edition of the product (i.e. "enterprise") | - -### Meta/environment data - -The meta/environment data category contains information about the environmental setup: - -| Item | Explanation | -| -------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Database vendor | The database vendor (i.e. 
`Elasticsearch`) | -| Database version | The version of Elasticsearch Optimize is using | -| License Key: Customer name | The customer name that appears in the license key you are using with this Optimize installation | -| License Key: Type | The type of license key used with this Optimize installation | -| License Key: Valid Until | The expiry date of the license key used with this Optimize installation | -| License Key: Unlimited | A flag that indicates whether this license key is unlimited | -| License Key: Features | A map which includes information on which products can be used with this license key | -| License Key: Raw | The raw license key string without signature. We add this just in case some properties are listed in the raw license key that have not yet been mapped to other fields (eg. the features map). | -| Engine Installation IDs | A list containing the ID of each engine connected to this Optimize installation | - -### Example - -Below is an example payload including all telemetry data currently sent by Optimize. - -``` -{ - "installation": "7b86edba-fcb7-11ea-adc1-0242ac120002", - "product": { - "name": "Camunda Optimize", - "version": "3.2.0", - "edition": "enterprise", - "internals": { - "database": { - "vendor": "elasticsearch", - "version": "7.0.0" - }, - "license-key": { - "customer": "a customer name", - "type": "UNIFIED", - "valid-until": "2025-01-01", - "unlimited": "false", - "features": { - "camundaBPM": "false", - "optimize": "true", - "cawemo": "false" - }, - "raw": - "customer = a customer name; expiryDate = 2025-01-01; optimize: true;" - }, - "engine-installation-ids": - [ "8343cc7a-8ad1-42d4-97d2-43452c0bdfa3", - "22607b92-fcb8-11ea-adc1-0242ac120002" ] - } - } -} -``` - -## How to enable telemetry - -### Optimize configuration - -You can enable telemetry before starting Optimize by setting the `initializeTelemetry` flag in your configuration file to `true`. Refer to the [configuration section](./system-configuration.md#telemetry-configuration) for more details. - -### UI - -Once Optimize is running, telemetry can be enabled (or disabled) via a modal accessible from the user menu. Only superusers are authorized to access this menu and alter the telemetry configuration. - -## Legal note - -Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. - -Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/user-management.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/user-management.md deleted file mode 100644 index 001faa9d4cc..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/user-management.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: user-management -title: "User access management" -description: "Define which users have access to Optimize." ---- - -Camunda 7 only - -:::note Good to know! - -Providing Optimize access to a user just enables them to log in to Optimize. To be able -to create reports, the user also needs to have permission to access the engine data. 
To see -how this can be done, refer to the [Authorization Management](./authorization-management.md) section. -::: - -You can use the credentials from the Camunda 7 users to access Optimize. However, for the users to gain access to Optimize, they need to be authorized. This is not done in Optimize itself, but needs to be configured in the Camunda 7 and can be achieved on different levels with different options. If you do not know how authorization in Camunda works, visit the [authorization service documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/). - -When defining an authorization to grant Optimize access, the most important aspect is that you grant access on resource type application with resource ID "optimize" (or "\*" if you want to grant access to all applications including Optimize). The permissions you can set, are either `ALL` or `ACCESS`. They are treated equally, so there is no difference between them. - -Authorizing users in admin can be done as follows: - -![Grant Optimize Access in Admin](img/Admin-GrantAccessAuthorizations.png) - -1. The first option allows access for Optimize on a global level. With this setting all users are allowed to log into Camunda Optimize. -2. The second option defines the access for a single user. The user `Kermit` can now log into Camunda Optimize. -3. The third option provides access on group level. All users belonging to the group `optimize-users` can log into Camunda Optimize. - -It is also possible to revoke the Optimize authorization for specific users or groups. For instance, you can define Optimize on a global scale, but exclude the `engineers` group: - -![Revoke Optimize Access for group 'engineers' in Admin](img/Admin-RevokeGroupAccess.png) - -When Optimize is configured to load data from multiple instances of Camunda 7, then it suffices to be granted by one instance for the user to be able to log into Optimize. Notice that, like for all authorizations, grants have precedence over revokes. That is, if there is a Camunda 7 instance that grants access to optimize to a user, the user can log in even if another instance revokes access to Optimize for this user. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/webhooks.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/webhooks.md deleted file mode 100644 index 68966a7d3bf..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/configuration/webhooks.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: webhooks -title: "Webhooks" -description: "Read about how to configure alert notification webhooks for alerts on custom systems." ---- - -Camunda 7 only - -In addition to email notifications, you can configure webhooks in Optimize to receive alert notifications on custom systems. This page describes how to set up your webhook configurations using the example of a simple Slack app. - -## The alert webhook configuration - -You can configure a list of webhooks in the Optimize configuration, see [Alert Notification Webhooks](./system-configuration.md#alert-notification-webhooks) for available configuration properties. - -### Alert webhook payload placeholders - -The webhook request body can be customized to integrate with any string encoded HTTP endpoint to your needs. -In order to make use of certain properties of an alert, you can make use of placeholders within the payload string. 
- -| Placeholder | Sample Value | Description | -| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| ALERT_MESSAGE | Camunda Optimize - Report Status
    Alert name: Too many incidents
    Report name: Count of incidents
    Status: Given threshold [60.0] was exceeded. Current value: 186.0. Please check your Optimize report for more information!
    http://optimize.myorg:8090/#/report/id/ | This is the full alert message that is also used in the email alert content. | -| ALERT_NAME | Some Alert | The name given to the alert when it was created. | -| ALERT_REPORT_LINK | http://optimize.myorg/#/report/id/ | The direct link to the report the alert is based on. | -| ALERT_CURRENT_VALUE | 186.0 | The current value of the number report the alert is based on. | -| ALERT_THRESHOLD_VALUE | 60.0 | The configured alert threshold value. | -| ALERT_THRESHOLD_OPERATOR | > | The threshold operator configured for the alert. | -| ALERT_TYPE | new | The type of the alert notification. Can be one of:
    `new` - the threshold was just exceeded and the alert was triggered
    `reminder` - the threshold was exceeded previously already and this is a reminder notification
    `resolved` - the threshold is met again and the alert is resolved | -| ALERT_INTERVAL | 5 | The configured interval at which the alert condition is checked. | -| ALERT_INTERVAL_UNIT | seconds | The unit for the configured alert interval. Can be one of: seconds, minutes, hours, days, weeks, months | - -The placeholders can be used within the `defaultPayload` property of each webhook configuration: - -```yaml -webhookAlerting: - webhooks: - 'myWebhook': - ... - defaultPayload: 'The alert {{ALERT_NAME}} with the threshold of `{{ALERT_THRESHOLD_OPERATOR}}{{ALERT_THRESHOLD_VALUE}}` was triggered as *{{ALERT_TYPE}}*.' -``` - -### Example Webhook - Slack - -If your organization uses Slack, you can set up Optimize so that it can use a webhook to send alert notifications to a Slack channel of your choice. - -To configure the webhook in Optimize's `environment-config`, you first need to create a new Slack app for your organization's Slack workspace, as described in [Slack's own documentation here](https://api.slack.com/messaging/webhooks). You only need to follow the steps until you have your webhook URL - no need to write any code to use the webhook to post any messages, Optimize will take care of this for you. Once you have followed these steps, you can copy the Webhook URL from Slack's "Webhook URLs for Your Workspace" section into the configuration as follows: - -```bash -webhookAlerting: - webhooks: - # Name of the webhook, must be unique. - 'mySlackWebhook': - # URL of the webhook which can receive alerts from Optimize - url: 'https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX' - # Map of the headers of the request to the sent to the webhook URL - headers: - 'Content-type': 'application/json' - # HTTP Method for the webhook request - httpMethod: 'POST' - # The default payload structure with the alertMessagePlaceholder {{ALERT_MESSAGE}} for the alert text. - # Optimize will replace this placeholder with the content of the alert message. - defaultPayload: '{"text": "The alert *{{ALERT_NAME}}* was triggered as *{{ALERT_TYPE}}*, you can view the report <{{ALERT_REPORT_LINK}}|here>."}' -``` - -All configuration parameters are described in the [Alert Notification Webhooks Configuration Section](./system-configuration.md#alert-notification-webhooks). - -With this configuration, when you create an alert for a report in Optimize, `mySlackWebhook` will appear in the targets selection dropdown in the alert creation modal. Once you have selected the webhook from the dropdown and saved the alert, Optimize will send a message to the channel you have selected when creating your Slack app whenever an alert notification is triggered. The content of the message is the same as the content of the alert email notifications. One alert may send either or both email and webhook notifications. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/install-and-start.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/install-and-start.md deleted file mode 100644 index f5b3a75c618..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/install-and-start.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -id: install-and-start -title: "Installation" -description: "Install and configure Optimize Self-Managed." ---- - -## Camunda 8 stack - -Please refer to the [Installation Guide]($docs$/self-managed/platform-deployment/overview/) for details on how to install Optimize as part of a Camunda 8 stack. 
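If you deploy the Camunda 8 stack with the official Helm chart, Optimize is usually switched on through the chart values. The snippet below is only an illustrative sketch — the `optimize.enabled` key is an assumption about the chart's values file, so verify the exact option names in the Installation Guide linked above.

```yaml
# values.yaml - illustrative sketch only; the key name is assumed.
# Check the Camunda 8 Helm chart documentation for the authoritative values.
optimize:
  enabled: true
```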
- -## Camunda 7 Enterprise stack - -Camunda 7 only - -This document describes the installation process of Camunda Optimize and how to connect it to a Camunda 7 stack, as well as various configuration possibilities available after the initial installation. - -Before proceeding with the installation, read the article about [supported environments]($docs$/reference/supported-environments). - -### Local installation - -If you wish to run Camunda Optimize natively on your hardware, you can download one of the two offered distributions and run them. The demo distribution is especially useful for trying out Camunda Optimize for the first time; it also comes with a simple demo process to explore the functionality. - -#### Prerequisites - -If you intend to run Optimize on your local machine, ensure you have a supported JRE (Java Runtime Environment) installed; refer to the [Java Runtime]($docs$/reference/supported-environments#camunda-platform-8-self-managed) section to see which runtimes are supported. - -#### Demo Distribution with Elasticsearch - -The Optimize Demo distribution comes with an Elasticsearch instance. The supplied Elasticsearch server is not customized or tuned by Camunda in any manner. It is intended to make the process of trying out Optimize as easy as possible. The only requirement in addition to the demo distribution itself is a running engine (ideally on localhost). - -To install the demo distribution containing Elasticsearch, download the archive with the latest version from the [download page](https://docs.camunda.org/enterprise/download/#camunda-optimize) and extract it to the desired folder. After that, start Optimize by running the script `optimize-demo.sh` on Linux and Mac: - -```bash -./optimize-demo.sh -``` - -or `optimize-demo.bat` on Windows: - -```batch -.\optimize-demo.bat -``` - -The script ensures that a local version of Elasticsearch is started and waits until it has become available. Then, it starts Optimize, ensures it is running, and automatically opens a tab in a browser to make it very convenient for you to try out Optimize. - -In case you need to start an Elasticsearch instance only, without starting Optimize (e.g. to perform a reimport), you can use the `elasticsearch-startup.sh` script: - -```bash -./elasticsearch-startup.sh -``` - -or `elasticsearch-startup.bat` on Windows: - -```batch -.\elasticsearch-startup.bat -``` - -#### Production distribution without Elasticsearch - -This distribution is intended to be used in production. To install it, first [download](https://docs.camunda.org/enterprise/download/#camunda-optimize) the production archive, which contains all the required files to start up Camunda Optimize without Elasticsearch. After that, [configure the Elasticsearch connection](./configuration/getting-started.md#elasticsearch-configuration) to connect to your pre-installed Elasticsearch instance and [configure the Camunda 7 connection](./configuration/getting-started.md#camunda-platform-7-configuration) to connect Optimize to your running engine. You can then start your Optimize instance by running the script `optimize-startup.sh` on Linux and Mac: - -```bash -./optimize-startup.sh -``` - -or `optimize-startup.bat` on Windows: - -```batch -.\optimize-startup.bat -``` - -### Dockerized installation - -The Optimize Docker images can be used in production. They are hosted on our dedicated Docker registry and are available only to enterprise customers who have purchased Optimize.
You can browse the available images in our [Docker registry](https://registry.camunda.cloud) after logging in with your credentials. - -Make sure to log in correctly: - -``` -$ docker login registry.camunda.cloud -Username: your_username -Password: ****** -Login Succeeded -``` - -After that, [configure the Elasticsearch connection](./configuration/getting-started.md#elasticsearch-configuration) to connect to your pre-installed Elasticsearch instance and [configure the Camunda connection](./configuration/getting-started.md#camunda-platform-7-configuration) to connect Optimize to your running engine. For very simple use cases with only one Camunda Engine and one Elasticsearch node, you can use environment variables instead of mounting configuration files into the Docker container: - -#### Getting started with the Optimize Docker image - -##### Full local setup - -To start the Optimize Docker image and connect to an already locally running Camunda 7 as well as Elasticsearch instance you could run the following command: - -``` -docker run -d --name optimize --network host \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -##### Connect to remote Camunda 7 and Elasticsearch - -If, however, your Camunda 7 as well as Elasticsearch instance reside on a different host, you may provide their destination via the corresponding environment variables: - -``` -docker run -d --name optimize -p 8090:8090 -p 8091:8091 \ - -e OPTIMIZE_CAMUNDABPM_REST_URL=http://yourCamBpm.org/engine-rest \ - -e OPTIMIZE_ELASTICSEARCH_HOST=yourElasticHost \ - -e OPTIMIZE_ELASTICSEARCH_HTTP_PORT=9200 \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -#### Available environment variables - -There is only a limited set of configuration keys exposed via environment variables. These mainly serve the purpose of testing and exploring Optimize. For production configurations, we recommend following the setup in documentation on [configuration using a `environment-config.yaml` file](#configuration-using-a-yaml-file). - -The most important environment variables you may have to configure are related to the connection to the Camunda 7 REST API, as well as Elasticsearch: - -- `OPTIMIZE_CAMUNDABPM_REST_URL`: The base URL that will be used for connections to the Camunda Engine REST API (default: `http://localhost:8080/engine-rest`) -- `OPTIMIZE_CAMUNDABPM_WEBAPPS_URL`: The endpoint where to find the Camunda web apps for the given engine (default: `http://localhost:8080/camunda`) -- `OPTIMIZE_ELASTICSEARCH_HOST`: The address/hostname under which the Elasticsearch node is available (default: `localhost`) -- `OPTIMIZE_ELASTICSEARCH_HTTP_PORT`: The port number used by Elasticsearch to accept HTTP connections (default: `9200`) -- `CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_USERNAME`: The username for authentication in environments where a secured Elasticsearch connection is configured. -- `CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_PASSWORD`: The password for authentication in environments where a secured Elasticsearch connection is configured. - -A complete sample can be found within [Connect to remote Camunda 7 and Elasticsearch](#connect-to-remote-camunda-platform-7-and-elasticsearch). 
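If your Elasticsearch instance is secured, the security variables listed above can be combined with the remote connection sample. The following is only a sketch; the host names and credentials are placeholders that you would replace with the values of your own setup:

```
# Host names and credentials below are placeholders; replace them with your own values
docker run -d --name optimize -p 8090:8090 -p 8091:8091 \
  -e OPTIMIZE_CAMUNDABPM_REST_URL=http://yourCamBpm.org/engine-rest \
  -e OPTIMIZE_ELASTICSEARCH_HOST=yourElasticHost \
  -e OPTIMIZE_ELASTICSEARCH_HTTP_PORT=9200 \
  -e CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_USERNAME=yourElasticUser \
  -e CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_PASSWORD=yourElasticPassword \
  registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}}
```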
- -Furthermore, there are also environment variables specific to the [event-based process](components/userguide/additional-features/event-based-processes.md) feature you may make use of: - -- `OPTIMIZE_CAMUNDA_BPM_EVENT_IMPORT_ENABLED`: Determines whether this instance of Optimize should convert historical data to event data usable for event-based processes (default: `false`) -- `OPTIMIZE_EVENT_BASED_PROCESSES_USER_IDS`: An array of user ids that are authorized to administer event-based processes (default: `[]`) -- `OPTIMIZE_EVENT_BASED_PROCESSES_IMPORT_ENABLED`: Determines whether this Optimize instance performs event-based process instance import. (default: `false`) - -Additionally, there are also runtime related environment variables such as: - -- `OPTIMIZE_JAVA_OPTS`: Allows you to configure/overwrite Java Virtual Machine (JVM) parameters; defaults to `-Xms1024m -Xmx1024m -XX:MetaspaceSize=256m -XX:MaxMetaspaceSize=256m`. - -In case you want to make use of the Optimize Public API, you can also set **one** of the following variables: - -- `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI` Complete URI to get public keys for JWT validation, e.g. `https://weblogin.cloud.company.com/.well-known/jwks.json`. For more details see [Public API Authorization](../../apis-tools/optimize-api/optimize-api-authorization.md). -- `OPTIMIZE_API_ACCESS_TOKEN` secret static shared token to be provided to the secured REST API on access in the authorization header. Will - be ignored if `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI` is also set. For more details see [Public API - Authorization](../../apis-tools/optimize-api/optimize-api-authorization.md). - -You can also adjust logging levels using environment variables as described in the [logging configuration](./configuration/logging.md). - -#### License key file - -If you want the Optimize Docker container to automatically recognize your [license key file](./configuration/license.md), you can use standard [Docker means](https://docs.docker.com/storage/volumes/) to make the file with the license key available inside the container. Replacing the `{{< absolutePathOnHostToLicenseFile >}}` with the absolute path to the license key file on your host can be done with the following command: - -``` -docker run -d --name optimize -p 8090:8090 -p 8091:8091 \ - -v {{< absolutePathOnHostToLicenseFile >}}:/optimize/config/OptimizeLicense.txt:ro \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -#### Configuration using a yaml file - -In a production environment, the limited set of [environment variables](#available-environment-variables) is usually not enough so that you want to prepare a custom `environment-config.yaml` file. Refer to the [Configuration](./configuration/system-configuration.md) section of the documentation for the available configuration parameters. - -You need to mount this configuration file into the Optimize Docker container to apply it. 
Replacing the `{{< absolutePathOnHostToConfigurationFile >}}` with the absolute path to the `environment-config.yaml` file on your host can be done using the following command: - -``` -docker run -d --name optimize -p 8090:8090 -p 8091:8091 \ - -v {{< absolutePathOnHostToConfigurationFile >}}:/optimize/config/environment-config.yaml:ro \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -In managed Docker container environments like [Kubernetes](https://kubernetes.io/), you may set this up using [ConfigMaps](https://kubernetes.io/docs/concepts/configuration/configmap/). - -### Usage - -You can start using Optimize right away by opening the following URL in your browser: [http://localhost:8090](http://localhost:8090) - -Then, you can use the users from the Camunda 7 to log in to Optimize. For details on how to configure the user access, consult the [user access management](./configuration/user-management.md) section. - -## Next steps - -To get started configuring the Optimize web container, Elasticsearch, Camunda 7, Camunda 8, and more, visit the [getting started section](./configuration/getting-started.md) of our configuration documentation. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md deleted file mode 100644 index 34b2b2b0421..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: 2.1-to-2.2 -title: "Update notes (2.1 to 2.2)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 2.2.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize -do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: - -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}). -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}). - -To enable this feature for your old data, follow the steps in the [engine data reimport guide]({{< ref "/technical-guide/reimport/_index.md" >}}). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3.md deleted file mode 100644 index ff442b325b8..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: 2.2-to-2.3 -title: "Update notes (2.2 to 2.3)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 2.3.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). 
-::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Broken links - -After the migration, you might encounter some unusual errors in Optimize: - -- Buttons or links are not working when you click on them. -- You get errors in your web browser when you open the Optimize page. - -In this case, clear your browser cache so your browser loads the new Optimize resources. - -### Broken raw data reports - -Apart from caching issues, there is the following list of known data update limitations: - -- Raw data reports with custom column order are broken showing the following error when opened: - - ```javascript - Cannot read property 'indexOf' of undefined - ``` - - To resolve this, either delete and recreate those reports or update to 2.4.0 which resolves the issue. - -- Combined process reports might cause the reports page to crash with the following error - - ```javascript - Oh no :( - Minified React error #130; visit http://facebook.github.io/react/docs/error-decoder.html?invariant=130&args[]=undefined&args[]= for the full message or use the non-minified dev environment for full errors and additional helpful warnings. - ``` - - To resolve this issue, update to 2.4.0 immediately. - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4.md deleted file mode 100644 index 1a61bfa6647..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: 2.3-to-2.4 -title: "Update notes (2.3 to 2.4)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 2.4.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Changes in the supported environments - -With this Optimize version, the supported versions of Elasticsearch also change. Now, Optimize only connects to versions 6.2.0+. See the [Supported Environments]($docs$/reference/supported-environments) sections for details. - -Hence, you need to update Elasticsearch to use the new Optimize version. See the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) on how to do that. Usually, the only thing you need to do is to perform a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html). 
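Before starting the rolling update, it can help to confirm which version your cluster is currently running. One way to do so, assuming Elasticsearch is reachable on `localhost:9200`, is to query the root endpoint, whose response contains the version number:

```
# The "version.number" field of the response shows the currently running Elasticsearch release
curl -s http://localhost:9200
```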
- -## Known issues - -### Confusing warning during the update - -On executing the update, you may see the following warning a couple of times in the update log output: - -``` -Deprecated big difference between max_gram and min_gram in NGram Tokenizer, expected difference must be less than or equal to: [1] -``` - -You can safely ignore this warning. The update itself amends the relevant index settings so the warning will be resolved. - -## Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5.md deleted file mode 100644 index 093a5478a3d..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 2.4-to-2.5 -title: "Update notes (2.4 to 2.5)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 2.5.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Limitations - -If you intend to make use of the new [Multi-Tenancy-Feature](./../configuration/multi-tenancy.md), you need to perform a [full reimport](../../reimport) and may need to amend your existing reports by selecting the tenant you want the report to be based on. - -## Known issues - -### Changes in the plugin system - -There are required changes for plugins implementing `VariableImportAdapter`. -If you use such a plugin, perform the following steps: - -1. In the plugin, update the Optimize plugin dependency to version 2.5. -2. The class `PluginVariableDto` now contains the new field `tenantId`. Depending on your plugin implementation, it might be necessary to include handling this field to not lose it on import. -3. Build the new version of the plugin and replace the old `jar` with the new one. - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. 
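As a sketch of what the corrected notation looks like in a configuration file, assuming the nesting simply follows the `historyCleanup.cronTrigger` key path mentioned above:

```yaml
historyCleanup:
  # Spring cron notation: second, minute, hour, day of month, month, day of week
  # i.e. run the history cleanup at 01:00 AM every day
  cronTrigger: '0 0 1 * * *'
```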
diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6.md deleted file mode 100644 index 60bc2ef46d3..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: 2.5-to-2.6 -title: "Update notes (2.5 to 2.6)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 2.6.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## New behavior of Optimize - -With the introduction of the new collection and permission concept, you might find the behavior of Optimize surprising, so the subsequent sections will guide you through the changes. - -### Collection permissions & private reports - -With Optimize 2.6.0, a resource permission system is introduced. This system provides private report/dashboard entities in the **Home** section as well as the possibility to manage permissions on the collection level in order to share collections with other Optimize users. - -This ultimately means that after the migration to Optimize 2.6.0, each user only sees the entities they originally created. This includes reports, dashboards, and collections. In order for other users to be able to access those entities, they need to be copied into a collection and view access to this new collection must be granted to other users. - -#### Grant access to a private report - -Given the scenario that the user `john` owns a report `John's Report` that user `mary` used to access in Optimize 2.5.0, the user `john` can share this report with `mary` in Optimize 2.6.0 by following these steps: - -1. User `john` creates a collection named e.g. `John's Share`. - ![Create a Collection](img/private_report_access_1_create_collection.png) -1. User `john` grants user `mary` the viewer role on the collection `John's Share`. - ![Create Permission for Mary](img/private_report_access_2_create_view_permission_mary.png) -1. User `john` copies and moves the `John's Report` report to the `John's Share` collection. - ![Copy Report 1](img/private_report_access_3_1_copy_report.png) - ![Copy Report 2](img/private_report_access_3_2_copy_report.png) -1. User `mary` will now see the Collection `John's Share` in her **Home** section of Optimize. - ![Mary sees shared collection](img/private_report_access_4_mary_sees_collection.png) - -#### Grant access to an existing collection - -Given the scenario that the user `john` owns a collection `John's Collection` that user `mary` used to access in Optimize 2.5.0, the user `john` can share this collection with `mary` in Optimize 2.6.0 by granting user `mary` a permission role on that collection. Refer to **Step 2** in [grant access to a private report](#grant-access-to-a-private-report). - -#### Super User role - -You can now grant users `Super User` permissions, which allows them to bypass the owner/collection permissions, enabling them to access all available entities. This can, for example, be useful if entities are owned by users who are no longer available. - -To grant Super User permissions, see the [Authentication & Security Section](./../configuration/system-configuration.md#security).
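As an illustration only, granting the Super User role to the user `john` from the scenario above could look like the following configuration sketch; it assumes the `auth.superUserIds` list property referenced in later update notes, so check the linked section for the exact key in your version:

```yaml
auth:
  # Users listed here bypass the owner/collection permission checks
  superUserIds: ['john']
```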
- -## Known issues - -### Rebuild your Optimize plugins - -With Optimize 2.6.0, the plugin system was overhauled. For your plugins to continue to work, you have to rebuild them with the latest Optimize plugin artifact as an uber jar. Refer to the updated [plugin setup guide](./../plugins/plugin-system.md#set-up-your-environment). - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7.md deleted file mode 100644 index 9fd4de256d5..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: 2.6-to-2.7 -title: "Update notes (2.6 to 2.7)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 2.7.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Changes in the supported environments - -With this Optimize version, there are also changes in the supported versions of Elasticsearch and Camunda 7. - -### Elasticsearch - -Optimize now requires at least Elasticsearch `6.4.0`. -See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. - -If you need to update your Elasticsearch cluster, refer to the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) on how to do that. Usually, the only thing you need to do is perform a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html). - -### Camunda 7 - -Optimize now requires at least Camunda 7 `7.10.6`. -See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. - -### Java - -Optimize now only supports Java 8, 11, and 13. Support for 12 was dropped as it reached [end of support](https://www.oracle.com/technetwork/java/java-se-support-roadmap.html). -See the [Supported Environments]($docs$/reference/supported-environments/) sections for the full range of supported versions. - -## Known issues - -### Collection permissions get lost on failed identity sync - -Optimize has an identity synchronization in place that fetches all users from the engine that have access to Optimize. By doing this, Optimize can easily check if the user is allowed to access the application and is able to quickly display metadata, such as the email address and full name of the user. 
- -If you start Optimize `2.7` and the engine is down at the time of a user synchronization, it is possible that you will lose all your collection permissions. This is due to Optimize not being able to receive the correct authorizations for the collections and as a result, all the collection roles are removed. - -The easiest way to recover your permissions and regain access to your collections would be to add a user ID to the `auth.superUserIds` property of your [configuration file](./../configuration/system-configuration.md#security), then re-adding the necessary permissions as this user. - -After you have regained the roles of your collections, you should consider one of the two next follow-up steps: - -- Preferred solution: Update to Optimize 3.2.0 to fix the issue. -- Interim solution: If you anticipate the engine being taken down, we also recommend stopping Optimize to prevent the same scenario from reoccurring. In addition, you can also change the frequency at which this collection cleanup occurs by adjusting the `import.identitySync.cronTrigger` expression in your [configuration file](./../configuration/system-configuration.md#security) to `0 0 1 * * *`, which results in executing the sync once per day at 01:00 AM. - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0.md deleted file mode 100644 index aa3df00aabe..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -id: 2.7-to-3.0 -title: "Update notes (2.7 to 3.0)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 3.0.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). - -If you have done an Optimize update prior to this one, note the [changes in the update procedure](#changes-in-the-update-procedure). - -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Potential NullpointerException on Update to 3.0.0 - -In some circumstances, the update to 3.0.0 might fail with the following log output: - -``` - 06:00:00.000 - Starting step 1/9: UpdateIndexStep - ... - 06:00:02.066 - Error while executing update from 2.7.0 to 3.0.0 - java.lang.NullPointerException: null - at org.camunda.optimize.upgrade.steps.schema.UpdateIndexStep.execute(UpdateIndexStep.java:71) - ... -``` - -This is a known issue that occurs if you previously updated to Optimize 2.7.0. 
You can solve this issue by executing the following command on your Elasticsearch cluster before running the update again. - -``` -curl -s -XDELETE :9200/optimize-event_v2-000001 -``` - -The update should now successfully complete. - -### Cannot disable import from particular engine - -In 3.0.0, it is not possible to deactivate the import of a particular Optimize instance from a particular engine (via `engines.${engineAlias}.importEnabled`). In case your environment is using that feature for e.g. a [clustering setup](./../configuration/clustering.md), we recommend you to stay on Optimize 2.7.0 until the release of Optimize 3.1.0 (Scheduled for 14/07/2020) and then update straight to Optimize 3.1.0. - -## Limitations - -### User operation log import - -Optimize now imports the user operation log. Due to this, the engine user now requires engine permissions to read the user operation log, see also the [configuration documentation](./../configuration/system-configuration-platform-7.md). - -### Suspension filter - -Due to a limitation of the user operations log data retrieval in the engine API, process instance suspension states of instances suspended after Optimize has been started are not correctly imported. This leads to inaccuracies in the [Suspended Instances Only Filter](components/userguide/process-analysis/instance-state-filters.md#suspended-and-non-suspended-instances-only-filter), which will only apply to instances which were suspended before they were imported by Optimize. - -Furthermore, since the suspension state of process instances in Optimize is updated according to historic data logs, if you have [history cleanup](./../configuration/history-cleanup.md) enabled it is possible that the relevant data will be cleaned up before Optimize can import it, leading to inaccuracies in the state of suspended process instances which will then not appear in the appropriate filter. - -### Event-based processes - -There might be cases where an incorrect and lower than expected number of events are shown when mapping either process start and end events to nodes on your event based process, or -when mapping multiple engine task events from the same engine model. - -These are known issues and are [fixed](https://jira.camunda.com/browse/OPT-3515) in the upcoming Optimize 3.1.0 release. If using this version or newer, you can correct previously imported data in your event-based process either -by recreating or republishing the event based process. - -Alternatively, [forcing a reimport](./instructions.md#force-reimport-of-engine-data-in-optimize) -of the engine data after updating to a version with this fix will correct these errors too. - -## Changes in the update procedure - -Although Optimize 3.0.0 is a major version change, we still allow a rolling update from 2.7 to the new version. However, since the support for Elasticsearch changed to the latest major version 7.X, there is an additional step in the update routine involved. - -Before you can perform the actual update, you need to do a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) of Elasticsearch from 6.X to 7.X. The exact details can be found in the [Migration & Update Instructions](./instructions.md). - -Please note that the following updates are not supported by Elasticsearch: - -- 6.8 to 7.0. -- 6.7 to 7.1.–7.6.X. - -## Changes in the supported environments - -With this Optimize version, there are also changes in the supported versions of the Elasticsearch and Camunda 7. 
- -### Elasticsearch - -Optimize now requires at least Elasticsearch `7.0.0` and supports the latest major version up to `7.6.0`. -See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. - -In case you need to update your Elasticsearch cluster, refer to the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) on how to do that. Usually, the only thing you need to do is to perform a [rolling update](https://www.elastic.co/guide/en/elastic-stack/current/upgrading-elasticsearch.html#rolling-upgrades). There's also a dedicated section in the [Migration & Update Instructions](./instructions.md) on how to perform the rolling update. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1.md deleted file mode 100644 index a9e4ecba168..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: 3.0-to-3.1 -title: "Update notes (3.0 to 3.1)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 3.1.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Changes in the supported environments - -With this Optimize version, there are also changes in the supported versions of Camunda 7. - -### Camunda 7 - -Optimize now requires at least Camunda 7 `7.11.13`. -See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. - -## Breaking changes - -With Optimize 3.1.0, the [History Cleanup](./../configuration/history-cleanup.md) configuration was restructured and needs to be adjusted accordingly. - -Major changes are the removal of the global feature flag `historyCleanup.enabled` in favor of entity type specific feature flags as well as a relocation of process and decision specific configuration keys. Refer to the [configuration documentation](./../configuration/system-configuration.md#history-cleanup-settings) for details. - -With this release, Optimize now imports deployment data from the engine when importing definitions. If Optimize is importing from an authenticated engine, the configured user must now have READ permission on the `Deployment` resource. - -## Known issues - -### Event-based processes - event counts/suggestions - -As part of the update from Optimize 3.0 to 3.1, the event counts and the next suggested events used as part of the event based process feature are recalculated. Until the recalculation is complete, the event counts might be incorrect and the suggestions inaccurate. - -Once the recalculation is complete, the event counts will return to being correct and you will see more accurate suggested next events. - -### Decision report filter incompatibilities - update and runtime errors possible - -Due to a restriction in the database schema for decision reports, the usage of filters is limited in Optimize 3.1.0 as well as 3.2.0 and will only be fully working again in Optimize 3.3.0. -This results in the behavior that once a certain filter type was used, e.g. 
a fixed evaluation date filter, another filter type cannot be used anymore, e.g. a relative evaluation date filter. This issue can occur at runtime as well as during the update. - -Usually, you will see a log similar to this one when you hit this issue: - -``` -{"error":{"root_cause":[{"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"}],"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"},"status":400} -``` - -_We thus recommend removing all filters used on decision reports before updating to Optimize 3.1.0._ - -## Limitations - -### User permissions - -With Optimize 3.1, user and group related permissions are checked by Optimize to determine whether the current user is authorized to access other users/groups within Optimize, for example when adding new roles to a collection. - -Due to this, it is now required to explicitly grant users the relevant authorizations, otherwise they will not be able to see other users and groups in Optimize. More information on authorizations can be found [here](./../configuration/authorization-management.md#user-and-group-related-authorizations). - -### User operations log import - -With Optimize 3.1, the user operations log is imported to detect changes to running instances' suspension status. The user operations log informs Optimize when instance suspension requests have been received by the engine, and Optimize then reimports the relevant instances to ensure their suspension state is set correctly in Optimize. - -However, if instances are suspended using the engine API's `executionDate` parameter, with which suspension operations can be triggered with a delay, Optimize currently is not able to detect this delay, and will re-import the running process instances at the time the suspension operation is read from the user operations log, not at the time the suspension takes place. This can lead to inaccuracies in the suspension state of process instances in Optimize. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2.md deleted file mode 100644 index 898c0015b19..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: 3.1-to-3.2 -title: "Update notes (3.1 to 3.2)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 3.3.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Decision report filter incompatibilities - update and runtime errors possible - -Due to a restriction in the database schema for decision reports, the usage of filters is limited in Optimize 3.2.0 and will only be fully working again in Optimize 3.3.0. - -This results in the behavior that once a certain filter type was used, e.g. a fixed evaluation date filter, another filter type cannot be used anymore, e.g. a relative evaluation date filter. This issue can occur at runtime as well as during the update. 
- -Usually, you will see a log similar to this one when you hit this issue: - -``` -{"error":{"root_cause":[{"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"}],"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"},"status":400} -``` - -_We thus recommend removing all filters used on decision reports before updating to Optimize 3.2.0._ - -## Changes in the supported environments - -With this Optimize version there are also changes in the supported versions of Elasticsearch. - -### Elasticsearch - -Optimize now supports Elasticsearch versions 7.7 and 7.8. - -See the [Supported Environments]($docs$/reference/supported-environments/) sections for the full range of supported versions. - -### Camunda 7 - -Optimize now requires at least Camunda 7 `7.12.11`, and `7.11.x` is not supported anymore. -See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. - -### Unexpected behavior - -#### Cancelled flow node filter - -With this version, Optimize now allows you to filter for process instances where a given set of flow nodes have been canceled, as well as for flow nodes or user tasks that have been canceled. - -However, any canceled flow nodes and user tasks already imported by Optimize before this release will not appear as canceled in Optimize so will continue to be treated the same as any other completed flow node or user task. To use these options for previously imported data, you will need to [force a reimport](../../reimport) from the engine. - -## Limitations - -### No running flow node instances visible if blocked by an incident - -Optimize 3.2.0 introduces the visibility of [incidents](components/userguide/process-analysis/metadata-filters.md#incident-filter), but in contrast to Camunda Cockpit, Optimize currently does not show flow node instances in flow node view reports for those flow node instances that are blocked by an incident. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3.md deleted file mode 100644 index ef57ec15e5c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: 3.2-to-3.3 -title: "Update notes (3.2 to 3.3)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 3.3.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). 
-::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Error during migration of dashboards when updating from Optimize 3.2.0 to 3.3.0 - -During the update from Optimize 3.2.0 to 3.3.0, you may encounter the following error: - -``` -Starting step 6/7: UpdateIndexStep on index: dashboard -Progress of task (id:FwvhN1jsRUe1JQD49-C3Qg:12009) on index optimize-dashboard_v4: 0% (total: 1, updated: 0, created: 0, deleted: 0) -An Elasticsearch task that is part of the update failed: Error{type='script_exception', reason='runtime error', phase='null'} - -``` - -This can happen if you started using an Optimize version prior to 3.1.0 in your environment in the past and did not manually edit/update at least one particular dashboard created with such a version since then. - -To recover from this situation, you can run the following update script on all Optimize dashboards on your Elasticsearch cluster: - -``` -curl --location --request POST 'localhost:9200/optimize-dashboard_v3/_update_by_query' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "script": { - "source": "if (ctx._source.availableFilters == null) { ctx._source.availableFilters = [] }", - "lang": "painless" - } -}' -``` - -Then, resume the update to Optimize 3.3.0 by rerunning it, thanks to Optimize updates being [resumable](https://camunda.com/blog/2021/01/camunda-optimize-3-3-0-released/#Resumable-Updates) since Optimize 3.3.0. - -## Breaking changes - -### Renamed environment folder to config - -The `environment` folder, which holds all configuration files, has been renamed to `config`. - -### Elasticsearch - -Optimize no longer supports Elasticsearch versions 7.0, 7.1 or 7.2. -See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. - -### Docker image environment variables - -Previously it was possible to use the `JAVA_OPTS` environment variable on the official Optimize Docker image to configure the JVM that runs Optimize. With Optimize 3.3.0 this variable was renamed to `OPTIMIZE_JAVA_OPTS`. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md deleted file mode 100644 index fb5e42345d6..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: 3.3-to-3.4 -title: "Update notes (3.3 to 3.4)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 3.4.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). - -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize -do not fetch data that is necessary for the new feature to work. 
For this update, the following features do not work on the old data: - -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}) -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}) - -To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5.md deleted file mode 100644 index 14af8e54e84..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: 3.4-to-3.5 -title: "Update notes (3.4 to 3.5)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 3.5.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Limitations - -### Migration warning regarding incomplete UserTasks - -The migration from Optimize 3.4 to 3.5 includes some improvements to the Optimize process instance data structure. Previously, process instance data in Optimize held two distinct lists: one for all FlowNode data and one for UserTask data. To avoid redundancy, these lists are merged into one during this migration. - -In order to correctly merge the UserTask data contained in the two lists, specific ID fields are used to correlate UserTasks correctly. However, due to the nature of the Optimize import, UserTask data can temporarily exist within Optimize without some of these fields. Normally, these fields are updated by the next scheduled UserTask import, but if Optimize was shut down before this next UserTask import can run, the fields remain `null` and cannot be used during migration. - -Usually, this should only affect a small percentage of UserTasks and of this small percentage, the data that is lost during migration will only relate to the cancellation state or assignee/candidate group information. In practical terms, if you observe a warning regarding "x incomplete UserTasks that will be skipped during migration" in your update logs, this means that after the migration, x UserTasks in your system may be lacking assignee or candidate group information or may be marked as completed when in fact they were canceled. - -Note that any other UserTask data, old and new, will be complete. - -If this inaccuracy in past data is not acceptable to you, you can remedy this data loss by performing a reimport after migration. You can either run a complete reimport using [the reimport script](../../reimport), or alternatively use the below statements to only reset those imports responsible for the data that was skipped during migration. - -Ensure Optimize is shut down before executing these import resets. 
- -Reset the `identityLinkLog` import to reimport assignee and candidate group data: - -``` -curl --location --request DELETE 'http://:/-timestamp-based-import-index_v4/_doc/identityLinkLogImportIndex-' -``` - -Reset the `completedActivity` import to reimport the correct cancellation state data: - -``` -curl --location --request DELETE 'http://:/-timestamp-based-import-index_v4/_doc/activityImportIndex-' -``` - -For example, assuming Elasticsearch is at `localhost:9200`, the engine alias is `camunda-bpm`, and the index prefix is `optimize`, the request to reset the `identityLinkLog` import translates to: - -``` -curl --location --request DELETE 'http://localhost:9200/optimize-timestamp-based-import-index_v4/_doc/identityLinkLogImportIndex-camunda-bpm' -``` - -If you have more than one engine configured, both requests need to be executed once per engine alias. - -## Known issues - -### Report edit mode fails for reports with flow node filters - -After updating to Optimize 3.5.0, you may encounter an issue that you cannot enter the edit mode on -reports that use flow node selection filters. - -In such a case, when entering edit mode, you are confronted with the following error in the Web UI: - -``` - Cannot read property 'key' of undefined -``` - -This error can be resolved by running the following Elasticearch update query on your Optimize report index: - -``` -curl --location --request POST 'http://{esHost}:{esPort}/{indexPrefix}-single-process-report/_update_by_query' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "script" : { - "source": "if(ctx._source.data.filter.stream().anyMatch(filter -> \"executedFlowNodes\".equals(filter.type)) && ctx._source.data.definitions.length == 1){for (filter in ctx._source.data.filter){filter.appliedTo = [ctx._source.data.definitions[0].identifier];}}", - "lang": "painless" - } -}' -``` - -Applying this update query can be done anytime after the update to Optimize 3.5.0 was performed, even while Optimize 3.5.0 is already running. - -### Running 3.5 update on Optimize version 3.5 data results in NullPointerException - -The Optimize 3.5 update will not succeed if it is run on data which has already been updated to 3.5. This is because the 3.5 update relies on the 3.4 schema to be present in order to perform certain operations, which will fail with a `NullPointerException` if attempted on the 3.5 schema. This will cause the update to force quit. In this case, however, no further action is required as your data has already been updated to 3.5. - -## Unexpected behavior - -### Flow node selection in report configuration moved to flow node filter - -The flow node selection previously found in the report configuration menu has now been migrated to the flow node filter dropdown as a ["Flow Node Selection" Filter](components/userguide/process-analysis/flow-node-filters.md#flow-node-selection). Existing flow node selection configurations in old reports will be migrated to an equivalent Filter with the Optimize 3.5.0 migration. Note that this filter now also filters out instances which do not contain any flow nodes that match the filter. - -## Changes in requirements - -### Java - -With this release, support for Java 8 has been removed, meaning that Java 11 is now the only LTS version of Java that Optimize supports. See the [Supported Environments]($docs$/reference/supported-environments) sections for more information on supported versions. 
- -### Elasticsearch - -With this release, Optimize no longer supports Elasticsearch versions 7.5.1, 7.6.0 or 7.7.0. See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6.md deleted file mode 100644 index 480cbe79427..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: 3.5-to-3.6 -title: "Update notes (3.5 to 3.6)" ---- - -Camunda 7 only - -:::note Heads Up! -To update Optimize to version 3.6.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Default tenants - -If you have [default tenants configured](./../configuration/system-configuration-platform-7.md) for any connected engine in Optimize, -it might be that user task and flow node reports, as well as branch analysis, stops showing data after updating to 3.6.0. - -This is a known -issue that has been fixed as part of the 3.6.3 patch release. You can update from 3.6.0 to 3.6.3. Migration from either of these versions to -3.7.0 will be possible. - -## Changes in supported environments - -### Camunda 7 - -Optimize now requires at least Camunda 7 `7.14.0` and supports up to `7.16.0+`. Camunda 7 `7.13.x` is not supported anymore. - -See the [Supported Environments]($docs$/reference/supported-environments) sections for the full range of supported versions. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7.md deleted file mode 100644 index b3aa2a8817c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: 3.6-to-3.7 -title: "Update notes (3.6 to 3.7.x)" ---- - -Camunda 7 only - -:::note Heads up! -To update Optimize to version 3.7.x, perform the following steps: [Migration & Update Instructions](./instructions.md). -::: - -The update to 3.7.x can be performed from any 3.6.x release. - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -The Optimize 3.7.0 release contains a number of bugs related to dashboard templates, alerts, and the Report Builder. - -For details on the issues, refer to the [Optimize 3.7.1 Release Notes](https://jira.camunda.com/secure/ReleaseNote.jspa?projectId=10730&version=17434). - -The Optimize 3.7.0 - 3.7.1 releases contain a bug in which decision instance object variables are erroneously attempted to be imported. This can lead to the decision variable import getting stuck. - -For details on the issues refer to the [Optimize 3.7.2 Release Notes](https://jira.camunda.com/secure/ReleaseNote.jspa?projectId=10730&version=17441). 
- -The Optimize 3.7.0 - 3.7.2 releases contain a bug in which object variables that contain a property with an empty string value cause an exception upon import which can block the import of further variables. - -For details on the issue refer to the [Optimize 3.7.3 Release Notes](https://jira.camunda.com/secure/ReleaseNote.jspa?projectId=10730&version=17452). - -We thus recommend updating to 3.7.3 if you are already using 3.7.0, 3.7.1, or 3.7.2, or directly updating to 3.7.3 if you are still running a 3.6.x release. - -## New behavior - -### Added support for object and list variables - -With Optimize 3.7, we've added support for object and list process variables. Variables with type `Object` are now automatically imported and flattened into dedicated "sub variables" for each object property. If you have previously used a variable import plugin to achieve the same, you may disable this plugin after migrating to Optimize 3.7. - -Find more information about importing object variables [here](./../configuration/object-variables.md). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.7-to-3.8.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.7-to-3.8.md deleted file mode 100644 index 910a77a2600..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.7-to-3.8.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: 3.7-to-3.8 -title: "Update notes (3.7.x to 3.8.x)" ---- - -:::note Heads up! -To update Optimize to version 3.8.x, perform the following steps: [Migration & Update Instructions](./instructions.md). -::: - -The update to 3.8.x can be performed from any 3.7.x release. - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (for example, due to a new feature) -- Changes in translation resources - -## Known issues - -No known issues at the moment. - -## Changes in supported environments - -### Elasticsearch - -While OpenSearch was never officially supported by Optimize, up until Optimize 3.7, the version of the Elasticsearch client used was also compatible with OpenSearch. -With this release, the client has been updated to a version no longer compatible with OpenSearch, meaning that Optimize will also no longer work with OpenSearch. - -### Camunda 7 - -Optimize now requires at least Camunda 7 `7.15.0` and supports up to `7.17.0+`. Camunda 7 `7.14.x` is not supported anymore. -See the [supported environments]($docs$/reference/supported-environments/#camunda-platform-7--optimize-version-matrix) sections for the full range of supported versions. - -## New behavior - -Due to a general overhaul in the public API, the authentication to all API requests must now be performed via a `Bearer Token` in the request header. In previous versions, you had two possible ways to authenticate your API requests: by providing the secret as the query parameter `accessToken`, or by providing it in the request header as a `Bearer Token`. If you were using the latter method, no change is necessary and your requests will keep working as usual. If you were using the query parameter method, you will need to change your requests. For more information, see [Authorization](../../../apis-tools/optimize-api/optimize-api-authorization.md). 
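As a minimal sketch of that change (the host, port, endpoint path, and token below are placeholders, not values taken from these update notes): a request that previously passed the secret as the `accessToken` query parameter now sends it as a `Bearer` token in the `Authorization` header.

```bash
# Before (no longer supported): secret passed as a query parameter.
curl 'http://localhost:8090/api/public/<endpoint>?accessToken=mySecret'

# After: the same secret sent as a Bearer token in the request header.
curl -H 'Authorization: Bearer mySecret' 'http://localhost:8090/api/public/<endpoint>'
```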
- -## Changes in translation files - -In case you manage your own translations into different languages, you can find a list below with all the changes that need to be translated for this release. - -### Localization file - -The following terms have been added/removed to/from the localization file (`en.json`) since the last release: - -[en.json.diff](./translation-diffs/differences_localization_370_380.diff) - -- lines with a `+` in the beginning mark the addition/update of a term, lines with a `-` mark the removal of a term - -### Text from "What's new" dialogue - -For the purposes of translation, find the text for the `What's new` dialog below: - -``` -## Set and Track Time-Based Goals - -Set data-driven service level agreements (SLAs) on how long all your processes should take so you can quickly identify which processes are underperforming. - -## KPI Reports - -Create reports and alerts tracking percentages like fully automated instances or incident rate (%), plus SLA statistics on durations like P99 or P95 duration in addition to minimum, median, and maximum. - -## Improved UX - -Rename variables in plain language, filter out noisy outlier analysis heatmaps, and apply rolling date filters to your dashboards to focus on the most important data. - -For more details, review the [blog post](https://camunda.com/blog/2022/04/camunda-optimize-3-8-0-released/). -``` diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.8-to-3.9-preview.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.8-to-3.9-preview.md deleted file mode 100644 index af800606c30..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.8-to-3.9-preview.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -id: 3.8-to-3.9-preview-1 -title: "Update notes (3.8.x to 3.9.x-preview-1)" ---- - -:::note Heads up! -To update Optimize to version 3.9.x-preview-1, perform the following steps: [Migration & Update Instructions](./instructions.md). -::: - -The update to 3.9.x-preview-1 can be performed from any 3.8.x release. - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (for example, due to a new feature) -- Changes in translation resources - -## Known issues - -No known issues at the moment. - -## Changes in supported environments - -## New behavior - -## Changes in translation files - -In case you manage your own translations into different languages, you can find a list below with all the changes that need to be translated for this release. - -### Localization file - -The following terms have been added/removed to/from the localization file (`en.json`) since the last release: - -[en.json.diff](./translation-diffs/differences_localization_380_390_preview_1.diff) - -- Lines with a `+` in the beginning mark the addition/update of a term; lines with a `-` mark the removal of a term. - -### Text from "What's new" dialogue - -For the purposes of translation, find the text for the `What's new` dialog below: - -``` -## Process Overview - -See holistic statistics across your entire portfolio of processes with some suggested focus areas for improvement. - -## Process Onboarding - -Create a dedicated KPI collection and dashboard with one click, then modify your targets and share it with stakeholders. 
- -## KPI Overview - -See how all your process KPIs perform in one screen, then identify which processes need the most improvement. - -For more details, review the [blog post](https://camunda.com/blog/2022/07/camunda-optimize-3-9-0-preview-released/). -``` diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.9-preview-to-3.9.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.9-preview-to-3.9.md deleted file mode 100644 index 4fc63728760..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.9-preview-to-3.9.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -id: 3.9-preview-1-to-3.9 -title: "Update notes (3.9-preview-x to 3.9.x)" ---- - -:::note Heads up! -To update Optimize to version 3.9.x, perform the steps in the [migration and update instructions](./instructions.md). -::: - -The update to 3.9.x can be performed from any 3.8.x or any 3.9.0-preview release. - -Here you will find information about: - -- Limitations -- Known issues -- Changes in the supported environments -- Any unexpected behavior of Optimize (for example, due to a new feature) -- Changes in translation resources - -## Known issues - -If there are processes in Optimize that currently do not have a process owner assigned and a new process is deployed -via Web Modeler, a new process owner may be assigned to one of the previous -processes without an owner. This is not critical, as it does not change any permissions, but it does affect who receives email notifications for a process. If an owner is set incorrectly, you can change it manually on the processes page. -This issue is resolved in version 3.9.1. - -## Changes in supported environments - -### Camunda 7 - -Optimize now requires at least Camunda 7 `7.16.0` and supports up to `7.18.0+`. Camunda 7 `7.15.x` is not supported anymore. -See the [supported environments]($docs$/reference/supported-environments/#camunda-platform-7--optimize-version-matrix) section for the full range of supported versions. - -### Elasticsearch - -Optimize now requires at least Elasticsearch `7.13.0`. -See the [supported environments]($docs$/reference/supported-environments) section for the full range of supported versions. - -If you need to update your Elasticsearch cluster, refer to the general [Elasticsearch update guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html). Usually, the only thing you need to do is perform a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html). - -## Changes in translation files - -In case you manage your own translations into different languages, you can find a diff below with all the changes that need to be translated for this release. - -### Localization file - -The following terms have been added to or removed from the localization file `en.json` since the last release: - -[en.json.diff](./translation-diffs/differences_localization_390_preview_1_390.diff) - -- Lines with a `+` in the beginning mark the addition/update of a term; lines with a `-` mark the removal of a term.
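If you maintain your own translation files, one quick way to see which keys changed is to scan the linked diff locally. A sketch, assuming you have downloaded `differences_localization_390_preview_1_390.diff` into your working directory:

```bash
# List only the added (+) and removed (-) translation entries from the diff,
# skipping context lines and the diff file headers.
grep -E '^[+-][[:space:]]*"' differences_localization_390_preview_1_390.diff
```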
diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.9-to-3.10.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.9-to-3.10.md deleted file mode 100644 index cca623681a4..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/3.9-to-3.10.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: 3.9-to-3.10 -title: "Update notes (3.9.x to 3.10)" ---- - -:::note Heads up! -To update Optimize to version 3.10, perform the steps in the [migration and update instructions](./instructions.md). -::: - -The update to 3.10 can be performed from any 3.9.x release. - -Here you will find information about: - -- Limitations -- Known issues -- Changes in supported environments -- Any unexpected behavior of Optimize (for example, due to a new feature) -- Changes in translation resources - -## Changes in the configuration - -In the 3.10 version of Optimize, it is no longer possible to apply custom configuration to the UI header. The following -configuration options have therefore been removed: - -- ui.header.textColor -- ui.header.pathToLogoIcon -- ui.header.backgroundColor - -## Helm chart - -For Optimize 3.10.1, a new environment variable was introduced to allow specifying a redirection URL. However, the change was not compatible with the Camunda Helm charts until it was fixed in Optimize 3.10.3 (and Helm chart 8.2.9). Therefore, these Optimize versions are coupled to specific Camunda Helm chart versions: - -| Optimize version | Camunda Helm chart version | -| ------------------------ | -------------------------- | -| Optimize 3.10.1 - 3.10.2 | 8.2.0 - 8.2.8 | -| Optimize 3.10.3+ | 8.2.9+ | - -## Elasticsearch - -Optimize now supports Elasticsearch `8.5` and `8.6`, but it requires at least Elasticsearch `7.16.2`. -Additionally, when updating to Optimize 3.10.x, please note there are temporary changes in Optimize's Elasticsearch support, as detailed below: - -| Optimize version | Elasticsearch version | -| --------------------------------- | -------------------------------- | -| Optimize 3.10.0 - Optimize 3.10.3 | 7.16.2+, 7.17.0+, 8.5.0+, 8.6.0+ | -| Optimize 3.10.4 | 7.16.2+, 7.17.0+, 8.7.0+, 8.8.0+ | -| Optimize 3.10.5 - Optimize 3.10.x | 7.16.2+, 7.17.0+, 8.5.0+, 8.6.0+ | - -See the [supported environments]($docs$/reference/supported-environments) section for the full range of supported versions. - -If you need to update your Elasticsearch cluster, refer to the general [Elasticsearch update guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html). Usually, the only thing you need to do is perform a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html). - -## Changes in translation files - -In case you manage your own translations into different languages, you can find a diff below with all the changes that need to be translated for this release. - -### Localization file - -The following terms have been added to or removed from the localization file `en.json` since the last release: - -[en.json.diff](./translation-diffs/differences_localization_390_3100.diff) - -- Lines with a `+` in the beginning mark the addition/update of a term; lines with a `-` mark the removal of a term.
diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_1_create_collection.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_1_create_collection.png deleted file mode 100644 index 212e86b5511..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_1_create_collection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_2_create_view_permission_mary.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_2_create_view_permission_mary.png deleted file mode 100644 index 15001043127..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_2_create_view_permission_mary.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_1_copy_report.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_1_copy_report.png deleted file mode 100644 index f6a77e63a38..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_1_copy_report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_2_copy_report.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_2_copy_report.png deleted file mode 100644 index ff9bdb06f5b..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_2_copy_report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_4_mary_sees_collection.png b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_4_mary_sees_collection.png deleted file mode 100644 index 8ef4dfc1020..00000000000 Binary files a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/img/private_report_access_4_mary_sees_collection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/instructions.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/instructions.md deleted file mode 100644 index 5d41783315d..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/instructions.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -id: instructions -title: "Instructions" -description: "Find out how to update to a new version of Optimize without losing your reports and dashboards." ---- - -Optimize releases two new minor versions a year. These documents guide you through the process of migrating your Optimize from one Optimize minor version to the other. - -If you want to update Optimize by several versions, you cannot do that at once, but you need to perform the updates in sequential order. 
For instance, if you want to update from 2.5 to 3.0, you need to update first from 2.5 to 2.6, then from 2.6 to 2.7, and finally from 2.7 to 3.0. The following table shows the recommended update paths to the latest version: - -| Update from | Recommended update path to 3.10 | -| ----------- | ----------------------------------------------------------------- | -| 3.10 | You are on the latest version. | -| 3.0 - 3.9.x | Rolling update to 3.10 | -| 2.0 - 2.7 | 1. Rolling update to 2.7
    2. Rolling update from 2.7 to 3.0 | -| 1.0 - 1.5 | No update possible. Use the latest version directly. | - -## Migration instructions - -You can migrate from one version of Optimize to the next one without losing data. To migrate to the latest version, please perform the following steps: - -### 1. Preparation - -- Make sure that Elasticsearch has enough memory. To do that, shut down Elasticsearch and go to the `config` folder of your Elasticsearch distribution. There you should find a file called `jvm.options`. Change the values of the two properties `Xms` and `Xmx` to at least `1g` so that Elasticsearch has enough memory configured. This configuration looks as follows: - -```bash --Xms1g --Xmx1g -``` - -- Restart Elasticsearch and make sure that the instance is up and running throughout the entire migration process. -- You will need to shut down Optimize before starting the migration, resulting in downtime during the entire migration process. -- [Back up your Elasticsearch instance](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html) in case something goes wrong during the migration process. This is recommended, but optional. -- Make sure that you have enough storage available to perform the migration. The migration can temporarily require up to twice the storage currently used by your Elasticsearch data. (Highly recommended) -- Back up your `environment-config.yaml` and `environment-logback.xml` located in the `config` folder of the root directory of your current Optimize. (Optional) -- If you are using Optimize plugins, you might need to adjust those plugins to the new version. To do this, go to the project where you developed your plugins, increase the project version in Maven to the new Optimize version, and build the plugin again (check out the [plugin guide](../plugins/plugin-system.md) for details). Afterwards, add the plugin jar to the `plugin` folder of your new Optimize distribution. (Optional) -- Start the new Optimize version, as described in the [installation guide](../install-and-start.md). -- It is very likely that you configured the logging of Optimize to your needs and therefore adjusted the `environment-logback.xml` in the `config` folder of the root directory of your **old** Optimize. You can now use the backed up logging configuration and put it in the `config` folder of the **new** Optimize to keep your logging adjustments. (Optional) - -### 2. Rolling update to the new Elasticsearch version - -You only need to execute this step if you want to update the Elasticsearch (ES) version during the update. In case the ES version stays the same, you can skip this step. - -The Elasticsearch update is usually performed in a rolling fashion. Read all about how to do the ES update in the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) and consult the [rolling upgrade](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html) guide of the ES documentation on how to conduct the rolling update. If you have a very simple setup, for instance, a cluster with only one ES node and no plugins, machine learning jobs, or special configuration, the update essentially boils down to the following steps: - -1. Install the new ES version, e.g. using Docker, your favorite package manager, or just by downloading and extracting the new tar/zip archive to a new directory. -2.
Copy the data from the old ES to the new ES. If you don't expect any new data coming to your old ES, you can just copy the `data` folder from the old ES distribution and overwrite the `data` folder in the new ES distribution. -3. Copy your old configuration (`config/elasticsearch.yml`) over to the new ES installation. -4. Stop the old ES instance. -5. Start the new ES instance and check that everything looks fine. - -Although the above steps summarize the basic update procedure, it is still recommended to read through the Elasticsearch documentation to avoid any potential issues. - -:::note Heads Up! - -Note that the following updates are not supported by Elasticsearch: - -- 6.8 to 7.0. -- 6.7 to 7.1.–7.X (where X>1, e.g. 7.5) - -::: - -### 3. Perform the migration - -- Go to the [enterprise download page](https://docs.camunda.org/enterprise/download/#camunda-optimize) and download the new version of Optimize you want to update to. For instance, if your current version is Optimize 2.2, you should download version 2.3. Extract the downloaded archive in your preferred directory. The archive contains the Optimize application itself and the executable to update Optimize from your old version to the new version. -- In the `config` folder of your **current** Optimize version, you have defined all configuration in the `environment-config.yaml` file, e.g. for Optimize to be able to connect to the engine and Elasticsearch. Copy the old configuration file and place it in the `config` folder of your **new** Optimize distribution. Bear in mind that the configuration settings might have changed, so the new Optimize might not recognize your adjusted settings or might complain about outdated settings and therefore refuse to start up. It is best to check the update notes subsections for deprecations. - -#### 3.1 Manual update script execution - -This approach requires you to manually execute the update script. You can perform this from any machine that has access to your Elasticsearch cluster. - -- Open up a terminal, change to the root directory of your **new** Optimize version and run the following command: `./upgrade/upgrade.sh` on Linux or `./upgrade/upgrade.bat` on Windows -- During the execution, the executable will output a warning asking you to back up your Elasticsearch data. Type `yes` to confirm that you have backed up the data. -- Feel free to [file a support case](https://docs.camunda.org/enterprise/support/) if any errors occur during the migration process. -- To get more verbose information about the update, you can adjust the logging level as described in the [configuration documentation](./../configuration/logging.md). - -#### 3.2 Automatic update execution (Optimize >3.2.0) - -With the Optimize 3.2.0 release, the update can also be executed as part of the Optimize startup. In order to make use of this functionality, the command flag `--upgrade` has to be passed to the Optimize startup script: - -```bash -For UNIX: -./optimize-startup.sh --upgrade - -For Windows: -./optimize-startup.bat --upgrade -``` - -This will run the update first and only then start Optimize. - -In Docker environments, this can be achieved by overwriting the default command of the Docker container (being `./optimize.sh`), e.g.
like in the following [docker-compose](https://docs.docker.com/compose/) snippet: - -``` -version: '2.4' - -services: - optimize: - # Use the appropriate image tag depending on your version - image: registry.camunda.cloud/optimize-ee/optimize:8-latest # For Camunda 8 - # image: registry.camunda.cloud/optimize-ee/optimize:latest # For Camunda 7 - command: ["./optimize.sh", "--upgrade"] -``` - -However, as this may prolong the container boot time significantly, which may conflict with [container status probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) in managed environments like [Kubernetes](https://kubernetes.io/), we recommend using the [init container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) feature there to run the update: - -``` - labels: - app: optimize -spec: - initContainers: - - name: migration - # Use the appropriate image tag depending on your version - image: registry.camunda.cloud/optimize-ee/optimize:8-latest # For Camunda 8 - # image: registry.camunda.cloud/optimize-ee/optimize:latest # For Camunda 7 - command: ['./upgrade/upgrade.sh', '--skip-warning'] - containers: - - name: optimize - # Use the appropriate image tag depending on your version - image: registry.camunda.cloud/optimize-ee/optimize:8-latest # For Camunda 8 - # image: registry.camunda.cloud/optimize-ee/optimize:7-latest # For Camunda 7 -``` - -### 4. Resume a canceled update - -From Optimize 3.3.0 onwards, updates are resumable. If the update process was interrupted, either manually or due to an error, you don't have to restore the Elasticsearch backup and start over; you can simply rerun the update. On resume, previously completed update steps will be detected and logged as skipped. In the following log example, **Step 1** was previously completed and is thus skipped: - -``` -./upgrade/upgrade.sh -... -INFO UpgradeProcedure - Skipping Step 1/2: UpdateIndexStep on index: process-instance as it was found to be previously completed already at: 2020-11-30T16:16:12.358Z. -INFO UpgradeProcedure - Starting step 2/2: UpdateIndexStep on index: decision-instance -... -``` - -### 5. Typical errors - -- Using an update script that does not match your version: - -```bash -Schema version saved in Metadata does not match required [2.X.0] -``` - -Let's assume you have Optimize 2.1, want to update to 2.3, and use the jar to update from 2.2 to 2.3. This error occurs because the jar expects Elasticsearch to have schema version 2.2, but it is still at 2.1. This is because you downloaded the wrong Optimize artifact, which contained the wrong update jar version. - -## Force reimport of engine data in Optimize - -It can be the case that features that were added with the new Optimize version do not work for data that was imported with the old version of Optimize. If you want to use new features on the old data, you can force a reimport of the engine data to Optimize. See [the reimport guide](./../reimport.md) on how to perform such a reimport.
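Relating to the schema version error described under typical errors above, it can help to check which schema version Optimize has currently stored before running the update script. A minimal sketch, assuming Elasticsearch runs on `localhost:9200` and the default `optimize` index prefix is used; the exact name of the metadata index can vary between Optimize versions, hence the wildcard:

```bash
# Look up the schema version Optimize has stored in its metadata index.
curl 'http://localhost:9200/optimize-metadata*/_search?pretty'
```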
diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_370_380.diff b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_370_380.diff deleted file mode 100644 index 1d7499fca21..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_370_380.diff +++ /dev/null @@ -1,165 +0,0 @@ -diff --git a/backend/src/main/resources/localization/en.json b/backend/src/main/resources/localization/en.json -index b5df6a771..4f81c6834 100644 ---- a/backend/src/main/resources/localization/en.json -+++ b/backend/src/main/resources/localization/en.json -@@ -3,6 +3,7 @@ - "navigation": { - "homepage": "Home", - "analysis": "Analysis", -+ "processes": "Processes", - "events": "Event Based Processes", - "telemetry": "Telemetry Settings", - "logout": "Logout", -@@ -160,6 +161,45 @@ - "empty": "There are no items created yet", - "welcome": "Welcome" - }, -+ "processes": { -+ "title": "Processes", -+ "empty": "There are no processes imported yet", -+ "owner": "Owner", -+ "timeGoal": "Goal: Time", -+ "displayData": "Goals display data from instances", -+ "endedThisMonth": "ended in the last 30 days", -+ "setGoal": "Set Goal", -+ "editGoal": "Edit Goal", -+ "goals": "Goals", -+ "goalRemoved": "Goals removed from '{processName}' process", -+ "addOwner": "Add Owner", -+ "editOwner": "Edit Owner", -+ "addProcessOwner": "Add Process Owner", -+ "ownerInfo": "The process owner is responsible for business metrics for the process. You can use the process owner to sort and filter processes.", -+ "ownerRemoveWarning": "The '{owner}' Owner will be removed from the process", -+ "noData": "No Data", -+ "timeGoals": { -+ "label": "Time goals", -+ "configure": "Configure duration goals", -+ "setDuration": "Set a duration goal by setting a percentage of process instances that should take less than a certain duration to complete.", -+ "availableGoals": "You can use duration goals to track how many instances are fully automated (target duration) or meet your service level agreements (SLAs).", -+ "targetDuration": "Target", -+ "slaDuration": "SLA", -+ "instancesTake": "of process instances take", -+ "instancesTook": "of instances took less than", -+ "lessThan": "less than", -+ "displayGoal": "Display goal", -+ "durationDistribution": "Duration distribution", -+ "durationDistributionInfo": "This chart shows durations for process instances ending in the current month.", -+ "saveGoals": "Save Goals", -+ "updateGoals": "Update Goals", -+ "resultPreview": "Result Preview:", -+ "instancesThisMonth": "Instances ended in the last 30 days", -+ "noInstances": "No instances occured in the time range.", -+ "setGoals": "Set goals for expected durations.", -+ "resultInfo": "This preview displays each goal's status based on its current configuration." 
-+ } -+ }, - "events": { - "new": "New Process", - "autogenerate": "Autogenerate", -@@ -313,6 +353,7 @@ - "userTask": "User Task", - "count": "Count", - "duration": "Duration", -+ "percentage": "Percentage", - "multi": "Count and Duration", - "evaluationCount": "Evaluation Count", - "variable": "Variable", -@@ -398,7 +439,15 @@ - "process": "Process Definition", - "process-plural": "Process Definitions", - "decision": "Decision Definition", -- "decision-plural": "Decision Definitions" -+ "decision-plural": "Decision Definitions", -+ "variables": { -+ "rename": "Rename Variables", -+ "variableName": "Variable Name", -+ "type": "Type", -+ "newName": "New Name", -+ "renameInfo": "You may provide a UI-only alias to override each over your variable names.", -+ "renameWarning": "Warning! Changes will update across all reports using this process definition. This can affect other users." -+ } - }, - "noDefinitionMessage": { - "process": "Select a Process Definition.", -@@ -474,21 +523,23 @@ - }, - "aggregation": { - "userTaskLegend": "User Task Duration", -- "durationLegend": "Duration aggregations", -+ "durationLegend": "Basic aggregations", - "variableLegend": "Variable aggregations", -+ "percentileLegend": "Percentile aggregations", - "sum": "Sum", - "min": "Minimum", - "avg": "Average", - "median": "Median", - "max": "Maximum", -- "multiProcessWarning": "Reports that are grouped by process cannot display median aggregations." -+ "p50": "P50 (median)" - }, - "aggregationShort": { - "sum": "Sum", - "min": "Min", - "avg": "Avg", - "median": "Med", -- "max": "Max" -+ "max": "Max", -+ "percentile": "P{value}" - }, - "userTaskDuration": { - "idle": "Idle", -@@ -557,7 +608,8 @@ - "runningEndedFlowNodeWarning": "Only completed flow nodes are considered when grouping by End Date. Therefore, adding 'running' flow node status filter will show no results", - "missingVariable": "Missing variable", - "nonExistingVariable": "Variable does not exist", -- "nonExistingFlowNode": "Flow Node(s) does not exist" -+ "nonExistingFlowNode": "Flow Node(s) does not exist", -+ "percentageOfInstances": "% of total instances that match the filter" - }, - "dashboard": { - "label": "Dashboard", -@@ -836,6 +888,7 @@ - "for": "for", - "download": "Download", - "view": "View", -+ "viewDocumentation": "View documentation", - "process": { - "label": "Process", - "label-plural": "Processes" -@@ -979,6 +1032,7 @@ - "completedOrCanceledFlowNodesOnly": "Completed or Canceled", - "includesOpenIncident": "Open", - "includesResolvedIncident": "Resolved", -+ "includesClosedIncident": "Closed", - "doesNotIncludeIncident": "No" - }, - "modalLabels": { -@@ -994,6 +1048,7 @@ - "completedOrCanceledFlowNodesOnly": "Completed or Canceled", - "includesOpenIncident": "Open Incidents", - "includesResolvedIncident": "Resolved Incidents", -+ "includesClosedIncident": "Closed Incidents", - "doesNotIncludeIncident": "Without Incidents" - } - }, -@@ -1209,7 +1264,9 @@ - "process": "Process", - "processEvents": "process events", - "ingestedEvents": "Events", -- "items": "Items" -+ "items": "Items", -+ "goals": "Goals", -+ "owner": "Owner" - }, - "permanent": "The '{name}' {type} will be permanently deleted.", - "noUndo": "Warning: This action cannot be undone.", -@@ -1244,7 +1301,8 @@ - }, - "csvLimit": { - "Warning": "Warning: CSV will not contain all requested data", -- "info": "This download will contain the first {exportLimit} of {totalCount} records. 
Filter your data, use the data export API, or ask your administrator to increase the system limit to resolve this issue." -+ "info": "This download will contain the first {exportLimit} of {totalCount} records. Filter your data, or ask your administrator to increase the system limit to resolve this issue.", -+ "exportApi": "You can also use the data export API to export larger amounts of data from Optimize." - }, - "unit": { - "automatic": "automatic", diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_380_390_preview_1.diff b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_380_390_preview_1.diff deleted file mode 100644 index 284d8d264e6..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_380_390_preview_1.diff +++ /dev/null @@ -1,134 +0,0 @@ -diff --git a/backend/src/main/resources/localization/en.json b/backend/src/main/resources/localization/en.json -index a42815a76..0df8800bc 100644 ---- a/backend/src/main/resources/localization/en.json -+++ b/backend/src/main/resources/localization/en.json -@@ -165,19 +165,23 @@ - "title": "Processes", - "empty": "There are no processes imported yet", - "owner": "Owner", -- "timeGoal": "Goal: Time", -+ "processOwner": "Process Owner", -+ "timeKpi": "KPI: Time", -+ "qualityKpi": "KPI: Quality", - "displayData": "Goals display data from instances", - "endedThisMonth": "ended in the last 30 days", - "setGoal": "Set Goal", - "editGoal": "Edit Goal", - "goals": "Goals", - "goalRemoved": "Goals removed from '{processName}' process", -- "addOwner": "Add Owner", -- "editOwner": "Edit Owner", -- "addProcessOwner": "Add Process Owner", - "ownerInfo": "The process owner is responsible for business metrics for the process. You can use the process owner to sort and filter processes.", - "ownerRemoveWarning": "The '{owner}' Owner will be removed from the process", - "noData": "No Data", -+ "processOverview": "Process Overview", -+ "configureProcess": "Configure Process", -+ "emailDigest": "Email Digest", -+ "digestInfo": "The process digest sends periodic emails with the existing KPI status to the Process Owner.", -+ "digestConfigured": "Your process digest has been successfully configured. An email has been sent to {name}.", - "timeGoals": { - "label": "Time goals", - "configure": "Configure duration goals", -@@ -321,10 +325,15 @@ - "noSources": "No Data Sources", - "templates": { - "blank": "Blank report", -+ "p75Duration": "KPI: 75th Percentile Duration", -+ "percentSLAMet": "KPI: % SLA Met", - "heatmap": "Heatmap: Flownode count", -- "number": "Number: Process instance duration", - "table": "Data Table: User task count", -- "chart": "Bar Chart: Process Instance count" -+ "chart": "Bar Chart: Process Instance count", -+ "percentNoIncidents": "KPI: Incident-Free Rate", -+ "percentSuccess": "KPI: Success Rate (edit filter to only show successful end events)", -+ "percentAutomated": "KPI: Automation Rate (edit filter to exclude user tasks)", -+ "number": "Number: Process instance duration" - }, - "instanceCount": { - "appliedFilters": "Applied filters", -@@ -445,8 +454,11 @@ - "variableName": "Variable Name", - "type": "Type", - "newName": "New Name", -- "renameInfo": "You may provide a UI-only alias to override each over your variable names.", -- "renameWarning": "Warning! 
Changes will update across all reports using this process definition. This can affect other users." -+ "renameInfo": "You may provide a UI-only alias that is displayed instead of the variable name defined in the Modeler. ", -+ "important": "Important!", -+ "followGuidelines": "Follow these guidelines to avoid unexpected behavior.", -+ "globalChanges": "Changes will update all reports using this process definition", -+ "useSameVariable": "Use the same alias for variables that appear in multiple process definitions" - } - }, - "noDefinitionMessage": { -@@ -504,7 +516,7 @@ - }, - "progressBar": { - "invalid": "Invalid Configuration", -- "goal": "Goal" -+ "goal": "Target" - }, - "combined": { - "multiSelect": { -@@ -565,12 +577,14 @@ - "yAxis": "Y Axis Label" - }, - "goal": { -- "legend": "Goal", -+ "legend": "Set Target", - "goalValue": "Goal value", - "baseline": "Baseline", - "target": "Target", - "invalidInput": "Enter a positive number", -- "lessThanTargetError": "Target must be greater than baseline" -+ "lessThanTargetError": "Target must be greater than baseline", -+ "setKpi": "Display as a process KPI", -+ "kpiDescription": "The status of this metric will be displayed directly on the process in the process page." - }, - "pointMarkers": { - "legend": "Line points", -@@ -639,9 +653,13 @@ - "portfolioPerformance_subTitle": "View a summary of up to 10 processes", - "operationsMonitoring": "Operations monitoring", - "operationsMonitoring_subTitle": "Monitor active processes and incidents", -- "completedInstances": "Total Completed Process Instances", -- "runningInstances": "Running Process Instances", -- "aggregateDuration": "Aggregated Process Duration (To Do: View alternate duration aggregations)", -+ "30DayThroughput": "Throughput (30-day rolling)", -+ "p75Duration": "75th Percentile Duration", -+ "p99Duration": "99th Percentile Duration", -+ "percentSLAMet": "% SLA Met", -+ "percentNoIncidents": "Incident-Free Rate", -+ "percentSuccess": "Success Rate (edit filter to only show successful end events)", -+ "percentAutomated": "Automation Rate (edit filter to exclude user tasks)", - "flownodeDuration": "Which process steps take too much time? (To Do: Add Target values for these process steps)", - "controlChart": "Is my process within control?", - "flownodeFrequency": "How often is each process step run?", -@@ -889,6 +907,8 @@ - "download": "Download", - "view": "View", - "viewDocumentation": "View documentation", -+ "viewMore": "View More", -+ "configure": "Configure", - "process": { - "label": "Process", - "label-plural": "Processes" -@@ -1084,6 +1104,7 @@ - "list": { - "appliedTo": "Applied to", - "invalidDefinition": "Data Source is missing version or tenant selection.", -+ "totalInstanceWarning": "Filter applies to the total instance count", - "operators": { - "isBetween": "is between", - "between": "between", -@@ -1235,7 +1256,8 @@ - "limitReached": "Process definition limit reached. 
Only ten processes allowed.", - "select": { - "process": "Select Process", -- "decision": "Select Decision" -+ "decision": "Select Decision", -+ "multiProcess": "Select one or more processes" - }, - "version": { - "label": "Version", \ No newline at end of file diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_390_3100.diff b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_390_3100.diff deleted file mode 100644 index 1f9a7c670ba..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_390_3100.diff +++ /dev/null @@ -1,319 +0,0 @@ -diff --git a/backend/src/main/resources/localization/en.json b/backend/src/main/resources/localization/en.json -index 50aed4cec..456d79926 100644 ---- a/backend/src/main/resources/localization/en.json -+++ b/backend/src/main/resources/localization/en.json -@@ -1,9 +1,9 @@ - { -+ "companyName": "Camunda", - "appName": "Optimize", -+ "appFullName": "Camunda Optimize", - "navigation": { -- "homepage": "Home", - "analysis": "Analysis", -- "processes": "Processes", - "events": "Event Based Processes", - "telemetry": "Telemetry Settings", - "logout": "Logout", -@@ -11,9 +11,28 @@ - "logoutSuccess": "You logged out successfully.", - "configLoadingError": "An error occurred while loading the UI configurations", - "userGuide": "User Guide", -- "help": "Help" -+ "help": "Help", -+ "profile": "Profile", -+ "appSwitcher": "App Switcher", -+ "dashboards": "Dashboards", -+ "collections": "Collections", -+ "apps": { -+ "zeebe": "Zeebe", -+ "operate": "Operate", -+ "console": "Console", -+ "tasklist": "Tasklist", -+ "optimize": "Optimize", -+ "modeler": "Modeler" -+ }, -+ "privacyPolicy": "Privacy policy", -+ "termsOfUse": "Terms of use", -+ "imprint": "Imprint", -+ "academy": "Camunda Academy", -+ "feedback": "Feedback and Support" - }, - "login": { -+ "label": "Log in", - "appName": "Optimize", - "username": "Username", - "password": "Password", -@@ -31,6 +50,7 @@ - "timezone": "Date and Time displayed in local timezone:" - }, - "license": { -+ "label": "License", - "licensedFor": "Licensed for", - "validUntil": "Valid until", - "redirectMessage": "You will be redirected to login page shortly. Click here to go to login page immediately", -@@ -137,7 +157,6 @@ - "manager-description": "Full rights editing Collection and Users", - "existing-identity": "A user or a group with this id already exists", - "inCollection": "in this collection", -- "missingAuthorizationsWarning": "Some data may be hidden from users due to missing authorizations.", - "deleteWarning": "The '{name}' {type} will be removed from the Collection." 
- }, - "sources": { -@@ -162,12 +181,17 @@ - "empty": "There are no items created yet", - "contactEditor": "Please contact an Optimize editor to create new items", - "contactManager": "Please contact the collection manager to create new items", -- "welcome": "Welcome" -+ "welcome": "Welcome", -+ "emptyState": { -+ "title": "Start by creating a Dashboard", -+ "description": "Click Create New Dashboard to get insights into business processes" -+ } - }, - "processes": { - "label": "process", - "label-plural": "processes", -- "list": "Processes List", -+ "adoptionDashboard": "Adoption Dashboard", -+ "defaultDashboardAndKPI": "Process Default Dashboards and KPIs", - "empty": "There are no processes imported yet", - "owner": "Owner", - "processOwner": "Process Owner", -@@ -180,8 +204,9 @@ - "goalRemoved": "Goals removed from '{processName}' process", - "ownerInfo": "The process owner is responsible for business metrics for the process. You can use the process owner to sort and filter processes.", - "ownerRemoveWarning": "The '{owner}' Owner will be removed from the process", -- "processOverview": "Processes Overview", - "configureProcess": "Configure Process", -+ "createDefaultDashboard": "Create New Default Dashboard", -+ "createDashboardMessage": "There is no dashboard for this process yet. Would you like Optimize to automatically create a dashboard based on a process template for you?", - "emailDigest": "Email Digest", - "digestInfo": "The process digest sends periodic emails with the existing KPI status to the Process Owner. By default, the digest will be sent every Monday at 9:00 AM (To change the default send time, please contact your administrator).", - "digestConfigured": "Your process digest has been successfully configured. An email has been sent to {name}.", -@@ -318,6 +343,8 @@ - "report": { - "label": "Report", - "label-plural": "Reports", -+ "textReport": "Text Report", -+ "externalUrl": "External URL", - "create": "Create Report", - "createNew": "Create New Report", - "new": "New Report", -@@ -330,6 +357,7 @@ - "displayName": "Display Name", - "displayNamePlaceholder": "Add optional display name", - "noSources": "No Data Sources", -+ "copyTooltip": "Copy {entity} to compare and analyze variants.
    Documentation", - "templates": { - "blank": "Blank report", - "p75Duration": "KPI: 75th Percentile Duration", -@@ -400,6 +428,13 @@ - "hour": "Hour", - "searchForVariable": "Search for variable…" - }, -+ "sorting": { -+ "label": "Sorting Order", -+ "order": { -+ "asc": "Ascending", -+ "desc": "Descending" -+ } -+ }, - "visualization": { - "label": "Visualization", - "number": "Number", -@@ -439,6 +474,11 @@ - "exceededInstances": "Exceeded-instances-{name}", - "noValueAvailable": "No actual value available.
    Cannot compare target and actual value." - }, -+ "updateReportPreview": { -+ "switchLabel": "Update Preview Automatically", -+ "buttonLabel": "Run", -+ "cannotUpdate": "This function only works with automatic preview update turned on" -+ }, - "invalidCombinationError": "Cannot display data for the given report settings. Please choose another combination!", - "noDataNotice": "No data", - "errorNotice": "Error loading data", -@@ -518,7 +558,10 @@ - "inputVariable": "InputVar", - "outputVariable": "OutputVar", - "objectVariable": "Object Variable", -- "numberOfOpenIncidents": "Open Incidents Count" -+ "numberOfOpenIncidents": "Open Incidents Count", -+ "numberOfIncidents": "Incidents Count", -+ "numberOfUserTasks": "Executed User Tasks Count", -+ "flowNodeDuration": "Dur" - }, - "pageError": "Only the first 10,000 instances can be displayed" - }, -@@ -598,16 +641,18 @@ - "legend": "Line points", - "enableMarkers": "Enable point markers" - }, -- "stackedBars": { -+ "display": { - "legend": "Display", -- "enableStackedBars": "Stacked bars" -+ "enableStackedBars": "Stacked bars", -+ "horizontalBars": "Horizontal bars" - }, - "limitPrecision": { - "legend": "Custom Precision", - "numberOf": { - "digits": "No. of digits", - "units": "No. of units" -- } -+ }, -+ "tooltip": "Precision of values displayed in the chart tooltips." - }, - "includeTableColumn": "Table columns to include", - "includeNewVariables": "Display data from new variables", -@@ -707,14 +752,17 @@ - "durationSLI": "Process Duration SLI Tracking" - }, - "addButton": { -- "addReport": "Add a Report", -+ "addTile": "Add a Tile", - "optimizeReport": "Optimize Report", - "addReportLabel": "Add Report", -+ "addTileLabel": "Add Tile", - "selectReportPlaceholder": "Select a Report", - "noReports": "No reports have been created", -- "externalUrl": "External URL", -- "newReport": "New Report from a template" -+ "externalWebsite": "External Website", -+ "newReport": "New Report from a template", -+ "text": "Text" - }, -+ "textReportEditNotification": "This is a feature we plan to build", - "noAuthorization": "Missing authorization", - "noReportAccess": "No access to report", - "filter": { -@@ -874,7 +922,7 @@ - "copy": "Copy", - "addACopy": "Add a copy", - "export": "Export", -- "importJSON": "Import JSON", -+ "importReportDashboard": "Import Report/Dashboard", - "change": "Change", - "copyName": "Copy {name}", - "copyLabel": "copy", -@@ -920,6 +968,7 @@ - "viewMore": "View More", - "viewLess": "View Less", - "configure": "Configure", -+ "new": "New", - "process": { - "label": "Process", - "label-plural": "Processes" -@@ -1111,7 +1160,11 @@ - "flowNodeSelection": "Flow Node Selection", - "flowNodeDate": "Flow Node Date", - "flowNodeStartDate": "Flow Node Start Date", -- "flowNodeEndDate": "Flow Node End Date" -+ "flowNodeEndDate": "Flow Node End Date", -+ "variable-plural": "Variables", -+ "inputVariable-plural": "Input Variables", -+ "outputVariable-plural": "Output Variables", -+ "flowNodeDuration-plural": "Flow Nodes Duration" - }, - "list": { - "appliedTo": "Applied to", -@@ -1420,6 +1473,7 @@ - "notAuthorizedError": "Could not authenticate you, please check your credentials.", - "notFoundError": "The server could not find the requested resource.", - "badRequestError": "The server was unable to process the request.", -+ "elasticsearchConnectionError": "The server has encountered Elasticsearch connection issues.", - "nonTenantScopeCompliantConflict": "Could not apply action due to conflicts with the collection data source. 
The definition for the report is available in the data source yet at least one tenant defined in the report is not available in the data source.", - "nonDefinitionScopeCompliantConflict": "Could not apply action due to conflicts with the collection data source. The report definition is not defined in the data source.", - "invalidLicenseError": "Invalid license provided.", -@@ -1431,6 +1485,89 @@ - "importDefinitionDoesNotExist": "Import failed because the imported entity requires definitions that don't exist.", - "importIndexVersionMismatch": "Import failed because the data structure of the imported entities do not match the current data structure. Please migrate the data in your source Optimize before exporting to ensure the Optimize Version matches.", - "importDefinitionForbidden": "Import failed because you are not authorized to access some of the required definitions for the imported entity.", -- "importFileInvalid": "Import failed because the provided file was invalid. Only JSON files exported from Optimize can be imported." -+ "importFileInvalid": "Import failed because the provided file was invalid. Only JSON files exported from Optimize can be imported.", -+ "invalidAlertEmailAddresses": "Users with the following email addresses are not available for receiving alerts: {invalidAlertEmails}" -+ }, -+ "textEditor": { -+ "toolbar": { -+ "history": { -+ "undo": "Undo", -+ "redo": "Redo" -+ }, -+ "align": { -+ "label": "Align", -+ "left": "Left", -+ "center": "Center", -+ "right": "Right" -+ }, -+ "styles": { -+ "bold": "Bold", -+ "italic": "Italic", -+ "underline": "Underline", -+ "strikethrough": "Strikethrough", -+ "code": "Code" -+ }, -+ "blockStyles": { -+ "bullet": "Bulleted List", -+ "code": "Code Block", -+ "h1": "Heading 1", -+ "h2": "Heading 2", -+ "h3": "Heading 3", -+ "number": "Numbered List", -+ "paragraph": "Normal", -+ "quote": "Quote Block" -+ }, -+ "insert": { -+ "label": "Insert", -+ "horizontalRule": "Horizontal Rule", -+ "image": "Image", -+ "link": "Link" -+ } -+ }, -+ "plugins": { -+ "images": { -+ "title": "Insert Image", -+ "urlLabel": "Image URL", -+ "urlPlaceholder": "https://picture/some-picture.jpg", -+ "altTextLabel": "Image name (optional)", -+ "altTextPlaceholder": "Some picture" -+ }, -+ "link": { -+ "title": "Insert Link", -+ "urlLabel": "Link URL", -+ "urlPlaceholder": "https://example.com", -+ "altTextLabel": "Display text (optional)", -+ "altTextPlaceholder": "Link to example.com" -+ } -+ } -+ }, -+ "managementDashboard": { -+ "dashboardName": "Adoption Dashboard", -+ "report": { -+ "processInstanceUsage": "Process Instance Usage", -+ "incidentFreeRate": "Overall Incident-Free Rate", -+ "automationRate": "Automation Rate (<1 hour)", -+ "longRunningInstances": "Long-Running Process Instances", -+ "automationCandidates": "Automation Candidates", -+ "activeBottlenecks": "Active Bottlenecks" -+ } -+ }, -+ "instantDashboard": { -+ "dashboardName": "Process performance overview", -+ "kpiDashboardName": "KPI Dashboard", -+ "report": { -+ "percentSLAMet": "% SLA Met", -+ "flownodeDuration": "Which process steps take too much time? 
(To Do: Add Target values for these process steps)", -+ "controlChart": "Is my process within control?", -+ "activeIncidentsHeatmap": "Where are the active incidents?", -+ "percentNoIncidents": "Incident-Free Rate", -+ "incidentDurationHeatmap": "Where are the worst incidents?", -+ "p99Duration": "99th Percentile Duration", -+ "incidentDurationTrend": "Are we improving incident handling?", -+ "30DayThroughput": "Throughput (30-day rolling)", -+ "p75Duration": "75th Percentile Duration", -+ "instanceTrends": "How frequently is this process run?", -+ "flownodeFrequency": "How often is each process step run?" -+ } - } - } diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_390_preview_1_390.diff b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_390_preview_1_390.diff deleted file mode 100644 index 33a60ad1874..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/migration-update/translation-diffs/differences_localization_390_preview_1_390.diff +++ /dev/null @@ -1,267 +0,0 @@ -diff --git a/backend/src/main/resources/localization/en.json b/backend/src/main/resources/localization/en.json -index a42815a76..50aed4cec 100644 ---- a/backend/src/main/resources/localization/en.json -+++ b/backend/src/main/resources/localization/en.json -@@ -88,10 +88,10 @@ - "home": { - "createBtn": { - "default": "Create New", -- "collection": "New Collection", -- "dashboard": "New Dashboard", -+ "collection": "Collection", -+ "dashboard": "Dashboard", - "report": { -- "default": "New Report", -+ "default": "Report", - "process": "Process Report", - "combined": "Combined Process Report", - "decision": "Decision Report" -@@ -145,6 +145,7 @@ - "decision": "Decision Table", - "process": "Process", - "notCreated": "There are no sources added yet", -+ "contactManager": "Please contact the collection manager to add new sources", - "add": "Add Source", - "definitionName": "Definition Name", - "deleteWarning": "The '{name}' Data Source will be removed from the Collection.", -@@ -159,25 +160,35 @@ - "unauthorizedTenant": "(Unauthorized Tenant)" - }, - "empty": "There are no items created yet", -+ "contactEditor": "Please contact an Optimize editor to create new items", -+ "contactManager": "Please contact the collection manager to create new items", - "welcome": "Welcome" - }, - "processes": { -- "title": "Processes", -+ "label": "process", -+ "label-plural": "processes", -+ "list": "Processes List", - "empty": "There are no processes imported yet", - "owner": "Owner", -- "timeGoal": "Goal: Time", -- "displayData": "Goals display data from instances", -- "endedThisMonth": "ended in the last 30 days", -+ "processOwner": "Process Owner", -+ "timeKpi": "KPI: Time", -+ "qualityKpi": "KPI: Quality", -+ "kpiInfo": "KPI values are based on certain reports.", - "setGoal": "Set Goal", - "editGoal": "Edit Goal", - "goals": "Goals", - "goalRemoved": "Goals removed from '{processName}' process", -- "addOwner": "Add Owner", -- "editOwner": "Edit Owner", -- "addProcessOwner": "Add Process Owner", - "ownerInfo": "The process owner is responsible for business metrics for the process. 
You can use the process owner to sort and filter processes.", - "ownerRemoveWarning": "The '{owner}' Owner will be removed from the process", -- "noData": "No Data", -+ "processOverview": "Processes Overview", -+ "configureProcess": "Configure Process", -+ "emailDigest": "Email Digest", -+ "digestInfo": "The process digest sends periodic emails with the existing KPI status to the Process Owner. By default, the digest will be sent every Monday at 9:00 AM (To change the default send time, please contact your administrator).", -+ "digestConfigured": "Your process digest has been successfully configured. An email has been sent to {name}.", -+ "configureKpis": "Learn more about KPIs", -+ "analysing": "Analysing {count} {label}.", -+ "processesListed": "{total} {label} listed", -+ "processesListedOf": "{count} of {total} {label} listed", - "timeGoals": { - "label": "Time goals", - "configure": "Configure duration goals", -@@ -321,10 +332,15 @@ - "noSources": "No Data Sources", - "templates": { - "blank": "Blank report", -+ "p75Duration": "KPI: 75th Percentile Duration", -+ "percentSLAMet": "KPI: % SLA Met", - "heatmap": "Heatmap: Flownode count", -- "number": "Number: Process instance duration", - "table": "Data Table: User task count", -- "chart": "Bar Chart: Process Instance count" -+ "chart": "Bar Chart: Process Instance count", -+ "percentNoIncidents": "KPI: Incident-Free Rate", -+ "percentSuccess": "KPI: Success Rate (edit filter to only show successful end events)", -+ "percentAutomated": "KPI: Automation Rate (edit filter to exclude user tasks)", -+ "number": "Number: Process instance duration" - }, - "instanceCount": { - "appliedFilters": "Applied filters", -@@ -445,8 +461,11 @@ - "variableName": "Variable Name", - "type": "Type", - "newName": "New Name", -- "renameInfo": "You may provide a UI-only alias to override each over your variable names.", -- "renameWarning": "Warning! Changes will update across all reports using this process definition. This can affect other users." -+ "renameInfo": "You may provide a UI-only alias that is displayed instead of the variable name defined in the Modeler. ", -+ "important": "Important!", -+ "followGuidelines": "Follow these guidelines to avoid unexpected behavior.", -+ "globalChanges": "Changes will update all reports using this process definition", -+ "useSameVariable": "Use the same alias for variables that appear in multiple process definitions" - } - }, - "noDefinitionMessage": { -@@ -498,13 +517,14 @@ - "multipleVariable": "Var", - "inputVariable": "InputVar", - "outputVariable": "OutputVar", -- "objectVariable": "Object Variable" -+ "objectVariable": "Object Variable", -+ "numberOfOpenIncidents": "Open Incidents Count" - }, - "pageError": "Only the first 10,000 instances can be displayed" - }, - "progressBar": { - "invalid": "Invalid Configuration", -- "goal": "Goal" -+ "goal": "Target" - }, - "combined": { - "multiSelect": { -@@ -565,12 +585,14 @@ - "yAxis": "Y Axis Label" - }, - "goal": { -- "legend": "Goal", -+ "legend": "Set Target", - "goalValue": "Goal value", - "baseline": "Baseline", - "target": "Target", - "invalidInput": "Enter a positive number", -- "lessThanTargetError": "Target must be greater than baseline" -+ "lessThanTargetError": "Target must be greater than baseline", -+ "setKpi": "Display as a process KPI", -+ "kpiDescription": "The status of this metric will be displayed directly on the process in the process page." 
- }, - "pointMarkers": { - "legend": "Line points", -@@ -581,7 +603,7 @@ - "enableStackedBars": "Stacked bars" - }, - "limitPrecision": { -- "legend": "Limit Precision", -+ "legend": "Custom Precision", - "numberOf": { - "digits": "No. of digits", - "units": "No. of units" -@@ -639,9 +661,13 @@ - "portfolioPerformance_subTitle": "View a summary of up to 10 processes", - "operationsMonitoring": "Operations monitoring", - "operationsMonitoring_subTitle": "Monitor active processes and incidents", -- "completedInstances": "Total Completed Process Instances", -- "runningInstances": "Running Process Instances", -- "aggregateDuration": "Aggregated Process Duration (To Do: View alternate duration aggregations)", -+ "30DayThroughput": "Throughput (30-day rolling)", -+ "p75Duration": "75th Percentile Duration", -+ "p99Duration": "99th Percentile Duration", -+ "percentSLAMet": "% SLA Met", -+ "percentNoIncidents": "Incident-Free Rate", -+ "percentSuccess": "Success Rate (edit filter to only show successful end events)", -+ "percentAutomated": "Automation Rate (edit filter to exclude user tasks)", - "flownodeDuration": "Which process steps take too much time? (To Do: Add Target values for these process steps)", - "controlChart": "Is my process within control?", - "flownodeFrequency": "How often is each process step run?", -@@ -682,12 +708,12 @@ - }, - "addButton": { - "addReport": "Add a Report", -- "selectReport": "Select Report", -- "addExternal": "Add External Source", -+ "optimizeReport": "Optimize Report", - "addReportLabel": "Add Report", - "selectReportPlaceholder": "Select a Report", - "noReports": "No reports have been created", -- "externalUrl": "External URL" -+ "externalUrl": "External URL", -+ "newReport": "New Report from a template" - }, - "noAuthorization": "Missing authorization", - "noReportAccess": "No access to report", -@@ -712,8 +738,8 @@ - }, - "types": { - "state": "Instance State", -- "instanceStartDate": "Start Date", -- "instanceEndDate": "End Date", -+ "instanceStartDate": "Instance Start Date", -+ "instanceEndDate": "Instance End Date", - "variable": "Variable", - "runningInstancesOnly": "Running", - "completedInstancesOnly": "Completed", -@@ -765,6 +791,7 @@ - "apply": "Apply Changes", - "create": "Create Alert", - "notCreated": "There are no Alerts created yet", -+ "contactManager": "Please contact the collection manager to create new Alerts", - "inactiveStatus": "Alert inactive", - "activateInfo": "To active add email or target system", - "existingAlerts": "Existing alerts", -@@ -884,11 +911,15 @@ - "dataSource": "Data Source", - "selected": "Selected", - "documentation": "documentation", -+ "here": "here", - "open": "Open", - "for": "for", - "download": "Download", - "view": "View", - "viewDocumentation": "View documentation", -+ "viewMore": "View More", -+ "viewLess": "View Less", -+ "configure": "Configure", - "process": { - "label": "Process", - "label-plural": "Processes" -@@ -1054,19 +1085,20 @@ - } - }, - "types": { -- "instanceState": "Process Instance State", -+ "instanceState": "Instance State", - "date": "Instance Date", - "assignee": "Assignee", - "candidateGroup": "Candidate Group", - "instanceStartDate": "Start Date", - "instanceEndDate": "End Date", -- "duration": "Duration", - "instance": "Process instance", -- "instanceDuration": "Process Instance Duration", -+ "duration": "Duration", -+ "instanceDuration": "Instance Duration", -+ "processInstanceDuration": "Process Instance Duration", - "flowNodeDuration": "Flow Node Duration", - "variable": 
"Variable", - "multipleVariable": "Variable", -- "flowNode": "Flow Node", -+ "flowNode": "Flow Node Execution", - "evaluationDateTime": "Evaluation Date Time", - "inputVariable": "Input Variable", - "outputVariable": "Output Variable", -@@ -1084,6 +1116,7 @@ - "list": { - "appliedTo": "Applied to", - "invalidDefinition": "Data Source is missing version or tenant selection.", -+ "totalInstanceWarning": "Filter applies to the total instance count", - "operators": { - "isBetween": "is between", - "between": "between", -@@ -1216,6 +1249,10 @@ - "view": "Filter Flow Nodes", - "decision": "Filter Decision Evaluations" - }, -+ "dropdownInfo": { -+ "instance": "Return Instances filtered by...", -+ "view": "Return Flow Node Data filtered by..." -+ }, - "allVisible": { - "instance": "All instances visible", - "view": "All Flow Node data visible", -@@ -1235,7 +1272,8 @@ - "limitReached": "Process definition limit reached. Only ten processes allowed.", - "select": { - "process": "Select Process", -- "decision": "Select Decision" -+ "decision": "Select Decision", -+ "multiProcess": "Select one or more processes" - }, - "version": { - "label": "Version", diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin.md deleted file mode 100644 index 7824bf222be..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: businesskey-import-plugin -title: "Business key import customization" -description: "Adapt the process instance import so you can customize the associated business keys." ---- - -Camunda 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature enables you to customize business keys during the process instance import, e.g. if your business keys contain sensitive information that requires anonymization. - -The Optimize plugin system contains the following interface: - -```java -public interface BusinessKeyImportAdapter { - - String adaptBusinessKeys(String businessKey); -} -``` - -Implement this to adjust the business keys of the process instances to be imported. Given is the business key of a process instance that would be imported if no further action is performed. The returned string is the customized business key of the process instance that will be imported. - -The following shows an example of a customization of business keys during the process instance import in the package `optimize.plugin` where every business key is set to 'foo'. - -```java -package org.mycompany.optimize.plugin; - -import org.camunda.optimize.plugin.importing.businesskey.BusinessKeyImportAdapter; -import java.util.List; - - public class MyCustomBusinessKeyImportAdapter implements BusinessKeyImportAdapter { - - @Override - public String adaptBusinessKey(String businessKey) { - return "foo"; - } - -} -``` - -Now, when `MyCustomBusinessKeyImportAdapter`, packaged as a `jar` file, is added to Optimize's `plugin` folder, we just have to add the following property to the `environment-config.yaml` file: - -```yaml -plugin: - businessKeyImport: - # Look in the given base package list for businesskey import adaption plugins. - # If empty, the import is not influenced. 
- basePackages: ["org.mycompany.optimize.plugin"] -``` - -For more information on how this plugin works, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-business-key-import-plugins). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/decision-import-plugin.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/decision-import-plugin.md deleted file mode 100644 index 6b26a796bf8..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/decision-import-plugin.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -id: decision-import-plugin -title: "Decision inputs and outputs import customization" -description: "Enrich or filter the Decision inputs and outputs so you can customize which and how these are imported to Optimize." ---- - -Camunda 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature enables you to enrich, modify, or filter the decision input and output instances, e.g., if instances in Camunda contain IDs of instances in another database and you would like to resolve those references to the actual values. - -The plugin system contains the following interfaces: - -```java -public interface DecisionInputImportAdapter { - - List adaptInputs(List inputs); -} -``` - -```java -public interface DecisionOutputImportAdapter { - - List adaptOutputs(List outputs); -} -``` - -Implement these to adjust the input and output instances to be imported. The methods take a list of instances that would be imported if no further action is performed as parameter. The returned list is the customized list with the enriched/filtered instances that will be imported. To create new instances, you can use the `PluginDecisionInputDto` and `PluginDecisionOutputDto` classes as data transfer object (DTO), which are also contained in the plugin system. - -:::note -All class members need to be set in order, otherwise the instance is ignored, as this may lead to problems during data analysis. - -The data from the engine is imported in batches. This means the `adaptInput/adaptOutput` method is called once per batch rather than once for all data. For instance, if you have 100 000 decision instances in total and if the batch size is 10,000, the plugin function will be called 10 times. -::: - -Next, package your plugin into a `jar` file and then add the `jar` file to the `plugin` folder of your Optimize directory. Finally, add the name of the base package of your custom `DecisionOutputImportAdapter/DecisionInputImportAdapter` to the `environment-config.yaml` file: - -```yaml -plugin: - decisionInputImport: - # Look in the given base package list for decision input import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] - decisionOutputImport: - # Look in the given base package list for decision output import adaption plugins. - # If empty, the import is not influenced. 
- basePackages: ["org.mycompany.optimize.plugin"] -``` - -The following shows an example of a customization of the decision input import in the package `org.mycompany.optimize.plugin`, where every string input is assigned the value 'foo': - -```java -package org.mycompany.optimize.plugin; - -import org.camunda.optimize.plugin.importing.variable.DecisionInputImportAdapter; -import org.camunda.optimize.plugin.importing.variable.PluginDecisionInputDto; - -import java.util.List; - -public class SetAllStringInputsToFoo implements DecisionInputImportAdapter { - - public List adaptInputs(List inputs) { - for (PluginDecisionInputDto input : inputs) { - if (input.getType().toLowerCase().equals("string")) { - input.setValue("foo"); - } - } - return inputs; - } -} -``` - -Now, when `SetAllStringInputsToFoo`, packaged as a `jar` file, is added to the `plugin` folder, we just have to add the following property to the `environment-config.yaml` file to make the plugin work: - -```yaml -plugin: - decisionInputImport: - # Look in the given base package list for decision input import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-decision-import-plugins). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/elasticsearch-header.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/elasticsearch-header.md deleted file mode 100644 index 4c19fb1047c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/elasticsearch-header.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: elasticsearch-header -title: "Elasticsearch header" -description: "Register your own hook into the Optimize Elasticsearch client to add custom headers to requests." ---- - -Camunda 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature allows you to register your own hook into the Optimize Elasticsearch client, allowing you to add custom headers to all requests made to Elasticsearch. The plugin is invoked before every request to Elasticsearch is made, allowing different -headers and values to be added per request. This plugin is also loaded during the update and reimport. - -For that, the Optimize plugin system provides the following interface: - -```java -public interface ElasticsearchCustomHeaderSupplier { - - CustomHeader getElasticsearchCustomHeader(); -} -``` - -Implement this interface and return the custom header you would like to be added to Elasticsearch requests. 
The `CustomHeader` -class has a single Constructor taking two arguments, as follows: - -```java -public CustomHeader(String headerName, String headerValue) -``` - -The following example returns a header that will be added: - -```java -package com.example.optimize.elasticsearch.headers; - -import org.camunda.optimize.plugin.elasticsearch.CustomHeader; -import org.camunda.optimize.plugin.elasticsearch.ElasticsearchCustomHeaderSupplier; - -public class AddAuthorizationHeaderPlugin implements ElasticsearchCustomHeaderSupplier { - - private String currentToken; - - public CustomHeader getElasticsearchCustomHeader() { - if (currentToken == null || currentTokenExpiresWithinFifteenMinutes()) { - currentToken = fetchNewToken(); - } - return new CustomHeader("Authorization", currentToken); - } -} -``` - -Similar to the other plugins' setup, you have to package your plugin in a `jar`, add it to Optimize's `plugin` folder, and make Optimize find it by adding the following configuration to `environment-config.yaml`: - -```yaml -plugin: - elasticsearchCustomHeader: - # Look in the given base package list for Elasticsearch custom header fetching plugins. - # If empty, ES requests are not influenced. - basePackages: ["com.example.optimize.elasticsearch.headers"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-elasticsearch-header-plugins). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin.md deleted file mode 100644 index 3ab8fc6c08f..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: engine-rest-filter-plugin -title: "Engine REST filter" -description: "Register your own REST filter that is called for every REST call to the engine." ---- - -Camunda 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature allows you to register your own filter that is called for every REST call to one of the configured process engines. -For that, the Optimize plugin system provides the following interface: - -```java -public interface EngineRestFilter { - - void filter(ClientRequestContext requestContext, String engineAlias, String engineName) throws IOException; -} -``` - -Implement this interface to adjust the JAX-RS client request, which is represented by `requestContext`, sent to the process engine's REST API. -If the modification depends on the process engine, you can analyze the value of `engineAlias` and/or `engineName` to decide what adjustment is needed. 
- -The following example shows a filter that simply adds a custom header to every REST call: - -```java -package com.example.optimize.enginerestplugin; - -import java.io.IOException; -import jakarta.ws.rs.client.ClientRequestContext; - -public class AddCustomTokenFilter implements EngineRestFilter { - - @Override - public void filter(ClientRequestContext requestContext, String engineAlias, String engineName) throws IOException { - requestContext.getHeaders().add("Custom-Token", "SomeCustomToken"); - } - -} -``` - -Similar to other plugins, you have to package your plugin in a `jar`, add it to the `plugin` folder, and enable Optimize to find it by adding the following configuration to `environment-config.yaml`: - -```yaml -plugin: - engineRestFilter: - #Look in the given base package list for engine rest filter plugins. - #If empty, the REST calls are not influenced. - basePackages: ["com.example.optimize.enginerestplugin"] -``` diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/plugin-system.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/plugin-system.md deleted file mode 100644 index 9a2e01fd976..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/plugin-system.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -id: plugin-system -title: "Optimize plugin system" -description: "Explains the principle of plugins in Optimize and how they can be added." ---- - -Camunda 7 only - -Optimize allows you to adapt the behavior of Optimize, e.g. to decide which kind of data should be analyzed and to tackle technical issues. - -Have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples) to see some use cases for the plugin system and how plugins can be implemented and used. - -## Setup your environment - -First, add the Optimize plugin to your project via maven: - -```xml - - org.camunda.optimize - plugin - {{< currentVersionAlias >}} - -``` - -:::note -It is important to use the same plugin environment version as the Optimize version you plan to use. -Optimize rejects plugins that are built with different Optimize versions to avoid compatibility problems. -This also means that to update to newer Optimize versions it is necessary to build the plugin again with the new version. -::: - -To tell Maven where to find the plugin environment, add the following repository to your project: - -```xml - - - camunda-bpm-nexus - camunda-bpm-nexus - - https://artifacts.camunda.com/artifactory/camunda-optimize/ - - - -``` - -:::note -To make this work, you need to add your nexus credentials and the server to your `settings.xml`. -::: - -It is required to create an uber `jar` so Optimize can load third-party dependencies and to validate the used Optimize version. -You can add the following to your project: - -```xml - - install - - - org.apache.maven.plugins - maven-assembly-plugin - 3.1.0 - - - package - - single - - - ${project.artifactId} - - jar-with-dependencies - - - - - - - -``` - -:::note -By default, Optimize loads plugin classes isolated from the classes used in Optimize. -This allows you to use library versions for the plugin that differ from those used in Optimize. -::: - -If you want to use the provided Optimize dependencies instead, it is possible to exclude them from -the uber `jar` by setting the scope of those dependencies to `provided`. Then, Optimize does not load them from the plugin. 
-This might have side effects if the used version in the plugin is different to the one provided by Optimize. -To get an overview of what is already provided by Optimize, have a look at -the [third-party libraries]($docs$/reference/dependencies). - -## Debug your plugin - -To start Optimize in debug mode, execute the Optimize start script with a debug parameter. - -On Unix systems, this could look like the following - -- For the demo distribution: - -``` -./optimize-demo.sh --debug -``` - -- For the production distribution: - -``` -./optimize-startup.sh --debug -``` - -On a Windows system this could look like the following: - -- For the demo distribution: - -``` -.\optimize-demo.bat --debug -``` - -- For the production distribution: - -``` -.\optimize-startup.bat --debug -``` - -By default, this will open up a debug port on 9999. Once you have set this up, you need to open the project where you implemented the plugin in your favorite IDE and connect to the debug port. - -To change the default debug port, have a look into `optimize-startup.sh` on Linux/Mac or `optimize-startup.bat` on Windows systems. There, you should find a variable called `DEBUG_PORT` which allows you to customize the port. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/single-sign-on.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/single-sign-on.md deleted file mode 100644 index 4d2439a6918..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/single-sign-on.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: single-sign-on -title: "Single sign on" -description: "Register your own hook into the Optimize authentication system such that you can integrate Optimize with your single sign on system." ---- - -Camunda 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature allows you to register your own hook into the Optimize authentication system such that you can -integrate Optimize with your single sign on system. This allows you to skip the log in via the Optimize interface. - -For that, the Optimize plugin system provides the following interface: - -```java -public interface AuthenticationExtractor { - - AuthenticationResult extractAuthenticatedUser(HttpServletRequest servletRequest); -} -``` - -Implement this interface to extract your custom auth header from the JAX-RS servlet request, which is represented by `servletRequest`. -With the given request you are able to extract your information both from the request header and from the request cookies. 
- -The following example extracts a header with the name `user` and if the header exists, the user name from the header is authenticated: - -```java -package com.example.optimize.security.authentication; - -import org.camunda.optimize.plugin.security.authentication.AuthenticationExtractor; -import org.camunda.optimize.plugin.security.authentication.AuthenticationResult; - -import jakarta.servlet.http.HttpServletRequest; - -public class AutomaticallySignInUserFromHeaderPlugin implements AuthenticationExtractor { - - @Override - public AuthenticationResult extractAuthenticatedUser(HttpServletRequest servletRequest) { - String userToAuthenticate = servletRequest.getHeader("user"); - AuthenticationResult result = new AuthenticationResult(); - result.setAuthenticatedUser(userToAuthenticate); - result.setAuthenticated(userToAuthenticate != null); - return result; - } -} -``` - -Similar to the other plugins' setup, you have to package your plugin in a `jar`, add it to Optimize's `plugin` folder, and make Optimize find it by adding the following configuration to `environment-config.yaml`: - -```yaml -plugin: - authenticationExtractor: - # Looks in the given base package list for authentication extractor plugins. - # If empty, the standard Optimize authentication mechanism is used. - basePackages: ["com.example.optimize.security.authentication"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-sso-plugins). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/variable-import-plugin.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/variable-import-plugin.md deleted file mode 100644 index e049c30d77c..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/plugins/variable-import-plugin.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: variable-import-plugin -title: "Variable import customization" -description: "Enrich or filter the variable import so you can customize which and how variables are imported to Optimize." ---- - -Camunda 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature enables you to enrich or filter the variable import, e.g., if variables in Camunda contain IDs of variables in another database and you would like to resolve those references to the actual values. - -The Optimize plugin system contains the following interface: - -```java -public interface VariableImportAdapter { - - List adaptVariables(List variables); -} -``` - -Implement this to adjust the variables to be imported. Given is a list of variables that would be imported if no further action is performed. The returned list is the customized list with the enriched/filtered variables that will be imported. To create new variable instances, you can use the `PluginVariableDto` class as data transfer object (DTO), which is also contained in the plugin system. - -:::note -All DTO class members need to be set in order, otherwise the variable is ignored, as this may lead to problems during data analysis. - -The data from the engine is imported in batches. This means the `adaptVariables` method is called once per batch rather than once for all data. For instance, if you have 100,000 variables in total and the batch size is 10,000, the plugin function will be called 10 times. 
-::: - -The following shows an example of a customization of the variable import in the package `optimize.plugin`, where every string variable is assigned the value 'foo': - -```java -package org.mycompany.optimize.plugin; - -import org.camunda.optimize.plugin.importing.variable.PluginVariableDto; -import org.camunda.optimize.plugin.importing.variable.VariableImportAdapter; - -import java.util.List; - - public class MyCustomVariableImportAdapter implements VariableImportAdapter { - - @Override - public List adaptVariables(List list) { - for (PluginVariableDto pluginVariableDto : list) { - if(pluginVariableDto.getType().toLowerCase().equals("string")) { - pluginVariableDto.setValue("foo"); - } - } - return list; - } - -} -``` - -Now when `MyCustomVariableImportAdapter`, packaged as a `jar` file, is added to Optimize's `plugin` folder, we just have to add the following property to the `environment-config.yaml` file to make the plugin work: - -```yaml -plugin: - variableImport: - # Look in the given base package list for variable import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-variable-import-plugins). diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/reimport.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/reimport.md deleted file mode 100644 index 20f459d8828..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/reimport.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: reimport -title: "Camunda engine data reimport" -description: "Find out how to reimport Camunda engine data without losing your reports and dashboards." ---- - -Camunda 7 only - -There are cases where you might want to remove all Camunda 7 engine data from Optimize which has been imported from connected Camunda engines but don't want to lose Optimize entities such as collections, reports, or dashboards you created. - -:::note Warning! -Triggering a reimport causes the current data imported from the engine to be deleted and a new import cycle to be started. That also means that data which has already been removed from the engine (e.g. using the [history cleanup feature](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup)) is irreversibly lost. - -When triggering a reimport, all existing event-based processes get unpublished and reset to the `mapped` state. This is due to the fact that event-based processes may include Camunda engine data, yet the reimport does not take into account which sources event-based processes are actually based on and as such clears the data for all of them. - -You then have to manually publish event-based processes after you have restarted Optimize. -::: - -To reimport engine data, perform the following -steps: - -1. Stop Optimize, but keep Elasticsearch running (hint: to only start Elasticsearch without Optimize, you can use `elasticsearch-startup.sh` or `elasticsearch-startup.bat` scripts). -2. From the Optimize installation root run `./reimport/reimport.sh` on Linux or `reimport/reimport.bat` on Windows and wait for it to finish - - - In Docker environments, you can override the command the container executes on start to call the reimport script, e.g. 
in [docker-compose](https://docs.docker.com/compose/) this could look like the following: - - ``` - version: '2.4' - - services: - optimize: - image: registry.camunda.cloud/optimize-ee/optimize:latest - command: ["./reimport/reimport.sh"] - ``` - -3. Start Optimize again. Optimize will now import all the engine data from scratch. -4. If you made use of event-based processes you will have to manually publish them again. diff --git a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/version-policy.md b/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/version-policy.md deleted file mode 100644 index 8aca9db5574..00000000000 --- a/optimize_versioned_docs/version-3.10.0/self-managed/optimize-deployment/version-policy.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: version-policy -title: "Version policy" -description: "Learn about the Versioning policy for Camunda Optimize." ---- - -## Version Policy - -Camunda Optimize versions are denoted as X.Y.Z as well as by an optional [pre-release](https://semver.org/spec/v2.0.0.html#spec-item-9) version being either '-alpha-[0-9]' or '-preview-[0-9]'. X is the [major version](https://semver.org/spec/v2.0.0.html#spec-item-4), Y is the [minor version](https://semver.org/spec/v2.0.0.html#spec-item-7), Z is the [patch version](https://semver.org/spec/v2.0.0.html#spec-item-6) as defined by the [Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html) specification. - -## Release Cadence - -Camunda Optimize has a new release every month. Every six months, a new minor/major is published, while all other months an alpha and/or patch release are released. The release dates are aligned with the rest of the [Camunda 8 Platform]($docs$/reference/release-policy). - -## Pre-Release Versions - -There are two types of [pre-release](https://semver.org/spec/v2.0.0.html#spec-item-9) versions of Camunda Optimize that are published. - -### Alpha Releases - -There is an Optimize alpha release every month, excluding those in which a minor version is released. Alpha releases are intended for non-production usages in trying out recent, potentially yet unfinished new features. -They serve the purpose of early customer feedback and don't offer any update paths going forward. This means from running an alpha version there is no update possible to either the following alpha or any other following releases of Camunda Optimize. 
diff --git a/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/event-ingestion.md b/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/event-ingestion.md index 996539c0037..952e44ec10b 100644 --- a/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/event-ingestion.md +++ b/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/event-ingestion.md @@ -108,43 +108,45 @@ POST `/api/ingestion/event/batch` ##### Request body - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", - "source": "order-service", - "type": "orderValidated", - "time": "2020-01-01T10:00:10.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", - "source": "shipping-service", - "type": "packageShipped", - "traceid": "id1", - "group": "shop", - "time": "2020-01-01T10:00:20.000Z" - } - ] +```json +[ + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", + "source": "order-service", + "type": "orderCreated", + "time": "2020-01-01T10:00:00.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", + "source": "order-service", + "type": "orderValidated", + "time": "2020-01-01T10:00:10.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", + "source": "shipping-service", + "type": "packageShipped", + "traceid": "id1", + "group": "shop", + "time": "2020-01-01T10:00:20.000Z" + } +] +``` #### Response @@ -168,6 +170,7 @@ POST `/api/ingestion/event/batch` ##### Request Body: +``` [ { "specversion": "1.0", @@ -184,6 +187,7 @@ POST `/api/ingestion/event/batch` } } ] +``` #### Response diff --git a/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/external-variable-ingestion.md b/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/external-variable-ingestion.md index 7038767ee5a..3e72ed8a736 100644 --- a/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/external-variable-ingestion.md +++ b/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/external-variable-ingestion.md @@ -92,24 +92,26 @@ POST `/api/ingestion/variable` Request Body: - [ - { - "id": "7689fced-2639-4408-9de1-cf8f72769f43", - "name": "address", - "type": "string", - "value": "Main Street 1", - "processInstanceId": "c6393461-02bb-4f62-a4b7-f2f8d9bbbac1", - "processDefinitionKey": "shippingProcess" - }, - { - "id": "993f4e73-7f6a-46a6-bd45-f4f8e3470ba1", - "name": "amount", - "type": "integer", - "value": "500", - "processInstanceId": "8282ed49-2243-44df-be5e-1bf893755d8f", - "processDefinitionKey": "orderProcess" - } - ] +```json +[ + { + "id": "7689fced-2639-4408-9de1-cf8f72769f43", + "name": "address", + "type": "string", + "value": "Main Street 1", + "processInstanceId": "c6393461-02bb-4f62-a4b7-f2f8d9bbbac1", + "processDefinitionKey": "shippingProcess" + }, + { + "id": "993f4e73-7f6a-46a6-bd45-f4f8e3470ba1", + "name": "amount", + "type": "integer", + 
"value": "500", + "processInstanceId": "8282ed49-2243-44df-be5e-1bf893755d8f", + "processDefinitionKey": "orderProcess" + } +] +``` ### Response diff --git a/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/report/get-data-export.md b/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/report/get-data-export.md index 9890cb9b14f..294918cec56 100644 --- a/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/report/get-data-export.md +++ b/optimize_versioned_docs/version-3.11.0/apis-tools/optimize-api/report/get-data-export.md @@ -81,6 +81,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -113,6 +114,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? } ] } +``` ##### Response @@ -130,6 +132,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -162,6 +165,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p } ] } +``` ##### Response diff --git a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md index 7bd865abbb7..a4b441b41e7 100644 --- a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md +++ b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md @@ -14,20 +14,20 @@ in Optimize. Using any other history level will result in less data and/or funct history in a connected engine should be configured for long enough for Optimize to import it. If data is removed from an engine before Optimize has imported it, that data will not be available in Optimize. -| YAML Path | Default Value | Description | -| ---------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| engines.${engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | -| engines.${engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | -| engines.${engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | -| engines.${engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. 
Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) |
-| engines.${engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. |
-| engines.${engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. |
-| engines.${engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. |
-| engines.${engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. |
-| engines.${engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | -| engines.${engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | -| engines.${engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | -| engines.${engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | +| YAML Path | Default Value | Description | +| ----------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| engines.$\{engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | +| engines.$\{engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | +| engines.$\{engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | +| engines.$\{engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | +| engines.$\{engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | +| engines.$\{engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | +| engines.$\{engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | +| engines.$\{engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | +| engines.$\{engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | +| engines.$\{engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | +| engines.$\{engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | +| engines.$\{engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | ## Camunda 7 common import settings diff --git a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration.md b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration.md index c97c7ba91e7..5631a0cfe74 100644 --- a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration.md +++ b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/system-configuration.md @@ -6,10 +6,6 @@ description: "An overview of all possible configuration options in Optimize." All distributions of Camunda Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config` which contains a file called `environment-config.yaml` with values that override the default Optimize properties. -:::note -When converting configuration properties to environment variables, ensure the `CAMUNDA_OPTIMIZE_` prefix is used (for example, `CAMUNDA_OPTIMIZE_API_ACCESSTOKEN`). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. -::: - You can see a sample configuration file with all possible configuration fields and their default values [here](service-config.yaml). @@ -216,16 +212,16 @@ Settings influencing the process digest feature. Settings for webhooks which can receive custom alert notifications. You can configure multiple webhooks which will be available to select from when creating or editing alerts. Each webhook configuration should have a unique human readable name which will appear in the Optimize UI. -| YAML Path | Default Value | Description | -| -------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| webhookAlerting.webhooks.${webhookName}.url | | The URL of the webhook. | -| webhookAlerting.webhooks.${webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | -| webhookAlerting.webhooks.${webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. 
| -| webhookAlerting.webhooks.${webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | +| YAML Path | Default Value | Description | +| --------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| webhookAlerting.webhooks.$\{webhookName}.url | | The URL of the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | ### History Cleanup Settings @@ -235,20 +231,20 @@ Settings for automatic cleanup of historic process/decision instances based on t Two types of history cleanup are available for Camunda 8 users at this time - process data cleanup and external variable cleanup. For more information, see [History cleanup](/optimize/self-managed/optimize-deployment/configuration/history-cleanup.md). ::: -| YAML Path | Default Value | Description | -| -------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. 
As the cleanup can cause considerable load on the underlying Elasticsearch database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | -| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | -| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | -| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | -| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | -| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.ttl | | Time to live to use for process instances of the process definition with the ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the ${key}. | -| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | -| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its ${key}. | -| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.${key}.ttl | | Time to live to use for decision instances of the decision definition with the ${key}. | -| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. 
\[true/false\] | +| YAML Path | Default Value | Description | +| --------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying Elasticsearch database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | +| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | +| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | +| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | +| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | +| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.ttl | | Time to live to use for process instances of the process definition with the $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the $\{key}. | +| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. 
\[true/false\] | +| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its $\{key}. | +| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.$\{key}.ttl | | Time to live to use for decision instances of the decision definition with the $\{key}. | +| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | ### Localization @@ -284,9 +280,9 @@ Customize the Optimize UI e.g. by adjusting the logo, head background color etc. Configuration of initial telemetry settings. -| YAML Path | Default Value | Description | -| ----------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| telemetry.initializeTelemetry | false | Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Elasticsearch version.

    Legal note: Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. | +| YAML Path | Default Value | Description | +| ----------------------------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| telemetry.initializeTelemetry | false | Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Elasticsearch version.

    Legal note: Before you install Camunda Optimize version ≥ 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. | ### Other diff --git a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/telemetry.md b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/telemetry.md index f9691209c35..769006e73f2 100644 --- a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/telemetry.md +++ b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/configuration/telemetry.md @@ -92,6 +92,6 @@ Once Optimize is running, telemetry can be enabled (or disabled) via a modal acc ## Legal note -Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. +Before you install Camunda Optimize version ≥ 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. diff --git a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md index 34b2b2b0421..4674ff3bd50 100644 --- a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md +++ b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md @@ -21,7 +21,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}). -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}). +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) -To enable this feature for your old data, follow the steps in the [engine data reimport guide]({{< ref "/technical-guide/reimport/_index.md" >}}). +To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). 
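To make the history cleanup keys documented in the table above easier to apply, here is a minimal sketch of how they might be combined in an `environment-config.yaml`. It only restates the YAML paths from the table; the process definition key `invoiceProcess` and its shortened TTL are hypothetical values, and the exact nesting should be verified against the sample service configuration referenced in these pages.

```yaml
# Sketch of the history cleanup settings described above.
# The definition key `invoiceProcess` and its overrides are hypothetical examples.
historyCleanup:
  cronTrigger: "0 1 * * *" # run daily at 01:00, outside office hours
  ttl: "P2Y" # global time to live as an ISO 8601 period (years/months/days only)
  processDataCleanup:
    enabled: true
    cleanupMode: "all"
    batchSize: 10000
    perProcessDefinitionConfig:
      invoiceProcess: # per-definition overrides, keyed by process definition key
        ttl: "P6M"
        cleanupMode: "variables"
  decisionDataCleanup:
    enabled: true
  ingestedEventCleanup:
    enabled: true
```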
diff --git a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md index fb5e42345d6..e5b648b16b7 100644 --- a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md +++ b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md @@ -22,7 +22,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}) -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}) +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). diff --git a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/instructions.md b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/instructions.md index 1c5742f0e42..95edf0e439f 100644 --- a/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/instructions.md +++ b/optimize_versioned_docs/version-3.11.0/self-managed/optimize-deployment/migration-update/instructions.md @@ -71,7 +71,7 @@ This approach requires you to manually execute the update script. You can perfor - Open up a terminal, change to the root directory of your **new** Optimize version and run the following command: `./upgrade/upgrade.sh` on Linux or `update/update.bat` on Windows - During the execution the executable will output a warning to ask you to back-up your Elasticsearch data. Type `yes` to confirm that you have backed up the data. -- Feel free to [file a support case](https://docs.camunda.org/enterprise/support/) if any errors occur during the migration process. +- Feel free to [file a support case](https://camunda.com/services/enterprise-support-guide/) if any errors occur during the migration process. - To get more verbose information about the update, you can adjust the logging level as it is described in the [configuration documentation](./../configuration/logging.md). 
#### 3.2 Automatic update execution (Optimize >3.2.0) diff --git a/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/event-ingestion.md b/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/event-ingestion.md index 28f1b05a9f6..22ab6d51022 100644 --- a/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/event-ingestion.md +++ b/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/event-ingestion.md @@ -108,43 +108,45 @@ POST `/api/ingestion/event/batch` ##### Request body - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", - "source": "order-service", - "type": "orderValidated", - "time": "2020-01-01T10:00:10.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", - "source": "shipping-service", - "type": "packageShipped", - "traceid": "id1", - "group": "shop", - "time": "2020-01-01T10:00:20.000Z" - } - ] +```json +[ + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", + "source": "order-service", + "type": "orderCreated", + "time": "2020-01-01T10:00:00.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", + "source": "order-service", + "type": "orderValidated", + "time": "2020-01-01T10:00:10.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", + "source": "shipping-service", + "type": "packageShipped", + "traceid": "id1", + "group": "shop", + "time": "2020-01-01T10:00:20.000Z" + } +] +``` #### Response @@ -168,6 +170,7 @@ POST `/api/ingestion/event/batch` ##### Request Body: +``` [ { "specversion": "1.0", @@ -184,6 +187,7 @@ POST `/api/ingestion/event/batch` } } ] +``` #### Response diff --git a/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/external-variable-ingestion.md b/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/external-variable-ingestion.md index fcbe096af6c..34d377e01e2 100644 --- a/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/external-variable-ingestion.md +++ b/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/external-variable-ingestion.md @@ -92,6 +92,7 @@ POST `/api/ingestion/variable` Request Body: +``` [ { "id": "7689fced-2639-4408-9de1-cf8f72769f43", @@ -110,6 +111,7 @@ Request Body: "processDefinitionKey": "orderProcess" } ] +``` ### Response diff --git a/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/report/get-data-export.md b/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/report/get-data-export.md index 10b5e04e9ac..b38bbc867d3 100644 --- a/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/report/get-data-export.md +++ b/optimize_versioned_docs/version-3.12.0/apis-tools/optimize-api/report/get-data-export.md @@ -81,6 +81,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? 
##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -113,6 +114,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? } ] } +``` ##### Response @@ -130,6 +132,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -162,6 +165,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p } ] } +``` ##### Response diff --git a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md index e5491a96aea..13b76efb7d6 100644 --- a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md +++ b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md @@ -18,20 +18,20 @@ in Optimize. Using any other history level will result in less data and/or funct history in a connected engine should be configured for long enough for Optimize to import it. If data is removed from an engine before Optimize has imported it, that data will not be available in Optimize. -| YAML path | Default value | Description | -| ---------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| engines.${engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | -| engines.${engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | -| engines.${engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | -| engines.${engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | -| engines.${engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | -| engines.${engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | -| engines.${engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | -| engines.${engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. 
When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | -| engines.${engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | -| engines.${engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | -| engines.${engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | -| engines.${engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | +| YAML path | Default value | Description | +| ----------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| engines.$\{engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | +| engines.$\{engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | +| engines.$\{engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | +| engines.$\{engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | +| engines.$\{engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | +| engines.$\{engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | +| engines.$\{engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | +| engines.$\{engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | +| engines.$\{engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | +| engines.$\{engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | +| engines.$\{engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | +| engines.$\{engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | ## Camunda 7 common import settings diff --git a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration.md b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration.md index 988bc579baf..8430a31cf7b 100644 --- a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration.md +++ b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/system-configuration.md @@ -6,10 +6,6 @@ description: "An overview of all possible configuration options in Optimize." All distributions of Camunda Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config` which contains a file called `environment-config.yaml` with values that override the default Optimize properties. -:::note -When converting configuration properties to environment variables, ensure the `CAMUNDA_OPTIMIZE_` prefix is used (for example, `CAMUNDA_OPTIMIZE_API_ACCESSTOKEN`). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. -::: - You can see a sample configuration file with all possible configuration fields and their default values [here](service-config.yaml). @@ -216,16 +212,16 @@ Settings influencing the process digest feature. Settings for webhooks which can receive custom alert notifications. You can configure multiple webhooks which will be available to select from when creating or editing alerts. Each webhook configuration should have a unique human readable name which will appear in the Optimize UI. -| YAML path | Default value | Description | -| -------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| webhookAlerting.webhooks.${webhookName}.url | | The URL of the webhook. | -| webhookAlerting.webhooks.${webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | -| webhookAlerting.webhooks.${webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. 
| -| webhookAlerting.webhooks.${webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | +| YAML path | Default value | Description | +| --------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| webhookAlerting.webhooks.$\{webhookName}.url | | The URL of the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | ### History cleanup settings @@ -235,20 +231,20 @@ Settings for automatic cleanup of historic process/decision instances based on t Two types of history cleanup are available for Camunda 8 users at this time - process data cleanup and external variable cleanup. For more information, see [History cleanup](/optimize/self-managed/optimize-deployment/configuration/history-cleanup.md). ::: -| YAML path | Default value | Description | -| -------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. 
As the cleanup can cause considerable load on the underlying Elasticsearch database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | -| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | -| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | -| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | -| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | -| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.ttl | | Time to live to use for process instances of the process definition with the ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the ${key}. | -| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | -| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its ${key}. | -| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.${key}.ttl | | Time to live to use for decision instances of the decision definition with the ${key}. | -| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. 
\[true/false\] | +| YAML path | Default value | Description | +| --------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying Elasticsearch database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | +| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | +| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | +| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | +| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | +| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.ttl | | Time to live to use for process instances of the process definition with the $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the $\{key}. | +| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. 
\[true/false\] | +| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its $\{key}. | +| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.$\{key}.ttl | | Time to live to use for decision instances of the decision definition with the $\{key}. | +| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | ### Localization @@ -284,9 +280,9 @@ Customize the Optimize UI e.g. by adjusting the logo, head background color etc. Configuration of initial telemetry settings. -| YAML path | Default value | Description | -| ----------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| telemetry.initializeTelemetry | false | Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Elasticsearch version.

    Legal note: Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. | +| YAML path | Default value | Description | +| ----------------------------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| telemetry.initializeTelemetry | false | Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Elasticsearch version.

    Legal note: Before you install Camunda Optimize version ≥ 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. | ### Other diff --git a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/telemetry.md b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/telemetry.md index f9691209c35..769006e73f2 100644 --- a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/telemetry.md +++ b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/configuration/telemetry.md @@ -92,6 +92,6 @@ Once Optimize is running, telemetry can be enabled (or disabled) via a modal acc ## Legal note -Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. +Before you install Camunda Optimize version ≥ 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. diff --git a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md index 34b2b2b0421..4674ff3bd50 100644 --- a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md +++ b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md @@ -21,7 +21,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}). -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}). +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) -To enable this feature for your old data, follow the steps in the [engine data reimport guide]({{< ref "/technical-guide/reimport/_index.md" >}}). +To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). 
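The webhook alerting keys listed further up in this configuration page follow the same pattern. As a rough illustration only — the webhook name `opsChannel`, the target URL, and the `{{ALERT_MESSAGE}}` payload placeholder are assumed values, and the actually supported placeholders should be taken from the linked alert webhook payload placeholders section — a single named webhook might be configured like this:

```yaml
# Illustrative sketch of one named webhook; name, URL, and placeholder are assumptions.
webhookAlerting:
  webhooks:
    opsChannel: # unique, human-readable webhook name shown in the Optimize UI
      url: "https://example.com/hooks/optimize-alerts"
      headers:
        Content-Type: "application/json"
      httpMethod: "POST"
      defaultPayload: '{"text": "{{ALERT_MESSAGE}}"}' # placeholder key is an assumption
      proxy:
        enabled: false
```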
diff --git a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md index fb5e42345d6..e5b648b16b7 100644 --- a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md +++ b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md @@ -22,7 +22,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}) -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}) +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). diff --git a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/instructions.md b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/instructions.md index 7c2752ea43e..5c9ea5d89bc 100644 --- a/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/instructions.md +++ b/optimize_versioned_docs/version-3.12.0/self-managed/optimize-deployment/migration-update/instructions.md @@ -71,7 +71,7 @@ This approach requires you to manually execute the update script. You can perfor - Open up a terminal, change to the root directory of your **new** Optimize version and run the following command: `./upgrade/upgrade.sh` on Linux or `update/update.bat` on Windows - During the execution the executable will output a warning to ask you to back-up your Elasticsearch data. Type `yes` to confirm that you have backed up the data. -- Feel free to [file a support case](https://docs.camunda.org/enterprise/support/) if any errors occur during the migration process. +- Feel free to [file a support case](https://camunda.com/services/enterprise-support-guide/) if any errors occur during the migration process. - To get more verbose information about the update, you can adjust the logging level as it is described in the [configuration documentation](./../configuration/logging.md). 
#### 3.2 Automatic update execution (Optimize >3.2.0) diff --git a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/event-ingestion.md b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/event-ingestion.md index 28f1b05a9f6..22ab6d51022 100644 --- a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/event-ingestion.md +++ b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/event-ingestion.md @@ -108,43 +108,45 @@ POST `/api/ingestion/event/batch` ##### Request body - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", - "source": "order-service", - "type": "orderValidated", - "time": "2020-01-01T10:00:10.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", - "source": "shipping-service", - "type": "packageShipped", - "traceid": "id1", - "group": "shop", - "time": "2020-01-01T10:00:20.000Z" - } - ] +```json +[ + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", + "source": "order-service", + "type": "orderCreated", + "time": "2020-01-01T10:00:00.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", + "source": "order-service", + "type": "orderValidated", + "time": "2020-01-01T10:00:10.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", + "source": "shipping-service", + "type": "packageShipped", + "traceid": "id1", + "group": "shop", + "time": "2020-01-01T10:00:20.000Z" + } +] +``` #### Response @@ -168,6 +170,7 @@ POST `/api/ingestion/event/batch` ##### Request Body: +``` [ { "specversion": "1.0", @@ -184,6 +187,7 @@ POST `/api/ingestion/event/batch` } } ] +``` #### Response diff --git a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/external-variable-ingestion.md b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/external-variable-ingestion.md index fcbe096af6c..34d377e01e2 100644 --- a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/external-variable-ingestion.md +++ b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/external-variable-ingestion.md @@ -92,6 +92,7 @@ POST `/api/ingestion/variable` Request Body: +``` [ { "id": "7689fced-2639-4408-9de1-cf8f72769f43", @@ -110,6 +111,7 @@ Request Body: "processDefinitionKey": "orderProcess" } ] +``` ### Response diff --git a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/report/get-data-export.md b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/report/get-data-export.md index 10b5e04e9ac..b38bbc867d3 100644 --- a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/report/get-data-export.md +++ b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/report/get-data-export.md @@ -81,6 +81,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? 
##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -113,6 +114,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? } ] } +``` ##### Response @@ -130,6 +132,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -162,6 +165,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p } ] } +``` ##### Response diff --git a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/tutorial.md b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/tutorial.md index 6f7e9c69177..82487959d0a 100644 --- a/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/tutorial.md +++ b/optimize_versioned_docs/version-3.13.0/apis-tools/optimize-api/tutorial.md @@ -31,6 +31,10 @@ To set up your credentials, create an `.env` file which will be protected by the These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CLIENT_ID`, `CAMUNDA_CLIENT_SECRET`, and `CAMUNDA_OPTIMIZE_BASE_URL`. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md index e5491a96aea..13b76efb7d6 100644 --- a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md +++ b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md @@ -18,20 +18,20 @@ in Optimize. Using any other history level will result in less data and/or funct history in a connected engine should be configured for long enough for Optimize to import it. If data is removed from an engine before Optimize has imported it, that data will not be available in Optimize. -| YAML path | Default value | Description | -| ---------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| engines.${engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | -| engines.${engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. 
This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | -| engines.${engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | -| engines.${engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | -| engines.${engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | -| engines.${engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | -| engines.${engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | -| engines.${engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | -| engines.${engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | -| engines.${engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | -| engines.${engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | -| engines.${engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | +| YAML path | Default value | Description | +| ----------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| engines.$\{engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | +| engines.$\{engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | +| engines.$\{engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | +| engines.$\{engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | +| engines.$\{engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | +| engines.$\{engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | +| engines.$\{engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | +| engines.$\{engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | +| engines.$\{engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | +| engines.$\{engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | +| engines.$\{engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | +| engines.$\{engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | ## Camunda 7 common import settings diff --git a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration.md b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration.md index 7fcac865c84..a1ddfdd93f1 100644 --- a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration.md +++ b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/system-configuration.md @@ -6,10 +6,6 @@ description: "An overview of all possible configuration options in Optimize." All distributions of Camunda Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config` which contains a file called `environment-config.yaml` with values that override the default Optimize properties. -:::note -When converting configuration properties to environment variables, ensure the `CAMUNDA_OPTIMIZE_` prefix is used (for example, `CAMUNDA_OPTIMIZE_API_ACCESSTOKEN`). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. -::: - You can see a sample configuration file with all possible configuration fields and their default values [here](service-config.yaml). @@ -202,24 +198,19 @@ This section details everything related to building the connection to OpenSearch You can define a number of connection points in a cluster. Therefore, everything under `opensearch.connection.nodes` is a list of nodes Optimize can connect to. If you have built an OpenSearch cluster with several nodes, it is recommended to define several connection points so if one node fails, Optimize is still able to talk to the cluster. ::: -| YAML path | Default value | Description | -| ----------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| opensearch.connection.timeout | 10000 | Maximum time without connection to OpenSearch that Optimize should wait until a timeout triggers. | -| opensearch.connection.responseConsumerBufferLimitInMb | 100 | Maximum size of the OpenSearch response consumer heap buffer. This can be increased to resolve errors from OpenSearch relating to the entity content being too long. | -| opensearch.connection.pathPrefix | | The path prefix under which OpenSearch is available. | -| opensearch.connection.nodes[*].host | localhost | The address/hostname under which the OpenSearch node is available. | -| opensearch.connection.nodes[*].httpPort | 9200 | A port number used by OpenSearch to accept HTTP connections. 
| -| opensearch.connection.proxy.enabled | false | Whether an HTTP proxy should be used for requests to OpenSearch. | -| opensearch.connection.proxy.host | null | The proxy host to use, must be set if `opensearch.connection.proxy.enabled = true`. | -| opensearch.connection.proxy.port | null | The proxy port to use, must be set if `opensearch.connection.proxy.enabled = true`. | -| opensearch.connection.proxy.sslEnabled | false | Whether this proxy is using a secured connection (HTTPS). | -| opensearch.connection.skipHostnameVerification | false | Determines whether the hostname verification should be skipped. | +| YAML path | Default value | Description | +| ---------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------- | +| opensearch.connection.timeout | 10000 | Maximum time without connection to OpenSearch that Optimize should wait until a timeout triggers. | +| opensearch.connection.pathPrefix | | The path prefix under which OpenSearch is available. | +| opensearch.connection.nodes[*].host | localhost | The address/hostname under which the OpenSearch node is available. | +| opensearch.connection.nodes[*].httpPort | 9200 | A port number used by OpenSearch to accept HTTP connections. | +| opensearch.connection.skipHostnameVerification | false | Determines whether the hostname verification should be skipped. | #### Index settings | YAML path | Default value | Description | | ------------------------------------------------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| opensearch.settings.index.prefix | optimize | The prefix prepended to all Optimize index and alias `namopensearch`. Custom values allow you to operate multiple isolated Optimize instances on one OpenSearch cluster.

    NOTE: Changing this after Optimize has already run will create new empty indexes. | +| opensearch.settings.index.prefix | optimize | The prefix prepended to all Optimize index and alias names. Custom values allow you to operate multiple isolated Optimize instances on one OpenSearch cluster.

    NOTE: Changing this after Optimize has already run will create new empty indexes. | | opensearch.settings.index.number_of_replicas | 1 | How often data should be replicated to handle node failures. | | opensearch.settings.index.number_of_shards | 1 | How many shards should be used in the cluster for process instance and decision instance indices. All other indices will be made up of a single shard.

    NOTE: This property only applies the first time Optimize is started and the schema/mapping is deployed on OpenSearch. If you want this property to take effect again, you need to delete all indices (and with that all data) and restart Optimize. | | opensearch.settings.index.refresh_interval | 2s | How long OpenSearch waits until the documents are available for search. A positive value defines the duration in seconds. A value of -1 means a refresh needs to be done manually. | @@ -231,8 +222,8 @@ Define a secured connection to be able to communicate with a secured OpenSearch | YAML path | Default value | Description | | ----------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| opensearch.security.username | | The basic authentication (x-pack) username. | -| opensearch.security.password | | The basic authentication (x-pack) password. | +| opensearch.security.username | | The basic authentication username. | +| opensearch.security.password | | The basic authentication password. | | opensearch.security.ssl.enabled | false | Used to enable or disable TLS/SSL for the HTTP connection. | | opensearch.security.ssl.certificate | | The path to a PEM encoded file containing the certificate (or certificate chain) that will be presented to clients when they connect. | | opensearch.security.ssl.certificate_authorities | [ ] | A list of paths to PEM encoded CA certificate files that should be trusted, for example ['/path/to/ca.crt'].

    NOTE: if you are using a public CA that is already trusted by the Java runtime, you do not need to set the certificate_authorities. | @@ -278,16 +269,16 @@ Settings influencing the process digest feature. Settings for webhooks which can receive custom alert notifications. You can configure multiple webhooks which will be available to select from when creating or editing alerts. Each webhook configuration should have a unique human readable name which will appear in the Optimize UI. -| YAML path | Default value | Description | -| -------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| webhookAlerting.webhooks.${webhookName}.url | | The URL of the webhook. | -| webhookAlerting.webhooks.${webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | -| webhookAlerting.webhooks.${webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | -| webhookAlerting.webhooks.${webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | +| YAML path | Default value | Description | +| --------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| webhookAlerting.webhooks.$\{webhookName}.url | | The URL of the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. 
| +| webhookAlerting.webhooks.$\{webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | ### History cleanup settings @@ -297,20 +288,20 @@ Settings for automatic cleanup of historic process/decision instances based on t Two types of history cleanup are available for Camunda 8 users at this time - process data cleanup and external variable cleanup. For more information, see [History cleanup](/optimize/self-managed/optimize-deployment/configuration/history-cleanup.md). ::: -| YAML path | Default value | Description | -| -------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | -| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | -| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | -| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | -| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. 
| -| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.ttl | | Time to live to use for process instances of the process definition with the ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the ${key}. | -| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | -| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its ${key}. | -| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.${key}.ttl | | Time to live to use for decision instances of the decision definition with the ${key}. | -| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | +| YAML path | Default value | Description | +| --------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | +| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | +| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | +| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. 
| +| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | +| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.ttl | | Time to live to use for process instances of the process definition with the $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the $\{key}. | +| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | +| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its $\{key}. | +| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.$\{key}.ttl | | Time to live to use for decision instances of the decision definition with the $\{key}. | +| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | ### Localization @@ -347,9 +338,9 @@ Customize the Optimize UI e.g. by adjusting the logo, head background color etc. Configuration of initial telemetry settings. -| YAML path | Default value | Description | -| ----------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| telemetry.initializeTelemetry | false | Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Database version.

    Legal note: Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. | +| YAML path | Default value | Description | +| ----------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| telemetry.initializeTelemetry | false | Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Database version.

    Legal note: Before you install Camunda Optimize version ≥ 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. | ### Other diff --git a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/telemetry.md b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/telemetry.md index f9691209c35..769006e73f2 100644 --- a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/telemetry.md +++ b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/configuration/telemetry.md @@ -92,6 +92,6 @@ Once Optimize is running, telemetry can be enabled (or disabled) via a modal acc ## Legal note -Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. +Before you install Camunda Optimize version ≥ 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. diff --git a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md index 34b2b2b0421..4674ff3bd50 100644 --- a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md +++ b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md @@ -21,7 +21,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}). -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}). +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) -To enable this feature for your old data, follow the steps in the [engine data reimport guide]({{< ref "/technical-guide/reimport/_index.md" >}}). +To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). 
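For orientation, the dotted "YAML path" entries in the configuration tables above nest as ordinary YAML keys in `environment-config.yaml`. The following is a minimal sketch only, assuming a placeholder webhook named `myWebhook` with an illustrative URL and HTTP method; apart from the cleanup switch, the values shown are the documented defaults:

```yaml
# Sketch of environment-config.yaml overrides, assuming a placeholder webhook "myWebhook".
webhookAlerting:
  webhooks:
    myWebhook: # human readable webhook name shown in the Optimize UI (placeholder)
      url: "https://example.org/optimize-alerts" # illustrative URL
      httpMethod: "POST" # illustrative HTTP method
historyCleanup:
  cronTrigger: "0 1 * * *" # default: run the cleanup at 01:00 A.M.
  ttl: "P2Y" # default: keep process/decision/event data for two years
  processDataCleanup:
    enabled: true # default is false; enabled here for illustration
    cleanupMode: "all" # default cleanup mode
telemetry:
  initializeTelemetry: false # default
```

The same nesting applies to the other tables in this section, for example the `opensearch.connection.*` and `opensearch.settings.index.*` paths.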
diff --git a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md index fb5e42345d6..e5b648b16b7 100644 --- a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md +++ b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md @@ -22,7 +22,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}) -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}) +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). diff --git a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/instructions.md b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/instructions.md index 70d8601ea86..2315eb68ffe 100644 --- a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/instructions.md +++ b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/migration-update/instructions.md @@ -71,7 +71,7 @@ This approach requires you to manually execute the update script. You can perfor - Open up a terminal, change to the root directory of your **new** Optimize version and run the following command: `./upgrade/upgrade.sh` on Linux or `update/update.bat` on Windows - During the execution the executable will output a warning to ask you to back-up your Elasticsearch data. Type `yes` to confirm that you have backed up the data. -- Feel free to [file a support case](https://docs.camunda.org/enterprise/support/) if any errors occur during the migration process. +- Feel free to [file a support case](https://camunda.com/services/enterprise-support-guide/) if any errors occur during the migration process. - To get more verbose information about the update, you can adjust the logging level as it is described in the [configuration documentation](./../configuration/logging.md). #### 3.2 Automatic update execution (Optimize >3.2.0) diff --git a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/reimport.md b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/reimport.md index 20f459d8828..5b1007df00e 100644 --- a/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/reimport.md +++ b/optimize_versioned_docs/version-3.13.0/self-managed/optimize-deployment/reimport.md @@ -16,6 +16,9 @@ When triggering a reimport, all existing event-based processes get unpublished a You then have to manually publish event-based processes after you have restarted Optimize. ::: +:::note +Engine data reimport is only available when using Optimize with ElasticSearch as a database. 
+::: To reimport engine data, perform the following steps: diff --git a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/event-ingestion.md b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/event-ingestion.md index 28f1b05a9f6..6e43d73fd6c 100644 --- a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/event-ingestion.md +++ b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/event-ingestion.md @@ -48,7 +48,7 @@ The following request headers have to be provided with every ingest request: | Name | Type | Constraints | Description | | -------------------------------------------------------------------------------- | ------------------------------------------------------------------------------ | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion) | String | REQUIRED | The version of the CloudEvents specification, which the event uses, must be `1.0`. See [CloudEvents - Version 1.0 - specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion). | -| [id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id) | String | REQUIRED | Uniquely identifies an event, see [CloudEvents - Version 1.0 - id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id). | +| [ID](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id) | String | REQUIRED | Uniquely identifies an event, see [CloudEvents - Version 1.0 - ID](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id). | | [source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1) | String | REQUIRED | Identifies the context in which an event happened, see [CloudEvents - Version 1.0 - source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1). A use-case could be if you have conflicting types across different sources. For example, a `type:OrderProcessed` originating from both `order-service` and `shipping-service`. In this case, the `source` field provides means to clearly separate between the origins of a particular event. Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. | | [type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | String | REQUIRED | This attribute contains a value describing the type of event related to the originating occurrence, see [CloudEvents - Version 1.0 - type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type). Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. The value `camunda` cannot be used for this field. 
| | [time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | [Timestamp](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type-system) | OPTIONAL | Timestamp of when the occurrence happened, see [CloudEvents - Version 1.0 - time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#time). String encoding: [RFC 3339](https://tools.ietf.org/html/rfc3339). If not present, a default value of the time the event was received will be created. | @@ -108,43 +108,45 @@ POST `/api/ingestion/event/batch` ##### Request body - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", - "source": "order-service", - "type": "orderValidated", - "time": "2020-01-01T10:00:10.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", - "source": "shipping-service", - "type": "packageShipped", - "traceid": "id1", - "group": "shop", - "time": "2020-01-01T10:00:20.000Z" - } - ] +```json +[ + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", + "source": "order-service", + "type": "orderCreated", + "time": "2020-01-01T10:00:00.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", + "source": "order-service", + "type": "orderValidated", + "time": "2020-01-01T10:00:10.000Z", + "traceid": "id1", + "group": "shop", + "data": { + "numberField": 1, + "stringField": "example" + } + }, + { + "specversion": "1.0", + "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", + "source": "shipping-service", + "type": "packageShipped", + "traceid": "id1", + "group": "shop", + "time": "2020-01-01T10:00:20.000Z" + } +] +``` #### Response @@ -156,7 +158,7 @@ The API allows you to update any previously ingested cloud event by ingesting an The following request would update the first cloud event that got ingested in the [ingest three cloud events sample](#ingest-cloud-events). Note that on an update, the cloud event needs to be provided as a whole; it's not possible to perform partial updates through this API. -In this example, an additional field `newField` is added to the data block of the cloud event with the id `1edc4160-74e5-4ffc-af59-2d281cf5aca341`. +In this example, an additional field `newField` is added to the data block of the cloud event with the ID `1edc4160-74e5-4ffc-af59-2d281cf5aca341`. 
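To make the change concrete, the `data` block of that event would then carry the extra field; the value shown for `newField` below is purely illustrative, while the other two fields are copied from the original ingestion request:

```json
{
  "numberField": 1,
  "stringField": "example",
  "newField": "example value"
}
```

Remember that this block cannot be sent on its own — the full cloud event containing it has to be ingested again.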
#### Request @@ -168,6 +170,7 @@ POST `/api/ingestion/event/batch` ##### Request Body: +``` [ { "specversion": "1.0", @@ -184,6 +187,7 @@ POST `/api/ingestion/event/batch` } } ] +``` #### Response diff --git a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/external-variable-ingestion.md b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/external-variable-ingestion.md index fcbe096af6c..34d377e01e2 100644 --- a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/external-variable-ingestion.md +++ b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/external-variable-ingestion.md @@ -92,6 +92,7 @@ POST `/api/ingestion/variable` Request Body: +``` [ { "id": "7689fced-2639-4408-9de1-cf8f72769f43", @@ -110,6 +111,7 @@ Request Body: "processDefinitionKey": "orderProcess" } ] +``` ### Response diff --git a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/report/get-data-export.md b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/report/get-data-export.md index 10b5e04e9ac..b38bbc867d3 100644 --- a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/report/get-data-export.md +++ b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/report/get-data-export.md @@ -81,6 +81,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -113,6 +114,7 @@ GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? } ] } +``` ##### Response @@ -130,6 +132,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p ##### Response content +``` { "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", "numberOfRecordsInResponse": 2, @@ -162,6 +165,7 @@ Note here the use of the query parameter `searchRequestId` to retrieve further p } ] } +``` ##### Response diff --git a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/tutorial.md b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/tutorial.md index 6f7e9c69177..19f2a08559e 100644 --- a/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/tutorial.md +++ b/optimize_versioned_docs/version-3.14.0/apis-tools/optimize-api/tutorial.md @@ -25,12 +25,16 @@ Make sure you keep the generated client credentials in a safe place. The **Clien ## Set up authentication -If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client id and client secret. Then, we return the actual token that can be passed as an authorization header in each request. +If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client ID and client secret. Then, we return the actual token that can be passed as an authorization header in each request. To set up your credentials, create an `.env` file which will be protected by the `.gitignore` file. 
You will need to add your `OPTIMIZE_CLIENT_ID`, `OPTIMIZE_CLIENT_SECRET`, `OPTIMIZE_BASE_URL`, and `OPTIMIZE_AUDIENCE`, which is `optimize.camunda.io` in a Camunda 8 SaaS environment. For example, your audience may be defined as `OPTIMIZE_AUDIENCE=optimize.camunda.io`. These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CLIENT_ID`, `CAMUNDA_CLIENT_SECRET`, and `CAMUNDA_OPTIMIZE_BASE_URL`. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/advanced-features/import-guide.md b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/advanced-features/import-guide.md index f23cda4b3b0..451ba66436d 100644 --- a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/advanced-features/import-guide.md +++ b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/advanced-features/import-guide.md @@ -14,17 +14,17 @@ In general, the import assumes the following setup: - A Camunda engine from which Optimize imports the data. - The Optimize backend, where the data is transformed into an appropriate format for efficient data analysis. -- [Elasticsearch](https://www.elastic.co/guide/index.html), which is the database Optimize persists all formatted data to. +- [Elasticsearch (ES)](https://www.elastic.co/guide/index.html) or [OpenSearch (OS)](https://opensearch.org/), which serves as the database that Optimize uses to persist all of its formatted data. The following depicts the setup and how the components communicate with each other: ![Optimize Import Structure](img/Optimize-Structure.png) -Optimize queries the engine data using a dedicated Optimize REST-API within the engine, transforms the data, and stores it in its own Elasticsearch database such that it can be quickly and easily queried by Optimize when evaluating reports or performing analyses. The reason for having a dedicated REST endpoint for Optimize is performance: the default REST-API adds a lot of complexity to retrieve the data from the engine database, which can result in low performance for large data sets. +Optimize queries the engine data using a dedicated Optimize REST-API within the engine, transforms the data, and stores it in its own database such that it can be quickly and easily queried by Optimize when evaluating reports or performing analyses. The reason for having a dedicated REST endpoint for Optimize is performance: the default REST-API adds a lot of complexity to retrieve the data from the engine database, which can result in low performance for large data sets. Note the following limitations regarding the data in Optimize's database: -- The data is only a near real-time representation of the engine database. This means Elasticsearch may not contain the data of the most recent time frame, e.g. the last two minutes, but all the previous data should be synchronized. +- The data is only a near real-time representation of the engine database. 
This means the database may not contain the data of the most recent time frame, e.g. the last two minutes, but all the previous data should be synchronized. - Optimize only imports the data it needs for its analysis. The rest is omitted and won't be available for further investigation. Currently, Optimize imports: - The history of the activity instances - The history of the process instances @@ -47,7 +47,7 @@ This section gives an overview of how fast Optimize imports certain data sets. T It is very likely that these metrics change for different data sets because the speed of the import depends on how the data is distributed. -The import is also affected by how the involved components are set up. For instance, if you deploy the Camunda engine on a different machine than Optimize and Elasticsearch to provide both applications with more computation resources, the process is likely to speed up. If the Camunda engine and Optimize are physically far away from each other, the network latency might slow down the import. +The import is also affected by how the involved components are set up. For instance, if you deploy the Camunda engine on a different machine than Optimize and Elasticsearch/OpenSearch to provide both applications with more computation resources, the process is likely to speed up. If the Camunda engine and Optimize are physically far away from each other, the network latency might slow down the import. ### Setup @@ -135,7 +135,7 @@ During execution, the following steps are performed: 2. Map entities and add an import job 3. [Execute the import](#execute-the-import). 1. Poll a job - 2. Persist the new entities to Elasticsearch + 2. Persist the new entities to the database ### Start an import round @@ -175,33 +175,37 @@ First, the `ImportScheduler` retrieves the newest index, which identifies the la #### Map entities and add an import job -All fetched entities are mapped to a representation that allows Optimize to query the data very quickly. Subsequently, an import job is created and added to the queue to persist the data in Elasticsearch. +All fetched entities are mapped to a representation that allows Optimize to query the data very quickly. Subsequently, an import job is created and added to the queue to persist the data in the database. ### Execute the import Full aggregation of the data is performed by a dedicated `ImportJobExecutor` for each entity type, which waits for `ImportJob` instances to be added to the execution queue. As soon as a job is in the queue, the executor: - Polls the job with the new Optimize entities -- Persists the new entities to Elasticsearch +- Persists the new entities to the database The data from the engine and Optimize do not have a one-to-one relationship, i.e., one entity type in Optimize may consist of data aggregated from different data types of the engine. For example, the historic process instance is first mapped to an Optimize `ProcessInstance`. However, for the heatmap analysis it is also necessary for `ProcessInstance` to contain all activities that were executed in the process instance. -Therefore, the Optimize `ProcessInstance` is an aggregation of the engine's historic process instance and other related data: historic activity instance data, user task data, and variable data are all [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) within Optimize's `ProcessInstance` representation. 
+Therefore, the Optimize `ProcessInstance` is an aggregation of the engine's historic process instance and other related data: historic activity instance data, user task data, and variable data are all nested documents ([ES](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) / [OS](https://opensearch.org/docs/latest/field-types/supported-field-types/nested/)) within Optimize's `ProcessInstance` representation. :::note -Optimize uses [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html), the above mentioned data is an example of documents that are nested within Optimize's `ProcessInstance` index. +Optimize uses nested documents ([ES](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) / [OS](https://opensearch.org/docs/latest/field-types/supported-field-types/nested/)), the above mentioned data is an example of documents that are nested within Optimize's `ProcessInstance` index. -Elasticsearch applies restrictions regarding how many objects can be nested within one document. If your data includes too many nested documents, you may experience import failures. To avoid this, you can temporarily increase the nested object limit in Optimize's [index configuration](./../configuration/system-configuration.md#index-settings). Note that this might cause memory errors. +Elasticsearch and OpenSearch apply restrictions regarding how many objects can be nested within one document. If your data includes too many nested documents, you may experience import failures. To avoid this, you can temporarily increase the nested object limit in Optimize's [index configuration](./../configuration/system-configuration.md#index-settings). Note that this might cause memory errors. ::: Import executions per engine entity are actually independent from another. Each follows a [producer-consumer-pattern](https://dzone.com/articles/producer-consumer-pattern), where the type specific `ImportService` is the single producer and a dedicated single `ImportJobExecutor` is the consumer of its import jobs, decoupled by a queue. So, both are executed in different threads. To adjust the processing speed of the executor, the queue size and the number of threads that process the import jobs can be configured: +:::note +Although the parameters below include `ElasticSearch` in their name, they apply to both ElasticSearch and OpenSearch installations. For backward compatibility reasons, the parameters have not been renamed. +::: + ```yaml import: # Number of threads being used to process the import jobs per data type that are writing - # data to elasticsearch. + # data to the database. elasticsearchJobExecutorThreadCount: 1 - # Adjust the queue size of the import jobs per data type that store data to elasticsearch. + # Adjust the queue size of the import jobs per data type that store data to the database. # A too large value might cause memory problems. 
elasticsearchJobExecutorQueueSize: 5 ``` diff --git a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md index e5491a96aea..13b76efb7d6 100644 --- a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md +++ b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7.md @@ -18,20 +18,20 @@ in Optimize. Using any other history level will result in less data and/or funct history in a connected engine should be configured for long enough for Optimize to import it. If data is removed from an engine before Optimize has imported it, that data will not be available in Optimize. -| YAML path | Default value | Description | -| ---------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| engines.${engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | -| engines.${engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | -| engines.${engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | -| engines.${engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | -| engines.${engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | -| engines.${engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | -| engines.${engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | -| engines.${engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | -| engines.${engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | -| engines.${engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | -| engines.${engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | -| engines.${engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | +| YAML path | Default value | Description | +| ----------------------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| engines.$\{engineAlias}.name | default | The process engine's name on the platform, this is the unique engine identifier on the platforms REST API. | +| engines.$\{engineAlias}.defaultTenant.id | null | A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation. | +| engines.$\{engineAlias}.defaultTenant.name | null | The name used for this default tenant when displayed in the UI. | +| engines.$\{engineAlias}.excludeTenant | [ ] | Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant) | +| engines.$\{engineAlias}.rest | http://localhost:8080/engine-rest | A base URL that will be used for connections to the Camunda Engine REST API. | +| engines.$\{engineAlias}.importEnabled | true | Determines whether this instance of Optimize should import definition & historical data from this engine. | +| engines.$\{engineAlias}.eventImportEnabled | false | Determines whether this instance of Optimize should convert historical data to event data usable for event based processes. | +| engines.$\{engineAlias}.authentication.enabled | false | Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password. | +| engines.$\{engineAlias}.authentication.user | | When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • `READ` & `READ_HISTORY` permission on the Process and Decision Definition resources
    • `READ` permission on _all_ ("\*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data. | +| engines.$\{engineAlias}.authentication.password | | When basic authentication is enabled, this password is used to authenticate against the engine. | +| engines.$\{engineAlias}.webapps.endpoint | http://localhost:8080/camunda | Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit | +| engines.$\{engineAlias}.webapps.enabled | true | Enables/disables linking to other Camunda Web Applications | ## Camunda 7 common import settings diff --git a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration.md b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration.md index 4f4fbe64dc5..7868c25f1a7 100644 --- a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration.md +++ b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/configuration/system-configuration.md @@ -6,10 +6,6 @@ description: "An overview of all possible configuration options in Optimize." All distributions of Camunda Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config` which contains a file called `environment-config.yaml` with values that override the default Optimize properties. -:::note -When converting configuration properties to environment variables, ensure the `CAMUNDA_OPTIMIZE_` prefix is used (for example, `CAMUNDA_OPTIMIZE_API_ACCESSTOKEN`). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. -::: - You can see a sample configuration file with all possible configuration fields and their default values [here](service-config.yaml). @@ -202,25 +198,20 @@ This section details everything related to building the connection to OpenSearch You can define a number of connection points in a cluster. Therefore, everything under `opensearch.connection.nodes` is a list of nodes Optimize can connect to. If you have built an OpenSearch cluster with several nodes, it is recommended to define several connection points so if one node fails, Optimize is still able to talk to the cluster. ::: -| YAML path | Default value | Description | -| ----------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| opensearch.connection.timeout | 10000 | Maximum time without connection to OpenSearch that Optimize should wait until a timeout triggers. | -| opensearch.connection.responseConsumerBufferLimitInMb | 100 | Maximum size of the OpenSearch response consumer heap buffer. This can be increased to resolve errors from OpenSearch relating to the entity content being too long. | -| opensearch.connection.pathPrefix | | The path prefix under which OpenSearch is available. | -| opensearch.connection.nodes[*].host | localhost | The address/hostname under which the OpenSearch node is available. | -| opensearch.connection.nodes[*].httpPort | 9205 | A port number used by OpenSearch to accept HTTP connections. 
| -| opensearch.connection.proxy.enabled | false | Whether an HTTP proxy should be used for requests to OpenSearch. | -| opensearch.connection.proxy.host | null | The proxy host to use, must be set if `opensearch.connection.proxy.enabled = true`. | -| opensearch.connection.proxy.port | null | The proxy port to use, must be set if `opensearch.connection.proxy.enabled = true`. | -| opensearch.connection.proxy.sslEnabled | false | Whether this proxy is using a secured connection (HTTPS). | -| opensearch.connection.skipHostnameVerification | false | Determines whether the hostname verification should be skipped. | -| opensearch.connection.awsEnabled | false | Determines if AWS credentials shall be used for authentication | +| YAML path | Default value | Description | +| ---------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------- | +| opensearch.connection.timeout | 10000 | Maximum time without connection to OpenSearch that Optimize should wait until a timeout triggers. | +| opensearch.connection.pathPrefix | | The path prefix under which OpenSearch is available. | +| opensearch.connection.nodes[*].host | localhost | The address/hostname under which the OpenSearch node is available. | +| opensearch.connection.nodes[*].httpPort | 9205 | A port number used by OpenSearch to accept HTTP connections. | +| opensearch.connection.skipHostnameVerification | false | Determines whether the hostname verification should be skipped. | +| opensearch.connection.awsEnabled | false | Determines if AWS credentials shall be used for authentication | #### Index settings | YAML path | Default value | Description | | ------------------------------------------------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| opensearch.settings.index.prefix | optimize | The prefix prepended to all Optimize index and alias `namopensearch`. Custom values allow you to operate multiple isolated Optimize instances on one OpenSearch cluster.

    NOTE: Changing this after Optimize has already run will create new empty indexes. | +| opensearch.settings.index.prefix | optimize | The prefix prepended to all Optimize index and alias names. Custom values allow you to operate multiple isolated Optimize instances on one OpenSearch cluster.

    NOTE: Changing this after Optimize has already run will create new empty indexes. | | opensearch.settings.index.number_of_replicas | 1 | How many replicas of the data should be kept to handle node failures. | | opensearch.settings.index.number_of_shards | 1 | How many shards should be used in the cluster for process instance and decision instance indices. All other indices will be made up of a single shard.

    NOTE: This property only applies the first time Optimize is started and the schema/mapping is deployed on OpenSearch. If you want this property to take effect again, you need to delete all indices (and with that all data) and restart Optimize. | | opensearch.settings.index.refresh_interval | 2s | How long OpenSearch waits until the documents are available for search. A positive value defines the duration in seconds. A value of -1 means a refresh needs to be done manually. | @@ -232,8 +223,8 @@ Define a secured connection to be able to communicate with a secured OpenSearch | YAML path | Default value | Description | | ----------------------------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| opensearch.security.username | | The basic authentication (x-pack) username. | -| opensearch.security.password | | The basic authentication (x-pack) password. | +| opensearch.security.username | | The basic authentication username. | +| opensearch.security.password | | The basic authentication password. | | opensearch.security.ssl.enabled | false | Used to enable or disable TLS/SSL for the HTTP connection. | | opensearch.security.ssl.certificate | | The path to a PEM encoded file containing the certificate (or certificate chain) that will be presented to clients when they connect. | | opensearch.security.ssl.certificate_authorities | [ ] | A list of paths to PEM encoded CA certificate files that should be trusted, for example ['/path/to/ca.crt'].

    NOTE: if you are using a public CA that is already trusted by the Java runtime, you do not need to set the certificate_authorities. | @@ -279,16 +270,16 @@ Settings influencing the process digest feature. Settings for webhooks which can receive custom alert notifications. You can configure multiple webhooks which will be available to select from when creating or editing alerts. Each webhook configuration should have a unique human readable name which will appear in the Optimize UI. -| YAML path | Default value | Description | -| -------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| webhookAlerting.webhooks.${webhookName}.url | | The URL of the webhook. | -| webhookAlerting.webhooks.${webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | -| webhookAlerting.webhooks.${webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | -| webhookAlerting.webhooks.${webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | -| webhookAlerting.webhooks.${webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | -| webhookAlerting.webhooks.${webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true. | +| YAML path | Default value | Description | +| --------------------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| webhookAlerting.webhooks.$\{webhookName}.url | | The URL of the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.headers | | A map of the headers of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.httpMethod | | The HTTP Method of the request to be sent to the webhook. | +| webhookAlerting.webhooks.$\{webhookName}.defaultPayload | | The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.enabled | | Whether an HTTP proxy should be used for requests to the webhook URL. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.host | | The proxy host to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. 
| +| webhookAlerting.webhooks.$\{webhookName}.proxy.port | | The proxy port to use, must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | +| webhookAlerting.webhooks.$\{webhookName}.proxy.sslEnabled | | Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.$\{webhookName}.proxy.enabled = true. | ### History cleanup settings @@ -298,20 +289,20 @@ Settings for automatic cleanup of historic process/decision instances based on t Two types of history cleanup are available for Camunda 8 users at this time - process data cleanup and external variable cleanup. For more information, see [History cleanup](/optimize/self-managed/optimize-deployment/configuration/history-cleanup.md). ::: -| YAML path | Default value | Description | -| -------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | -| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | -| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | -| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | -| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. 
| -| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.ttl | | Time to live to use for process instances of the process definition with the ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the ${key}. | -| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | -| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its ${key}. | -| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.${key}.ttl | | Time to live to use for decision instances of the decision definition with the ${key}. | -| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | +| YAML path | Default value | Description | +| --------------------------------------------------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | +| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | +| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | +| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. 
| +| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | +| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.ttl | | Time to live to use for process instances of the process definition with the $\{key}. | +| historyCleanup.processDataCleanup .perProcessDefinitionConfig.$\{key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the $\{key}. | +| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | +| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its $\{key}. | +| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.$\{key}.ttl | | Time to live to use for decision instances of the decision definition with the $\{key}. | +| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | ### Localization diff --git a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md index 34b2b2b0421..c7f9665f08a 100644 --- a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md +++ b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2.md @@ -21,7 +21,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}). -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}). +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) -To enable this feature for your old data, follow the steps in the [engine data reimport guide]({{< ref "/technical-guide/reimport/_index.md" >}}). +To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../../reimport.md). 
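For orientation, the history cleanup keys documented in the system-configuration tables above can be combined into a small `environment-config.yaml` override. This is only a sketch: the YAML paths are taken from the table, the values are illustrative, and the nested layout assumes the usual dot-path-to-YAML mapping of the Optimize configuration file.

```yaml
historyCleanup:
  cronTrigger: '0 1 * * *'   # run daily at 01:00, outside office hours
  ttl: 'P2Y'                 # keep two years of data (ISO 8601 duration, date units only)
  processDataCleanup:
    enabled: true
    cleanupMode: 'variables' # remove only variables, keep the process instances themselves
    batchSize: 10000
  ingestedEventCleanup:
    enabled: false
```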
diff --git a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md index 69a8362bd1a..d042f1c64cb 100644 --- a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md +++ b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4.md @@ -22,7 +22,7 @@ Here you will find information about: When updating Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}) -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}) +- [Process instance parts](components/userguide/process-analysis/report-analysis/process-instance-parts.md) +- [Canceled instances only filter](components/userguide/process-analysis/instance-state-filters.md#canceled-instances-only-filter) To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../../reimport.md). diff --git a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md index a4a22d70fc2..ebf210b86f1 100644 --- a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md +++ b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/migration-update/camunda-7/instructions.md @@ -60,7 +60,7 @@ This approach requires you to manually execute the update script. You can perfor - Open up a terminal, change to the root directory of your **new** Optimize version and run the following command: `./upgrade/upgrade.sh` on Linux or `./upgrade/upgrade.bat` on Windows. For OpenSearch installations, please make sure to set the environment variable `CAMUNDA_OPTIMIZE_DATABASE=opensearch` before executing the update script. - During the execution the executable will output a warning to ask you to back-up your database data. Type `yes` to confirm that you have backed up the data. -- Feel free to [file a support case](https://docs.camunda.org/enterprise/support/) if any errors occur during the migration process. +- Feel free to [file a support case](https://camunda.com/services/enterprise-support-guide/) if any errors occur during the migration process. - To get more verbose information about the update, you can adjust the logging level as it is described in the [configuration documentation](./../../configuration/logging.md). 
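As a quick illustration of the manual update steps above, the OpenSearch case could look like the following on Linux. This is a sketch only; it assumes your working directory is the root of the **new** Optimize distribution, as described in the instructions.

```bash
# Run from the root directory of the new Optimize version (use upgrade.bat on Windows).
# For OpenSearch installations, the target database must be set before the script starts.
export CAMUNDA_OPTIMIZE_DATABASE=opensearch
./upgrade/upgrade.sh
# The script warns you to back up your database data; type "yes" to confirm and continue.
```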
#### 3.2 Automatic update execution (Optimize >3.2.0) diff --git a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/reimport.md b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/reimport.md index 20f459d8828..5b1007df00e 100644 --- a/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/reimport.md +++ b/optimize_versioned_docs/version-3.14.0/self-managed/optimize-deployment/reimport.md @@ -16,6 +16,9 @@ When triggering a reimport, all existing event-based processes get unpublished a You then have to manually publish event-based processes after you have restarted Optimize. ::: +:::note +Engine data reimport is only available when using Optimize with ElasticSearch as a database. +::: To reimport engine data, perform the following steps: diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/alerts.md b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/alerts.md deleted file mode 100644 index 3b02da68695..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/alerts.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: alerts -title: Alerts -description: Get a notification as soon as your system is behaving in an unexpected manner. ---- - -:::note -You must configure the email service to receive notifications. See the [technical guide](../../../self-managed/optimize-deployment/setup/installation.md) for which properties need to be defined. -::: - -Optimize's alerting functionality can be used to notify you when your report hits a predefined critical value. You can create alerts for any number reports that exist within a collection. Inside the collection, navigate to the **Alerts** tab to create and view all alerts defined for reports in this collection. You can manage an alert by moving the mouse over the alert entry and clicking the **Edit** or **Delete** buttons in the context menu on the right side of the page. - -![Alert overview](./img/alerts-overview.png) - -Click **Create New Alert** to create a new alert. You will then see the following modal: - -![Alert modal overview](./img/alert-modal-description.png) - -To give the alert a name, select the report and define a target webhook or email address of the person who will receive the alert. - -:::note -Alerts can only be created for reports which are visualized as a single number and are in the same collection as the alert. Visit the [report section](../creating-reports.md) on how to define single number reports. -::: - -Additionally, set a threshold which defines when an alert should be triggered. A notification will be sent to the configured email address or webhook as soon as a report value hits the threshold. - -If reminder notifications are enabled, the alert will continue to send notifications for as long as the value is above (or below, as defined) the threshold. Finally, you'll get a resolve notification as soon as the report value is within a typical range. - -For example, say you defined an alert which should be triggered when the report value becomes greater than 50. -You also enabled reminder notifications to be sent each hour. Here's what that would look like: - -![Notifications graph](./img/alert-notifications-graph.png) - -## Send alerts to external systems - -It's possible to configure Optimize to send alerts to an external system when needed. 
For details on how to configure and add target systems, visit the [technical guide](../../../self-managed/optimize-deployment/setup/installation.md). Once at least one target system is configured, alerts will have a new input option to select one of the configured systems. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/event-based-processes.md b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/event-based-processes.md deleted file mode 100644 index 659d4abde16..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/event-based-processes.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -id: event-based-processes -title: Event-based processes -description: Create and analyze reports backed by ingested events. ---- - -Camunda Platform 7 only - -## Overview - -Event-based processes are BPMN processes that can be created inside Optimize and based on events. These events can be loaded from an external system or created from internal BPMN processes. They are particularly useful to create reports and dashboards based on a process that is not fully automated with Camunda Platform yet. - -Once the event-based process feature is correctly configured, you will see a new link in the navigation to go to the event-based process list. From there, you can see, create, or edit your event-based processes. - -:::note -When Camunda activity events are used in event-based processes, Camunda admin authorizations are not inherited for the event-based process. The authorization to use an event-based process is solely managed via the access management of event-based processes when [publishing an event-based process](#publishing-an-event-based-process) or at any time via the [edit access option](#event-based-process-list---edit-access) in the event-based process list. - -Visit our [technical guide](../../../self-managed/optimize-deployment/setup/installation.md) on authorization management and event-based processes for the reasoning behind this behavior. -::: - -## Set up - -You need to set up the event-based processes feature to make use of this feature. See the [technical guide](../../../self-managed/optimize-deployment/setup/installation.md) for more information. - -## Event-based process list - -All currently available event-based processes are listed under the main navigation item **Event-based processes**. From there, it is possible to see their state, which can be one of the following: - -- `Unmapped` - The process model is created, but no single event is mapped to a flow node. -- `Mapped` - The process model contains at least one mapping of an event to a flow node. -- `Published` - The event-based process is published and can be used in reports by users that are authorized to access it. -- `Unpublished Changes` - The process model contains changes that are not reflected in the currently published state of the event-based process; it needs to get republished manually. - -![Process List](./img/processList.png) - -### Event-based process list - edit access - -To manage authorizations for a published event-based process, the **Edit Access** option in the dropdown menu of each event-based process list entry allows you to authorize users or groups to create reports for these processes in Optimize. 
- -![Process List - Edit Access](./img/editAccess.png) - -## Creating an event-based process - -There are three ways to create an event-based process: - -### Auto-generate - -:::note -The process auto-generation feature is currently in early beta stage. -::: - -The first way to create an event-based process is to allow Optimize to auto-generate the model based on provided configuration. Using this option, you can specify which event sources should be used for the process, including both Camunda and external events. - -Note that for external events, it is currently only possible to select all the external events. - -![Autogenerate a process](./img/auto-generation.png) - -Optimize will attempt to generate an overall model based on these sources, determining the order of events in the model by sampling stored instances. After auto-generation is complete, you will see the process in [view mode](#view-mode), with the model's nodes fully mapped to their corresponding events. - -To make changes to the autogenerated process, modify either the model itself, the process name, or the process mappings in the same way as any other event-based process by entering [edit mode](#edit-mode). - -### Model a process - -The second way to create an event-based process is to model it manually using the integrated BPMN modeler. - -### Upload BPMN model - -Finally, you can create an event-based process by uploading a `.bpmn` file directly into Optimize. - -## Edit mode - -![Edit Mode](./img/editMode.png) - -The edit mode allows you to build and map your event-based process. Using this mode, you can perform all kinds of operations, such as: - -- Rename the process. -- Model the process using the integrated BPMN modeler. -- Map your diagram nodes to an event from the event table. -- Edit event sources for the events to display in the event table. -- Save the current state with your applied changes. -- Cancel changes you already applied to the process. - -### Modeling - -Modeling can be done using the integrated modeler shown in the screenshot above. To maximize the modeling area, collapse the table during the modeling by clicking on the **Collapse** button in the top right of the table. - -### Event sources - -To map BPMN nodes to events, add event sources to the process first by clicking the **Add Event Sources** button available at the top of the table. - -In this view, it is possible to add two types of events to the events list: - -#### External events - -Events that were ingested into Optimize from an external system. These events can be imported into Optimize using the event ingestion API Optimize provides. - -Defining the `group` property when ingesting the events will allow selecting events that belong to a group. If the group property is not defined or left empty during ingestion of an event, Optimize will consider it `ungrouped`. - -![Selecting External Events](./img/externalEvents.png) - -#### Camunda events - -![Add Source Modal](./img/sourceModal.png) - -These are events generated from an existing Camunda BPMN process. Only processes for which Optimize has imported at least one event will be visible for selection. This means the process has to have at least one instance and Optimize has to have been configured to import data from that process. - -See the [technical guide](../../../self-managed/optimize-deployment/setup/installation.md) for more information on how this is configured. 
- -To add such events, provide the following details: - -- The target process definition that you would like to generate the events from - -- The trace ID location: A trace ID uniquely identifies a process instance across system boundaries. One example would be an invoice number for an invoice handling process. For a Camunda process, it is possible to select a trace ID that exists either in a variable or in the process business key. - -- Which events to display in the table: - -Adding events for every flow node might not be necessary for the event-based process. Therefore, we provide the ability to only add the events that are necessary. There are three options available: - -- Process start and end: This will add only two events in the table, one event is triggered when the process starts and one when it ends. - -- Start and end flow node events: The number of events added to the table will depend on how many start and end events are in the process. For example, if there is one start event and two end events, three events will be added. - -- Start and end flow node events: This option will add events for every flow node in the process. - -Once this information is defined and the sources are added, the events will appear in the table as shown below. - -![Events Table](./img/eventsTable.png) - -#### Events table - -Each event in the table will have the following properties: - -- Mapped as (start/end): Defines whether the event indicates start of BPMN node or the end of it. - -- Event name - -- Group - - - For external events, this corresponds to the group of the ingested event. - - For Camunda process events, this corresponds to the name of the process definition. - -- Source: External system or Camunda process event. - -- Count: How many times this event was triggered. See [additional notes](#event-counts) for more information. - -To assist during event mapping, the events table offers suggestions of potential events to be mapped based on the selected node. This is indicated by a blue strap near the suggested event. The event suggestion only works when adding all external events as a source with no Camunda events. - -### Mapping events - -Mapping is the process of linking BPMN flow nodes to events. - -To start mapping, take the following steps: - -1. Select the node that you would like to map from the diagram. -2. To link the selected node to an event, enable the checkbox of that event from the table. Afterwards, a checkmark sign will be shown on top of the node to indicate that the event has been mapped successfully. - -:::note -Not all BPMN nodes can be mapped. Only events and activities can be mapped to events. -::: - -Once all the necessary nodes are mapped, you can save your diagram to go the view mode. - -## View mode - -The view mode gives you a quick overview of which flow nodes have been mapped to events and allows you to enter the edit mode, publish, or delete the current event-based process. - -![View Mode of event-based processes](./img/processView.png) - -### Publishing an event-based process - -Once you have built and mapped your event-based process, you need to publish it to make it available for reports and dashboards. To publish your process, click the **Publish** button in the view mode of your event-based process. - -![Publish modal](./img/publishModal.png) - -In the shown modal, you can see who will have access to use the event-based process. By default, the process is only available to the user who created it. 
If you would like to allow other users to use the process in reports, click **Change...** to open the permissions options. - -![permissions modal](./img/usersModal.png) - -In this modal, it is possible to search for users and groups and add them to the list of users who have access to the process. Once that is done, you can save the changes and publish your process. - -Publishing the process will take some time to correlate all events and generate your event-based process. Once the publishing is done, a notification will appear indicating this. - -Now the process is ready and can be used like any other process to create reports and dashboards. - -## External ingested events - -After ingesting events into Optimize from an external system, each individual event will appear in the external events table. - -![External Events](./img/external-events.png) - -By default, the table shows all ingested events sorted by the timestamp from newest to oldest. However, it is also possible to search for events or sort the results by event name, source, group, or trace ID. - -### Deleting ingested events - -One or multiple events can be selected and deleted as shown in the figure below: - -![Deleting External Events](./img/deleting-events.png) - -:::note -When deleting an event mapped to a published event-based process, only the corresponding flow node instance will be removed from the process and no change will happen on the process instance level until the process is republished. - -For example, if you delete an ingested event that was mapped to the only end event within a process, the corresponding process instance will still be considered complete until the process is republished. -::: - -## Additional notes - -### Event-based process auto-generation - -Event-based process auto-generation attempts to determine the order of events based on a sample of stored instances. Due to the nature of sampling, it is possible that the generated model may not always appear as you might expect. - -In some cases, it is possible that some sequence flows may be hidden by overlapping elements on the generated model. - -If both an event source and an embedded subprocess contained within that source are included for auto-generation, they will appear in the auto-generated model as independent processes. - -In the case where external events are configured as an event source, it is possible that Optimize will not be able to determine a model containing all external events. In this scenario, -Optimize will auto-generate a model containing only the external events that it could determine the order of. - -In any of the above scenarios, you are able to correct the model to suit your needs using the editor. Like any other event-based process, an auto-generated model can be edited so you can make any necessary corrections after auto-generation is complete. - -### Published event-based processes - -In some scenarios, reports created using event-based processes might not show all the information expected. - -To avoid this, we encourage you to avoid including the following elements when modelling your event-based processes: - -- Inclusive gateways: These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. - -![Inclusive Gateway](./img/inclusive_gateway.png) - -- Complex gateways: These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. 
- -![Complex Gateway](./img/complex_gateway.png) - -- Mixed gateway directions: Mixed gateways are gateways which have no clear direction, instead being a combination of opening and closing gateways. These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. - -![Mixed Direction Gateway](./img/mixed_direction_gateway.png) - -- Chained gateways: A chained gateway is one that occurs as part of a sequence of consecutive gateways. These may be modeled in an event-based process diagram. However, visual data flow will be interrupted on reports such as heatmaps. - -![Chained Gateway](./img/chained_gateway.png) - -### Event counts - -Event counts in the table may not match the values you expected. There are three possible explanations for this: - -- If you have enabled history cleanup, the counts will still include events from process instances that have since been cleaned up. -- For events from Camunda processes, the count value represents the number of times that event has occurred across all versions and tenants of that process, regardless of how the event source is configured. -- The counts for external events will still include ingested events that have since been deleted using the [event inspection feature](#deleting-ingested-events). diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/export-import.md b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/export-import.md deleted file mode 100644 index b8c5c9c53a8..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/export-import.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -id: export-import -title: Export and import -description: Export and import Optimize entities. ---- - -With Optimize 3.3.0, a new exporting/importing feature has been added to Optimize. Currently, this feature is limited to reports and dashboards. In the future, users will be able to export and import collections as well. Currently, only superusers are authorized to export and import Optimize entities. - -## Exporting entities - -Superusers can export entities by accessing the **Export** option in the entity menu. This downloads a JSON file which -includes all relevant information that defines the selected entity. This file can later be used to import the exported entity into -a different Optimize installation. - -![Exporting a Process Report](./img/export.png) - -## Importing entities - -### Prerequisites - -Exported entities can be imported both as private entities and into a selected collection, provided the following prerequisites are met: - -- All definitions the entity requires exist in the target Optimize. -- When importing into a collection, the collection data sources must include all relevant definitions for the entity. -- The importing user is authorized to access the relevant definitions. -- The entity data structures match. To ensure matching data structures, confirm the Optimize version of the source is the same as the version of the target Optimize. - -If any of the above conditions are not met, the import will fail. Optimize will display an error message explaining why the import was not successful to enable you to fix the issue and retry. - -### Importing private entities - -To import an entity as a private entity, use the **Import JSON** option from the **Create New** menu on the welcome page. 
The entity will appear in the entity list once the import is finished and can be interacted with as usual. - -![Importing a private entity](./img/private-import.png) - -### Importing entities into a collection - -To add the entity to an existing collection, use the same **Import JSON** option from the **Create New** menu from within the selected collection. This will import the entity into the collection. Any user that has access to this collection can now also access the imported entity. - -:::note -The collection must have all data sources required by the imported entity or the import will fail. -::: - -![Importing an entity into a Collection](./img/collection-import.png) diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/filters.md b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/filters.md deleted file mode 100644 index 6abc3147534..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/filters.md +++ /dev/null @@ -1,293 +0,0 @@ ---- -id: filters -title: Filters -description: Define filters in Optimize to narrow down your view to only a subset of process instances. ---- - -Locating flaws in your process models can be a huge challenge when you have millions of process instances to sift through. Define filters in Optimize to narrow down your view to only a subset of process instances. - -Camunda Optimize offers various ways of filtering your data, such as filter by: - -- [Filter behavior](#filter-behavior) -- [Instance state filters](#instance-state-filters) - - [Running completed instances only filter](#running-completed-instances-only-filter) - - [Canceled instances only filter](#canceled-instances-only-filter) - - [Non canceled instances only filter](#non-canceled-instances-only-filter) - - [Suspended and non suspended instances only filter](#suspended-and-non-suspended-instances-only-filter) -- [Flow node status filter](#flow-node-status-filter) -- [Date filters](#date-filters) - - [Process instance date filter](#process-instance-date-filter) - - [Flow node date filter](#flow-node-date-filter) -- [Duration filters](#duration-filters) - - [Process instance duration filter](#process-instance-duration-filter) - - [Flow node duration filter](#flow-node-duration-filter) -- [Flow node filter](#flow-node-filter) -- [Flow node selection](#flow-node-selection) -- [Variable filter](#variable-filter) - - [Boolean variables](#boolean-variables) - - [String variables](#string-variables) - - [Numeric variables](#numeric-variables) - - [Date variables](#date-variables) - - [List variable filters](#list-variable-filters) - - [Combine multiple variables filters with OR logic](#combine-multiple-variables-filters-with-or-logic) -- [Assignee and candidate group filters](#assignee-and-candidate-group-filters) -- [Incident filter](#incident-filter) -- [Combined filters](#combined-filters) - -## Filter behavior - -There are two ways to filter data in Optimize: - -1. Instance filters: All filters can be used to filter instances in single reports and during branch analysis. -2. Flow node data filters: These filters can be used if you not only want to filter instances, but you additionally need to filter the content of instances (for example, flow nodes). Since not all filters can be applied on flow nodes, only compatible ones can be used as a flow node data filter. Flow node filters also exclude all instances from the result which do not contain at least one flow node that matches the filter. 
- -To summarize, instance filters remove rows, while flow node data filters remove columns. - -Additionally, if the report contains multiple processes, filters need to specify which definition they apply to. Some filters can apply to multiple definitions at once, while other filters are specific to a certain process definition. For example, because they rely on the flow nodes present in the definition. - -## Instance state filters - -### Running completed instances only filter - -By default, a report considers all process instances, regardless of whether they are still running. This behavior can be adjusted with the **Running Instances Only** and **Completed Instances Only** filters. Be aware that setting one of those filters (e.g. **Running Instances Only**) while the other one is already set (e.g. **Completed Instances Only**), will show a warning message since these two filters are incompatible with each other and will not show any data. - -### Canceled instances only filter - -If the **Canceled Instances Only Filter** is applied, the report will only consider those instances which were terminated before completion, either -internally or externally. Be aware that adding this filter along with the **Running Instances Only** will show a warning message since these filters are incompatible and will not show any data. - -### Non canceled instances only filter - -As opposed to the **Canceled Instances Only Filter**, applying the **Non Canceled Instances Only** filter will make Optimize query only those instances which were _not_ canceled during -their execution. This means only active and completed instances are considered. Externally or internally terminated instances are not included in the report. - -### Suspended and non suspended instances only filter - -By default, a report considers all process instances, regardless of whether they are suspended or not. Adding this filter makes it possible to only evaluate process instances that are in the suspension state. Note that if you have enabled history cleanup, this might affect the accuracy of this filer given the suspension state is imported from historic data. - -## Flow node status filter - -Some flow nodes can take a relatively long time to complete (e.g. user tasks or long-running service tasks). By default, a report includes all flow nodes in the calculations, whether they are currently running, canceled, or already completed. You can change this behavior by adding a flow node status filter as a [flow node data filter](#filter-behavior). - -Adding one of the flow node status options will filter both instances and flow nodes according to the selected status: - -- For instance reports: The filter will only include instances that have at least one flow node matching the filter criteria. This behavior can be seen if you are in variable, incident, or raw data reports. -- For flow node reports: Flow nodes that do not match the filter criteria will be excluded from the results. - -This behavior can be seen if you are in flow nodes or user task reports. - -Here are the possible options for this filter: - -- Running flow nodes only: Your report will only collect information from flow nodes that are currently running. -- Completed flow nodes only: Considers only successfully completed flow nodes. -- Canceled flow nodes only: Considers only canceled flow nodes. -- Completed or canceled flow nodes only: Considers all completed flow nodes regardless of whether they were canceled or not. 
- -:::note -For incident reports, flow node status filters always behave as instance filters and do not filter flow nodes. -::: - -## Date filters - -In Optimize, there are two kinds of date filters: the start and the end date filter. Each of these filters can be applied on [process instance](#process-instance-date-filter) and on [flow node](#flow-node-date-filter) dates. - -There are multiple ways in which you can define your date filters: - -- Set the filter to a current amount of time. For example, today, this week, this month, etc. In such cases, the filter does not remain static, but moves with time to deliver a subset of the data according to the selected time interval. - -:::note -Within date filters, weeks begin on Monday, not Sunday. This is not configurable in Optimize. -::: - -- Set it to a previous amount of time. For example, yesterday, last week, last month, etc. This filter also moves with time and is automatically adjusted to cover completed periods of time. - -Take the following example: Today is Wednesday, March 11. If you set a process instance start date filter to `Last... + week`, you get all process instances that were started from Monday, March 2 to Sunday, March 8. A week passes, and we now have Wednesday, March 18. Applying the same filter now filters the process instances which were started from Monday, March 9 to Sunday, March 15. - -- To cover previous time periods up the current moment of time, you can use the 'Rolling' option. - -Take the following example: today is March 28. If you set a process instance start date filter to the last three days, you get all process instances that were started from March 26 to March 28. A day passes, and we now have March 29. Applying the same filter now filters the process instances which were started from March 27 to March 29. - -- If you do not want the filter to be completely dynamic, you can also select `Between`, `Before`, or `After`. -- The `Between` option only considers process instances started or ended within a fixed date range (e.g. filter all process instances between 2018-01-01 and 2018-01-26). This range is fixed and does not change. -- In the same way, you can select `After` or `Before` options to only consider process instances that started or ended after/before a fixed date. - -The start and the end date filters are independent and can be applied to a report simultaneously. However, be aware that each of these filters can only exist once. If, for example, you define a new start date filter when another one already exists, the second one will replace the first one. - -### Process instance date filter - -Applying a process instance start or end date filter will result in a report considering only process instances that started or ended within the defined range of dates. - -:::note -Reports with a process instance end date filter applied will only consider completed process instances. -::: - -As an alternative way to create a process instance start date filter, you can directly select the desired filter interval in the chart itself if your report is visualized as bar or line chart. - -### Flow node date filter - -Similar to process instance date filters, flow node date filters allow you to filter the report based on flow node start or end dates. - -:::note -Reports with a flow node end date filter will only consider data from completed flow nodes. 
-::: - -This filter type can be applied either as a [process instance](#filter-behavior) or as a [flow node](#filter-behavior) filter: - -- When applied as a process instance filter, you are required to select the flow nodes that are to be relevant to the filter, yielding a report which will only consider those process instances where one or more of the selected flow nodes match the configured filter. - -![Flow Node date filter](./img/flowNode-date-filter.png) - -- When added as a flow node filter, there is no flow node selection. The resulting report automatically only includes data from those flow nodes which match the given filter. - -## Duration filters - -### Process instance duration filter - -The **Process Instance Duration Filter** allows you to only regard process instances whose execution from start to end took a certain amount of time. For instance, you can filter process instances that took more than three days or less than five seconds. - -:::note -This filter shows only completed process instances, since the total duration of running process instances is not yet known. -::: - -![Process instance duration filter in Camunda Optimize](./img/duration-filter.png) - -### Flow node duration filter - -If the **Flow Node Duration Filter** is applied as an instance filter, it will only regard process instances where one or more flow nodes took a certain amount of time for their execution. For instance, you can filter process instances where a flow node took more than three days or less than five seconds. - -If applied as a flow node filter, it will filter flow nodes and only show the flow nodes that were selected in the filter. - -![Flow Node duration filter in Camunda Optimize](./img/flowNode-duration-filter.png) - -:::note -For incident reports, flow node duration filters always behave as instance filters regardless of where they were defined. -::: - -## Flow node filter - -Retrieve only those process instances that executed certain flow nodes within your process by using the `Flow Node Filter`. Selecting several values at once means that all the selected flow nodes need to have been executed at least once in the process instance lifetime. At the top of the flow node filter modal you can see a preview of the filter you are about to create. You can also filter process instances where certain flow nodes were not executed. - -![Flow node filter in Camunda Optimize](./img/flownode-filter.png) - -## Flow node selection - -In flow node and user tasks reports, all flow nodes are included in the result by default. This could result in many table rows or chart entries which makes the visualization hard to read. This filter allows you to specify which flow nodes are included and deselect the ones that are not relevant to the report. - -![Specifying which nodes are included in the report](./img/flowNodeSelection.png) - -## Variable filter - -Use the `Variable Filter` to retrieve only those process instances which hold the specified variable value for the selected variable. - -:::note -Variable filters can only filter for the final value of the variable. - -For instance, assume you want to analyze only those process instances which have the variable `department` with the value `marketing`. Say you also have some instances where this variable had the value `marketing` at the start of the execution, yet this was later reassigned to the value `sales`. These instances will not be included in the filter. 
-::: - -To use complex types like object, use the **Variable Import Customization** feature to transform your object variables into primitive type variables. - -Start creating a variable filter by searching for and selecting a variable from the suggested list of variable names. - -![Searching through the variables in variable filter](./img/variable-filter.png) - -There are four types of variables that you can filter for: - -### Boolean variables - -They can have the state `true`, `false`, `null`, or `undefined`. - -### String variables - -Two types of operators are available for variables of type `String`. You can either filter by an exact variable value (`is` and `is not`) or filter by a substring (`contains` and `does not contain`). - -For the operators `is` and `is not`, the first 10 values are loaded and displayed. If the variable has more than 10 values, a `Load More` button is shown to be able to extend the list as much as you need. You can also search through the whole list of values using the search input field. The list only contains variable values that already appeared in one of the instances of the process. - -To filter by a variable value that is not in the list of available values, click the **+ Value** button and add a custom variable value. - -In case the `is` option of the toggle button is selected, checking one or more values means that you want to see only those process instances where the variable value equals one of the checked values (this corresponds to the `or` operator in boolean logic.) - -In case the `is not` option of the toggle button is selected, checking one or more values means that you want to see only those process instances where the variable value does not equal any of the checked values (this corresponds to the `and` operator in the boolean logic.) - -For the operators `contains` and `does not contain`, you can add one or multiple values that should match a substring of the variable value. For the `contains` operator, adding one or more values means that you want to see only those process instances where the variable value contains one of the entered values (this corresponds to the `or` operator in boolean logic). - -In case the `does not contain` operator is selected, adding one or more values means that you want to see only those process instances where the variable value does not contain any of the entered values (this corresponds to the `and` operator in boolean logic.) - -There is an option to include the null or undefined values of the selected variable in the result. By using the same option, it is also possible to show all the values except the null or undefined by selecting the `is not` option of the toggle button. - -### Numeric variables - -Here you have an input field to define whether the variable value in the process instance should be equal, not equal, less than, or greater than a certain value. You can even add more input fields and apply the same operation several times at once. - -If the `is` option of the toggle button is selected, adding one or more values means that you want to see only those process instances where the variable value equals one of the checked values (this corresponds to the `or` operator in boolean logic.) - -If the `is not` option of the toggle button is selected, adding one or more values means that you want to see only those process instances where the variable value does not equal any of the checked values (this corresponds to the `and` operator in boolean logic.) 
- -If the `is less than` or `is greater than` option is selected, only one value can be entered. - -Null or undefined values can be included in or excluded from the results in the same way as for string variables. - -### Date variables - -This filters all instances where the selected date variable has a value within a specified date range. All the options that are available to configure [date filters](#date-filters) are also available for date variables. - -Similar to the other variables, there are two input switches that allow you to exclude or include process instances where a particular date variable is either `null` or `undefined`. - -### List variable filters - -When filtering based on the value of a [list variable](../../../self-managed/optimize-deployment/setup/object-variables.md#list-variables), the applied filter depends on the primitive type of the items within the list. For example, you create a numeric variable filter for a variable which is a list of numbers, a string variable filter for a list of strings, and so on. Note that filters are applied to each individual item within the list variable, not to the list itself. - -For example, an "is" filter on a list of string values matches those instances where any individual list item is equal to the given term; in other words, instances whose list variable "contains" the selected value. - -Similarly, the "contains" filter matches process instances whose list variable contains at least one value which in turn contains the given substring. - -### Combine multiple variable filters with OR logic - -In addition to using variable filters individually, there is also the option of combining all the previously mentioned variable filters with OR logic. This means that process instances which fulfill the condition specified in at least one filter will be displayed. - -## Assignee and candidate group filters - -These filters allow you to include or exclude instances based on the assignee or the candidate group of at least one user task of a particular process instance. - -![Assignee/Candidate group filter modal](./img/assignee-filter.png) - -As shown in the example, it is possible to select one or more assignees or even filter for unassigned instances. - -This filter has different behavior depending on where it was [defined](#filter-behavior): - -- As a `Flow Node data filter` applied on a user task report: This filter only includes user task instances that satisfy _all_ assignee/candidateGroup filters defined in the report at once. Mutually exclusive filters, such as having both an inclusive and an exclusive filter on the same assignee, do not yield any results in user task reports. - -- As an `instance filter`: This filter includes all process instances where _at least one_ user task satisfies one particular assignee/candidateGroup criterion. This means multiple mutually exclusive assignee/candidateGroup filter entries might still yield results for these reports (e.g. if the process definition contains multiple user tasks). - -## Incident filter - -This filter has different behavior depending on where it was [defined](#filter-behavior): - -- As an `instance filter`: This filter will retrieve only those process instances that contain open, resolved, or no incidents (depending on your selection). Here are some examples where this filter can be useful: - - - Creating reports that contain no incidents, since instances with incidents have very long durations and are influencing your data.
- - - To monitor all the instances from multiple engines that have open incidents. - - On the other hand, this filter is not useful for counting the number of incidents, because instances matched by an open or resolved instance filter might still contain incidents of the other type. - -- As a `Flow Node data filter`: This filter will additionally filter the instance incident states to only include incidents of the same type (open or resolved). As an example, this filter can be used to count the number of open or resolved incidents, since it considers only the incidents of that type. This filter is currently only useful if you are in an incident view report. - -:::note -The incident filter does not currently filter flow nodes regardless of where it was defined. -::: - -## Combined filters - -All the previously mentioned filters can be combined. Only those process instances which match all the configured filters -are considered in the report or analysis. The [duration filter](#duration-filters), [flow node filter](#flow-node-filter), and [variable filter](#variable-filter) can be defined several times. See the following screenshot for a possible combination of filters: - -![Combined filter in Camunda Optimize](./img/combined-filter.png) - -Everyone who has access to the report can add their own filters, for example, by creating a dashboard that contains that report and using dashboard filters. Note that filters can apply to all processes or a subset of processes. - -Filters added in such a way are always combined with the filters set in the report edit mode. That way, users can reduce the set of process instances that are considered when evaluating the report, but not increase the number of instances evaluated beyond the set the report author specified. - -In essence, if two copies of the same process are present, Optimize combines them with OR logic, and their filters or variables can be combined with the same logic. Therefore, it's possible to compare two differently filtered slices of the same process on the same report (with the group by process feature) or combine them (without group by process). - -Users can get access to a report via the sharing functionality or if the report is in a shared collection. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/footer.md b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/footer.md deleted file mode 100644 index f29afdf5ea5..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/footer.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: footer -title: Footer -description: Take a closer look at engine connection status, Elasticsearch connection, import progress, and more. ---- - -## Footer - -In the **Footer** section of Optimize, you can see some important information, such as: - -- Engine connection status: For each engine that is connected to Optimize, the connection status is shown. This enables you to be aware of any engine connection problems Optimize may be experiencing. -- Status of the connection to Elasticsearch -- Import progress: Indicates if Optimize is currently importing data from the engine. Analysis can only be performed in Optimize on data that has already been imported.
-- Timezone: The timezone used to display all date and time information -- Optimize version - -![footer overview](./img/footer-overview.png) - -There are three possible states of engine connection: - -- Connected and import finished (green circle) -- Connected and import is not completed (spinner) -- Not connected (red circle) - -Elasticsearch can be either connected or not (green and red circles respectively). - -![footer engine connections](./img/footer-engine-connections.png) diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alert-modal-description.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alert-modal-description.png deleted file mode 100644 index b98abd565e6..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alert-modal-description.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alert-notifications-graph.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alert-notifications-graph.png deleted file mode 100644 index 9ddf9464e98..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alert-notifications-graph.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alerts-overview.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alerts-overview.png deleted file mode 100644 index c48966f6451..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/alerts-overview.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/assignee-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/assignee-filter.png deleted file mode 100644 index bbffc0dc0ff..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/assignee-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/auto-generation.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/auto-generation.png deleted file mode 100644 index 0f494b7c5c2..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/auto-generation.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/chained_gateway.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/chained_gateway.png deleted file mode 100644 index 769571c2eaf..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/chained_gateway.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/collection-import.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/collection-import.png deleted file mode 100644 index 4e8fd633db9..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/collection-import.png and /dev/null differ diff --git 
a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/combined-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/combined-filter.png deleted file mode 100644 index 3f89e1f8024..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/combined-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/complex_gateway.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/complex_gateway.png deleted file mode 100644 index 5b89d866384..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/complex_gateway.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/deleting-events.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/deleting-events.png deleted file mode 100644 index 5485aec7846..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/deleting-events.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/duration-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/duration-filter.png deleted file mode 100644 index 314cc6cb440..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/duration-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editAccess.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editAccess.png deleted file mode 100644 index f1da83045a7..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editAccess.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editModal.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editModal.png deleted file mode 100644 index a503ade438c..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editMode.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editMode.png deleted file mode 100644 index e78092a3a07..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/editMode.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/eventsTable.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/eventsTable.png deleted file mode 100644 index 14dead16e3b..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/eventsTable.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/export.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/export.png deleted file mode 100644 index c558b83390a..00000000000 Binary files 
a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/export.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/external-events.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/external-events.png deleted file mode 100644 index 8c1fb25e03d..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/external-events.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/externalEvents.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/externalEvents.png deleted file mode 100644 index 991e989396f..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/externalEvents.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/fixed-start-date-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/fixed-start-date-filter.png deleted file mode 100644 index f2c6b62bd5d..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/fixed-start-date-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNode-date-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNode-date-filter.png deleted file mode 100644 index 07bb077c4da..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNode-date-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNode-duration-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNode-duration-filter.png deleted file mode 100644 index ef99a06bbab..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNode-duration-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNodeSelection.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNodeSelection.png deleted file mode 100644 index 0e43ac375ff..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flowNodeSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flownode-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flownode-filter.png deleted file mode 100644 index 134b3737d7f..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/flownode-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/footer-engine-connections.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/footer-engine-connections.png deleted file mode 100644 index bd3ec59fe34..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/footer-engine-connections.png and /dev/null differ diff --git 
a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/footer-overview.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/footer-overview.png deleted file mode 100644 index 801fa4a52df..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/footer-overview.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/inclusive_gateway.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/inclusive_gateway.png deleted file mode 100644 index cc38685dc4f..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/inclusive_gateway.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/mixed_direction_gateway.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/mixed_direction_gateway.png deleted file mode 100644 index 3285e113a79..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/mixed_direction_gateway.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/private-import.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/private-import.png deleted file mode 100644 index e1c47022bf4..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/private-import.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/processList.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/processList.png deleted file mode 100644 index 1d1826a280b..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/processList.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/processView.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/processView.png deleted file mode 100644 index 385f391003e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/processView.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/publishModal.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/publishModal.png deleted file mode 100644 index c7802339d7e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/publishModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/relative-start-date-filter.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/relative-start-date-filter.png deleted file mode 100644 index 60551f00eda..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/relative-start-date-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/report-with-filterlist-open.png 
b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/report-with-filterlist-open.png deleted file mode 100644 index 017daeebf99..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/report-with-filterlist-open.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/report-with-flownode-filterlist-open.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/report-with-flownode-filterlist-open.png deleted file mode 100644 index 5a5273d4919..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/report-with-flownode-filterlist-open.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/sourceModal.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/sourceModal.png deleted file mode 100644 index c773d5d973a..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/sourceModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/usersModal.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/usersModal.png deleted file mode 100644 index 187b9cee13c..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/usersModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-boolean.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-boolean.png deleted file mode 100644 index 34386854e84..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-boolean.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-date.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-date.png deleted file mode 100644 index 5ea830d1dcc..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-date.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-numeric.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-numeric.png deleted file mode 100644 index 03d24e962e7..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-numeric.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-string.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-string.png deleted file mode 100644 index 53ed3907191..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter-string.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter.png 
b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter.png deleted file mode 100644 index c67e74c66c7..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-filter.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-labeling-panel.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-labeling-panel.png deleted file mode 100644 index abe0c6f9555..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/variable-labeling-panel.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/zoom-in.png b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/zoom-in.png deleted file mode 100644 index caa1dc51064..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/img/zoom-in.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/variable-labeling.md b/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/variable-labeling.md deleted file mode 100644 index 90a367f8fe9..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/additional-features/variable-labeling.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: variable-labeling -title: Variable labeling -description: Add a label for a variable in a process definition. ---- - -The variable labeling functionality allows users to add, update, and delete batches of variable labels so your data is more understandable by business users. This allows Optimize to display a variable's label instead of its original name anywhere the given process definition is being used. Some examples of that would be -when viewing and configuring reports, dashboards, or event-based processes. - -To use this feature, navigate to the definition edit window from inside a report and click **Edit Variables** to access the label edit panel. You will then see the following panel: - -![Label Edit panel](./img/variable-labeling-panel.png) - -Delete a label by inputting an empty field for its value. - -## Limitations - -:::note -This feature is currently not supported in outlier analysis and csv export. This means that during outlier analysis, the original name of a variable is displayed. -::: - -Keep in mind that when applying variable filters in multi-definition reports and multi-definition dashboards, the filters are applied to all variables across definitions which have the same name and type. This happens even in the case that the variables are labeled differently across definitions. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/collections-dashboards-reports.md b/optimize_versioned_docs/version-3.7.0/components/userguide/collections-dashboards-reports.md deleted file mode 100644 index 8152e9e67df..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/collections-dashboards-reports.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: collections-dashboards-reports -title: Collections, dashboards, and reports -description: "Let's take a closer look at the structure of Optimize and its permissions." ---- - -Within Optimize, **reports** are based on a _single_ visualization, similar to a single chart or graph. 
**Dashboards** are aggregations of these visualizations, similar to a full spreadsheet of data collections, or a combination of several comparative charts and graphs. **Collections** are groups of these data sets, similar to project folders for organizational purposes, within which you can nest a series of dashboards and/or reports. - -The Optimize landing page shows a list of all existing dashboards and reports you've created, as well as collections you have access to. Click on a collection to view its accompanying reports and dashboards. - -![home page](./img/home.png) - -In addition to the name of the dashboard, report, or collection, you can also see the date it was last modified. Alongside collections, dashboards, and combined reports, you can also see how many entities are contained within (e.g. how many reports are on a dashboard). You can also see how many users and groups have access to a collection. - -Clicking on a report, dashboard, or collection takes you to its corresponding details page. When moving the mouse over one of these entities, you can access a context menu that allows you to edit, copy, or delete the entity. Multiple entities can be selected and deleted at once using the bulk menu which appears after selecting at least one entity. When copying an entity, you also have the option to move that copy into a collection. - -![copy sales dashboard](./img/copy.png) - -To find a collection, report, or dashboard, use the search field at the top of the page to filter the list by the name of the entity. - -To create a dashboard or report, use the **Create New** button available in the top right corner of the page. - -## User permissions - -:::note -Adding user groups to collections is currently only available in Camunda 7. -::: - -By default, if you create a collection, only you can access the collection and the contents within. To share a collection with other users, add them to the collection. - -![users and user groups](./img/users.png) - -You are automatically assigned the manager role when creating a new collection. There can be multiple managers for a collection. However, there must be at least one manager for every collection. Managers can do the following: - -- Add, edit, and remove dashboards and reports in the collection. -- Edit the collection name and delete the collection using the context menu in the header. -- Add, edit, and remove other users and user groups via the collection's **Users** tab. - -A manager can add a new user or group to the collection using the **Add** button. Use the ID of the user/group to add them. Every user/group has a role assigned to them that specifies their access rights to the collection. - -![add user or user group](./img/addUser.png) - -An editor may edit, delete, and create new dashboards or reports in the collection. Editors may not edit the name of the collection, delete the collection, or change anything in the **Users** tab. - -Those with read-only access to the collection may only view the components contained within, as well as copy them. Viewers cannot create, edit, or delete components in a collection. They are also not allowed to rename or delete the collection itself, or change anything in the **Users** tab.
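The three roles described above can be condensed into a small capability matrix. The following TypeScript sketch is purely illustrative: the type and field names are invented for this summary and are not part of any Optimize API.

```typescript
// Illustrative summary of the collection roles described above; not an Optimize API.
type CollectionRole = "manager" | "editor" | "viewer";

interface RoleCapabilities {
  manageEntities: boolean; // create, edit, and delete dashboards and reports in the collection
  manageCollection: boolean; // rename or delete the collection itself
  manageUsers: boolean; // add, edit, and remove users and groups in the Users tab
  viewAndCopy: boolean; // view entities in the collection and copy them
}

const capabilities: Record<CollectionRole, RoleCapabilities> = {
  manager: { manageEntities: true, manageCollection: true, manageUsers: true, viewAndCopy: true },
  editor: { manageEntities: true, manageCollection: false, manageUsers: false, viewAndCopy: true },
  viewer: { manageEntities: false, manageCollection: false, manageUsers: false, viewAndCopy: true },
};
```

This only restates the prose above in a compact form; the actual permission checks are performed by Optimize itself.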
diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/combined-reports.md b/optimize_versioned_docs/version-3.7.0/components/userguide/combined-reports.md deleted file mode 100644 index 97985777573..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/combined-reports.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: combined-reports -title: Combined reports -description: Occasionally, it is necessary to compare multiple reports or visualize them together in one diagram. ---- - -Camunda Platform 7 only - -## Creating a combined report - -Occasionally, it is necessary to compare multiple reports or visualize them together in one diagram. This can be achieved by creating a special type of report called a **combined report**. To create a new combined report, visit the homepage and click **Create New > New Report > Combined Process Report**. - -![Creating a Combined report](./img/combined-report-create.png) - -Then, you are redirected to the combined report builder. There, use the selection panel on the right to select multiple reports to combine. - -:::note -If the combined report is inside a collection, only reports in the same collection can be combined. If the combined report is not in a collection, it can only combine reports that are also not in a collection. -::: - -A preview of the selected reports will be displayed in the panel on the left. - -![combined report builder](./img/combined-report.png) - -For example, combining two reports with a table visualization results in the following view: - -![Combining two reports with a table visualization](./img/table-report.png) - -And combining two reports with line chart visualization results in the following view: - -![Combining two reports with line chart visualization](./img/area-chart-report.png) - -You can change the color of chart reports by clicking on the color box near the name of the report. - -You can also drag items in the list of selected reports to change their order in the report view. - -:::note -Not all reports can be combined with each other, as differences in their configurations, e.g. a different visualization, may make them incompatible. Therefore, when selecting a report, only the other reports that are combinable with the selected one will appear. -::: - -Only reports that match the following criteria can be combined: - -- Same group by -- Same visualization -- Same view, except that user task duration reports (work, idle, and total) can be combined with each other. Flow node duration reports are also possible. -- Distributed reports cannot be combined -- Multi-measure reports, including reports containing multiple aggregations or multiple user task duration times, cannot be combined. -- The process definition can be different. -- Furthermore, it is possible to combine reports grouped by start date with reports grouped by end date under the condition that the date interval is the same. - -Moreover, only the following visualizations can be combined and will show up in the combined selection list: - -- Bar chart -- Line chart -- Table -- Number - -Currently, it is not possible to combine decision reports. - -It is also possible to update the name of the report, save it, and add it to a dashboard exactly like a normal report. Combined reports will also show up in the report list along with normal reports. - -### Configure combined reports - -You can configure the combined report using the cog wheel button available on the top right side of the screen.
- -For example, in all chart reports, you can change what to show in the tooltips, change the axis names, and set a goal line as shown in the figure below. - -![Configurations available for combined reports](./img/combined-config.png) diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/creating-dashboards.md b/optimize_versioned_docs/version-3.7.0/components/userguide/creating-dashboards.md deleted file mode 100644 index 57f37beccb1..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/creating-dashboards.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -id: creating-dashboards -title: Creating dashboards -description: "Define reports in Camunda Optimize according to Key Performance Indicators relevant to your business objectives." ---- - -## Overview - -Often, you want a quick overview of business performance by monitoring the underlying processes. To achieve that, you can define reports in Camunda Optimize according to KPIs (Key Performance Indicators) relevant to your business objectives. A dashboard shows multiple reports, so you can get an up-to-date view of the productivity of your system. - -The dashboard consists of the **edit mode** and **view mode**. - -To create a new dashboard, click the **Create New** button on the homepage or collection page and select the **New dashboard** option. This opens a dialog where you can set the dashboard name and select one of multiple dashboard templates. If you are not creating a blank dashboard, select a process definition. This process definition is used to create new reports for the dashboard. - -Creating a dashboard from a template also creates new reports which are saved as soon as the dashboard is saved. - -![create new dashboard](./img/dashboardTemplate.png) - -## Edit mode - -The edit mode allows you to configure the dashboard and adjust it to your needs, including the following operations: - -- Rename your dashboard -- Add/remove a report -- Save the current state with your applied changes -- Cancel changes you already applied to the dashboard -- Set filters available on the dashboard -- Set a default auto refresh rate to periodically update the dashboard in [view mode](#view-mode) - -![edit mode](./img/dashboard-dashboardEditActions.png) - -Once you have prepared all your reports, you can assemble them into a dashboard. Above the dashboard grid, click **Add a report** to open a modal where you can select one of your defined reports and add it to the dashboard: - -![add a report modal](./img/dashboard-addAReportModal.png) - -:::note -If the dashboard is inside a collection, only reports that are in the same collection can be added. If the dashboard is not in a collection, it can only contain reports that are also not in a collection. -::: - -In the **Add report** modal, click **Add External Source** to enter the URL of an external data source which should be added to the dashboard. This allows you to create dashboards that combine Optimize reports with data from other services. Such external reports are added as iframes to the dashboard. - -To move the report to your desired location, drag it in any direction. As soon as you release the dragged report, it snaps to the closest grid position. Dragging the handle on the lower right corner of each report will resize it. Delete the report from your dashboard by clicking the **x** button on the top right corner of each report.
- -![edit actions](./img/dashboard-reportEditActions.png) - -### Adding filters in edit mode - -In the dashboard edit mode, there is an **Add a Filter** button which shows a **Filters** panel. This panel allows you to specify filters which will become available for the dashboard. The following filters are available: - -- Start date: Allows filtering by process instance start date -- End date: Allows filtering by process instance end date -- Instance state: Allows filtering by process instance state, such as running, completed, or canceled -- Variable: Allows filtering by process instance variable value -- Assignee: Allows filtering flow node data by their assignee -- Candidate Group: Allows filtering flow node data by their candidate group - -![filter edits](./img/filter-editMode.png) - -For Variable Filters, specify which variable the filter should apply to. For string and number variables, provide a list of values which users should be able to filter by. - -Additionally, it is possible to allow dashboard users to filter by their own values by checking the **Allow viewer to add filter values** box. In contrast to report filters, adding a value in the modal will not immediately filter by this value; it will only make this value available to filter by in the dashboard. - -For **Assignee** and **Candidate Group** filters, the dashboard editor can specify which assignees and candidate groups are available to filter by. In contrast to report filters, adding an assignee or candidate group to the filter will not immediately filter by this value; it will only make this value available to filter by in the dashboard. Additionally, it is possible to allow dashboard users to filter by their own values by checking the **Allow viewer to add filter values** box. - -The list of variable names, variable values, assignees, and candidate groups is compiled from all reports on the dashboard. - -#### Setting a default dashboard filter - -After specifying available filters in the dashboard edit mode, editors of the dashboard can also set a default filter. A default filter is always applied when a user initially opens the dashboard. Viewers can still remove filter values to see unfiltered reports, but if a user does not perform any steps to change the filter manually, they will see the reports with the defined default filter. - -To set a default filter, dashboard editors can use the added filter options in the filter area. Whatever filter configuration is set there when the dashboard is saved becomes the default filter for the dashboard. - -## View mode - -Once you have defined what your dashboard should look like, the view mode provides you with all the features to monitor your process, such as: - -- Full-screen: Display the dashboard in full-screen and only see the essential information of your dashboard - the reports - and hide the header, control panel, and footer. While in full-screen mode, you can click on the **Toggle Theme** button to switch between the default light theme and a dark theme. - -- Auto-refresh: This feature periodically updates the dashboard with the latest data. You can decide how often the update should be performed by setting a time span ranging from 1 to 60 minutes. An animation indicates when the next update is occurring. If you do not wish to use that feature anymore, you can disable it anytime. - -:::note -The refresh rate will not be saved unless it is selected in the [edit mode](#edit-mode) of the dashboard.
-If it was selected in the view mode, the refresh rate will not be saved when refreshing the dashboard page manually or switching to another page in between. -::: - -- Alerts: If the created dashboard exists inside a collection, it is possible to create and manage alerts for the reports inside the dashboard. - -![process performance overview](./img/dashboard-viewMode-monitorFeatures.png) - -To share the dashboard with other people or embed it in a webpage, use the sharing feature of the dashboard. Click on the **Share** button, which opens up a popover. After turning the **Enable sharing** switch on, a link is generated which you can send to people who do not have access to Camunda Optimize, and thus enable them to see the dashboard. - -If you applied filters on the dashboard, you can include them in the shared version of the dashboard by enabling the **Share with current filters applied** checkbox. If the checkbox is not checked, the shared dashboard will include the default filters if any have been set. - -![sharing](./img/dashboard-sharingPopover.png) - -You can also click the **Embed Link** button to copy code that you can paste into your webpage. Everyone who views the webpage can then see the content of the dashboard. The shared version of the dashboard only allows viewing the dashboard itself; it is not possible to alter it or interact with any other features of Optimize. Revoke the sharing anytime by disabling the share switch. - -To hide the header of the shared dashboard or a specific part of it, add the following parameter to the share URL: - -`header : titleOnly / linkOnly / hidden` - -For example, to completely hide the header from the shared dashboard, add `header=hidden` as shown: - -`http://?header=hidden` - -### Interacting with reports - -To see more details about the report on the dashboard, interact with the reports. The kind of interaction always depends on the report itself. - -If the interactions do not suffice to get the desired information, or you want to edit the report, directly access the report by clicking on its title. - -### Adding filters in view mode - -In the dashboard view mode, there is a **Filters** button which opens a panel that shows all filters available for this dashboard. More filters can be made available in the dashboard edit mode. If the dashboard editor checked the **Allow viewer to add filter values** box for assignee, candidate group, or variable filters, dashboard viewers can add their own values to filter by. - -![filters in view mode](./img/filter-viewMode.png) - -Filters apply to all process reports on the dashboard. If a report already has filters set, they will be combined with the dashboard filter. For example, if a report has a filter to only show running instances and a dashboard filter for suspended instances is set, the report will only show instances that are both running and suspended. Dashboard filters are not applied to decision reports or external reports. - -Variable filters are only applied to reports whose process definition includes the variable. Otherwise, the filter is ignored for that report. Other dashboard filters and filters defined directly on the report are still applied.
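The way dashboard filters combine with report filters, as described above, amounts to a logical AND across both filter sets. The following TypeScript sketch is purely illustrative (the types and the running/suspended predicates are hypothetical stand-ins, not Optimize code), but it captures the rule that an instance is shown only if it satisfies every filter from the report and from the dashboard:

```typescript
// Illustrative sketch of how report filters and dashboard filters combine; not Optimize code.
type InstanceState = "running" | "suspended" | "completed" | "canceled";

interface ProcessInstance {
  states: InstanceState[]; // states that currently apply to the instance
}

type InstanceFilter = (instance: ProcessInstance) => boolean;

// Hypothetical report filter: only running instances.
const reportFilter: InstanceFilter = (i) => i.states.includes("running");

// Hypothetical dashboard filter: only suspended instances.
const dashboardFilter: InstanceFilter = (i) => i.states.includes("suspended");

// An instance appears in the report only if it matches every filter from both sources.
const isShown = (instance: ProcessInstance, filters: InstanceFilter[]): boolean =>
  filters.every((filter) => filter(instance));

// Matches the example above: the instance is both running and suspended, so it is shown.
console.log(isShown({ states: ["running", "suspended"] }, [reportFilter, dashboardFilter])); // true
```

Because the filter sets are intersected, dashboard filters can only narrow what the report already shows, never widen it.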
diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/creating-reports.md b/optimize_versioned_docs/version-3.7.0/components/userguide/creating-reports.md deleted file mode 100644 index c66a2f476c6..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/creating-reports.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: creating-reports -title: Creating reports -description: "Reports offer the ability to view your data from different angles, and thus capture all aspects that influence your processes, show new trends, or depict your current business state." ---- - -## Overview - -Reports offer the ability to view your data from different angles, and thus capture all aspects that influence your processes, show new trends, or depict your current business state. - -Each report consists of the [edit mode](./process-analysis/report-analysis/edit-mode.md) and [view mode](./process-analysis/report-analysis/view-mode.md) to perform different kinds of actions on it. - -## Creating a single report - -To create a custom report based on a key performance indicator (KPI) you’d like to analyze, and to incorporate this report into a dashboard, follow the steps below: - -1. On the right side of the **Home** page, select **Create New > New Report**. Here we’ll take a look at a single process, though you can also view data from multiple processes. -2. Click the text box under **Select Process** and select the process you’d like to analyze. -3. Select the type of report you’d like to use on the right side of the **Create new Report** box. As with dashboards, Optimize offers preconfigured templates such as heatmaps and tables. We’ll begin with a heatmap. -4. Click **Create Report**. -5. Set up and customize your report. Begin by naming your report in the text box at the top of the page, pre-filled with **New Report**. -6. In the gray text box to the right, confirm your data source, and select what you’d like to review from the process (in this case, we are viewing flow nodes.) You can also group by topics such as duration or start date. -7. If you’d like, filter the process instance or flow nodes. For example, you can filter by duration, only viewing process instances running for more than seven days. -8. Finally, you have the option to view particular sets of data from the instance, like instance count or absolute value, by selecting the gear icon to the left of your data customization. You can also choose how you’d like to visualize your data in the box beneath **Visualization** (i.e. bar chart, pie chart, etc.). Once you’ve made your selections, click **Save**. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/data-sources.md b/optimize_versioned_docs/version-3.7.0/components/userguide/data-sources.md deleted file mode 100644 index 79b31a8b0db..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/data-sources.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: data-sources -title: Data sources ---- - -If you create a collection, you can add data sources that can be used to create reports. To see the existing data sources or add additional ones, go to the **Data Sources** tab of the collection. - -![add source by definition](./img/sourceByDefinition.png) - -Using the **Add** button, a manager can add one or more sources to the collection by selecting the definitions that need to be added. 
- -![add source by tenant](./img/sourceByTenant.png) - -The added sources will appear in the process/decision selection list inside the report builder where they can be used to create reports. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/decision-filter.md b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/decision-filter.md deleted file mode 100644 index ad8e2c7fde4..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/decision-filter.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: decision-filter -title: Filters -description: Narrow down your view on the decision by creating reports based on a subset of all decision evaluations. ---- - -Camunda Platform 7 only - -Similar to [filters for process analysis](../additional-features/filters.md), you can define filters for your decision reports. - -You can filter by the [evaluation date](#evaluation-date-filter) of the decision, or by [input and output variables](#variable-filter). This screenshot shows how to add a filter to your decision report: - -![Decision Report with open filter list in Camunda Optimize](./img/report-with-filterlist-open.png) - -## Evaluation date filter - -Applying an evaluation date filter will result in the report considering only decision evaluations which occurred within the defined date range. Only one evaluation date filter can be defined for any report. - -Like the [process instance date filters](../additional-features/filters.md#date-filters), you can define a fixed or relative filter. Read the appropriate section in the process filter guide for details about the differences. - -Alternatively, if your report is visualized as a bar or line chart, you can create an evaluation date filter by using your mouse to select the area you want to filter for. - -![Zooming into a section of the chart](./img/zoom-in.png) - -## Variable filter - -Using the input or output variable filter retrieves only those decisions where the evaluation had certain variable values as either input or output. For example, assume you want to analyze only those decision evaluations where the output variable **Classification** had the value **budget**. You can achieve this by creating an output variable filter, selecting the **Classification** variable, and checking the **budget** option. - -Depending on the variable type, different ways to specify the value range are available. Read the [variable filter section](../additional-features/filters.md#variable-filter) in the filter guide to see all possible options. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/decision-report.md b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/decision-report.md deleted file mode 100644 index 6c8cc711bf7..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/decision-report.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: decision-report -title: Single report -description: Explore, discover, and get insights into your decision evaluations. ---- - -Camunda Platform 7 only - -Decision reports are very similar to process reports, but allow you to gain insights into your decision definitions, rather than process definitions. - -To create a decision report, click on the **Decision Report** option using the **Create New** dropdown button available on the homepage.
- -![Create a new Decision Report from the Report list page](./img/dmn_report_create.png) - -There are a number of different reports you can create based on decisions: - -## Raw data - -Similar to the raw data process report, this allows you to view a table listing all available decision data. This can come in handy if you have found interesting insights in certain decision evaluations and need detailed information about them, or if you are exploring a decision definition with a limited number of evaluations. - -You can reorder the columns and click on any column header to sort the table by this column. Using the configuration dialog, you can also define which columns to show and whether to include the evaluation count number in the report. These settings are only available in the edit mode of the report. - -To create a raw data report, select **Raw Data** from the view dropdown. The other fields are filled automatically. - -![Decision Raw Data Table in Camunda Optimize](./img/dmn_raw_data_report.png) - -## Evaluation count - -This view option allows you to create reports that show how often the decision was evaluated. Depending on the group by selection, this could be either the total number of evaluations, a chart displaying how this number of evaluations developed over time, or how they were distributed across variables or rules. As always, you can define [filters](../additional-features/filters.md) to specify which decision evaluations to include in the report. - -#### Group by: None - -This shows a single number indicating the total number of evaluations for this decision definition and version in the current filter. Using the configuration dialog, you can limit the precision of the number and define a goal to create a progress bar. Details of both options are described in the [process report configuration section](../process-analysis/report-analysis/edit-mode.md#number). - -![Progress Bar visualization for Decision Evaluation Count](./img/dmn_progress_bar.png) - -#### Group by: Rules - -This report shows the decision table with an additional column to the right. This column contains information on how often each rule matched an evaluation. It also shows a bar indicating how frequently a single rule was matched. You can turn off the numbers or the bar in the configuration dialog. - -![Decision Table with evaluation count information](./img/dmn_decision_table.png) - -#### Group by: Evaluation date - -Using this group by option allows you to see the development of evaluations over time. The result can be visualized as a table or chart. In combination with filters, this allows you to create powerful reports, for example, showing during which time period the decision resulted in a certain output variable. If you visualize such a report as a chart, you have access to all the [chart visualization options](../process-analysis/report-analysis/edit-mode.md#charts-line-bar-pie) process reports have, too. - -![Line Chart showing decision evaluations by date](./img/dmn_date_chart.png) - -#### Group by: Input or output variable - -This option allows you to choose a variable from the decision definition to group the results by. In the report, you will see which values this variable had over all evaluations in the filter and how often each value was encountered when evaluating the decision. This type of report can be visualized as a table or chart.
- -![Pie Chart depicting distribution of output variable values](./img/dmn_pie_chart.png) diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_date_chart.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_date_chart.png deleted file mode 100644 index c0a5645454e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_date_chart.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_decision_table.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_decision_table.png deleted file mode 100644 index d14109fe919..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_decision_table.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_pie_chart.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_pie_chart.png deleted file mode 100644 index bd85020b7f1..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_pie_chart.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_progress_bar.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_progress_bar.png deleted file mode 100644 index a1acb41eb3d..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_progress_bar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_raw_data_report.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_raw_data_report.png deleted file mode 100644 index 6b96a3f7f4d..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_raw_data_report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_report_create.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_report_create.png deleted file mode 100644 index f05df857a19..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/dmn_report_create.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/report-with-filterlist-open.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/report-with-filterlist-open.png deleted file mode 100644 index 9548dcdeab7..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/report-with-filterlist-open.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/zoom-in.png b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/zoom-in.png deleted file mode 100644 index a67069eae51..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/img/zoom-in.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/overview.md 
b/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/overview.md deleted file mode 100644 index c1f2497bdbc..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/decision-analysis/overview.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: overview -title: Overview -description: Explore, discover and get insights into your decisions that otherwise would be hidden. ---- - -Camunda Platform 7 only - -Decision reports provide you with the ability to view your data from different angles and thus capture all aspects that influence your decisions, show new trends, or depict your current business state. - -You can also define filters which help you narrow down your view to what you are interested in. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/addUser.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/addUser.png deleted file mode 100644 index 0498752b136..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/addUser.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/area-chart-report.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/area-chart-report.png deleted file mode 100644 index 6cb87748328..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/area-chart-report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/collection.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/collection.png deleted file mode 100644 index 7b618e709b8..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/collection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-config.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-config.png deleted file mode 100644 index be6a371856a..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-config.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-report-create.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-report-create.png deleted file mode 100644 index 3849a161540..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-report-create.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-report.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-report.png deleted file mode 100644 index 4d6735600f0..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/combined-report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/copy.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/copy.png deleted file mode 100644 index 19c338d3c8e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/copy.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-addAReportModal.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-addAReportModal.png deleted file mode 100644 index 64aa3dda057..00000000000 Binary files 
a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-addAReportModal.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-dashboardEditActions.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-dashboardEditActions.png deleted file mode 100644 index ee6786ed79b..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-dashboardEditActions.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-reportEditActions.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-reportEditActions.png deleted file mode 100644 index 35de7e7d489..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-reportEditActions.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-sharingPopover.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-sharingPopover.png deleted file mode 100644 index dc14b7fe776..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-sharingPopover.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-viewMode-monitorFeatures.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-viewMode-monitorFeatures.png deleted file mode 100644 index a6283be869e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboard-viewMode-monitorFeatures.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboardTemplate.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboardTemplate.png deleted file mode 100644 index feae00b34f4..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/dashboardTemplate.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/filter-editMode.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/filter-editMode.png deleted file mode 100644 index e97ebb1242d..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/filter-editMode.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/filter-viewMode.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/filter-viewMode.png deleted file mode 100644 index 9ecfd8c8cce..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/filter-viewMode.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/home.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/home.png deleted file mode 100644 index 68f49378a19..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/home.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/report-reportEditActions.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/report-reportEditActions.png deleted file mode 100644 index 8f0431a47a6..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/report-reportEditActions.png and /dev/null differ diff --git 
a/optimize_versioned_docs/version-3.7.0/components/userguide/img/reportTemplate.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/reportTemplate.png deleted file mode 100644 index 80013d42cf3..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/reportTemplate.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/sourceByDefinition.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/sourceByDefinition.png deleted file mode 100644 index 4d947751b11..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/sourceByDefinition.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/sourceByTenant.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/sourceByTenant.png deleted file mode 100644 index ee4b8f96795..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/sourceByTenant.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/sources.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/sources.png deleted file mode 100644 index 14aee190b45..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/sources.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/table-report.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/table-report.png deleted file mode 100644 index 8d5a5f5c69e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/table-report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/img/users.png b/optimize_versioned_docs/version-3.7.0/components/userguide/img/users.png deleted file mode 100644 index cd3784a638c..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/img/users.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/branch-analysis.md b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/branch-analysis.md deleted file mode 100644 index 8bfe088b20c..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/branch-analysis.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: branch-analysis -title: Branch analysis -description: Optimize provides you with a statistical analysis for a given end event and a gateway. ---- - -## Overview - -If a process contains more than one end event, it is useful to know which path tokens took to reach a specific end event. Optimize provides you with a statistical analysis for a given end event and a gateway. This analysis includes how tokens were split at the gateway in question, and how many of the tokens of each branch reached the end event. - -![branch analysis](./img/analysis-1.png) - -## Branch analysis in Optimize - -Select a process definition using the **Select Process** option in the top left of the page. After selecting a process definition and version, the diagram of the process is displayed on the page. - -By default, all process instances for the selected process definition are included in the analysis. You can reduce this set of process instances by applying filters. - -To perform a statistical analysis on the selected process, specify a gateway and an end event. 
Moving your mouse cursor over the end event and gateway inputs at the top of the screen highlights available elements in the diagram. Likewise, mouse over an element to see whether it is an end event or gateway. - -Additionally, if you move your mouse over an end event, you see detailed information about this end event, like how many instances reached this end event. Click on an element to select or deselect it. You can also clear the selection using the **x** button in the control panel on top. Changing the process definition also clears the selection. - -After selecting an end event and gateway, a statistical analysis is shown next to the diagram. The analysis consists of two bar charts and a statistical summary. Both charts contain a bar for every sequence flow leaving the selected gateway. - -![branch analysis second example](./img/analysis-2.png) - -The first chart shows the distribution of process instances over the various sequence flows, showing how often each sequence flow has been executed, independently of whether the process instance then reached the selected end event. - -The second chart displays the relative amount of process instances that reached the selected end event after taking the respective outgoing sequence flow. - -Process instances which have taken more than one path (e.g. by looping back to a flow node before the gateway and then taking a different sequence flow) are not considered in the statistical analysis. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/analysis-1.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/analysis-1.png deleted file mode 100644 index 0132bf75241..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/analysis-1.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/analysis-2.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/analysis-2.png deleted file mode 100644 index 687db1c4edb..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/analysis-2.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_1_heatMap.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_1_heatMap.png deleted file mode 100644 index d2e4102d922..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_1_heatMap.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_2_distribution.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_2_distribution.png deleted file mode 100644 index c5faa30d363..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_2_distribution.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_3_Variables.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_3_Variables.png deleted file mode 100644 index 32272f540a5..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/img/outlierExample_3_Variables.png and 
/dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/outlier-analysis.md b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/outlier-analysis.md deleted file mode 100644 index 7b3e8337047..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/outlier-analysis.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: outlier-analysis -title: Outlier analysis -description: Outlier analysis allows you to easily identify process instances that took significantly longer than others to complete a flow node. ---- - -## Overview - -Outlier analysis allows you to easily identify process instances that took significantly longer than others to complete a flow node, and subsequently slow down your process. - -## Outlier analysis in action - -Select a process definition that you would like to analyze. Once a definition is selected, a **heatmap** is displayed which highlights the flow nodes where Optimize identified many duration outliers. In our example, the **Approve Invoice** activity has duration outliers. When hovering over the task, you can see how many instances were identified and how much longer they took than the average duration. - -![outlier analysis example 1](./img/outlierExample_1_heatMap.png) - -Click on **View Details** to directly see a duration distribution chart for the specific flow node. The duration distribution chart contains information about how long the identified outliers took, also in comparison to the other flow node instance durations. - -![outlier analysis example 2](./img/outlierExample_2_distribution.png) - -## Significant variable values - -When looking at the duration outlier instances, you can analyze the data further to find the root cause of why these instances took so long. Click on the significant variables tab to view a table that lists significant variable values in the outlier instances. - -It also allows you to see how many times this variable value occurred in the outlier instances compared to the rest of the process instances. This can give you a good idea of whether there is a correlation between a variable value and a flow node taking more time than expected. In our example, we can see that for most of our duration outliers, the delay variable was set to `true`. - -![outlier analysis example 3](./img/outlierExample_3_Variables.png) diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/overview.md b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/overview.md deleted file mode 100644 index 027161fe31f..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: overview -title: Overview -description: Further analyze your reports, and learn more about branch and outlier analysis. ---- - -The following documentation provides an opportunity to further analyze your reports through several methods: - -- [Outlier analysis](./outlier-analysis.md): Outlier analysis allows you to easily identify process instances where certain flow node instances took significantly longer than others and subsequently slow down your process. -- [Branch analysis](./branch-analysis.md): If a process contains more than one end event, it is useful to know which path tokens took to reach a specific end event. Optimize provides you with a statistical analysis for a given end event and a gateway. 
diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/edit-mode.md b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/edit-mode.md deleted file mode 100644 index 6e14f0c9a3a..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/edit-mode.md +++ /dev/null @@ -1,385 +0,0 @@ ---- -id: edit-mode -title: Edit mode -description: The edit mode allows you to configure the report and adjust it to your needs. ---- - -## Edit mode - -The edit mode allows you to configure the report and adjust it to your needs. The following operations are possible within edit mode: - -- Rename your report -- Build a report -- Configure your report -- Save the current state with your applied changes -- Cancel changes you already applied to the report - -Building a report is the crux of the report edit mode. The building process itself is composed of several steps, which happen in the control panel. - -### Select process definitions - -Every report relates to one or multiple process definitions, versions, and tenants. You must choose at least one process definition you want to create a report for. - -To add a process definition to the report, click **Add** at the top of the **Data Source** section of the report control panel. This opens a dialog showing all process definitions you can use in the report. You can select up to 10 definitions to add to the report. If there are many process definitions, you can use the input field to search for the definition you are looking for. - -![Process definition selection in the report builder in Camunda Optimize](./img/report-processDefinitionSelection.png) - -For every added process definition, you can set a display name and a specific version or version range. To do so, click the **Edit** button in the process definition card. There are also buttons to remove the definition from the report or add a copy of the definition. - -When editing a process definition, using the version dropdown, you can choose between all versions, the latest version, or a specific set of versions. - -![Process Version selection in the report builder in Camunda Optimize](./img/report-versionSelection.png) - -- **All** option: Every process instance across all versions of the process definition will be considered in your report. -- **Always display latest** option: Makes your report always refer to the latest version. Keep in mind that if a new version of the process is deployed, the report will automatically consider process instances of this new version only. -- **Specific version** option: Specify one or multiple existing versions of the process. - -Data from older versions is mapped to the most recent version in the selection. Therefore, the report data can seem to be inconsistent, which is due to changes that occurred within the diagram through the different versions. For example, the old versions do not contain newly added tasks or a part of the diagram was removed because it was considered to be obsolete. - -![Process definition selection for a multi-tenancy scenario](./img/tenantSelection.png) - -By default, all process instances for the selected process definitions are included in a report. You can reduce this set of process instances by applying a [filter](../../additional-features/filters.md). - -### Define the report - -In this section of the report builder, you are characterizing the output of the report. 
Basically, you are defining "I want to view ... grouped by ... visualized as ...". To understand better what "View" and "Group by" mean, you can use the analogy of a graph, where "View" is the y-axis and "Group by" is the x-axis. - -First, you need to select which part of the data you want to view. Optimize differentiates between the view (e.g. process instance or flow node) and the measure (e.g. count or duration): - -1. Raw Data: View just a table with the actual data listed as rows. This can come in handy if you found interesting insights in certain process instances and need detailed information about those instances, or you are exploring a process definition with a limited number of instances. This report type also allows you to inspect raw [object variable values](../../../../self-managed/optimize-deployment/setup/object-variables.md). -2. Process instance - -- Count: View how many process instances were executed. -- Duration: View how long the process instances took to complete. - -3. Incident - -- Count: View how many incidents occurred on the process. -- Resolution duration: View how long the incident took to get resolved. - -4. Flow node - -- Count: View how often the flow nodes (e.g. tasks) have been executed. -- Duration: View how long each flow node took to complete. - -5. User task - -- Count: View how often each user task has been executed. -- Duration: View how long each user task took to complete. - -6. Variable: View an aggregation of values for a specific numeric variable of the process definition. - -It is possible to display both count and duration measures for a single view in the same report. - -Subsequently, you need to define how to group the data. Think of it as applying a metric to your input, where you break up the data by date, flow nodes, variable values, or other properties. For that, you have different options: - -- **None**: Do not split up the data. -- **Flow nodes**: Cluster the data by flow nodes. -- **User tasks**: Cluster the data by user tasks. -- **Duration**: Cluster the data by duration. Depending on the selected view, this can be the duration of process instances, flow nodes, or user tasks. -- **Start date**: Group instances together that were started during the same date period or time, e.g. hour, day, or month. Depending on the selected view, this can be the start date of process instances, flow nodes, or user tasks. -- **End date**: Group instances together that were finished during the same date period or time, e.g. hour, day, or month. Depending on the selected view, this can be the end date of process instances, flow nodes, or user tasks. -- **Running date of the process instance**: Group process instances together that were running during the same date period or time, e.g. hour, day, or month. -- **Variable**: Process instances with the same value for the selected variable are grouped together. -- **Assignee**: Only available for user task views. Tasks are grouped together according to their current assignee. -- **Candidate group**: Only available for user task views. Tasks with the same candidate group are grouped together. -- **Process**: Only available for process instance reports with multiple definitions. Data from the same process is grouped together. - -Finally, define how you want the data to be visualized. Examples are heatmap, table, bar, or line chart. - -Not all the above view, group by, and visualization options can be combined.
For instance, if you choose `Flow Node: Count` as view, the data is automatically grouped by flow nodes as no other combination would be valid. - -All possible combinations can also be found in the following table: - -| View | Group by | Visualize as | -| --------------------------------------------------- | ---------------------------------------------------------------- | --------------------- | -| Raw Data | None | Table | -| Process instance: Count, Process instance: Duration | None | Number | -| Process instance: Count | Start Date, End Date, Running Date, Variable, Duration, Process | Table, Chart | -| Process instance: Duration | Start Date, End Date, Variable, Process | Table, Chart | -| Incident: Count, Incident Duration | None | Number | -| Incident: Count, Incident Duration | Flow Nodes | Table, Chart, Heatmap | -| Flow Node: Count, Flow Node: Duration | Flow Nodes | Table, Chart, Heatmap | -| Flow Node: Count | Start Date, End Date, Duration, Variable | Table, Chart | -| Flow Node: Duration | Start Date, End Date, Variable | Table, Chart | -| User Task: Count, User Task: Duration | User Tasks | Table, Chart, Heatmap | -| User Task: Count, User Task: Duration | Start Date, End Date, Assignee, Candidate Group | Table, Chart | -| User Task: Count | Duration | Table, Chart | -| Variable | None | Number | - -:::note -You might sometimes see a warning message indicating that the data is limited to a certain number of points. This happens because the available stored data in this case is very large, and it is not possible to display all the data in the selected visualization. -::: - -### Reports with a second "Group by" option - -Using the second "Group by" option, it is possible to apply another grouping to your data to display extra details such as dates, variable values, or assignees. This option will be shown below the first "Group by" option if the current report combination supports it. Here is an overview of the reports that support a second "Group by": - -### Flow node reports - -Flow node names can be applied as a second "Group by". If the report contains multiple process definitions, the data can also be grouped by process as a second "Group by". - -### User task reports - -User task names, assignees, and candidate groups can be applied as a second "Group by". - -For example, if your report is grouped by assignee/candidate group, it is possible to add another grouping by user task to see which user task your users/group are working on or have completed in the past. If the report contains multiple process definitions, the data can also be grouped by process as a second "Group by". - -:::note -Reports using assignee/candidate groups are only available in Camunda Platform 7.
-::: - -![Distributed User Task report](./img/distributed-report.png) - -Refer to the table below for an overview of all report combinations that support a second "Group by": - -| View | Group by | Second group by | -| ------------------------- | ------------------------- | ---------------------------------------------------------------------------------- | -| User Task Count, Duration | User Tasks | Assignee, Candidate Group, Process (only for multi-definition reports) | -| User Task Count, Duration | Start Date, End Date | Assignee, Candidate Group, User Tasks, Process (only for multi-definition reports) | -| User Task Count, Duration | Assignee, Candidate Group | User Tasks, Process (only for multi-definition reports) | -| User Task Count | Duration | User Tasks, Process (only for multi-definition reports) | - -### Process instance reports - -Refer to the table below for the process instance count and duration reports that support a second "Group by": - -| View | Group by | Second group by | -| -------------------------------- | ---------------------- | ----------------------------------------------------------------- | -| Process Instance Count, Duration | Start Date, End Date | Variable, Process (only for multi-definition reports) | -| Process Instance Count, Duration | Variable | Start Date, End Date, Process (only for multi-definition reports) | -| Process Instance Count | Running Date, Duration | Process (only for multi-definition reports) | - -The diagram below shows a report grouped by `Start Date` and a boolean variable: - -![Distributed process instance report](./img/distributedByVar.png) - -### Duration and variable report aggregation - -For duration and variable views, the default aggregation type is the average. You can add and change different aggregations like minimum, maximum, and median in the report configuration panel. Note that the median is an estimate and the other operations are exact values. - -![Duration Aggregation Selection](./img/durationAggregation.png) - -Reports with multiple aggregations that have a [second "Group by"](#reports-with-a-second-group-by-option) can only be visualized as table. - -### User task duration time - -:::note -The following information regarding idle versus work is currently applicable only to Camunda Platform 7. -::: - -In user task duration reports, you have the opportunity to select which part of the user task's lifecycle you want to see in the report: - -- Idle: View how long each user task was considered idle (not claimed by an assignee/user) during its execution. -- Work: View how long each user task was considered to be worked on by assignees/users (claimed by an assignee/user) during its execution. -- Total: View how long each user task took to complete. - -It is possible to display and compare multiple user task duration times in the same report. Reports with multiple user task duration times that have a [second "Group by"](#reports-with-a-second-group-by-option) can only be visualized as table. - -:::note -User tasks which have been completed yet have no claim operations are evaluated as follows: if the user task was canceled, the task is considered to have been idle whereas user tasks which were completed are considered to have been worked on programmatically or via a custom UI, meaning the time between start and end is considered work time. -::: - -### Target value comparison - -Based on flow node duration heatmaps, Optimize allows you to specify a target value for every activity. 
For example, if a user task has to be completed within one day, you can set the target value to one day. If the time it takes to complete the user task exceeds this target value, the task is highlighted in the heatmap. - -To set target values and create a target value comparison heatmap, you need to be in the edit mode of a report which has the following configuration: - -| View | Flow node duration/user task duration | -| ------------ | ------------------------------------- | -| Group by | Flow nodes/user tasks | -| Visualize as | Heatmap | - -If your report has this configuration, a target value button is visible. Clicking on the **Target Value** button for the first time opens an overlay containing the process diagram and a table with all flow nodes. You can also see the actual duration value for every flow node. - -To set a target value for a flow node, use the number and unit fields in the last column. If the target value number field for a flow node is empty, this flow node has no target value set (the selected time unit is ignored in that case). - -![Setting Target Values](./img/targetvalue-2.png) - -If you set a target value for a flow node, this target value is represented as a badge on the flow node in the diagram in the upper part of the overlay. You can click on any flow node in the diagram to jump to the target value input field in the table. - -If you have a user task report, you can only select user tasks here, as only those are included in the report result. When selecting a target value input field in the table, the corresponding diagram element is highlighted. To save the target value configuration, click **Apply**. - -After you save the target values, the normal duration heatmap is replaced with a target value visualization. In this new visualization, flow nodes with an average duration larger than the specified target value are highlighted in red. - -If you mouse over one of the nodes, the tooltip shows the following: - -- The target duration value -- The actual duration -- The relative percentage the actual value is of the target value -- A button to download a list of process instance IDs that exceed the target value - -You can also see the target value as a badge on the diagram. - -![Target Value Comparison](./img/targetvalue-1.png) - -After the initial target values for a report are set, you can use the target value button to toggle between the target value and the regular duration view mode. If you want to change target values, use the gear button to open the overlay again. - -As with any change to a report configuration, to persist target values and show them in the report view mode and on dashboards, you need to save the report using the **Save** button in the upper right corner. - -### Process instance parts - -In some cases, you may not be interested in the duration of the whole process instance, but only a certain part of it. For that scenario, there is an additional button called **Process Instance Part** available for every process instance duration view that only shows data for a single process definition. - -Clicking this button opens an overlay letting you select the start and end of the part of the process instance you are interested in. After confirming the selection, the displayed duration refers to the selected part only instead of the whole instance. - -In some cases it can happen that the same activity is executed multiple times in the same process instance, e.g. if the process contains loops or parallel gateways. 
In such cases, Optimize considers only the part between the start date of the first instance of the start node and the end date of the first instance of the end node. - -![Process Instance Part Modal](./img/process-part.png) - -### Configure a report - -The configuration panel groups all the settings that can be applied to a report in one place. To see the panel, click on the cog button available in the edit mode of any report. Every visualization has different settings that can be configured in the panel. - -When you save changes to the report configuration, they apply to the report view mode and any dashboard this report is displayed on. - -### Number - -Number reports are any reports that are visualized as a single number (e.g. `Process Instance: Count` grouped by `None` or `Process Instance: Duration` grouped by `None`). - -In number reports, the following configurations are possible: - -### Number precision - -Number precision can be configured from the panel to limit the most significant units to be shown. - -For example, suppose we have a report that calculates the total process instance duration. When the precision limit is not set, you will see all possible units, e.g. `1y 5m 2wk 5d 3h 16min 3s 170ms`. If you are only interested in certain units, e.g. months, you can omit all insignificant units by limiting the precision as shown in the figure below: - -![Number report configurations](./img/NumberConfiguration.png) - -### Number goal value (progress bar) - -Number reports appear as a progress bar when the goal option is enabled from the panel as shown. The baseline and the target value of the progress bar can also be set using the panel. - -![Progress Bar Visualization](./img/progressbar.png) - -You can toggle between the progress bar and the single number visualization using the same goal line switch. - -A red line indicator appears on the progress bar when its value exceeds the goal value. On the right side of the indicator, the bar turns into a darker color to clearly show the exceeded amount. - -![Progress Bar Visualization](./img/progressbarExceeded.png) - -### Table settings - -In table reports, the following configurations are possible: - -### Show instance count - -Displays the total instance count on the right side of the visualization. If you save the report while this option is enabled, the number will also be shown on any dashboard this report is added to and when the report is shared. - -### Hide, show, and reorder table columns - -The table settings allow you to hide specific columns using the configuration menu as shown in the figure below: - -![raw data configuration](./img/rawdata.png) - -When working with raw data table reports, you can also reorder the table columns using drag-and-drop on the header of the respective column. - -### Sorting by table column - -To sort a table by a specific column, click on the header of that column. Doing that will show a small caret icon in the header of the column indicating which column the table is currently sorted by and the direction of this sorting (ascending or descending) as shown: - -![Sorting a table in Optimize](./img/sorting.png) - -Clicking again on the same column header will reverse the direction of sorting. - -Saving the report will also preserve the applied sorting.
- -The sorting currently works for all table reports except for: - -- Combined table reports -- Reports grouped by integer type variables - -### Absolute and relative values - -When configuring a count report, you have the opportunity to configure which columns are included in the table. You can hide or show the corresponding columns using the switches for absolute and relative value. - -### Custom bucket size for date variables - -When evaluating a report which is grouped by a date variable and displayed as a table, Optimize offers you the option to select your preferred unit specifying the custom result bucket size from the report configuration menu. The available units are year, month, week, day, and automatic. - -The default unit is automatic, which will create evenly spaced result buckets based on the values of the date variable. This configuration option is also available for charts. - -### Custom bucket size and baseline - -When evaluating a report which is grouped by duration or a number variable, Optimize offers you the option to specify your preferred result bucket size as well as a custom baseline in the report configuration menu. The bucket size determines the width of one bucket, and the baseline specifies the start of the first bucket. - -For example, say a report contains the variable values 0.3, 6, and 13, and you set a bucket size of 5. By default, Optimize would now return a bucket for the values 0.3 to 5.3, one for 5.3 to 10.3, and one for 10.3 to 15.3. You may prefer your bucket start and end points to be a round number, in which case you should set your baseline to 0. With a baseline of 0 and bucket size 5, the result buckets now span 0 to 5, 5 to 10, and 10 to 15. - -If these configuration fields are not set, by default Optimize will create evenly spaced result buckets with a range based on the minimum and maximum values of the number variable. - -This configuration option is also available for charts. - -### Charts (line, bar, pie) - -In bar chart and line chart reports, it is possible to select the color of the graph, add names to the x-axis and y-axis, and edit many other settings as shown in the figure below: - -![chart visualization configurations](./img/chartConfiguration.png) - -In charts, you can hide/show absolute and relative values that appear in the tooltips. - -### Show instance count - -Displays the total instance count on the right side of the visualization. If you save the report while this option is enabled, the number will also be shown on any dashboard this report is added to and when the report is shared. - -### Chart goal line - -Optimize allows you to set a goal line in bar chart and line chart visualizations. Using this feature, it is possible to highlight anything above or below a certain value. - -A good use case for such functionality is the following example: - -First, go to the edit mode of a report and choose the following configuration: - -| View | Count frequency of process instance | -| ------------ | ------------------------------------- | -| Group by | Start date of process instance: Month | -| Visualize as | Bar chart | - -Let us say that the number of completed process instances should always be above six. A goal line can be used as follows: - -Set the target value input field to six and select the above button. 
If the number of process instances is below six, it will be highlighted in red as shown: - -![Bar charts goal line](./img/targetValue.png) - -This feature can be also used with every other bar chart and line chart visualization. Here is another example where the target value is used with line chart visualization: - -![Line chart goal line](./img/targetline.png) - -### Custom bucket size for date variables - -When evaluating a report which is grouped by a date variable and displayed as a chart, Optimize offers you the option to select your preferred unit specifying the custom result bucket size in the report configuration menu. - -The available units are year, month, week, day, and automatic. The default unit is automatic, which will create evenly spaced result buckets based on the values of the date variable. This configuration option is also available for tables. - -### Custom bucket size and baseline - -When evaluating a report which is grouped by duration or a number variable, Optimize offers you the option to specify your preferred result bucket size as well as a custom baseline in the report configuration menu. The bucket size determines the width of one bucket, and the baseline specifies the start of the first bucket. - -For example, say a report contains the variable values 0.3, 6, and 13 and you set a bucket size of 5. By default, Optimize would now return a bucket for the values 0.3 to 5.3, one for 5.3 to 10.3, and one for 10.3 to 15.3. You may prefer your bucket start and end points to be a round number, in which case you should set your baseline to 0. With a baseline of 0 and bucket size 5, the result buckets now span 0 to 5, 5 to 10, and 10 to 15. - -If these configuration fields are not set, Optimize will create evenly spaced result buckets with a range based on the minimum and maximum values of the number variable by default. - -This configuration option is also available for tables. - -### Stacked bar chart - -When evaluating a report which has a second "Group by", Optimize offers you the option to stack the bar chart bars instead of displaying them near each other. Stacking bars would be useful when the focus of the chart is to compare the totals (e.g. flow node count, process instance count, etc.) and one part of the totals (e.g. flow node, variable value, etc.) - -This configuration option is also available for bar/line charts. - -![Stacked bar chart report](./img/stackedBar.png) - -### Bar/line chart - -When evaluating a report which has both count and duration measures, Optimize offers you the option to display one of the measures as bars and the other measure as a line. This would help to differentiate between duration and count values displayed in the visualization. By default, the count measure is displayed as bars and the duration as a line. You can also switch between them by using the configuration option shown. - -![Bar/Line chart report](./img/barLine.png) - -### Heatmaps - -When enabling absolute or relative values switches, all tooltips for all flow nodes stay visible. This is also possible when you have defined target values. If you save the report in this state, the tooltips will also be shown on any dashboard this report is added to. - -![Heatmap tooltips](./img/heatmap.png) - -As for charts and table reports, it is possible to display the total instance count on the right-hand side of the visualization. If you save the report while this option is enabled, the number will also be shown on any dashboard this report is added to and when the report is shared. 
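The "Custom bucket size and baseline" paragraphs above describe simple bucketing arithmetic. The following is a minimal, hypothetical Python sketch of that arithmetic for illustration only; it is not Optimize's implementation, and the function name is made up for this example.

```python
# Illustration of how a baseline and bucket size determine bucket boundaries,
# using the example values (0.3, 6, 13) and bucket size 5 from the section above.

def bucket_ranges(values, bucket_size, baseline=None):
    """Return (start, end) pairs covering all values, starting at the baseline."""
    start = min(values) if baseline is None else baseline
    ranges = []
    while start <= max(values):
        ranges.append((start, start + bucket_size))
        start += bucket_size
    return ranges

values = [0.3, 6, 13]
# Without a baseline, buckets start at the smallest value:
# roughly [(0.3, 5.3), (5.3, 10.3), (10.3, 15.3)] (subject to float rounding).
print(bucket_ranges(values, 5))
# With baseline 0, buckets start and end on round numbers: [(0, 5), (5, 10), (10, 15)].
print(bucket_ranges(values, 5, baseline=0))
```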
diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/NumberConfiguration.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/NumberConfiguration.png deleted file mode 100644 index e5c0b4bd011..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/NumberConfiguration.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/barLine.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/barLine.png deleted file mode 100644 index 3dff8bdae3b..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/barLine.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/chartConfiguration.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/chartConfiguration.png deleted file mode 100644 index aa3e2fdf734..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/chartConfiguration.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/distributed-report.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/distributed-report.png deleted file mode 100644 index a93d781f0a9..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/distributed-report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/distributedByVar.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/distributedByVar.png deleted file mode 100644 index 4f13f9d2197..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/distributedByVar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/durationAggregation.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/durationAggregation.png deleted file mode 100644 index aacabc0e369..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/durationAggregation.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/heatmap.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/heatmap.png deleted file mode 100644 index dfe612976ad..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/heatmap.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/process-part.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/process-part.png deleted file mode 100644 index 52207324172..00000000000 Binary files 
a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/process-part.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/progressbar.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/progressbar.png deleted file mode 100644 index 323e90e6bde..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/progressbar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/progressbarExceeded.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/progressbarExceeded.png deleted file mode 100644 index 75844710c80..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/progressbarExceeded.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/rawdata.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/rawdata.png deleted file mode 100644 index 60359e56c11..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/rawdata.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-processDefinitionSelection.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-processDefinitionSelection.png deleted file mode 100644 index 161ffe3e1ac..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-processDefinitionSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-sharingPopover.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-sharingPopover.png deleted file mode 100644 index 04f51dce620..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-sharingPopover.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-versionSelection.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-versionSelection.png deleted file mode 100644 index 918805d3fd1..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/report-versionSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/sorting.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/sorting.png deleted file mode 100644 index 5fab0a99f42..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/sorting.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/stackedBar.png 
b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/stackedBar.png deleted file mode 100644 index fa826b57083..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/stackedBar.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetValue.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetValue.png deleted file mode 100644 index 8116e8e1ac1..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetValue.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetline.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetline.png deleted file mode 100644 index fe3bab78575..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetline.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetvalue-1.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetvalue-1.png deleted file mode 100644 index 7bdb1630f6e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetvalue-1.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetvalue-2.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetvalue-2.png deleted file mode 100644 index 96bbe786fdd..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/targetvalue-2.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/tenantSelection.png b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/tenantSelection.png deleted file mode 100644 index 804a0fd71b1..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/img/tenantSelection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/overview.md b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/overview.md deleted file mode 100644 index 5cf7cceccf1..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/overview.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: overview -title: Report process analysis -description: After creating a report, utilize process analysis for a closer look at your process instances. ---- - -## Edit mode - -The [edit mode](./edit-mode.md) allows you to configure the report and adjust it to your needs. 
The following operations are possible within edit mode: - -- Rename your report -- Build a report -- Configure your report -- Save the current state with your applied changes -- Cancel changes you already applied to the report - -## View mode - -Once you have defined what your report should look like, the [view mode](./view-mode.md) gives you a full view of the report visualization. To see more details about the report, you can interact with it, e.g. by moving your mouse over individual data points in diagrams or zooming in or out of heatmaps. The kind of interaction always depends on the report itself. diff --git a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/view-mode.md b/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/view-mode.md deleted file mode 100644 index ad0e109bb75..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/userguide/process-analysis/report-analysis/view-mode.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: view-mode -title: View mode -description: Once you have defined what your report should look like, the view mode gives you a full view of the report visualization. ---- - -## View mode - -Once you have defined what your report should look like, the view mode gives you a full view of the report visualization. To see more details about the report, you can interact with it, e.g. by moving your mouse over individual data points in diagrams or zooming in or out of heatmaps. The kind of interaction always depends on the report itself. - -The view mode also provides you with different kinds of actions, such as: - -![report sharing popover in Camunda Optimize](./img/report-sharingPopover.png) - -- Download CSV: In case you want to download the data of the report, you can click the **Download CSV** button. The downloaded file will include the report information in a table format. - -- Sharing: In case you want to share the report with other people or want to embed it in a webpage, you can use the sharing feature of the report. Just click on the **Share** button, which opens up a popover. After enabling the **Enable sharing** switch, a link is generated which you can send to people who do not have access to Camunda Optimize and thus enable them to see the report. - - You can also use the **Embed Link** button if you wish to insert the report into your webpage. Everyone who views the webpage can then see the content of the report. The shared versions of the report allow you to view the report itself only. There is no possibility to alter it or interact with any other features of Optimize. You can revoke sharing at any time by disabling the share switch. - - If you prefer to hide the header of the shared report or a specific part of it, you can do that by adding the following parameter to the share URL: - - ``` - header : titleOnly / linkOnly / hidden - ``` - - For example, to completely hide the header from the shared report, you can add `header=hidden` as shown: - - ``` - http://?header=hidden - ``` - -- Alerts: If the created report is inside a collection, you can use the **Alert** dropdown to create and manage alerts for that report. Since alerts can only be created on reports that have a number visualization, the **Alerts** dropdown will only be visible for such reports.
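The `header` share-URL option described above (`titleOnly`, `linkOnly`, or `hidden`) is an ordinary query parameter appended to the generated share link. As a small illustration only, a hypothetical helper for building such a link might look like this; the function name and parameter names are made up for this sketch, and the share URL itself still comes from the sharing popover.

```python
# Hypothetical helper: append the 'header' display option to an Optimize share link.
from urllib.parse import urlencode

def with_header_option(share_url: str, mode: str = "hidden") -> str:
    # mode is one of "titleOnly", "linkOnly", or "hidden", as listed above.
    separator = "&" if "?" in share_url else "?"
    return f"{share_url}{separator}{urlencode({'header': mode})}"
```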
diff --git a/optimize_versioned_docs/version-3.7.0/components/what-is-optimize.md b/optimize_versioned_docs/version-3.7.0/components/what-is-optimize.md deleted file mode 100644 index 6c275985a3c..00000000000 --- a/optimize_versioned_docs/version-3.7.0/components/what-is-optimize.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: what-is-optimize -title: What is Optimize? -description: "Leverage process data and analyze areas for improvement." ---- - -:::note -New to Optimize? Visit our introductory guide to [Optimize]($docs$/guides/improve-processes-with-optimize/) to get started. -::: - -Camunda Cloud is built to handle three key aspects of process automation: - -- Design -- Automate -- Improve - -A user can design process flows through our [Modeler]($docs$/components/modeler/about-modeler/). In a production scenario, you can deploy through Desktop Modeler, Web Modeler, or programmatically. A user can use [Tasklist]($docs$/components/tasklist/introduction/) to review and complete tasks, and [Operate]($docs$/components/operate/) to view and analyze process instances. - -Beyond these design and automate cornerstones lies an important component to leverage our process data and analyze areas for improvement: Optimize. - -Geared toward business stakeholders, Optimize offers business intelligence tooling for Camunda enterprise customers. By leveraging data collected during process execution, you can access reports, share process intelligence, analyze bottlenecks, and examine areas in business processes for improvement. - -As you run process instances through the server, Optimize makes REST API calls into the Camunda server, takes new historical data generated since the previous call, and stores the data in its own Elasticsearch database. - -As a result, you can analyze reports and dashboards, and reap actionable insights independently of what is happening inside the Camunda server itself (meaning no effects on runtime). - -Review heatmap displays for a closer look at the number of instances that took longer than average, based on duration distribution. You can also visualize a heatmap by counting the number of activity instances, comparing them to the total number of process instances, and obtaining a percentage. - -Unlike standard business intelligence tools, Optimize understands the user’s goals and leads them through continuous process improvement. Optimize is purpose-built to help rapidly identify the constraints of your system. - -In the following sections, we’ll walk through using and analyzing Optimize. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md deleted file mode 100644 index 1b2287f9034..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: 2.1-to-2.2 -title: "Update notes (2.1 to 2.2)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 2.2.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -When upgrading Optimize, certain features might not work out of the box for the old data. 
This is because old versions of Optimize -do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: - -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}). -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}). - -To enable this feature for your old data, follow the steps in the [engine data reimport guide]({{< ref "/technical-guide/reimport/_index.md" >}}). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3.md deleted file mode 100644 index 3b55d0d21f7..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: 2.2-to-2.3 -title: "Update notes (2.2 to 2.3)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 2.3.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Broken links - -After the migration, you might encounter some unusual errors in Optimize: - -- Buttons or links are not working when you click on them. -- You get errors in your web browser when you open the Optimize page. - -In this case, clear your browser cache so your browser loads the new Optimize resources. - -### Broken raw data reports - -Apart from caching issues, there is the following list of known data update limitations: - -- Raw data reports with custom column order are broken showing the following error when opened: - - ```javascript - Cannot read property 'indexOf' of undefined - ``` - To resolve this, either delete and recreate those reports or update to 2.4.0 which resolves the issue. - -- Combined reports might cause the reports page to crash with the following error - - ```javascript - Oh no :( - Minified React error #130; visit http://facebook.github.io/react/docs/error-decoder.html?invariant=130&args[]=undefined&args[]= for the full message or use the non-minified dev environment for full errors and additional helpful warnings. - ``` - To resolve this issue, update to 2.4.0 immediately. - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. 
diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4.md deleted file mode 100644 index 342dd074fde..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: 2.3-to-2.4 -title: "Update notes (2.3 to 2.4)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 2.4.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Changes in the supported environments - -With this Optimize version, the supported versions of Elasticsearch also change. Now, Optimize only connects to versions 6.2.0+. See the [Supported Environments]($docs$/reference/supported-environments/#elasticsearch) sections for details. - -Hence, you need to update Elasticsearch to use the new Optimize version. See the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) on how to do that. Usually, the only thing you need to do is to perform a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html). - -## Known issues - -### Confusing warning during the update - -On executing the update, you may see the following warning a couple of times in the update log output: - -``` -Deprecated big difference between max_gram and min_gram in NGram Tokenizer, expected difference must be less than or equal to: [1] -``` - -You can safely ignore this warning. The update itself amends the relevant index settings so the warning will be resolved. - -## Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5.md deleted file mode 100644 index 6c404a50c96..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 2.4-to-2.5 -title: "Update notes (2.4 to 2.5)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 2.5.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). 
-::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Limitations - -If you intend to make use of the new [Multi-Tenancy-Feature](./../setup/multi-tenancy.md), you need to perform a [full reimport](../../reimport) and may need to amend your existing reports by selecting the tenant you want the report to be based on. - -## Known issues - -### Changes in the plugin system - -There are required changes for plugins implementing `VariableImportAdapter`. -If you use such a plugin, perform the following steps: - -1. In the plugin, update the Optimize plugin dependency to version 2.5. -2. The class `PluginVariableDto` now contains the new field `tenantId`. Depending on your plugin implementation, it might be necessary to include handling this field to not lose it on import. -3. Build the new version of the plugin and replace the old `jar` with the new one. - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6.md deleted file mode 100644 index 2c00a1afa25..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: 2.5-to-2.6 -title: "Update notes (2.5 to 2.6)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 2.6.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## New behavior of Optimize - -With the introduction of the new collection and permission concept, you might find the behavior of Optimize startling and thus the subsequent sections will guide you through the changes. - -### Collection permissions & private reports - -With Optimize 2.6.0, a resource permission system is introduced. This system provides private reports/dashboard entities in the **Home** section as well as the possibility to manage permissions on collection entity level in order to share it with other Optimize users. - -This ultimately means that after the migration to Optimize 2.6.0, each user only sees the entities they originally created. This includes reports, dashboards, and collections. In order for other users to be able to access those entities, they need to be copied into a collection and view access to this new collection must be granted to other users. 
- -#### Grant access to a private report - -Given the scenario that the user `john` owns a report `John's Report` that user `mary` was used to access in Optimize 2.5.0 the user `john` can share this report in Optimize 2.6.0 with `mary` following these steps: - -1. User `john` creates a collection named e.g. `John's Share`. -![Create a Collection](img/private_report_access_1_create_collection.png) -1. User `john` grants user `mary` the viewer role on the collection `John's Share`. -![Create Permission for Mary](img/private_report_access_2_create_view_permission_mary.png) -1. User `john` copies and moves the `John's Report` report to the `John's Share` collection. -![Copy Report 1](img/private_report_access_3_1_copy_report.png) -![Copy Report 2](img/private_report_access_3_2_copy_report.png) -1. User `mary` will now see the Collection `John's Share` in her **Home** section of Optimize. -![Mary sees shared collection](img/private_report_access_4_mary_sees_collection.png) - -#### Grant access to an existing collection - -Given the scenario that the user `john` owns a collection `John's Collection` that user `mary` was used to access in Optimize 2.5.0, the user `john` can share this collection with `mary` in Optimize 2.6.0, granting user `mary` a permission role on that collection. Refer to **Step 2** in [grant access to a private report](#grant-access-to-a-private-report). - -#### Super User role - -You can now grant users `Super User` permissions, which allows them to bypass the owner/collection permissions, enabling them to access all available entities. This can, for example, be useful if entities are owned by users that are not available anymore. - -To grant Super User permissions, see the [Authentication & Security Section](./../setup/configuration.md#security). - -## Known issues - -### Rebuild your Optimize plugins - -With Optimize 2.6.0, the plugin system was overhauled. For your plugins to continue to work, you have to rebuild them with the latest Optimize plugin artifact as an uber jar. Refer to the updated [plugin setup guide](./../plugins/plugin-system.md#set-up-your-environment). - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. - -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7.md deleted file mode 100644 index 7d2b10a34a2..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: 2.6-to-2.7 -title: "Update notes (2.6 to 2.7)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 2.7.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). 
-::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Changes in the supported environments - -With this Optimize version, there are also changes in the supported versions of Elasticsearch and Camunda Platform. - -### Elasticsearch - -Optimize now requires at least Elasticsearch `6.4.0`. -See the [Supported Environments]($docs$/reference/supported-environments/#elasticsearch) sections for the full range of supported versions. - -If you need to update your Elasticsearch cluster, refer to the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) on how to do that. Usually, the only thing you need to do is perform a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html). - -### Camunda Platform - -Optimize now requires at least Camunda Platform `7.10.6`. -See the [Supported Environments]($docs$/reference/supported-environments/#camunda-platform) sections for the full range of supported versions. - -### Java - -Optimize now only supports Java 8, 11, and 13. Support for 12 was dropped as it reached [end of support](https://www.oracle.com/technetwork/java/java-se-support-roadmap.html). -See the [Supported Environments]($docs$/reference/supported-environments/) sections for the full range of supported versions. - -## Known issues - -### Collection permissions get lost on failed identity sync - -Optimize has an identity synchronization in place that fetches all users from the engine that have access to Optimize. By doing this, Optimize can easily check if the user is allowed to access the application and is able to quickly display metadata, such as the email address and full name of the user. - -If you start Optimize `2.7` and the engine is down at the time of a user synchronization, it is possible that you will lose all your collection permissions. This is due to Optimize not being able to receive the correct authorizations for the collections and as a result, all the collection roles are removed. - -The easiest way to recover your permissions and regain access to your collections would be to add a user ID to the `auth.superUserIds` property of your [configuration file](./../setup/configuration.md#security), then re-adding the necessary permissions as this user. - -After you have regained the roles of your collections, you should consider one of the two next follow-up steps: - -* Preferred solution: Update to Optimize 3.2.0 to fix the issue. -* Interim solution: If you anticipate the engine being taken down, we also recommend stopping Optimize to prevent the same scenario from reoccurring. In addition, you can also change the frequency at which this collection cleanup occurs by adjusting the `import.identitySync.cronTrigger` expression in your [configuration file](./../setup/configuration.md#security) to `0 0 1 * * *`, which results in executing the sync once per day at 01:00 AM. - -### Misinterpreted cron expressions - -The configuration of Optimize allows you to define when the history cleanup is triggered using cron expression notation. However, the values are incorrectly interpreted in Optimize. For example, the `historyCleanup.cronTrigger` configuration has the default value `0 1 * * *`, which should be 01:00 AM every day. Unfortunately, a bug causes this to be interpreted as every hour. 
- -To fix this, use the Spring [cron expression notation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronExpression.html). For instance, the default value for `historyCleanup.cronTrigger` would then be `0 0 1 * * *`. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0.md deleted file mode 100644 index 642ce6d9327..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -id: 2.7-to-3.0 -title: "Update notes (2.7 to 3.0)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 3.0.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). - -If you have done an Optimize update prior to this one, note the [changes in the update procedure](#changes-in-the-update-procedure). - -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Potential NullpointerException on Update to 3.0.0 - -In some circumstances, the update to 3.0.0 might fail with the following log output: - -``` - 06:00:00.000 - Starting step 1/9: UpdateIndexStep - ... - 06:00:02.066 - Error while executing update from 2.7.0 to 3.0.0 - java.lang.NullPointerException: null - at org.camunda.optimize.upgrade.steps.schema.UpdateIndexStep.execute(UpdateIndexStep.java:71) - ... -``` - -This is a known issue that occurs if you previously updated to Optimize 2.7.0. You can solve this issue by executing the following command on your Elasticsearch cluster before running the update again. - -``` -curl -s -XDELETE :9200/optimize-event_v2-000001 -``` - -The update should now successfully complete. - -### Cannot disable import from particular engine - -In 3.0.0, it is not possible to deactivate the import of a particular Optimize instance from a particular engine (via `engines.${engineAlias}.importEnabled`). In case your environment is using that feature for e.g. a [clustering setup](../../setup/clustering/), we recommend you to stay on Optimize 2.7.0 until the release of Optimize 3.1.0 (Scheduled for 14/07/2020) and then update straight to Optimize 3.1.0. - -## Limitations - -### User operation log import - -Optimize now imports the user operation log. Due to this, the engine user now requires engine permissions to read the user operation log, see also the [configuration documentation](../../setup/configuration/#connection-to-camunda-platform). - -### Suspension filter - -Due to a limitation of the user operations log data retrieval in the engine API, process instance suspension states of instances suspended after Optimize has been started are not correctly imported. This leads to inaccuracies in the [Suspended Instances Only Filter](../../../components/userguide/additional-features/filters.md#suspended-and-non-suspended-instances-only-filter), which will only apply to instances which were suspended before they were imported by Optimize. 
- -Furthermore, since the suspension state of process instances in Optimize is updated according to historic data logs, if you have [history cleanup](../../setup/history-cleanup/) enabled it is possible that the relevant data will be cleaned up before Optimize can import it, leading to inaccuracies in the state of suspended process instances which will then not appear in the appropriate filter. - -### Event-based processes - -There might be cases where an incorrect and lower than expected number of events are shown when mapping either process start and end events to nodes on your event based process, or -when mapping multiple engine task events from the same engine model. - -These are known issues and are [fixed](https://jira.camunda.com/browse/OPT-3515) in the upcoming Optimize 3.1.0 release. If using this version or newer, you can correct previously imported data in your event-based process either -by recreating or republishing the event based process. - -Alternatively, [forcing a reimport](./instructions.md#force-reimport-of-engine-data-in-optimize) -of the engine data after upgrading to a version with this fix will correct these errors too. - -## Changes in the update procedure - -Although Optimize 3.0.0 is a major version change, we still allow a rolling update from 2.7 to the new version. However, since the support for Elasticsearch changed to the latest major version 7.X, there is an additional step in the update routine involved. - -Before you can perform the actual update, you need to do a [rolling update](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) of Elasticsearch from 6.X to 7.X. The exact details can be found in the [Migration & Update Instructions](./instructions.md). - -Please note that the following updates are not supported by Elasticsearch: - -* 6.8 to 7.0. -* 6.7 to 7.1.–7.6.X. - -## Changes in the supported environments - -With this Optimize version, there are also changes in the supported versions of the Elasticsearch and Camunda Platform. - -### Elasticsearch - -Optimize now requires at least Elasticsearch `7.0.0` and supports the latest major version up to `7.6.0`. -See the [Supported Environments]($docs$/reference/supported-environments/#elasticsearch) sections for the full range of supported versions. - -In case you need to update your Elasticsearch cluster, refer to the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) on how to do that. Usually, the only thing you need to do is to perform a [rolling update](https://www.elastic.co/guide/en/elastic-stack/current/upgrading-elasticsearch.html#rolling-upgrades). There's also a dedicated section in the [Migration & Update Instructions](./instructions.md) on how to perform the rolling update. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1.md deleted file mode 100644 index 7f681d57b55..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: 3.0-to-3.1 -title: "Update notes (3.0 to 3.1)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 3.1.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). 
-::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Changes in the supported environments - -With this Optimize version, there are also changes in the supported versions of Camunda Platform. - -### Camunda Platform - -Optimize now requires at least Camunda Platform `7.11.13`. -See the [Supported Environments]($docs$/reference/supported-environments/#camunda-platform) sections for the full range of supported versions. - -## Breaking changes - -With Optimize 3.1.0, the [History Cleanup](../../setup/history-cleanup/) configuration was restructured and needs to be adjusted accordingly. - -Major changes are the removal of the global feature flag `historyCleanup.enabled` in favor of entity type specific feature flags as well as a relocation of process and decision specific configuration keys. Refer to the [configuration documentation](../../setup/configuration/#history-cleanup-settings) for details. - -With this release, Optimize now imports deployment data from the engine when importing definitions. If Optimize is importing from an authenticated engine, the configured user must now have READ permission on the `Deployment` resource. - -## Known issues - -### Event-based processes - event counts/suggestions - -As part of the update from Optimize 3.0 to 3.1, the event counts and the next suggested events used as part of the event based process feature are recalculated. Until the recalculation is complete, the event counts might be incorrect and the suggestions inaccurate. - -Once the recalculation is complete, the event counts will return to being correct and you will see more accurate suggested next events. - -### Decision report filter incompatibilities - update and runtime errors possible - -Due to a restriction in the database schema for decision reports, the usage of filters is limited in Optimize 3.1.0 as well as 3.2.0 and will only be fully working again in Optimize 3.3.0. -This results in the behavior that once a certain filter type was used, e.g. a fixed evaluation date filter, another filter type cannot be used anymore, e.g. a relative evaluation date filter. This issue can occur at runtime as well as during the update. - -Usually, you will see a log similar to this one when you hit this issue: - -``` -{"error":{"root_cause":[{"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"}],"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"},"status":400} -``` - -*We thus recommend removing all filters used on decision reports before upgrading to Optimize 3.1.0.* - -## Limitations - -### User permissions - -With Optimize 3.1, user and group related permissions are checked by Optimize to determine whether the current user is authorized to access other users/groups within Optimize, for example when adding new roles to a collection. - -Due to this, it is now required to explicitly grant users the relevant authorizations, otherwise they will not be able to see other users and groups in Optimize. More information on authorizations can be found [here](./../setup/authorization-management.md#user-and-group-related-authorizations). 
- -### User operations log import - -With Optimize 3.1, the user operations log is imported to detect changes to running instances' suspension status. The user operations log informs Optimize when instance suspension requests have been received by the engine, and Optimize then reimports the relevant instances to ensure their suspension state is set correctly in Optimize. - -However, if instances are suspended using the engine API's `executionDate` parameter, with which suspension operations can be triggered with a delay, Optimize currently is not able to detect this delay, and will re-import the running process instances at the time the suspension operation is read from the user operations log, not at the time the suspension takes place. This can lead to inaccuracies in the suspension state of process instances in Optimize. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2.md deleted file mode 100644 index 6072761ab6c..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: 3.1-to-3.2 -title: "Update notes (3.1 to 3.2)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 3.2.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g. due to a new feature) - -## Known issues - -### Decision report filter incompatibilities - update and runtime errors possible - -Due to a restriction in the database schema for decision reports, the usage of filters is limited in Optimize 3.2.0 and will only be fully working again in Optimize 3.3.0. - -This results in the behavior that once a certain filter type was used, e.g. a fixed evaluation date filter, another filter type cannot be used anymore, e.g. a relative evaluation date filter. This issue can occur at runtime as well as during the update. - -Usually, you will see a log similar to this one when you hit this issue: - -``` -{"error":{"root_cause":[{"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"}],"type":"mapper_parsing_exception","reason":"object mapping for [data.filter.data.start] tried to parse field [start] as object, but found a concrete value"},"status":400} -``` - -*We thus recommend removing all filters used on decision reports before upgrading to Optimize 3.2.0.* - -## Changes in the supported environments - -With this Optimize version, there are also changes in the supported versions of Elasticsearch. - -### Elasticsearch - -Optimize now supports Elasticsearch versions 7.7 and 7.8. - -See the [Supported Environments]($docs$/reference/supported-environments/#elasticsearch) sections for the full range of supported versions. - -### Camunda Platform - -Optimize now requires at least Camunda Platform `7.12.11`, and `7.11.x` is not supported anymore. -See the [Supported Environments]($docs$/reference/supported-environments/#camunda-platform) sections for the full range of supported versions.
- -### Unexpected behavior - -#### Cancelled flow node filter - -With this version, Optimize now allows you to filter for process instances where a given set of flow nodes have been canceled, as well as for flow nodes or user tasks that have been canceled. - -However, any canceled flow nodes and user tasks already imported by Optimize before this release will not appear as canceled in Optimize so will continue to be treated the same as any other completed flow node or user task. To use these options for previously imported data, you will need to [force a reimport](../../reimport) from the engine. - -## Limitations - -### No running flow node instances visible if blocked by an incident - -Optimize 3.2.0 introduces the visibility of [incidents](../../../components/userguide/additional-features/filters.md#incident-filter), but in contrast to Camunda Cockpit, Optimize currently does not show flow node instances in flow node view reports for those flow node instances that are blocked by an incident. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3.md deleted file mode 100644 index d28c30d34cf..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: 3.2-to-3.3 -title: "Update notes (3.2 to 3.3)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 3.3.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Error during migration of dashboards when updating from Optimize 3.2.0 to 3.3.0 - -During the update from Optimize 3.2.0 to 3.3.0, you may encounter the following error: - -``` -Starting step 6/7: UpdateIndexStep on index: dashboard -Progress of task (id:FwvhN1jsRUe1JQD49-C3Qg:12009) on index optimize-dashboard_v4: 0% (total: 1, updated: 0, created: 0, deleted: 0) -An Elasticsearch task that is part of the update failed: Error{type='script_exception', reason='runtime error', phase='null'} - -``` - -This can happen if you started using an Optimize version prior to 3.1.0 in your environment in the past and did not manually edit/update at least one particular dashboard created with such a version since then. - -To recover from this situation, you can run the following update script on all Optimize dashboards on your Elasticsearch cluster: - -``` -curl --location --request POST 'localhost:9200/optimize-dashboard_v3/_update_by_query' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "script": { - "source": "if (ctx._source.availableFilters == null) { ctx._source.availableFilters = [] }", - "lang": "painless" - } -}' -``` -Then, resume the update to Optimize 3.3.0 by rerunning it, thanks to Optimize updates being [resumable](https://camunda.com/blog/2021/01/camunda-optimize-3-3-0-released/#Resumable-Updates) since Optimize 3.3.0. - -## Breaking changes - -### Renamed environment folder to config - -The `environment` folder, which holds all configuration files, has been renamed to `config`. - -### Elasticsearch - -Optimize no longer supports Elasticsearch versions 7.0, 7.1 or 7.2. 
-See the [Supported Environments]($docs$/reference/supported-environments/#elasticsearch) sections for the full range of supported versions. - -### Docker image environment variables - -Previously it was possible to use the `JAVA_OPTS` environment variable on the official Optimize Docker image to configure the JVM that runs Optimize. With Optimize 3.3.0 this variable was renamed to `OPTIMIZE_JAVA_OPTS`. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md deleted file mode 100644 index b7abee7ab0c..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: 3.3-to-3.4 -title: "Update notes (3.3 to 3.4)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 3.4.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). - -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -When upgrading Optimize, certain features might not work out of the box for the old data. This is because old versions of Optimize -do not fetch data that is necessary for the new feature to work. For this update, the following features do not work on the old data: - -- [Process Instance Parts]({{< ref "/user-guide/process/single-report/_index.md#process-instance-parts" >}}) -- [Canceled Instances Only Filter]({{< ref "/user-guide/process/filter/_index.md#canceled-instances-only-filter" >}}) - -To enable this feature for your old data, follow the steps in the [engine data reimport guide](./../reimport.md). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5.md deleted file mode 100644 index 949321bb300..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: 3.4-to-3.5 -title: "Update notes (3.4 to 3.5)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 3.5.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Limitations - -### Migration warning regarding incomplete UserTasks - -The migration from Optimize 3.4 to 3.5 includes some improvements to the Optimize process instance data structure. Previously, process instance data in Optimize held two distinct lists: one for all FlowNode data and one for UserTask data. To avoid redundancy, these lists are merged into one during this migration. - -In order to correctly merge the UserTask data contained in the two lists, specific ID fields are used to correlate UserTasks correctly. However, due to the nature of the Optimize import, UserTask data can temporarily exist within Optimize without some of these fields. 
Normally, these fields are updated by the next scheduled UserTask import, but if Optimize was shut down before this next UserTask import can run, the fields remain `null` and cannot be used during migration. - -Usually, this should only affect a small percentage of UserTasks, and of this small percentage, the data that is lost during migration will only relate to the cancellation state or assignee/candidate group information. In practical terms, if you observe a warning regarding "x incomplete UserTasks that will be skipped during migration" in your update logs, this means that after the migration, x UserTasks in your system may be lacking assignee or candidate group information or may be marked as completed when in fact they were canceled. - -Note that any other UserTask data, old and new, will be complete. - -If this inaccuracy in past data is not acceptable to you, you can remedy this data loss by performing a reimport after migration. You can either run a complete reimport using [the reimport script](../../reimport), or alternatively use the statements below to only reset those imports responsible for the data that was skipped during migration. - -Ensure Optimize is shut down before executing these import resets. - -Reset the `identityLinkLog` import to reimport assignee and candidate group data: - -``` -curl --location --request DELETE 'http://{esHost}:{esPort}/{indexPrefix}-timestamp-based-import-index_v4/_doc/identityLinkLogImportIndex-{engineAlias}' -``` - -Reset the `completedActivity` import to reimport the correct cancellation state data: - -``` -curl --location --request DELETE 'http://{esHost}:{esPort}/{indexPrefix}-timestamp-based-import-index_v4/_doc/activityImportIndex-{engineAlias}' -``` - -For example, assuming Elasticsearch is at `localhost:9200`, the engine alias is `camunda-bpm`, and the index prefix is `optimize`, the request to reset the `identityLinkLog` import translates to: - -``` -curl --location --request DELETE 'http://localhost:9200/optimize-timestamp-based-import-index_v4/_doc/identityLinkLogImportIndex-camunda-bpm' -``` - -If you have more than one engine configured, both requests need to be executed once per engine alias. - -## Known issues - -### Report edit mode fails for reports with flow node filters - -After updating to Optimize 3.5.0, you may encounter an issue where you cannot enter edit mode on -reports that use flow node selection filters. - -In such a case, when entering edit mode, you are confronted with the following error in the Web UI: - -``` - Cannot read property 'key' of undefined -``` - -This error can be resolved by running the following Elasticsearch update query on your Optimize report index: - -``` -curl --location --request POST 'http://{esHost}:{esPort}/{indexPrefix}-single-process-report/_update_by_query' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "script" : { - "source": "if(ctx._source.data.filter.stream().anyMatch(filter -> \"executedFlowNodes\".equals(filter.type)) && ctx._source.data.definitions.length == 1){for (filter in ctx._source.data.filter){filter.appliedTo = [ctx._source.data.definitions[0].identifier];}}", - "lang": "painless" - } -}' -``` - -Applying this update query can be done anytime after the update to Optimize 3.5.0 was performed, even while Optimize 3.5.0 is already running. - -### Running 3.5 update on Optimize version 3.5 data results in NullPointerException - -The Optimize 3.5 update will not succeed if it is run on data which has already been updated to 3.5.
This is because the 3.5 update relies on the 3.4 schema to be present in order to perform certain operations, which will fail with a `NullPointerException` if attempted on the 3.5 schema. This will cause the update to force quit. In this case, however, no further action is required as your data has already been updated to 3.5. - -## Unexpected behavior - -### Flow node selection in report configuration moved to flow node filter - -The flow node selection previously found in the report configuration menu has now been migrated to the flow node filter dropdown as a ["Flow Node Selection" Filter](../../../components/userguide/additional-features/filters.md#flow-node-selection). Existing flow node selection configurations in old reports will be migrated to an equivalent Filter with the Optimize 3.5.0 migration. Note that this filter now also filters out instances which do not contain any flow nodes that match the filter. - -## Changes in requirements - -### Java - -With this release, support for Java 8 has been removed, meaning that Java 11 is now the only LTS version of Java that Optimize supports. See the [Supported Environments]($docs$/reference/supported-environments/) sections for more information on supported versions. - -### Elasticsearch - -With this release, Optimize no longer supports Elasticsearch versions 7.5.1, 7.6.0 or 7.7.0. See the [Supported Environments]($docs$/reference/supported-environments/#elasticsearch) sections for the full range of supported versions. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6.md deleted file mode 100644 index 8293ecfc67d..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: 3.5-to-3.6 -title: "Update notes (3.5 to 3.6)" ---- - -Camunda Platform 7 only - -:::note Heads Up! -To update Optimize to version 3.6.0, perform the following steps first: [Migration & Update Instructions](./instructions.md). -::: - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -### Default tenants - -If you have [default tenants configured](../../setup/configuration/#connection-to-camunda-platform) for any connected engine in Optimize, -it might be that user task and flow node reports, as well as branch analysis, stops showing data after updating to 3.6.0. - -This is a known -issue that has been fixed as part of the 3.6.3 patch release. You can update from 3.6.0 to 3.6.3. Migration from either of these versions to -3.7.0 will be possible. - -## Changes in supported environments - -### Camunda Platform - -Optimize now requires at least Camunda Platform `7.14.0` and supports up to `7.16.0+`. Camunda Platform `7.13.x` is not supported anymore. - -See the [Supported Environments]($docs$/reference/supported-environments/#camunda-platform)) sections for the full range of supported versions. 
diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7.md deleted file mode 100644 index 1010bb2799c..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: 3.6-to-3.7 -title: "Update notes (3.6 to 3.7.x)" ---- - -Camunda Platform 7 only - -:::note Heads up! -To update Optimize to version 3.7.x, perform the following steps: [Migration & Update Instructions](./instructions.md). -::: - -The update to 3.7.x can be performed from any 3.6.x release. - -Here you will find information about: - -* Limitations -* Known issues -* Changes in the supported environments -* Any unexpected behavior of Optimize (e.g due to a new feature) - -## Known issues - -The Optimize 3.7.0 release contains a number of bugs related to dashboard templates, alerts, and the report builder. We thus recommend updating to 3.7.1 if you are already using 3.7.0, or directly updating to 3.7.1 if you are still running a 3.6.x release. - -For details on the issues, refer to the [Optimize 3.7.1 Release Notes](https://jira.camunda.com/secure/ReleaseNote.jspa?projectId=10730&version=17434). - -## New behavior - -### Added support for object and list variables - -With Optimize 3.7, we've added support for object and list variables. Variables with type `Object` are now automatically imported and flattened into dedicated "sub variables" for each object property. If you have previously used a variable import plugin to achieve the same, you may disable this plugin after migrating to Optimize 3.7. - -Find more information about importing object variables [here](../../setup/object-variables). 
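As a rough illustration of the flattening described above (the variable name and the resulting property paths are assumptions made for this example, not the documented naming scheme; see the object variables guide linked above for the exact behavior):

```bash
# Illustrative only: an Object variable is split into one "sub variable" per
# property, so reports and filters can work with the individual fields.
cat <<'EOF'
engine variable:   customer (Object) = {"name": "Jane", "address": {"city": "Berlin"}}
imported as:       customer.name          (String) = "Jane"
                   customer.address.city  (String) = "Berlin"
EOF
```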
diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_1_create_collection.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_1_create_collection.png deleted file mode 100644 index 212e86b5511..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_1_create_collection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_2_create_view_permission_mary.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_2_create_view_permission_mary.png deleted file mode 100644 index 15001043127..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_2_create_view_permission_mary.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_1_copy_report.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_1_copy_report.png deleted file mode 100644 index f6a77e63a38..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_1_copy_report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_2_copy_report.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_2_copy_report.png deleted file mode 100644 index ff9bdb06f5b..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_3_2_copy_report.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_4_mary_sees_collection.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_4_mary_sees_collection.png deleted file mode 100644 index 8ef4dfc1020..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/img/private_report_access_4_mary_sees_collection.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/instructions.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/instructions.md deleted file mode 100644 index 3dc82eca13b..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/migration-update/instructions.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -id: instructions -title: "Instructions" -description: "Find out how to update to a new version of Optimize without losing your reports and dashboards." ---- - -Camunda Platform 7 only - -Optimize releases two new minor versions a year. These documents guide you through the process of migrating your Optimize from one Optimize minor version to the other. - -If you want to update Optimize by several versions, you cannot do that at once, but you need to perform the updates in sequential order. 
For instance, if you want to update from 2.5 to 3.0, you need to update first from 2.5 to 2.6, then from 2.6 to 2.7, and finally from 2.7 to 3.0. The following table shows the recommended update paths to the latest version: - -| Update from | Recommended update path to 3.7 | -| --- | --- | -| 3.7 | You are on the latest version. | -| 3.0 - 3.6 | 1. Rolling update to 3.6
    | -| 2.0 - 2.7 | 1. Rolling update to 2.7
    2. Rolling update from 2.7 to 3.0 | 1.0 - 1.5 | No update possible. Use the latest version directly. | - -## Migration instructions - -You can migrate from one version of Optimize to the next one without losing data. To migrate to the latest version, please perform the following steps: - -### 1. Preparation - -- Make sure that Elasticsearch has enough memory. To do that, shut down Elasticsearch and go to the `config` folder of your Elasticsearch distribution. There you should find a file called `jvm.options`. Change the values of the two properties `Xms` and `Xmx` to at least `1g` so that Elasticsearch has enough memory configured. This configuration looks as follows: - -```bash --Xms1g --Xmx1g -``` - -- Restart Elasticsearch and make sure that the instance is up and running throughout the entire migration process. -- You will need to shut down Optimize before starting the migration, resulting in downtime during the entire migration process. -- [Back up your Elasticsearch instance](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html) in case something goes wrong during the migration process. This is recommended, but optional. -- Make sure that you have enough storage available to perform the migration. During the migration process, up to twice the amount of storage of your Elasticsearch data may be needed. (Highly recommended) -- Back up your `environment-config.yaml` and `environment-logback.xml` located in the `config` folder of the root directory of your current Optimize. (Optional) -- If you are using Optimize plugins, it might be necessary to adjust those plugins to the new version. To do this, go to the project where you developed your plugins, increase the project version in Maven to the new Optimize version, and build the plugin again (check out the [plugin guide]({{< ref "/technical-guide/plugins/_index.md" >}}) for details). Afterwards, add the plugin jar to the `plugin` folder of your new Optimize distribution. (Optional) -- Start the new Optimize version, as described in the [installation guide]({{< ref "/technical-guide/setup/setup/_index.md" >}}). -- It is very likely that you configured the logging of Optimize to your needs and therefore you adjusted the `environment-logback.xml` in the `config` folder of the root directory of your **old** Optimize. You can now use the backed up logging configuration and put it in the `config` folder of the **new** Optimize to keep your logging adjustments. (Optional) - -### 2. Rolling update to the new Elasticsearch version - -You only need to execute this step if you want to update the Elasticsearch (ES) version during the update. In case the ES version stays the same, you can skip this step. - -The Elasticsearch update is usually performed in a rolling fashion. Read all about how to do the ES update in the general [Elasticsearch Update Guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) and consult the [rolling upgrade](https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html) guide of the ES documentation on how to conduct the rolling update. If you have a very simple setup, for instance, a cluster with only one ES node, no plugins installed, no machine learning jobs, and no special configuration, the update would essentially boil down to the following steps: - -1. Install the new ES version, e.g.
using Docker, your favorite package manager, or just by downloading and extracting the new tar/zip archive to a new directory. -2. Copy the data from the old ES to the new ES. If you don't expect any new data coming to your old ES you can just copy the `data` folder from the old ES distribution and overwrite the `data` folder in the new ES distribution. -3. Copy your old configuration (`config/elasticsearch.yml`) over to the new ES installation. -4. Stop the old ES instance. -5. Start the new ES instance and check that everything looks fine. - -Although the above steps summarize the basic update procedure, it is still recommended to read through the Elasticsearch documentation to avoid any potential issues. - -:::note Heads Up! -Note that the following updates are not supported by Elasticsearch: - -* 6.8 to 7.0. -* 6.7 to 7.1.–7.X (where X>1, e.g. 7.5) -::: - -### 3. Perform the migration - -- Go to the [enterprise download page](https://docs.camunda.org/enterprise/download/#camunda-optimize) and download the new version of Optimize you want to update to. For instance, if your current version is Optimize 2.2, you should download the version 2.3. Extract the downloaded archive in your preferred directory. The archive contains the Optimize application itself and the executable to update Optimize from your old version to the new version. -- In the `config` folder of your **current** Optimize version, you have defined all configuration in the `environment-config.yaml` file, e.g. for Optimize to be able to connect to the engine and Elasticsearch. Copy the old configuration file and place it in the `config` folder of your **new** Optimize distribution. Bear in mind that the configuration settings might have changed and thus the new Optimize won't recognize your adjusted settings or complain about settings that are outdated and therefore refuses to startup. Best checkout the Update Notes subsections for deprecations. - -#### 3.1 Manual update script execution - -This approach requires you to manually execute the update script. You can perform this from any machine that has access to your Elasticsearch cluster. - -- Open up a terminal, change to the root directory of your **new** Optimize version and run the following command: `./upgrade/upgrade.sh` on Linux or `update/update.bat` on Windows -- During the execution the executable will output a warning to ask you to back-up your Elasticsearch data. Type `yes` to confirm that you have backed up the data. -- Feel free to [file a support case](https://docs.camunda.org/enterprise/support/) if any errors occur during the migration process. -- To get more verbose information about the update, you can adjust the logging level as it is described in the [configuration documentation](./../setup/configuration.md#logging). - -#### 3.2 Automatic update execution (Optimize >3.2.0) - -With the Optimize 3.2.0 release the update can also be executed as part of the Optimize startup. In order to make use of this functionality, the command flag `--upgrade` has to be passed to the Optimize startup script: - -```bash -For UNIX: -./optimize-startup.sh --upgrade - -For Windows: -./optimize-startup.bat --upgrade -``` - -This will run the update prior to starting up Optimize and only then start Optimize. - -In Docker environments this can be achieved by overwriting the default command of the Docker container (being `./optimize.sh`), e.g. 
like in the following [docker-compose](https://docs.docker.com/compose/) snippet: - -``` -version: '2.4' - -services: - optimize: - # Use the appropriate image tag depending on your version - image: registry.camunda.cloud/optimize-ee/optimize:8-latest # For Camunda 8 - # image: registry.camunda.cloud/optimize-ee/optimize:latest # For Camunda 7 - command: ["./optimize.sh", "--upgrade"] -``` - -However, as this may prolong the container boot time significantly, which may conflict with [container status probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) in managed environments like [Kubernetes](https://kubernetes.io/), we recommend using the [init container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) feature there to run the update: - -``` - labels: - app: optimize -spec: - initContainers: - - name: migration - # Use the appropriate image tag depending on your version - image: registry.camunda.cloud/optimize-ee/optimize:8-latest # For Camunda 8 - # image: registry.camunda.cloud/optimize-ee/optimize:latest # For Camunda 7 - command: ['./upgrade/upgrade.sh', '--skip-warning'] - containers: - - name: optimize - # Use the appropriate image tag depending on your version - image: registry.camunda.cloud/optimize-ee/optimize:8-latest # For Camunda 8 - # image: registry.camunda.cloud/optimize-ee/optimize:7-latest # For Camunda 7 -``` - -### 4. Resume a canceled update - -From Optimize 3.3.0 onwards, updates are resumable. If the update process got interrupted, either manually or due to an error, you don't have to restore the Elasticsearch backup and start over; you can simply rerun the update. On resume, previously completed update steps will be detected and logged as skipped. In the following log example, **Step 1** was previously completed and is thus skipped: - -``` -./upgrade/upgrade.sh -... -INFO UpgradeProcedure - Skipping Step 1/2: UpdateIndexStep on index: process-instance as it was found to be previously completed already at: 2020-11-30T16:16:12.358Z. -INFO UpgradeProcedure - Starting step 2/2: UpdateIndexStep on index: decision-instance -... -``` - -### 5. Typical errors - -- Using an update script that does not match your version: - -```bash -Schema version saved in Metadata does not match required [2.X.0] -``` - -Let's assume you have Optimize 2.1, want to update to 2.3, and use the jar that updates from 2.2 to 2.3. This error occurs because that jar expects Elasticsearch to contain the schema of version 2.2, while your instance still has the 2.1 schema. This happens when you download the wrong Optimize artifact, which contains the wrong update jar version. - -## Force reimport of engine data in Optimize - -Features that were added with the new Optimize version may not work for data that was imported with the old version of Optimize. If you want to use new features on the old data, you can force a reimport of the engine data to Optimize. See [the reimport guide](./../reimport.md) on how to perform such a reimport. 
diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/engine-data-deletion.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/engine-data-deletion.md deleted file mode 100644 index 3aebb5c2458..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/engine-data-deletion.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: engine-data-deletion -title: "Engine data deletion" -description: "Explains how Optimize copes with the deletion of engine data." ---- - -Camunda Platform 7 only - -The engine slows down if the historic data grows significantly over time, particularly in cases where the amount of data streaming in each day is large. One solution to this is to remove old data from the engine on a regular basis, yet still importing the data to Optimize so it can be used for deeper analytics. - -To support the described use-case, Optimize does not care if you delete any data on the engine side. Specifically, Optimize does not sync with the engine on data deletion. If you want to remove any data from Optimize, you can either erase the data from Elasticsearch or use the [Optimize History Cleanup Feature](./../setup/history-cleanup.md). - -The subsections below describe the ways in which Optimize handles data deletion from the engine. - -## Deletion of historic data - -There are two possible ways of doing this: - -* **Historic Cleanup**: If you have enabled the [history cleanup](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup), historic data will be removed in the engine based on configurable time-to-live. -* **Manual Deletion**: You trigger a [manual deletion](https://docs.camunda.org/manual/latest/reference/rest/history/process-instance/post-delete/), e.g. via REST-API. - -Optimize can handle this case as it imports the old data first, and once it has imported everything, it will only add to its database new data streaming in. - -## Manual deletion of deployments/definitions - -In most cases, the deletion of deployments and definitions will not cause any problems. If you have several versions of a definition deployed, e.g. 1-4, and you delete the definition/deployment with definition version 1, then this wouldn't cause any issues. The assumption here is that Optimize has imported the definitions and related historical data beforehand. Otherwise, the deleted definition is lost. - -However, there are two scenarios where Optimize will behave differently. The first is depicted as follows: - -1. You deploy a (process/decision) definition A for the first time. -2. Optimize imports definition A with version 1. -3. You delete the definition/deployment of the definition without having added another version of it. Definition A with version 1 is removed from the engine. -4. You deploy the definition A with the same ID again. -5. Optimize imports another definition A with version 1. - -Optimize identifies the unique definitions by the combination of `definition key`, `version`, and `tenant`, so in this case will have imported the same definition twice. Optimize handles this by marking the definition with the oldest deployment time as deleted. When selecting definitions in Optimize, definitions that are considered to be deleted will not be selectable for reporting. Any data that Optimize has imported related to the deleted definition will appear in reports that use the non-deleted definition. 
- -To prevent this from happening, avoid deleting and redeploying the same definition (same definition key, tenant, and version) to the engine. - -Secondly, when a definition is deleted in the engine before it has been imported by Optimize and the corresponding instance data is still present in the history tables of the engine, Optimize will attempt to import this instance data for both decision reports and for creating event-based processes. In this scenario, Optimize will simply skip the import of the instance data for definitions that it has not already imported and that have since been deleted in the engine. This data will not be available in Optimize. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-Service-Polling.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-Service-Polling.png deleted file mode 100644 index 9ed62fdd2b7..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-Service-Polling.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-logistic_large.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-logistic_large.png deleted file mode 100644 index ef4dd3b1b28..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-logistic_large.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-logistic_medium.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-logistic_medium.png deleted file mode 100644 index 36a088439ee..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-logistic_medium.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-sales.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-sales.png deleted file mode 100644 index f6c4b23a6a2..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Import-performance-diagramms-sales.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Optimize-Import-Process.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Optimize-Import-Process.png deleted file mode 100644 index bf753ac8a72..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Optimize-Import-Process.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Optimize-Structure.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Optimize-Structure.png deleted file mode 100644 index 63c59d49899..00000000000 Binary files 
a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/img/Optimize-Structure.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/import-guide.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/import-guide.md deleted file mode 100644 index 27b407955f3..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/optimize-explained/import-guide.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -id: import-guide -title: "Data import" -description: "Shows how the import generally works and an example of import performance." ---- - -Camunda Platform 7 only - -This document provides instructions on how the import of the engine data to Optimize works. - -## Architecture overview - -In general, the import assumes the following setup: - -- A Camunda engine from which Optimize imports the data. -- The Optimize backend, where the data is transformed into an appropriate format for efficient data analysis. -- [Elasticsearch](https://www.elastic.co/guide/index.html), which is the database Optimize persists all formatted data to. - -The following depicts the setup and how the components communicate with each other: - -![Optimize Import Structure](img/Optimize-Structure.png) - -Optimize queries the engine data using a dedicated Optimize REST-API within the engine, transforms the data, and stores it in its own Elasticsearch database such that it can be quickly and easily queried by Optimize when evaluating reports or performing analyses. The reason for having a dedicated REST endpoint for Optimize is performance: the default REST-API adds a lot of complexity to retrieve the data from the engine database, which can result in low performance for large data sets. - -Note the following limitations regarding the data in Optimize's database: - -- The data is only a near real-time representation of the engine database. This means Elasticsearch may not contain the data of the most recent time frame, e.g. the last two minutes, but all the previous data should be synchronized. -- Optimize only imports the data it needs for its analysis. The rest is omitted and won't be available for further investigation. Currently, Optimize imports: - - The history of the activity instances - - The history of the process instances - - The history of variables with the limitation that Optimize only imports primitive types and keeps only the latest version of the variable - - The history of user tasks belonging to process instances - - The history of incidents with the exception of incidents that occurred due to the history cleanup job or a timer start event job running out of retries - - Process definitions - - Process definition XMLs - - Decision definitions - - Definition deployment information - - Historic decision instances with input and output - - Tenants - - The historic identity link logs - -Refer to the [Import Procedure](#import-procedure) section for a more detailed description of how Optimize imports engine data. - -## Import performance overview - -This section gives an overview of how fast Optimize imports certain data sets. The purpose of these estimates is to help you evaluate whether Optimize's import performance meets your demands. - -It is very likely that these metrics change for different data sets because the speed of the import depends on how the data is distributed. 
- -The import is also affected by how the involved components are set up. For instance, if you deploy the Camunda engine on a different machine than Optimize and Elasticsearch to provide both applications with more computation resources, the process is likely to speed up. If the Camunda engine and Optimize are physically far away from each other, the network latency might slow down the import. - -### Setup - -The following components were used for these import tests: - -| Component | Version | -| - | - | -| Camunda Platform | 7.10.3 | -| Camunda Platform Database | PostgreSQL 11.1 | -| Elasticsearch | 6.5.4 | -| Optimize | 2.4.0 | - -The Optimize configuration with the default settings was used, as described in detail in the [configuration overview](./../setup/configuration.md). - -The following hardware specifications were used for each dedicated host - -- Elasticsearch: - - Processor: 8 vCPUs\* - - Working Memory: 8 GB - - Storage: local 120GB SSD -- Camunda Platform: - - Processor: 4 vCPUs\* - - Working Memory: 4 GB -- Camunda Platform Database (PostgreSQL): - - Processor: 8 vCPUs\* - - Working Memory: 2 GB - - Storage: local 480GB SSD -- Optimize: - - Processor: 4 vCPUs\* - - Working Memory: 8 GB - -\*one vCPU equals one single hardware hyper-thread on an Intel Xeon E5 v2 CPU (Ivy Bridge) with a base frequency of 2.5 GHz. - -The time was measured from the start of Optimize until the entire data import to Optimize was finished. - -### Large size data set - -This data set contains the following amount of instances: - -| Number of Process Definitions | Number of Activity Instances | Number of Process Instances | Number of Variable Instances | Number of Decision Definitions | Number of Decision Instances | -| - | - | - | - | - | - | -| 21 | 123 162 903 | 10 000 000 | 119 849 175 | 4 | 2 500 006 | - -Here, you can see how the data is distributed over the different process definitions: - -![Data Distribution](img/Import-performance-diagramms-logistic_large.png) - -Results: - -- **Duration of importing the whole data set:** ~120 minutes -- **Speed of the import:** ~1400 process instances per second during the import process - -### Medium size data set - -This data set contains the following amount of instances: - -| Number of Process Definitions | Number of Activity Instances | Number of Process Instances | Number of Variable Instances | -| - | - | - | - | -| 20 | 21 932 786 | 2 000 000 | 6 913 889 | - -Here you can see how the data is distributed over the different process definitions: - -![Data Distribution](img/Import-performance-diagramms-logistic_medium.png) - -Results: - -- **Duration of importing the whole data set:** ~ 10 minutes -- **Speed of the import:** ~1500 process instances per second during the import process - -## Import procedure - -:::note Heads up! -Understanding the details of the import procedure is not necessary to make Optimize work. In addition, there is no guarantee that the following description is either complete or up-to-date. -::: - -The following image illustrates the components involved in the import process as well as basic interactions between them: - -![Optimize Procedure](img/Optimize-Import-Process.png) - -During execution, the following steps are performed: - -1. [Start an import round](#start-an-import-round) -2. [Prepare the import](#prepare-the-import) - - - 2.1 Poll a new page - - 2.2 Map entities and add an import job - -3. 
[Execute the import](#execute-the-import) - - - 3.1 Poll a job - - 3.2 Persist the new entities to Elasticsearch - -### Start an import round - -The import process is automatically scheduled in rounds by the `Import Scheduler` after startup of Optimize. In each import round, multiple `Import Services` are scheduled to run, each fetches data of one specific entity type. For example, one service is responsible for importing the historic activity instances and another one for the process definitions. - -For each service, it is checked if new data is available. Once all entities for one import service have been imported, the service starts to back off. To be more precise, before it can be scheduled again it stays idle for a certain period of time, controlled by the "backoff" interval and a "backoff" counter. After the idle time has passed, the service can perform another try to import new data. Each round in which no new data could be imported, the counter is incremented. Thus, the backoff counter will act as a multiplier for the backoff time and increase the idle time between two import rounds. This mechanism is configurable using the following properties: - -```yaml - handler: - backoff: - # Interval which is used for the backoff time calculation. - initial: 1000 - # Once all pages are consumed, the import service component will - # start scheduling fetching tasks in increasing periods of time, - # controlled by 'backoff' counter. - # This property sets maximal backoff interval in seconds - max: 30 -``` - -If you would like to rapidly update data imported into Optimize, you have to reduce this value. However, this will cause additional strain on the engine and might influence the performance of the engine if you set a low value. - -More information about the import configuration can be found in the [configuration section](./../setup/configuration.md#engine-common-settings). - -### Prepare the import - -The preparation of the import is executed by the `ImportService`. Every `ImportService` implementation performs several steps: - -#### Poll a new page - -The whole polling/preparation workflow of the engine data is done in pages, meaning only a limited amount of entities is fetched on each execution. For example, say the engine has 1000 historic activity instances and the page size is 100. As a consequence, the engine would be polled 10 times. This prevents running out of memory and overloading the network. - -Polling a new page does not only consist of the `ImportService`, but the `IndexHandler`, and the `EntityFetcher` are also involved. The following image depicts how those components are connected with each other: - -![ImportService Polling Procedure](img/Import-Service-Polling.png) - -First, the `ImportScheduler` retrieves the newest index, which identifies the last imported page. This index is passed to the `ImportService` to order it to import a new page of data. With the index and the page size, the fetching of the engine data is delegated to the `EntityFetcher`. - -#### Map entities and add an import job - -All fetched entities are mapped to a representation that allows Optimize to query the data very quickly. Subsequently, an import job is created and added to the queue to persist the data in Elasticsearch. - -### Execute the import - -Full aggregation of the data is performed by a dedicated `ImportJobExecutor` for each entity type, which waits for `ImportJob` instances to be added to the execution queue. 
As soon as a job is in the queue, the executor: - -- Polls the job with the new Optimize entities -- Persists the new entities to Elasticsearch - -The data from the engine and Optimize do not have a one-to-one relationship, i.e., one entity type in Optimize may consist of data aggregated from different data types of the engine. For example, the historic process instance is first mapped to an Optimize `ProcessInstance`. However, for the heatmap analysis it is also necessary for `ProcessInstance` to contain all activities that were executed in the process instance. - -Therefore, the Optimize `ProcessInstance` is an aggregation of the engine's historic process instance and other related data: historic activity instance data, user task data, and variable data are all [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html) within Optimize's `ProcessInstance` representation. - -:::note -Optimize uses [nested documents](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html), the above mentioned data is an example of documents that are nested within Optimize's `ProcessInstance` index. - -Elasticsearch applies restrictions regarding how many objects can be nested within one document. If your data includes too many nested documents, you may experience import failures. To avoid this, you can temporarily increase the nested object limit in Optimize's [index configuration](./../setup/configuration.md#index-settings). Note that this might cause memory errors. -::: - -Import executions per engine entity are actually independent from another. Each follows a [producer-consumer-pattern](https://dzone.com/articles/producer-consumer-pattern), where the type specific `ImportService` is the single producer and a dedicated single `ImportJobExecutor` is the consumer of its import jobs, decoupled by a queue. So, both are executed in different threads. To adjust the processing speed of the executor, the queue size and the number of threads that process the import jobs can be configured: - -```yaml -import: - # Number of threads being used to process the import jobs per data type that are writing - # data to elasticsearch. - elasticsearchJobExecutorThreadCount: 1 - # Adjust the queue size of the import jobs per data type that store data to elasticsearch. - # A too large value might cause memory problems. - elasticsearchJobExecutorQueueSize: 5 -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin.md deleted file mode 100644 index 21c03ef72d1..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: businesskey-import-plugin -title: "Business key import customization" -description: "Adapt the process instance import so you can customize the associated business keys." ---- - -Camunda Platform 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature enables you to customize business keys during the process instance import, e.g. if your business keys contain sensitive information that requires anonymization. 
- -The Optimize plugin system contains the following interface: - -```java -public interface BusinessKeyImportAdapter { - - String adaptBusinessKeys(String businessKey); -} -``` - -Implement this to adjust the business keys of the process instances to be imported. The method receives the business key of a process instance that would be imported if no further action is performed. The returned string is the customized business key of the process instance that will be imported. - -The following shows an example of a customization of business keys during the process instance import in the package `org.mycompany.optimize.plugin`, where every business key is set to 'foo'. - -```java -package org.mycompany.optimize.plugin; - -import org.camunda.optimize.plugin.importing.businesskey.BusinessKeyImportAdapter; -import java.util.List; - - public class MyCustomBusinessKeyImportAdapter implements BusinessKeyImportAdapter { - - @Override - public String adaptBusinessKeys(String businessKey) { - return "foo"; - } - -} -``` - -Now, when `MyCustomBusinessKeyImportAdapter`, packaged as a `jar` file, is added to Optimize's `plugin` folder, we just have to add the following property to the `environment-config.yaml` file: - -```yaml -plugin: - businessKeyImport: - # Look in the given base package list for businesskey import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] -``` - -For more information on how this plugin works, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-business-key-import-plugins). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/decision-import-plugin.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/decision-import-plugin.md deleted file mode 100644 index c6b0867025a..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/decision-import-plugin.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -id: decision-import-plugin -title: "Decision inputs and outputs import customization" -description: "Enrich or filter the Decision inputs and outputs so you can customize which and how these are imported to Optimize." ---- - -Camunda Platform 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature enables you to enrich, modify, or filter the decision input and output instances, e.g., if instances in Camunda contain IDs of instances in another database and you would like to resolve those references to the actual values. - -The plugin system contains the following interfaces: - -```java -public interface DecisionInputImportAdapter { - - List<PluginDecisionInputDto> adaptInputs(List<PluginDecisionInputDto> inputs); -} -``` - -```java -public interface DecisionOutputImportAdapter { - - List<PluginDecisionOutputDto> adaptOutputs(List<PluginDecisionOutputDto> outputs); -} -``` - -Implement these to adjust the input and output instances to be imported. The methods take as a parameter the list of instances that would be imported if no further action is performed. The returned list is the customized list with the enriched/filtered instances that will be imported. To create new instances, you can use the `PluginDecisionInputDto` and `PluginDecisionOutputDto` classes as data transfer objects (DTOs), which are also contained in the plugin system. - -:::note -All class members need to be set; otherwise, the instance is ignored, as this may lead to problems during data analysis. 
- -The data from the engine is imported in batches. This means the `adaptInputs`/`adaptOutputs` method is called once per batch rather than once for all data. For instance, if you have 100,000 decision instances in total and the batch size is 10,000, the plugin function will be called 10 times. -::: - -Next, package your plugin into a `jar` file and then add the `jar` file to the `plugin` folder of your Optimize directory. Finally, add the name of the base package of your custom `DecisionOutputImportAdapter/DecisionInputImportAdapter` to the `environment-config.yaml` file: - -```yaml -plugin: - decisionInputImport: - # Look in the given base package list for decision input import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] - decisionOutputImport: - # Look in the given base package list for decision output import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] -``` - -The following shows an example of a customization of the decision input import in the package `org.mycompany.optimize.plugin`, where every string input is assigned the value 'foo': - -```java -package org.mycompany.optimize.plugin; - -import org.camunda.optimize.plugin.importing.variable.DecisionInputImportAdapter; -import org.camunda.optimize.plugin.importing.variable.PluginDecisionInputDto; - -import java.util.List; - -public class SetAllStringInputsToFoo implements DecisionInputImportAdapter { - - public List<PluginDecisionInputDto> adaptInputs(List<PluginDecisionInputDto> inputs) { - for (PluginDecisionInputDto input : inputs) { - if (input.getType().toLowerCase().equals("string")) { - input.setValue("foo"); - } - } - return inputs; - } -} -``` - -Now, when `SetAllStringInputsToFoo`, packaged as a `jar` file, is added to the `plugin` folder, we just have to add the following property to the `environment-config.yaml` file to make the plugin work: - -```yaml -plugin: - decisionInputImport: - # Look in the given base package list for decision input import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-decision-import-plugins). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/elasticsearch-header.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/elasticsearch-header.md deleted file mode 100644 index 4c19fb1047c..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/elasticsearch-header.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: elasticsearch-header -title: "Elasticsearch header" -description: "Register your own hook into the Optimize Elasticsearch client to add custom headers to requests." ---- - -Camunda 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature allows you to register your own hook into the Optimize Elasticsearch client so you can add custom headers to all requests made to Elasticsearch. The plugin is invoked before every request to Elasticsearch is made, allowing different -headers and values to be added per request. This plugin is also loaded during the update and reimport. 
- -For that, the Optimize plugin system provides the following interface: - -```java -public interface ElasticsearchCustomHeaderSupplier { - - CustomHeader getElasticsearchCustomHeader(); -} -``` - -Implement this interface and return the custom header you would like to be added to Elasticsearch requests. The `CustomHeader` -class has a single Constructor taking two arguments, as follows: - -```java -public CustomHeader(String headerName, String headerValue) -``` - -The following example returns a header that will be added: - -```java -package com.example.optimize.elasticsearch.headers; - -import org.camunda.optimize.plugin.elasticsearch.CustomHeader; -import org.camunda.optimize.plugin.elasticsearch.ElasticsearchCustomHeaderSupplier; - -public class AddAuthorizationHeaderPlugin implements ElasticsearchCustomHeaderSupplier { - - private String currentToken; - - public CustomHeader getElasticsearchCustomHeader() { - if (currentToken == null || currentTokenExpiresWithinFifteenMinutes()) { - currentToken = fetchNewToken(); - } - return new CustomHeader("Authorization", currentToken); - } -} -``` - -Similar to the other plugins' setup, you have to package your plugin in a `jar`, add it to Optimize's `plugin` folder, and make Optimize find it by adding the following configuration to `environment-config.yaml`: - -```yaml -plugin: - elasticsearchCustomHeader: - # Look in the given base package list for Elasticsearch custom header fetching plugins. - # If empty, ES requests are not influenced. - basePackages: ["com.example.optimize.elasticsearch.headers"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-elasticsearch-header-plugins). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin.md deleted file mode 100644 index 69e54a57e19..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: engine-rest-filter-plugin -title: "Engine REST filter" -description: "Register your own REST filter that is called for every REST call to the engine." ---- - -Camunda Platform 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature allows you to register your own filter that is called for every REST call to one of the configured process engines. -For that, the Optimize plugin system provides the following interface: - -```java -public interface EngineRestFilter { - - void filter(ClientRequestContext requestContext, String engineAlias, String engineName) throws IOException; -} -``` - -Implement this interface to adjust the JAX-RS client request, which is represented by `requestContext`, sent to the process engine's REST API. -If the modification depends on the process engine, you can analyze the value of `engineAlias` and/or `engineName` to decide what adjustment is needed. 
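As a sketch of such an engine-dependent adjustment, a filter could inspect `engineAlias` and only modify requests for one specific engine. The alias `secured-engine`, the header name, and its value below are assumptions for illustration only; the structure mirrors the documentation's own example that follows.

```java
package com.example.optimize.enginerestplugin;

import java.io.IOException;
import javax.ws.rs.client.ClientRequestContext;

// Hypothetical sketch: only requests sent to the engine configured under the
// alias "secured-engine" receive an extra header; other engines are untouched.
public class EngineSpecificTokenFilter implements EngineRestFilter {

  @Override
  public void filter(ClientRequestContext requestContext, String engineAlias, String engineName) throws IOException {
    if ("secured-engine".equals(engineAlias)) {
      requestContext.getHeaders().add("Custom-Token", "TokenForSecuredEngine");
    }
  }
}
```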
- -The following example shows a filter that simply adds a custom header to every REST call: - -```java -package com.example.optimize.enginerestplugin; - -import java.io.IOException; -import javax.ws.rs.client.ClientRequestContext; - -public class AddCustomTokenFilter implements EngineRestFilter { - - @Override - public void filter(ClientRequestContext requestContext, String engineAlias, String engineName) throws IOException { - requestContext.getHeaders().add("Custom-Token", "SomeCustomToken"); - } - -} -``` - -Similar to other plugins, you have to package your plugin in a `jar`, add it to the `plugin` folder, and enable Optimize to find it by adding the following configuration to `environment-config.yaml`: - -```yaml -plugin: - engineRestFilter: - #Look in the given base package list for engine rest filter plugins. - #If empty, the REST calls are not influenced. - basePackages: ["com.example.optimize.enginerestplugin"] -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/plugin-system.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/plugin-system.md deleted file mode 100644 index a4c61022cf2..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/plugin-system.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -id: plugin-system -title: "Optimize plugin system" -description: "Explains the principle of plugins in Optimize and how they can be added." ---- - -Camunda Platform 7 only - -Optimize allows you to adapt the behavior of Optimize, e.g. to decide which kind of data should be analyzed and to tackle technical issues. - -Have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples) to see some use cases for the plugin system and how plugins can be implemented and used. - -## Setup your environment - -First, add the Optimize plugin to your project via maven: - -```xml - - org.camunda.optimize - plugin - {{< currentVersionAlias >}} - -``` - -:::note -It is important to use the same plugin environment version as the Optimize version you plan to use. -Optimize rejects plugins that are built with different Optimize versions to avoid compatibility problems. -This also means that to update to newer Optimize versions it is necessary to build the plugin again with the new version. -::: - -To tell Maven where to find the plugin environment, add the following repository to your project: - -```xml - - - camunda-bpm-nexus - camunda-bpm-nexus - - https://artifacts.camunda.com/artifactory/camunda-optimize/ - - - -``` - -:::note -To make this work, you need to add your nexus credentials and the server to your `settings.xml`. -::: - -It is required to create an uber `jar` so Optimize can load third-party dependencies and to validate the used Optimize version. -You can add the following to your project: - -```xml - - install - - - org.apache.maven.plugins - maven-assembly-plugin - 3.1.0 - - - package - - single - - - ${project.artifactId} - - jar-with-dependencies - - - - - - - -``` - -:::note -By default, Optimize loads plugin classes isolated from the classes used in Optimize. -This allows you to use library versions for the plugin that differ from those used in Optimize. -::: - -If you want to use the provided Optimize dependencies instead, it is possible to exclude them from -the uber `jar` by setting the scope of those dependencies to `provided`. Then, Optimize does not load them from the plugin. 
-This might have side effects if the used version in the plugin is different to the one provided by Optimize. -To get an overview of what is already provided by Optimize, have a look at -the [third-party libraries]($docs$/reference/dependencies/). - -## Debug your plugin - -To start Optimize in debug mode, execute the Optimize start script with a debug parameter. - -On Unix systems, this could look like the following - -- For the demo distribution: - -``` -./optimize-demo.sh --debug -``` - -- For the production distribution: - -``` -./optimize-startup.sh --debug -``` - -On a Windows system this could look like the following: - -- For the demo distribution: - -``` -.\optimize-demo.bat --debug -``` - -- For the production distribution: - -``` -.\optimize-startup.bat --debug -``` - -By default, this will open up a debug port on 9999. Once you have set this up, you need to open the project where you implemented the plugin in your favorite IDE and connect to the debug port. - -To change the default debug port, have a look into `optimize-startup.sh` on Linux/Mac or `optimize-startup.bat` on Windows systems. There, you should find a variable called `DEBUG_PORT` which allows you to customize the port. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/single-sign-on.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/single-sign-on.md deleted file mode 100644 index 8aac260bfb2..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/single-sign-on.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: single-sign-on -title: "Single sign on" -description: "Register your own hook into the Optimize authentication system such that you can integrate Optimize with your single sign on system." ---- - -Camunda Platform 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature allows you to register your own hook into the Optimize authentication system such that you can -integrate Optimize with your single sign on system. This allows you to skip the log in via the Optimize interface. - -For that, the Optimize plugin system provides the following interface: - -```java -public interface AuthenticationExtractor { - - AuthenticationResult extractAuthenticatedUser(HttpServletRequest servletRequest); -} -``` - -Implement this interface to extract your custom auth header from the JAX-RS servlet request, which is represented by `servletRequest`. -With the given request you are able to extract your information both from the request header and from the request cookies. 
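As a complement to the header-based example below, a cookie-based variant might look like the following sketch. The class name and the cookie name `optimize-sso-user` are assumptions, not an Optimize convention; the `AuthenticationResult` usage follows the documented example.

```java
package com.example.optimize.security.authentication;

import org.camunda.optimize.plugin.security.authentication.AuthenticationExtractor;
import org.camunda.optimize.plugin.security.authentication.AuthenticationResult;

import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;

// Hypothetical sketch: authenticate the user from a cookie instead of a header.
public class SignInUserFromCookiePlugin implements AuthenticationExtractor {

  @Override
  public AuthenticationResult extractAuthenticatedUser(HttpServletRequest servletRequest) {
    AuthenticationResult result = new AuthenticationResult();
    result.setAuthenticated(false);
    Cookie[] cookies = servletRequest.getCookies();
    if (cookies != null) {
      for (Cookie cookie : cookies) {
        if ("optimize-sso-user".equals(cookie.getName())) {
          result.setAuthenticatedUser(cookie.getValue());
          result.setAuthenticated(true);
          break;
        }
      }
    }
    return result;
  }
}
```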
- -The following example extracts a header with the name `user` and if the header exists, the user name from the header is authenticated: - -```java -package com.example.optimize.security.authentication; - -import org.camunda.optimize.plugin.security.authentication.AuthenticationExtractor; -import org.camunda.optimize.plugin.security.authentication.AuthenticationResult; - -import javax.servlet.http.HttpServletRequest; - -public class AutomaticallySignInUserFromHeaderPlugin implements AuthenticationExtractor { - - @Override - public AuthenticationResult extractAuthenticatedUser(HttpServletRequest servletRequest) { - String userToAuthenticate = servletRequest.getHeader("user"); - AuthenticationResult result = new AuthenticationResult(); - result.setAuthenticatedUser(userToAuthenticate); - result.setAuthenticated(userToAuthenticate != null); - return result; - } -} -``` - -Similar to the other plugins' setup, you have to package your plugin in a `jar`, add it to Optimize's `plugin` folder, and make Optimize find it by adding the following configuration to `environment-config.yaml`: - -```yaml -plugin: - authenticationExtractor: - # Looks in the given base package list for authentication extractor plugins. - # If empty, the standard Optimize authentication mechanism is used. - basePackages: ["com.example.optimize.security.authentication"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-sso-plugins). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/variable-import-plugin.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/variable-import-plugin.md deleted file mode 100644 index c60fbff211b..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/plugins/variable-import-plugin.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: variable-import-plugin -title: "Variable import customization" -description: "Enrich or filter the variable import so you can customize which and how variables are imported to Optimize." ---- - -Camunda Platform 7 only - -Before implementing the plugin, make sure that you have [set up your environment](./plugin-system.md#set-up-your-environment). - -This feature enables you to enrich or filter the variable import, e.g., if variables in Camunda contain IDs of variables in another database and you would like to resolve those references to the actual values. - -The Optimize plugin system contains the following interface: - -```java -public interface VariableImportAdapter { - - List adaptVariables(List variables); -} -``` - -Implement this to adjust the variables to be imported. Given is a list of variables that would be imported if no further action is performed. The returned list is the customized list with the enriched/filtered variables that will be imported. To create new variable instances, you can use the `PluginVariableDto` class as data transfer object (DTO), which is also contained in the plugin system. - -:::note -All DTO class members need to be set in order, otherwise the variable is ignored, as this may lead to problems during data analysis. - -The data from the engine is imported in batches. This means the `adaptVariables` method is called once per batch rather than once for all data. For instance, if you have 100,000 variables in total and the batch size is 10,000, the plugin function will be called 10 times. 
-::: - -The following shows an example of a customization of the variable import in the package `optimize.plugin`, where every string variable is assigned the value 'foo': - -```java -package org.mycompany.optimize.plugin; - -import org.camunda.optimize.plugin.importing.variable.PluginVariableDto; -import org.camunda.optimize.plugin.importing.variable.VariableImportAdapter; - -import java.util.List; - - public class MyCustomVariableImportAdapter implements VariableImportAdapter { - - @Override - public List adaptVariables(List list) { - for (PluginVariableDto pluginVariableDto : list) { - if(pluginVariableDto.getType().toLowerCase().equals("string")) { - pluginVariableDto.setValue("foo"); - } - } - return list; - } - -} -``` - -Now when `MyCustomVariableImportAdapter`, packaged as a `jar` file, is added to Optimize's `plugin` folder, we just have to add the following property to the `environment-config.yaml` file to make the plugin work: - -```yaml -plugin: - variableImport: - # Look in the given base package list for variable import adaption plugins. - # If empty, the import is not influenced. - basePackages: ["org.mycompany.optimize.plugin"] -``` - -For more information and example implementations, have a look at the [Optimize Examples Repository](https://github.com/camunda/camunda-optimize-examples#getting-started-with-variable-import-plugins). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/reimport.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/reimport.md deleted file mode 100644 index b6c769f69fe..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/reimport.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: reimport -title: "Camunda engine data reimport" -description: "Find out how to reimport Camunda engine data without losing your reports and dashboards." ---- - -Camunda Platform 7 only - -There are cases where you might want to remove all Camunda Platform engine data from Optimize which has been imported from connected Camunda engines but don't want to lose Optimize entities such as collections, reports, or dashboards you created. - -:::note Warning! -Triggering a reimport causes the current data imported from the engine to be deleted and a new import cycle to be started. That also means that data which has already been removed from the engine (e.g. using the [history cleanup feature](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup)) is irreversibly lost. - -When triggering a reimport, all existing event-based processes get unpublished and reset to the `mapped` state. This is due to the fact that event-based processes may include Camunda engine data, yet the reimport does not take into account which sources event-based processes are actually based on and as such clears the data for all of them. - -You then have to manually publish event-based processes after you have restarted Optimize. -::: - -To reimport engine data, perform the following -steps: - -1. Stop Optimize, but keep Elasticsearch running (hint: to only start Elasticsearch without Optimize, you can use `elasticsearch-startup.sh` or `elasticsearch-startup.bat` scripts). -2. From the Optimize installation root run `./reimport/reimport.sh` on Linux or `reimport/reimport.bat` on Windows and wait for it to finish - - * In Docker environments, you can override the command the container executes on start to call the reimport script, e.g. 
in [docker-compose](https://docs.docker.com/compose/) this could look like the following: - - ``` - version: '2.4' - - services: - optimize: - image: registry.camunda.cloud/optimize-ee/optimize:latest - command: ["./reimport/reimport.sh"] - ``` - -3. Start Optimize again. Optimize will now import all the engine data from scratch. -4. If you made use of event-based processes you will have to manually publish them again. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/authorization.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/authorization.md deleted file mode 100644 index c8bcc9cead0..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/authorization.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: authorization -title: "Authorization" ---- - -Most requests of the Public REST API need to include an authorization token -either as an [`Authorization`](https://tools.ietf.org/html/rfc7235#section-4.2) request header or as a URI Query Parameter named `access_token`. - -Given a valid token `mySecret` the header would need to be set in one of the following ways: - -``` -Authorization: mySecret -``` - -``` -Authorization: Bearer mySecret -``` - -For sending the token as a query parameter the HTTP query would look like the following: - -``` -DELETE /api/public/dashboard/{dashboard-ID}?access_token=mySecret -``` - -The token to be used to access the Optimize API is a configurable shared secret. -Refer to [Public API Configuration](../../setup/configuration/#public-api) -for the particular configuration key to set this token. - -The following is an example configuration with a token value of `mySecret`: - - api: - accessToken: mySecret - \ No newline at end of file diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/delete-dashboard.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/delete-dashboard.md deleted file mode 100644 index 94fa55256d6..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/delete-dashboard.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -id: delete-dashboard -title: "Delete dashboards" -description: "The REST API to delete dashboards from Optimize." ---- - -Camunda Platform 7 only - -## Purpose - -The dashboards deletion API allows you to delete dashboards by ID from Optimize. - -:::note Heads up! -The deletion of a dashboard does not affect the referenced reports. -::: - -## Method & HTTP target resource - -DELETE `/api/public/dashboard/{dashboard-ID}` - -Where `dashboard-ID` is the ID of the dashboard you wish to delete. - -## Request headers - -The following request headers have to be provided with every delete request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|See [Authorization](../../authorization)| - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every delete request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|See [Authorization](../../authorization)| - -* Only required if not set as a request header - -## Request body - -No request body is required. - -## Result - -No response body. - -## Response codes - -Possible HTTP Response status codes: - -|Code|Description| -|--- |--- | -|204|Request successful.| -|401|Secret incorrect or missing in HTTP Header. 
See [Authorization](../../authorization) on how to authenticate.| -|404|The requested dashboard was not found, please check the provided dashboard-ID.| -|500|Some error occurred while processing the request, best check the Optimize log.| - -## Example - -### Delete a dashboard - -Let's assume you want to delete a dashboard with the ID `e6c5abb1-6a18-44e7-8480-d562d511ba62`, this is what it would look like: - -DELETE `/api/public/dashboard/e6c5aaa1-6a18-44e7-8480-d562d511ba62?access_token=mySecret` - -##### Response - -Status 204. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/export-dashboard-definitions.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/export-dashboard-definitions.md deleted file mode 100644 index 7ce76ed87a2..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/export-dashboard-definitions.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -id: export-dashboard-definitions -title: "Export dashboard definitions" -description: "The REST API to export dashboard definitions." ---- - -## Purpose - -This API allows users to export dashboard definitions which can later be imported into another Optimize system. Note that exporting a dashboard also exports all reports contained within the dashboard. The dashboards to be exported may be within a Collection or private entities, the API has access to both. - -The obtained list of entity exports can be imported into other Optimize systems either using the dedicated [import API](../../import-entities) or [via UI](../../../../components/userguide/additional-features/export-import.md#importing-entities). - -## Method & HTTP target resource - -POST `/api/public/export/dashboard/definition/json` - -## Request headers - -The following request headers have to be provided with every request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|[Authorization](../../authorization)| - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|See [Authorization](../../authorization)| - -* Only required if not set as a request header - -## Request body - -The request body should contain a JSON array of dashboard IDs to be exported. - -## Result - -The response contains a list of exported dashboard definitions as well as all report definitions contained within the dashboards. - -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|204|Request successful.| -|401|Secret incorrect or missing in HTTP Header. See [Authorization](../../authorization) on how to authenticate.| -|404|At least one of the given dashboard IDs does not exist.| -|500|Some error occurred while processing the request, best check the Optimize log.| - -## Example - -### Export two dashboards - -Assuming you want to export the two dashboards with IDs `123` and `456` and have configured the accessToken `mySecret`, this is what it would look like: - -POST `/api/public/export/dashboard/definition/json?access_token=mySecret` - -with request body: - -``` -[ "123", "456" ] -``` - -##### Response - -Status 200. - -##### Response content - -The response contains the two exported dashboard definitions as well as all three process reports contained within the two dashboards. 
- -``` -[ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - "exportEntityType": "single_process_report", - "name": "Number: Process instance duration", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "625c2411-b95f-4442-936b-1976b9511d4a", - "exportEntityType": "single_process_report", - "name": "Heatmap: Flownode count", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "94a7252e-d5c3-45ea-9906-75271cc0cac2", - "exportEntityType": "single_process_report", - "name": "Data Table: User task count", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "123", - "exportEntityType": "dashboard", - "name": "Dashboard 1", - "sourceIndexVersion": 5, - "reports": [ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - ... - }, - { - "id": "625c2411-b95f-4442-936b-1976b9511d4a", - ... - } - ], - "availableFilters": [...], - "collectionId": null - }, - { - "id": "456", - "exportEntityType": "dashboard", - "name": "Dashboard 2", - "sourceIndexVersion": 5, - "reports": [ - { - "id": "94a7252e-d5c3-45ea-9906-75271cc0cac2", - ... - } - ], - "availableFilters": [...], - "collectionId": null - } -] -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/get-dashboard-ids.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/get-dashboard-ids.md deleted file mode 100644 index 767f91e9b17..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/dashboard/get-dashboard-ids.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -id: get-dashboard-ids -title: "Get dashboard IDs" -description: "The REST API to retrieve all dashboard IDs in a given collection." ---- - -## Purpose - -This API allows users to retrieve all dashboard IDs from a given collection. - -## Method & HTTP target resource - -GET `/api/public/dashboard` - -## Request headers - -The following request headers have to be provided with every request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|[Authorization](../../authorization)| - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|[Authorization](../../authorization)| -|collectionId|REQUIRED|The ID of the collection for which to retrieve the dashboard IDs.| - -* Only required if not set as a request header - -## Request body - -No request body is required. - -## Result - -The response contains a list of IDs of the dashboards existing in the collection with the given collection ID. - -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|200|Request successful.| -|401|Secret incorrect or missing in HTTP Header. See [Authorization](../../authorization) on how to authenticate.| -|500|Some error occurred while processing the request, best check the Optimize log.| - -## Example - -### Retrieve all dashboard IDs from a collection - -Assuming you want to retrieve all dashboard IDs in the collection with the ID `1234` and have configured the accessToken `mySecret`, this is what it would look like: - -GET `/api/public/dashboard?collectionId=1234&access_token=mySecret` - -##### Response - -Status 200. 
- -##### Response content - -``` -[ - { - "id": "9b0eb845-e8ed-4824-bd85-8cd69038f2f5" - }, - { - "id": "1a866c7c-563e-4f6b-adf1-c4648531f7d4" - } -] -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/event-ingestion.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/event-ingestion.md deleted file mode 100644 index 53e06f8b3a2..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/event-ingestion.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -id: event-ingestion -title: "Event ingestion" -description: "The REST API to ingest external events into Optimize." ---- - -Camunda Platform 7 only - -## Purpose - -The Event Ingestion REST API ingests business process related event data from any third-party system to Camunda Optimize. These events can then be correlated into an [event-based process](../../../components/userguide/additional-features/event-based-processes.md) in Optimize to get business insights into business processes that are not yet fully modeled nor automated using the Camunda Platform. - -## Functionality - -The Event Ingestion REST API has the following functionality: - -1. Ingest new event data in batches, see the example on [ingesting three cloud events](#ingest-cloud-events). -2. Reingest/override previously ingested events, see the example on [reingesting cloud events](#reingest-cloud-events). - -## CloudEvents compliance - -To provide the best interoperability possible, the Optimize Event Ingestion REST API implements the [CloudEvents Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/spec.md) specification, which is hosted by the [Cloud Native Computing Foundation (CNCF)](https://www.cncf.io/). - -In particular, the Optimize Event Ingestion REST API is a CloudEvents consumer implemented as an HTTP Web Hook, as defined by the [CloudEvents HTTP 1.1 Web Hooks for Event Delivery - Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md) specification. Following the [Structured Content Mode](https://github.com/cloudevents/spec/blob/v1.0/http-protocol-binding.md#32-structured-content-mode) of the [HTTP Protocol Binding for CloudEvents - Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/http-protocol-binding.md), event context attributes and event data is encoded in the [JSON Batch Format](https://github.com/cloudevents/spec/blob/v1.0/json-format.md#4-json-batch-format) of the [CloudEvents JSON Event Format Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/json-format.md). - -## Authorization - -As required by the [CloudEvents HTTP 1.1 Web Hooks for Event Delivery - Version 1.0](https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#3-authorization) specification, every [Event Ingestion REST API request](#method-and-http-target-resource) needs to include an authorization token either as an [`Authorization`](https://tools.ietf.org/html/rfc7235#section-4.2) request header, or as a [URI Query Parameter](https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#32-uri-query-parameter) named `access_token`. - -Details on how to configure and pass this token can be found [here](../authorization). 
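The following is a minimal sketch of both options using `curl`. The host, port, token value, and the `events.json` payload file are placeholders, and the exact value expected in the `Authorization` header (for example, whether a `Bearer` prefix is required) is defined on the linked authorization page.

```
# Hedged sketch: the same batch ingestion request authenticated in two ways.
# Host, port, token value, and events.json are placeholders.

# Option 1: token passed via the Authorization request header
curl -X POST "http://localhost:8090/api/ingestion/event/batch" \
  -H "Authorization: Bearer mySecret" \
  -H "Content-Type: application/cloudevents-batch+json" \
  -d @events.json

# Option 2: token passed as the access_token URI query parameter
curl -X POST "http://localhost:8090/api/ingestion/event/batch?access_token=mySecret" \
  -H "Content-Type: application/cloudevents-batch+json" \
  -d @events.json
```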
- -## Method and HTTP target resource - -POST `/api/ingestion/event/batch` - -## Request headers - -The following request headers have to be provided with every ingest request: - -| Header | Constraints | Value | -| --- | --- | --- | -| Authorization | REQUIRED | See [Authorization](#authorization) | -| Content-Length | REQUIRED | Size in bytes of the entity-body, also see [Content-Length](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Length). | -| Content-Type | REQUIRED | Must be one of: `application/cloudevents-batch+json` or `application/json` | - -## Request body - -[JSON Batch Format](https://github.com/cloudevents/spec/blob/v1.0/json-format.md#4-json-batch-format) compliant JSON Array of CloudEvent JSON Objects: - -| Name | Type | Constraints | Description | -| --- | --- | --- | --- | -| [specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion) | String | REQUIRED | The version of the CloudEvents specification, which the event uses, must be `1.0`. See [CloudEvents - Version 1.0 - specversion](https://github.com/cloudevents/spec/blob/v1.0/spec.md#specversion). | -| [id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id) | String | REQUIRED | Uniquely identifies an event, see [CloudEvents - Version 1.0 - id](https://github.com/cloudevents/spec/blob/v1.0/spec.md#id).| -| [source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1) | String | REQUIRED | Identifies the context in which an event happened, see [CloudEvents - Version 1.0 - source](https://github.com/cloudevents/spec/blob/v1.0/spec.md#source-1). A use-case could be if you have conflicting types across different sources. For example, a `type:OrderProcessed` originating from both `order-service` and `shipping-service`. In this case, the `source` field provides means to clearly separate between the origins of a particular event. Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. | -| [type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | String | REQUIRED | This attribute contains a value describing the type of event related to the originating occurrence, see [CloudEvents - Version 1.0 - type](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type). Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. The value `camunda` cannot be used for this field. | -| [time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type) | [Timestamp](https://github.com/cloudevents/spec/blob/v1.0/spec.md#type-system) | OPTIONAL | Timestamp of when the occurrence happened, see [CloudEvents - Version 1.0 - time](https://github.com/cloudevents/spec/blob/v1.0/spec.md#time). String encoding: [RFC 3339](https://tools.ietf.org/html/rfc3339). If not present, a default value of the time the event was received will be created. | -| [data](https://github.com/cloudevents/spec/blob/v1.0/spec.md#event-data) | Object | OPTIONAL | Event payload data that is part of the event, see [CloudEvents - Version 1.0 - Event Data](https://github.com/cloudevents/spec/blob/v1.0/spec.md#event-data). This CloudEvents Consumer API only accepts data encoded as `application/json`, the optional attribute [CloudEvents - Version 1.0 - datacontenttype](https://github.com/cloudevents/spec/blob/v1.0/spec.md#datacontenttype) is thus not required to be provided by the producer. 
Furthermore, there are no schema restrictions on the `data` attribute and thus the attribute [CloudEvents - Version 1.0 - dataschema](https://github.com/cloudevents/spec/blob/v1.0/spec.md#datacontenttype) is also not required to be provided. Producers may provide any valid JSON object, but only simple properties of that object will later get converted to variables of process instances of an [event-based process](./../setup/setup-event-based-processes.md). | -| group | String | OPTIONAL | This is an OPTIONAL [CloudEvents Extension Context Attribute](https://github.com/cloudevents/spec/blob/v1.0/spec.md#extension-context-attributes) that is specific to this API. A group identifier that can make it easier for users to identify a group of related events when mapping events to a process model. An example could be a domain of events that are most likely related to each other; for example, `billing`. When this field is provided, it will be used to allow adding events that belong to a group to the [mapping table](../../../components/userguide/additional-features/event-based-processes.md#external-events). Optimize handles groups case-sensitively. Note: The triplet of `type`, `source`, and `group` will be used as a unique identifier for classes of events. | -| traceid | String | REQUIRED | This is a REQUIRED [CloudEvents Extension Context Attribute](https://github.com/cloudevents/spec/blob/v1.0/spec.md#extension-context-attributes) that is specific to this API. A traceid is a correlation key that relates multiple events to a single business transaction or process instance in BPMN terms. Events with the same traceid will get correlated into one process instance of an Event Based Process. | - -The following is an example of a valid `data` value consisting of simple properties. Each of those properties would be available as a variable in any [event-based process](./../setup/setup-event-based-processes.md) where an event containing this as `data` was mapped: - -``` - { - "reviewSuccessful": true, - "amount": 10.5, - "customerId": "lovelyCustomer1" - } -``` - -Nested objects, such as `customer` in the following example, would not be available as variables in event-based processes where an event containing this as `data` value was mapped: - -``` - { - "customer": { - "firstName":"John", - "lastName":"Doe" - } - } - ``` - -## Result - -This method returns no content. - -## Response codes - -Possible HTTP response status codes: - -| Code | Description | -| --- | --- | -| 204 | Request successful | -| 400 | Returned if some of the properties in the request body are invalid or missing. | -| 401 | Secret incorrect or missing in HTTP Header `X-Optimize-API-Secret`. See [Authorization](#authorization) on how to authenticate. | -| 403 | The Event Based Process feature is not enabled. | -| 429 | The maximum number of requests that can be serviced at any time has been reached. The response will include a `Retry-After` HTTP header specifying the recommended number of seconds before the request should be retried. See [Configuration](../../setup/configuration/#event-ingestion-rest-api-configuration) for information on how to configure this limit. | -| 500 | Some error occurred while processing the ingested event, best check the Optimize log. 
| - -## Example - -### Ingest cloud events - -#### Request - -POST `/api/ingestion/event/batch` - -Request Body: - - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca342", - "source": "order-service", - "type": "orderValidated", - "time": "2020-01-01T10:00:10.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example" - } - }, - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca343", - "source": "shipping-service", - "type": "packageShipped", - "traceid": "id1", - "group": "shop", - "time": "2020-01-01T10:00:20.000Z" - } - ] - -#### Response - -Status 204. - -### Reingest cloud events - -The API allows you to update any previously ingested cloud event by ingesting an event using the same event `id`. - -The following request would update the first cloud event that got ingested in the [ingest three cloud events sample](#ingest-cloud-events). Note that on an update, the cloud event needs to be provided as a whole; it's not possible to perform partial updates through this API. - -In this example, an additional field `newField` is added to the data block of the cloud event with the id `1edc4160-74e5-4ffc-af59-2d281cf5aca341`. - -#### Request - -POST `/api/ingestion/event/batch` - -Request body: - - [ - { - "specversion": "1.0", - "id": "1edc4160-74e5-4ffc-af59-2d281cf5aca341", - "source": "order-service", - "type": "orderCreated", - "time": "2020-01-01T10:00:00.000Z", - "traceid": "id1", - "group": "shop", - "data": { - "numberField": 1, - "stringField": "example", - "newField": "allNew" - } - } - ] - -#### Response - -Status 204. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/external-variable-ingestion.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/external-variable-ingestion.md deleted file mode 100644 index 6b5dfff97fe..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/external-variable-ingestion.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -id: external-variable-ingestion -title: "External variable ingestion" -description: "The REST API to ingest external variable data into Optimize." ---- - -:::note Heads Up! -The external variable ingestion API is a beta feature and will be subject -to future changes. -::: - -## Purpose - -With the external variable ingestion API, variable data held in external systems can be ingested into Optimize directly, -without the need for these variables to be present in your Camunda platform data. This can be useful when external -business data, which is relevant for process analysis in Optimize, is to be associated with specific process instances. - -Especially if this data changes over time, it is advisable to use this REST API to persist external variable updates to Optimize, as otherwise Optimize may not be aware of data changes in the external system. - -## Functionality - -The external variable ingestion API allows users to ingest batches of variable data which Optimize stores in a dedicated -index. 
All variable data includes a reference to the process instance each variable belongs to; this reference then -enables Optimize to import external variable data from the dedicated index to the respective process instances at -regular intervals. Once Optimize has updated the process instance data, the external variables are available for report -evaluations in Optimize. - -## Limitations - -Note that external variables should be treated as separate from engine variables. If you ingest variables that are already present in the engine, engine imports may override the ingested data and vice versa, leading to unreliable report results. - -Similarly, if the same ingested batch contains variables with duplicate IDs, you may experience unexpected report results because Optimize will assume only one of the updates per ID and batch to be the most up to date one. - -Additionally, ensure the reference information (process instance ID and process definition key) is accurate, as otherwise Optimize will not be able to correctly associate variables with instance data and may create new instance indices, resulting in data which will not be usable in reports. External variables can only be ingested for process instances and will not be affected by any configured variable plugin. - -## Configuration - -Refer to -the [configuration section](../../setup/configuration#external-variable-ingestion-rest-api-configuration) to learn more -about how to set up external variable ingestion. - -## Method & HTTP target resource - -POST `/api/ingestion/variable` - -## Request headers - -The following request headers have to be provided with every variable ingestion request: - -| Header | Constraints | Value | -| --- | --- | --- | -| Authorization | REQUIRED* | See [Authorization](../authorization). | -| Content-Type | REQUIRED | `application/json` | - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every variable ingestion request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|See [Authorization](../authorization)| - -* Only required if not set as a request header - -## Request body - -The request body contains an array of variable JSON Objects: - -| Name | Type | Constraints | Description| -| - | - | - | - | -| id | String | REQUIRED | The unique identifier of this variable. | -| name | String | REQUIRED | The name of the variable. | -| type | String | REQUIRED | The type of the variable. Must be one of: String, Short, Long, Double, Integer, Boolean, or Date. | -| value | String | REQUIRED | The current value of the variable. | -| processInstanceId | String | REQUIRED | The ID of the process instance this variable is to be associated with. | -| processDefinitionKey | String | REQUIRED | The definition key of the process instance this variable is to be associated with. | - -## Result - -This method returns no content. - -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|204|Request successful.| -|400|Returned if some properties in the request body are invalid or missing.| -|401|Secret incorrect or missing. 
See [Authorization](../authorization) on how to authorize.| - -## Example - -#### Request - -POST `/api/ingestion/variable` - -Request Body: - - [ - { - "id": "7689fced-2639-4408-9de1-cf8f72769f43", - "name": "address", - "type": "string", - "value": "Main Street 1", - "processInstanceId": "c6393461-02bb-4f62-a4b7-f2f8d9bbbac1", - "processDefinitionKey": "shippingProcess" - }, - { - "id": "993f4e73-7f6a-46a6-bd45-f4f8e3470ba1", - "name": "amount", - "type": "integer", - "value": "500", - "processInstanceId": "8282ed49-2243-44df-be5e-1bf893755d8f", - "processDefinitionKey": "orderProcess" - } - ] - -#### Response - -Status 204. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/health-readiness.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/health-readiness.md deleted file mode 100644 index 3d6e581d653..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/health-readiness.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: health-readiness -title: "Health readiness" -description: "The REST API to check the readiness of Optimize." ---- - -Camunda Platform 7 only - -## Purpose - -The purpose of Health-Readiness REST API is to return information indicating whether Optimize is ready to be used. - -## Method & HTTP target resource - -GET `/api/readyz` - -## Response - -The response is an empty body with the status code indicating the readiness of Optimize. The following responses are available: - -- `200`: This indicates that Optimize is ready to use. It is connected to both Elasticsearch and at least one of its configured engines. -- `503`: This indicates that Optimize is not ready to use. It cannot connect to either Elasticsearch or any of its configured engines. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/import-entities.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/import-entities.md deleted file mode 100644 index 6c519924e7d..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/import-entities.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -id: import-entities -title: "Import entities" -description: "The REST API to import entity definitions." ---- - -## Purpose - -This API allows users to import entity definitions such as reports and dashboards into existing collections. These entity definitions may be obtained either using the [report](../report/export-report-definitions/) or [dashboard](../dashboard/export-dashboard-definitions) export API or [via the UI](../../../components/userguide/additional-features/export-import.md#exporting-entities). - -### Prerequisites - -For importing via API, the following prerequisites must be met: - -- All definitions the entities require exist in the target Optimize. -- The target collection, identified using the `collectionId` query parameter, must exist in the target system. -- The collection data sources must include all relevant definitions for the entities. -- The entity data structures match. To ensure matching data structures, confirm that the Optimize version of the source is the same as the version of the target Optimize. - -If any of the above conditions are not met, the import will fail with an error response; refer to the error message in the response for more information. 
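To preview the end-to-end flow described in the following sections, the sketch below exports a dashboard from a source system and imports it into a collection on a target system. All hosts, ports, IDs, and access tokens are placeholders, and `curl` is only one possible client.

```
# Hedged sketch: move a dashboard (and the reports it contains) between two Optimize systems.
# Hosts, ports, IDs, and access tokens are placeholders.

# 1) Export the dashboard definitions from the source system
curl -s -X POST "http://source-optimize:8090/api/public/export/dashboard/definition/json?access_token=sourceSecret" \
  -H "Content-Type: application/json" \
  -d '["123"]' > export.json

# 2) Import the exported definitions into collection 456 on the target system
curl -X POST "http://target-optimize:8090/api/public/import?collectionId=456&access_token=targetSecret" \
  -H "Content-Type: application/json" \
  -d @export.json
```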
- -## Method & HTTP target resource - -POST `/api/public/import` - -## Request headers - -The following request headers have to be provided with every request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|[Authorization](../authorization)| - -- Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|[Authorization](../authorization)| -|collectionId|REQUIRED|The ID of the collection into which the entities are to be imported.| - -- Only required if not set as a request header - -## Request body - -The request body should contain a JSON array of entity definitions to be imported. These entity definitions may be obtained by using the [report](../report/export-report-definitions) or [dashboard](../dashboard/export-dashboard-definitions) export APIs or by [manually exporting entities](../../../components/userguide/additional-features/export-import.md#exporting-entities) via the Optimize UI. - -## Result - -The response contains a list of IDs of the newly created entities in the target system. - -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|200|Request successful.| -|400|The provided list of entities is invalid. This can occur if any of the above listed [prerequisites](#prerequisites) are not met. Check the `detailedMessage` of the error response for more information.| -|401|Secret incorrect or missing in HTTP header. See [Authorization](../authorization) on how to authenticate.| -|404|The given target collection ID does not exist.| -|500|Some error occurred while processing the request, best check the Optimize log.| - -## Example - -### Import two entities - -Assuming you want to import a report and a dashboard into the collection with ID `123`, this is what it would look like: - -POST `/api/public/import?collectionId=123&access_token=mySecret` - -With the following request body: - -``` -[ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - "exportEntityType": "single_process_report", - "name": "Number: Process instance duration", - "sourceIndexVersion": 8, - "collectionId": null, - "data": {...} - }, - { - "id": "b0eb845-e8ed-4824-bd85-8cd69038f2f5", - "exportEntityType": "dashboard", - "name": "Dashboard 1", - "sourceIndexVersion": 5, - "reports": [ - { - "id": "61ae2232-51e1-4c35-b72c-c7152ba264f9", - ... - } - ], - "availableFilters": [...], - "collectionId": null - } -] -``` - -##### Response - -Status 200. - -##### Response Content - -``` -[ - { - "id": "e8ca18b9-e637-45c8-87da-0a2b08b34d6e" - }, - { - "id": "290b3425-ba33-4fbb-b20b-a4f236036847" - } -] -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/delete-report.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/delete-report.md deleted file mode 100644 index afd3c620d71..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/delete-report.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -id: delete-report -title: "Delete reports" -description: "The REST API to delete reports from Optimize." ---- - -Camunda Platform 7 only - -## Purpose - -The report deletion API allows you to delete reports by ID from Optimize. - -:::note Heads up! -During deletion a report will get removed from any dashboard or combined report it is referenced by. 
In case a report is referenced by an alert, the corresponding alert will get deleted too. -::: - -## Method & HTTP target resource - -DELETE `/api/public/report/{report-ID}` - -Where `report-ID` is the ID of the report you wish to delete. - -## Request headers - -The following request headers have to be provided with every delete request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|See [Authorization](../../authorization)| - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every delete request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|See [Authorization](../../authorization)| - -* Only required if not set as a request header - -## Request body - -No request body is required. - -## Result - -No response body. - -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|204|Request successful.| -|401|Secret incorrect or missing in HTTP Header. See [Authorization](../../authorization) on how to authenticate.| -|404|The requested report was not found, please check the provided report-ID.| -|500|Some error occurred while processing the request, best check the Optimize log.| - -## Example - -### Delete a report - -Let's assume you want to delete a report with the ID `e6c5abb1-6a18-44e7-8480-d562d511ba62`, this is what it would look like: - -DELETE `/api/public/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62?access_token=mySecret` - -##### Response - -Status 204. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/export-report-definitions.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/export-report-definitions.md deleted file mode 100644 index b5e5caf4c13..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/export-report-definitions.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: export-report-definitions -title: "Export report definitions" -description: "The REST API to export report definitions." ---- - -## Purpose - -This API allows users to export report definitions which can later be imported into another Optimize system. The reports to be exported may be within a collection or private entities, the API has access to both. - -The obtained list of entity exports can be imported into other Optimize systems either using the dedicated [import API](../../import-entities) or [via UI](../../../../components/userguide/additional-features/export-import.md#importing-entities). - -## Method & HTTP target resource - -POST `/api/public/export/report/definition/json` - -## Request headers - -The following request headers have to be provided with every request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|[Authorization](../../authorization)| - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|See [Authorization](../../authorization)| - -* Only required if not set as a request header - -## Request body - -The request body should contain a JSON array of report IDs to be exported. - -## Result - -The response contains a list of exported report definitions. 
- -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|204|Request successful.| -|401|Secret incorrect or missing in HTTP Header. See [Authorization](../../authorization) on how to authenticate.| -|404|At least one of the given report IDs does not exist.| -|500|Some error occurred while processing the request, best check the Optimize log.| - -## Example - -### Export two reports - -Assuming you want to export the two reports with IDs `123` and `456` and have configured the accessToken `mySecret`, this is what it would look like: - -POST `/api/public/export/report/definition/json?access_token=mySecret` - -with request body: - -``` -[ "123", "456" ] -``` - -##### Response - -Status 200. - -##### Response content - -``` -[ - { - "id": "123", - "exportEntityType": "single_process_report", - "name": "Number: Process instance duration", - "sourceIndexVersion": 8, - "collectionId": "40cb3657-bdcb-459d-93ce-06877ac7244a", - "data": {...} - }, - { - "id": "456", - "exportEntityType": "single_process_report", - "name": "Heatmap: Flownode count", - "sourceIndexVersion": 8, - "collectionId": "40cb3657-bdcb-459d-93ce-06877ac7244a", - "data": {...} - } -] -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/get-data-export.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/get-data-export.md deleted file mode 100644 index 8aa98f46c74..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/get-data-export.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -id: get-data-export -title: "Export report result data" -description: "The REST API to export report result data from Optimize." ---- - -Camunda Platform 7 only - -## Purpose - -The data export API allows users to export large amounts of data in a machine-readable format (JSON) from Optimize. - -## Functionality - -Users can export all report types (except combined reports) from `Optimize` using the Data Export API. Moreover, raw data reports can be exported in a paginated fashion, so that large amounts of data can be consumed in chunks by the client. - -### Pagination - -The simplest way to paginate through the results is to perform a search request with all the `REQUIRED` header/query parameters as described in the sections below (but without `searchRequestId`), then pass the `searchRequestId` returned in each response to the next request, until no more documents are returned. Note that it's often the case, but not guaranteed, that the `searchRequestId` remains stable through the entire pagination, so always use the `searchRequestId` from the most current response to make your next request. - -## Method & HTTP target resource - -GET `/api/public/export/report/{report-ID}/result/json` - -Where `report-ID` is the ID of the report you wish to export. - -## Request headers - -The following request headers have to be provided with every data export request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|[Authorization](../../authorization)| - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every data export request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|[Authorization](../../authorization)| -|limit|REQUIRED|Maximum number of records per page. 
Please note that the limit will only be considered when performing the request for the first page of a raw data report. The following requests for a given searchRequestId will have the same page size as the first request.| -|paginationTimeout|REQUIRED|The amount of time (in seconds) for which a search context will be held in memory, so that the remaining pages of the result can be retrieved. For more information on how to paginate through the results, please refer to the section [Pagination](#pagination).| -|searchRequestId|Optional|The ID of a previous search for which you wish to retrieve the next page of results. For more information on how to get and use a searchRequestId please refer to the section [Pagination](#pagination).| - -* Only required if not set as a request header - -## Request body - -No request body is required. - -## Result - -|Content|Value| -|--- |--- | -|searchRequestId|The ID of the performed search. The following pages from this search can be retrieved by using this ID. For more information please refer to the section [Pagination](#pagination).| -|numberOfRecordsInResponse|Number of records in the JSON Response. This is a number between [0, limit]| -|totalNumberOfRecords|The total number of records (from all pages) for this report export| -|reportId|The ID of the exported report| -|message|In case there is additional information relevant to this request, this field will contain a message describing it. The response will only contain this field if there is a message to be shown| -|data [Array]|An array containing numberOfRecordsInResponse report data records in JSON Format| - -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|200|Request successful.| -|400|Returned if some of the properties from the request are invalid or missing.| -|401|Secret incorrect or missing in HTTP Header. See [Authorization](../../authorization) on how to authenticate.| -|404|The requested report was not found, please check the provided report-ID.| -|500|Some error occurred while processing the export request, best check the Optimize log.| - -## Example - -### Export a raw data report - -Let's assume you want to export a report with the ID `e6c5abb1-6a18-44e7-8480-d562d511ba62`, with a maximum of two records per page, an access token `mySecret` and a pagination timeout of 60s, this is what it would look like - -#### Initial API call - -GET `/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json? -paginationTimeout=60&access_token=mySecret&limit=2` - -##### Response content - - { - "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", - "numberOfRecordsInResponse": 2, - "totalNumberOfRecords": 11, - "reportId": "e6c5abb1-6a18-44e7-8480-d562d511ba62", - "data": [ - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1801", - "processInstanceId": "1809", - "businessKey": "aBusinessKey", - "startDate": "2021-12-02T17:21:49.330+0200", - "endDate": "2021-12-02T17:21:49.330+0200", - "duration": 0, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - }, - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1801", - "processInstanceId": "1804", - "businessKey": "aBusinessKey", - "startDate": "2021-12-02T17:21:49.297+0200", - "endDate": "2021-12-02T17:21:49.298+0200", - "duration": 1, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - } - ] - } - -##### Response - -Status 200. 
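Before looking at the subsequent raw requests, the following is a minimal sketch of how a client could drive the whole pagination loop. The host and port are placeholders, `jq` is assumed to be available for JSON parsing, and the stop condition simply checks for an empty page as described in the [Pagination](#pagination) section.

```
# Hedged sketch: page through a raw data report until no more records are returned.
# Host, port, and access token are placeholders; jq is assumed to be installed.
BASE="http://localhost:8090/api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json"
PARAMS="paginationTimeout=60&limit=2&access_token=mySecret"
SEARCH_ID=""

while true; do
  if [ -z "$SEARCH_ID" ]; then
    PAGE=$(curl -s "$BASE?$PARAMS")
  else
    PAGE=$(curl -s "$BASE?$PARAMS&searchRequestId=$SEARCH_ID")
  fi
  COUNT=$(echo "$PAGE" | jq '.numberOfRecordsInResponse')
  [ "$COUNT" -eq 0 ] && break                            # empty page -> done
  echo "$PAGE" | jq -c '.data[]'                         # process this page's records
  SEARCH_ID=$(echo "$PAGE" | jq -r '.searchRequestId')   # always reuse the latest ID
done
```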
- -#### Subsequent API calls - -Note here the use of the query parameter `searchRequestId` to retrieve further pages from the initial search. - -`GET /api/public/export/report/e6c5aaa1-6a18-44e7-8480-d562d511ba62/result/json?paginationTimeout=60&access_token=mySecret&searchRequestId=FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ&limit=2` - -##### Response content - - { - "searchRequestId": "FGluY2x1ZGVfY29udGV4dF91dWlkDXF1ZXJ", - "numberOfRecordsInResponse": 2, - "totalNumberOfRecords": 11, - "reportId": "e6c5abb1-6a18-44e7-8480-d562d511ba62", - "data": [ - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1bc9474d-5762-11ec-8b2c-0242ac120003", - "processInstanceId": "1bdafab8-5762-11ec-8b2c-0242ac120003", - "businessKey": "aBusinessKey", - "startDate": "2021-12-07T15:32:22.739+0200", - "endDate": "2021-12-07T15:32:22.740+0200", - "duration": 1, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - }, - { - "processDefinitionKey": "aProcess", - "processDefinitionId": "aProcess:1:1bc9474d-5762-11ec-8b2c-0242ac120003", - "processInstanceId": "1bda3763-5762-11ec-8b2c-0242ac120003", - "businessKey": "aBusinessKey", - "startDate": "2021-12-07T15:32:22.735+0200", - "endDate": "2021-12-07T15:32:22.735+0200", - "duration": 0, - "engineName": "camunda-bpm", - "tenantId": null, - "variables": {} - } - ] - } - -##### Response - -Status 200. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/get-report-ids.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/get-report-ids.md deleted file mode 100644 index a3e7e9fd7cc..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/rest-api/report/get-report-ids.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -id: get-report-ids -title: "Get report IDs" -description: "The REST API to retrieve all report IDs in a given collection." ---- - -## Purpose - -This API allows users to retrieve all report IDs from a given collection. - -## Method & HTTP target resource - -GET `/api/public/report` - -## Request headers - -The following request headers have to be provided with every request: - -|Header|Constraints|Value| -|--- |--- |--- | -|Authorization|REQUIRED*|[Authorization](../../authorization)| - -* Only required if not set as a query parameter - -## Query parameters - -The following query parameters have to be provided with every request: - -|Parameter|Constraints|Value| -|--- |--- |--- | -|access_token|REQUIRED*|[Authorization](../../authorization)| -|collectionId|REQUIRED|The ID of the Collection for which to retrieve the report IDs.| - -* Only required if not set as a request header - -## Request body - -No request body is required. - -## Result - -The response contains a list of IDs of the reports existing in the collection with the given collection ID. - -## Response codes - -Possible HTTP response status codes: - -|Code|Description| -|--- |--- | -|200|Request successful.| -|401|Secret incorrect or missing in HTTP Header. See [Authorization](../../authorization) on how to authenticate.| -|500|Some error occurred while processing the request, best check the Optimize log.| - -## Example - -#### Retrieve all report IDs from a collection - -Assuming you want to retrieve all report IDs in the collection with the ID `1234` and have configured the accessToken `mySecret`, this is what it would look like: - -GET `/api/public/report?collectionId=1234&access_token=mySecret` - -###### Response - -Status 200. 
- -###### Response content - -``` -[ - { - "id": "9b0eb845-e8ed-4824-bd85-8cd69038f2f5" - }, - { - "id": "1a866c7c-563e-4f6b-adf1-c4648531f7d4" - } -] -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup.md deleted file mode 100644 index 62caad66255..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: setup -title: "Self-Managed setup" -description: "Install and configure Optimize Self-Managed." ---- - -## Install using Docker - -The `camunda/optimize:8-latest` Docker image can be used to run Optimize in Self-Managed as a container. Certain environment -variables need to be set for this to work correctly. See below for an example of how this could be done as -part of a `docker-compose` file: - -``` -optimize: - container_name: optimize - image: camunda/optimize:8-latest - ports: - - 8090:8090 - environment: - - SPRING_PROFILES_ACTIVE=ccsm - - CAMUNDA_OPTIMIZE_IAM_ISSUER_URL=http://localhost:9090 - - CAMUNDA_OPTIMIZE_IAM_CLIENTID=optimize - - CAMUNDA_OPTIMIZE_IAM_CLIENTSECRET=secret - - OPTIMIZE_ELASTICSEARCH_HOST=localhost - - OPTIMIZE_ELASTICSEARCH_HTTP_PORT=9200 - - CAMUNDA_OPTIMIZE_SECURITY_AUTH_COOKIE_SAME_SITE_ENABLED=false - - CAMUNDA_OPTIMIZE_ENTERPRISE=false - - CAMUNDA_OPTIMIZE_ZEEBE_ENABLED=true - - CAMUNDA_OPTIMIZE_ZEEBE_NAME=zeebe-record - - CAMUNDA_OPTIMIZE_ZEEBE_PARTITION_COUNT=1 - - CAMUNDA_OPTIMIZE_SHARING_ENABLED=false - - CAMUNDA_OPTIMIZE_UI_LOGOUT_HIDDEN=true -``` - -Some configuration properties are optional and have default values. See a description of these properties and their default values in the table below: - -Name | Description | Default value ------|---------------------------------------------------------------------------------------------------------|-------------- -SPRING_PROFILES_ACTIVE | Determines the mode Optimize is to be run in. For Self-Managed, set to `ccsm`. | -CAMUNDA_OPTIMIZE_IAM_ISSUER_URL| The URL at which IAM can be accessed by Optimize. | -CAMUNDA_OPTIMIZE_IAM_CLIENTID | The Client ID used to register Optimize with IAM. | -CAMUNDA_OPTIMIZE_IAM_CLIENTSECRET | The secret used when registering Optimize with IAM. | -OPTIMIZE_ELASTICSEARCH_HOST | The address/hostname under which the Elasticsearch node is available. | localhost -OPTIMIZE_ELASTICSEARCH_HTTP_PORT | The port number used by Elasticsearch to accept HTTP connections. | 9200 -CAMUNDA_OPTIMIZE_SECURITY_AUTH_COOKIE_SAME_SITE_ENABLED| Determines if `same-site` is enabled for Optimize cookies. This must be set to `false`. | true -CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_USERNAME | The username for authentication in environments where a secured Elasticsearch connection is configured. | null -CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_PASSWORD | The password for authentication in environments where a secured Elasticsearch connection is configured. | null -CAMUNDA_OPTIMIZE_ENTERPRISE | This should only be set to `true` if an Enterprise License has been acquired. | true -CAMUNDA_OPTIMIZE_ZEEBE_ENABLED | Enables import of Zeebe data in Optimize. | false -CAMUNDA_OPTIMIZE_ZEEBE_NAME | The record prefix for exported Zeebe records. | zeebe-record -CAMUNDA_OPTIMIZE_ZEEBE_PARTITION_COUNT | The number of partitions configured in Zeebe. | 1 -CAMUNDA_OPTIMIZE_SHARING_ENABLED | Disables the sharing feature (this is not currently supported). 
| false -CAMUNDA_OPTIMIZE_UI_LOGOUT_HIDDEN | Disables the logout button (logout is handled by IAM). | 1 - -## Requirements - -Self-Managed Optimize must be able to connect to Elasticsearch to write and read data. In addition, Optimize needs to connect to IAM for authentication purposes. Both of these requirements can be configured with the options described above. - -Optimize must also be configured as a client in IAM, and users will only be granted access to Optimize if they have a role -that has `write:*` permission for Optimize. - -For Optimize to import Zeebe data, Optimize must also be configured to be aware of the record prefix used when the records are exported to Elasticsearch. This can also be configured per the example above. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/authorization-management.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/authorization-management.md deleted file mode 100644 index 67d2cc6da7f..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/authorization-management.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -id: authorization-management -title: "Authorization management" -description: "Define which data users are authorized to see." ---- - -Camunda Platform 7 only - -User authorization management differs depending on whether the entities to manage the authorizations for are originating from adjacent systems like imported data from connected Camunda-BPM engines such as process instances, or whether the entities are fully managed by Camunda Optimize, such as [event-based processes and instances](../../../components/userguide/additional-features/event-based-processes.md) or [collections](../../../components/userguide/collections-dashboards-reports.md). For entities originating from adjacent systems authorizations are managed in the Camunda Platform via Camunda Admin, for the latter the authorizations are managed in Camunda Optimize. - -## Camunda Platform data authorizations - -The authorization to process or decision data, as well as tenants and user data imported from any connected Camunda REST-API, is not managed in Optimize itself but needs to be configured in the Camunda Platform and can be achieved on different levels with different options. - -If you do not know how authorization in Camunda works, visit the [authorization service documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/). This has the advantage that you don't need to define the authorizations several times. - -### Process or decision definition related authorizations - -You can specify which user has access to certain process or decision definitions, including data related to that definition. By that we mean the user can only see, create, edit, and delete reports to definitions they are authorized to. - -When defining an authorization to grant or deny access to certain definitions, the most important aspect is that you grant access on the resource type "process definition" and "decision definition". You can then relate to a specific definition by providing the definition key as resource ID or use "\*" as resource ID if you want to grant the access to all definitions. To grant access to a definition, you need to set either `ALL` or `READ_HISTORY` as permission. Both permission settings are treated equally in Optimize, so there is no difference between them. 
- -As an example, have a look how adding authorizations for process definitions could be done in Camunda Admin: - -![Grant Optimize Access in Admin](img/Admin-GrantDefinitionAuthorizations.png) - -1. The first option grants global read access for the process definition `invoice`. With this setting all users are allowed to see, update, create, and delete reports related to the process definition `invoice` in Optimize. -2. The second option defines an authorization for a single user. The user `Kermit` can now see, update, create, and delete reports related to the process definition `invoice` in Optimize. -3. The third option provides access on group level. All users belonging to the group `optimize-users` can see, update, create, and delete reports related to the process definition `invoice` in Optimize. - -It is also possible to revoke the definition authorization for specific users or groups. For instance, you can define access for all process definitions on a global scale, but exclude the `engineers` group from access reports related to the `invoice` process: - -![Revoke Optimize Access for group 'engineers' in Admin](img/Admin-RevokeDefinitionAuthorization.png) - -Decision definitions are managed in the same manner in the `Authorizations -> Decision Definition` section of the Authorizations Management of the Camunda Platform. - -### User and Group related Authorizations - -To allow logged-in users to see other users and groups in Optimize (for example, to add them to a collection), they have to be granted **read** permissions for the resource type **User** as well as the resource type **Group**. Access can be granted or denied either for all users/groups or for specific user/group IDs only. This can be done in Camunda Admin as illustrated in the definitions authorization example above. - -## Optimize entity authorization - -There are entities that only exist in Camunda Optimize and authorizations to these are not managed via Camunda Admin but within Optimize. - -### Collections - -[Collections](../../../components/userguide/collections-dashboards-reports.md) are the only way to share Camunda Optimize reports and dashboards with other users. Access to them is directly managed via the UI of collections; see the corresponding user guide section on [Collection - User Permissions](../../../components/userguide/collections-dashboards-reports.md#user-permissions). - -### Event based processes - -Although [event-based processes](../../../components/userguide/additional-features/event-based-processes.md) may include data originating from adjacent systems like the Camunda Engine when using [Camunda Activity Event Sources](../../../components/userguide/additional-features/event-based-processes.md#event-sources), they do not enforce any authorizations from Camunda Admin. The reason for that is that multiple sources can get combined in a single [event-based process](../../../components/userguide/additional-features/event-based-processes.md) that may contain conflicting authorizations. It is thus required to authorize users or groups to [event-based processes](../../../components/userguide/additional-features/event-based-processes.md) either directly when [publishing](../../../components/userguide/additional-features/event-based-processes.md#publishing-an-event-based-process) them or later on via the [event-based process - Edit Access](../../../components/userguide/additional-features/event-based-processes.md#event-based-process-list---edit-access) option. 
diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/clustering.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/clustering.md deleted file mode 100644 index 8b84557f60c..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/clustering.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -id: clustering -title: "Clustering" -description: "Read about how to run Optimize in a cluster." ---- - -Camunda Platform 7 only - -This document describes the set up of a Camunda Optimize cluster which is mainly useful in a failover scenario, but also provides means of load-balancing in terms of distributing import and user load. - -## Configuration - -There are two configuration requirements to address in order to operate Camunda Optimize successfully in a cluster scenario. -Both of these aspects are explained in detail in the following subsections. - -### 1. Import - define importing instance - -It is important to configure the cluster in the sense that only one instance at a time is actively importing from a particular Camunda Platform engine. - -:::note Warning -If more than one instance is importing data from one and the same Camunda Platform engine concurrently, inconsistencies can occur. -::: - -The configuration property [`engines.${engineAlias}.importEnabled`](../configuration/#connection-to-camunda-platform) allows to disable the import from a particular configured engine. - -Given a simple failover cluster consisting of two instances connected to one engine, the engine configurations in the `environment-config.yaml` would look like the following: - -Instance 1 (import from engine `default` enabled): - -``` -... -engines: - 'camunda-bpm': - name: default - rest: 'http://localhost:8080/engine-rest' - importEnabled: true - -historyCleanup: - processDataCleanup: - enabled: true - decisionDataCleanup: - enabled: true -... -``` - -Instance 2 (import from engine `camunda-bpm` disabled): - -``` -... -engines: - 'camunda-bpm': - name: default - rest: 'http://localhost:8080/engine-rest' - importEnabled: false -... -``` - -:::note -The importing instance has the [history cleanup enabled](./configuration.md). It is strongly recommended all non-importing Optimize instances in the cluster do not enable history cleanup to prevent any conflicts when the [history cleanup](./history-cleanup.md) is performed. -::: - -### 1.1 Import - event based process import - -In the context of event-based process import and clustering, there are two additional configuration properties to consider carefully. - -One is specific to each configured Camunda engine [`engines.${engineAlias}.eventImportEnabled`](../configuration/#connection-to-camunda-platform) and controls whether data from this engine is imported as event source data as well for [event-based processes](../../../components/userguide/additional-features/event-based-processes.md). You need to enable this on the same cluster node for which the [`engines.${engineAlias}.importEnabled`](../configuration/#connection-to-camunda-platform) configuration flag is set to `true`. - -[`eventBasedProcess.eventImport.enabled`](../configuration/#event-based-process-configuration) controls whether the particular cluster node processes events to create event based process instances. This allows you to run a dedicated node that performs this operation, while other nodes might just feed in Camunda activity events. - -### 2. 
Distributed user sessions - configure shared secret token - -If more than one Camunda Optimize instance is accessible by users, e.g. in a failover scenario, a shared secret token needs to be configured for all the instances. -This enables distributed sessions among all instances, so users do not lose their session when being routed to another instance. - -The relevant configuration property is [`auth.token.secret`](../configuration/#security) which needs to be configured in the `environment-configuration.yaml` of each Camunda Optimize instance that is part of the cluster. - -It is recommended to use a secret token with a length of at least 64 characters generated using a sufficiently good random number generator, for example the one provided by `/dev/urandom` on Linux systems. - -The following example command would generate a 64-character random string: - -``` -< /dev/urandom tr -dc A-Za-z0-9 | head -c64; echo -``` - -The corresponding `environment-config.yaml` entry would look the **same for all instances of the cluster**: - -``` -auth: - token: - secret: '' -``` - -## Example setup - -The smallest cluster setup, consisting of one instance importing from a given `default` engine and another instance with the import disabled, would look like the following: - -![Two Optimize instances](./img/Optimize-Clustering.png) - -The HTTP/S Load-Balancer would route user requests to either of the two instances, while Optimize #1 would also take care of importing data from the engine to the shared -Elasticsearch instance/cluster and Optimize #2 would only access the engine in order to authenticate and authorize users. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/common-problems.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/common-problems.md deleted file mode 100644 index e0dc0891435..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/common-problems.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: common-problems -title: "Common problems" -description: "Information to help troubleshoot common problems." ---- - -This section aims to provide initial help to troubleshoot common issues. This guide is not intended to be a complete list of possible problems, nor does it provide detailed step-by-step solutions; its intention is merely to point you in the right direction when investigating what may be causing the issue you are experiencing. - -## Optimize is missing some or all definitions - -It is possible that the user you are logged in as does not have the relevant authorizations to view all definitions in Optimize. Refer to the [authorization management section](./authorization-management.md#process-or-decision-definition-related-authorizations) to confirm the user has all required authorizations. - -Another common cause of this type of problem is an issue with Optimize's data import, for example due to underlying problems with the engine data. In this case, the Optimize logs should contain more information on what is causing Optimize to not import the definition data correctly. If you are unsure how to interpret what you find in the logs, create a support ticket. - -## Report assignee, candidate group, variable or suspension state data is inaccurate or missing - -Optimize relies on specific engine logs to retrieve data about assignees, candidate groups, variables, and instance suspension state.
If the engine history settings are not set correctly, these logs may be missing from the engine data Optimize imports. Refer to the [history level documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#choose-a-history-level) to ensure it is set correctly. - -Additionally, similar to the issue regarding missing definition data, it is possible that the Optimize import has encountered an issue. In this case, refer to your Optimize logs for more information. - -## Error message indicating that an index is set to read only - -This often occurs when Elasticsearch is running out of disk space. If this is the case, adjusting your Elasticsearch setup accordingly should resolve the issue. Note that you may need to manually unlock your indices afterwards, refer to [Elasticsearch's documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html) on how to do this. - -## Exception indicating an error while checking the engine version - -The most common cause for this issue is that the engine endpoint Optimize uses is not configured correctly. Check your [configuration](../configuration/#connection-to-camunda-platform) and ensure the engine REST URL is set correctly. - -## Server language results in UI/server errors - -When Optimize is running with its language set to one with characters that it can't recognize, such as Turkish, you may observe logged issues and unusable elements in the UI. We recommend running Optimize on a server with its language set to English. - -## Update issues - -Always check the [migration and update instructions](./../migration-update/instructions.md) for the versions you are migrating, often this section already documents the problem you are experiencing along with the solution. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/configuration.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/configuration.md deleted file mode 100644 index 841e338b05e..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/configuration.md +++ /dev/null @@ -1,416 +0,0 @@ ---- -id: configuration -title: "Configuration" -description: "An overview of all possible configuration options in Optimize." ---- - -Camunda Platform 7 only - -## Logging - -Camunda Optimize provides logging facilities that are preconfigured to use -_INFO_ logging level which provides minimal output of information in log files. -This level can be adjusted using the `environment-logback.xml` configuration file. - -Even though one could potentially configure logging levels for all packages, it -is recommended to set logging levels for the following three Optimize parts only using exact package -reference as follows: - -* Optimize runtime environment: - -```xml - -``` - -* Optimize update: - -```xml - - - -``` - -* Communication to Elasticsearch: - -```xml - -``` - -If you are running Optimize with Docker, you can use the following environment variables to configure its logging levels. - -- `OPTIMIZE_LOG_LEVEL` sets the logging level for the Optimize log -- `UPGRADE_LOG_LEVEL` sets the logging level for the Optimize update log -- `ES_LOG_LEVEL` sets the logging level for Elasticsearch - -Whether using the configuration file or Docker environment variables, to define the granularity of the information shown in the log you can set one of the following log levels: - -- **error**: shows errors only. 
-- **warn**: like **error**, but displays warnings as well. -- **info**: logs everything from **warn** and the most important information about state changes or actions in Optimize. -- **debug**: in addition to **info**, writes information about the scheduling process, alerting as well as the import of the engine data. -- **trace**: like **debug**, but in addition, writes all requests sent to the Camunda engine as well as all queries towards Elasticsearch to the log output. - -## System configuration - -All distributions of Camunda Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `config` which contains a file called `environment-config.yaml` with values that override the default Optimize properties. - -You can see a sample configuration file with all possible configuration fields -and their default values [here](service-config.yaml). - -In the following section, you will find descriptions and default values of the configuration fields with their respective YAML path. - -:::note Heads Up -For changes in the configuration to take effect, you need to restart Optimize! -::: - -### Java system properties & OS environment variable placeholders - -To externalize configuration properties from the `environment-config.yaml`, Optimize provides variable placeholder support. - -The order in which placeholders are resolved is the following: - -1. Java system properties -2. OS environment variables - -The placeholder format is `${VARIABLE_NAME}` and allows you to refer to a value of a Java system property or OS environment variable of your choice. -The `VARIABLE_NAME` is required to contain only lowercase or uppercase letters, digits and underscore `_` characters and shall not begin with a digit. The corresponding regular expression is `([a-zA-Z_]+[a-zA-Z0-9_]*)`. - -The following example illustrates the usage: - -``` -security: - auth: - token: - secret: ${AUTH_TOKEN_SECRET} -``` - -Given this variable is set before Optimize is started, for example on Unix systems with: - -``` -export AUTH_TOKEN_SECRET=sampleTokenValue -``` - -The value will be resolved at startup to `sampleTokenValue`. - -However, if the same variable is provided at the same time as a Java system property, for example via passing `-DAUTH_TOKEN_SECRET=othertokenValue` to the Optimize startup script: - -``` -./optimize-startup.sh -DAUTH_TOKEN_SECRET=othertokenValue -``` - -The value would be resolved to `othertokenValue` as Java system properties have precedence over OS environment variables. - -:::note -For Windows users, to pass Java system properties to the provided Windows Batch script `optimize-startup.bat`, you have to put them into double quotes when using the `cmd.exe` shell, as shown below. -::: - -``` -optimize-startup.bat "-DAUTH_TOKEN_SECRET=othertokenValue" -``` - -For the Windows Powershell in three double quotes: - -``` -./optimize-startup.bat """-DAUTH_TOKEN_SECRET=othertokenValue""" -``` - -#### Default values - -For variable placeholders it's also possible to provide default values using the following format: `${VARIABLE_NAME:DEFAULT_VALUE}`. The `DEFAULT_VALUE` can contain any character except `}`. - -The following example illustrates the usage: - -``` -security: - auth: - token: - secret: ${AUTH_TOKEN_SECRET:defaultSecret} -``` - -### Security - -These values control mechanisms of Optimize related security, e.g. security headers and authentication. 
- -| YAML Path | Default Value | Description | -| - | - | - | -| | -| security.auth.token.lifeMin | 60 | Optimize uses token-based authentication to keep track of which users are logged in. Define the lifetime of the token in minutes. | -| security.auth.token.secret | null | Optional secret used to sign authentication tokens, it's recommended to use at least a 64-character secret. If set to `null` a random secret will be generated with each startup of Optimize. | -| security.auth.superUserIds | [ ] | List of user IDs that are granted full permission to all collections, reports, and dashboards.

    Note: For reports, these users are still required to be granted access to the corresponding process/decision definitions in Camunda Platform Admin. See [Authorization Management](./authorization-management.md). | -| security.auth.superGroupIds | [ ] | List of group IDs that are granted full permission to all collections, reports, and dashboards. All members of the groups specified will have superuser permissions in Optimize.

    Note: For reports, these groups are still required to be granted access to the corresponding process/decision definitions in Camunda Platform Admin. See [Authorization Management](./authorization-management.md). | -| security.responseHeaders.HSTS.max-age | 31536000 | HTTP Strict Transport Security (HSTS) is a web security policy mechanism which helps to protect websites against protocol downgrade attacks and cookie hijacking. This field defines the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. If you set the number to a negative value no HSTS header is sent. | -| security.responseHeaders.HSTS.includeSubDomains | true | HTTP Strict Transport Security (HSTS) is a web security policy mechanism which helps to protect websites against protocol downgrade attacks and cookie hijacking. If this optional parameter is specified, this rule applies to all the site’s subdomains as well. | -| security.responseHeaders.X-XSS-Protection | 1; mode=block | This header enables the cross-site scripting (XSS) filter in your browser. Can have one of the following options:
    • `0`: Filter disabled.
    • `1`: Filter enabled. If a cross-site scripting attack is detected, the browser will sanitize the page in order to stop the attack.
    • `1; mode=block`: Filter enabled. Rather than sanitizing the page, when an XSS attack is detected, the browser will prevent rendering of the page.
    • `1; report=http://[YOURDOMAIN]/your_report_URI`: Filter enabled. The browser will sanitize the page and report the violation. This is a Chromium function utilizing CSP violation reports to send details to a URI of your choice.
    | -| security.responseHeaders.X-Content-Type-Options | true | Setting this header will prevent the browser from interpreting files as a different MIME type to what is specified in the Content-Type HTTP header (e.g. treating text/plain as text/css). | -| security.responseHeaders.Content-Security-Policy | base-uri 'self' | A Content Security Policy (CSP) has significant impact on the way browsers render pages. By default Optimize uses the base-uri directive which restricts the URLs that can be used to the Optimize pages. Find more details in [Mozilla's Content Security Policy Guide](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy). | - -### Public API - -This section focuses on common properties related to the Public REST API of Optimize. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|api.accessToken|null|Secret token to be provided to the secured REST API on access. If set to `null` an error will be thrown and requests will get rejected.

    It is mandatory to configure a value if the majority of Public REST API is to be used.| - -### Container - -Settings related to embedded Jetty container, which serves the Optimize application. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|container.host|localhost|A host name or IP address to identify a specific network interface on which to listen.| -|container.ports.http|8090|A port number that will be used by Optimize to process HTTP connections. If set to null, or left empty, HTTP connections won't be accepted.| -|container.ports.https|8091|A port number that will be used by Optimize to process secure HTTPS connections.| -|container.keystore.location|keystore.jks|HTTPS requires an SSL Certificate. When you generate an SSL Certificate, you are creating a keystore file and a keystore password for use when the browser interface connects. This field specifies the location of this keystore file.| -|container.keystore.password|optimize|Password of keystore file.| -|container.status.connections.max|10|Maximum number of web socket connections accepted for status report.| -|container.accessUrl|null|Optional URL to access Optimize (used for links to Optimize in e.g. alert emails). If no value specified the container host and port are used instead.| - -### Connection to Camunda Platform - -Configuration for engines used to import data. Note that you have to have -at least one engine configured at all times. You can configure multiple engines -to import data from. Each engine configuration should have a unique alias associated -with it and represented by `${engineAlias}`. - -Note that each connected engine must have its respective history level set to `FULL` in order to see all available data -in Optimize. Using any other history level will result in less data and/or functionality within Optimize. Furthermore, -history in a connected engine should be configured for long enough for Optimize to import it. If data is removed from an -engine before Optimize has imported it, that data will not be available in Optimize. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|engines.${engineAlias}.name|default|The process engine's name on the platform, this is the unique engine identifier on the platforms REST API.| -|engines.${engineAlias}.defaultTenant.id|null|A default tenantID to associate all imported data with if there is no tenant configured in the engine itself. This property is only relevant in the context of a `One Process Engine Per Tenant` tenancy. For details consult the Multi-Tenancy documentation.| -|engines.${engineAlias}.defaultTenant.name|null|The name used for this default tenant when displayed in the UI.| -|engines.${engineAlias}.excludeTenant|[ ]|Comma-separated list of tenant IDs to be excluded when importing data from the specified engine. When left empty, data from all tenants will be imported. 
Please note that the `defaultTenant` cannot be excluded (and therefore also not the entities with `null` as tenant)| -|engines.${engineAlias}.rest|http://localhost:8080/engine-rest|A base URL that will be used for connections to the Camunda Engine REST API.| -|engines.${engineAlias}.importEnabled|true|Determines whether this instance of Optimize should import definition & historical data from this engine.| -|engines.${engineAlias}.eventImportEnabled|false|Determines whether this instance of Optimize should convert historical data to event data usable for event based processes.| -|engines.${engineAlias}.authentication.enabled|false|Toggles basic authentication on or off. When enabling basic authentication, please be aware that you also need to adjust the values of the user and password.| -|engines.${engineAlias}.authentication.user||When basic authentication is enabled, this user is used to authenticate against the engine.

    Note: when enabled, it is required that the user has
    • READ_HISTORY permission on the Process and Decision Definition resources
    • READ permission on *all* ("*") Authorization, Group, User, Tenant, Deployment & User Operation Log resources
    to enable users to log in and Optimize to import the engine data.| -|engines.${engineAlias}.authentication.password||When basic authentication is enabled, this password is used to authenticate against the engine.| -|engines.${engineAlias}.webapps.endpoint|http://localhost:8080/camunda|Defines the endpoint where the Camunda webapps are found. This allows Optimize to directly link to the other Camunda Web Applications, e.g. to jump from Optimize directly to a dedicated process instance in Cockpit| -|engines.${engineAlias}.webapps.enabled|true|Enables/disables linking to other Camunda Web Applications| - -### Engine common settings - -Settings used by Optimize, which are common among all configured engines, such as -REST API endpoint locations, timeouts, etc. - -| YAML Path | Default Value | Description | -|-----------------------------------------------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| engine-commons.connection.timeout | 0 | Maximum time in milliseconds without connection to the engine that Optimize should wait until a timeout is triggered. If set to zero, no timeout will be triggered. | -| engine-commons.read.timeout | 0 | Maximum time a request to the engine should last before a timeout triggers. A value of zero means to wait an infinite amount of time. | -| import.data.activity-instance.maxPageSize | 10000 | Determines the page size for historic activity instance fetching. | -| import.data.incident.maxPageSize | 10000 | Determines the page size for historic incident fetching. | -| import.data.process-definition-xml.maxPageSize | 2 | Determines the page size for process definition XML model fetching. Should be a low value, as large models will lead to memory or timeout problems. | -| import.data.process-definition.maxPageSize | 10000 | Determines the page size for process definition entities fetching. | -| import.data.process-instance.maxPageSize | 10000 | Determines the page size for historic decision instance fetching. | -| import.data.variable.maxPageSize | 10000 | Determines the page size for historic variable instance fetching. | -| import.data.variable.includeObjectVariableValue | true | Controls whether Optimize fetches the serialized value of object variables from the Camunda Runtime REST API. By default, this is active for backwards compatibility. If no variable plugin to handle object variables is installed, it can be turned off to reduce the overhead of the variable import.

    Note: Disabling the object variable value transmission is only effective with Camunda Platform 7.13.11+, 7.14.5+ and 7.15.0+. | -| import.data.user-task-instance.maxPageSize | 10000 | Determines the page size for historic User Task instance fetching. | -| import.data.identity-link-log.maxPageSize | 10000 | Determines the page size for historic identity link log fetching. | -| import.data.decision-definition-xml.maxPageSize | 2 | Determines the page size for decision definition xml model fetching. Should be a low value, as large models will lead to memory or timeout problems. | -| import.data.decision-definition.maxPageSize | 10000 | Determines the page size for decision definition entities fetching. | -| import.data.decision-instance.maxPageSize | 10000 | Overwrites the maximum page size for historic decision instance fetching. | -| import.data.tenant.maxPageSize | 10000 | Overwrites the maximum page size for tenant fetching. | -| import.data.group.maxPageSize | 10000 | Overwrites the maximum page size for groups fetching. | -| import.data.authorization.maxPageSize | 10000 | Overwrites the maximum page size for authorizations fetching. | -| import.data.dmn.enabled | true | Determines if the DMN/decision data, such as decision definitions and instances, should be imported. | -| import.data.user-task-worker.enabled | true | Determines if the User Task worker data, such as assignee or candidate group of a User Task, should be imported. | -| import.data.user-task-worker.metadata.includeUserMetaData | true | Determines whether Optimize imports and displays assignee user metadata, otherwise only the user id is shown. | -| import.data.user-task-worker.metadata.cronTrigger | `0 */3 * * *` | Cron expression for when to fully refresh the internal metadata cache, it defaults to every third hour. Otherwise deleted assignees/candidateGroups or metadata changes are not reflected in Optimize. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. For details on the format please refer to: Cron Expression Description Spring Cron Expression Documentation | -| import.data.user-task-worker.metadata.maxPageSize | 10000 | The max page size when multiple users or groups are iterated during the metadata refresh. | -| import.data.user-task-worker.metadata.maxEntryLimit | 100000 | The entry limit of the cache that holds the metadata, if you need more entries you can increase that limit. When increasing the limit, keep in mind to account for that by increasing the JVM heap memory as well. Please refer to the "Adjust Optimize heap size" documentation. | -| import.skipDataAfterNestedDocLimitReached | false | Some data can no longer be imported to a given document if its number of nested documents has reached the configured limit. Enable this setting to skip this data during import if the nested document limit has been reached. | -| import.elasticsearchJobExecutorThreadCount | 1 | Number of threads being used to process the import jobs per data type that are writing data to elasticsearch. | -| import.elasticsearchJobExecutorQueueSize | 5 | Adjust the queue size of the import jobs per data type that store data to elasticsearch. If the value is too large it might cause memory problems. | -| import.handler.backoff.interval | 5000 | Interval in milliseconds which is used for the backoff time calculation. 
| -| import.handler.backoff.max | 15 | Once all pages are consumed, the import scheduler component will start scheduling fetching tasks in increasing periods of time, controlled by "backoff" counter. | -| import.handler.backoff.isEnabled | true | Tells if the backoff is enabled of not. | -| import.indexType | import-index | The name of the import index type. | -| import.importIndexStorageIntervalInSec | 10 | States how often the import index should be stored to Elasticsearch. | -| import.currentTimeBackoffMilliseconds | 300000 | This is the time interval the import backs off from the current tip of the time during the ongoing import cycle. This ensures that potentially missed concurrent writes in the engine are reread going back by the amount of this time interval. | -| import.identitySync.includeUserMetaData | true | Whether to include metaData (firstName, lastName, email) when synchronizing users. If disabled only user IDs will be shown on user search and in collection permissions. | -| import.identitySync.collectionRoleCleanupEnabled | false | Whether collection role cleanup should be performed. If enabled, users that no longer exist in the identity provider will be automatically removed from collection permissions. | -| import.identitySync.cronTrigger | `0 */2 * * *` | Cron expression for when the identity sync should run, defaults to every second hour. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here.

    For details on the format please refer to:
    • [Cron Expression Description](https://en.wikipedia.org/wiki/Cron)
    • [Spring Cron Expression Documentation](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/scheduling/support/CronSequenceGenerator.html)
    | -| import.identitySync.maxPageSize | 10000 | The max page size when multiple users or groups are iterated during the import. | -| import.identitySync.maxEntryLimit | 100000 | The entry limit of the user/group search cache. When increasing the limit, keep in mind to account for this by increasing the JVM heap memory as well. Please refer to the "Adjust Optimize heap size" documentation on how to configure the heap size. | - -### Elasticsearch - -Settings related to Elasticsearch. - -#### Connection settings - -Everything that is related to building the connection to Elasticsearch. - -Please note that you can define a number of connection points -in a cluster. Therefore, everything that is under `es.connection.nodes` is a list of nodes Optimize can connect to. -If you have built an Elasticsearch cluster with several nodes it is recommended to define several connection points so that -if one node fails, Optimize is still able to talk to the cluster. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|es.connection.timeout|10000|Maximum time without connection to Elasticsearch that Optimize should wait until a timeout triggers.| -|es.connection.responseConsumerBufferLimitInMb|100|Maximum size of the Elasticsearch response consumer heap buffer. This can be increased to resolve errors from Elasticsearch relating to the entity content being too long| -|es.connection.nodes[*].host|localhost|The address/hostname under which the Elasticsearch node is available.| -|es.connection.nodes[*].httpPort|9200|A port number used by Elasticsearch to accept HTTP connections.| -|es.connection.proxy.enabled|false|Whether an HTTP proxy should be used for requests to Elasticsearch.| -|es.connection.proxy.host|null|The proxy host to use, must be set if es.connection.proxy.enabled = true.| -|es.connection.proxy.port|null|The proxy port to use, must be set if es.connection.proxy.enabled = true.| -|es.connection.proxy.sslEnabled|false|Whether this proxy is using a secured connection (HTTPS).| - - -#### Index settings - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|es.settings.index.prefix|optimize|The prefix prepended to all Optimize index and alias names. Custom values allow to operate multiple isolated Optimize instances on one Elasticsearch cluster.

    NOTE: Changing this after Optimize has already been run will create new, empty indexes.| -|es.settings.index.number_of_replicas|1|How often data should be replicated to handle node failures.| -|es.settings.index.number_of_shards|1|How many shards should be used in the cluster for process instance and decision instance indices. All other indices will be made up of a single shard.

    Note: this property only applies the first time Optimize is started and the schema/mapping is deployed on Elasticsearch. If you want this property to take effect again, you need to delete all indices (and with that all data) and restart Optimize.| -|es.settings.index.refresh_interval|2s|How long Elasticsearch waits until the documents are available for search. A positive value defines the duration in seconds. A value of -1 means that a refresh needs to be done manually.| -|es.settings.index.nested_documents_limit|10000|Optimize uses nested documents to store list information such as activities or variables belonging to a process instance. This setting defines the maximum number of activities/variables/incidents that a single process instance can contain. This limit helps to prevent out of memory errors and should be used with care. For more information, please refer to the Elasticsearch documentation on this topic.| - - -#### Elasticsearch Security - -Define a secured connection to be able to communicate with a secured Elasticsearch instance. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|es.security.username||The basic authentication (x-pack) username.| -|es.security.password||The basic authentication (x-pack) password.| -|es.security.ssl.enabled|false|Used to enable or disable TLS/SSL for the HTTP connection.| -|es.security.ssl.certificate||The path to a PEM encoded file containing the certificate (or certificate chain) that will be presented to clients when they connect.| -|es.security.ssl.certificate_authorities|[ ]|A list of paths to PEM encoded CA certificate files that should be trusted, e.g. ['/path/to/ca.crt'].

    Note: if you are using a public CA that is already trusted by the Java runtime, you do not need to set the certificate_authorities.| - -### Email - -Settings for the email server to send email notifications, e.g. when an alert is triggered. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|email.enabled|false|A switch to enable the email sending functionality.| -|email.address||Email address that can be used to send notifications.| -|email.hostname||The smtp server name.| -|email.port|587|The smtp server port. This one is also used as SSL port for the security connection.| -|email.authentication.enabled||A switch to enable email server authentication.| -|email.authentication.username||Username of your smtp server.| -|email.authentication.password||Corresponding password to the given user of your smtp server.| -|email.authentication.securityProtocol||States how the connection to the server should be secured. Possible values are 'NONE', 'STARTTLS' or 'SSL/TLS'.| - -### Alert Notification Webhooks - -Settings for webhooks which can receive custom alert notifications. You can configure multiple webhooks which will be available to select from when creating or editing alerts. Each webhook configuration should have a unique human readable name which will appear in the Optimize UI. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|webhookAlerting.webhooks.${webhookName}.url||The URL of the webhook.| -|webhookAlerting.webhooks.${webhookName}.headers||A map of the headers of the request to be sent to the webhook.| -|webhookAlerting.webhooks.${webhookName}.httpMethod||The HTTP Method of the request to be sent to the webhook.| -|webhookAlerting.webhooks.${webhookName}.defaultPayload||The payload of the request to be sent to the webhook. This should include placeholder keys that allow you to define dynamic content. See [Alert Webhook Payload Placeholders](../webhooks#alert-webhook-payload-placeholders) for available values.| -|webhookAlerting.webhooks.${webhookName}.proxy.enabled||Whether an HTTP proxy should be used for requests to the webhook URL.| -|webhookAlerting.webhooks.${webhookName}.proxy.host||The proxy host to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true.| -|webhookAlerting.webhooks.${webhookName}.proxy.port||The proxy port to use, must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true.| -|webhookAlerting.webhooks.${webhookName}.proxy.sslEnabled||Whether this proxy is using a secured connection (HTTPS). Must be set if webhookAlerting.webhooks.${webhookName}.proxy.enabled = true.| - -### History Cleanup Settings - -Settings for automatic cleanup of historic process/decision instances based on their end time. - -| YAML Path | Default Value | Description | -| --- | --- | --- | -| historyCleanup.cronTrigger | `'0 1 * * *'` | Cron expression to schedule when the cleanup should be executed, defaults to 01:00 A.M. As the cleanup can cause considerable load on the underlying Elasticsearch database it is recommended to schedule it outside of office hours. You can either use the default Cron (5 fields) or the Spring Cron (6 fields) expression format here. | -| historyCleanup.ttl | 'P2Y' | Global time to live (ttl) period for process/decision/event data. The relevant property differs between entities. For process data, it's the `endTime` of the process instance. For decision data, it's the `evaluationTime` and for ingested events it's the `time` field. The format of the string is ISO_8601 duration. 
The default value is 2 years. For details on the notation refer to: [https://en.wikipedia.org/wiki/ISO_8601#Durations](https://en.wikipedia.org/wiki/ISO_8601#Durations) Note: The time component of the ISO_8601 duration is not supported. Only years (Y), months (M) and days (D) are. | -| historyCleanup.processDataCleanup.enabled | false | A switch to activate the history cleanup of process data. \[true/false\] | -| historyCleanup.processDataCleanup.cleanupMode | 'all' | Global type of the cleanup to perform for process instances, possible values: 'all' - delete everything related and including the process instance that passed the defined ttl 'variables' - only delete variables of a process instance Note: This doesn't affect the decision instance cleanup which always deletes the whole instance. | -| historyCleanup.processDataCleanup.batchSize | 10000 | Defines the batch size in which Camunda engine process instance data gets cleaned up. It may be reduced if requests fail due to request size constraints. In most cases, this should not be necessary and has only been experienced when connecting to an AWS Elasticsearch instance. | -| historyCleanup.processDataCleanup.perProcessDefinitionConfig | | A list of process definition specific configuration parameters that will overwrite the global cleanup settings for the specific process definition identified by its ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.ttl | | Time to live to use for process instances of the process definition with the ${key}. | -| historyCleanup.processDataCleanup .perProcessDefinitionConfig.${key}.cleanupMode | | Cleanup mode to use for process instances of the process definition with the ${key}. | -| historyCleanup.decisionDataCleanup.enabled | false | A switch to activate the history cleanup of decision data. \[true/false\] | -| historyCleanup.decisionDataCleanup.perDecisionDefinitionConfig | | A list of decision definition specific configuration parameters that will overwrite the global cleanup settings for the specific decision definition identified by its ${key}. | -| historyCleanup.decisionDataCleanup .perDecisionDefinitionConfig.${key}.ttl | | Time to live to use for decision instances of the decision definition with the ${key}. | -| historyCleanup.ingestedEventCleanup.enabled | false | A switch to activate the history cleanup of ingested event data. \[true/false\] | - -### Localization - -Define the languages that can be used by Optimize. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|localization.availableLocales|['en','de']|All locales available in the Optimize Frontend.

    Note: for languages other than the default there must be a `.json` file available under ./config/localization.| -|localization.fallbackLocale|'en'|The fallback locale used if there is a locale requested that is not available in availableLocales. The fallbackLocale is required to be present in localization.availableLocales.| - -### UI Configuration - -Customize the Optimize UI e.g. by adjusting the logo, head background color etc. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|ui.header.textColor|'dark'|Determines the color theme of the text in the header. Currently 'dark' and 'light' are supported.| -|ui.header.pathToLogoIcon|'logo/camunda_icon.svg'|Path to the logo that is displayed in the header of Optimize. Path can be: relative: starting from the config folder you can provide a relative path. absolute: full path in the file system. Supported image formats can be found here.| -|ui.header.backgroundColor|'#FFFFFF'|A hex encoded color that should be used as background color for the header. Default color is white.| -|ui.logoutHidden|false|Setting this property to true will hide the logout option from the user menu. This is useful if you are using single sign-on and it is not possible for users to logout.| - -### Event Based Process Configuration - -Configuration of the Optimize event based process feature. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|eventBasedProcess.authorizedUserIds|[ ]|A list of userIds that are authorized to manage (Create, Update, Publish & Delete) event based processes.| -|eventBasedProcess.authorizedGroupIds|[ ]|A list of groupIds that are authorized to manage (Create, Update, Publish & Delete) event based processes.| -|eventBasedProcess.eventImport.enabled|false|Determines whether this Optimize instance performs event based process instance import.| -|eventBasedProcess.eventImport.maxPageSize|5000|The batch size of events being correlated to process instances of event based processes.| -|eventBasedProcess.eventIndexRollover.scheduleIntervalInMinutes|10|The interval in minutes at which to check whether the conditions for a rollover of eligible indices are met, triggering one if required. This value should be greater than 0.| -|eventBasedProcess.eventIndexRollover.maxIndexSizeGB|50|Specifies the maximum total index size for events (excluding replicas). When shards get too large, query performance can slow down and rolling over an index can bring an improvement. Using this configuration, a rollover will occur when triggered and the current event index size matches or exceeds the maxIndexSizeGB threshold.| - -### Event Ingestion REST API Configuration - -Configuration of the Optimize [Event Ingestion REST API](../../rest-api/event-ingestion) for [Event Based Processes](../../../components/userguide/additional-features/event-based-processes.md). - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|eventBasedProcess.eventIngestion.maxBatchRequestBytes|10485760|Content length limit for an ingestion REST API bulk request in bytes. Requests will be rejected when exceeding that limit. Defaults to 10MB. 
In case this limit is raised you should carefully tune the heap memory accordingly, see Adjust Optimize heap size on how to do that.| -|eventBasedProcess.eventIngestion.maxRequests|5|The maximum number of event ingestion requests that can be serviced at any given time.| - -### External Variable Ingestion REST API Configuration - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|externalVariable.import.enabled|false|Controls whether external ingested variable data is processed and imported to process instance data.| -|externalVariable.import.maxPageSize|10000|Determines the page size for the import of ingested external variable data to process instance data.| -|externalVariable.variableIndexRollover.maxIndexSizeGB|50|Specifies the maximum size for the external variable index. When shards get too large, query performance can slow down and rolling over an index can bring an improvement. Using this configuration, a rollover will occur when the current external variable index size matches or exceeds the maxIndexSizeGB threshold.| -|externalVariable.variableIndexRollover.scheduleIntervalInMinutes|10|The interval in minutes at which to check whether the conditions for a rollover of the external variable index are met, triggering one if required. This value should be greater than 0.| -|externalVariable.variableIngestion.maxBatchRequestBytes|10485760|Content length limit for a variable ingestion REST API bulk request in bytes. Requests will be rejected when exceeding that limit. Defaults to 10MB. In case this limit is raised you should carefully tune the heap memory accordingly, see Adjust Optimize heap size on how to do that.| -|externalVariable.variableIngestion.maxRequests|5|The maximum number of variable ingestion requests that can be serviced at any given time.| - - -### Telemetry Configuration - -Configuration of initial telemetry settings. - -|YAML Path|Default Value|Description| -|--- |--- |--- | -|telemetry.initializeTelemetry|false|Decides whether telemetry is initially enabled or disabled when Optimize starts. Thereafter, telemetry can be turned on and off in the UI by superusers. If enabled, information about the setup and usage of the Optimize is sent to remote Camunda servers for the sake of analytical evaluation. When enabled, the following information is sent every 24 hours: Optimize version, License Key, Optimize installation ID, Elasticsearch version.

    Legal note: Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. Camunda cannot be held responsible in the event of unauthorized installation or activation of this function.| - -### Other - -Settings of plugin subsystem serialization format, variable import, Camunda endpoint. - -| YAML Path | Default Value | Description | -| --- | --- | --- | -| plugin.directory | ./plugin | Defines the directory path in the local Optimize file system which should be checked for plugins. | -| plugin.variableImport.basePackages | | Look in the given base package list for variable import adaption plugins. If empty, the import is not influenced. | -| plugin.authenticationExtractor.basePackages | | Looks in the given base package list for authentication extractor plugins. If empty, the standard Optimize authentication mechanism is used. | -| plugin.engineRestFilter.basePackages | | Look in the given base package list for engine rest filter plugins. If empty, the REST calls are not influenced. | -| plugin.decisionInputImport.basePackages | | Look in the given base package list for Decision input import adaption plugins. If empty, the import is not influenced. | -| plugin.decisionOutputImport.basePackages | | Look in the given base package list for Decision output import adaption plugins. If empty, the import is not influenced. | -| plugin.elasticsearchCustomHeader.basePackages | | Look in the given base package list for Elasticsearch custom header plugins. If empty, Elasticsearch requests are not influenced. | -| serialization.engineDateFormat | yyyy-MM-dd'T'HH:mm:ss.SSSZ | Define a custom date format that should be used (should be the same as in the engine). | -| export.csv.limit | 1000 | Maximum number of records returned by CSV export.

    Note: Increasing this value comes at a memory cost for the Optimize application that varies based on the actual data. As a rough guideline, an export of a 50000 raw data report records containing 8 variables on each instance can cause temporary heap memory peaks of up to ~200MB with the actual CSV file having a size of ~20MB. Please adjust the heap memory accordingly, see [Adjust Optimize heap size](../setup/installation.md#adjust-optimize-heap-size) on how to do that. | -| export.csv.delimiter | , | The delimiter used for the CSV export. The value defaults to a comma, however other common CSV delimiters such as semicolons (";") and tabs ("\\t") can also be used. | -| sharing.enabled | true | Enable/disable the possibility to share reports and dashboards. | \ No newline at end of file diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/history-cleanup.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/history-cleanup.md deleted file mode 100644 index 0b526af3d8f..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/history-cleanup.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -id: history-cleanup -title: "History cleanup" -description: "Make sure that old data is automatically removed from Optimize." ---- - -Camunda Platform 7 only - -To satisfy data protection laws or just for general storage management purposes, Optimize provides an automated cleanup functionality. - -:::note -By default, the history cleanup is disabled in Optimize. Before enabling it, you should consider the type of cleanup and time to live period that fits to your needs. Otherwise, historic data intended for analysis might get lost irreversibly. - -The default [engine history cleanup](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup) works differently than the one in Optimize due to the possible cleanup strategies. The current implementation in Optimize is equivalent to the [end time strategy](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#end-time-based-strategy) of the Engine. -::: - -## Setup - -The most important settings are `cronTrigger` and `ttl`; their global default configuration is the following: - -``` -historyCleanup: - cronTrigger: '0 1 * * *' - ttl: 'P2Y' -``` - -`cronTrigger` - defines at what interval and when the history cleanup should be performed in the format of a cron expression. The default is 1AM every day. To avoid any impact on daily business, it is recommended to schedule the cleanup outside of business hours. - -See the [Configuration Description](../configuration/#history-cleanup-settings) for further insights into this property and its format. - -`ttl` - is the global time to live period of data contained in Optimize. The field that defines the age of a particular entity differs between process, decision, and event data. Refer to the corresponding subsection in regard to that. -The default value is `'P2Y'`, which means by default data older than _2 years_ at the point in time when the cleanup is executed gets cleaned up. -For details on the notation, see the [Configuration Description](../configuration/#history-cleanup-settings) of the ttl property. - -All the remaining settings are entity type specific and will be explained in the following subsections. - -### Process data cleanup - -The age of process instance data is determined by the `endTime` field of each process instance. 
Running instances are never cleaned up. - -To enable the cleanup of process instance data, the `historyCleanup.processDataCleanup.enabled` property needs to be set to `true`. - -Another important configuration parameter for process instance cleanup is the `historyCleanup.processDataCleanup.cleanupMode`. It determines what in particular gets deleted when a process instance is cleaned up. The default value of `all` results in the whole process instance being deleted. -For other options, review the [configuration description](../configuration/#history-cleanup-settings) of the `historyCleanup.processDataCleanup.cleanupMode` property. - -To set up a process definition-specific `ttl` or different `cleanupMode` you can also provide process specific settings using the `perProcessDefinitionConfig` list which overrides the global settings for the corresponding definition key. - -In this example, process instances of the key `MyProcessDefinitionKey` would be cleaned up after two months instead of two years, and when the cleanup is performed, only their associated variables would be deleted instead of the complete process instance. - -``` -historyCleanup: - ttl: 'P2Y' - processDataCleanup: - enabled: true - cleanupMode: 'all' - perProcessDefinitionConfig: - 'MyProcessDefinitionKey': - ttl: 'P2M' - cleanupMode: 'variables' -``` - -### Decision data cleanup - -The age of decision instance data is determined by the `evaluationTime` field of each decision instance. - -To enable the cleanup of decision instance data, the `historyCleanup.decisionDataCleanup.enabled` property needs to be set to `true`. - -Like for the [Process Data Cleanup](#process-data-cleanup), it is possible to configure a decision definition specific `ttl` using the `perDecisionDefinitionConfig` list. - -``` -historyCleanup: - ttl: 'P2Y' - decisionDataCleanup: - enabled: true - perDecisionDefinitionConfig: - 'myDecisionDefinitionKey': - ttl: 'P3M' -``` - -### Ingested event cleanup - -The age of ingested event data is determined by the [`time`](../../rest-api/event-ingestion/#request-body) field provided for each event at the time of ingestion. - -To enable the cleanup of event data, the `historyCleanup.ingestedEventCleanup.enabled` property needs to be set to `true`. - -``` -historyCleanup: - ttl: 'P2Y' - ingestedEventCleanup: - enabled: true -``` - -:::note -The ingested event cleanup does not cascade down to potentially existing [event-based processes](../../../components/userguide/additional-features/event-based-processes.md) that may contain data originating from ingested events. To make sure data of ingested events is also removed from event-based processes, you need to enable the [Process Data Cleanup](#process-data-cleanup) as well. -::: - -## Example - -Here is an example of what a complete cleanup configuration might look like: - -``` -historyCleanup: - cronTrigger: '0 1 * * 0' - ttl: 'P1Y' - processDataCleanup: - enabled: true - cleanupMode: 'variables' - perProcessDefinitionConfig: - 'VeryConfidentProcess': - ttl: 'P1M' - cleanupMode: 'all' - 'KeepTwoMonthsProcess': - ttl: 'P2M' - decisionDataCleanup: - enabled: true - perDecisionDefinitionConfig: - 'myDecisionDefinitionKey': - ttl: 'P3M' - ingestedEventCleanup: - enabled: true -``` - -The above configuration results in the following setup: - -- The cleanup is scheduled to run every Sunday at 1AM. -- The global `ttl` of any data is one year. -- The process data cleanup is enabled. 
-- The `cleanupMode` performed on all process instances that passed the `ttl` period is just clearing their variable data but keeping the overall instance data like activityInstances. -- There is a process specific setup for the process definition key `'VeryConfidentProcess'` that has a special `ttl` of one month and those will be deleted completely due the specific `cleanupMode: 'all'` configuration for them. -- There is another process specific setup for the process definition key `'KeepTwoMonthsProcess'` that has a special `ttl` of two months. -- The decision data cleanup is enabled. -- There is a decision definition specific setup for the definition key `myDecisionDefinitionKey` that has a special `ttl` of three months. -- The ingested event cleanup is enabled. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-GrantAccessAuthorizations.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-GrantAccessAuthorizations.png deleted file mode 100644 index bc16527ed3a..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-GrantAccessAuthorizations.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-GrantDefinitionAuthorizations.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-GrantDefinitionAuthorizations.png deleted file mode 100644 index e8f26b37477..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-GrantDefinitionAuthorizations.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-RevokeDefinitionAuthorization.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-RevokeDefinitionAuthorization.png deleted file mode 100644 index bb0d4fc94a7..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-RevokeDefinitionAuthorization.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-RevokeGroupAccess.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-RevokeGroupAccess.png deleted file mode 100644 index 78e4bbc0b04..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Admin-RevokeGroupAccess.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Clustered-Engine-Distributed-Database.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Clustered-Engine-Distributed-Database.png deleted file mode 100644 index abac7ae0c86..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Clustered-Engine-Distributed-Database.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Clustered-Engine-Shared-Database.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Clustered-Engine-Shared-Database.png deleted file mode 100644 index a748f087080..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Clustered-Engine-Shared-Database.png and /dev/null differ diff 
--git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Multiple-Engine-Distributed-Database.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Multiple-Engine-Distributed-Database.png deleted file mode 100644 index 6912a0dc1ff..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Multiple-Engine-Distributed-Database.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Multiple-Engine-Shared-Database.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Multiple-Engine-Shared-Database.png deleted file mode 100644 index 632069a2338..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Multiple-Engine-Shared-Database.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Optimize-Clustering.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Optimize-Clustering.png deleted file mode 100644 index 868a649b5ac..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/Optimize-Clustering.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/admin-tenant-authorization.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/admin-tenant-authorization.png deleted file mode 100644 index abf4a4d12e9..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/admin-tenant-authorization.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/license-guide.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/license-guide.png deleted file mode 100644 index 0b34971b70e..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/license-guide.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/shared-elasticsearch-cluster.png b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/shared-elasticsearch-cluster.png deleted file mode 100644 index f430ee9fcf2..00000000000 Binary files a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/img/shared-elasticsearch-cluster.png and /dev/null differ diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/installation.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/installation.md deleted file mode 100644 index e5c240b2355..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/installation.md +++ /dev/null @@ -1,283 +0,0 @@ ---- -id: installation -title: "Installation" -description: "Read about how to install Optimize." ---- - -Camunda Platform 7 only - -## Installation guide - -This document describes the installation process of the Camunda Optimize distribution, as well as various configuration possibilities available after initial installation. - -Before proceeding with the installation, read the article about [supported environments]($docs$/reference/supported-environments/). 
- -### Prerequisites - -If you intend to run Optimize on your local machine, ensure you have a supported JRE (Java Runtime Environment) installed; best refer to the [Java Runtime]($docs$/reference/supported-environments/#java-runtime) section on which runtimes are supported. - -### Demo Distribution with Elasticsearch - -The Optimize Demo distribution comes with an Elasticsearch instance. The supplied Elasticsearch server is not customized or tuned by Camunda in any manner. It is intended to make the process of trying out Optimize as easy as possible. The only requirement in addition to the demo distribution itself is a running engine (ideally on localhost). - -To install the demo distribution containing Elasticsearch, download the archive with the latest version from the [download page](https://docs.camunda.org/enterprise/download/#camunda-optimize) and extract it to the desired folder. After that, start Optimize by running the script `optimize-demo.sh` on Linux and Mac: - -```bash -./optimize-demo.sh -``` - -or `optimize-demo.bat` on Windows: - -```batch -.\optimize-demo.bat -``` - -The script ensures that a local version of Elasticsearch is started and waits until it has become available. Then, it starts Optimize, ensures it is running, and automatically opens a tab in a browser to make it very convenient for you to try out Optimize. - -In case you need to start an Elasticsearch instance only, without starting Optimize (e.g. to perform a reimport), you can use the `elasticsearch-startup.sh` script: - -```bash -./elasticsearch-startup.sh -``` - -or `elasticsearch-startup.bat` on Windows: - -```batch -.\elasticsearch-startup.bat -``` - -### Production distribution without Elasticsearch - -This distribution is intended to be used in production. To install it, first [download](https://docs.camunda.org/enterprise/download/#camunda-optimize) the production archive, which contains all the required files to startup Camunda Optimize without Elasticsearch. After that, [configure the Elasticsearch connection](#elasticsearch-configuration) to connect to your pre-installed Elasticsearch instance and [configure the Camunda Platform connection](#camunda-platform-configuration) to connect Optimize to your running engine. You can then start your Optimize instance by running the script `optimize-startup.sh` on Linux and Mac: - -```bash -./optimize-startup.sh -``` - -or `optimize-startup.bat` on Windows: - -```batch -.\optimize-startup.bat -``` - -### Production Docker image without Elasticsearch - -The Optimize Docker images can be used in production. They are hosted on our dedicated Docker registry and are available to enterprise customers who bought Optimize only. You can browse the available images in our [Docker registry](https://registry.camunda.cloud) after logging in with your credentials. - -Make sure to log in correctly: - -``` -$ docker login registry.camunda.cloud -Username: your_username -Password: ****** -Login Succeeded -``` - -After that, [configure the Elasticsearch connection](#elasticsearch-configuration) to connect to your pre-installed Elasticsearch instance and [configure the Camunda Platform connection](#camunda-platform-configuration) to connect Optimize to your running engine. 
For very simple use cases with only one Camunda Engine and one Elasticsearch node, you can use environment variables instead of mounting configuration files into the Docker container: - -#### Getting started with the Optimize Docker image - -##### Full local setup - -To start the Optimize Docker image and connect to an already locally running Camunda Platform as well as Elasticsearch instance you could run the following command: - -``` -docker run -d --name optimize --network host \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -##### Connect to remote Camunda Platform and Elasticsearch - -If, however, your Camunda Platform as well as Elasticsearch instance reside on a different host, you may provide their destination via the corresponding environment variables: - -``` -docker run -d --name optimize -p 8090:8090 -p 8091:8091 \ - -e OPTIMIZE_CAMUNDABPM_REST_URL=http://yourCamBpm.org/engine-rest \ - -e OPTIMIZE_ELASTICSEARCH_HOST=yourElasticHost \ - -e OPTIMIZE_ELASTICSEARCH_HTTP_PORT=9200 \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -#### Available environment variables - -There is only a limited set of configuration keys exposed via environment variables. These mainly serve the purpose of testing and exploring Optimize. For production configurations, we recommend following the setup in documentation on [configuration using a `environment-config.yaml` file](#configuration-using-a-yaml-file). - -The most important environment variables you may have to configure are related to the connection to the Camunda Platform REST API, as well as Elasticsearch: - -- `OPTIMIZE_CAMUNDABPM_REST_URL`: The base URL that will be used for connections to the Camunda Engine REST API (default: `http://localhost:8080/engine-rest`) -- `OPTIMIZE_CAMUNDABPM_WEBAPPS_URL`: The endpoint where to find the Camunda web apps for the given engine (default: `http://localhost:8080/camunda`) -- `OPTIMIZE_ELASTICSEARCH_HOST`: The address/hostname under which the Elasticsearch node is available (default: `localhost`) -- `OPTIMIZE_ELASTICSEARCH_HTTP_PORT`: The port number used by Elasticsearch to accept HTTP connections (default: `9200`) - -A complete sample can be found within [Connect to remote Camunda Platform and Elasticsearch](#connect-to-remote-camunda-platform-and-elasticsearch). - -Furthermore, there are also environment variables specific to the [event-based process](../../../components/userguide/additional-features/event-based-processes.md) feature you may make use of: - -- `OPTIMIZE_CAMUNDA_BPM_EVENT_IMPORT_ENABLED`: Determines whether this instance of Optimize should convert historical data to event data usable for event-based processes (default: `false`) -- `OPTIMIZE_EVENT_BASED_PROCESSES_USER_IDS`: An array of user ids that are authorized to administer event-based processes (default: `[]`) -- `OPTIMIZE_EVENT_BASED_PROCESSES_IMPORT_ENABLED`: Determines whether this Optimize instance performs event-based process instance import. (default: `false`) -- `OPTIMIZE_EVENT_INGESTION_ACCESS_TOKEN`: Secret token to be provided on the [Ingestion REST API](../../rest-api/event-ingestion) when ingesting data. - -Additionally, there are also runtime related environment variables such as: - -- `OPTIMIZE_JAVA_OPTS`: Allows you to configure/overwrite Java Virtual Machine (JVM) parameters; defaults to `-Xms1024m -Xmx1024m -XX:MetaspaceSize=256m -XX:MaxMetaspaceSize=256m`. 
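
As an illustration only (not an officially documented example), the connection, event-based process, and runtime variables listed above could be combined into a single `docker run` command. The host names, the ingestion token, and the increased heap settings below are placeholder values to be replaced with your own:

```
docker run -d --name optimize -p 8090:8090 -p 8091:8091 \
  -e OPTIMIZE_CAMUNDABPM_REST_URL=http://yourCamBpm.org/engine-rest \
  -e OPTIMIZE_CAMUNDABPM_WEBAPPS_URL=http://yourCamBpm.org/camunda \
  -e OPTIMIZE_ELASTICSEARCH_HOST=yourElasticHost \
  -e OPTIMIZE_ELASTICSEARCH_HTTP_PORT=9200 \
  -e OPTIMIZE_CAMUNDA_BPM_EVENT_IMPORT_ENABLED=true \
  -e OPTIMIZE_EVENT_BASED_PROCESSES_IMPORT_ENABLED=true \
  -e OPTIMIZE_EVENT_INGESTION_ACCESS_TOKEN=yourIngestionToken \
  -e OPTIMIZE_JAVA_OPTS="-Xms2048m -Xmx2048m -XX:MetaspaceSize=256m -XX:MaxMetaspaceSize=256m" \
  registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}}
```

Keep in mind that these variables only cover a small part of the configuration; anything beyond them should go into a mounted `environment-config.yaml` as described below.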
- -You can also adjust logging levels using environment variables as described in the [logging configuration](../configuration#logging). - -#### License key file - -If you want the Optimize Docker container to automatically recognize your [license key file](./license.md), you can use standard [Docker means](https://docs.docker.com/storage/volumes/) to make the file with the license key available inside the container. Replacing the `{{< absolutePathOnHostToLicenseFile >}}` with the absolute path to the license key file on your host can be done with the following command: - -``` -docker run -d --name optimize -p 8090:8090 -p 8091:8091 \ - -v {{< absolutePathOnHostToLicenseFile >}}:/optimize/config/OptimizeLicense.txt:ro \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -#### Configuration using a yaml file - -In a production environment, the limited set of [environment variables](#available-environment-variables) is usually not enough so that you want to prepare a custom `environment-config.yaml` file. Refer to the [Configuration](../configuration) section of the documentation for the available configuration parameters. - -Similar to the [license key file](#license-key-file), you then need to mount this configuration file into the Optimize Docker container to apply it. Replacing the `{{< absolutePathOnHostToConfigurationFile >}}` with the absolute path to the `environment-config.yaml` file on your host can be done using the following command: - -``` -docker run -d --name optimize -p 8090:8090 -p 8091:8091 \ - -v {{< absolutePathOnHostToConfigurationFile >}}:/optimize/config/environment-config.yaml:ro \ - registry.camunda.cloud/optimize-ee/optimize:{{< currentVersionAlias >}} -``` - -In managed Docker container environments like [Kubernetes](https://kubernetes.io/), you may set this up using [ConfigMaps](https://kubernetes.io/docs/concepts/configuration/configmap/). - -### Usage - -You can start using Optimize right away by opening the following URL in your browser: [http://localhost:8090](http://localhost:8090) - -Then, you can use the users from the Camunda Platform to log in to Optimize. For details on how to configure the user access, consult the [user access management](./user-management.md) section. - -Before you can fully utilize all features of Optimize, you need to wait until all data has been imported. A green circle in the footer indicates when the import is finished. - -### Health - readiness - -To check whether Optimize is ready to use, you can make use of the [health-readiness endpoint](./../rest-api/health-readiness.md), exposed as part of Optimize's REST API. - -### Configuration - -All distributions of Optimize come with a predefined set of configuration options that can be overwritten by the user, based on current environment requirements. To do that, have a look into the folder named `environment`. There are two files, one called `environment-config.yaml` with values that override the default Optimize properties and another called `environment-logback.xml`, which sets the logging configuration. - -You can see all supported values and read about logging configuration [here](./configuration.md). - -#### Optimize web container configuration - -Refer to the [configuration section on container settings](./configuration.md) for more information on how to adjust the Optimize web container configuration. 
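For orientation, a minimal `environment-config.yaml` override of the web container settings might look like the following sketch. The keys mirror the `container` section of the default configuration shown later in this guide; the values are illustrative, not recommendations:

```yaml
# hypothetical environment-config.yaml excerpt overriding container settings
container:
  host: localhost
  ports:
    http: 8090    # set to null/empty to disable plain HTTP
    https: 8091
  # optional URL used when Optimize links to itself, e.g. in alert emails
  accessUrl: null
```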
- -#### Elasticsearch configuration - -You can customize the [Elasticsearch connection settings](./configuration.md#connection-settings) as well as the [index settings](./configuration.md#index-settings). - -#### Camunda Platform configuration - -To perform an import and provide the full set of features, Optimize requires a connection to the REST API of the Camunda engine. For details on how to configure the connection to the Camunda Platform, refer to the [Camunda Platform configuration section](./configuration.md#connection-to-camunda-platform). - -### Import of the data set - -By default, Optimize comes without any data available. To start using all the features of the system, you have to perform a data import from the Camunda Platform. This process is triggered automatically when starting Optimize. - -If you are interested in the details of the import, refer to the dedicated [import overview section](./../optimize-explained/import-guide.md). - -## Hardware resources - -We recommend carefully choosing the hardware resources allocated to the server running Optimize. - -Be aware that Optimize uses data structures that differ from the data stored -by the Camunda Platform Engine. The final amount of space on the hard drive required by Optimize will -depend on your replication settings, but as a rule of thumb, you could expect Optimize to use 30% of the space that -your relational database is using. - -### How your data influences Elasticsearch requirements - -The Elasticsearch requirements are heavily influenced by the makeup of your data set. This is mainly because Optimize creates -one instance index per definition, so the number of indices in your Elasticsearch instance will grow with the number of definitions -you have deployed. - -This is why we recommend a minimum of 1 GB of Elasticsearch heap space to provide for all non-instance indices plus additional -space in relation to how many definitions and instances your data set has. - -By default, Optimize uses [one shard per instance index](../configuration/#index-settings) and performance tests have shown -that a shard size of 10GB is enough for approximately 1 million instances. Elasticsearch recommends aiming for -[20 shards or fewer per GB of heap memory](https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html#shard-count-recommendation), -so you will need 1GB of additional heap memory per 20 definitions. -Elasticsearch also recommends a [shard size between 10 and 50 GB](https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html#shard-size-recommendation), -so if you expect your definitions to have more than 5 million instances, we recommend you increase the number of shards -per instance index accordingly in [Optimize's index configurations](../configuration/#index-settings). - -Note that these guidelines are based on test data that may deviate from yours. If your instance data, for example, includes -a large number of variables, this may result in a larger shard size. - -In this case, we recommend that you test the import with realistic data and adjust the number of shards accordingly. - -### Example scenarios - -:::note Heads Up! -Exact hardware requirements highly depend on a number of factors, such as the size of the data, -network speed, and the current load on the engine and its underlying database. Therefore, we cannot -guarantee that the following requirements will satisfy every use case. 
-::: - -##### 20 Definitions with less than 50k Instances per definition - -We recommend using one shard per instance index, so 20 shards overall for instance indices alone. -Aiming for 20 shards per GB of Elasticsearch JVM heap space results in 1 GB of heap memory in addition to the base requirement of 1 GB. - -Based on performance tests, a shard size of 10 GB should be enough for up to 1 million instances per definition, so you -can expect the instance index shards to be no larger than 10GB. - -- Camunda Optimize: - - 2 CPU Threads - - 512 MB RAM -- Elasticsearch: - - 2 CPU Threads - - 4 GB RAM (2 GB JVM Heap Memory, see [setting JVM heap size](https://www.elastic.co/guide/en/elasticsearch/reference/7.x/advanced-configuration.html#setting-jvm-heap-size)) - - [Local SSD storage recommended](https://www.elastic.co/guide/en/elasticsearch/guide/master/hardware.html#_disks) - -##### 40 Definitions with up to 10 million instances per definition - -We recommend using two shards per instance index, so 80 shards for instance indices alone. -Aiming for 20 shards per GB of Elasticsearch JVM heap space results in 4 GB of heap memory in addition to the base requirement of 1 GB. - -Based on performance tests, a shard size of 10 GB is enough for approximately 1 million instances per definition, so in this scenario, you can expect a shard size of 50 GB for each instance index shard. - -- Camunda Optimize: - - 2 CPU Threads - - 2 GB RAM -- Elasticsearch: - - 4 CPU Threads - - 10 GB RAM (5 GB JVM Heap Memory, see [setting JVM heap size](https://www.elastic.co/guide/en/elasticsearch/reference/7.x/advanced-configuration.html#setting-jvm-heap-size)) - - [Local SSD storage recommended](https://www.elastic.co/guide/en/elasticsearch/guide/master/hardware.html#_disks) - -## Recommended additional configurations - -### Adjust engine heap size - -Sending huge process definition diagrams via the REST API might cause the engine to crash if the engine heap size is inadequately limited. Thus, it is recommended to increase the heap size of the engine to at least 2 GB; for example, by adding the following Java command line property when starting the engine: - -```bash --Xmx2048m -``` - -Also, it is recommended to decrease the [deployment cache size](https://docs.camunda.org/manual/latest/user-guide/process-engine/deployment-cache/#customize-the-maximum-capacity-of-the-cache) to `500`, e.g. by: - -```bash - -``` - -### Adjust Optimize heap size - -By default, Optimize is configured with 1 GB of JVM heap memory. Depending on your setup and actual data, you might still encounter situations where you need more than this default for a seamless operation of Optimize. To increase the maximum heap size, you can set the environment variable `OPTIMIZE_JAVA_OPTS` and provide the desired JVM system properties; for example, for 2 GB of heap: - -```bash -OPTIMIZE_JAVA_OPTS=-Xmx2048m -``` - -### Maximum result limits for queries - -It's possible that engine queries [consume a lot of memory](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-api/#query-maximum-results-limit). To mitigate this risk, you can [limit the number of results](https://docs.camunda.org/manual/latest/reference/deployment-descriptors/tags/process-engine/#queryMaxResultsLimit) a query can return. If you do this, we recommend setting `queryMaxResultsLimit` to `10000` so the Optimize import works without issues. This value should still be low enough that you don't run into problems with the previously mentioned heap configurations.
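As a rough sketch only: if your engine runs with the Camunda Spring Boot starter or Camunda Platform Run, engine configuration properties such as `queryMaxResultsLimit` can usually be set via generic properties. The exact file and key naming depend on your engine distribution and version, so verify against the linked engine documentation:

```yaml
# hypothetical excerpt of the engine's application/default configuration
camunda:
  bpm:
    generic-properties:
      properties:
        # cap the number of results an unbounded engine query may return
        queryMaxResultsLimit: 10000
```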
diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/license.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/license.md deleted file mode 100644 index 0a8427e3ab9..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/license.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: optimize-license -title: "Optimize license key" -description: "When you log in to Optimize for the first time, you are redirected to the license page where you can enter your license key." ---- - -When you log in to Optimize for the first time, you are redirected to the license page. Here, enter your license key to be able to use Camunda Optimize. - -![Optimize license page with no license key in the text field and submit button below](img/license-guide.png) - -Alternatively, you can add a file with the license key to the path `${optimize-root-folder}/config/OptimizeLicense.txt`; it will be automatically loaded to the database unless it already contains a license key. - -If you are using the Optimize Docker images and want Optimize to automatically recognize your license key, refer to the [installation guide](./installation.md#license-key-file) on how to achieve this. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/localization.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/localization.md deleted file mode 100644 index 5b8142b61b6..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/localization.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: localization -title: "Localization" -description: "Localization of Optimize." ---- - -Camunda Platform 7 only - -To present a localized version of Optimize to users corresponding to their default browser language, Optimize provides the possibility to configure localizations. - -## Default locale configuration - -The distributions of Optimize contain the default localization files under `./config/localization/`. - -The default localizations available are `en` for English and `de` for German. - -Additionally, English is configured as the default `fallbackLocale`. Fallback in this case means whenever a user has a browser configured with a language that is not present in the `availableLocales` list, Optimize will use the `fallbackLocale`. - -The default locale configuration in `./config/environment-config.yaml` looks like the following: - -``` -locales: - availableLocales: ['en', 'de'] - fallbackLocale: 'en' -``` - -For more details on the configuration keys, refer to the [localization configuration section](../configuration/#localization). - -## Custom locale configuration - -Custom locales can be added by creating a locale file under `./config/localization/` and adding it to the `availableLocales` configuration. - -:::note -Configuring a custom locale means you have to maintain it yourself and update it in the context of an Optimize update. - -There is currently no changelog of new localization entries available, and it is required that each localization file contains an entry for each key used by Optimize. 
-::: - -As an example, a custom localization can be created by making a copy of `./config/localization/en.json` named `./config/localization/es.json` and adding it to the available locales in `./config/environment-config.yaml`: - -``` -locales: - availableLocales: ['en', 'de', 'es'] - fallbackLocale: 'en' -``` diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/multi-tenancy.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/multi-tenancy.md deleted file mode 100644 index 5e180cb9a31..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/multi-tenancy.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: multi-tenancy -title: "Multi-tenancy" -description: "Learn about the supported multi-tenancy scenarios." ---- - -Camunda Platform 7 only - -Learn how to set up multi-tenancy with Optimize. - -## Possible multi-tenancy scenarios - -As described in the [Camunda Platform documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/multi-tenancy/), there are two possible multi-tenant scenarios which are also supported by Optimize: - -- [Possible multi-tenancy scenarios](#possible-multi-tenancy-scenarios) - - [Single process engine with tenant-identifiers](#single-process-engine-with-tenant-identifiers) - - [One process engine per tenant](#one-process-engine-per-tenant) - -### Single process engine with tenant-identifiers - -Tenant-identifiers available in the Camunda Platform Engine are automatically imported into Optimize and tenant-based access authorization is enforced based on the configured `Tenant Authorizations` within the Camunda Platform. This means there is no additional setup required for Optimize in order to support this multi-tenancy scenario. - -Users granted tenant access via the Camunda Platform will be able to create and see reports for that particular tenant in Optimize. In the following screenshot, the user `demo` is granted access to data of the tenant with the id `firstTenant` and will be able to select that tenant in the report builder. Other users, without the particular firstTenant authorization, will not be able to select that tenant in the report builder nor be able to see results of reports that are based on that tenant. - -![Tenant Authorization](img/admin-tenant-authorization.png) - -### One process engine per tenant - -In the case of a multi-engine scenario where tenant-specific data is isolated by deploying to dedicated engines, there are no tenant identifiers present in the particular engines themselves. To support this scenario with a single Optimize instance that is configured to import from each of those engines, it is required to configure a `defaultTenant` for each of those engines. - -The effect of configuring a `defaultTenant` per engine is that this `defaultTenant` is automatically added to all data records imported from the particular engine for which no engine-side tenant identifier is present. Optimize users will be authorized to those default tenants based on whether they are authorized to access the particular engine the data originates from. So in this scenario, it is not necessary to configure any `Tenant Authorizations` in the Camunda Platform itself. - -The following `environment-config.yaml` configuration snippet illustrates the configuration of this `defaultTenant` on two different engines. - -``` -...
-engines: - "engineTenant1": - name: engineTenant1 - defaultTenant: - # the id used for this default tenant on persisted entities - id: tenant1 - # the name used for this tenant when displayed in the UI - name: First Tenant - ... - "engineTenant2": - name: engineTenant2 - defaultTenant: - # the id used for this default tenant on persisted entities - id: tenant2 - # the name used for this tenant when displayed in the UI - name: Second Tenant -... -``` - -Optimize users who have a `Optimize Application Authorization` on both engines will be able to distinguish between data of both engines by selecting the corresponding tenant in the report builder. - -:::note Heads up! -Once a `defaultTenant.id` is configured and data imported, you cannot change it any more without doing a [full reimport](./../migration-update/instructions.md#force-reimport-of-engine-data-in-optimize) as any changes to the configuration cannot be applied to already imported data records. -::: diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/multiple-engines.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/multiple-engines.md deleted file mode 100644 index 522044672e3..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/multiple-engines.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -id: multiple-engines -title: "Multiple process engines" -description: "Learn how to set up multiple process engines with Optimize and which scenarios are supported." ---- - -Camunda Platform 7 only - -Learn how to set up multiple process engines with Optimize and which scenarios are supported. - -## Possible multiple process engine scenarios - -There are two possible setups where multiple process engines can be used: - -- [Possible multiple process engine scenarios](#possible-multiple-process-engine-scenarios) - - [Multiple engines with distributed databases](#multiple-engines-with-distributed-databases) - - [Multiple engines with a shared database](#multiple-engines-with-a-shared-database) -- [Authentication and authorization in the multiple engine setup](#authentication-and-authorization-in-the-multiple-engine-setup) - -Check which scenario corresponds to your setup because the configuration of multiple engines to Optimize is not always suited for the best import performance. - -:::note Heads Up! -There are two restrictions for the multiple engines feature: - -1. The process engines are assumed to have distinct process definitions, which means that one process definition (same key, tenant and version) is not deployed on two or more engines at the same time. -Alternatively, each engine could be configured with default tenant identifiers as described in the [One Tenant Per Engine Scenario](../multi-tenancy/#one-process-engine-per-tenant). -2. The engines are assumed to have distinct tenant identifiers, which means one particular tenantId is not deployed on two or more engines at the same time. -::: - -### Multiple engines with distributed databases - -In this scenario, you have multiple process engines and each engine has its own database as illustrated in the following diagram: - -![Clustered Engine with distributed Database](img/Clustered-Engine-Distributed-Database.png) - -Now, you are able to connect each engine to Optimize. The data will then automatically be imported into Optimize. 
The following diagram depicts the setup: - -![Multiple Engines connected to Optimize, each having its own Database](img/Multiple-Engine-Distributed-Database.png) - -To set up the connections to the engines, you need to add the information to the [configuration file](./configuration.md#connection-to-camunda-platform). For the sake of simplicity, let's assume we have two microservices, `Payment` and `Inventory`, each having its own engine with its own database and processes. Both are accessible in the local network. The `Payment` engine uses port `8080` and the `Inventory` engine uses port `1234`. Now an excerpt of the configuration could look as follows: - -```yaml -engines: - payment: - name: default - rest: http://localhost:8080/engine-rest - authentication: - enabled: false - password: "" - user: "" - enabled: true - inventory: - name: default - rest: http://localhost:1234/engine-rest - authentication: - enabled: false - password: "" - user: "" - enabled: true -``` - -`payment` and `inventory` are custom names that were chosen to distinguish where the data was originally imported from later on. - -### Multiple engines with a shared database - -In this scenario, you have multiple engines distributed in a cluster, where each engine instance is connected to a shared database. See the following diagram for an illustration: - -![Clustered Engine with shared Database](img/Clustered-Engine-Shared-Database.png) - -In theory, you could connect each engine to Optimize. However, since every engine accesses the same data through the shared database, Optimize would import the engine data multiple times. There is also no guarantee that importing the same data multiple times will not cause any data corruption. For this reason, we do not recommend connecting each engine individually as described in [multiple engines with distributed databases](#multiple-engines-with-distributed-databases). - -In the scenario of multiple engines with a shared database, it might make sense to balance the workload on each engine during the import. You can place a load balancer between the engines and Optimize, which ensures that the data is imported only once and the load is distributed among all engines. Thus, Optimize would only communicate with the load balancer. The following diagram depicts the described setup: - -![Multiple Engines with shared Database connected to Optimize](img/Multiple-Engine-Shared-Database.png) - -In general, tests have shown that Optimize puts a very low strain on the engine, and its impact on the engine's operations is negligible in almost all cases. - -## Authentication and authorization in the multiple engine setup - -When you configure multiple engines in Optimize, each process engine can host different users with a different set of authorizations. When a user logs in, Optimize will try to authenticate and authorize the user on each configured engine. In case you are not familiar with how -the authorization/authentication works for a single engine scenario, visit the [User Access Management](./user-management.md) and [Authorization Management](./authorization-management.md) documentation first.
- -To determine if a user is allowed to log in and which resources they are allowed to access within the multiple engine scenario, Optimize uses the following algorithm: - -_Given the user X logs into Optimize: go through the list of configured engines and try to authenticate user X; for each successful authentication, fetch the permissions of X for applications and process definitions from that engine; allow X to access Optimize if authorized by at least one engine._ - -To give you a better understanding of how that works, let's take the following multiple engine scenario: - -``` -- Engine `payment`: - - User without Optimize Application Authorization: Scooter, Walter - - User with Optimize Application Authorization: Gonzo - - Authorized Definitions for Gonzo, Scooter, Walter: Payment Processing - -- Engine `inventory`: - - User with Optimize Application Authorization: Piggy, Scooter - - Authorized Definitions for Piggy, Scooter: Inventory Checkout - -- Engine `order`: - - User with Optimize Application Authorization: Gonzo - - Authorized Definitions for Gonzo: Order Handling - -``` - -Here are some examples that might help you to understand the authentication/authorization procedure: - -- If `Piggy` logged in to Optimize, she would be granted access to Optimize and could create reports for the definition `Inventory Checkout`. -- If `Rizzo` logged in to Optimize, he would be rejected because the user `Rizzo` is not known to any engine. -- If `Walter` logged in to Optimize, he would be rejected despite being authorized to access the definition `Payment Processing` on engine `payment`, because `Walter` does not have the `Optimize Application Authorization` required to access Optimize. -- If `Scooter` logged in to Optimize, he would be granted access to Optimize and could create reports for the definition `Inventory Checkout`. He wouldn't - get permissions for the `Payment Processing` or the `Order Handling` definition, since he doesn't have Optimize permissions on the `payment` or `order` engine. -- If `Gonzo` logged in to Optimize, he would be granted access to Optimize and could create reports for the definition `Payment Processing` as well as the `Order Handling` definition, since definition authorizations are loaded from all engines the user could be authenticated with (in particular `payment` and `order`). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/object-variables.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/object-variables.md deleted file mode 100644 index 2c3116befdd..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/object-variables.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: object-variables -title: "Object and list variable support" -description: "Learn how Optimize imports and handles object and list variables." ---- - -## Object variables - -Complex object variables can be imported into Optimize and thereafter be used in reports and filters. During import, Optimize flattens the given object variable to create individual variables for each property of the object, resulting in multiple "sub variables" for each imported object variable. - -For example, an object variable called `user` with the properties `firstName` and `lastName` will result in two flattened variables: `user.firstName` and `user.lastName`. These variables can be used within reports and filters.
- -In addition to the flattened properties, Optimize also imports the entire raw value of the object variable. In the above example, this would result in a variable called `user` with value `{"firstName": "John", "lastName": "Smith"}`. This raw object variable can be inspected in raw data reports but is not supported in other report types or filters. - -## List variables - -Optimize also supports object variables which are JSON-serialized lists of primitive types, for example, a list of strings or numbers. Note that for Camunda Platform and external variables, the `type` of list variables must still be set to `Object`. During import, Optimize also evaluates how many entries are in a given list and persists this in an additional `_listSize` variable. - -For example, a list variable with the name `users` and the values `["John Smith", "Jane Smith"]` will result in two imported variables: one `users` variable with the two given values, and one variable called `users._listSize` with value `2`. Both can be used in reports and filters. - -However, filters are not yet fully optimized for list support, and some filter terms may be initially misleading. This is because filters currently apply to each list item individually rather than the entire list. For example, an "is" filter on a list of string values filters for those instances where any individual list item is equal to the given term, that is, instances whose list variable "contains" the selected value. - -Similarly, the "contains" filter matches process instances whose list variable contains at least one value which in turn contains the given substring. - -The value of list properties within objects as well as variables which are lists of objects rather than primitives can be inspected in the raw object variable value column accessible in raw data reports. - -## Variable plugins - -Any configured [variable plugins](../../plugins/variable-import-plugin) are applied _before_ Optimize creates the flattened property "sub variables", meaning the configured plugins have access to the raw JSON object variables only. Any modifications applied to the JSON object variables will then be persisted to the "sub variables" when Optimize flattens the resulting objects in the next step of the import cycle. - -## Optimize configuration - -The import of object variable values is enabled by default and can be disabled using the `import.data.variable.includeObjectVariableValue` [configuration](../configuration/#engine-common-settings). - -## Other system configurations - -Depending on where the imported object variables originate, the following configuration is required to ensure that your system produces object variable data that Optimize can import correctly: - -### Platform object variables - -If you are importing object variables from Camunda Platform, it is required to configure the Platform's Spin serialization so that process variables are **serialized as JSON** by default. Refer to the [Platform documentation](https://docs.camunda.org/manual/latest/user-guide/data-formats/json/#serializing-process-variables) for more information on how to set up JSON serialization. - -Furthermore, to allow Optimize to correctly parse date properties within the object variable, ensure date properties of objects are serialized using a common **date format** other than timestamps. If date properties are serialized as timestamps, these properties cannot be identified and parsed as dates when importing into Optimize and will instead be persisted as number variables.
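As an illustrative sketch (assuming the engine runs with the Camunda Spring Boot starter or Camunda Platform Run and the Spin JSON data format is on the classpath; verify the property name against your engine version), the engine's default serialization format might be switched to JSON like this:

```yaml
# hypothetical engine configuration excerpt
camunda:
  bpm:
    # serialize object variables as JSON by default so Optimize can flatten them
    default-serialization-format: application/json
```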
- -### Zeebe object variables - -If you are creating object variables using a Zeebe process, ensure date properties within the JSON object are stored using a common **date format** other than timestamps. If Optimize imports timestamp date properties, these properties cannot be identified and parsed as dates and will instead be persisted as number variables. - -### External object variables - -External variables of type object require an additional field called `serializationDataFormat` which specifies which data format was used to serialize the given object. - -Refer to the [external object variable API section](../../rest-api/external-variable-ingestion) for further details on how to ingest external variables. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/secure-elasticsearch.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/secure-elasticsearch.md deleted file mode 100644 index be82ea02594..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/secure-elasticsearch.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: secure-elasticsearch -title: "Secure Elasticsearch" -description: "Secure your Elasticsearch instance so that the communication is encrypted and only authorized users have access to Elasticsearch." ---- - -Camunda Platform 7 only - -It is possible to connect Optimize to an Elasticsearch instance or cluster which is secured by the Elasticsearch extension X-Pack. Bear in mind that X-Pack is an enterprise feature of Elasticsearch and might require you to obtain a license before it can be used in a commercial context. - -## Installing X-Pack - -All the information about how to install X-Pack to secure Elasticsearch can be found in the [installing X-Pack in Elasticsearch guide](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/installing-xpack-es.html#installing-xpack-es). Follow the steps 1 to 5. - -## Securing Elasticsearch - -To enable TLS for Elasticsearch communication once X-Pack is installed, follow these instructions: [Encrypting Communications in Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/configuring-tls.html). - -Optimize v2.4.0+ communicates with Elasticsearch only via HTTP, the minimum encryption setup for HTTP client communications is described here: [Encrypting HTTP Client Communications](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/configuring-tls.html#tls-http). However, if you operate an Elasticsearch cluster, it is recommended to encrypt the transport layer as well, see [Encrypting Communications Between Nodes in a Cluster](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/configuring-tls.html#tls-transport). - -Some comments on the guide: - -- To secure Elasticsearch HTTP communication, you should have added at least the following settings to the Elasticsearch configuration file `elasticsearch.yml` in the config folder of your Elasticsearch distribution (adjust the values): - - ``` - xpack.security.http.ssl.enabled: true - xpack.security.http.key: path/to/server.key - xpack.security.http.certificate: path/to/server.crt - xpack.security.http.certificate_authorities: path/to/ca.crt - ``` - -- If you want to use hostname verification within your cluster, run the `certutil cert` command once for each of your nodes and provide the --name, --dns, and --ip options. 
-- If you used the `--dns` or `--ip` options with the certutil cert command and you want to enable strict hostname checking, set the [verification mode](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/security-settings.html#ssl-tls-settings) to `full`. Otherwise, the verification mode needs to be set to `certificate`. The respective Elasticsearch setting is called `xpack.security.transport.ssl.verification_mode` and needs to be added to the `elasticsearch.yml` configuration file. -- It is recommended to set up a user just for Optimize that has only the necessary rights to execute queries in Elasticsearch. For instance, you could add an `optimize` user with the password `IloveOptimize` and give them `superuser` rights by executing the following command in the Elasticsearch root directory: - - ``` - ./bin/x-pack/users useradd optimize -p IloveOptimize -r superuser - ``` - -:::note Warning! -Although the `superuser` role does allow Optimize to communicate with the secured Elasticsearch instance, this role also grants full access to the cluster. A user with the superuser role can also manage users and roles, and impersonate any other user in the system, which is a security risk for your system. -See [Setting Up User Authentication](https://www.elastic.co/guide/en/x-pack/6.2/setting-up-authentication.html) for further details. -::: - -## Enable Optimize to connect to the secured HTTP Elasticsearch instance - -Now that you have configured your Elasticsearch instance, you need to set up the connection security settings accordingly -to allow Optimize to connect to the secured Elasticsearch instance via HTTPS. - -All the necessary Optimize settings can be found in the [configuration guide](./configuration.md#elasticsearch-security). diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/security-instructions.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/security-instructions.md deleted file mode 100644 index 9da2b7fe013..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/security-instructions.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: security-instructions -title: "Security instructions" -description: "Learn how to secure your Optimize distribution against potential attacks." ---- - -Camunda Platform 7 only - -This page provides an overview of how to secure a Camunda Optimize installation. For Camunda's security policy, a list of security notices, and a guide on how to report vulnerabilities, visit the [general security documentation](https://docs.camunda.org/security/). - -This guide also identifies areas where we consider security issues to be relevant for the Camunda Optimize product and lists those in the subsequent sections. Compliance for those areas is ensured based on common industry best practices and influenced by security requirements of standards like OWASP Top 10 and others. - -It is essential to know that Optimize does not operate on its own, but needs the Camunda Platform engine to import data from and Elasticsearch to store the data. A detailed description of the setup can be found in the [architecture overview]({{< ref "/technical-guide/optimize-explained/import/import-overview.md" >}}) guide.
There are three components that are affected by security, which are detailed in the following subsections: - -- [Secure the engine](#secure-the-engine) -- [Secure Optimize](#secure-optimize) -- [Secure Elasticsearch](#secure-elasticsearch) - -## Secure the engine - -The BPM platform with its process engine is a full standalone application which has a dedicated [security](https://docs.camunda.org/manual/latest/user-guide/security/) guide. The sections that are of major importance for the communication with Optimize are [enabling authentication for the REST API](https://docs.camunda.org/manual/latest/user-guide/security/#enabling-authentication-for-the-rest-api) and [enabling SSL/HTTPS](https://docs.camunda.org/manual/latest/user-guide/security/#enabling-authentication-for-the-rest-api). - -## Secure Optimize - -Optimize already comes with a myriad of settings and security mechanisms by default. In the following sections, you will find the parts that still need manual adjustment. - -### Disable HTTP - -For security reasons, we recommend using Optimize over HTTPS and disabling HTTP. You can disable HTTP by setting the HTTP property in the container settings to an empty/null value. Consult the respective section in the [configuration guide](./configuration.md#container) for more details. - -### Fine-tune Optimize security headers - -Over time, various client-side security mechanisms have been developed to protect web applications from various attacks. Some of these security mechanisms are only activated if the web application sends the corresponding HTTP headers in its server responses. - -Optimize adds several of these headers, which can be fine-tuned in the [configuration](./configuration.md#security) to ensure appropriate security. - -### Authentication - -Authentication controls who can access Optimize. Read all about how to restrict application access in the [user access management guide](./user-management.md). - -### Authorization - -Authorization controls what data a user can access and change in Optimize once authenticated. Authentication is a prerequisite to authorization. Read all about how to restrict data access in the [authorization management guide](./authorization-management.md). - -## Secure Elasticsearch - -Optimize stores its data in Elasticsearch, which is a search engine that acts as a document-based datastore. To protect access to this data, Elasticsearch must be configured correctly. The documentation guide on [how to secure Elasticsearch](./secure-elasticsearch.md) provides a detailed description on how to restrict data access and secure the connection to Elasticsearch.
- secret: null - # List of user ids that are granted full permission to all collections, reports & dashboards - # Note: For reports these users are still required to be granted access to the corresponding process/decision - # definitions in Camunda Platform Admin - superUserIds: [] - - # Here you can define HTTP response headers that Optimize can send in its responses - # to increase the security of your application. - # Find more information here: https://owasp.org/www-project-secure-headers/ - responseHeaders: - # HTTP Strict Transport Security (HSTS) is a web security policy mechanism which helps to protect websites - # against protocol downgrade attacks and cookie hijacking. - # More - HSTS: - # The time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS. - # If you set the number to a negative value no HSTS header is sent. - max-age: 31536000 - # If this optional parameter is specified, this rule applies to all of the site’s subdomains as well. - includeSubDomains: true - # This header enables the cross-site scripting (XSS) filter in your browser. - # Can have one of the following options: - # * 0: Filter disabled. - # * 1: Filter enabled. If a cross-site scripting attack is detected, in order to stop the attack, - # the browser will sanitize the page. - # * 1; mode=block: Filter enabled. Rather than sanitize the page, when a XSS attack is detected, the browser will - # prevent rendering of the page. - # * 1; report=http://[YOURDOMAIN]/your_report_URI: Filter enabled. The browser will sanitize the page and - # report the violation. This is a Chromium function utilizing CSP - # violation reports to send details to a URI of your choice. - X-XSS-Protection: 1; mode=block - # Setting this header will prevent the browser from interpreting files as a different MIME type to - # what is specified in the Content-Type HTTP header (e.g. treating text/plain as text/css). - X-Content-Type-Options: true - # A Content Security Policy (CSP) has significant impact on the way browsers render pages. - # By default Optimize uses the base-uri directive which restricts the URLs that can be used to the Optimize pages. - # Find more details: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy - Content-Security-Policy: base-uri 'self' - -# A global access token used by all public APIs of Optimize -api: - # Secret token to be provided on public APIs of Optimize. - # If set `null` an error will be thrown on any API access - accessToken: ${OPTIMIZE_API_ACCESS_TOKEN:null} - -container: - # A host name or IP address, to identify a specific network interface on - # which to listen. - host: localhost - ports: - # A port number that will be used by Optimize to process HTTP connections. - # If set to null, ~ or left empty, http connections won't be accepted. - http: 8090 - # A port number that will be used by Optimize to process - # secure HTTPS connections. - https: 8091 - # HTTPS requires an SSL Certificate. When you generate an SSL Certificate, - # you are creating a keystore file and a keystore password for use when the - # browser interface connects - keystore: - location: keystore.jks - password: optimize - # configuration of status reporting web socket - status: - # max number of threads\sessions that will be kept to report status - connections: - max: 10 - # Optional url to access Optimize (used for links to Optimize in e.g. alert emails) - accessUrl: null - -# Configuration for engines used to import data. 
Please note that you have to have at -# least one engine configured at all times. -engines: - # An alias of the engine, which will be used for internal purposes like - # logging and displaying which data belong to which engine. - 'camunda-bpm': - # The process engines name on the platform, this is the unique engine identifier on the platforms REST API. - name: default - # A default tenant to the be injected on data from this engine where no tenant is configured in the engine itself. - # This property is only relevant in the context of a `One Process Engine Per Tenant`. - # For details consult the Multi-Tenancy documentation. - defaultTenant: - # the id used for this default tenant on persisted entities - id: null - # the name used for this tenant when displayed in the UI - name: null - #A base URL that will be used for connections to the Camunda Engine REST API. - rest: "http://localhost:8080/engine-rest" - # Determines whether this instance of Optimize should import definition & historical data from this engine. - importEnabled: true - # Determines whether this instance of Optimize should convert historical data to event data - # usable for event based processes. - eventImportEnabled: false - authentication: - # Toggles basic authentication on or off. When enabling basic - # authentication, please be aware that you also need to adjust the values - # of the user and password. - # Also note, when enabled, it is required that the user has - # * READ & READ_HISTORY permission on the Process and Decision Definition resources - # * READ permission on the Authorization, Group, User, Deployment & Tenant resources - # to enable users to log in and Optimize to import the engine data. - enabled: false - # When basic authentication is enabled, this password is used to - # authenticate against the engine. - password: '' - # When basic authentication is enabled, this user is used to authenticate - # against the engine. - user: '' - # The webapps configuration allows Optimize to directly link - # to the other Camunda Web Applications, e.g. to jump from - # Optimize directly to a dedicated process instance in Cockpit - webapps: - # Defines the endpoint where to find the camunda webapps for the given engine - endpoint: "http://localhost:8080/camunda" - # Enables/disables linking to other Camunda Web Applications - enabled: true - -engine-commons: - connection: - #Maximum time without connection to the engine, Optimize should wait - #until a time out is triggered. A value of zero means to wait an - # infinite amount of time. - timeout: 0 - read: - # Maximum time a request to the engine should last, - # before a timeout triggers. A value of zero means to wait an - # infinite amount of time. - timeout: 0 - -import: - data: - activity-instance: - # Determines the page size for historic activity instance fetching. - maxPageSize: 10000 - incident: - # Determines the page size for historic incident fetching. - maxPageSize: 10000 - process-definition-xml: - # Determines the page size for process definition xml model - # fetching. Should be a low value, as large models will lead to - # memory or timeout problems. - maxPageSize: 2 - process-definition: - # Determines the page size for process definition fetching. - maxPageSize: 10000 - process-instance: - # Determines the maximum page size for historic process instance fetching. - maxPageSize: 10000 - variable: - # Determines the page size for historic variable instance fetching. 
- maxPageSize: 10000 - # Controls whether Optimize fetches the serialized value of object variables from the Camunda Runtime REST API. - # By default this is active for backwards compatibility. If no variable plugin to handle object - # variables is installed, it can be turned off to reduce the overhead of the variable import. - includeObjectVariableValue: true - user-task-instance: - # Determines the page size for historic user task instance fetching - maxPageSize: 10000 - identity-link-log: - # Determines the page size for identity link log fetching. - maxPageSize: 10000 - decision-definition-xml: - # Determines the page size for decision definition xml model - # fetching. Should be a low value, as large models will lead to - # memory or timeout problems. - maxPageSize: 2 - decision-definition: - # Determines the page size for decision definition fetching. - maxPageSize: 10000 - decision-instance: - # Determines the page size for historic decision instance fetching. - maxPageSize: 10000 - tenant: - # Determines the page size for tenants fetching. - maxPageSize: 10000 - group: - # Determines the page size for groups fetching. - maxPageSize: 10000 - authorization: - # Determines the page size for authorizations fetching. - maxPageSize: 10000 - dmn: - # Determines if the DMN/decision data, such as decision definitions and instances - # should be imported. - enabled: true - user-task-worker: - # Determines if the user task worker data, such as assignee or candidate group of - # a user task, should be imported. - enabled: true - # This sub-section controls to what extent and how Optimize fetches and displays metadata of user task workers. - # The particular metadata is first-, last name and the email of the users or the names of the candidate groups. - # The data is displayed in the context of reports when grouping/distributing by assignees/candidateGroups or - # when filtering on them. - metadata: - # Determines whether Optimize imports and displays assignee user metadata, otherwise only the user id is shown. - includeUserMetaData: true - # Cron expression for when to fully refresh the internal metadata cache, it defaults to every third hour. - # Otherwise deleted assignees/candidateGroups or metadata changes are not reflected in Optimize. - cronTrigger: '0 */3 * * *' - # The max page size when multiple users or groups are iterated during the metadata refresh. - maxPageSize: 10000 - # The entry limit of the cache that holds the metadata, if you need more entries you can increase that limit. - # When increasing the limit, keep in mind to account for that by increasing the JVM heap memory as well. - # Please refer to the technical guide on how to configure the heap size. - maxEntryLimit: 100000 - # Some data can no longer be imported to a given document if its number of nested documents has reached the configured - # limit. Enable this setting to skip this data during import if the nested document limit has been reached. - skipDataAfterNestedDocLimitReached: false - # Number of threads being used to process the import jobs per data type that are writing data to elasticsearch. - elasticsearchJobExecutorThreadCount: 1 - # Adjust the queue size of the import jobs per data type that store data to elasticsearch. - # A too large value might cause memory problems. - elasticsearchJobExecutorQueueSize: 5 - handler: - backoff: - # Interval which is used for the backoff time calculation. 
- initial: 1000 - # Once all pages are consumed, the import service component will - # start scheduling fetching tasks in increasing periods of time, - # controlled by 'backoff' counter. - # This property sets maximal backoff interval in seconds - max: 30 - #States how often the import index should be stored to Elasticsearch. - importIndexStorageIntervalInSec: 10 - # the time interval the import backs off from the current tip of the time, to reread potentially missed concurrent writes - currentTimeBackoffMilliseconds: 300000 - # The identity sync enables Optimize to build up a in memory cache containing Optimize authorized users & groups. - # This data is used in the collection permissions to allow convenient search capabilities - # and to display member meta-data such as first name, last name or email. - identitySync: - # Whether to include metaData (firstName, lastName, email) when synchronizing users - includeUserMetaData: true - # Whether collection role cleanup should be performed - collectionRoleCleanupEnabled: true - # Cron expression for when the identity sync should run, defaults to every second hour. - cronTrigger: '0 */2 * * *' - # The max page size when multiple users or groups are iterated during the import. - maxPageSize: 10000 - # The entry limit of the cache, if you need more entries you can increase that limit. - # When increasing the limit, keep in mind to account for that by increasing the JVM heap memory as well. - # Please refer to the technical guide on how to configure the heap size. - maxEntryLimit: 100000 - -# everything that is related with configuring Elasticsearch or creating -# a connection to it. -es: - connection: - # Maximum time without connection to Elasticsearch, Optimize should - # wait until a time out triggers. - timeout: 10000 - # Maximum size of the Elasticsearch response consumer heap buffer. This can be increased to resolve errors - # from Elasticsearch relating to the entity content being too long - responseConsumerBufferLimitInMb: 100 - # a list of Elasticsearch nodes Optimize can connect to. If you have built - # an Elasticsearch cluster with several nodes it is recommended to define - # several connection points in case one node fails. - nodes: - # the address/hostname under which the Elasticsearch node is available. - - host: 'localhost' - # A port number used by Elasticsearch to accept HTTP connections. - httpPort: 9200 - # HTTP forward proxy configuration - proxy: - # whether an HTTP proxy should be used for requests to elasticsearch - enabled: false - # the host of the proxy to use - host: null - # the port of the proxy to use - port: null - # whether this proxy is using a secured connection - sslEnabled: false - - - # Elasticsearch security settings - security: - # the basic auth (x-pack) username - username: null - # the basic auth (x-pack) password - password: null - # SSL/HTTPS secured connection settings - ssl: - # path to a PEM encoded file containing the certificate (or certificate chain) - # that will be presented to clients when they connect. - certificate: null - # A list of paths to PEM encoded CA certificate files that should be trusted, e.g. ['/path/to/ca.crt']. - # Note: if you are using a public CA that is already trusted by the Java runtime, - # you do not need to set the certificate_authorities. - certificate_authorities: [] - # used to enable or disable TLS/SSL for the HTTP connection - enabled: false - - # Maximum time a request to elasticsearch should last, before a timeout - # triggers. 
- scrollTimeout: 60000 - settings: - # the maximum number of buckets returned for an aggregation - aggregationBucketLimit: 1000 - index: - # the prefix prepended to all Optimize index and alias names - # NOTE: Changing this after Optimize was already run before, will create new empty indexes - prefix: 'optimize' - # How often should the data replicated in case of node failure. - number_of_replicas: 1 - # How many shards should be used in the cluster for process instance and decision instance indices. - # All other indices will be made up of a single shard - # NOTE: this property only applies the first time Optimize is started and - # the schema/mapping is deployed on Elasticsearch. If you want to take - # this property to take effect again, you need to delete all indexes (with it all data) - # and restart Optimize. - number_of_shards: 1 - # How long Elasticsearch waits until the documents are available - # for search. A positive value defines the duration in seconds. - # A value of -1 means that a refresh needs to be done manually. - refresh_interval: 2s - # Optimize uses nested documents to store list information such as activities or variables belonging to a - # process instance. So this setting defines the maximum number of activities/variables/incidents that a single - # process instance can contain. This limit helps to prevent out of memory errors and should be used with care. - nested_documents_limit: 10000 - -plugin: - # Defines the directory path in the local Optimize file system which should be checked for plugins - directory: './plugin' - variableImport: - # Look in the given base package list for variable import adaption plugins. - # If empty, the import is not influenced. - basePackages: [] - engineRestFilter: - # Look in the given base package list for engine rest filter plugins. - # If empty, the REST calls are not influenced. - basePackages: [] - authenticationExtractor: - # Looks in the given base package list for authentication extractor plugins. - # If empty, the standard Optimize authentication mechanism is used. - basePackages: [] - decisionInputImport: - # Look in the given base package list for Decision input import adaption plugins. - # If empty, the import is not influenced. - basePackages: [] - decisionOutputImport: - # Look in the given base package list for Decision output import adaption plugins. - # If empty, the import is not influenced. - basePackages: [] - elasticsearchCustomHeader: - # Look in the given base package list for Elasticsearch custom header fetching plugins. - # If empty, ES requests are not influenced. - basePackages: [] - -serialization: - # Define a custom date format that should be used for - # fetching date data from the engine(should be the same as in the engine) - engineDateFormat: yyyy-MM-dd'T'HH:mm:ss.SSSZ - -alerting: - quartz: - jobStore: 'org.quartz.simpl.RAMJobStore' - -email: - # A switch to control email sending process. - enabled: false - # Email address that can be used to send alerts - address: '' - # The smtp server name - hostname: '' - # The smtp server port. This one is also used as SSL port for the security connection. - port: 587 - # Define configuration properties for the authentication of the email server - authentication: - # A switch to control whether the email server requires authentication - enabled: true - # Username of your smtp server - username: '' - # Corresponding password to the given user of your smtp server - password: '' - # States how the connection to the server should be secured. 
- # Possible values are 'NONE', 'STARTTLS' or 'SSL/TLS' - securityProtocol: 'NONE' - -export: - csv: - # Maximum number of records returned by CSV export - # Note: Increasing this value comes at a memory cost for the Optimize application that varies based on the actual data. - # As a rough guideline, an export of a 50000 records raw data report containing 8 variables on each instance - # can cause temporary heap memory peaks of up to ~200MB with the actual CSV file having a size of ~20MB. - # Please adjust the heap memory accordingly. - limit: 1000 - -sharing: - # decides if the sharing feature of Optimize can be used in the UI. - enabled: true - -historyCleanup: - # cron expression for when the cleanup should run - cronTrigger: '0 1 * * *' - # default time to live (ttl) for data, when reached the corresponding process/decision/event instances will get cleaned up - # Format is ISO_8601 duration https://en.wikipedia.org/wiki/ISO_8601#Durations - ttl: 'P2Y' - processDataCleanup: - # switch for the camunda process data cleanup, defaults to false - enabled: false - # type of process data cleanup to perform, possible values: - # 'all' - delete everything related to the process instance - # 'variables' - only delete associated variables of a process instance - cleanupMode: 'all' - # Defines the batch size in which camunda engine process instance data gets cleaned up - # may be reduced if requests fail due to request size constraints - batchSize: 10000 - # process definition specific configuration parameters that will overwrite the general parameters (ttl, processDataCleanupMode) - # for the specific processDefinition key - perProcessDefinitionConfig: - # 'myProcessDefinitionKey': - # ttl: 'P2M' - # cleanupMode: 'variables' - decisionDataCleanup: - # switch for the camunda decision data cleanup, defaults to false - enabled: false - # decision definition specific configuration parameters that will overwrite the general parameters (ttl) - # for the specific decisionDefinition key - perDecisionDefinitionConfig: - # 'myDecisionDefinitionKey': - # ttl: 'P2M' - ingestedEventCleanup: - # switch for the ingested event data cleanup, defaults to false - enabled: false - -locales: - # all locales available - # Note: for others than the default there must be a .json file available under ./config/localization. - availableLocales: ['en', 'de'] - # the fallback locale is used if there is a locale requested that is not available in availableLocales - fallbackLocale: 'en' - -ui: - header: - # determines the color theme of the text in the header. Currently 'dark' and 'light' are supported. - textColor: 'dark' - # Path to the logo that is displayed in the header of Optimize. - # Path can be: - # * relative: starting from the config folder you can provide a relative path. - # * absolute: full path in the file system. - # - # Supported image formats can be found here: - # https://developer.mozilla.org/en-US/docs/Web/HTML/Element/img#Supported_image_formats - pathToLogoIcon: 'logo/camunda_icon.svg' - # a hex encoded color that should be used as background color for the header. Default color is white. - backgroundColor: '#FFFFFF' - -eventBasedProcess: - # A list of userIds that are authorized to manage (Create, Update, Publish & Delete) event based processes. - authorizedUserIds: [] - # A list of groupIds that are authorized to manage (Create, Update, Publish & Delete) event based processes. 
- authorizedGroupIds: [] - eventImport: - # Determines whether this Optimize instance performs event based process instance import. - enabled: false - # The batch size of events being correlated to process instances of event based processes. - maxPageSize: 5000 - eventIngestion: - # Content length limit for an ingestion REST API Bulk request in bytes. - # Requests will be rejected when exceeding that limit. - # Defaults to 10MB. - maxBatchRequestBytes: 10485760 - # The maximum number of requests to the event ingestion endpoint that can be served at a time - maxRequests: 5 - eventIndexRollover: - # scheduleIntervalInMinutes specifies how frequently the rollover API should be called to see if a rollover of the - # event index is required (whether the rollover is triggered depends on the conditions specified by maxIndexSizeGB). - scheduleIntervalInMinutes: 10 - # A rollover is triggered when the size of the current event index matches or exceeds the maxIndexSizeGB threshold. - maxIndexSizeGB: 50 - -externalVariable: - import: - # Controls whether external ingested variable data is processed and imported into process instance data - enabled: false - # Determines the page size for the external variable import, that got ingested via the external variable API - maxPageSize: 10000 - variableIngestion: - # Content length limit for the external variable ingestion request in bytes. - # Requests will be rejected when exceeding that limit. Defaults to 10MB. - maxBatchRequestBytes: 10485760 - # The maximum number of requests to the external variable ingestion endpoint that can be served at a time. - maxRequests: 5 - variableIndexRollover: - # scheduleIntervalInMinutes specifies how frequently the rollover API should be called to see if a rollover of the - # external variable index is required (whether the rollover is triggered depends on the conditions specified by maxIndexSizeGB). - scheduleIntervalInMinutes: 10 - # A rollover is triggered when the size of the current external variable index matches or exceeds the maxIndexSizeGB threshold. - maxIndexSizeGB: 50 - -telemetry: - # Sets the initial property value of telemetry configuration once when it has never been enabled/disabled before. - # Telemetry can later be enabled/disabled in the UI by superusers - initializeTelemetry: false \ No newline at end of file diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/setup-event-based-processes.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/setup-event-based-processes.md deleted file mode 100644 index 003cf1d5618..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/setup-event-based-processes.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: setup-event-based-processes -title: "Event-based processes" -description: "Read everything about how to configure event-based processes in Optimize." ---- - -Camunda Platform 7 only - -Event-based processes are BPMN processes that can be created inside Optimize which are based on events originating from external systems. - -Event ingestion is the process of sending event data from external systems to Camunda Optimize to support business processes that are not fully automated with Camunda Platform yet. -Based on this data, it is possible to create process models inside Optimize - called event-based processes - that can be used in reports. - -To enable this feature, refer to [event-based process configuration](#event-based-process-configuration). 
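On the Optimize side, the ingestion endpoint itself is bounded by the `eventIngestion` settings nested under `eventBasedProcess` (request size and concurrency limits). A minimal sketch using the default values, shown purely for orientation; the configuration section referenced above remains the authoritative source:

```yaml
eventBasedProcess:
  eventIngestion:
    # Content length limit for an ingestion REST API bulk request in bytes (default 10MB)
    maxBatchRequestBytes: 10485760
    # Maximum number of requests the ingestion endpoint serves at a time
    maxRequests: 5
```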
- -## Event based process configuration - -To make use of ingested events and create event-based process mappings for them, the event-based process feature needs to be enabled in the [Optimize configuration](../configuration/#event-based-process-configuration). - -This also includes authorizing particular users by their userId or user groups by their groupId to be able to create so-called event-based processes that can be used by other users of Optimize once published. - -A full configuration example authorizing the user `demo` and all members of the `sales` user group to manage event-based processes, enabling the event-based process import as well as configuring a [Public API](../configuration/#public-api) accessToken with the value `secret`, would look like the following: - - api: - accessToken: secret - - eventBasedProcess: - authorizedUserIds: ['demo'] - authorizedGroupIds: ['sales'] - eventImport: - enabled: true - -## Use Camunda activity event sources for event based processes - -:::note Authorization to event-based processes -When Camunda activity events are used in event-based processes, Camunda Admin Authorizations are not inherited for the event-based process. The authorization to use an event-based process is solely managed via the access management of event-based processes when [publishing an event-based process](../../../components/userguide/additional-features/event-based-processes.md#publishing-an-event-based-process) or at any time via the [Edit Access Option](../../../components/userguide/additional-features/event-based-processes.md#event-based-process-list---edit-access) in the event-based process List. - -Visit [Authorization Management - event-based process](./authorization-management.md#event-based-processes) for the reasoning behind this behavior. -::: - -To publish event-based processes that include [Camunda Event Sources](../../../components/userguide/additional-features/event-based-processes.md#camunda-events), it is required to set [`engines.${engineAlias}.eventImportEnabled`](../configuration/#connection-to-camunda-platform) to `true` for the connected engine the Camunda process originates from. - -:::note Heads Up! -You need to [reimport data](./../migration-update/instructions.md#force-reimport-of-engine-data-in-optimize) from this engine to have all historic Camunda events available for event-based processes. Otherwise, only new events will be included. -::: - -As an example, in order to be able to create event processes based on Camunda events from the configured engine named `camunda-bpm`, the configuration of that engine needs to have the `importEnabled` configuration property as well as the `eventImportEnabled` set to `true`: - - engines: - 'camunda-bpm': - importEnabled: true - eventImportEnabled: true diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/shared-elasticsearch-cluster.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/shared-elasticsearch-cluster.md deleted file mode 100644 index 337c6d8b752..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/shared-elasticsearch-cluster.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: shared-elasticsearch-cluster -title: "Shared Elasticsearch cluster" -description: "Operate multiple Optimize instances on a shared Elasticsearch cluster." 
---- - -Camunda Platform 7 only - -In case you have a large shared Elasticsearch cluster that you want to operate multiple Optimize instances on that are intended to run in complete isolation from each other, it is required to change the [`es.settings.index.prefix`](../configuration/#index-settings) setting for each Optimize instance. - -:::note Heads Up! -Although a shared Elasticsearch cluster setup is possible, it's recommended to operate a dedicated Elasticsearch cluster per Optimize instance. - -This is due to the fact that a dedicated cluster provides the highest reliability (no resource sharing and no breaking side effects due to misconfiguration) and flexibility (e.g. Elasticsearch and/or Optimize updates can be performed independently between different Optimize setups). -::: - -The following illustration demonstrates this use case with two Optimize instances that connect to the same Elasticsearch cluster but are configured with different `es.settings.index.prefix` values. This results in different indexes and aliases created on the cluster, strictly isolating the data of both Optimize instances, so no instance accesses the data of the other instance. - -:::note Warning -Changing the value of `es.settings.index.prefix` after an instance was already running results in new indexes being created with the new prefix value. There is no support in migrating data between indexes based on different prefixes. -::: - -![Shared Elasticsearch Cluster Setup](img/shared-elasticsearch-cluster.png) diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/telemetry.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/telemetry.md deleted file mode 100644 index 3585df6430d..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/telemetry.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -id: telemetry -title: "Telemetry" -description: "Learn about Optimize telemetry, what data is collected and why." ---- - -Camunda Platform 7 only - -At Camunda, we strive to offer excellent user experience at a high and stable level. On a strict opt-in basis, we are looking to collect environment and usage data to further improve the user experience for you. These insights help us to understand typical environment setups and product usage patterns and will be used to inform product improvement decisions to your benefit. - -The telemetry reporting is disabled by default and only collects and sends data after you explicitly enable the telemetry configuration flag. The configuration can be changed by `superusers` at any time during runtime via a configuration menu option in the UI. - -The collected data will be sent once every 24 hours via HTTPS, and it is ensured that the performance of Optimize will not be negatively affected by the reporting, even if the telemetry reporter faces unexpected errors. Furthermore, no data will be collected and sent when you stop Optimize. - -## Collected data - -Below you find the full list of data we want to collect, followed by a real-world example. On a conceptual level, they can be categorized into general data and meta/environment data. - -### General data - -The general data category contains information about your Optimize installation: - -| Item | Explanation | -| -- | -- | -| Installation | A unique installation ID stored in Optimize's Elasticsearch database | -| Product name | The name of the product (i.e. 
`Camunda Optimize`) | -| Product version | The version of Optimize you are running | -| Product edition | The edition of the product (i.e. "enterprise") | - -### Meta/environment data - -The meta/environment data category contains information about the environmental setup: - -| Item | Explanation | -| -- | -- | -| Database vendor | The database vendor (i.e. `Elasticsearch`) | -| Database version | The version of Elasticsearch Optimize is using | -| License Key: Customer name | The customer name that appears in the license key you are using with this Optimize installation | -| License Key: Type | The type of license key used with this Optimize installation | -| License Key: Valid Until | The expiry date of the license key used with this Optimize installation | -| License Key: Unlimited | A flag that indicates whether this license key is unlimited | -| License Key: Features | A map which includes information on which products can be used with this license key | -| License Key: Raw | The raw license key string without signature. We add this just in case some properties are listed in the raw license key that have not yet been mapped to other fields (eg. the features map). | -| Engine Installation IDs | A list containing the ID of each engine connected to this Optimize installation | - -### Example - -Below is an example payload including all telemetry data currently sent by Optimize. - -``` -{ - "installation": "7b86edba-fcb7-11ea-adc1-0242ac120002", - "product": { - "name": "Camunda Optimize", - "version": "3.2.0", - "edition": "enterprise", - "internals": { - "database": { - "vendor": "elasticsearch", - "version": "7.0.0" - }, - "license-key": { - "customer": "a customer name", - "type": "UNIFIED", - "valid-until": "2025-01-01", - "unlimited": "false", - "features": { - "camundaBPM": "false", - "optimize": "true", - "cawemo": "false" - }, - "raw": - "customer = a customer name; expiryDate = 2025-01-01; optimize: true;" - }, - "engine-installation-ids": - [ "8343cc7a-8ad1-42d4-97d2-43452c0bdfa3", - "22607b92-fcb8-11ea-adc1-0242ac120002" ] - } - } -} -``` - -## How to enable telemetry - -### Optimize configuration - -You can enable telemetry before starting Optimize by setting the `initializeTelemetry` flag in your configuration file to `true`. Refer to the [configuration section](../configuration#telemetry-configuration) for more details. - -### UI - -Once Optimize is running, telemetry can be enabled (or disabled) via a modal accessible from the user menu. Only superusers are authorized to access this menu and alter the telemetry configuration. - -## Legal note - -Before you install Camunda Optimize version >= 3.2.0 or activate the telemetric functionality, please make sure that you are authorized to take this step, and that the installation or activation of the telemetric functionality is not in conflict with any internal company policies, compliance guidelines, any contractual or other provisions or obligations of your company. - -Camunda cannot be held responsible in the event of unauthorized installation or activation of this function. 
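For completeness, a minimal configuration sketch that pre-enables telemetry on first startup. Note that `initializeTelemetry` is only evaluated if telemetry has never been enabled or disabled before; afterwards, superusers control it via the UI as described above:

```yaml
telemetry:
  # Initial value only; ignored once telemetry has been toggled in the UI
  initializeTelemetry: true
```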
diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/user-management.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/user-management.md deleted file mode 100644 index b8774759707..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/user-management.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: user-management -title: "User access management" -description: "Define which users have access to Optimize." ---- - -Camunda Platform 7 only - -:::note Good to know! - -Providing Optimize access to a user just enables them to log in to Optimize. To be able -to create reports, the user also needs to have permission to access the engine data. To see -how this can be done, refer to the [Authorization Management](./authorization-management.md) section. -::: - -You can use the credentials from the Camunda Platform users to access Optimize. However, for the users to gain access to Optimize, they need to be authorized. This is not done in Optimize itself, but needs to be configured in the Camunda Platform and can be achieved on different levels with different options. If you do not know how authorization in Camunda works, visit the [authorization service documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/). - -When defining an authorization to grant Optimize access, the most important aspect is that you grant access on resource type application with resource ID "optimize" (or "\*" if you want to grant access to all applications including Optimize). The permissions you can set, are either `ALL` or `ACCESS`. They are treated equally, so there is no difference between them. - -Authorizing users in admin can be done as follows: - -![Grant Optimize Access in Admin](img/Admin-GrantAccessAuthorizations.png) - -1. The first option allows access for Optimize on a global level. With this setting all users are allowed to log into Camunda Optimize. -2. The second option defines the access for a single user. The user `Kermit` can now log into Camunda Optimize. -3. The third option provides access on group level. All users belonging to the group `optimize-users` can log into Camunda Optimize. - -It is also possible to revoke the Optimize authorization for specific users or groups. For instance, you can define Optimize on a global scale, but exclude the `engineers` group: - -![Revoke Optimize Access for group 'engineers' in Admin](img/Admin-RevokeGroupAccess.png) - -When Optimize is configured to load data from multiple instances of Camunda Platform, then it suffices to be granted by one instance for the user to be able to log into Optimize. Notice that, like for all authorizations, grants have precedence over revokes. That is, if there is a Camunda Platform instance that grants access to optimize to a user, the user can log in even if another instance revokes access to Optimize for this user. diff --git a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/webhooks.md b/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/webhooks.md deleted file mode 100644 index 74f6815365b..00000000000 --- a/optimize_versioned_docs/version-3.7.0/self-managed/optimize-deployment/setup/webhooks.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: webhooks -title: "Webhooks" -description: "Read about how to configure alert notification webhooks for alerts on custom systems." 
---- - -Camunda Platform 7 only - -In addition to email notifications, you can configure webhooks in Optimize to receive alert notifications on custom systems. This page describes how to set up your webhook configurations using the example of a simple Slack app. - -## The alert webhook configuration - -You can configure a list of webhooks in the Optimize configuration, see [Alert Notification Webhooks](../configuration/#alert-notification-webhooks) for available configuration properties. - -### Alert webhook payload placeholders - -The webhook request body can be customized to integrate with any string encoded HTTP endpoint to your needs. -In order to make use of certain properties of an alert, you can make use of placeholders within the payload string. - -|Placeholder|Sample Value|Description| -|--- |--- |--- | -|ALERT_MESSAGE|Camunda Optimize - Report Status
    Alert name: Too many incidents
    Report name: Count of incidents
    Status: Given threshold [60.0] was exceeded. Current value: 186.0. Please check your Optimize report for more information!
    http://optimize.myorg:8090/#/report/id/|This is the full alert message that is also used in the email alert content.| -|ALERT_NAME|Some Alert|The name given to the alert when it was created.| -|ALERT_REPORT_LINK|http://optimize.myorg/#/report/id/|The direct link to the report the alert is based on.| -|ALERT_CURRENT_VALUE|186.0|The current value of the number report the alert is based on.| -|ALERT_THRESHOLD_VALUE|60.0|The configured alert threshold value.| -|ALERT_THRESHOLD_OPERATOR|>|The threshold operator configured for the alert.| -|ALERT_TYPE|new|The type of the alert notification. Can be one of:
    `new` - the threshold was just exceeded and the alert was triggered
    `reminder` - the threshold was exceeded previously already and this is a reminder notification
    `resolved` - the threshold is met again and the alert is resolved| -|ALERT_INTERVAL|5|The configured interval at which the alert condition is checked.| -|ALERT_INTERVAL_UNIT|seconds|The unit for the configured alert interval. Can be one of: seconds, minutes, hours, days, weeks, months| - -The placeholders can be used within the `defaultPayload` property of each webhook configuration: - -```yaml -webhookAlerting: - webhooks: - 'myWebhook': - ... - defaultPayload: 'The alert {{ALERT_NAME}} with the threshold of `{{ALERT_THRESHOLD_OPERATOR}}{{ALERT_THRESHOLD_VALUE}}` was triggered as *{{ALERT_TYPE}}*.' -``` - -### Example Webhook - Slack - -If your organization uses Slack, you can set up Optimize so that it can use a webhook to send alert notifications to a Slack channel of your choice. - -To configure the webhook in Optimize's `environment-config`, you first need to create a new Slack app for your organization's Slack workspace, as described in [Slack's own documentation here](https://api.slack.com/messaging/webhooks). You only need to follow the steps until you have your webhook URL - no need to write any code to use the webhook to post any messages, Optimize will take care of this for you. Once you have followed these steps, you can copy the Webhook URL from Slack's "Webhook URLs for Your Workspace" section into the configuration as follows: - -```bash -webhookAlerting: - webhooks: - # Name of the webhook, must be unique. - 'mySlackWebhook': - # URL of the webhook which can receive alerts from Optimize - url: 'https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX' - # Map of the headers of the request to the sent to the webhook URL - headers: - 'Content-type': 'application/json' - # HTTP Method for the webhook request - httpMethod: 'POST' - # The default payload structure with the alertMessagePlaceholder {{ALERT_MESSAGE}} for the alert text. - # Optimize will replace this placeholder with the content of the alert message. - defaultPayload: '{"text": "The alert *{{ALERT_NAME}}* was triggered as *{{ALERT_TYPE}}*, you can view the report <{{ALERT_REPORT_LINK}}|here>."}' -``` - -All configuration parameters are described in the [Alert Notification Webhooks Configuration Section](./configuration.md#alert-notification-webhooks). - -With this configuration, when you create an alert for a report in Optimize, `mySlackWebhook` will appear in the targets selection dropdown in the alert creation modal. Once you have selected the webhook from the dropdown and saved the alert, Optimize will send a message to the channel you have selected when creating your Slack app whenever an alert notification is triggered. The content of the message is the same as the content of the alert email notifications. One alert may send either or both email and webhook notifications. 
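As a second, non-Slack illustration, here is a sketch of a webhook pointing at a hypothetical internal endpoint (the URL `https://alerts.example.com/optimize` is a placeholder, not a real service) that posts a compact JSON body assembled from the placeholders documented above:

```yaml
webhookAlerting:
  webhooks:
    'myInternalWebhook':
      # Placeholder URL; replace with your own HTTP endpoint
      url: 'https://alerts.example.com/optimize'
      headers:
        'Content-type': 'application/json'
      httpMethod: 'POST'
      # Optimize substitutes the {{...}} placeholders when the alert fires
      defaultPayload: '{"alert": "{{ALERT_NAME}}", "type": "{{ALERT_TYPE}}", "value": "{{ALERT_CURRENT_VALUE}}", "threshold": "{{ALERT_THRESHOLD_OPERATOR}}{{ALERT_THRESHOLD_VALUE}}", "report": "{{ALERT_REPORT_LINK}}"}'
```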
diff --git a/optimize_versioned_sidebars/version-3.10.0-sidebars.json b/optimize_versioned_sidebars/version-3.10.0-sidebars.json deleted file mode 100644 index 5486d44ef6b..00000000000 --- a/optimize_versioned_sidebars/version-3.10.0-sidebars.json +++ /dev/null @@ -1,2900 +0,0 @@ -{ - "Components": [ - { - "type": "link", - "label": "Introduction to Components", - "href": "/docs/8.2/components/" - }, - - { - "Concepts": [ - { - "type": "link", - "label": "What is Camunda 8?", - "href": "/docs/8.2/components/concepts/what-is-camunda-8/" - }, - { - "type": "link", - "label": "Clusters", - "href": "/docs/8.2/components/concepts/clusters/" - }, - { - "type": "link", - "label": "Processes", - "href": "/docs/8.2/components/concepts/processes/" - }, - { - "type": "link", - "label": "Job workers", - "href": "/docs/8.2/components/concepts/job-workers/" - }, - { - "type": "link", - "label": "Process instance creation", - "href": "/docs/8.2/components/concepts/process-instance-creation/" - }, - { - "type": "link", - "label": "Messages", - "href": "/docs/8.2/components/concepts/messages/" - }, - { - "type": "link", - "label": "Signals", - "href": "/docs/8.2/components/concepts/signals/" - }, - { - "type": "link", - "label": "Incidents", - "href": "/docs/8.2/components/concepts/incidents/" - }, - { - "type": "link", - "label": "Variables", - "href": "/docs/8.2/components/concepts/variables/" - }, - { - "type": "link", - "label": "Expressions", - "href": "/docs/8.2/components/concepts/expressions/" - }, - { - "type": "link", - "label": "Workflow patterns", - "href": "/docs/8.2/components/concepts/workflow-patterns/" - }, - { - "type": "link", - "label": "Process instance modification", - "href": "/docs/8.2/components/concepts/process-instance-modification/" - }, - { - "type": "link", - "label": "Data retention", - "href": "/docs/8.2/components/concepts/data-retention/" - }, - { - "type": "link", - "label": "Encryption at rest", - "href": "/docs/8.2/components/concepts/encryption-at-rest/" - }, - { - "type": "link", - "label": "Backups", - "href": "/docs/8.2/components/concepts/backups/" - } - ] - }, - - { - "Console": [ - { - "type": "link", - "label": "Introduction to Camunda Console", - "href": "/docs/8.2/components/console/introduction-to-console/" - }, - - { - "Manage your organization": [ - { - "type": "link", - "label": "Organization management", - "href": "/docs/8.2/components/console/manage-organization/organization-settings/" - }, - { - "type": "link", - "label": "Manage users of your organization", - "href": "/docs/8.2/components/console/manage-organization/manage-users/" - }, - { - "type": "link", - "label": "View organization activity", - "href": "/docs/8.2/components/console/manage-organization/view-organization-activity/" - }, - { - "type": "link", - "label": "Enable alpha features", - "href": "/docs/8.2/components/console/manage-organization/enable-alpha-features/" - }, - { - "type": "link", - "label": "Usage history", - "href": "/docs/8.2/components/console/manage-organization/usage-history/" - }, - { - "type": "link", - "label": "Usage Alerts", - "href": "/docs/8.2/components/console/manage-organization/usage-alerts/" - }, - { - "type": "link", - "label": "Advanced search", - "href": "/docs/8.2/components/console/manage-organization/advanced-search/" - }, - { - "type": "link", - "label": "Switch organization", - "href": "/docs/8.2/components/console/manage-organization/switch-organization/" - }, - { - "type": "link", - "label": "Connect your IDP with Camunda", - "href": 
"/docs/8.2/components/console/manage-organization/external-sso/" - }, - { - "type": "link", - "label": "Delete your Camunda account", - "href": "/docs/8.2/components/console/manage-organization/delete-account/" - } - ] - }, - - { - "Manage clusters": [ - { - "type": "link", - "label": "Create a cluster", - "href": "/docs/8.2/components/console/manage-clusters/create-cluster/" - }, - { - "type": "link", - "label": "Rename your cluster", - "href": "/docs/8.2/components/console/manage-clusters/rename-cluster/" - }, - { - "type": "link", - "label": "Delete your cluster", - "href": "/docs/8.2/components/console/manage-clusters/delete-cluster/" - }, - { - "type": "link", - "label": "Manage API clients", - "href": "/docs/8.2/components/console/manage-clusters/manage-api-clients/" - }, - { - "type": "link", - "label": "Manage alerts", - "href": "/docs/8.2/components/console/manage-clusters/manage-alerts/" - }, - { - "type": "link", - "label": "Manage IP allowlists", - "href": "/docs/8.2/components/console/manage-clusters/manage-ip-allowlists/" - }, - { - "type": "link", - "label": "Manage secrets", - "href": "/docs/8.2/components/console/manage-clusters/manage-secrets/" - } - ] - }, - - { - "Manage your plan": [ - { - "type": "link", - "label": "Available plans", - "href": "/docs/8.2/components/console/manage-plan/available-plans/" - }, - { - "type": "link", - "label": "Upgrade to a Starter Plan", - "href": "/docs/8.2/components/console/manage-plan/upgrade-to-starter-plan/" - }, - { - "type": "link", - "label": "Update billing reservations", - "href": "/docs/8.2/components/console/manage-plan/update-billing-reservations/" - }, - { - "type": "link", - "label": "Update your credit card", - "href": "/docs/8.2/components/console/manage-plan/update-creditcard/" - }, - { - "type": "link", - "label": "Retrieve invoices or update your billing information", - "href": "/docs/8.2/components/console/manage-plan/retrieve-invoices-or-update-billing-info/" - }, - { - "type": "link", - "label": "Cancel Starter plan subscription", - "href": "/docs/8.2/components/console/manage-plan/cancel-starter-subscription/" - } - ] - }, - - { - "Troubleshooting": [ - { - "type": "link", - "label": "Common pitfalls", - "href": "/docs/8.2/components/console/console-troubleshooting/common-pitfalls/" - } - ] - } - ] - }, - - { - "Modeler": [ - { - "type": "link", - "label": "About Modeler", - "href": "/docs/8.2/components/modeler/about-modeler/" - }, - - { - "Web Modeler": [ - { - "type": "link", - "label": "Launch Web Modeler", - "href": "/docs/8.2/components/modeler/web-modeler/launch-web-modeler/" - }, - { - "type": "link", - "label": "Model your first diagram", - "href": "/docs/8.2/components/modeler/web-modeler/model-your-first-diagram/" - }, - { - "type": "link", - "label": "Import diagram", - "href": "/docs/8.2/components/modeler/web-modeler/import-diagram/" - }, - { - "type": "link", - "label": "Fix problems in your diagram", - "href": "/docs/8.2/components/modeler/web-modeler/fix-problems-in-your-diagram/" - }, - { - "type": "link", - "label": "Run or publish your process", - "href": "/docs/8.2/components/modeler/web-modeler/run-or-publish-your-process/" - }, - - { - "Collaboration": [ - { - "type": "link", - "label": "Collaborate with your team", - "href": "/docs/8.2/components/modeler/web-modeler/collaboration/" - }, - { - "type": "link", - "label": "Collaborate with modes", - "href": "/docs/8.2/components/modeler/web-modeler/collaborate-with-modes/" - }, - { - "type": "link", - "label": "Design mode for business 
users", - "href": "/docs/8.2/components/modeler/web-modeler/design-your-process/" - }, - { - "type": "link", - "label": "Implement mode for developers", - "href": "/docs/8.2/components/modeler/web-modeler/implement-your-process/" - }, - { - "type": "link", - "label": "Play mode for rapid validation", - "href": "/docs/8.2/components/modeler/web-modeler/play-your-process/" - } - ] - }, - - { - "type": "link", - "label": "Milestones", - "href": "/docs/8.2/components/modeler/web-modeler/milestones/" - }, - { - "type": "link", - "label": "Token simulation", - "href": "/docs/8.2/components/modeler/web-modeler/token-simulation/" - }, - - { - "Advanced modeling": [ - { - "type": "link", - "label": "Call activity linking", - "href": "/docs/8.2/components/modeler/web-modeler/advanced-modeling/call-activity-linking/" - }, - { - "type": "link", - "label": "Business rule task linking", - "href": "/docs/8.2/components/modeler/web-modeler/advanced-modeling/business-rule-task-linking/" - } - ] - } - ] - }, - - { - "Desktop Modeler": [ - { - "type": "link", - "label": "About", - "href": "/docs/8.2/components/modeler/desktop-modeler/" - }, - { - "type": "link", - "label": "Installation", - "href": "/docs/8.2/components/modeler/desktop-modeler/install-the-modeler/" - }, - { - "type": "link", - "label": "Model your first diagram", - "href": "/docs/8.2/components/modeler/desktop-modeler/model-your-first-diagram/" - }, - { - "type": "link", - "label": "Deploy your first diagram", - "href": "/docs/8.2/components/modeler/desktop-modeler/connect-to-camunda-8/" - }, - { - "type": "link", - "label": "Start a new process instance", - "href": "/docs/8.2/components/modeler/desktop-modeler/start-instance/" - }, - - { - "Element templates": [ - { - "type": "link", - "label": "About element templates", - "href": "/docs/8.2/components/modeler/desktop-modeler/element-templates/about-templates/" - }, - { - "type": "link", - "label": "Configuring templates", - "href": "/docs/8.2/components/modeler/desktop-modeler/element-templates/configuring-templates/" - }, - { - "type": "link", - "label": "Using templates", - "href": "/docs/8.2/components/modeler/desktop-modeler/element-templates/using-templates/" - }, - { - "type": "link", - "label": "Defining templates", - "href": "/docs/8.2/components/modeler/desktop-modeler/element-templates/defining-templates/" - }, - { - "type": "link", - "label": "Defining templates in Camunda 7", - "href": "/docs/8.2/components/modeler/desktop-modeler/element-templates/c7-defining-templates/" - }, - { - "type": "link", - "label": "Additional resources", - "href": "/docs/8.2/components/modeler/desktop-modeler/element-templates/additional-resources/" - } - ] - }, - - { - "Additional configuration": [ - { - "type": "link", - "label": "Flags", - "href": "/docs/8.2/components/modeler/desktop-modeler/flags/" - }, - { - "type": "link", - "label": "Plugins", - "href": "/docs/8.2/components/modeler/desktop-modeler/plugins/" - }, - { - "type": "link", - "label": "Custom lint rules", - "href": "/docs/8.2/components/modeler/desktop-modeler/custom-lint-rules/" - }, - { - "type": "link", - "label": "Search paths", - "href": "/docs/8.2/components/modeler/desktop-modeler/search-paths/" - }, - { - "type": "link", - "label": "Telemetry", - "href": "/docs/8.2/components/modeler/desktop-modeler/telemetry/" - } - ] - }, - - { - "type": "link", - "label": "Troubleshooting", - "href": "/docs/8.2/components/modeler/desktop-modeler/troubleshooting/" - } - ] - }, - - { - "BPMN": [ - { - "type": "link", - "label": "BPMN in 
Modeler", - "href": "/docs/8.2/components/modeler/bpmn/" - }, - { - "type": "link", - "label": "BPMN primer", - "href": "/docs/8.2/components/modeler/bpmn/bpmn-primer/" - }, - { - "type": "link", - "label": "BPMN coverage", - "href": "/docs/8.2/components/modeler/bpmn/bpmn-coverage/" - }, - { - "type": "link", - "label": "Data flow", - "href": "/docs/8.2/components/modeler/bpmn/data-flow/" - }, - - { - "Tasks": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/modeler/bpmn/tasks/" - }, - { - "type": "link", - "label": "Service tasks", - "href": "/docs/8.2/components/modeler/bpmn/service-tasks/" - }, - { - "type": "link", - "label": "User tasks", - "href": "/docs/8.2/components/modeler/bpmn/user-tasks/" - }, - { - "type": "link", - "label": "Receive tasks", - "href": "/docs/8.2/components/modeler/bpmn/receive-tasks/" - }, - { - "type": "link", - "label": "Business rule tasks", - "href": "/docs/8.2/components/modeler/bpmn/business-rule-tasks/" - }, - { - "type": "link", - "label": "Script tasks", - "href": "/docs/8.2/components/modeler/bpmn/script-tasks/" - }, - { - "type": "link", - "label": "Send tasks", - "href": "/docs/8.2/components/modeler/bpmn/send-tasks/" - }, - { - "type": "link", - "label": "Manual tasks", - "href": "/docs/8.2/components/modeler/bpmn/manual-tasks/" - }, - { - "type": "link", - "label": "Undefined tasks", - "href": "/docs/8.2/components/modeler/bpmn/undefined-tasks/" - } - ] - }, - - { - "Gateways": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/modeler/bpmn/gateways/" - }, - { - "type": "link", - "label": "Exclusive gateway", - "href": "/docs/8.2/components/modeler/bpmn/exclusive-gateways/" - }, - { - "type": "link", - "label": "Parallel gateway", - "href": "/docs/8.2/components/modeler/bpmn/parallel-gateways/" - }, - { - "type": "link", - "label": "Event-based gateway", - "href": "/docs/8.2/components/modeler/bpmn/event-based-gateways/" - }, - { - "type": "link", - "label": "Inclusive gateway", - "href": "/docs/8.2/components/modeler/bpmn/inclusive-gateways/" - } - ] - }, - - { - "Events": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/modeler/bpmn/events/" - }, - { - "type": "link", - "label": "None events", - "href": "/docs/8.2/components/modeler/bpmn/none-events/" - }, - { - "type": "link", - "label": "Message events", - "href": "/docs/8.2/components/modeler/bpmn/message-events/" - }, - { - "type": "link", - "label": "Signal events", - "href": "/docs/8.2/components/modeler/bpmn/signal-events/" - }, - { - "type": "link", - "label": "Timer events", - "href": "/docs/8.2/components/modeler/bpmn/timer-events/" - }, - { - "type": "link", - "label": "Error events", - "href": "/docs/8.2/components/modeler/bpmn/error-events/" - }, - { - "type": "link", - "label": "Escalation events", - "href": "/docs/8.2/components/modeler/bpmn/escalation-events/" - }, - { - "type": "link", - "label": "Terminate events", - "href": "/docs/8.2/components/modeler/bpmn/terminate-events/" - }, - { - "type": "link", - "label": "Link events", - "href": "/docs/8.2/components/modeler/bpmn/link-events/" - } - ] - }, - - { - "Subprocesses": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/modeler/bpmn/subprocesses/" - }, - { - "type": "link", - "label": "Embedded subprocess", - "href": "/docs/8.2/components/modeler/bpmn/embedded-subprocesses/" - }, - { - "type": "link", - "label": "Call activities", - "href": "/docs/8.2/components/modeler/bpmn/call-activities/" - }, - { - 
"type": "link", - "label": "Event subprocess", - "href": "/docs/8.2/components/modeler/bpmn/event-subprocesses/" - } - ] - }, - - { - "Markers": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/modeler/bpmn/markers/" - }, - { - "type": "link", - "label": "Multi-instance", - "href": "/docs/8.2/components/modeler/bpmn/multi-instance/" - } - ] - } - ] - }, - - { - "DMN": [ - { - "type": "link", - "label": "DMN in Modeler", - "href": "/docs/8.2/components/modeler/dmn/" - }, - { - "type": "link", - "label": "Decision requirements graph", - "href": "/docs/8.2/components/modeler/dmn/decision-requirements-graph/" - }, - - { - "Decision table": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/modeler/dmn/decision-table/" - }, - { - "type": "link", - "label": "Input", - "href": "/docs/8.2/components/modeler/dmn/decision-table-input/" - }, - { - "type": "link", - "label": "Output", - "href": "/docs/8.2/components/modeler/dmn/decision-table-output/" - }, - { - "type": "link", - "label": "Rule", - "href": "/docs/8.2/components/modeler/dmn/decision-table-rule/" - }, - { - "type": "link", - "label": "Hit policy", - "href": "/docs/8.2/components/modeler/dmn/decision-table-hit-policy/" - } - ] - }, - - { - "type": "link", - "label": "Decision literal expression", - "href": "/docs/8.2/components/modeler/dmn/decision-literal-expression/" - }, - { - "type": "link", - "label": "Data types", - "href": "/docs/8.2/components/modeler/dmn/dmn-data-types/" - } - ] - }, - - { - "FEEL expressions": [ - { - "type": "link", - "label": "What is FEEL?", - "href": "/docs/8.2/components/modeler/feel/what-is-feel/" - }, - { - "type": "link", - "label": "Data types", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-data-types/" - }, - { - "type": "link", - "label": "Unary-tests", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-unary-tests/" - }, - - { - "Expressions": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-expressions-introduction/" - }, - { - "type": "link", - "label": "Boolean expressions", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-boolean-expressions/" - }, - { - "type": "link", - "label": "String expressions", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-string-expressions/" - }, - { - "type": "link", - "label": "Numeric expressions", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-numeric-expressions/" - }, - { - "type": "link", - "label": "List expressions", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-list-expressions/" - }, - { - "type": "link", - "label": "Context expressions", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-context-expressions/" - }, - { - "type": "link", - "label": "Temporal expressions", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-temporal-expressions/" - }, - { - "type": "link", - "label": "Variables", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-variables/" - }, - { - "type": "link", - "label": "Control flow", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-control-flow/" - }, - { - "type": "link", - "label": "Functions", - "href": "/docs/8.2/components/modeler/feel/language-guide/feel-functions/" - } - ] - }, - - { - "Built-in Functions": [ - { - "type": "link", - "label": "Introduction", - "href": 
"/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-introduction/" - }, - { - "type": "link", - "label": "Conversion functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-conversion/" - }, - { - "type": "link", - "label": "Boolean functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-boolean/" - }, - { - "type": "link", - "label": "String functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-string/" - }, - { - "type": "link", - "label": "Numeric functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-numeric/" - }, - { - "type": "link", - "label": "List functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-list/" - }, - { - "type": "link", - "label": "Context functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-context/" - }, - { - "type": "link", - "label": "Temporal functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-temporal/" - }, - { - "type": "link", - "label": "Range functions", - "href": "/docs/8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-range/" - } - ] - } - ] - }, - - { - "Camunda Forms": [ - { - "type": "link", - "label": "What are Camunda Forms?", - "href": "/docs/8.2/components/modeler/forms/camunda-forms-reference/" - }, - - { - "Form Element Library": [ - { - "type": "link", - "label": "Overview of Form Elements", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library/" - }, - { - "type": "link", - "label": "Text view", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-text/" - }, - { - "type": "link", - "label": "Textfield", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-textfield/" - }, - { - "type": "link", - "label": "Text area", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-textarea/" - }, - { - "type": "link", - "label": "Number", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-number/" - }, - { - "type": "link", - "label": "Datetime", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-datetime/" - }, - { - "type": "link", - "label": "Checkbox", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-checkbox/" - }, - { - "type": "link", - "label": "Radio", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-radio/" - }, - { - "type": "link", - "label": "Select", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-select/" - }, - { - "type": "link", - "label": "Checklist", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-checklist/" - }, - { - "type": "link", - "label": "Taglist", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-taglist/" - }, - { - "type": "link", - "label": "Image view", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-image/" - }, - { - "type": "link", - "label": "Button", - "href": "/docs/8.2/components/modeler/forms/form-element-library/forms-element-library-button/" - } - ] - }, - - { - "Configuration": [ - { - "type": "link", - 
"label": "Data binding", - "href": "/docs/8.2/components/modeler/forms/configuration/forms-config-data-binding/" - }, - { - "type": "link", - "label": "Options Source", - "href": "/docs/8.2/components/modeler/forms/configuration/forms-config-options/" - } - ] - } - ] - }, - - { - "type": "link", - "label": "Data handling", - "href": "/docs/8.2/components/modeler/data-handling/" - } - ] - }, - - { - "Connectors": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/8.2/components/connectors/introduction-to-connectors/" - }, - { - "type": "link", - "label": "Types of Connectors", - "href": "/docs/8.2/components/connectors/connector-types/" - }, - - { - "Use Connectors": [ - { - "type": "link", - "label": "Using Connectors", - "href": "/docs/8.2/components/connectors/use-connectors/" - }, - { - "type": "link", - "label": "Using inbound Connectors", - "href": "/docs/8.2/components/connectors/use-connectors/inbound/" - }, - { - "type": "link", - "label": "Using outbound Connectors", - "href": "/docs/8.2/components/connectors/use-connectors/outbound/" - } - ] - }, - - { - "Out-of-the-box Connectors": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/available-connectors-overview/" - }, - { - "type": "link", - "label": "Asana Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/asana/" - }, - { - "type": "link", - "label": "Automation Anywhere Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/automation-anywhere/" - }, - - { - "AWS": [ - { - "type": "link", - "label": "AWS DynamoDB Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/amazon-dynamodb/" - }, - { - "type": "link", - "label": "AWS EventBridge Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge/" - }, - { - "type": "link", - "label": "AWS Lambda Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/aws-lambda/" - }, - { - "type": "link", - "label": "AWS SNS Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sns/" - }, - { - "type": "link", - "label": "AWS SQS Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sqs/" - } - ] - }, - - { - "type": "link", - "label": "Blue Prism Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/blueprism/" - }, - { - "type": "link", - "label": "EasyPost Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/easy-post/" - }, - { - "type": "link", - "label": "GitHub Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/github/" - }, - { - "type": "link", - "label": "GitLab Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/gitlab/" - }, - - { - "Google": [ - { - "type": "link", - "label": "Google Drive Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/googledrive/" - }, - { - "type": "link", - "label": "Google Maps Platform Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/google-maps-platform/" - }, - { - "type": "link", - "label": "Google Sheets Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/google-sheets/" - } - ] - }, - - { - "type": "link", - "label": "Hugging Face Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/hugging-face/" - }, - { - "type": "link", 
- "label": "Kafka Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/kafka/" - }, - - { - "Microsoft": [ - { - "type": "link", - "label": "Azure OpenAI Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/azure-open-ai/" - }, - { - "type": "link", - "label": "Microsoft Teams Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/microsoft-teams/" - }, - { - "type": "link", - "label": "Microsoft 365 Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/microsoft-o365-mail/" - } - ] - }, - - { - "type": "link", - "label": "OpenAI Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/openai/" - }, - { - "type": "link", - "label": "Camunda Operate Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/operate/" - }, - { - "type": "link", - "label": "RabbitMQ Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/rabbitmq/" - }, - { - "type": "link", - "label": "Salesforce Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/salesforce/" - }, - { - "type": "link", - "label": "Slack Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/slack/" - }, - { - "type": "link", - "label": "SendGrid Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/sendgrid/" - }, - { - "type": "link", - "label": "Twilio Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/twilio/" - }, - { - "type": "link", - "label": "UiPath Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/uipath/" - }, - { - "type": "link", - "label": "WhatsApp Connector", - "href": "/docs/8.2/components/connectors/out-of-the-box-connectors/whatsapp/" - } - ] - }, - - { - "Protocol Connectors": [ - { - "type": "link", - "label": "GraphQL Connector", - "href": "/docs/8.2/components/connectors/protocol/graphql/" - }, - { - "type": "link", - "label": "HTTP Webhook Connector", - "href": "/docs/8.2/components/connectors/protocol/http-webhook/" - }, - { - "type": "link", - "label": "HTTP Polling Connector", - "href": "/docs/8.2/components/connectors/protocol/polling/" - }, - { - "type": "link", - "label": "REST Connector", - "href": "/docs/8.2/components/connectors/protocol/rest/" - } - ] - }, - - { - "type": "link", - "label": "Manage Connector templates", - "href": "/docs/8.2/components/connectors/manage-connector-templates/" - }, - - { - "Building custom Connectors": [ - { - "type": "link", - "label": "Connector SDK", - "href": "/docs/8.2/components/connectors/custom-built-connectors/connector-sdk/" - }, - { - "type": "link", - "label": "Connector templates", - "href": "/docs/8.2/components/connectors/custom-built-connectors/connector-templates/" - } - ] - } - ] - }, - - { - "Zeebe": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/8.2/components/zeebe/zeebe-overview/" - }, - - { - "Technical concepts": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/zeebe/technical-concepts/technical-concepts-overview/" - }, - { - "type": "link", - "label": "Architecture", - "href": "/docs/8.2/components/zeebe/technical-concepts/architecture/" - }, - { - "type": "link", - "label": "Clustering", - "href": "/docs/8.2/components/zeebe/technical-concepts/clustering/" - }, - { - "type": "link", - "label": "Partitions", - "href": "/docs/8.2/components/zeebe/technical-concepts/partitions/" - }, - { 
- "type": "link", - "label": "Internal processing", - "href": "/docs/8.2/components/zeebe/technical-concepts/internal-processing/" - }, - { - "type": "link", - "label": "Process lifecycles", - "href": "/docs/8.2/components/zeebe/technical-concepts/process-lifecycles/" - }, - { - "type": "link", - "label": "Protocols", - "href": "/docs/8.2/components/zeebe/technical-concepts/protocols/" - } - ] - } - ] - }, - - { - "Operate": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/8.2/components/operate/operate-introduction/" - }, - - { - "User guide": [ - { - "type": "link", - "label": "Getting familiar with Operate", - "href": "/docs/8.2/components/operate/userguide/basic-operate-navigation/" - }, - { - "type": "link", - "label": "Variables and incidents", - "href": "/docs/8.2/components/operate/userguide/resolve-incidents-update-variables/" - }, - { - "type": "link", - "label": "Selections and operations", - "href": "/docs/8.2/components/operate/userguide/selections-operations/" - }, - { - "type": "link", - "label": "Delete finished instances", - "href": "/docs/8.2/components/operate/userguide/delete-finished-instances/" - }, - { - "type": "link", - "label": "Process instance modification", - "href": "/docs/8.2/components/operate/userguide/process-instance-modification/" - } - ] - } - ] - }, - - { - "Tasklist": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/8.2/components/tasklist/introduction-to-tasklist/" - }, - - { - "User guide": [ - { - "type": "link", - "label": "Overview and example use case", - "href": "/docs/8.2/components/tasklist/userguide/using-tasklist/" - } - ] - } - ] - }, - - { - "Optimize": [ - "components/what-is-optimize", - { - "User guide": [ - "components/userguide/collections-dashboards-reports", - "components/userguide/user-permissions", - "components/userguide/data-sources", - { - "Dashboards": [ - "components/userguide/creating-dashboards", - "components/userguide/edit-mode", - "components/userguide/view-mode" - ] - }, - { - "Dashboards maintained by Camunda": [ - "components/userguide/process-dashboards", - "components/userguide/instant-preview-dashboards" - ] - }, - "components/userguide/creating-reports", - "components/userguide/combined-process-reports", - { - "Process analysis": [ - "components/userguide/process-analysis/process-analysis-overview", - "components/userguide/process-analysis/outlier-analysis", - "components/userguide/process-analysis/branch-analysis", - { - "Report analysis": [ - "components/userguide/process-analysis/report-analysis/overview", - { - "Edit mode": [ - "components/userguide/process-analysis/report-analysis/edit-mode", - "components/userguide/process-analysis/report-analysis/select-process-definitions", - "components/userguide/process-analysis/report-analysis/define-reports", - "components/userguide/process-analysis/report-analysis/measures", - "components/userguide/process-analysis/report-analysis/compare-target-values", - "components/userguide/process-analysis/report-analysis/process-instance-parts", - "components/userguide/process-analysis/report-analysis/configure-reports" - ] - }, - "components/userguide/process-analysis/report-analysis/view-mode" - ] - }, - { - "Filters": [ - "components/userguide/process-analysis/filters", - "components/userguide/process-analysis/metadata-filters", - "components/userguide/process-analysis/instance-state-filters", - "components/userguide/process-analysis/flow-node-filters", - "components/userguide/process-analysis/process-instance-filters", - 
"components/userguide/process-analysis/variable-filters" - ] - } - ] - }, - { - "Decision analysis": [ - "components/userguide/decision-analysis/decision-analysis-overview", - "components/userguide/decision-analysis/decision-report", - "components/userguide/decision-analysis/decision-filter" - ] - }, - { - "Additional features": [ - "components/userguide/additional-features/alerts", - "components/userguide/additional-features/event-based-processes", - "components/userguide/additional-features/export-import", - "components/userguide/additional-features/footer", - "components/userguide/additional-features/variable-labeling", - "components/userguide/additional-features/process-variants-comparison", - "components/userguide/additional-features/ml-dataset" - ] - } - ] - } - ] - }, - - { - "Best Practices": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/components/best-practices/best-practices-overview/" - }, - - { - "Project management": [ - { - "type": "link", - "label": "Following the customer success path", - "href": "/docs/8.2/components/best-practices/management/following-the-customer-success-path/" - }, - { - "type": "link", - "label": "Doing a proper POC", - "href": "/docs/8.2/components/best-practices/management/doing-a-proper-poc/" - } - ] - }, - - { - "Architecture": [ - { - "type": "link", - "label": "Deciding about your stack", - "href": "/docs/8.2/components/best-practices/architecture/deciding-about-your-stack/" - }, - { - "type": "link", - "label": "Sizing your environment", - "href": "/docs/8.2/components/best-practices/architecture/sizing-your-environment/" - }, - { - "type": "link", - "label": "Understanding human task management", - "href": "/docs/8.2/components/best-practices/architecture/understanding-human-tasks-management/" - } - ] - }, - - { - "Development": [ - { - "type": "link", - "label": "Connecting the workflow engine with your world", - "href": "/docs/8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world/" - }, - { - "type": "link", - "label": "Service integration patterns with BPMN", - "href": "/docs/8.2/components/best-practices/development/service-integration-patterns/" - }, - { - "type": "link", - "label": "Writing good workers", - "href": "/docs/8.2/components/best-practices/development/writing-good-workers/" - }, - { - "type": "link", - "label": "Dealing with problems and exceptions", - "href": "/docs/8.2/components/best-practices/development/dealing-with-problems-and-exceptions/" - }, - { - "type": "link", - "label": "Handling data in processes", - "href": "/docs/8.2/components/best-practices/development/handling-data-in-processes/" - }, - { - "type": "link", - "label": "Routing events to processes", - "href": "/docs/8.2/components/best-practices/development/routing-events-to-processes/" - }, - { - "type": "link", - "label": "Testing process definitions", - "href": "/docs/8.2/components/best-practices/development/testing-process-definitions/" - } - ] - }, - - { - "Modeling": [ - { - "type": "link", - "label": "Creating readable process models", - "href": "/docs/8.2/components/best-practices/modeling/creating-readable-process-models/" - }, - { - "type": "link", - "label": "Naming BPMN elements", - "href": "/docs/8.2/components/best-practices/modeling/naming-bpmn-elements/" - }, - { - "type": "link", - "label": "Naming technically relevant IDs", - "href": "/docs/8.2/components/best-practices/modeling/naming-technically-relevant-ids/" - }, - { - "type": "link", - "label": "Modeling beyond the happy path", 
- "href": "/docs/8.2/components/best-practices/modeling/modeling-beyond-the-happy-path/" - }, - { - "type": "link", - "label": "Modeling with situation patterns", - "href": "/docs/8.2/components/best-practices/modeling/modeling-with-situation-patterns/" - }, - { - "type": "link", - "label": "Building flexibility into BPMN models", - "href": "/docs/8.2/components/best-practices/modeling/building-flexibility-into-bpmn-models/" - }, - { - "type": "link", - "label": "Choosing the DMN hit policy", - "href": "/docs/8.2/components/best-practices/modeling/choosing-the-dmn-hit-policy/" - } - ] - }, - - { - "Operations": [ - { - "type": "link", - "label": "Versioning process definitions", - "href": "/docs/8.2/components/best-practices/operations/versioning-process-definitions/" - }, - { - "type": "link", - "label": "Reporting about processes", - "href": "/docs/8.2/components/best-practices/operations/reporting-about-processes/" - } - ] - }, - - { - "Camunda 7 specific": [ - { - "type": "link", - "label": "Deciding about your Camunda 7 stack", - "href": "/docs/8.2/components/best-practices/architecture/deciding-about-your-stack-c7/" - }, - { - "type": "link", - "label": "Sizing your Camunda 7 environment", - "href": "/docs/8.2/components/best-practices/architecture/sizing-your-environment-c7/" - }, - { - "type": "link", - "label": "Invoking services from a Camunda 7 process", - "href": "/docs/8.2/components/best-practices/development/invoking-services-from-the-process-c7/" - }, - { - "type": "link", - "label": "Understanding Camunda 7 transaction handling", - "href": "/docs/8.2/components/best-practices/development/understanding-transaction-handling-c7/" - }, - { - "type": "link", - "label": "Operating Camunda 7", - "href": "/docs/8.2/components/best-practices/operations/operating-camunda-c7/" - }, - { - "type": "link", - "label": "Performance tuning Camunda 7", - "href": "/docs/8.2/components/best-practices/operations/performance-tuning-camunda-c7/" - }, - { - "type": "link", - "label": "Securing Camunda 7", - "href": "/docs/8.2/components/best-practices/operations/securing-camunda-c7/" - }, - { - "type": "link", - "label": "Extending human task management in Camunda 7", - "href": "/docs/8.2/components/best-practices/architecture/extending-human-task-management-c7/" - } - ] - } - ] - } - ], - "APIs & Tools": [ - { - "type": "link", - "label": "Working with APIs & tools", - "href": "/docs/8.2/apis-tools/working-with-apis-tools/" - }, - - { - "APIs": [ - { - "type": "link", - "label": "Administration API clients (REST)", - "href": "/docs/8.2/apis-tools/administration-api-reference/" - }, - - { - "Operate API (REST)": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/apis-tools/operate-api/overview/" - }, - { - "type": "link", - "label": "Tutorial", - "href": "/docs/8.2/apis-tools/operate-api/tutorial/" - } - ] - }, - - { - "Optimize API (REST)": [ - "apis-tools/optimize-api/optimize-api-authorization", - { - "Configuration": [ - "apis-tools/optimize-api/configuration/enable-sharing", - "apis-tools/optimize-api/configuration/disable-sharing" - ] - }, - { - "Dashboard": [ - "apis-tools/optimize-api/dashboard/get-dashboard-ids", - "apis-tools/optimize-api/dashboard/delete-dashboard", - "apis-tools/optimize-api/dashboard/export-dashboard-definitions" - ] - }, - { - "Report": [ - "apis-tools/optimize-api/report/get-report-ids", - "apis-tools/optimize-api/report/delete-report", - "apis-tools/optimize-api/report/export-report-definitions", - "apis-tools/optimize-api/report/get-data-export" 
- ] - }, - "apis-tools/optimize-api/event-ingestion", - "apis-tools/optimize-api/external-variable-ingestion", - "apis-tools/optimize-api/health-readiness", - "apis-tools/optimize-api/import-entities", - "apis-tools/optimize-api/variable-labeling" - ] - }, - - { - "Tasklist API (GraphQL)": [ - { - "type": "link", - "label": "Schema Documentation", - "href": "/docs/8.2/apis-tools/tasklist-api/" - }, - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/apis-tools/tasklist-api/tasklist-api-overview/" - }, - { - "type": "link", - "label": "Tutorial", - "href": "/docs/8.2/apis-tools/tasklist-api/tasklist-api-tutorial/" - }, - { - "type": "link", - "label": "GraphQL to REST API migration", - "href": "/docs/8.2/apis-tools/tasklist-api/tasklist-api-graphql-to-rest-migration/" - }, - - { - "Directives": [ - { - "type": "link", - "label": "deprecated", - "href": "/docs/8.2/apis-tools/tasklist-api/directives/deprecated/" - }, - { - "type": "link", - "label": "include", - "href": "/docs/8.2/apis-tools/tasklist-api/directives/include/" - }, - { - "type": "link", - "label": "skip", - "href": "/docs/8.2/apis-tools/tasklist-api/directives/skip/" - }, - { - "type": "link", - "label": "specifiedBy", - "href": "/docs/8.2/apis-tools/tasklist-api/directives/specified-by/" - } - ] - }, - - { - "Enums": [ - { - "type": "link", - "label": "Sort", - "href": "/docs/8.2/apis-tools/tasklist-api/enums/sort/" - }, - { - "type": "link", - "label": "TaskSortFields", - "href": "/docs/8.2/apis-tools/tasklist-api/enums/task-sort-fields/" - }, - { - "type": "link", - "label": "TaskState", - "href": "/docs/8.2/apis-tools/tasklist-api/enums/task-state/" - } - ] - }, - - { - "Inputs": [ - { - "type": "link", - "label": "DateFilter", - "href": "/docs/8.2/apis-tools/tasklist-api/inputs/date-filter-input/" - }, - { - "type": "link", - "label": "TaskOrderBy", - "href": "/docs/8.2/apis-tools/tasklist-api/inputs/task-order-by/" - }, - { - "type": "link", - "label": "TaskQuery", - "href": "/docs/8.2/apis-tools/tasklist-api/inputs/task-query/" - }, - { - "type": "link", - "label": "VariableInput", - "href": "/docs/8.2/apis-tools/tasklist-api/inputs/variable-input/" - } - ] - }, - - { - "Mutations": [ - { - "type": "link", - "label": "claimTask", - "href": "/docs/8.2/apis-tools/tasklist-api/mutations/claim-task/" - }, - { - "type": "link", - "label": "completeTask", - "href": "/docs/8.2/apis-tools/tasklist-api/mutations/complete-task/" - }, - { - "type": "link", - "label": "deleteProcessInstance", - "href": "/docs/8.2/apis-tools/tasklist-api/mutations/delete-process-instance/" - }, - { - "type": "link", - "label": "unclaimTask", - "href": "/docs/8.2/apis-tools/tasklist-api/mutations/unclaim-task/" - } - ] - }, - - { - "Objects": [ - { - "type": "link", - "label": "Form", - "href": "/docs/8.2/apis-tools/tasklist-api/objects/form/" - }, - { - "type": "link", - "label": "Task", - "href": "/docs/8.2/apis-tools/tasklist-api/objects/task/" - }, - { - "type": "link", - "label": "User", - "href": "/docs/8.2/apis-tools/tasklist-api/objects/user/" - }, - { - "type": "link", - "label": "Variable", - "href": "/docs/8.2/apis-tools/tasklist-api/objects/variable/" - } - ] - }, - - { - "Queries": [ - { - "type": "link", - "label": "currentUser", - "href": "/docs/8.2/apis-tools/tasklist-api/queries/current-user/" - }, - { - "type": "link", - "label": "form", - "href": "/docs/8.2/apis-tools/tasklist-api/queries/form/" - }, - { - "type": "link", - "label": "task", - "href": "/docs/8.2/apis-tools/tasklist-api/queries/task/" - }, - { - "type": 
"link", - "label": "tasks", - "href": "/docs/8.2/apis-tools/tasklist-api/queries/tasks/" - }, - { - "type": "link", - "label": "variable", - "href": "/docs/8.2/apis-tools/tasklist-api/queries/variable/" - }, - { - "type": "link", - "label": "variables", - "href": "/docs/8.2/apis-tools/tasklist-api/queries/variables/" - } - ] - }, - - { - "Scalars": [ - { - "type": "link", - "label": "DateTime", - "href": "/docs/8.2/apis-tools/tasklist-api/scalars/datetime/" - }, - { - "type": "link", - "label": "Boolean", - "href": "/docs/8.2/apis-tools/tasklist-api/scalars/boolean/" - }, - { - "type": "link", - "label": "ID", - "href": "/docs/8.2/apis-tools/tasklist-api/scalars/id/" - }, - { - "type": "link", - "label": "Int", - "href": "/docs/8.2/apis-tools/tasklist-api/scalars/int/" - }, - { - "type": "link", - "label": "String", - "href": "/docs/8.2/apis-tools/tasklist-api/scalars/string/" - } - ] - } - ] - }, - - { - "Tasklist API (REST)": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-overview/" - }, - { - "type": "link", - "label": "Authentication", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-authentication/" - }, - - { - "Controllers": [ - { - "type": "link", - "label": "Form API", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-form-controller/" - }, - { - "type": "link", - "label": "Task API", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller/" - }, - { - "type": "link", - "label": "Variables API", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller/" - } - ] - }, - - { - "Schemas": [ - { - "Enums": [ - { - "type": "link", - "label": "Sort", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/enums/sort/" - }, - { - "type": "link", - "label": "TaskSortFields", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/enums/task-sort-fields/" - }, - { - "type": "link", - "label": "TaskState", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/enums/task-state/" - } - ] - }, - - { - "Models": [ - { - "type": "link", - "label": "DateFilter", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/models/date-filter-input/" - }, - { - "type": "link", - "label": "TaskOrderBy", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/models/task-order-by/" - } - ] - }, - - { - "Requests": [ - { - "type": "link", - "label": "TaskAssignRequest", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/requests/task-assign-request/" - }, - { - "type": "link", - "label": "TaskCompleteRequest", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/requests/task-complete-request/" - }, - { - "type": "link", - "label": "TaskSearchRequest", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/requests/task-search-request/" - }, - { - "type": "link", - "label": "VariableInput", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/requests/variable-input/" - }, - { - "type": "link", - "label": "VariablesSearchRequest", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/requests/variables-search-request/" - } - ] - }, - - { - "Responses": [ - { - "type": "link", - "label": "ErrorResponse", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/responses/error-response/" - }, - { - "type": "link", - "label": "FormResponse", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/responses/form-response/" - }, - { - "type": "link", - "label": "TaskResponse", 
- "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/responses/task-response/" - }, - { - "type": "link", - "label": "TaskSearchResponse", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/responses/task-search-response/" - }, - { - "type": "link", - "label": "VariableResponse", - "href": "/docs/8.2/apis-tools/tasklist-api-rest/schemas/responses/variable-response/" - } - ] - } - ] - } - ] - }, - - { - "type": "link", - "label": "Web Modeler API (REST, beta)", - "href": "/docs/8.2/apis-tools/web-modeler-api/" - }, - { - "type": "link", - "label": "Zeebe API (gRPC)", - "href": "/docs/8.2/apis-tools/grpc/" - } - ] - }, - - { - "Clients": [ - { - "CLI client": [ - { - "type": "link", - "label": "Quick reference", - "href": "/docs/8.2/apis-tools/cli-client/" - }, - { - "type": "link", - "label": "Getting started with the CLI client", - "href": "/docs/8.2/apis-tools/cli-client/cli-get-started/" - } - ] - }, - - { - "Go client": [ - { - "type": "link", - "label": "Quick reference", - "href": "/docs/8.2/apis-tools/go-client/" - }, - { - "type": "link", - "label": "Getting started with the Go client", - "href": "/docs/8.2/apis-tools/go-client/go-get-started/" - } - ] - }, - - { - "Java client": [ - { - "type": "link", - "label": "Quick reference", - "href": "/docs/8.2/apis-tools/java-client/" - }, - { - "type": "link", - "label": "Job worker", - "href": "/docs/8.2/apis-tools/java-client/job-worker/" - }, - { - "type": "link", - "label": "Logging", - "href": "/docs/8.2/apis-tools/java-client/logging/" - }, - { - "type": "link", - "label": "Zeebe Process Test", - "href": "/docs/8.2/apis-tools/java-client/zeebe-process-test/" - }, - - { - "Examples": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/apis-tools/java-client-examples/" - }, - { - "type": "link", - "label": "Deploy a process", - "href": "/docs/8.2/apis-tools/java-client-examples/process-deploy/" - }, - { - "type": "link", - "label": "Create a process instance", - "href": "/docs/8.2/apis-tools/java-client-examples/process-instance-create/" - }, - { - "type": "link", - "label": "Create non-blocking process instances", - "href": "/docs/8.2/apis-tools/java-client-examples/process-instance-create-nonblocking/" - }, - { - "type": "link", - "label": "Create a process instance with results", - "href": "/docs/8.2/apis-tools/java-client-examples/process-instance-create-with-result/" - }, - { - "type": "link", - "label": "Evaluate a decision", - "href": "/docs/8.2/apis-tools/java-client-examples/decision-evaluate/" - }, - { - "type": "link", - "label": "Open a job worker", - "href": "/docs/8.2/apis-tools/java-client-examples/job-worker-open/" - }, - { - "type": "link", - "label": "Handle variables as POJO", - "href": "/docs/8.2/apis-tools/java-client-examples/data-pojo/" - }, - { - "type": "link", - "label": "Request cluster topology", - "href": "/docs/8.2/apis-tools/java-client-examples/cluster-topology-request/" - } - ] - } - ] - }, - - { - "Community clients": [ - { - "type": "link", - "label": "Component clients", - "href": "/docs/8.2/apis-tools/community-clients/" - }, - - { - "Zeebe clients": [ - { - "type": "link", - "label": "C#", - "href": "/docs/8.2/apis-tools/community-clients/c-sharp/" - }, - { - "type": "link", - "label": "JavaScript/Node.js", - "href": "/docs/8.2/apis-tools/community-clients/javascript/" - }, - { - "type": "link", - "label": "Micronaut", - "href": "/docs/8.2/apis-tools/community-clients/micronaut/" - }, - { - "type": "link", - "label": "Python", - "href": 
"/docs/8.2/apis-tools/community-clients/python/" - }, - { - "type": "link", - "label": "Ruby", - "href": "/docs/8.2/apis-tools/community-clients/ruby/" - }, - { - "type": "link", - "label": "Rust", - "href": "/docs/8.2/apis-tools/community-clients/rust/" - }, - { - "type": "link", - "label": "Spring", - "href": "/docs/8.2/apis-tools/community-clients/spring/" - }, - { - "type": "link", - "label": "Quarkus", - "href": "/docs/8.2/apis-tools/community-clients/quarkus/" - } - ] - } - ] - }, - - { - "type": "link", - "label": "Build your own client", - "href": "/docs/8.2/apis-tools/build-your-own-client/" - } - ] - } - ], - "Self-Managed": [ - { - "type": "link", - "label": "Camunda 8 Self-Managed", - "href": "/docs/8.2/self-managed/about-self-managed/" - }, - - { - "Installation": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/platform-deployment/overview/" - }, - - { - "Helm/Kubernetes": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/overview/" - }, - { - "type": "link", - "label": "Deploy", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/deploy/" - }, - { - "type": "link", - "label": "Upgrade", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/upgrade/" - }, - - { - "Platforms": [ - { - "type": "link", - "label": "Amazon EKS", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/" - }, - { - "type": "link", - "label": "Microsoft AKS", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/platforms/microsoft-aks/" - }, - { - "type": "link", - "label": "Google GKE", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/platforms/google-gke/" - }, - { - "type": "link", - "label": "Red Hat OpenShift", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift/" - } - ] - }, - - { - "Guides": [ - { - "type": "link", - "label": "Local Kubernetes Cluster", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster/" - }, - { - "type": "link", - "label": "Accessing components without Ingress", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/guides/accessing-components-without-ingress/" - }, - { - "type": "link", - "label": "Combined and separated Ingress setup", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup/" - }, - { - "type": "link", - "label": "Using Existing Keycloak", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/guides/using-existing-keycloak/" - }, - { - "type": "link", - "label": "Installing in an air-gapped environment", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/guides/air-gapped-installation/" - }, - { - "type": "link", - "label": "Running custom Connectors", - "href": "/docs/8.2/self-managed/platform-deployment/helm-kubernetes/guides/running-custom-connectors/" - } - ] - }, - - { - "type": "link", - "label": "Troubleshooting", - "href": "/docs/8.2/self-managed/platform-deployment/troubleshooting/" - } - ] - }, - - { - "type": "link", - "label": "Docker", - "href": "/docs/8.2/self-managed/platform-deployment/docker/" - }, - { - "type": "link", - "label": "Manual", - "href": "/docs/8.2/self-managed/platform-deployment/manual/" - } - ] - }, - - { - "Concepts": [ - { - "Access control": [ - { - "type": "link", - "label": "Applications", - "href": 
"/docs/8.2/self-managed/concepts/access-control/applications/" - }, - { - "type": "link", - "label": "APIs", - "href": "/docs/8.2/self-managed/concepts/access-control/apis/" - }, - { - "type": "link", - "label": "Groups", - "href": "/docs/8.2/self-managed/concepts/access-control/groups/" - }, - { - "type": "link", - "label": "Permissions", - "href": "/docs/8.2/self-managed/concepts/access-control/permissions/" - }, - { - "type": "link", - "label": "Resource authorizations", - "href": "/docs/8.2/self-managed/concepts/access-control/resource-authorizations/" - }, - { - "type": "link", - "label": "Roles", - "href": "/docs/8.2/self-managed/concepts/access-control/roles/" - }, - { - "type": "link", - "label": "Users", - "href": "/docs/8.2/self-managed/concepts/access-control/users/" - } - ] - }, - - { - "Authentication": [ - { - "type": "link", - "label": "Machine-to-machine (M2M) tokens", - "href": "/docs/8.2/self-managed/concepts/authentication/m2m-tokens/" - } - ] - }, - - { - "type": "link", - "label": "Exporters", - "href": "/docs/8.2/self-managed/concepts/exporters/" - }, - { - "type": "link", - "label": "Elasticsearch privileges", - "href": "/docs/8.2/self-managed/concepts/elasticsearch-privileges/" - } - ] - }, - - { - "Zeebe": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/zeebe-deployment/zeebe-installation/" - }, - - { - "Zeebe Gateway": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/zeebe-deployment/zeebe-gateway/overview/" - }, - { - "type": "link", - "label": "Interceptors", - "href": "/docs/8.2/self-managed/zeebe-deployment/zeebe-gateway/interceptors/" - } - ] - }, - - { - "Configuration": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/" - }, - { - "type": "link", - "label": "Logging", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/logging/" - }, - { - "type": "link", - "label": "Gateway health probes", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/gateway-health-probes/" - }, - { - "type": "link", - "label": "Environment variables", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/environment-variables/" - }, - { - "type": "link", - "label": "Fixed partitioning", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/fixed-partitioning/" - }, - { - "type": "link", - "label": "Priority election", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/priority-election/" - }, - { - "type": "link", - "label": "Broker configuration", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/broker-config/" - }, - { - "type": "link", - "label": "Gateway configuration", - "href": "/docs/8.2/self-managed/zeebe-deployment/configuration/gateway-config/" - } - ] - }, - - { - "Security": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/zeebe-deployment/security/" - }, - { - "type": "link", - "label": "Client authorization", - "href": "/docs/8.2/self-managed/zeebe-deployment/security/client-authorization/" - }, - { - "type": "link", - "label": "Secure client communication", - "href": "/docs/8.2/self-managed/zeebe-deployment/security/secure-client-communication/" - }, - { - "type": "link", - "label": "Secure cluster communication", - "href": "/docs/8.2/self-managed/zeebe-deployment/security/secure-cluster-communication/" - } - ] - }, - - { - "Operation": [ - { - "type": "link", - "label": "Overview", - "href": 
"/docs/8.2/self-managed/zeebe-deployment/operations/zeebe-in-production/" - }, - { - "type": "link", - "label": "Resource planning", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/resource-planning/" - }, - { - "type": "link", - "label": "Network ports", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/network-ports/" - }, - { - "type": "link", - "label": "Setting up a Zeebe cluster", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/setting-up-a-cluster/" - }, - { - "type": "link", - "label": "Metrics", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/metrics/" - }, - { - "type": "link", - "label": "Health status", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/health/" - }, - { - "type": "link", - "label": "Backpressure", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/backpressure/" - }, - { - "type": "link", - "label": "Disk space", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/disk-space/" - }, - { - "type": "link", - "label": "Update Zeebe", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/update-zeebe/" - }, - { - "type": "link", - "label": "Rebalancing", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/rebalancing/" - }, - { - "type": "link", - "label": "Management API", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/management-api/" - }, - { - "type": "link", - "label": "Backups", - "href": "/docs/8.2/self-managed/zeebe-deployment/operations/backups/" - } - ] - }, - - { - "Exporters": [ - { - "type": "link", - "label": "Install Zeebe exporters", - "href": "/docs/8.2/self-managed/zeebe-deployment/exporters/install-zeebe-exporters/" - }, - { - "type": "link", - "label": "Elasticsearch", - "href": "/docs/8.2/self-managed/zeebe-deployment/exporters/elasticsearch-exporter/" - }, - { - "type": "link", - "label": "OpenSearch", - "href": "/docs/8.2/self-managed/zeebe-deployment/exporters/opensearch-exporter/" - } - ] - } - ] - }, - - { - "Operate": [ - { - "type": "link", - "label": "Installation", - "href": "/docs/8.2/self-managed/operate-deployment/install-and-start/" - }, - { - "type": "link", - "label": "Configuration", - "href": "/docs/8.2/self-managed/operate-deployment/operate-configuration/" - }, - { - "type": "link", - "label": "Data retention", - "href": "/docs/8.2/self-managed/operate-deployment/data-retention/" - }, - { - "type": "link", - "label": "Schema and migration", - "href": "/docs/8.2/self-managed/operate-deployment/schema-and-migration/" - }, - { - "type": "link", - "label": "Importer and archiver", - "href": "/docs/8.2/self-managed/operate-deployment/importer-and-archiver/" - }, - { - "type": "link", - "label": "Authentication and authorization", - "href": "/docs/8.2/self-managed/operate-deployment/operate-authentication/" - }, - { - "type": "link", - "label": "Usage metrics", - "href": "/docs/8.2/self-managed/operate-deployment/usage-metrics/" - } - ] - }, - - { - "Tasklist": [ - { - "type": "link", - "label": "Installation", - "href": "/docs/8.2/self-managed/tasklist-deployment/install-and-start/" - }, - { - "type": "link", - "label": "Configuration", - "href": "/docs/8.2/self-managed/tasklist-deployment/tasklist-configuration/" - }, - { - "type": "link", - "label": "Data retention", - "href": "/docs/8.2/self-managed/tasklist-deployment/data-retention/" - }, - { - "type": "link", - "label": "Importer and archiver", - "href": "/docs/8.2/self-managed/tasklist-deployment/importer-and-archiver/" - }, - { - "type": "link", - "label": 
"Authentication", - "href": "/docs/8.2/self-managed/tasklist-deployment/tasklist-authentication/" - }, - { - "type": "link", - "label": "Usage metrics", - "href": "/docs/8.2/self-managed/tasklist-deployment/usage-metrics/" - } - ] - }, - - { - "Connectors": [ - { - "type": "link", - "label": "Installation", - "href": "/docs/8.2/self-managed/connectors-deployment/install-and-start/" - }, - { - "type": "link", - "label": "Configuration", - "href": "/docs/8.2/self-managed/connectors-deployment/connectors-configuration/" - } - ] - }, - - { - "Optimize": [ - "self-managed/optimize-deployment/install-and-start", - "self-managed/optimize-deployment/version-policy", - { - "Configuration": [ - "self-managed/optimize-deployment/configuration/getting-started", - { - "System configuration": [ - "self-managed/optimize-deployment/configuration/system-configuration", - "self-managed/optimize-deployment/configuration/system-configuration-platform-8", - "self-managed/optimize-deployment/configuration/system-configuration-platform-7", - "self-managed/optimize-deployment/configuration/event-based-process-configuration" - ] - }, - "self-managed/optimize-deployment/configuration/logging", - "self-managed/optimize-deployment/configuration/optimize-license", - "self-managed/optimize-deployment/configuration/security-instructions", - "self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster", - "self-managed/optimize-deployment/configuration/history-cleanup", - "self-managed/optimize-deployment/configuration/localization", - "self-managed/optimize-deployment/configuration/object-variables", - "self-managed/optimize-deployment/configuration/clustering", - "self-managed/optimize-deployment/configuration/webhooks", - "self-managed/optimize-deployment/configuration/authorization-management", - "self-managed/optimize-deployment/configuration/user-management", - "self-managed/optimize-deployment/configuration/multi-tenancy", - "self-managed/optimize-deployment/configuration/multiple-engines", - "self-managed/optimize-deployment/configuration/setup-event-based-processes", - "self-managed/optimize-deployment/configuration/telemetry", - "self-managed/optimize-deployment/configuration/common-problems" - ] - }, - { - "Plugins": [ - "self-managed/optimize-deployment/plugins/plugin-system", - "self-managed/optimize-deployment/plugins/businesskey-import-plugin", - "self-managed/optimize-deployment/plugins/decision-import-plugin", - "self-managed/optimize-deployment/plugins/elasticsearch-header", - "self-managed/optimize-deployment/plugins/engine-rest-filter-plugin", - "self-managed/optimize-deployment/plugins/single-sign-on", - "self-managed/optimize-deployment/plugins/variable-import-plugin" - ] - }, - "self-managed/optimize-deployment/reimport", - { - "Migration & update": [ - "self-managed/optimize-deployment/migration-update/instructions", - "self-managed/optimize-deployment/migration-update/3.9-to-3.10", - "self-managed/optimize-deployment/migration-update/3.9-preview-1-to-3.9", - "self-managed/optimize-deployment/migration-update/3.8-to-3.9-preview-1", - "self-managed/optimize-deployment/migration-update/3.7-to-3.8", - "self-managed/optimize-deployment/migration-update/3.6-to-3.7", - "self-managed/optimize-deployment/migration-update/3.5-to-3.6", - "self-managed/optimize-deployment/migration-update/3.4-to-3.5", - "self-managed/optimize-deployment/migration-update/3.3-to-3.4", - "self-managed/optimize-deployment/migration-update/3.2-to-3.3", - 
"self-managed/optimize-deployment/migration-update/3.1-to-3.2", - "self-managed/optimize-deployment/migration-update/3.0-to-3.1", - "self-managed/optimize-deployment/migration-update/2.7-to-3.0", - "self-managed/optimize-deployment/migration-update/2.6-to-2.7", - "self-managed/optimize-deployment/migration-update/2.5-to-2.6", - "self-managed/optimize-deployment/migration-update/2.4-to-2.5", - "self-managed/optimize-deployment/migration-update/2.3-to-2.4", - "self-managed/optimize-deployment/migration-update/2.2-to-2.3", - "self-managed/optimize-deployment/migration-update/2.1-to-2.2" - ] - }, - { - "Advanced features": [ - "self-managed/optimize-deployment/advanced-features/engine-data-deletion", - "self-managed/optimize-deployment/advanced-features/import-guide" - ] - } - ] - }, - - { - "Identity": [ - { - "type": "link", - "label": "What is Identity?", - "href": "/docs/8.2/self-managed/identity/what-is-identity/" - }, - { - "type": "link", - "label": "Installation and first steps", - "href": "/docs/8.2/self-managed/identity/getting-started/install-identity/" - }, - - { - "User guide": [ - { - "Configuration": [ - { - "type": "link", - "label": "Making Identity production ready", - "href": "/docs/8.2/self-managed/identity/user-guide/configuration/making-identity-production-ready/" - }, - { - "type": "link", - "label": "Configuring an external identity provider", - "href": "/docs/8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider/" - }, - { - "type": "link", - "label": "Configure logging", - "href": "/docs/8.2/self-managed/identity/user-guide/configuration/configure-logging/" - }, - { - "type": "link", - "label": "Connect to an existing Keycloak instance", - "href": "/docs/8.2/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak/" - } - ] - }, - - { - "Roles": [ - { - "type": "link", - "label": "Add and assign a role", - "href": "/docs/8.2/self-managed/identity/user-guide/roles/add-assign-role/" - }, - { - "type": "link", - "label": "Add and assign a permission", - "href": "/docs/8.2/self-managed/identity/user-guide/roles/add-assign-permission/" - } - ] - }, - - { - "Groups": [ - { - "type": "link", - "label": "Create a group", - "href": "/docs/8.2/self-managed/identity/user-guide/groups/create-group/" - }, - { - "type": "link", - "label": "Assign users and roles to a group", - "href": "/docs/8.2/self-managed/identity/user-guide/groups/assign-users-roles-to-group/" - } - ] - }, - - { - "Authorizations": [ - { - "type": "link", - "label": "Managing resource authorizations", - "href": "/docs/8.2/self-managed/identity/user-guide/authorizations/managing-resource-authorizations/" - }, - { - "type": "link", - "label": "Managing user access", - "href": "/docs/8.2/self-managed/identity/user-guide/authorizations/managing-user-access/" - }, - { - "type": "link", - "label": "Generating machine-to-machine (M2M) tokens", - "href": "/docs/8.2/self-managed/identity/user-guide/authorizations/generating-m2m-tokens/" - } - ] - }, - - { - "Additional features": [ - { - "type": "link", - "label": "Adding an API", - "href": "/docs/8.2/self-managed/identity/user-guide/additional-features/adding-an-api/" - }, - { - "type": "link", - "label": "Incorporate applications", - "href": "/docs/8.2/self-managed/identity/user-guide/additional-features/incorporate-applications/" - } - ] - } - ] - }, - - { - "Deployment": [ - { - "type": "link", - "label": "Configuration variables", - "href": "/docs/8.2/self-managed/identity/deployment/configuration-variables/" 
- }, - { - "type": "link", - "label": "Application monitoring", - "href": "/docs/8.2/self-managed/identity/deployment/application-monitoring/" - }, - { - "type": "link", - "label": "Starting configuration", - "href": "/docs/8.2/self-managed/identity/deployment/starting-configuration-for-identity/" - } - ] - }, - - { - "Troubleshooting": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/identity/troubleshooting/troubleshoot-identity/" - }, - { - "type": "link", - "label": "Common problems", - "href": "/docs/8.2/self-managed/identity/troubleshooting/common-problems/" - } - ] - } - ] - }, - - { - "Modeler": [ - { - "Web Modeler": [ - { - "type": "link", - "label": "Installation", - "href": "/docs/8.2/self-managed/modeler/web-modeler/installation/" - }, - - { - "Configuration": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/8.2/self-managed/modeler/web-modeler/configuration/" - }, - { - "type": "link", - "label": "Database", - "href": "/docs/8.2/self-managed/modeler/web-modeler/configuration/database/" - }, - { - "type": "link", - "label": "Logging", - "href": "/docs/8.2/self-managed/modeler/web-modeler/configuration/logging/" - } - ] - }, - - { - "Troubleshooting": [ - { - "type": "link", - "label": "Database connection", - "href": "/docs/8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection/" - }, - { - "type": "link", - "label": "Zeebe connection", - "href": "/docs/8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection/" - }, - { - "type": "link", - "label": "Login issues", - "href": "/docs/8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login/" - } - ] - } - ] - }, - - { - "Desktop Modeler": [ - { - "type": "link", - "label": "Deploy diagram", - "href": "/docs/8.2/self-managed/modeler/desktop-modeler/deploy-to-self-managed/" - } - ] - } - ] - }, - - { - "Backup and restore": [ - { - "type": "link", - "label": "Backup and restore", - "href": "/docs/8.2/self-managed/backup-restore/backup-and-restore/" - }, - { - "type": "link", - "label": "Backup and restore Optimize data", - "href": "/docs/8.2/self-managed/backup-restore/optimize-backup/" - }, - { - "type": "link", - "label": "Backup and restore Operate and Tasklist data", - "href": "/docs/8.2/self-managed/backup-restore/operate-tasklist-backup/" - }, - { - "type": "link", - "label": "Backup and restore Zeebe data", - "href": "/docs/8.2/self-managed/backup-restore/zeebe-backup-and-restore/" - }, - { - "type": "link", - "label": "Backup and restore Web Modeler data", - "href": "/docs/8.2/self-managed/backup-restore/modeler-backup-and-restore/" - } - ] - }, - - { - "Troubleshooting": [ - { - "type": "link", - "label": "Log levels", - "href": "/docs/8.2/self-managed/troubleshooting/log-levels/" - } - ] - } - ] -} diff --git a/optimize_versioned_sidebars/version-3.11.0-sidebars.json b/optimize_versioned_sidebars/version-3.11.0-sidebars.json index 48b9370a317..71aafb62345 100644 --- a/optimize_versioned_sidebars/version-3.11.0-sidebars.json +++ b/optimize_versioned_sidebars/version-3.11.0-sidebars.json @@ -1226,66 +1226,6 @@ "type": "link", "label": "Connector templates", "href": "/docs/8.3/components/connectors/custom-built-connectors/connector-templates/" - }, - - { - "Update guide": [ - { - "type": "link", - "label": "Connector SDK updates", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/introduction/" - }, - { - "type": "link", - "label": "Update 0.10 to 0.11", - "href": 
"/docs/8.3/components/connectors/custom-built-connectors/update-guide/0100-to-0110/" - }, - { - "type": "link", - "label": "Update 0.9 to 0.10", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/090-to-0100/" - }, - { - "type": "link", - "label": "Update 0.8 to 0.9", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/080-to-090/" - }, - { - "type": "link", - "label": "Update 0.7 to 0.8", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/070-to-080/" - }, - { - "type": "link", - "label": "Update 0.6 to 0.7", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/060-to-070/" - }, - { - "type": "link", - "label": "Update 0.5 to 0.6", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/050-to-060/" - }, - { - "type": "link", - "label": "Update 0.4 to 0.5", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/040-to-050/" - }, - { - "type": "link", - "label": "Update 0.3 to 0.4", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/030-to-040/" - }, - { - "type": "link", - "label": "Update 0.2 to 0.3", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/020-to-030/" - }, - { - "type": "link", - "label": "Update 0.1 to 0.2", - "href": "/docs/8.3/components/connectors/custom-built-connectors/update-guide/010-to-020/" - } - ] } ] } diff --git a/optimize_versioned_sidebars/version-3.12.0-sidebars.json b/optimize_versioned_sidebars/version-3.12.0-sidebars.json index a221179be75..ec3a8f20036 100644 --- a/optimize_versioned_sidebars/version-3.12.0-sidebars.json +++ b/optimize_versioned_sidebars/version-3.12.0-sidebars.json @@ -1281,66 +1281,6 @@ "type": "link", "label": "Connector templates", "href": "/docs/8.4/components/connectors/custom-built-connectors/connector-templates/" - }, - - { - "Update guide": [ - { - "type": "link", - "label": "Connector SDK updates", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/introduction/" - }, - { - "type": "link", - "label": "Update 0.10 to 0.11", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/0100-to-0110/" - }, - { - "type": "link", - "label": "Update 0.9 to 0.10", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/090-to-0100/" - }, - { - "type": "link", - "label": "Update 0.8 to 0.9", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/080-to-090/" - }, - { - "type": "link", - "label": "Update 0.7 to 0.8", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/070-to-080/" - }, - { - "type": "link", - "label": "Update 0.6 to 0.7", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/060-to-070/" - }, - { - "type": "link", - "label": "Update 0.5 to 0.6", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/050-to-060/" - }, - { - "type": "link", - "label": "Update 0.4 to 0.5", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/040-to-050/" - }, - { - "type": "link", - "label": "Update 0.3 to 0.4", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/030-to-040/" - }, - { - "type": "link", - "label": "Update 0.2 to 0.3", - "href": "/docs/8.4/components/connectors/custom-built-connectors/update-guide/020-to-030/" - }, - { - "type": "link", - "label": "Update 0.1 to 0.2", - "href": 
"/docs/8.4/components/connectors/custom-built-connectors/update-guide/010-to-020/" - } - ] } ] } diff --git a/optimize_versioned_sidebars/version-3.13.0-sidebars.json b/optimize_versioned_sidebars/version-3.13.0-sidebars.json index 1689b524000..34efe61688c 100644 --- a/optimize_versioned_sidebars/version-3.13.0-sidebars.json +++ b/optimize_versioned_sidebars/version-3.13.0-sidebars.json @@ -1332,66 +1332,6 @@ "type": "link", "label": "Connector templates", "href": "/docs/8.5/components/connectors/custom-built-connectors/connector-templates/" - }, - - { - "Update guide": [ - { - "type": "link", - "label": "Connector SDK updates", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/introduction/" - }, - { - "type": "link", - "label": "Update 0.10 to 0.11", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/0100-to-0110/" - }, - { - "type": "link", - "label": "Update 0.9 to 0.10", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/090-to-0100/" - }, - { - "type": "link", - "label": "Update 0.8 to 0.9", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/080-to-090/" - }, - { - "type": "link", - "label": "Update 0.7 to 0.8", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/070-to-080/" - }, - { - "type": "link", - "label": "Update 0.6 to 0.7", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/060-to-070/" - }, - { - "type": "link", - "label": "Update 0.5 to 0.6", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/050-to-060/" - }, - { - "type": "link", - "label": "Update 0.4 to 0.5", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/040-to-050/" - }, - { - "type": "link", - "label": "Update 0.3 to 0.4", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/030-to-040/" - }, - { - "type": "link", - "label": "Update 0.2 to 0.3", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/020-to-030/" - }, - { - "type": "link", - "label": "Update 0.1 to 0.2", - "href": "/docs/8.5/components/connectors/custom-built-connectors/update-guide/010-to-020/" - } - ] } ] } diff --git a/optimize_versioned_sidebars/version-3.14.0-sidebars.json b/optimize_versioned_sidebars/version-3.14.0-sidebars.json index 83b60563c18..638ba5e0288 100644 --- a/optimize_versioned_sidebars/version-3.14.0-sidebars.json +++ b/optimize_versioned_sidebars/version-3.14.0-sidebars.json @@ -1360,65 +1360,6 @@ "type": "link", "label": "Connector templates", "href": "/docs/components/connectors/custom-built-connectors/connector-templates/" - }, - { - "Update guide": [ - { - "type": "link", - "label": "Connector SDK updates", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/introduction/" - }, - { - "type": "link", - "label": "Update 0.10 to 0.11", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/0100-to-0110/" - }, - { - "type": "link", - "label": "Update 0.9 to 0.10", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/090-to-0100/" - }, - { - "type": "link", - "label": "Update 0.8 to 0.9", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/080-to-090/" - }, - { - "type": "link", - "label": "Update 0.7 to 0.8", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/070-to-080/" - }, - { - "type": "link", - "label": "Update 0.6 to 0.7", 
- "href": "/docs/components/connectors/custom-built-connectors/update-guide/060-to-070/" - }, - { - "type": "link", - "label": "Update 0.5 to 0.6", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/050-to-060/" - }, - { - "type": "link", - "label": "Update 0.4 to 0.5", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/040-to-050/" - }, - { - "type": "link", - "label": "Update 0.3 to 0.4", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/030-to-040/" - }, - { - "type": "link", - "label": "Update 0.2 to 0.3", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/020-to-030/" - }, - { - "type": "link", - "label": "Update 0.1 to 0.2", - "href": "/docs/components/connectors/custom-built-connectors/update-guide/010-to-020/" - } - ] } ] } @@ -2485,11 +2426,6 @@ ] } ] - }, - { - "type": "link", - "label": "Migrate to Zeebe user tasks", - "href": "/docs/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks/" } ] }, diff --git a/optimize_versioned_sidebars/version-3.7.0-sidebars.json b/optimize_versioned_sidebars/version-3.7.0-sidebars.json deleted file mode 100644 index 9e287c53fa7..00000000000 --- a/optimize_versioned_sidebars/version-3.7.0-sidebars.json +++ /dev/null @@ -1,1260 +0,0 @@ -{ - "Components": [ - { - "type": "link", - "label": "Overview Components", - "href": "/docs/1.3/components/" - }, - - { - "Concepts": [ - { - "type": "link", - "label": "What is Camunda Cloud?", - "href": "/docs/1.3/components/concepts/what-is-camunda-cloud/" - }, - { - "type": "link", - "label": "Processes", - "href": "/docs/1.3/components/concepts/processes/" - }, - { - "type": "link", - "label": "Job workers", - "href": "/docs/1.3/components/concepts/job-workers/" - }, - { - "type": "link", - "label": "Process instance creation", - "href": "/docs/1.3/components/concepts/process-instance-creation/" - }, - { - "type": "link", - "label": "Messages", - "href": "/docs/1.3/components/concepts/messages/" - }, - { - "type": "link", - "label": "Incidents", - "href": "/docs/1.3/components/concepts/incidents/" - }, - { - "type": "link", - "label": "Variables", - "href": "/docs/1.3/components/concepts/variables/" - }, - { - "type": "link", - "label": "Expressions", - "href": "/docs/1.3/components/concepts/expressions/" - } - ] - }, - - { - "Cloud Console": [ - { - "type": "link", - "label": "Introduction to Cloud Console", - "href": "/docs/1.3/components/cloud-console/introduction/" - }, - - { - "Manage your organization": [ - { - "type": "link", - "label": "Organization management", - "href": "/docs/1.3/components/cloud-console/manage-organization/organization-settings/" - }, - { - "type": "link", - "label": "Manage users of your organization", - "href": "/docs/1.3/components/cloud-console/manage-organization/manage-users/" - }, - { - "type": "link", - "label": "View organization activity", - "href": "/docs/1.3/components/cloud-console/manage-organization/view-organization-activity/" - }, - { - "type": "link", - "label": "Usage history", - "href": "/docs/1.3/components/cloud-console/manage-organization/usage-history/" - }, - { - "type": "link", - "label": "Update billing reservations", - "href": "/docs/1.3/components/cloud-console/manage-organization/update-billing-reservations/" - }, - { - "type": "link", - "label": "Switch organization", - "href": "/docs/1.3/components/cloud-console/manage-organization/switch-organization/" - } - ] - }, - - { - "Manage clusters": [ - { - "type": "link", - "label": "Create a cluster", 
- "href": "/docs/1.3/components/cloud-console/manage-clusters/create-cluster/" - }, - { - "type": "link", - "label": "Rename your cluster", - "href": "/docs/1.3/components/cloud-console/manage-clusters/rename-cluster/" - }, - { - "type": "link", - "label": "Delete your cluster", - "href": "/docs/1.3/components/cloud-console/manage-clusters/delete-cluster/" - }, - { - "type": "link", - "label": "Manage API clients", - "href": "/docs/1.3/components/cloud-console/manage-clusters/manage-api-clients/" - }, - { - "type": "link", - "label": "Manage alerts", - "href": "/docs/1.3/components/cloud-console/manage-clusters/manage-alerts/" - }, - { - "type": "link", - "label": "Manage IP Whitelists", - "href": "/docs/1.3/components/cloud-console/manage-clusters/manage-ip-whitelists/" - } - ] - }, - - { - "Manage your plan": [ - { - "type": "link", - "label": "Available plans", - "href": "/docs/1.3/components/cloud-console/manage-plan/available-plans/" - }, - { - "type": "link", - "label": "Upgrade to a Professional Plan", - "href": "/docs/1.3/components/cloud-console/manage-plan/upgrade-to-professional-plan/" - } - ] - }, - - { - "Troubleshooting": [ - { - "type": "link", - "label": "Common pitfalls", - "href": "/docs/1.3/components/cloud-console/troubleshooting/common-pitfalls/" - }, - { - "type": "link", - "label": "Feedback and support", - "href": "/docs/1.3/components/cloud-console/troubleshooting/feedback-and-support/" - } - ] - } - ] - }, - - { - "Modeler": [ - { - "type": "link", - "label": "About Modeler", - "href": "/docs/1.3/components/modeler/about-modeler/" - }, - - { - "Web Modeler": [ - { - "type": "link", - "label": "New Web Modeler", - "href": "/docs/1.3/components/modeler/web-modeler/new-web-modeler/" - }, - { - "type": "link", - "label": "Launch Web Modeler", - "href": "/docs/1.3/components/modeler/web-modeler/launch-web-modeler/" - }, - { - "type": "link", - "label": "Model your first diagram", - "href": "/docs/1.3/components/modeler/web-modeler/model-your-first-diagram/" - }, - { - "type": "link", - "label": "Import diagram", - "href": "/docs/1.3/components/modeler/web-modeler/import-diagram/" - }, - { - "type": "link", - "label": "Save and deploy your diagram", - "href": "/docs/1.3/components/modeler/web-modeler/save-and-deploy/" - }, - { - "type": "link", - "label": "Start a new process instance", - "href": "/docs/1.3/components/modeler/web-modeler/start-instance/" - }, - { - "type": "link", - "label": "Collaboration", - "href": "/docs/1.3/components/modeler/web-modeler/collaboration/" - }, - { - "type": "link", - "label": "Milestones", - "href": "/docs/1.3/components/modeler/web-modeler/milestones/" - }, - { - "type": "link", - "label": "Token simulation", - "href": "/docs/1.3/components/modeler/web-modeler/token-simulation/" - } - ] - }, - - { - "Desktop Modeler": [ - { - "type": "link", - "label": "Install the Modeler", - "href": "/docs/1.3/components/modeler/desktop-modeler/install-the-modeler/" - }, - { - "type": "link", - "label": "Model your first diagram", - "href": "/docs/1.3/components/modeler/desktop-modeler/model-your-first-diagram/" - }, - { - "type": "link", - "label": "Connect to Camunda Cloud", - "href": "/docs/1.3/components/modeler/desktop-modeler/connect-to-camunda-cloud/" - }, - { - "type": "link", - "label": "Start a new process instance", - "href": "/docs/1.3/components/modeler/desktop-modeler/start-instance/" - }, - - { - "Element templates": [ - { - "type": "link", - "label": "About element templates", - "href": 
"/docs/1.3/components/modeler/desktop-modeler/element-templates/about-templates/" - }, - { - "type": "link", - "label": "Configuring templates", - "href": "/docs/1.3/components/modeler/desktop-modeler/element-templates/configuring-templates/" - }, - { - "type": "link", - "label": "Using templates", - "href": "/docs/1.3/components/modeler/desktop-modeler/element-templates/using-templates/" - }, - { - "type": "link", - "label": "Defining templates", - "href": "/docs/1.3/components/modeler/desktop-modeler/element-templates/defining-templates/" - }, - { - "type": "link", - "label": "Additional resources", - "href": "/docs/1.3/components/modeler/desktop-modeler/element-templates/additional-resources/" - } - ] - }, - - { - "Additional configuration": [ - { - "type": "link", - "label": "Flags", - "href": "/docs/1.3/components/modeler/desktop-modeler/flags/" - }, - { - "type": "link", - "label": "Plugins", - "href": "/docs/1.3/components/modeler/desktop-modeler/plugins/" - }, - { - "type": "link", - "label": "Search paths", - "href": "/docs/1.3/components/modeler/desktop-modeler/search-paths/" - }, - { - "type": "link", - "label": "Telemetry", - "href": "/docs/1.3/components/modeler/desktop-modeler/telemetry/" - } - ] - } - ] - }, - - { - "BPMN": [ - { - "type": "link", - "label": "BPMN in Modeler", - "href": "/docs/1.3/components/modeler/bpmn/" - }, - { - "type": "link", - "label": "BPMN primer", - "href": "/docs/1.3/components/modeler/bpmn/bpmn-primer/" - }, - { - "type": "link", - "label": "BPMN coverage", - "href": "/docs/1.3/components/modeler/bpmn/bpmn-coverage/" - }, - { - "type": "link", - "label": "Data flow", - "href": "/docs/1.3/components/modeler/bpmn/data-flow/" - }, - - { - "Tasks": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/modeler/bpmn/tasks/" - }, - { - "type": "link", - "label": "Service tasks", - "href": "/docs/1.3/components/modeler/bpmn/service-tasks/" - }, - { - "type": "link", - "label": "User tasks", - "href": "/docs/1.3/components/modeler/bpmn/user-tasks/" - }, - { - "type": "link", - "label": "Receive tasks", - "href": "/docs/1.3/components/modeler/bpmn/receive-tasks/" - }, - { - "type": "link", - "label": "Business rule tasks", - "href": "/docs/1.3/components/modeler/bpmn/business-rule-tasks/" - }, - { - "type": "link", - "label": "Script tasks", - "href": "/docs/1.3/components/modeler/bpmn/script-tasks/" - }, - { - "type": "link", - "label": "Send tasks", - "href": "/docs/1.3/components/modeler/bpmn/send-tasks/" - }, - { - "type": "link", - "label": "Manual tasks", - "href": "/docs/1.3/components/modeler/bpmn/manual-tasks/" - } - ] - }, - - { - "Gateways": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/modeler/bpmn/gateways/" - }, - { - "type": "link", - "label": "Exclusive gateway", - "href": "/docs/1.3/components/modeler/bpmn/exclusive-gateways/" - }, - { - "type": "link", - "label": "Parallel gateway", - "href": "/docs/1.3/components/modeler/bpmn/parallel-gateways/" - }, - { - "type": "link", - "label": "Event-based gateway", - "href": "/docs/1.3/components/modeler/bpmn/event-based-gateways/" - } - ] - }, - - { - "Events": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/modeler/bpmn/events/" - }, - { - "type": "link", - "label": "None events", - "href": "/docs/1.3/components/modeler/bpmn/none-events/" - }, - { - "type": "link", - "label": "Message events", - "href": "/docs/1.3/components/modeler/bpmn/message-events/" - }, - { - "type": "link", - "label": "Timer events", 
- "href": "/docs/1.3/components/modeler/bpmn/timer-events/" - }, - { - "type": "link", - "label": "Error events", - "href": "/docs/1.3/components/modeler/bpmn/error-events/" - } - ] - }, - - { - "Subprocesses": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/modeler/bpmn/subprocesses/" - }, - { - "type": "link", - "label": "Embedded subprocess", - "href": "/docs/1.3/components/modeler/bpmn/embedded-subprocesses/" - }, - { - "type": "link", - "label": "Call activities", - "href": "/docs/1.3/components/modeler/bpmn/call-activities/" - }, - { - "type": "link", - "label": "Event subprocess", - "href": "/docs/1.3/components/modeler/bpmn/event-subprocesses/" - } - ] - }, - - { - "Markers": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/modeler/bpmn/markers/" - }, - { - "type": "link", - "label": "Multi-Instance", - "href": "/docs/1.3/components/modeler/bpmn/multi-instance/" - } - ] - } - ] - }, - - { - "DMN": [ - { - "type": "link", - "label": "Editing DMN in Desktop Modeler", - "href": "/docs/1.3/components/modeler/dmn/" - } - ] - }, - - { - "Forms": [ - { - "type": "link", - "label": "Camunda Forms Reference", - "href": "/docs/1.3/components/modeler/forms/camunda-forms-reference/" - } - ] - } - ] - }, - - { - "Zeebe": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/1.3/components/zeebe/zeebe-overview/" - }, - - { - "Technical concepts": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/zeebe/technical-concepts/" - }, - { - "type": "link", - "label": "Architecture", - "href": "/docs/1.3/components/zeebe/technical-concepts/architecture/" - }, - { - "type": "link", - "label": "Clustering", - "href": "/docs/1.3/components/zeebe/technical-concepts/clustering/" - }, - { - "type": "link", - "label": "Partitions", - "href": "/docs/1.3/components/zeebe/technical-concepts/partitions/" - }, - { - "type": "link", - "label": "Internal processing", - "href": "/docs/1.3/components/zeebe/technical-concepts/internal-processing/" - }, - { - "type": "link", - "label": "Process lifecycles", - "href": "/docs/1.3/components/zeebe/technical-concepts/process-lifecycles/" - }, - { - "type": "link", - "label": "Protocols", - "href": "/docs/1.3/components/zeebe/technical-concepts/protocols/" - }, - { - "type": "link", - "label": "Exporters", - "href": "/docs/1.3/components/zeebe/technical-concepts/exporters/" - } - ] - }, - - { - "Open Source community": [ - { - "type": "link", - "label": "Community contributions", - "href": "/docs/1.3/components/zeebe/open-source/community-contributions/" - }, - { - "type": "link", - "label": "Get help and get involved", - "href": "/docs/1.3/components/zeebe/open-source/get-help-get-involved/" - } - ] - } - ] - }, - - { - "Operate": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/1.3/components/operate/" - }, - - { - "User guide": [ - { - "type": "link", - "label": "Getting familiar with Operate", - "href": "/docs/1.3/components/operate/userguide/basic-operate-navigation/" - }, - { - "type": "link", - "label": "Variables and incidents", - "href": "/docs/1.3/components/operate/userguide/resolve-incidents-update-variables/" - }, - { - "type": "link", - "label": "Selections and operations", - "href": "/docs/1.3/components/operate/userguide/selections-operations/" - }, - { - "type": "link", - "label": "Delete finished instances", - "href": "/docs/1.3/components/operate/userguide/delete-finished-instances/" - }, - { - "type": "link", - "label": "Giving feedback 
and asking questions", - "href": "/docs/1.3/components/operate/userguide/operate-feedback-and-questions/" - } - ] - } - ] - }, - - { - "Optimize": [ - "components/what-is-optimize", - { - "User guide": [ - "components/userguide/collections-dashboards-reports", - "components/userguide/data-sources", - "components/userguide/creating-dashboards", - "components/userguide/creating-reports", - "components/userguide/combined-reports", - { - "Process analysis": [ - "components/userguide/process-analysis/overview", - "components/userguide/process-analysis/outlier-analysis", - "components/userguide/process-analysis/branch-analysis", - { - "Report analysis": [ - "components/userguide/process-analysis/report-analysis/overview", - "components/userguide/process-analysis/report-analysis/edit-mode", - "components/userguide/process-analysis/report-analysis/view-mode" - ] - } - ] - }, - { - "Decision analysis": [ - "components/userguide/decision-analysis/overview", - "components/userguide/decision-analysis/decision-report", - "components/userguide/decision-analysis/decision-filter" - ] - }, - { - "Additional features": [ - "components/userguide/additional-features/alerts", - "components/userguide/additional-features/event-based-processes", - "components/userguide/additional-features/export-import", - "components/userguide/additional-features/filters", - "components/userguide/additional-features/footer", - "components/userguide/additional-features/variable-labeling" - ] - } - ] - } - ] - }, - - { - "Tasklist": [ - { - "type": "link", - "label": "Introduction", - "href": "/docs/1.3/components/tasklist/introduction/" - }, - - { - "User guide": [ - { - "API mode": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/tasklist/userguide/api/overview/" - }, - { - "type": "link", - "label": "Tutorial", - "href": "/docs/1.3/components/tasklist/userguide/api/tutorial/" - } - ] - }, - - { - "User interface mode": [ - { - "type": "link", - "label": "Overview and example use case", - "href": "/docs/1.3/components/tasklist/userguide/user-interface/overview/" - } - ] - } - ] - } - ] - }, - - { - "Best Practices": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/components/best-practices/overview/" - }, - - { - "Project Management": [ - { - "type": "link", - "label": "Following the Customer Success Path", - "href": "/docs/1.3/components/best-practices/management/following-the-customer-success-path/" - }, - { - "type": "link", - "label": "Doing a proper POC", - "href": "/docs/1.3/components/best-practices/management/doing-a-proper-poc/" - } - ] - }, - - { - "Architecture": [ - { - "type": "link", - "label": "Deciding about your stack", - "href": "/docs/1.3/components/best-practices/architecture/deciding-about-your-stack/" - }, - { - "type": "link", - "label": "Sizing your environment", - "href": "/docs/1.3/components/best-practices/architecture/sizing-your-environment/" - }, - { - "type": "link", - "label": "Understanding human task management", - "href": "/docs/1.3/components/best-practices/architecture/understanding-human-tasks-management/" - } - ] - }, - - { - "Development": [ - { - "type": "link", - "label": "Connecting the workflow engine with your world", - "href": "/docs/1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world/" - }, - { - "type": "link", - "label": "Service integration patterns with BPMN", - "href": "/docs/1.3/components/best-practices/development/service-integration-patterns/" - }, - { - "type": "link", - "label": "Writing 
good workers", - "href": "/docs/1.3/components/best-practices/development/writing-good-workers/" - }, - { - "type": "link", - "label": "Dealing with problems and exceptions", - "href": "/docs/1.3/components/best-practices/development/dealing-with-problems-and-exceptions/" - }, - { - "type": "link", - "label": "Handling data in processes", - "href": "/docs/1.3/components/best-practices/development/handling-data-in-processes/" - }, - { - "type": "link", - "label": "Routing events to processes", - "href": "/docs/1.3/components/best-practices/development/routing-events-to-processes/" - }, - { - "type": "link", - "label": "Testing process definitions", - "href": "/docs/1.3/components/best-practices/development/testing-process-definitions/" - } - ] - }, - - { - "Modeling": [ - { - "type": "link", - "label": "Creating readable process models", - "href": "/docs/1.3/components/best-practices/modeling/creating-readable-process-models/" - }, - { - "type": "link", - "label": "Naming BPMN elements", - "href": "/docs/1.3/components/best-practices/modeling/naming-bpmn-elements/" - }, - { - "type": "link", - "label": "Naming technically relevant IDs", - "href": "/docs/1.3/components/best-practices/modeling/naming-technically-relevant-ids/" - }, - { - "type": "link", - "label": "Modeling beyond the happy path", - "href": "/docs/1.3/components/best-practices/modeling/modeling-beyond-the-happy-path/" - }, - { - "type": "link", - "label": "Modeling with situation patterns", - "href": "/docs/1.3/components/best-practices/modeling/modeling-with-situation-patterns/" - }, - { - "type": "link", - "label": "Building flexibility into BPMN models", - "href": "/docs/1.3/components/best-practices/modeling/building-flexibility-into-bpmn-models/" - }, - { - "type": "link", - "label": "Choosing the DMN Hit Policy", - "href": "/docs/1.3/components/best-practices/modeling/choosing-the-dmn-hit-policy/" - } - ] - }, - - { - "Operations": [ - { - "type": "link", - "label": "Versioning process definitions", - "href": "/docs/1.3/components/best-practices/operations/versioning-process-definitions/" - }, - { - "type": "link", - "label": "Reporting about processes", - "href": "/docs/1.3/components/best-practices/operations/reporting-about-processes/" - } - ] - }, - - { - "Camunda 7 specific": [ - { - "type": "link", - "label": "Deciding about your Camunda Platform 7 stack", - "href": "/docs/1.3/components/best-practices/architecture/deciding-about-your-stack-c7/" - }, - { - "type": "link", - "label": "Sizing Your Camunda Platform 7 Environment", - "href": "/docs/1.3/components/best-practices/architecture/sizing-your-environment-c7/" - }, - { - "type": "link", - "label": "Invoking services from a Camunda 7 process", - "href": "/docs/1.3/components/best-practices/development/invoking-services-from-the-process-c7/" - }, - { - "type": "link", - "label": "Understanding Camunda 7 transaction handling", - "href": "/docs/1.3/components/best-practices/development/understanding-transaction-handling-c7/" - }, - { - "type": "link", - "label": "Operating Camunda 7", - "href": "/docs/1.3/components/best-practices/operations/operating-camunda-c7/" - }, - { - "type": "link", - "label": "Performance tuning Camunda 7", - "href": "/docs/1.3/components/best-practices/operations/performance-tuning-camunda-c7/" - }, - { - "type": "link", - "label": "Securing Camunda 7", - "href": "/docs/1.3/components/best-practices/operations/securing-camunda-c7/" - }, - { - "type": "link", - "label": "Extending human task management in Camunda Platform 7", - "href": 
"/docs/1.3/components/best-practices/architecture/extending-human-task-management-c7/" - } - ] - } - ] - } - ], - "Self-Managed": [ - { - "type": "link", - "label": "Camunda Cloud Self-Managed", - "href": "/docs/1.3/self-managed/about-self-managed/" - }, - - { - "Zeebe": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/self-managed/zeebe-deployment/" - }, - - { - "Local installation": [ - { - "type": "link", - "label": "Install", - "href": "/docs/1.3/self-managed/zeebe-deployment/local/install/" - }, - { - "type": "link", - "label": "Quickstart", - "href": "/docs/1.3/self-managed/zeebe-deployment/local/quickstart/" - } - ] - }, - - { - "type": "link", - "label": "Docker container", - "href": "/docs/1.3/self-managed/zeebe-deployment/docker/install/" - }, - - { - "Kubernetes deployment": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/self-managed/zeebe-deployment/kubernetes/" - }, - { - "type": "link", - "label": "Camunda Cloud Helm charts", - "href": "/docs/1.3/self-managed/zeebe-deployment/kubernetes/helm/installing-helm/" - }, - { - "type": "link", - "label": "Accessing Operate and Tasklist outside the cluster", - "href": "/docs/1.3/self-managed/zeebe-deployment/kubernetes/helm/accessing-operate-tasklist/" - } - ] - }, - - { - "Configuration": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/self-managed/zeebe-deployment/configuration/" - }, - { - "type": "link", - "label": "Logging", - "href": "/docs/1.3/self-managed/zeebe-deployment/configuration/logging/" - }, - { - "type": "link", - "label": "Gateway health probes", - "href": "/docs/1.3/self-managed/zeebe-deployment/configuration/gateway-health-probes/" - }, - { - "type": "link", - "label": "Environment variables", - "href": "/docs/1.3/self-managed/zeebe-deployment/configuration/environment-variables/" - }, - { - "type": "link", - "label": "Fixed partitioning", - "href": "/docs/1.3/self-managed/zeebe-deployment/configuration/fixed-partitioning/" - }, - { - "type": "link", - "label": "Priority election", - "href": "/docs/1.3/self-managed/zeebe-deployment/configuration/priority-election/" - } - ] - }, - - { - "Security": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/self-managed/zeebe-deployment/security/" - }, - { - "type": "link", - "label": "Secure client communication", - "href": "/docs/1.3/self-managed/zeebe-deployment/security/secure-client-communication/" - }, - { - "type": "link", - "label": "Client authorization", - "href": "/docs/1.3/self-managed/zeebe-deployment/security/client-authorization/" - }, - { - "type": "link", - "label": "Secure cluster communication", - "href": "/docs/1.3/self-managed/zeebe-deployment/security/secure-cluster-communication/" - } - ] - }, - - { - "Operation": [ - { - "type": "link", - "label": "Overview", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/" - }, - { - "type": "link", - "label": "Resource planning", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/resource-planning/" - }, - { - "type": "link", - "label": "Network ports", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/network-ports/" - }, - { - "type": "link", - "label": "Setting up a Zeebe cluster", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/setting-up-a-cluster/" - }, - { - "type": "link", - "label": "Metrics", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/metrics/" - }, - { - "type": "link", - "label": "Health status", - "href": 
"/docs/1.3/self-managed/zeebe-deployment/operations/health/" - }, - { - "type": "link", - "label": "Backpressure", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/backpressure/" - }, - { - "type": "link", - "label": "Disk space", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/disk-space/" - }, - { - "type": "link", - "label": "Update Zeebe", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/update-zeebe/" - }, - { - "type": "link", - "label": "Rebalancing", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/rebalancing/" - }, - { - "type": "link", - "label": "Backups", - "href": "/docs/1.3/self-managed/zeebe-deployment/operations/backups/" - } - ] - } - ] - }, - - { - "Operate": [ - { - "type": "link", - "label": "Install and start Operate", - "href": "/docs/1.3/self-managed/operate-deployment/install-and-start/" - }, - { - "type": "link", - "label": "Configuration", - "href": "/docs/1.3/self-managed/operate-deployment/configuration/" - }, - { - "type": "link", - "label": "Data retention", - "href": "/docs/1.3/self-managed/operate-deployment/data-retention/" - }, - { - "type": "link", - "label": "Schema and migration", - "href": "/docs/1.3/self-managed/operate-deployment/schema-and-migration/" - }, - { - "type": "link", - "label": "Importer and archiver", - "href": "/docs/1.3/self-managed/operate-deployment/importer-and-archiver/" - }, - { - "type": "link", - "label": "Authentication", - "href": "/docs/1.3/self-managed/operate-deployment/authentication/" - }, - { - "type": "link", - "label": "Usage metrics", - "href": "/docs/1.3/self-managed/operate-deployment/usage-metrics/" - } - ] - }, - - { - "Tasklist": [ - { - "type": "link", - "label": "Install and start Tasklist", - "href": "/docs/1.3/self-managed/tasklist-deployment/install-and-start/" - }, - { - "type": "link", - "label": "Configuration", - "href": "/docs/1.3/self-managed/tasklist-deployment/configuration/" - }, - { - "type": "link", - "label": "Authentication", - "href": "/docs/1.3/self-managed/tasklist-deployment/authentication/" - }, - { - "type": "link", - "label": "Usage metrics", - "href": "/docs/1.3/self-managed/tasklist-deployment/usage-metrics/" - } - ] - }, - - { - "Optimize": [ - { - "Setup": [ - "self-managed/optimize-deployment/setup/installation", - "self-managed/optimize-deployment/setup/optimize-license", - "self-managed/optimize-deployment/setup/security-instructions", - "self-managed/optimize-deployment/setup/configuration", - "self-managed/optimize-deployment/setup/user-management", - "self-managed/optimize-deployment/setup/authorization-management", - "self-managed/optimize-deployment/setup/secure-elasticsearch", - "self-managed/optimize-deployment/setup/shared-elasticsearch-cluster", - "self-managed/optimize-deployment/setup/history-cleanup", - "self-managed/optimize-deployment/setup/localization", - "self-managed/optimize-deployment/setup/multi-tenancy", - "self-managed/optimize-deployment/setup/multiple-engines", - "self-managed/optimize-deployment/setup/object-variables", - "self-managed/optimize-deployment/setup/clustering", - "self-managed/optimize-deployment/setup/webhooks", - "self-managed/optimize-deployment/setup/setup-event-based-processes", - "self-managed/optimize-deployment/setup/telemetry", - "self-managed/optimize-deployment/setup/common-problems" - ] - }, - "self-managed/optimize-deployment/setup", - { - "Plugins": [ - "self-managed/optimize-deployment/plugins/plugin-system", - 
"self-managed/optimize-deployment/plugins/businesskey-import-plugin", - "self-managed/optimize-deployment/plugins/decision-import-plugin", - "self-managed/optimize-deployment/plugins/elasticsearch-header", - "self-managed/optimize-deployment/plugins/engine-rest-filter-plugin", - "self-managed/optimize-deployment/plugins/single-sign-on", - "self-managed/optimize-deployment/plugins/variable-import-plugin" - ] - }, - { - "REST API": [ - "self-managed/optimize-deployment/rest-api/authorization", - { - "Dashboard": [ - "self-managed/optimize-deployment/rest-api/dashboard/get-dashboard-ids", - "self-managed/optimize-deployment/rest-api/dashboard/delete-dashboard", - "self-managed/optimize-deployment/rest-api/dashboard/export-dashboard-definitions" - ] - }, - { - "Report": [ - "self-managed/optimize-deployment/rest-api/report/get-report-ids", - "self-managed/optimize-deployment/rest-api/report/delete-report", - "self-managed/optimize-deployment/rest-api/report/export-report-definitions", - "self-managed/optimize-deployment/rest-api/report/get-data-export" - ] - }, - "self-managed/optimize-deployment/rest-api/event-ingestion", - "self-managed/optimize-deployment/rest-api/external-variable-ingestion", - "self-managed/optimize-deployment/rest-api/health-readiness", - "self-managed/optimize-deployment/rest-api/import-entities" - ] - }, - "self-managed/optimize-deployment/reimport", - { - "Migration & Update": [ - "self-managed/optimize-deployment/migration-update/instructions", - "self-managed/optimize-deployment/migration-update/3.6-to-3.7", - "self-managed/optimize-deployment/migration-update/3.5-to-3.6", - "self-managed/optimize-deployment/migration-update/3.4-to-3.5", - "self-managed/optimize-deployment/migration-update/3.3-to-3.4", - "self-managed/optimize-deployment/migration-update/3.2-to-3.3", - "self-managed/optimize-deployment/migration-update/3.1-to-3.2", - "self-managed/optimize-deployment/migration-update/3.0-to-3.1", - "self-managed/optimize-deployment/migration-update/2.7-to-3.0", - "self-managed/optimize-deployment/migration-update/2.6-to-2.7", - "self-managed/optimize-deployment/migration-update/2.5-to-2.6", - "self-managed/optimize-deployment/migration-update/2.4-to-2.5", - "self-managed/optimize-deployment/migration-update/2.3-to-2.4", - "self-managed/optimize-deployment/migration-update/2.2-to-2.3", - "self-managed/optimize-deployment/migration-update/2.1-to-2.2" - ] - }, - { - "Optimize Explained": [ - "self-managed/optimize-deployment/optimize-explained/engine-data-deletion", - "self-managed/optimize-deployment/optimize-explained/import-guide" - ] - } - ] - }, - - { - "IAM": [ - { - "type": "link", - "label": "What is IAM?", - "href": "/docs/1.3/self-managed/iam/what-is-iam/" - }, - - { - "Getting started": [ - { - "Running IAM with Docker": [ - { - "type": "link", - "label": "Step 1: Setup environment", - "href": "/docs/1.3/self-managed/iam/getting-started/docker/setup-environment/" - }, - { - "type": "link", - "label": "Step 2: Start IAM", - "href": "/docs/1.3/self-managed/iam/getting-started/docker/start-iam/" - }, - { - "type": "link", - "label": "Step 3: Accessing the UI", - "href": "/docs/1.3/self-managed/iam/getting-started/docker/accessing-the-ui/" - } - ] - } - ] - }, - - { - "Deployment": [ - { - "type": "link", - "label": "Configuration variables", - "href": "/docs/1.3/self-managed/iam/deployment/configuration-variables/" - }, - { - "type": "link", - "label": "Making IAM production-ready", - "href": "/docs/1.3/self-managed/iam/deployment/making-iam-production-ready/" 
- } - ] - } - ] - }, - - { - "Troubleshooting": [ - { - "type": "link", - "label": "Log levels", - "href": "/docs/1.3/self-managed/troubleshooting/log-levels/" - } - ] - } - ] -} diff --git a/optimize_versions.json b/optimize_versions.json index 4aeeb0e8139..ac645f524f1 100644 --- a/optimize_versions.json +++ b/optimize_versions.json @@ -1 +1 @@ -["3.14.0", "3.13.0", "3.12.0", "3.11.0", "3.10.0", "3.7.0"] +["3.14.0", "3.13.0", "3.12.0", "3.11.0"] diff --git a/package-lock.json b/package-lock.json index 845585d0c06..74a3fb1512f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,48 +1,47 @@ { "name": "camunda-cloud-documentation", "version": "0.0.0", - "lockfileVersion": 2, + "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "camunda-cloud-documentation", "version": "0.0.0", "dependencies": { - "@auth0/auth0-react": "^2.2.1", - "@bpmn-io/form-js": "^1.7.3", + "@auth0/auth0-react": "^2.2.4", + "@bpmn-io/form-js": "^1.12.0", "@docusaurus/core": "^2.4.1", "@docusaurus/preset-classic": "^2.4.1", "@docusaurus/theme-mermaid": "^2.4.1", "@mdx-js/react": "^1.6.22", "@saucelabs/theme-github-codeblock": "^0.2.3", - "clsx": "^1.2.1", + "clsx": "^2.1.1", "docusaurus": "^1.14.7", "docusaurus-plugin-openapi-docs": "^2.0.4", "docusaurus-theme-openapi-docs": "^2.0.4", - "mixpanel-browser": "^2.47.0", + "mixpanel-browser": "^2.56.0", "pushfeedback-react": "^0.1.30", "react": "^17.0.2", "react-dom": "^17.0.2", - "react-player": "^2.11.0", + "react-player": "^2.16.0", "unist-util-visit": "^5.0.0" }, "devDependencies": { - "@playwright/test": "^1.32.2", - "@swc/core": "^1.3.49", - "@types/jest": "^29.5.4", + "@playwright/test": "^1.49.0", + "@swc/core": "^1.9.3", + "@types/jest": "^29.5.14", "husky": "^8.0.3", - "jest": "^29.6.4", + "jest": "^29.7.0", "lint-staged": "^14.0.1", - "playwright": "^1.32.2", - "prettier": "3.0.0", - "replace-in-file": "^7.1.0", + "playwright": "^1.49.0", + "prettier": "3.3.3", + "replace-in-file": "^7.2.0", "swc-loader": "^0.2.3" } }, "node_modules/@algolia/autocomplete-core": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", - "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", + "license": "MIT", "dependencies": { "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", "@algolia/autocomplete-shared": "1.9.3" @@ -50,8 +49,7 @@ }, "node_modules/@algolia/autocomplete-plugin-algolia-insights": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", - "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", + "license": "MIT", "dependencies": { "@algolia/autocomplete-shared": "1.9.3" }, @@ -61,8 +59,7 @@ }, "node_modules/@algolia/autocomplete-preset-algolia": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", - "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", + "license": "MIT", "dependencies": { "@algolia/autocomplete-shared": "1.9.3" }, @@ -73,8 +70,7 @@ }, "node_modules/@algolia/autocomplete-shared": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", - "integrity": 
"sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", + "license": "MIT", "peerDependencies": { "@algolia/client-search": ">= 4.9.1 < 6", "algoliasearch": ">= 4.9.1 < 6" @@ -82,29 +78,25 @@ }, "node_modules/@algolia/cache-browser-local-storage": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.19.1.tgz", - "integrity": "sha512-FYAZWcGsFTTaSAwj9Std8UML3Bu8dyWDncM7Ls8g+58UOe4XYdlgzXWbrIgjaguP63pCCbMoExKr61B+ztK3tw==", + "license": "MIT", "dependencies": { "@algolia/cache-common": "4.19.1" } }, "node_modules/@algolia/cache-common": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.19.1.tgz", - "integrity": "sha512-XGghi3l0qA38HiqdoUY+wvGyBsGvKZ6U3vTiMBT4hArhP3fOGLXpIINgMiiGjTe4FVlTa5a/7Zf2bwlIHfRqqg==" + "license": "MIT" }, "node_modules/@algolia/cache-in-memory": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.19.1.tgz", - "integrity": "sha512-+PDWL+XALGvIginigzu8oU6eWw+o76Z8zHbBovWYcrtWOEtinbl7a7UTt3x3lthv+wNuFr/YD1Gf+B+A9V8n5w==", + "license": "MIT", "dependencies": { "@algolia/cache-common": "4.19.1" } }, "node_modules/@algolia/client-account": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.19.1.tgz", - "integrity": "sha512-Oy0ritA2k7AMxQ2JwNpfaEcgXEDgeyKu0V7E7xt/ZJRdXfEpZcwp9TOg4TJHC7Ia62gIeT2Y/ynzsxccPw92GA==", + "license": "MIT", "dependencies": { "@algolia/client-common": "4.19.1", "@algolia/client-search": "4.19.1", @@ -113,8 +105,7 @@ }, "node_modules/@algolia/client-analytics": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.19.1.tgz", - "integrity": "sha512-5QCq2zmgdZLIQhHqwl55ZvKVpLM3DNWjFI4T+bHr3rGu23ew2bLO4YtyxaZeChmDb85jUdPDouDlCumGfk6wOg==", + "license": "MIT", "dependencies": { "@algolia/client-common": "4.19.1", "@algolia/client-search": "4.19.1", @@ -124,8 +115,7 @@ }, "node_modules/@algolia/client-common": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.19.1.tgz", - "integrity": "sha512-3kAIVqTcPrjfS389KQvKzliC559x+BDRxtWamVJt8IVp7LGnjq+aVAXg4Xogkur1MUrScTZ59/AaUd5EdpyXgA==", + "license": "MIT", "dependencies": { "@algolia/requester-common": "4.19.1", "@algolia/transporter": "4.19.1" @@ -133,8 +123,7 @@ }, "node_modules/@algolia/client-personalization": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.19.1.tgz", - "integrity": "sha512-8CWz4/H5FA+krm9HMw2HUQenizC/DxUtsI5oYC0Jxxyce1vsr8cb1aEiSJArQT6IzMynrERif1RVWLac1m36xw==", + "license": "MIT", "dependencies": { "@algolia/client-common": "4.19.1", "@algolia/requester-common": "4.19.1", @@ -143,8 +132,7 @@ }, "node_modules/@algolia/client-search": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.19.1.tgz", - "integrity": "sha512-mBecfMFS4N+yK/p0ZbK53vrZbL6OtWMk8YmnOv1i0LXx4pelY8TFhqKoTit3NPVPwoSNN0vdSN9dTu1xr1XOVw==", + "license": "MIT", "dependencies": { "@algolia/client-common": "4.19.1", "@algolia/requester-common": "4.19.1", @@ -153,47 +141,40 @@ }, "node_modules/@algolia/events": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", - "integrity": 
"sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" + "license": "MIT" }, "node_modules/@algolia/logger-common": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.19.1.tgz", - "integrity": "sha512-i6pLPZW/+/YXKis8gpmSiNk1lOmYCmRI6+x6d2Qk1OdfvX051nRVdalRbEcVTpSQX6FQAoyeaui0cUfLYW5Elw==" + "license": "MIT" }, "node_modules/@algolia/logger-console": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.19.1.tgz", - "integrity": "sha512-jj72k9GKb9W0c7TyC3cuZtTr0CngLBLmc8trzZlXdfvQiigpUdvTi1KoWIb2ZMcRBG7Tl8hSb81zEY3zI2RlXg==", + "license": "MIT", "dependencies": { "@algolia/logger-common": "4.19.1" } }, "node_modules/@algolia/requester-browser-xhr": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.19.1.tgz", - "integrity": "sha512-09K/+t7lptsweRTueHnSnmPqIxbHMowejAkn9XIcJMLdseS3zl8ObnS5GWea86mu3vy4+8H+ZBKkUN82Zsq/zg==", + "license": "MIT", "dependencies": { "@algolia/requester-common": "4.19.1" } }, "node_modules/@algolia/requester-common": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.19.1.tgz", - "integrity": "sha512-BisRkcWVxrDzF1YPhAckmi2CFYK+jdMT60q10d7z3PX+w6fPPukxHRnZwooiTUrzFe50UBmLItGizWHP5bDzVQ==" + "license": "MIT" }, "node_modules/@algolia/requester-node-http": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.19.1.tgz", - "integrity": "sha512-6DK52DHviBHTG2BK/Vv2GIlEw7i+vxm7ypZW0Z7vybGCNDeWzADx+/TmxjkES2h15+FZOqVf/Ja677gePsVItA==", + "license": "MIT", "dependencies": { "@algolia/requester-common": "4.19.1" } }, "node_modules/@algolia/transporter": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.19.1.tgz", - "integrity": "sha512-nkpvPWbpuzxo1flEYqNIbGz7xhfhGOKGAZS7tzC+TELgEmi7z99qRyTfNSUlW7LZmB3ACdnqAo+9A9KFBENviQ==", + "license": "MIT", "dependencies": { "@algolia/cache-common": "4.19.1", "@algolia/logger-common": "4.19.1", @@ -201,11 +182,12 @@ } }, "node_modules/@ampproject/remapping": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.1.2.tgz", - "integrity": "sha512-hoyByceqwKirw7w3Z7gnIIZC3Wx3J484Y3L/cMpXFbr7d9ZQj2mODrirNzcJa+SM3UlpWXYvKV4RlRpFXlWgXg==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", "dependencies": { - "@jridgewell/trace-mapping": "^0.3.0" + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" @@ -213,8 +195,7 @@ }, "node_modules/@apidevtools/json-schema-ref-parser": { "version": "10.1.0", - "resolved": "https://registry.npmjs.org/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-10.1.0.tgz", - "integrity": "sha512-3e+viyMuXdrcK8v5pvP+SDoAQ77FH6OyRmuK48SZKmdHJRFm87RsSs8qm6kP39a/pOPURByJw+OXzQIqcfmKtA==", + "license": "MIT", "dependencies": { "@jsdevtools/ono": "^7.1.3", "@types/json-schema": "^7.0.11", @@ -230,11 +211,11 @@ } }, "node_modules/@auth0/auth0-react": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@auth0/auth0-react/-/auth0-react-2.2.1.tgz", - "integrity": 
"sha512-4L4FZvSqIwzVk5mwWFbWzfJ4Zq11dgS0v4KIGKro5tL9dgOnBGq+Ino/1mzexPV1LJHBkfwXG4+IaPiQNz5CGg==", + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/@auth0/auth0-react/-/auth0-react-2.2.4.tgz", + "integrity": "sha512-l29PQC0WdgkCoOc6WeMAY26gsy/yXJICW0jHfj0nz8rZZphYKrLNqTRWFFCMJY+sagza9tSgB1kG/UvQYgGh9A==", "dependencies": { - "@auth0/auth0-spa-js": "^2.1.2" + "@auth0/auth0-spa-js": "^2.1.3" }, "peerDependencies": { "react": "^16.11.0 || ^17 || ^18", @@ -242,49 +223,51 @@ } }, "node_modules/@auth0/auth0-spa-js": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@auth0/auth0-spa-js/-/auth0-spa-js-2.1.2.tgz", - "integrity": "sha512-xdA65Z/U7++Y7L9Uwh8Q8OVOs6qgFz+fb7GAzHFjpr1icO37B//xdzLXm7ZRgA19RWrsNe1nme3h896igJSvvw==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@auth0/auth0-spa-js/-/auth0-spa-js-2.1.3.tgz", + "integrity": "sha512-NMTBNuuG4g3rame1aCnNS5qFYIzsTUV5qTFPRfTyYFS1feS6jsCBR+eTq9YkxCp1yuoM2UIcjunPaoPl77U9xQ==" }, "node_modules/@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/helper-validator-identifier": "^7.25.9", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/compat-data": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.20.1.tgz", - "integrity": "sha512-EWZ4mE2diW3QALKvDMiXnbZpRvlj+nayZ112nK93SnhqOtpdsbVD4W+2tEoT3YNBAG9RBR0ISY758ZkOgsn6pQ==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.2.tgz", + "integrity": "sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.18.9.tgz", - "integrity": "sha512-1LIb1eL8APMy91/IMW+31ckrfBM4yCoLaVzoDhZUKSM4cu1L1nIidyxkCgzPAgrC5WEz36IPEr/eSeSF9pIn+g==", - "dependencies": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.18.9", - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-module-transforms": "^7.18.9", - "@babel/helpers": "^7.18.9", - "@babel/parser": "^7.18.9", - "@babel/template": "^7.18.6", - "@babel/traverse": "^7.18.9", - "@babel/types": "^7.18.9", - "convert-source-map": "^1.7.0", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.0.tgz", + "integrity": "sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.26.0", + "@babel/generator": "^7.26.0", + "@babel/helper-compilation-targets": "^7.25.9", + "@babel/helper-module-transforms": "^7.26.0", + "@babel/helpers": "^7.26.0", + "@babel/parser": "^7.26.0", + "@babel/template": "^7.25.9", + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.26.0", + "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" + "json5": 
"^2.2.3", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -294,22 +277,29 @@ "url": "https://opencollective.com/babel" } }, + "node_modules/@babel/core/node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/generator": { - "version": "7.20.4", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.20.4.tgz", - "integrity": "sha512-luCf7yk/cm7yab6CAW1aiFnmEfBJplb/JojV56MYEK7ziWfGmFlTfmL9Ehwfy4gFhbjBfWO1wj7/TuSbVNEEtA==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz", + "integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==", "dependencies": { - "@babel/types": "^7.20.2", - "@jridgewell/gen-mapping": "^0.3.2", - "jsesc": "^2.5.1" + "@babel/parser": "^7.26.2", + "@babel/types": "^7.26.0", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" }, "engines": { "node": ">=6.9.0" @@ -317,8 +307,7 @@ }, "node_modules/@babel/helper-annotate-as-pure": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz", - "integrity": "sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA==", + "license": "MIT", "dependencies": { "@babel/types": "^7.18.6" }, @@ -328,8 +317,7 @@ }, "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz", - "integrity": "sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw==", + "license": "MIT", "dependencies": { "@babel/helper-explode-assignable-expression": "^7.18.6", "@babel/types": "^7.18.9" @@ -339,34 +327,44 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.20.0.tgz", - "integrity": "sha512-0jp//vDGp9e8hZzBc6N/KwA5ZK3Wsm/pfm4CrY7vzegkVxc65SgSn6wYOnwHe9Js9HRQ1YTCKLGPzDtaS3RoLQ==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz", + "integrity": "sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ==", "dependencies": { - "@babel/compat-data": "^7.20.0", - "@babel/helper-validator-option": "^7.18.6", - "browserslist": "^4.21.3", - "semver": "^6.3.0" + "@babel/compat-data": "^7.25.9", + "@babel/helper-validator-option": "^7.25.9", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" - }, - 
"peerDependencies": { - "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" } }, "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } }, + "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, "node_modules/@babel/helper-create-class-features-plugin": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.20.2.tgz", - "integrity": "sha512-k22GoYRAHPYr9I+Gvy2ZQlAe5mGy8BqWst2wRt8cwIufWTxrsVshhIBvYNqC80N0GSFWTsqRVexOtfzlgOEDvA==", + "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.18.6", "@babel/helper-environment-visitor": "^7.18.9", @@ -385,8 +383,7 @@ }, "node_modules/@babel/helper-create-regexp-features-plugin": { "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.19.0.tgz", - "integrity": "sha512-htnV+mHX32DF81amCDrwIDr8nrp1PTm+3wfBN9/v8QJOLEioOCOG7qNyq0nHeFiWbT3Eb7gsPwEmV64UCQ1jzw==", + "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.18.6", "regexpu-core": "^5.1.0" @@ -400,8 +397,7 @@ }, "node_modules/@babel/helper-define-polyfill-provider": { "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz", - "integrity": "sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww==", + "license": "MIT", "dependencies": { "@babel/helper-compilation-targets": "^7.17.7", "@babel/helper-plugin-utils": "^7.16.7", @@ -416,24 +412,21 @@ }, "node_modules/@babel/helper-define-polyfill-provider/node_modules/semver": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/helper-environment-visitor": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-explode-assignable-expression": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz", - 
"integrity": "sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg==", + "license": "MIT", "dependencies": { "@babel/types": "^7.18.6" }, @@ -443,8 +436,7 @@ }, "node_modules/@babel/helper-function-name": { "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "license": "MIT", "dependencies": { "@babel/template": "^7.18.10", "@babel/types": "^7.19.0" @@ -455,8 +447,7 @@ }, "node_modules/@babel/helper-hoist-variables": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "license": "MIT", "dependencies": { "@babel/types": "^7.18.6" }, @@ -466,8 +457,7 @@ }, "node_modules/@babel/helper-member-expression-to-functions": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.18.9.tgz", - "integrity": "sha512-RxifAh2ZoVU67PyKIO4AMi1wTenGfMR/O/ae0CCRqwgBAt5v7xjdtRw7UoSbsreKrQn5t7r89eruK/9JjYHuDg==", + "license": "MIT", "dependencies": { "@babel/types": "^7.18.9" }, @@ -476,38 +466,36 @@ } }, "node_modules/@babel/helper-module-imports": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz", - "integrity": "sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", + "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.20.2.tgz", - "integrity": "sha512-zvBKyJXRbmK07XhMuujYoJ48B5yvvmM6+wcpv6Ivj4Yg6qO7NOZOSnvZN9CRl1zz1Z4cKf8YejmCMh8clOoOeA==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", + "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-simple-access": "^7.20.2", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/helper-validator-identifier": "^7.19.1", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2" + "@babel/helper-module-imports": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9", + "@babel/traverse": "^7.25.9" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-optimise-call-expression": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz", - "integrity": "sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA==", + 
"license": "MIT", "dependencies": { "@babel/types": "^7.18.6" }, @@ -516,17 +504,16 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.20.2.tgz", - "integrity": "sha512-8RvlJG2mj4huQ4pZ+rU9lqKi9ZKiRmuvGuM2HlWmkmgOhbs6zEAw6IEiJ5cQqGbDzGZOhwuOQNtZMi/ENLjZoQ==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz", + "integrity": "sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-remap-async-to-generator": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz", - "integrity": "sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA==", + "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.18.6", "@babel/helper-environment-visitor": "^7.18.9", @@ -542,8 +529,7 @@ }, "node_modules/@babel/helper-replace-supers": { "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.19.1.tgz", - "integrity": "sha512-T7ahH7wV0Hfs46SFh5Jz3s0B6+o8g3c+7TMxu7xKfmHikg7EAZ3I2Qk9LFhjxXq8sL7UkP5JflezNwoZa8WvWw==", + "license": "MIT", "dependencies": { "@babel/helper-environment-visitor": "^7.18.9", "@babel/helper-member-expression-to-functions": "^7.18.9", @@ -557,8 +543,7 @@ }, "node_modules/@babel/helper-simple-access": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.20.2.tgz", - "integrity": "sha512-+0woI/WPq59IrqDYbVGfshjT5Dmk/nnbdpcF8SnMhhXObpTq2KNBdLFRFrkVdbDOyUmHBCxzm5FHV1rACIkIbA==", + "license": "MIT", "dependencies": { "@babel/types": "^7.20.2" }, @@ -568,8 +553,7 @@ }, "node_modules/@babel/helper-skip-transparent-expression-wrappers": { "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.20.0.tgz", - "integrity": "sha512-5y1JYeNKfvnT8sZcK9DVRtpTbGiomYIHviSP3OQWmDPU3DeH4a1ZlT/N2lyQ5P8egjcRaT/Y9aNqUxK0WsnIIg==", + "license": "MIT", "dependencies": { "@babel/types": "^7.20.0" }, @@ -579,8 +563,7 @@ }, "node_modules/@babel/helper-split-export-declaration": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "license": "MIT", "dependencies": { "@babel/types": "^7.18.6" }, @@ -589,33 +572,32 @@ } }, "node_modules/@babel/helper-string-parser": { - "version": "7.19.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz", - "integrity": "sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz", - "integrity": "sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", + "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-wrap-function": { "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.19.0.tgz", - "integrity": "sha512-txX8aN8CZyYGTwcLhlk87KRqncAzhh5TpQamZUa0/u3an36NtDpUP6bQgBCBcLeBs09R/OwQu3OjK0k/HwfNDg==", + "license": "MIT", "dependencies": { "@babel/helper-function-name": "^7.19.0", "@babel/template": "^7.18.10", @@ -627,13 +609,12 @@ } }, "node_modules/@babel/helpers": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.20.1.tgz", - "integrity": "sha512-J77mUVaDTUJFZ5BpP6mMn6OIl3rEWymk2ZxDBQJUG3P+PbmyMcF3bYWvz0ma69Af1oobDqT/iAsvzhB58xhQUg==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.0.tgz", + "integrity": "sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==", "dependencies": { - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.0" + "@babel/template": "^7.25.9", + "@babel/types": "^7.26.0" }, "engines": { "node": ">=6.9.0" @@ -641,8 +622,7 @@ }, "node_modules/@babel/highlight": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "license": "MIT", "dependencies": { "@babel/helper-validator-identifier": "^7.18.6", "chalk": "^2.0.0", @@ -653,9 +633,12 @@ } }, "node_modules/@babel/parser": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.21.3.tgz", - "integrity": "sha512-lobG0d7aOfQRXh8AyklEAgZGvA4FShxo6xQbUrrT/cNBPUdIDojlokwJsQyCC/eKia7ifqM0yP+2DRZ4WKw2RQ==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", + "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", + "dependencies": { + "@babel/types": "^7.26.0" + }, "bin": { "parser": "bin/babel-parser.js" }, @@ -665,8 +648,7 @@ }, "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz", - "integrity": 
"sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -679,8 +661,7 @@ }, "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.18.9.tgz", - "integrity": "sha512-AHrP9jadvH7qlOj6PINbgSuphjQUAK7AOT7DPjBo9EHoLhQTnnK5u45e1Hd4DbSQEO9nqPWtQ89r+XEOWFScKg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9", "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", @@ -695,8 +676,7 @@ }, "node_modules/@babel/plugin-proposal-async-generator-functions": { "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.1.tgz", - "integrity": "sha512-Gh5rchzSwE4kC+o/6T8waD0WHEQIsDmjltY8WnWRXHUdH8axZhuH86Ov9M72YhJfDrZseQwuuWaaIT/TmePp3g==", + "license": "MIT", "dependencies": { "@babel/helper-environment-visitor": "^7.18.9", "@babel/helper-plugin-utils": "^7.19.0", @@ -712,8 +692,7 @@ }, "node_modules/@babel/plugin-proposal-class-properties": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", - "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", + "license": "MIT", "dependencies": { "@babel/helper-create-class-features-plugin": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6" @@ -727,8 +706,7 @@ }, "node_modules/@babel/plugin-proposal-class-static-block": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.6.tgz", - "integrity": "sha512-+I3oIiNxrCpup3Gi8n5IGMwj0gOCAjcJUSQEcotNnCCPMEnixawOQ+KeJPlgfjzx+FKQ1QSyZOWe7wmoJp7vhw==", + "license": "MIT", "dependencies": { "@babel/helper-create-class-features-plugin": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6", @@ -743,8 +721,7 @@ }, "node_modules/@babel/plugin-proposal-dynamic-import": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz", - "integrity": "sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-dynamic-import": "^7.8.3" @@ -758,8 +735,7 @@ }, "node_modules/@babel/plugin-proposal-export-namespace-from": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz", - "integrity": "sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-export-namespace-from": "^7.8.3" @@ -773,8 +749,7 @@ }, "node_modules/@babel/plugin-proposal-json-strings": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz", - "integrity": "sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==", + "license": "MIT", "dependencies": { 
"@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-json-strings": "^7.8.3" @@ -788,8 +763,7 @@ }, "node_modules/@babel/plugin-proposal-logical-assignment-operators": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.18.9.tgz", - "integrity": "sha512-128YbMpjCrP35IOExw2Fq+x55LMP42DzhOhX2aNNIdI9avSWl2PI0yuBWarr3RYpZBSPtabfadkH2yeRiMD61Q==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" @@ -803,8 +777,7 @@ }, "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", - "integrity": "sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" @@ -818,8 +791,7 @@ }, "node_modules/@babel/plugin-proposal-numeric-separator": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz", - "integrity": "sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-numeric-separator": "^7.10.4" @@ -833,8 +805,7 @@ }, "node_modules/@babel/plugin-proposal-object-rest-spread": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.2.tgz", - "integrity": "sha512-Ks6uej9WFK+fvIMesSqbAto5dD8Dz4VuuFvGJFKgIGSkJuRGcrwGECPA1fDgQK3/DbExBJpEkTeYeB8geIFCSQ==", + "license": "MIT", "dependencies": { "@babel/compat-data": "^7.20.1", "@babel/helper-compilation-targets": "^7.20.0", @@ -851,8 +822,7 @@ }, "node_modules/@babel/plugin-proposal-optional-catch-binding": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz", - "integrity": "sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" @@ -866,8 +836,7 @@ }, "node_modules/@babel/plugin-proposal-optional-chaining": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.18.9.tgz", - "integrity": "sha512-v5nwt4IqBXihxGsW2QmCWMDS3B3bzGIk/EQVZz2ei7f3NJl8NzAJVvUmpDW5q1CRNY+Beb/k58UAH1Km1N411w==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9", "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", @@ -882,8 +851,7 @@ }, "node_modules/@babel/plugin-proposal-private-methods": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz", - "integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==", + "license": "MIT", "dependencies": { "@babel/helper-create-class-features-plugin": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6" @@ 
-897,8 +865,7 @@ }, "node_modules/@babel/plugin-proposal-private-property-in-object": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.18.6.tgz", - "integrity": "sha512-9Rysx7FOctvT5ouj5JODjAFAkgGoudQuLPamZb0v1TGLpapdNaftzifU8NTWQm0IRjqoYypdrSmyWgkocDQ8Dw==", + "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.18.6", "@babel/helper-create-class-features-plugin": "^7.18.6", @@ -914,8 +881,7 @@ }, "node_modules/@babel/plugin-proposal-unicode-property-regex": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", - "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", + "license": "MIT", "dependencies": { "@babel/helper-create-regexp-features-plugin": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6" @@ -929,8 +895,7 @@ }, "node_modules/@babel/plugin-syntax-async-generators": { "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -940,9 +905,8 @@ }, "node_modules/@babel/plugin-syntax-bigint": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", - "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -952,8 +916,7 @@ }, "node_modules/@babel/plugin-syntax-class-properties": { "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.12.13" }, @@ -963,8 +926,7 @@ }, "node_modules/@babel/plugin-syntax-class-static-block": { "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -977,8 +939,7 @@ }, "node_modules/@babel/plugin-syntax-dynamic-import": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -988,8 +949,7 @@ }, "node_modules/@babel/plugin-syntax-export-namespace-from": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.3" }, @@ -999,8 +959,7 @@ }, 
"node_modules/@babel/plugin-syntax-import-assertions": { "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.20.0.tgz", - "integrity": "sha512-IUh1vakzNoWalR8ch/areW7qFopR2AEw03JlG7BbrDqmQ4X3q9uuipQwSGrUn7oGiemKjtSLDhNtQHzMHr1JdQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.19.0" }, @@ -1011,11 +970,25 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz", + "integrity": "sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, "node_modules/@babel/plugin-syntax-import-meta": { "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", - "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1025,8 +998,7 @@ }, "node_modules/@babel/plugin-syntax-json-strings": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1036,8 +1008,7 @@ }, "node_modules/@babel/plugin-syntax-jsx": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.18.6.tgz", - "integrity": "sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1050,8 +1021,7 @@ }, "node_modules/@babel/plugin-syntax-logical-assignment-operators": { "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1061,8 +1031,7 @@ }, "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1072,8 +1041,7 @@ }, "node_modules/@babel/plugin-syntax-numeric-separator": { "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1083,8 +1051,7 @@ }, 
"node_modules/@babel/plugin-syntax-object-rest-spread": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1094,8 +1061,7 @@ }, "node_modules/@babel/plugin-syntax-optional-catch-binding": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1105,8 +1071,7 @@ }, "node_modules/@babel/plugin-syntax-optional-chaining": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1116,8 +1081,7 @@ }, "node_modules/@babel/plugin-syntax-private-property-in-object": { "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -1130,8 +1094,7 @@ }, "node_modules/@babel/plugin-syntax-top-level-await": { "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -1144,8 +1107,7 @@ }, "node_modules/@babel/plugin-syntax-typescript": { "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.20.0.tgz", - "integrity": "sha512-rd9TkG+u1CExzS4SM1BlMEhMXwFLKVjOAFFCDx9PbX5ycJWDoWMcwdJH9RhkPu1dOgn5TrxLot/Gx6lWFuAUNQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.19.0" }, @@ -1158,8 +1120,7 @@ }, "node_modules/@babel/plugin-transform-arrow-functions": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.18.6.tgz", - "integrity": "sha512-9S9X9RUefzrsHZmKMbDXxweEH+YlE8JJEuat9FdvW9Qh1cw7W64jELCtWNkPBPX5En45uy28KGvA/AySqUh8CQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1172,8 +1133,7 @@ }, "node_modules/@babel/plugin-transform-async-to-generator": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.18.6.tgz", - "integrity": "sha512-ARE5wZLKnTgPW7/1ftQmSi1CmkqqHo2DNmtztFhvgtOWSDfq0Cq9/9L+KnZNYSNrydBekhW3rwShduf59RoXag==", + "license": "MIT", "dependencies": { "@babel/helper-module-imports": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6", @@ -1188,8 +1148,7 @@ }, "node_modules/@babel/plugin-transform-block-scoped-functions": { "version": 
"7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz", - "integrity": "sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1202,8 +1161,7 @@ }, "node_modules/@babel/plugin-transform-block-scoping": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.20.2.tgz", - "integrity": "sha512-y5V15+04ry69OV2wULmwhEA6jwSWXO1TwAtIwiPXcvHcoOQUqpyMVd2bDsQJMW8AurjulIyUV8kDqtjSwHy1uQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.20.2" }, @@ -1216,8 +1174,7 @@ }, "node_modules/@babel/plugin-transform-classes": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.20.2.tgz", - "integrity": "sha512-9rbPp0lCVVoagvtEyQKSo5L8oo0nQS/iif+lwlAz29MccX2642vWDlSZK+2T2buxbopotId2ld7zZAzRfz9j1g==", + "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.18.6", "@babel/helper-compilation-targets": "^7.20.0", @@ -1238,8 +1195,7 @@ }, "node_modules/@babel/plugin-transform-computed-properties": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.18.9.tgz", - "integrity": "sha512-+i0ZU1bCDymKakLxn5srGHrsAPRELC2WIbzwjLhHW9SIE1cPYkLCL0NlnXMZaM1vhfgA2+M7hySk42VBvrkBRw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9" }, @@ -1252,8 +1208,7 @@ }, "node_modules/@babel/plugin-transform-destructuring": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.20.2.tgz", - "integrity": "sha512-mENM+ZHrvEgxLTBXUiQ621rRXZes3KWUv6NdQlrnr1TkWVw+hUjQBZuP2X32qKlrlG2BzgR95gkuCRSkJl8vIw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.20.2" }, @@ -1266,8 +1221,7 @@ }, "node_modules/@babel/plugin-transform-dotall-regex": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz", - "integrity": "sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg==", + "license": "MIT", "dependencies": { "@babel/helper-create-regexp-features-plugin": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6" @@ -1281,8 +1235,7 @@ }, "node_modules/@babel/plugin-transform-duplicate-keys": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz", - "integrity": "sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9" }, @@ -1295,8 +1248,7 @@ }, "node_modules/@babel/plugin-transform-exponentiation-operator": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz", - "integrity": "sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw==", + "license": "MIT", "dependencies": { "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6" @@ -1310,8 +1262,7 @@ }, 
"node_modules/@babel/plugin-transform-for-of": { "version": "7.18.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.8.tgz", - "integrity": "sha512-yEfTRnjuskWYo0k1mHUqrVWaZwrdq8AYbfrpqULOJOaucGSp4mNMVps+YtA8byoevxS/urwU75vyhQIxcCgiBQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1324,8 +1275,7 @@ }, "node_modules/@babel/plugin-transform-function-name": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz", - "integrity": "sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ==", + "license": "MIT", "dependencies": { "@babel/helper-compilation-targets": "^7.18.9", "@babel/helper-function-name": "^7.18.9", @@ -1340,8 +1290,7 @@ }, "node_modules/@babel/plugin-transform-literals": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz", - "integrity": "sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9" }, @@ -1354,8 +1303,7 @@ }, "node_modules/@babel/plugin-transform-member-expression-literals": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz", - "integrity": "sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1368,8 +1316,7 @@ }, "node_modules/@babel/plugin-transform-modules-amd": { "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.19.6.tgz", - "integrity": "sha512-uG3od2mXvAtIFQIh0xrpLH6r5fpSQN04gIVovl+ODLdUMANokxQLZnPBHcjmv3GxRjnqwLuHvppjjcelqUFZvg==", + "license": "MIT", "dependencies": { "@babel/helper-module-transforms": "^7.19.6", "@babel/helper-plugin-utils": "^7.19.0" @@ -1383,8 +1330,7 @@ }, "node_modules/@babel/plugin-transform-modules-commonjs": { "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.19.6.tgz", - "integrity": "sha512-8PIa1ym4XRTKuSsOUXqDG0YaOlEuTVvHMe5JCfgBMOtHvJKw/4NGovEGN33viISshG/rZNVrACiBmPQLvWN8xQ==", + "license": "MIT", "dependencies": { "@babel/helper-module-transforms": "^7.19.6", "@babel/helper-plugin-utils": "^7.19.0", @@ -1399,8 +1345,7 @@ }, "node_modules/@babel/plugin-transform-modules-systemjs": { "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.19.6.tgz", - "integrity": "sha512-fqGLBepcc3kErfR9R3DnVpURmckXP7gj7bAlrTQyBxrigFqszZCkFkcoxzCp2v32XmwXLvbw+8Yq9/b+QqksjQ==", + "license": "MIT", "dependencies": { "@babel/helper-hoist-variables": "^7.18.6", "@babel/helper-module-transforms": "^7.19.6", @@ -1416,8 +1361,7 @@ }, "node_modules/@babel/plugin-transform-modules-umd": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz", - "integrity": "sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ==", + "license": "MIT", "dependencies": { "@babel/helper-module-transforms": "^7.18.6", 
"@babel/helper-plugin-utils": "^7.18.6" @@ -1431,8 +1375,7 @@ }, "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.19.1.tgz", - "integrity": "sha512-oWk9l9WItWBQYS4FgXD4Uyy5kq898lvkXpXQxoJEY1RnvPk4R/Dvu2ebXU9q8lP+rlMwUQTFf2Ok6d78ODa0kw==", + "license": "MIT", "dependencies": { "@babel/helper-create-regexp-features-plugin": "^7.19.0", "@babel/helper-plugin-utils": "^7.19.0" @@ -1446,8 +1389,7 @@ }, "node_modules/@babel/plugin-transform-new-target": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz", - "integrity": "sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1460,8 +1402,7 @@ }, "node_modules/@babel/plugin-transform-object-super": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz", - "integrity": "sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "@babel/helper-replace-supers": "^7.18.6" @@ -1475,8 +1416,7 @@ }, "node_modules/@babel/plugin-transform-parameters": { "version": "7.20.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.20.3.tgz", - "integrity": "sha512-oZg/Fpx0YDrj13KsLyO8I/CX3Zdw7z0O9qOd95SqcoIzuqy/WTGWvePeHAnZCN54SfdyjHcb1S30gc8zlzlHcA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.20.2" }, @@ -1489,8 +1429,7 @@ }, "node_modules/@babel/plugin-transform-property-literals": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz", - "integrity": "sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1503,8 +1442,7 @@ }, "node_modules/@babel/plugin-transform-react-constant-elements": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.20.2.tgz", - "integrity": "sha512-KS/G8YI8uwMGKErLFOHS/ekhqdHhpEloxs43NecQHVgo2QuQSyJhGIY1fL8UGl9wy5ItVwwoUL4YxVqsplGq2g==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.20.2" }, @@ -1517,8 +1455,7 @@ }, "node_modules/@babel/plugin-transform-react-display-name": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.18.6.tgz", - "integrity": "sha512-TV4sQ+T013n61uMoygyMRm+xf04Bd5oqFpv2jAEQwSZ8NwQA7zeRPg1LMVg2PWi3zWBz+CLKD+v5bcpZ/BS0aA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1531,8 +1468,7 @@ }, "node_modules/@babel/plugin-transform-react-jsx": { "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.19.0.tgz", - "integrity": "sha512-UVEvX3tXie3Szm3emi1+G63jyw1w5IcMY0FSKM+CRnKRI5Mr1YbCNgsSTwoTwKphQEG9P+QqmuRFneJPZuHNhg==", + "license": "MIT", "dependencies": { 
"@babel/helper-annotate-as-pure": "^7.18.6", "@babel/helper-module-imports": "^7.18.6", @@ -1549,8 +1485,7 @@ }, "node_modules/@babel/plugin-transform-react-jsx-development": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.18.6.tgz", - "integrity": "sha512-SA6HEjwYFKF7WDjWcMcMGUimmw/nhNRDWxr+KaLSCrkD/LMDBvWRmHAYgE1HDeF8KUuI8OAu+RT6EOtKxSW2qA==", + "license": "MIT", "dependencies": { "@babel/plugin-transform-react-jsx": "^7.18.6" }, @@ -1563,8 +1498,7 @@ }, "node_modules/@babel/plugin-transform-react-pure-annotations": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.18.6.tgz", - "integrity": "sha512-I8VfEPg9r2TRDdvnHgPepTKvuRomzA8+u+nhY7qSI1fR2hRNebasZEETLyM5mAUr0Ku56OkXJ0I7NHJnO6cJiQ==", + "license": "MIT", "dependencies": { "@babel/helper-annotate-as-pure": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6" @@ -1578,8 +1512,7 @@ }, "node_modules/@babel/plugin-transform-regenerator": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.6.tgz", - "integrity": "sha512-poqRI2+qiSdeldcz4wTSTXBRryoq3Gc70ye7m7UD5Ww0nE29IXqMl6r7Nd15WBgRd74vloEMlShtH6CKxVzfmQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "regenerator-transform": "^0.15.0" @@ -1593,8 +1526,7 @@ }, "node_modules/@babel/plugin-transform-reserved-words": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz", - "integrity": "sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1607,8 +1539,7 @@ }, "node_modules/@babel/plugin-transform-runtime": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.18.9.tgz", - "integrity": "sha512-wS8uJwBt7/b/mzE13ktsJdmS4JP/j7PQSaADtnb4I2wL0zK51MQ0pmF8/Jy0wUIS96fr+fXT6S/ifiPXnvrlSg==", + "license": "MIT", "dependencies": { "@babel/helper-module-imports": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.9", @@ -1626,16 +1557,14 @@ }, "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/plugin-transform-shorthand-properties": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz", - "integrity": "sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1648,8 +1577,7 @@ }, "node_modules/@babel/plugin-transform-spread": { "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.19.0.tgz", - "integrity": "sha512-RsuMk7j6n+r752EtzyScnWkQyuJdli6LdO5Klv8Yx0OfPVTcQkIUfS8clx5e9yHXzlnhOZF3CbQ8C2uP5j074w==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.19.0", 
"@babel/helper-skip-transparent-expression-wrappers": "^7.18.9" @@ -1663,8 +1591,7 @@ }, "node_modules/@babel/plugin-transform-sticky-regex": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz", - "integrity": "sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6" }, @@ -1677,8 +1604,7 @@ }, "node_modules/@babel/plugin-transform-template-literals": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz", - "integrity": "sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9" }, @@ -1691,8 +1617,7 @@ }, "node_modules/@babel/plugin-transform-typeof-symbol": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz", - "integrity": "sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9" }, @@ -1705,8 +1630,7 @@ }, "node_modules/@babel/plugin-transform-typescript": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.20.2.tgz", - "integrity": "sha512-jvS+ngBfrnTUBfOQq8NfGnSbF9BrqlR6hjJ2yVxMkmO5nL/cdifNbI30EfjRlN4g5wYWNnMPyj5Sa6R1pbLeag==", + "license": "MIT", "dependencies": { "@babel/helper-create-class-features-plugin": "^7.20.2", "@babel/helper-plugin-utils": "^7.20.2", @@ -1721,8 +1645,7 @@ }, "node_modules/@babel/plugin-transform-unicode-escapes": { "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz", - "integrity": "sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.9" }, @@ -1735,8 +1658,7 @@ }, "node_modules/@babel/plugin-transform-unicode-regex": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz", - "integrity": "sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA==", + "license": "MIT", "dependencies": { "@babel/helper-create-regexp-features-plugin": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6" @@ -1750,9 +1672,7 @@ }, "node_modules/@babel/polyfill": { "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/polyfill/-/polyfill-7.12.1.tgz", - "integrity": "sha512-X0pi0V6gxLi6lFZpGmeNa4zxtwEmCs42isWLNjZZDE0Y8yVfgu0T2OAHlzBbdYlqbW/YXVvoBHpATEM+goCj8g==", - "deprecated": "🚨 This package has been deprecated in favor of separate inclusion of a polyfill and regenerator-runtime (when needed). 
See the @babel/polyfill docs (https://babeljs.io/docs/en/babel-polyfill) for more information.", + "license": "MIT", "dependencies": { "core-js": "^2.6.5", "regenerator-runtime": "^0.13.4" @@ -1760,15 +1680,12 @@ }, "node_modules/@babel/polyfill/node_modules/core-js": { "version": "2.6.12", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.12.tgz", - "integrity": "sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ==", - "deprecated": "core-js@<3.23.3 is no longer maintained and not recommended for usage due to the number of issues. Because of the V8 engine whims, feature detection in old core-js versions could cause a slowdown up to 100x even if nothing is polyfilled. Some versions have web compatibility issues. Please, upgrade your dependencies to the actual version of core-js.", - "hasInstallScript": true + "hasInstallScript": true, + "license": "MIT" }, "node_modules/@babel/preset-env": { "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.20.2.tgz", - "integrity": "sha512-1G0efQEWR1EHkKvKHqbG+IN/QdgwfByUpM5V5QroDzGV2t3S/WXNQd693cHiHTlCFMpr9B6FkPFXDA2lQcKoDg==", + "license": "MIT", "dependencies": { "@babel/compat-data": "^7.20.1", "@babel/helper-compilation-targets": "^7.20.0", @@ -1855,8 +1772,7 @@ }, "node_modules/@babel/preset-env/node_modules/babel-plugin-polyfill-corejs3": { "version": "0.6.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.6.0.tgz", - "integrity": "sha512-+eHqR6OPcBhJOGgsIar7xoAB1GcSwVUA3XjAd7HJNzOXT4wv6/H7KIdA/Nc60cvUlDbKApmqNvD1B1bzOt4nyA==", + "license": "MIT", "dependencies": { "@babel/helper-define-polyfill-provider": "^0.3.3", "core-js-compat": "^3.25.1" @@ -1867,8 +1783,7 @@ }, "node_modules/@babel/preset-env/node_modules/babel-plugin-polyfill-regenerator": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.1.tgz", - "integrity": "sha512-NtQGmyQDXjQqQ+IzRkBVwEOz9lQ4zxAQZgoAYEtU9dJjnl1Oc98qnN7jcp+bE7O7aYzVpavXE3/VKXNzUbh7aw==", + "license": "MIT", "dependencies": { "@babel/helper-define-polyfill-provider": "^0.3.3" }, @@ -1878,16 +1793,14 @@ }, "node_modules/@babel/preset-env/node_modules/semver": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/preset-modules": { "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", - "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", @@ -1901,8 +1814,7 @@ }, "node_modules/@babel/preset-react": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.18.6.tgz", - "integrity": "sha512-zXr6atUmyYdiWRVLOZahakYmOBHtWc2WGCkP8PYTgZi0iJXDY2CN180TdrIW4OGOAdLc7TifzDIvtx6izaRIzg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "@babel/helper-validator-option": "^7.18.6", @@ -1920,8 +1832,7 @@ }, "node_modules/@babel/preset-typescript": { "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.18.6.tgz", - "integrity": "sha512-s9ik86kXBAnD760aybBucdpnLsAt0jK1xqJn2juOn9lkOvSHV60os5hxoVJsPzMQxvnUJFAlkont2DvvaYEBtQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", "@babel/helper-validator-option": "^7.18.6", @@ -1936,8 +1847,7 @@ }, "node_modules/@babel/register": { "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/register/-/register-7.21.0.tgz", - "integrity": "sha512-9nKsPmYDi5DidAqJaQooxIhsLJiNMkGr8ypQ8Uic7cIox7UCDsM7HuUGxdGT7mSDTYbqzIdsOWzfBton/YJrMw==", + "license": "MIT", "dependencies": { "clone-deep": "^4.0.1", "find-cache-dir": "^2.0.0", @@ -1954,8 +1864,7 @@ }, "node_modules/@babel/register/node_modules/find-cache-dir": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "license": "MIT", "dependencies": { "commondir": "^1.0.1", "make-dir": "^2.0.0", @@ -1967,8 +1876,7 @@ }, "node_modules/@babel/register/node_modules/find-up": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "license": "MIT", "dependencies": { "locate-path": "^3.0.0" }, @@ -1978,8 +1886,7 @@ }, "node_modules/@babel/register/node_modules/locate-path": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "license": "MIT", "dependencies": { "p-locate": "^3.0.0", "path-exists": "^3.0.0" @@ -1990,8 +1897,7 @@ }, "node_modules/@babel/register/node_modules/make-dir": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "license": "MIT", "dependencies": { "pify": "^4.0.1", "semver": "^5.6.0" @@ -2002,8 +1908,7 @@ }, "node_modules/@babel/register/node_modules/p-locate": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "license": "MIT", "dependencies": { "p-limit": "^2.0.0" }, @@ -2013,16 +1918,14 @@ }, "node_modules/@babel/register/node_modules/path-exists": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/@babel/register/node_modules/pkg-dir": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "license": "MIT", "dependencies": { "find-up": "^3.0.0" }, @@ -2032,16 +1935,14 @@ }, "node_modules/@babel/register/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, 
"node_modules/@babel/runtime": { "version": "7.22.11", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.11.tgz", - "integrity": "sha512-ee7jVNlWN09+KftVOu9n7S8gQzD/Z6hN/I8VBRXW4P1+Xe7kJGXMwu8vds4aGIMHZnNbdpSWCfZZtinytpcAvA==", + "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" }, @@ -2051,8 +1952,7 @@ }, "node_modules/@babel/runtime-corejs3": { "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.18.9.tgz", - "integrity": "sha512-qZEWeccZCrHA2Au4/X05QW5CMdm4VjUDCrGq5gf1ZDcM4hRqreKrtwAn7yci9zfgAS9apvnsFXiGBHBAxZdK9A==", + "license": "MIT", "dependencies": { "core-js-pure": "^3.20.2", "regenerator-runtime": "^0.13.4" @@ -2063,36 +1963,32 @@ }, "node_modules/@babel/runtime/node_modules/regenerator-runtime": { "version": "0.14.0", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", - "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" + "license": "MIT" }, "node_modules/@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.9.tgz", + "integrity": "sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==", "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.25.9", + "@babel/parser": "^7.25.9", + "@babel/types": "^7.25.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.20.1.tgz", - "integrity": "sha512-d3tN8fkVJwFLkHkBN479SOsw4DMZnz8cdbL/gvuDuzy3TS6Nfw80HuQqhw1pITbIruHyh7d1fMA47kWzmcUEGA==", - "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.1", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.20.1", - "@babel/types": "^7.20.0", - "debug": "^4.1.0", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.9.tgz", + "integrity": "sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==", + "dependencies": { + "@babel/code-frame": "^7.25.9", + "@babel/generator": "^7.25.9", + "@babel/parser": "^7.25.9", + "@babel/template": "^7.25.9", + "@babel/types": "^7.25.9", + "debug": "^4.3.1", "globals": "^11.1.0" }, "engines": { @@ -2100,13 +1996,12 @@ } }, "node_modules/@babel/types": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.21.3.tgz", - "integrity": "sha512-sBGdETxC+/M4o/zKC0sl6sjWv62WFR/uzxrJ6uYyMLZOUlPnwzw0tKgVHOXxaAd5l2g8pEDM5RZ495GPQI77kg==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.0.tgz", + "integrity": "sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==", "dependencies": { - "@babel/helper-string-parser": "^7.19.4", - "@babel/helper-validator-identifier": "^7.19.1", - "to-fast-properties": "^2.0.0" + "@babel/helper-string-parser": "^7.25.9", + 
"@babel/helper-validator-identifier": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -2114,54 +2009,57 @@ }, "node_modules/@bcoe/v8-coverage": { "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", - "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@bpmn-io/cm-theme": { "version": "0.1.0-alpha.2", - "resolved": "https://registry.npmjs.org/@bpmn-io/cm-theme/-/cm-theme-0.1.0-alpha.2.tgz", - "integrity": "sha512-ZILgiYzxk3KMvxplUXmdRFQo45/JehDPg5k9tWfehmzUOSE13ssyLPil8uCloMQnb3yyzyOWTjb/wzKXTHlFQw==", + "license": "MIT", "dependencies": { "@codemirror/language": "^6.3.1", "@codemirror/view": "^6.5.1", "@lezer/highlight": "^1.1.4" + }, + "workspaces": { + "packages": [ + "preview-themes" + ] } }, "node_modules/@bpmn-io/draggle": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@bpmn-io/draggle/-/draggle-4.1.0.tgz", - "integrity": "sha512-gHRjQGJEpEwVxspNwNhnqHHAt8cE1l1cObFEf5YSuSXVxTLZcNAQOgmEDJ+QMk1UPDKfnQwvbeDdv5ytCnksfw==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@bpmn-io/draggle/-/draggle-4.1.1.tgz", + "integrity": "sha512-2frw1gBl5I3XGrIDg4CBy6bpJiOuslKUOg9T91Fke6bIttFkF0zxlTKh4E4zU8g7gAo4ze0HnKMZDgHxea+Itw==", "dependencies": { "contra": "^1.9.4" } }, "node_modules/@bpmn-io/feel-editor": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@bpmn-io/feel-editor/-/feel-editor-1.2.0.tgz", - "integrity": "sha512-402yrNL+a58d9AiNE48IScTiWDGmB+8Fpiq9eqg/sKCzhdHahl5fZyl+cksfcyJjzJF1byUOhYy3UxL3/tbLmQ==", - "dependencies": { - "@bpmn-io/feel-lint": "^1.2.0", - "@codemirror/autocomplete": "^6.12.0", - "@codemirror/commands": "^6.3.3", - "@codemirror/language": "^6.10.0", - "@codemirror/lint": "^6.4.2", - "@codemirror/state": "^6.4.0", - "@codemirror/view": "^6.23.0", + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/@bpmn-io/feel-editor/-/feel-editor-1.9.1.tgz", + "integrity": "sha512-UxSORdh5cwKM4fib4f9ov6J1/BHGpQVNtA+wPyEdKQyCyz3wqwE2/xe5wneVR1j5QFC5m2Na8nTy4a1TDFvZTw==", + "dependencies": { + "@bpmn-io/feel-lint": "^1.3.0", + "@codemirror/autocomplete": "^6.16.2", + "@codemirror/commands": "^6.6.0", + "@codemirror/language": "^6.10.2", + "@codemirror/lint": "^6.8.0", + "@codemirror/state": "^6.4.1", + "@codemirror/view": "^6.28.1", "@lezer/highlight": "^1.2.0", - "lang-feel": "^2.0.0", - "min-dom": "^4.1.0" + "lang-feel": "^2.1.1", + "min-dom": "^4.2.1" }, "engines": { "node": ">= 16" } }, "node_modules/@bpmn-io/feel-lint": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@bpmn-io/feel-lint/-/feel-lint-1.2.0.tgz", - "integrity": "sha512-nsvAYxiSbWyjpd3gNnJd+60aTWrZvngYnZfe+GpmkM/pQoOgtF17GhD/p4fgaeAd/uUP3q9sO6EWRX+OU/p9dw==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@bpmn-io/feel-lint/-/feel-lint-1.3.1.tgz", + "integrity": "sha512-wcFkJKhOm/iqCt5bzkKvxL5Dr9wKwUD+t164bQYbJsTYouAqmkkxiGsoqck42hXwdIhMSguZ+vqQ3hj5QdiYCA==", "dependencies": { - "@codemirror/language": "^6.8.0", + "@codemirror/language": "^6.10.0", "lezer-feel": "^1.2.3" }, "engines": { @@ -2169,41 +2067,41 @@ } }, "node_modules/@bpmn-io/form-js": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js/-/form-js-1.7.3.tgz", - "integrity": "sha512-CPDUwS3lftH/lHG08o4kBthgO3Qz918jwr6KnQ0O6Vtm6KjNLsjwZrbf3RPmLuAJzLfQMxh9oA8dTTZJM4s1UQ==", + "version": "1.12.0", + "resolved": 
"https://registry.npmjs.org/@bpmn-io/form-js/-/form-js-1.12.0.tgz", + "integrity": "sha512-X+/fln7Pa6S/MUkDNbepKxAChdT5gcJl6m8dm63M7s51h0neP8os31/22bT1FPsHplQhNETkNoSkS9IYA2afGw==", "dependencies": { - "@bpmn-io/form-js-carbon-styles": "^1.7.3", - "@bpmn-io/form-js-editor": "^1.7.3", - "@bpmn-io/form-js-playground": "^1.7.3", - "@bpmn-io/form-js-viewer": "^1.7.3" + "@bpmn-io/form-js-carbon-styles": "^1.12.0", + "@bpmn-io/form-js-editor": "^1.12.0", + "@bpmn-io/form-js-playground": "^1.12.0", + "@bpmn-io/form-js-viewer": "^1.12.0" } }, "node_modules/@bpmn-io/form-js-carbon-styles": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-carbon-styles/-/form-js-carbon-styles-1.7.3.tgz", - "integrity": "sha512-5W3zoa4VxY8eaKr4mLu/yRdugzhan8fTKsJGXfJR+iW0ErGdAo969rIxEbINsAKBqNiWxdipoWsEgvPxKbh/VQ==" + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-carbon-styles/-/form-js-carbon-styles-1.12.0.tgz", + "integrity": "sha512-yY2C02wvYj0j74gBV2aVX5irR3XeJbw9D6dqdTfMsMkydNH1jN9YX8jezxWHZRHxe0iEF3VBrc6WwEfWwBXHjw==" }, "node_modules/@bpmn-io/form-js-editor": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-editor/-/form-js-editor-1.7.3.tgz", - "integrity": "sha512-6kHZQJslKavw1M8JgZpOXyeIRj3wk+2TjosT81iWqreVnDS434cUpd8HTgXZBlrPugYmGjDd1e4Oa2CVVxixWw==", - "dependencies": { - "@bpmn-io/draggle": "^4.0.0", - "@bpmn-io/form-js-viewer": "^1.7.3", - "@bpmn-io/properties-panel": "^3.18.1", - "array-move": "^3.0.1", - "big.js": "^6.2.1", - "ids": "^1.0.0", - "min-dash": "^4.2.1", + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-editor/-/form-js-editor-1.12.0.tgz", + "integrity": "sha512-qcNQ74w/Nolg6HA0OP6lXabloYTh6q2TDriWLtQRxBsFCkYFhRepNa1zHr5iP//FQfc7Wh6YMBUdKsj38ZTXoQ==", + "dependencies": { + "@bpmn-io/draggle": "^4.1.1", + "@bpmn-io/form-js-viewer": "^1.12.0", + "@bpmn-io/properties-panel": "^3.25.0", + "array-move": "^4.0.0", + "big.js": "^6.2.2", + "ids": "^1.0.5", + "min-dash": "^4.2.2", "min-dom": "^4.1.0", "preact": "^10.5.14" } }, "node_modules/@bpmn-io/form-js-editor/node_modules/big.js": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.1.tgz", - "integrity": "sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.2.tgz", + "integrity": "sha512-y/ie+Faknx7sZA5MfGA2xKlu0GDv8RWrXGsmlteyJQ2lvoKv9GBK/fpRMc2qlSoBAgNxrixICFCBefIq8WCQpQ==", "engines": { "node": "*" }, @@ -2213,51 +2111,52 @@ } }, "node_modules/@bpmn-io/form-js-playground": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-playground/-/form-js-playground-1.7.3.tgz", - "integrity": "sha512-l87drk8rA3/yQa6RhOwRsVIp0kKGSg+asOXLZppNePh1lcaw0iF4taSa2PQeiI2d1GwnA3fjht+NhzzE/r+2/Q==", + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-playground/-/form-js-playground-1.12.0.tgz", + "integrity": "sha512-JAw0fK+DxK8uZvEQ/zDfpZxvDVeyyZHCxn9hdVGBga464NQvaWHuOMMMfQAu5LYlR+Gb57OK/t8sst3RhabIvw==", "dependencies": { - "@bpmn-io/form-js-editor": "^1.7.3", - "@bpmn-io/form-js-viewer": "^1.7.3", - "@codemirror/autocomplete": "^6.12.0", - "@codemirror/commands": "^6.1.2", + "@bpmn-io/form-js-editor": "^1.12.0", + "@bpmn-io/form-js-viewer": "^1.12.0", + "@codemirror/autocomplete": "^6.18.3", + "@codemirror/commands": "^6.7.1", "@codemirror/lang-json": "^6.0.1", - "@codemirror/language": "^6.10.0", - 
"@codemirror/lint": "^6.0.0", - "@codemirror/state": "^6.1.1", - "@codemirror/view": "^6.23.1", - "classnames": "^2.3.1", + "@codemirror/language": "^6.10.3", + "@codemirror/lint": "^6.8.2", + "@codemirror/state": "^6.4.1", + "@codemirror/view": "^6.34.3", + "classnames": "^2.5.1", "codemirror": "^6.0.1", "downloadjs": "^1.4.7", - "file-drops": "^0.4.0", - "mitt": "^3.0.0", + "file-drops": "^0.5.0", + "mitt": "^3.0.1", "preact": "^10.5.14" } }, "node_modules/@bpmn-io/form-js-viewer": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-viewer/-/form-js-viewer-1.7.3.tgz", - "integrity": "sha512-/XEDHzZbxmYXAp10ClPQu8h/4CoYqPdUYkppD/fL+UXFTO9ZJFBMn2TGgJEwXZP3H6/m6fSFqPFuWnVudWkCYg==", - "dependencies": { - "@carbon/grid": "^11.11.0", - "big.js": "^6.2.1", - "classnames": "^2.3.1", - "didi": "^10.0.1", - "dompurify": "^3.0.8", - "feelers": "^1.3.0", - "feelin": "^3.0.0", + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-viewer/-/form-js-viewer-1.12.0.tgz", + "integrity": "sha512-31jwd3gjcfFlzA3yEKE+Mfo9EtYkCPST1kAHe8Ifv6aRXb08oMxgZQmDfWYdtcIayAD1n6IAaEKTr0RnDf4BOQ==", + "dependencies": { + "@carbon/grid": "^11.29.0", + "big.js": "^6.2.2", + "classnames": "^2.5.1", + "didi": "^10.2.2", + "dompurify": "^3.2.0", + "feelers": "^1.4.0", + "feelin": "^3.2.0", "flatpickr": "^4.6.13", - "ids": "^1.0.0", - "lodash": "^4.5.0", - "min-dash": "^4.2.1", - "preact": "^10.5.14", - "showdown": "^2.1.0" + "ids": "^1.0.5", + "lodash": "^4.17.21", + "luxon": "^3.5.0", + "marked": "^15.0.1", + "min-dash": "^4.2.2", + "preact": "^10.5.14" } }, "node_modules/@bpmn-io/form-js-viewer/node_modules/big.js": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.1.tgz", - "integrity": "sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.2.tgz", + "integrity": "sha512-y/ie+Faknx7sZA5MfGA2xKlu0GDv8RWrXGsmlteyJQ2lvoKv9GBK/fpRMc2qlSoBAgNxrixICFCBefIq8WCQpQ==", "engines": { "node": "*" }, @@ -2267,14 +2166,14 @@ } }, "node_modules/@bpmn-io/properties-panel": { - "version": "3.18.1", - "resolved": "https://registry.npmjs.org/@bpmn-io/properties-panel/-/properties-panel-3.18.1.tgz", - "integrity": "sha512-ygBhVH99IFG1VbMlbvInXQUeqHwQH4uaajFaUi3OsjELpM1WcmHQ72fXPd1tC/OtJJhZoQajHiTI8SdL38t9ug==", + "version": "3.25.0", + "resolved": "https://registry.npmjs.org/@bpmn-io/properties-panel/-/properties-panel-3.25.0.tgz", + "integrity": "sha512-SRGgj8uJc1Yyjcht2g36Q+xKR7sTx5VZXvcwDrdmQKlx5Y3nRmvmMjDGzeGDJDb7pNU1DSlaBJic84uISDBMWg==", "dependencies": { - "@bpmn-io/feel-editor": "^1.2.0", - "@codemirror/view": "^6.14.0", + "@bpmn-io/feel-editor": "^1.9.0", + "@codemirror/view": "^6.28.1", "classnames": "^2.3.1", - "feelers": "^1.3.0", + "feelers": "^1.4.0", "focus-trap": "^7.5.2", "min-dash": "^4.1.1", "min-dom": "^4.0.3" @@ -2285,26 +2184,31 @@ }, "node_modules/@braintree/sanitize-url": { "version": "6.0.4", - "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz", - "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==" + "license": "MIT" }, "node_modules/@carbon/grid": { - "version": "11.21.0", - "resolved": "https://registry.npmjs.org/@carbon/grid/-/grid-11.21.0.tgz", - "integrity": "sha512-Zzhos2we+HqM0obdQgma+OvLoM9dNGq07YcLxFxrc/vEOn/D01sner6dyMMqS2y8036zIaoqVMGArSzPfoxrLA==", + "version": "11.29.0", + "resolved": 
"https://registry.npmjs.org/@carbon/grid/-/grid-11.29.0.tgz", + "integrity": "sha512-SAJhTexN6TjbItcUczOqhzgHBGXLhvUhlTdyqj+wzUH0tqEN8g6gLp+1sn9+rL+kV4obSb/7bdSESZtwQr/tQg==", + "hasInstallScript": true, "dependencies": { - "@carbon/layout": "^11.20.0" + "@carbon/layout": "^11.28.0", + "@ibm/telemetry-js": "^1.5.0" } }, "node_modules/@carbon/layout": { - "version": "11.20.0", - "resolved": "https://registry.npmjs.org/@carbon/layout/-/layout-11.20.0.tgz", - "integrity": "sha512-G9eJE3xb/J98Id9VvTA/b4v+2i/c+IiHAhxNPc0PPpPN6C/r6U4gJsG4yPgQnbuIU42cP9L8OvCrQr0mbrCMlA==" + "version": "11.28.0", + "resolved": "https://registry.npmjs.org/@carbon/layout/-/layout-11.28.0.tgz", + "integrity": "sha512-Yl0Dsxs00EgAaCKpZCXgebuf9BwiBK66a1Oiao6D12p3ViciZ4L18mlRgOPBcDlolP2tUtncz48TlfkWC097hQ==", + "hasInstallScript": true, + "dependencies": { + "@ibm/telemetry-js": "^1.5.0" + } }, "node_modules/@codemirror/autocomplete": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.13.0.tgz", - "integrity": "sha512-SuDrho1klTINfbcMPnyro1ZxU9xJtwDMtb62R8TjL/tOl71IoOsvBo1a9x+hDvHhIzkTcJHy2VC+rmpGgYkRSw==", + "version": "6.18.3", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.3.tgz", + "integrity": "sha512-1dNIOmiM0z4BIBwxmxEfA1yoxh1MF/6KPBbh20a5vphGV0ictKlgQsbJs6D6SkR6iJpGbpwRsa6PFMNlg9T9pQ==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", @@ -2319,29 +2223,28 @@ } }, "node_modules/@codemirror/commands": { - "version": "6.3.3", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.3.3.tgz", - "integrity": "sha512-dO4hcF0fGT9tu1Pj1D2PvGvxjeGkbC6RGcZw6Qs74TH+Ed1gw98jmUgd2axWvIZEqTeTuFrg1lEB1KV6cK9h1A==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.7.1.tgz", + "integrity": "sha512-llTrboQYw5H4THfhN4U3qCnSZ1SOJ60ohhz+SzU0ADGtwlc533DtklQP0vSFaQuCPDn3BPpOd1GbbnUtwNjsrw==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.4.0", - "@codemirror/view": "^6.0.0", + "@codemirror/view": "^6.27.0", "@lezer/common": "^1.1.0" } }, "node_modules/@codemirror/lang-json": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@codemirror/lang-json/-/lang-json-6.0.1.tgz", - "integrity": "sha512-+T1flHdgpqDDlJZ2Lkil/rLiRy684WMLc74xUnjJH48GQdfJo/pudlTRreZmKwzP8/tGdKf83wlbAdOCzlJOGQ==", + "license": "MIT", "dependencies": { "@codemirror/language": "^6.0.0", "@lezer/json": "^1.0.0" } }, "node_modules/@codemirror/language": { - "version": "6.10.1", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.1.tgz", - "integrity": "sha512-5GrXzrhq6k+gL5fjkAwt90nYDmjlzTIJV8THnxNFtNKWotMIlzzN+CpqxqwXOECnUdOndmSeWntVrVcv5axWRQ==", + "version": "6.10.6", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.6.tgz", + "integrity": "sha512-KrsbdCnxEztLVbB5PycWXFxas4EOyk/fPAfruSOnDDppevQgid2XZ+KbJ9u+fDikP/e7MW7HPBTvTb8JlZK9vA==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.23.0", @@ -2352,19 +2255,19 @@ } }, "node_modules/@codemirror/lint": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.5.0.tgz", - "integrity": "sha512-+5YyicIaaAZKU8K43IQi8TBy6mF6giGeWAH7N96Z5LC30Wm5JMjqxOYIE9mxwMG1NbhT2mA3l9hA4uuKUM3E5g==", + "version": "6.8.4", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.4.tgz", + "integrity": 
"sha512-u4q7PnZlJUojeRe8FJa/njJcMctISGgPQ4PnWsd9268R4ZTtU+tfFYmwkBvgcrK2+QQ8tYFVALVb5fVJykKc5A==", "dependencies": { "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0", + "@codemirror/view": "^6.35.0", "crelt": "^1.0.5" } }, "node_modules/@codemirror/search": { - "version": "6.5.6", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.6.tgz", - "integrity": "sha512-rpMgcsh7o0GuCDUXKPvww+muLA1pDJaFrpq/CCHtpQJYz8xopu4D1hPcKRoDD0YlF8gZaqTNIRa4VRBWyhyy7Q==", + "version": "6.5.8", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.8.tgz", + "integrity": "sha512-PoWtZvo7c1XFeZWmmyaOp2G0XVbOnm+fJzvghqGAktBW3cufwJUWvSCcNG0ppXiBEM05mZu6RhMtXPv2hpllig==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2373,13 +2276,12 @@ }, "node_modules/@codemirror/state": { "version": "6.4.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", - "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==" + "license": "MIT" }, "node_modules/@codemirror/view": { - "version": "6.25.1", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.25.1.tgz", - "integrity": "sha512-2LXLxsQnHDdfGzDvjzAwZh2ZviNJm7im6tGpa0IONIDnFd8RZ80D2SNi8PDi6YjKcMoMRK20v6OmKIdsrwsyoQ==", + "version": "6.35.0", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.35.0.tgz", + "integrity": "sha512-I0tYy63q5XkaWsJ8QRv5h6ves7kvtrBWjBcnf/bzohFJQc5c14a1AQRdE8QpPF9eMp5Mq2FMm59TCj1gDfE7kw==", "dependencies": { "@codemirror/state": "^6.4.0", "style-mod": "^4.1.0", @@ -2388,8 +2290,7 @@ }, "node_modules/@colors/colors": { "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", - "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "license": "MIT", "optional": true, "engines": { "node": ">=0.1.90" @@ -2397,13 +2298,11 @@ }, "node_modules/@docsearch/css": { "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.5.2.tgz", - "integrity": "sha512-SPiDHaWKQZpwR2siD0KQUwlStvIAnEyK6tAE2h2Wuoq8ue9skzhlyVQ1ddzOxX6khULnAALDiR/isSF3bnuciA==" + "license": "MIT" }, "node_modules/@docsearch/react": { "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.5.2.tgz", - "integrity": "sha512-9Ahcrs5z2jq/DcAvYtvlqEBHImbm4YJI8M9y0x6Tqg598P40HTEkX7hsMcIuThI+hTFxRGZ9hll0Wygm2yEjng==", + "license": "MIT", "dependencies": { "@algolia/autocomplete-core": "1.9.3", "@algolia/autocomplete-preset-algolia": "1.9.3", @@ -2433,8 +2332,7 @@ }, "node_modules/@docusaurus/core": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz", - "integrity": "sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g==", + "license": "MIT", "dependencies": { "@babel/core": "^7.18.6", "@babel/generator": "^7.18.7", @@ -2521,8 +2419,7 @@ }, "node_modules/@docusaurus/core/node_modules/ansi-regex": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -2532,8 +2429,7 @@ }, "node_modules/@docusaurus/core/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -2546,8 +2442,7 @@ }, "node_modules/@docusaurus/core/node_modules/boxen": { "version": "6.2.1", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", - "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "license": "MIT", "dependencies": { "ansi-align": "^3.0.1", "camelcase": "^6.2.0", @@ -2567,8 +2462,7 @@ }, "node_modules/@docusaurus/core/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2582,8 +2476,7 @@ }, "node_modules/@docusaurus/core/node_modules/cli-boxes": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", - "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -2593,8 +2486,7 @@ }, "node_modules/@docusaurus/core/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -2604,26 +2496,22 @@ }, "node_modules/@docusaurus/core/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/@docusaurus/core/node_modules/emoji-regex": { "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + "license": "MIT" }, "node_modules/@docusaurus/core/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@docusaurus/core/node_modules/string-width": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", @@ -2638,8 +2526,7 @@ }, "node_modules/@docusaurus/core/node_modules/strip-ansi": { "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", + "license": "MIT", "dependencies": { "ansi-regex": "^6.0.1" }, @@ -2652,8 +2539,7 @@ }, "node_modules/@docusaurus/core/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -2663,8 +2549,7 @@ }, "node_modules/@docusaurus/core/node_modules/type-fest": { "version": "2.15.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.15.1.tgz", - "integrity": "sha512-LYSjcIz3NmoQksXq/3/B7Nfad+T8mkaI628agAAnHCpXPTBRMK2ygt3eABpzII8CbZZM8dLdVQ4Gr8ousjFjMw==", + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=12.20" }, @@ -2674,8 +2559,7 @@ }, "node_modules/@docusaurus/core/node_modules/widest-line": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", - "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "license": "MIT", "dependencies": { "string-width": "^5.0.1" }, @@ -2688,8 +2572,7 @@ }, "node_modules/@docusaurus/core/node_modules/wrap-ansi": { "version": "8.0.1", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.0.1.tgz", - "integrity": "sha512-QFF+ufAqhoYHvoHdajT/Po7KoXVBPXS2bgjIam5isfWJPfIOnQZ50JtUiVvCv/sjgacf3yRrt2ZKUZ/V4itN4g==", + "license": "MIT", "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", @@ -2704,8 +2587,7 @@ }, "node_modules/@docusaurus/core/node_modules/wrap-ansi/node_modules/ansi-styles": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.1.0.tgz", - "integrity": "sha512-VbqNsoz55SYGczauuup0MFUyXNQviSpFTj1RQtFzmQLk18qbVSpTFFGMT293rmDaQuKCT6InmbuEyUne4mTuxQ==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -2715,8 +2597,7 @@ }, "node_modules/@docusaurus/cssnano-preset": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.1.tgz", - "integrity": "sha512-ka+vqXwtcW1NbXxWsh6yA1Ckii1klY9E53cJ4O9J09nkMBgrNX3iEFED1fWdv8wf4mJjvGi5RLZ2p9hJNjsLyQ==", + "license": "MIT", "dependencies": { "cssnano-preset-advanced": "^5.3.8", "postcss": "^8.4.14", @@ -2729,8 +2610,7 @@ }, "node_modules/@docusaurus/logger": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.1.tgz", - "integrity": "sha512-5h5ysIIWYIDHyTVd8BjheZmQZmEgWDR54aQ1BX9pjFfpyzFo5puKXKYrYJXbjEHGyVhEzmB9UXwbxGfaZhOjcg==", + "license": "MIT", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.4.0" @@ -2741,8 +2621,7 @@ }, "node_modules/@docusaurus/logger/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -2755,8 +2634,7 @@ }, "node_modules/@docusaurus/logger/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2770,8 +2648,7 @@ }, "node_modules/@docusaurus/logger/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -2781,21 +2658,18 @@ }, 
"node_modules/@docusaurus/logger/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/@docusaurus/logger/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@docusaurus/logger/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -2805,8 +2679,7 @@ }, "node_modules/@docusaurus/mdx-loader": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.1.tgz", - "integrity": "sha512-4KhUhEavteIAmbBj7LVFnrVYDiU51H5YWW1zY6SmBSte/YLhDutztLTBE0PQl1Grux1jzUJeaSvAzHpTn6JJDQ==", + "license": "MIT", "dependencies": { "@babel/parser": "^7.18.8", "@babel/traverse": "^7.18.8", @@ -2836,8 +2709,7 @@ }, "node_modules/@docusaurus/mdx-loader/node_modules/unist-util-visit": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0", @@ -2850,8 +2722,7 @@ }, "node_modules/@docusaurus/mdx-loader/node_modules/unist-util-visit-parents": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0" @@ -2863,8 +2734,7 @@ }, "node_modules/@docusaurus/module-type-aliases": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.1.tgz", - "integrity": "sha512-gLBuIFM8Dp2XOCWffUDSjtxY7jQgKvYujt7Mx5s4FCTfoL5dN1EVbnrn+O2Wvh8b0a77D57qoIDY7ghgmatR1A==", + "license": "MIT", "dependencies": { "@docusaurus/react-loadable": "5.5.2", "@docusaurus/types": "2.4.1", @@ -2882,8 +2752,7 @@ }, "node_modules/@docusaurus/plugin-content-blog": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.1.tgz", - "integrity": "sha512-E2i7Knz5YIbE1XELI6RlTnZnGgS52cUO4BlCiCUCvQHbR+s1xeIWz4C6BtaVnlug0Ccz7nFSksfwDpVlkujg5Q==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/logger": "2.4.1", @@ -2912,8 +2781,7 @@ }, "node_modules/@docusaurus/plugin-content-blog/node_modules/unist-util-visit": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0", @@ -2926,8 +2794,7 @@ }, 
"node_modules/@docusaurus/plugin-content-blog/node_modules/unist-util-visit-parents": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0" @@ -2939,8 +2806,7 @@ }, "node_modules/@docusaurus/plugin-content-docs": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.1.tgz", - "integrity": "sha512-Lo7lSIcpswa2Kv4HEeUcGYqaasMUQNpjTXpV0N8G6jXgZaQurqp7E8NGYeGbDXnb48czmHWbzDL4S3+BbK0VzA==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/logger": "2.4.1", @@ -2969,8 +2835,7 @@ }, "node_modules/@docusaurus/plugin-content-pages": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.1.tgz", - "integrity": "sha512-/UjuH/76KLaUlL+o1OvyORynv6FURzjurSjvn2lbWTFc4tpYY2qLYTlKpTCBVPhlLUQsfyFnshEJDLmPneq2oA==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/mdx-loader": "2.4.1", @@ -2991,8 +2856,7 @@ }, "node_modules/@docusaurus/plugin-debug": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.1.tgz", - "integrity": "sha512-7Yu9UPzRShlrH/G8btOpR0e6INFZr0EegWplMjOqelIwAcx3PKyR8mgPTxGTxcqiYj6hxSCRN0D8R7YrzImwNA==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/types": "2.4.1", @@ -3011,8 +2875,7 @@ }, "node_modules/@docusaurus/plugin-google-analytics": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.1.tgz", - "integrity": "sha512-dyZJdJiCoL+rcfnm0RPkLt/o732HvLiEwmtoNzOoz9MSZz117UH2J6U2vUDtzUzwtFLIf32KkeyzisbwUCgcaQ==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/types": "2.4.1", @@ -3029,8 +2892,7 @@ }, "node_modules/@docusaurus/plugin-google-gtag": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.1.tgz", - "integrity": "sha512-mKIefK+2kGTQBYvloNEKtDmnRD7bxHLsBcxgnbt4oZwzi2nxCGjPX6+9SQO2KCN5HZbNrYmGo5GJfMgoRvy6uA==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/types": "2.4.1", @@ -3047,8 +2909,7 @@ }, "node_modules/@docusaurus/plugin-google-tag-manager": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.1.tgz", - "integrity": "sha512-Zg4Ii9CMOLfpeV2nG74lVTWNtisFaH9QNtEw48R5QE1KIwDBdTVaiSA18G1EujZjrzJJzXN79VhINSbOJO/r3g==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/types": "2.4.1", @@ -3065,8 +2926,7 @@ }, "node_modules/@docusaurus/plugin-sitemap": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.1.tgz", - "integrity": "sha512-lZx+ijt/+atQ3FVE8FOHV/+X3kuok688OydDXrqKRJyXBJZKgGjA2Qa8RjQ4f27V2woaXhtnyrdPop/+OjVMRg==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/logger": "2.4.1", @@ -3088,8 +2948,7 @@ }, "node_modules/@docusaurus/preset-classic": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.1.tgz", - "integrity": 
"sha512-P4//+I4zDqQJ+UDgoFrjIFaQ1MeS9UD1cvxVQaI6O7iBmiHQm0MGROP1TbE7HlxlDPXFJjZUK3x3cAoK63smGQ==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/plugin-content-blog": "2.4.1", @@ -3115,8 +2974,7 @@ }, "node_modules/@docusaurus/react-loadable": { "version": "5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "license": "MIT", "dependencies": { "@types/react": "*", "prop-types": "^15.6.2" @@ -3127,8 +2985,7 @@ }, "node_modules/@docusaurus/theme-classic": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.1.tgz", - "integrity": "sha512-Rz0wKUa+LTW1PLXmwnf8mn85EBzaGSt6qamqtmnh9Hflkc+EqiYMhtUJeLdV+wsgYq4aG0ANc+bpUDpsUhdnwg==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/mdx-loader": "2.4.1", @@ -3164,10 +3021,16 @@ "react-dom": "^16.8.4 || ^17.0.0" } }, + "node_modules/@docusaurus/theme-classic/node_modules/clsx": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/@docusaurus/theme-common": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.1.tgz", - "integrity": "sha512-G7Zau1W5rQTaFFB3x3soQoZpkgMbl/SYNG8PfMFIjKa3M3q8n0m/GRf5/H/e5BqOvt8c+ZWIXGCiz+kUCSHovA==", + "license": "MIT", "dependencies": { "@docusaurus/mdx-loader": "2.4.1", "@docusaurus/module-type-aliases": "2.4.1", @@ -3194,10 +3057,16 @@ "react-dom": "^16.8.4 || ^17.0.0" } }, + "node_modules/@docusaurus/theme-common/node_modules/clsx": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/@docusaurus/theme-mermaid": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-mermaid/-/theme-mermaid-2.4.1.tgz", - "integrity": "sha512-cM0ImKIqZfjmlaC+uAjep39kNBvb1bjz429QBHGs32maob4+UnRzVPPpCUCltyPVb4xjG5h1Tyq4pHzhtIikqA==", + "license": "MIT", "dependencies": { "@docusaurus/core": "2.4.1", "@docusaurus/module-type-aliases": "2.4.1", @@ -3218,8 +3087,7 @@ }, "node_modules/@docusaurus/theme-search-algolia": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.1.tgz", - "integrity": "sha512-6BcqW2lnLhZCXuMAvPRezFs1DpmEKzXFKlYjruuas+Xy3AQeFzDJKTJFIm49N77WFCTyxff8d3E4Q9pi/+5McQ==", + "license": "MIT", "dependencies": { "@docsearch/react": "^3.1.1", "@docusaurus/core": "2.4.1", @@ -3246,10 +3114,16 @@ "react-dom": "^16.8.4 || ^17.0.0" } }, + "node_modules/@docusaurus/theme-search-algolia/node_modules/clsx": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/@docusaurus/theme-translations": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.1.tgz", - "integrity": "sha512-T1RAGP+f86CA1kfE8ejZ3T3pUU3XcyvrGMfC/zxCtc2BsnoexuNI9Vk2CmuKCb+Tacvhxjv5unhxXce0+NKyvA==", + "license": "MIT", "dependencies": { "fs-extra": "^10.1.0", "tslib": "^2.4.0" @@ -3260,8 +3134,7 @@ }, "node_modules/@docusaurus/types": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz", - "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==", + "license": "MIT", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*", @@ 
-3279,8 +3152,7 @@ }, "node_modules/@docusaurus/utils": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.1.tgz", - "integrity": "sha512-1lvEZdAQhKNht9aPXPoh69eeKnV0/62ROhQeFKKxmzd0zkcuE/Oc5Gpnt00y/f5bIsmOsYMY7Pqfm/5rteT5GA==", + "license": "MIT", "dependencies": { "@docusaurus/logger": "2.4.1", "@svgr/webpack": "^6.2.1", @@ -3313,8 +3185,7 @@ }, "node_modules/@docusaurus/utils-common": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.1.tgz", - "integrity": "sha512-bCVGdZU+z/qVcIiEQdyx0K13OC5mYwxhSuDUR95oFbKVuXYRrTVrwZIqQljuo1fyJvFTKHiL9L9skQOPokuFNQ==", + "license": "MIT", "dependencies": { "tslib": "^2.4.0" }, @@ -3332,8 +3203,7 @@ }, "node_modules/@docusaurus/utils-validation": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.1.tgz", - "integrity": "sha512-unII3hlJlDwZ3w8U+pMO3Lx3RhI4YEbY3YNsQj4yzrkZzlpqZOLuAiZK2JyULnD+TKbceKU0WyWkQXtYbLNDFA==", + "license": "MIT", "dependencies": { "@docusaurus/logger": "2.4.1", "@docusaurus/utils": "2.4.1", @@ -3347,8 +3217,7 @@ }, "node_modules/@docusaurus/utils/node_modules/escape-string-regexp": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -3358,36 +3227,39 @@ }, "node_modules/@exodus/schemasafe": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@exodus/schemasafe/-/schemasafe-1.3.0.tgz", - "integrity": "sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw==" + "license": "MIT" }, "node_modules/@hapi/hoek": { "version": "9.2.1", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.2.1.tgz", - "integrity": "sha512-gfta+H8aziZsm8pZa0vj04KO6biEiisppNgA1kbJvFrrWu9Vm7eaUEy76DIxsuTaWvti5fkJVhllWc6ZTE+Mdw==" + "license": "BSD-3-Clause" }, "node_modules/@hapi/topo": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", - "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "license": "BSD-3-Clause", "dependencies": { "@hapi/hoek": "^9.0.0" } }, "node_modules/@hookform/error-message": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@hookform/error-message/-/error-message-2.0.1.tgz", - "integrity": "sha512-U410sAr92xgxT1idlu9WWOVjndxLdgPUHEB8Schr27C9eh7/xUnITWpCMF93s+lGiG++D4JnbSnrb5A21AdSNg==", + "license": "MIT", "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0", "react-hook-form": "^7.0.0" } }, + "node_modules/@ibm/telemetry-js": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@ibm/telemetry-js/-/telemetry-js-1.8.0.tgz", + "integrity": "sha512-1u/8f5TtDHXWNQe+YfIESesZGX2PmhEfyU0znlyFvATch+xc5fPYjXj2gWKMTmdKsDawqAm/BkJBQjx2CDlZww==", + "bin": { + "ibmtelemetry": "dist/collect.js" + } + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "license": "ISC", "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", @@ -3402,8 +3274,7 @@ }, "node_modules/@isaacs/cliui/node_modules/ansi-regex": { "version": "6.0.1", - 
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -3413,8 +3284,7 @@ }, "node_modules/@isaacs/cliui/node_modules/ansi-styles": { "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -3424,13 +3294,11 @@ }, "node_modules/@isaacs/cliui/node_modules/emoji-regex": { "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + "license": "MIT" }, "node_modules/@isaacs/cliui/node_modules/string-width": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", @@ -3445,8 +3313,7 @@ }, "node_modules/@isaacs/cliui/node_modules/strip-ansi": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", "dependencies": { "ansi-regex": "^6.0.1" }, @@ -3459,8 +3326,7 @@ }, "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", @@ -3475,9 +3341,8 @@ }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", - "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", "dev": true, + "license": "ISC", "dependencies": { "camelcase": "^5.3.1", "find-up": "^4.1.0", @@ -3491,27 +3356,24 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/camelcase": { "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3522,33 +3384,32 @@ }, 
"node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@istanbuljs/schema": { "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@jest/console": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.6.4.tgz", - "integrity": "sha512-wNK6gC0Ha9QeEPSkeJedQuTQqxZYnDPuDcDhVuVatRvMkL4D0VTvFVZj+Yuh6caG2aOfzkUZ36KtCmLNtR02hw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", "slash": "^3.0.0" }, "engines": { @@ -3557,9 +3418,8 @@ }, "node_modules/@jest/console/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -3572,9 +3432,8 @@ }, "node_modules/@jest/console/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -3588,9 +3447,8 @@ }, "node_modules/@jest/console/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -3600,24 +3458,21 @@ }, "node_modules/@jest/console/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@jest/console/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@jest/console/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { 
"has-flag": "^4.0.0" }, @@ -3626,15 +3481,16 @@ } }, "node_modules/@jest/core": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.6.4.tgz", - "integrity": "sha512-U/vq5ccNTSVgYH7mHnodHmCffGWHJnz/E1BEWlLuK5pM4FZmGfBn/nrJGLjUsSmyx3otCeqc1T31F4y08AMDLg==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/console": "^29.6.4", - "@jest/reporters": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", "@jest/types": "^29.6.3", "@types/node": "*", "ansi-escapes": "^4.2.1", @@ -3642,21 +3498,21 @@ "ci-info": "^3.2.0", "exit": "^0.1.2", "graceful-fs": "^4.2.9", - "jest-changed-files": "^29.6.3", - "jest-config": "^29.6.4", - "jest-haste-map": "^29.6.4", - "jest-message-util": "^29.6.3", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", "jest-regex-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-resolve-dependencies": "^29.6.4", - "jest-runner": "^29.6.4", - "jest-runtime": "^29.6.4", - "jest-snapshot": "^29.6.4", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", - "jest-watcher": "^29.6.4", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", "micromatch": "^4.0.4", - "pretty-format": "^29.6.3", + "pretty-format": "^29.7.0", "slash": "^3.0.0", "strip-ansi": "^6.0.0" }, @@ -3674,9 +3530,8 @@ }, "node_modules/@jest/core/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -3689,9 +3544,8 @@ }, "node_modules/@jest/core/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -3704,9 +3558,9 @@ } }, "node_modules/@jest/core/node_modules/ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", "dev": true, "funding": [ { @@ -3714,15 +3568,15 @@ "url": "https://github.com/sponsors/sibiraj-s" } ], + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@jest/core/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": 
true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -3732,24 +3586,21 @@ }, "node_modules/@jest/core/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@jest/core/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@jest/core/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -3758,38 +3609,41 @@ } }, "node_modules/@jest/environment": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.6.4.tgz", - "integrity": "sha512-sQ0SULEjA1XUTHmkBRl7A1dyITM9yb1yb3ZNKPX3KlTd6IG7mWUe3e2yfExtC2Zz1Q+mMckOLHmL/qLiuQJrBQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/fake-timers": "^29.6.4", + "@jest/fake-timers": "^29.7.0", "@jest/types": "^29.6.3", "@types/node": "*", - "jest-mock": "^29.6.3" + "jest-mock": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/expect": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.6.4.tgz", - "integrity": "sha512-Warhsa7d23+3X5bLbrbYvaehcgX5TLYhI03JKoedTiI8uJU4IhqYBWF7OSSgUyz4IgLpUYPkK0AehA5/fRclAA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", "dev": true, + "license": "MIT", "dependencies": { - "expect": "^29.6.4", - "jest-snapshot": "^29.6.4" + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/expect-utils": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.6.4.tgz", - "integrity": "sha512-FEhkJhqtvBwgSpiTrocquJCdXPsyvNKcl/n7A3u7X4pVoF4bswm11c9d4AV+kfq2Gpv/mM8x7E7DsRvH+djkrg==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", "dev": true, + "license": "MIT", "dependencies": { "jest-get-type": "^29.6.3" }, @@ -3798,47 +3652,50 @@ } }, "node_modules/@jest/fake-timers": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.6.4.tgz", - "integrity": "sha512-6UkCwzoBK60edXIIWb0/KWkuj7R7Qq91vVInOe3De6DSpaEiqjKcJw4F7XUet24Wupahj9J6PlR09JqJ5ySDHw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": 
"sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@sinonjs/fake-timers": "^10.0.2", "@types/node": "*", - "jest-message-util": "^29.6.3", - "jest-mock": "^29.6.3", - "jest-util": "^29.6.3" + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/globals": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.6.4.tgz", - "integrity": "sha512-wVIn5bdtjlChhXAzVXavcY/3PEjf4VqM174BM3eGL5kMxLiZD5CLnbmkEyA1Dwh9q8XjP6E8RwjBsY/iCWrWsA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/environment": "^29.6.4", - "@jest/expect": "^29.6.4", + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", "@jest/types": "^29.6.3", - "jest-mock": "^29.6.3" + "jest-mock": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/reporters": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.6.4.tgz", - "integrity": "sha512-sxUjWxm7QdchdrD3NfWKrL8FBsortZeibSJv4XLjESOOjSUOkjQcb0ZHJwfhEGIvBvTluTzfG2yZWZhkrXJu8g==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", "dev": true, + "license": "MIT", "dependencies": { "@bcoe/v8-coverage": "^0.2.3", - "@jest/console": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", "@jest/types": "^29.6.3", "@jridgewell/trace-mapping": "^0.3.18", "@types/node": "*", @@ -3852,9 +3709,9 @@ "istanbul-lib-report": "^3.0.0", "istanbul-lib-source-maps": "^4.0.0", "istanbul-reports": "^3.1.3", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3", - "jest-worker": "^29.6.4", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", "slash": "^3.0.0", "string-length": "^4.0.1", "strip-ansi": "^6.0.0", @@ -3874,9 +3731,8 @@ }, "node_modules/@jest/reporters/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -3889,9 +3745,8 @@ }, "node_modules/@jest/reporters/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -3905,9 +3760,8 @@ }, "node_modules/@jest/reporters/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", 
"dependencies": { "color-name": "~1.1.4" }, @@ -3917,27 +3771,26 @@ }, "node_modules/@jest/reporters/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@jest/reporters/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@jest/reporters/node_modules/jest-worker": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.4.tgz", - "integrity": "sha512-6dpvFV4WjcWbDVGgHTWo/aupl8/LbBx2NSKfiwqf79xC/yeJjKHT1+StcKy/2KTmW16hE68ccKVOtXf+WZGz7Q==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", "dev": true, + "license": "MIT", "dependencies": { "@types/node": "*", - "jest-util": "^29.6.3", + "jest-util": "^29.7.0", "merge-stream": "^2.0.0", "supports-color": "^8.0.0" }, @@ -3947,9 +3800,8 @@ }, "node_modules/@jest/reporters/node_modules/jest-worker/node_modules/supports-color": { "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -3962,9 +3814,8 @@ }, "node_modules/@jest/reporters/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -3974,9 +3825,8 @@ }, "node_modules/@jest/schemas": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", - "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", "dev": true, + "license": "MIT", "dependencies": { "@sinclair/typebox": "^0.27.8" }, @@ -3986,9 +3836,8 @@ }, "node_modules/@jest/source-map": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", - "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "^0.3.18", "callsites": "^3.0.0", @@ -3999,12 +3848,13 @@ } }, "node_modules/@jest/test-result": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.6.4.tgz", - "integrity": "sha512-uQ1C0AUEN90/dsyEirgMLlouROgSY+Wc/JanVVk0OiUKa5UFh7sJpMEM3aoUBAz2BRNvUJ8j3d294WFuRxSyOQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/console": "^29.6.4", + "@jest/console": 
"^29.7.0", "@jest/types": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", "collect-v8-coverage": "^1.0.0" @@ -4014,14 +3864,15 @@ } }, "node_modules/@jest/test-sequencer": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.6.4.tgz", - "integrity": "sha512-E84M6LbpcRq3fT4ckfKs9ryVanwkaIB0Ws9bw3/yP4seRLg/VaCZ/LgW0MCq5wwk4/iP/qnilD41aj2fsw2RMg==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/test-result": "^29.6.4", + "@jest/test-result": "^29.7.0", "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", + "jest-haste-map": "^29.7.0", "slash": "^3.0.0" }, "engines": { @@ -4029,10 +3880,11 @@ } }, "node_modules/@jest/transform": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.6.4.tgz", - "integrity": "sha512-8thgRSiXUqtr/pPGY/OsyHuMjGyhVnWrFAwoxmIemlBuiMyU1WFs0tXoNxzcr4A4uErs/ABre76SGmrr5ab/AA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@jest/types": "^29.6.3", @@ -4042,9 +3894,9 @@ "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", + "jest-haste-map": "^29.7.0", "jest-regex-util": "^29.6.3", - "jest-util": "^29.6.3", + "jest-util": "^29.7.0", "micromatch": "^4.0.4", "pirates": "^4.0.4", "slash": "^3.0.0", @@ -4056,9 +3908,8 @@ }, "node_modules/@jest/transform/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -4071,9 +3922,8 @@ }, "node_modules/@jest/transform/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -4087,9 +3937,8 @@ }, "node_modules/@jest/transform/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -4099,30 +3948,26 @@ }, "node_modules/@jest/transform/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@jest/transform/node_modules/convert-source-map": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", - "integrity": 
"sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@jest/transform/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@jest/transform/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -4132,9 +3977,8 @@ }, "node_modules/@jest/transform/node_modules/write-file-atomic": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", - "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", "dev": true, + "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^3.0.7" @@ -4145,9 +3989,8 @@ }, "node_modules/@jest/types": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", - "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/schemas": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", @@ -4162,9 +4005,8 @@ }, "node_modules/@jest/types/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -4177,9 +4019,8 @@ }, "node_modules/@jest/types/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -4193,9 +4034,8 @@ }, "node_modules/@jest/types/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -4205,24 +4045,21 @@ }, "node_modules/@jest/types/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@jest/types/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/@jest/types/node_modules/supports-color": { "version": "7.2.0", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -4231,13 +4068,13 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", - "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" @@ -4245,29 +4082,27 @@ }, "node_modules/@jridgewell/resolve-uri": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", - "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "license": "MIT", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.19", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", - "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -4275,31 +4110,28 @@ }, "node_modules/@jsdevtools/ono": { "version": "7.1.3", - "resolved": "https://registry.npmjs.org/@jsdevtools/ono/-/ono-7.1.3.tgz", - "integrity": "sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==" + "license": "MIT" }, "node_modules/@leichtgewicht/ip-codec": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", - "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==" + "license": "MIT" }, "node_modules/@lezer/common": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", - "integrity": 
"sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==" + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz", + "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==" }, "node_modules/@lezer/highlight": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", - "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz", + "integrity": "sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==", "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@lezer/json": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@lezer/json/-/json-1.0.2.tgz", - "integrity": "sha512-xHT2P4S5eeCYECyKNPhr4cbEL9tc8w83SPwRC373o9uEdrvGKTZoJVAGxpOsZckMlEh9W23Pc72ew918RWQOBQ==", + "license": "MIT", "dependencies": { "@lezer/common": "^1.2.0", "@lezer/highlight": "^1.0.0", @@ -4307,17 +4139,17 @@ } }, "node_modules/@lezer/lr": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.0.tgz", - "integrity": "sha512-Wst46p51km8gH0ZUmeNrtpRYmdlRHUpN1DQd3GFAyKANi8WVz8c2jHYTf1CVScFaCjQw1iO3ZZdqGDxQPRErTg==", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz", + "integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==", "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@lezer/markdown": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.2.0.tgz", - "integrity": "sha512-d7MwsfAukZJo1GpPrcPGa3MxaFFOqNp0gbqF+3F7pTeNDOgeJN1muXzx1XXDPt+Ac+/voCzsH7qXqnn+xReG/g==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.3.2.tgz", + "integrity": "sha512-Wu7B6VnrKTbBEohqa63h5vxXjiC4pO5ZQJ/TDbhJxPQaaIoRD/6UVDhSDtVsCwVZV12vvN9KxuLL3ATMnlG0oQ==", "dependencies": { "@lezer/common": "^1.0.0", "@lezer/highlight": "^1.0.0" @@ -4325,8 +4157,7 @@ }, "node_modules/@mdx-js/mdx": { "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz", - "integrity": "sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==", + "license": "MIT", "dependencies": { "@babel/core": "7.12.9", "@babel/plugin-syntax-jsx": "7.12.1", @@ -4355,8 +4186,7 @@ }, "node_modules/@mdx-js/mdx/node_modules/@babel/core": { "version": "7.12.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", - "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.10.4", "@babel/generator": "^7.12.5", @@ -4385,8 +4215,7 @@ }, "node_modules/@mdx-js/mdx/node_modules/@babel/plugin-syntax-jsx": { "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", - "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -4396,16 +4225,14 @@ }, "node_modules/@mdx-js/mdx/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", 
- "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/@mdx-js/mdx/node_modules/unified": { "version": "9.2.0", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", - "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "license": "MIT", "dependencies": { "bail": "^1.0.0", "extend": "^3.0.0", @@ -4421,8 +4248,7 @@ }, "node_modules/@mdx-js/mdx/node_modules/unist-util-visit": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0", @@ -4435,8 +4261,7 @@ }, "node_modules/@mdx-js/mdx/node_modules/unist-util-visit-parents": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0" @@ -4448,8 +4273,7 @@ }, "node_modules/@mdx-js/react": { "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-1.6.22.tgz", - "integrity": "sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -4460,8 +4284,7 @@ }, "node_modules/@mdx-js/util": { "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz", - "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -4469,8 +4292,7 @@ }, "node_modules/@mrmlnc/readdir-enhanced": { "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", + "license": "MIT", "dependencies": { "call-me-maybe": "^1.0.1", "glob-to-regexp": "^0.3.0" @@ -4481,13 +4303,11 @@ }, "node_modules/@mrmlnc/readdir-enhanced/node_modules/glob-to-regexp": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", - "integrity": "sha512-Iozmtbqv0noj0uDDqoL0zNq0VBEfK2YFoMAZoxJe4cwphvLR+JskfF30QhXHOR4m3KrE6NLRYw+U9MRXvifyig==" + "license": "BSD" }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "license": "MIT", "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" @@ -4498,16 +4318,14 @@ }, "node_modules/@nodelib/fs.stat": { "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/@nodelib/fs.walk": { "version": "1.2.8", - "resolved": 
"https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "license": "MIT", "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" @@ -4518,8 +4336,7 @@ }, "node_modules/@paloaltonetworks/openapi-to-postmanv2": { "version": "3.1.0-hotfix.1", - "resolved": "https://registry.npmjs.org/@paloaltonetworks/openapi-to-postmanv2/-/openapi-to-postmanv2-3.1.0-hotfix.1.tgz", - "integrity": "sha512-0bdaPCEyQbnUo4xpOu7EzxXXkDx4BAXqc8QSbVBlzlVB5KoTLJiKKB4c3fa4BXbK+3u/OqfLbeNCebc2EC8ngA==", + "license": "Apache-2.0", "dependencies": { "@paloaltonetworks/postman-collection": "^4.1.0", "ajv": "8.1.0", @@ -4542,8 +4359,7 @@ }, "node_modules/@paloaltonetworks/openapi-to-postmanv2/node_modules/ajv": { "version": "8.1.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.1.0.tgz", - "integrity": "sha512-B/Sk2Ix7A36fs/ZkuGLIR86EdjbgR6fsAcbx9lOP/QBSXujDNbVmIS/U4Itz5k8fPFDeVZl/zQ/gJW4Jrq6XjQ==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -4557,26 +4373,22 @@ }, "node_modules/@paloaltonetworks/openapi-to-postmanv2/node_modules/argparse": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/@paloaltonetworks/openapi-to-postmanv2/node_modules/async": { "version": "3.2.1", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.1.tgz", - "integrity": "sha512-XdD5lRO/87udXCMC9meWdYiR+Nq6ZjUfXidViUZGu2F1MO4T3XwZ1et0hb2++BgLfhyJwy44BGB/yx80ABx8hg==" + "license": "MIT" }, "node_modules/@paloaltonetworks/openapi-to-postmanv2/node_modules/commander": { "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + "license": "MIT" }, "node_modules/@paloaltonetworks/openapi-to-postmanv2/node_modules/js-yaml": { "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -4587,14 +4399,12 @@ }, "node_modules/@paloaltonetworks/openapi-to-postmanv2/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/@paloaltonetworks/postman-code-generators": { "version": "1.1.15-patch.2", - "resolved": "https://registry.npmjs.org/@paloaltonetworks/postman-code-generators/-/postman-code-generators-1.1.15-patch.2.tgz", - "integrity": "sha512-tRnAKtV4M8wLxcVnAx6ZCjCqbrR1xiqJNQkf1A71K8UxEP3N/+EspT82N5c0555w02oYFk21ViHuzuhm4gaGLw==", "hasInstallScript": true, + "license": "Apache-2.0", "dependencies": { "@paloaltonetworks/postman-collection": "^4.1.0", "async": "^3.2.4", @@ -4607,13 +4417,11 @@ }, "node_modules/@paloaltonetworks/postman-code-generators/node_modules/async": { "version": "3.2.5", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.5.tgz", - "integrity": 
"sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==" + "license": "MIT" }, "node_modules/@paloaltonetworks/postman-collection": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/@paloaltonetworks/postman-collection/-/postman-collection-4.1.1.tgz", - "integrity": "sha512-9JHHkkD8Xb4rvdKob7TDPRfqfmdG3KU0aO5gJyyjvMFbOVysam5I0d8/9HPOuJXWkUHGo3Sn+ov2Fcm2bnJ52Q==", + "license": "Apache-2.0", "dependencies": { "file-type": "3.9.0", "http-reasons": "0.1.0", @@ -4632,16 +4440,14 @@ }, "node_modules/@paloaltonetworks/postman-collection/node_modules/file-type": { "version": "3.9.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-3.9.0.tgz", - "integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/@paloaltonetworks/postman-collection/node_modules/iconv-lite": { "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" }, @@ -4651,8 +4457,7 @@ }, "node_modules/@paloaltonetworks/postman-collection/node_modules/semver": { "version": "7.3.5", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", - "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", + "license": "ISC", "dependencies": { "lru-cache": "^6.0.0" }, @@ -4665,41 +4470,33 @@ }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "license": "MIT", "optional": true, "engines": { "node": ">=14" } }, "node_modules/@playwright/test": { - "version": "1.32.2", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.32.2.tgz", - "integrity": "sha512-nhaTSDpEdTTttdkDE8Z6K3icuG1DVRxrl98Qq0Lfc63SS9a2sjc9+x8ezysh7MzCKz6Y+nArml3/mmt+gqRmQQ==", + "version": "1.49.0", "dev": true, + "license": "Apache-2.0", "dependencies": { - "@types/node": "*", - "playwright-core": "1.32.2" + "playwright": "1.49.0" }, "bin": { "playwright": "cli.js" }, "engines": { - "node": ">=14" - }, - "optionalDependencies": { - "fsevents": "2.3.2" + "node": ">=18" } }, "node_modules/@polka/url": { "version": "1.0.0-next.21", - "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.21.tgz", - "integrity": "sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g==" + "license": "MIT" }, "node_modules/@redocly/ajv": { "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@redocly/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-9GWx27t7xWhDIR02PA18nzBdLcKQRgc46xNQvjFkrYk4UOmvKhJ/dawwiX0cCOeetN5LcaaiqQbVOWYK62SGHw==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -4713,13 +4510,11 @@ }, "node_modules/@redocly/ajv/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/@redocly/openapi-core": { "version": "1.7.0", - 
"resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.7.0.tgz", - "integrity": "sha512-mDl9tq96WjMElX4RX+oyqfTiquBNXzFRWres/JN6AlWhBbhFOz2nXnCCIILcjZkRchKFDKShU+pqHpvPJ7xVDQ==", + "license": "MIT", "dependencies": { "@redocly/ajv": "^8.11.0", "colorette": "^1.2.0", @@ -4738,21 +4533,18 @@ }, "node_modules/@redocly/openapi-core/node_modules/brace-expansion": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0" } }, "node_modules/@redocly/openapi-core/node_modules/colorette": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.4.0.tgz", - "integrity": "sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g==" + "license": "MIT" }, "node_modules/@redocly/openapi-core/node_modules/minimatch": { "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "license": "ISC", "dependencies": { "brace-expansion": "^2.0.1" }, @@ -4762,8 +4554,7 @@ }, "node_modules/@reduxjs/toolkit": { "version": "1.9.7", - "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-1.9.7.tgz", - "integrity": "sha512-t7v8ZPxhhKgOKtU+uyJT13lu4vL7az5aFi4IdoDs/eS548edn2M8Ik9h8fxgvMjGoAUVFSt6ZC1P5cWmQ014QQ==", + "license": "MIT", "dependencies": { "immer": "^9.0.21", "redux": "^4.2.1", @@ -4783,65 +4574,66 @@ } } }, + "node_modules/@rrweb/types": { + "version": "2.0.0-alpha.17", + "resolved": "https://registry.npmjs.org/@rrweb/types/-/types-2.0.0-alpha.17.tgz", + "integrity": "sha512-AfDTVUuCyCaIG0lTSqYtrZqJX39ZEYzs4fYKnexhQ+id+kbZIpIJtaut5cto6dWZbB3SEe4fW0o90Po3LvTmfg==", + "dependencies": { + "rrweb-snapshot": "^2.0.0-alpha.17" + } + }, "node_modules/@saucelabs/theme-github-codeblock": { "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@saucelabs/theme-github-codeblock/-/theme-github-codeblock-0.2.3.tgz", - "integrity": "sha512-GSl3Lr/jOWm4OP3BPX2vXxc8FMSOXj1mJnls6cUqMwlGOfKQ1Ia9pq1O9/ES+5TrZHIzAws/n5FFSn1OkGJw/Q==" + "license": "MIT" }, "node_modules/@sideway/address": { "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", - "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "license": "BSD-3-Clause", "dependencies": { "@hapi/hoek": "^9.0.0" } }, "node_modules/@sideway/formula": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.0.tgz", - "integrity": "sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg==" + "license": "BSD-3-Clause" }, "node_modules/@sideway/pinpoint": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", - "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + "license": "BSD-3-Clause" }, "node_modules/@sinclair/typebox": { "version": "0.27.8", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", - "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", - "dev": true + "dev": true, + "license": "MIT" }, 
"node_modules/@sindresorhus/is": { "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/@sinonjs/commons": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz", - "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "type-detect": "4.0.8" } }, "node_modules/@sinonjs/fake-timers": { "version": "10.3.0", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", - "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.0" } }, "node_modules/@slorber/static-site-generator-webpack-plugin": { "version": "4.0.7", - "resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz", - "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==", + "license": "MIT", "dependencies": { "eval": "^0.1.8", "p-map": "^4.0.0", @@ -4853,8 +4645,7 @@ }, "node_modules/@stencil/core": { "version": "2.22.3", - "resolved": "https://registry.npmjs.org/@stencil/core/-/core-2.22.3.tgz", - "integrity": "sha512-kmVA0M/HojwsfkeHsifvHVIYe4l5tin7J5+DLgtl8h6WWfiMClND5K3ifCXXI2ETDNKiEk21p6jql3Fx9o2rng==", + "license": "MIT", "bin": { "stencil": "bin/stencil" }, @@ -4865,8 +4656,7 @@ }, "node_modules/@svgr/babel-plugin-add-jsx-attribute": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", - "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -4880,8 +4670,7 @@ }, "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-6.5.0.tgz", - "integrity": "sha512-8zYdkym7qNyfXpWvu4yq46k41pyNM9SOstoWhKlm+IfdCE1DdnRKeMUPsWIEO/DEkaWxJ8T9esNdG3QwQ93jBA==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -4895,8 +4684,7 @@ }, "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-6.5.0.tgz", - "integrity": "sha512-NFdxMq3xA42Kb1UbzCVxplUc0iqSyM9X8kopImvFnB+uSDdzIHOdbs1op8ofAvVRtbg4oZiyRl3fTYeKcOe9Iw==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -4910,8 +4698,7 @@ }, "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", - "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", + "license": "MIT", 
"engines": { "node": ">=10" }, @@ -4925,8 +4712,7 @@ }, "node_modules/@svgr/babel-plugin-svg-dynamic-title": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", - "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -4940,8 +4726,7 @@ }, "node_modules/@svgr/babel-plugin-svg-em-dimensions": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", - "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -4955,8 +4740,7 @@ }, "node_modules/@svgr/babel-plugin-transform-react-native-svg": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", - "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -4970,8 +4754,7 @@ }, "node_modules/@svgr/babel-plugin-transform-svg-component": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", - "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -4985,8 +4768,7 @@ }, "node_modules/@svgr/babel-preset": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", - "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", + "license": "MIT", "dependencies": { "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", "@svgr/babel-plugin-remove-jsx-attribute": "*", @@ -5010,8 +4792,7 @@ }, "node_modules/@svgr/core": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", - "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", + "license": "MIT", "dependencies": { "@babel/core": "^7.19.6", "@svgr/babel-preset": "^6.5.1", @@ -5027,47 +4808,9 @@ "url": "https://github.com/sponsors/gregberge" } }, - "node_modules/@svgr/core/node_modules/@babel/core": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.20.2.tgz", - "integrity": "sha512-w7DbG8DtMrJcFOi4VrLm+8QM4az8Mo+PuLBKLp2zrYRCow8W/f9xiXm5sN53C8HksCyDQwCKha9JiDoIyPjT2g==", - "dependencies": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.2", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-module-transforms": "^7.20.2", - "@babel/helpers": "^7.20.1", - "@babel/parser": "^7.20.2", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@svgr/core/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": 
"sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/@svgr/hast-util-to-babel-ast": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", - "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", + "license": "MIT", "dependencies": { "@babel/types": "^7.20.0", "entities": "^4.4.0" @@ -5082,8 +4825,7 @@ }, "node_modules/@svgr/hast-util-to-babel-ast/node_modules/entities": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "license": "BSD-2-Clause", "engines": { "node": ">=0.12" }, @@ -5093,8 +4835,7 @@ }, "node_modules/@svgr/plugin-jsx": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", - "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", + "license": "MIT", "dependencies": { "@babel/core": "^7.19.6", "@svgr/babel-preset": "^6.5.1", @@ -5112,47 +4853,9 @@ "@svgr/core": "^6.0.0" } }, - "node_modules/@svgr/plugin-jsx/node_modules/@babel/core": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.20.2.tgz", - "integrity": "sha512-w7DbG8DtMrJcFOi4VrLm+8QM4az8Mo+PuLBKLp2zrYRCow8W/f9xiXm5sN53C8HksCyDQwCKha9JiDoIyPjT2g==", - "dependencies": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.2", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-module-transforms": "^7.20.2", - "@babel/helpers": "^7.20.1", - "@babel/parser": "^7.20.2", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@svgr/plugin-jsx/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/@svgr/plugin-svgo": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz", - "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==", + "license": "MIT", "dependencies": { "cosmiconfig": "^7.0.1", "deepmerge": "^4.2.2", @@ -5171,8 +4874,7 @@ }, "node_modules/@svgr/webpack": { "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz", - "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==", + "license": "MIT", "dependencies": { "@babel/core": "^7.19.6", "@babel/plugin-transform-react-constant-elements": "^7.18.12", @@ -5191,49 +4893,16 @@ "url": "https://github.com/sponsors/gregberge" } }, - "node_modules/@svgr/webpack/node_modules/@babel/core": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.20.2.tgz", - "integrity": 
"sha512-w7DbG8DtMrJcFOi4VrLm+8QM4az8Mo+PuLBKLp2zrYRCow8W/f9xiXm5sN53C8HksCyDQwCKha9JiDoIyPjT2g==", - "dependencies": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.2", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-module-transforms": "^7.20.2", - "@babel/helpers": "^7.20.1", - "@babel/parser": "^7.20.2", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@svgr/webpack/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/@swc/core": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.49.tgz", - "integrity": "sha512-br44ZHOfE9YyRGcORSLkHFQHTvhwRcaithBJ1Q5y5iMGpLbH0Wai3GN49L60RvmGwxNJfWzT+E7+rNNR7ewKgA==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.9.3.tgz", + "integrity": "sha512-oRj0AFePUhtatX+BscVhnzaAmWjpfAeySpM1TCbxA1rtBDeH/JDhi5yYzAKneDYtVtBvA7ApfeuzhMC9ye4xSg==", "dev": true, "hasInstallScript": true, + "dependencies": { + "@swc/counter": "^0.1.3", + "@swc/types": "^0.1.17" + }, "engines": { "node": ">=10" }, @@ -5242,19 +4911,19 @@ "url": "https://opencollective.com/swc" }, "optionalDependencies": { - "@swc/core-darwin-arm64": "1.3.49", - "@swc/core-darwin-x64": "1.3.49", - "@swc/core-linux-arm-gnueabihf": "1.3.49", - "@swc/core-linux-arm64-gnu": "1.3.49", - "@swc/core-linux-arm64-musl": "1.3.49", - "@swc/core-linux-x64-gnu": "1.3.49", - "@swc/core-linux-x64-musl": "1.3.49", - "@swc/core-win32-arm64-msvc": "1.3.49", - "@swc/core-win32-ia32-msvc": "1.3.49", - "@swc/core-win32-x64-msvc": "1.3.49" + "@swc/core-darwin-arm64": "1.9.3", + "@swc/core-darwin-x64": "1.9.3", + "@swc/core-linux-arm-gnueabihf": "1.9.3", + "@swc/core-linux-arm64-gnu": "1.9.3", + "@swc/core-linux-arm64-musl": "1.9.3", + "@swc/core-linux-x64-gnu": "1.9.3", + "@swc/core-linux-x64-musl": "1.9.3", + "@swc/core-win32-arm64-msvc": "1.9.3", + "@swc/core-win32-ia32-msvc": "1.9.3", + "@swc/core-win32-x64-msvc": "1.9.3" }, "peerDependencies": { - "@swc/helpers": "^0.5.0" + "@swc/helpers": "*" }, "peerDependenciesMeta": { "@swc/helpers": { @@ -5263,9 +4932,9 @@ } }, "node_modules/@swc/core-darwin-arm64": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.49.tgz", - "integrity": "sha512-g7aIfXh6uPHmhLXdjXQq5t3HAyS/EdvujasW1DIS5k8UqOBaSoCcSGtLIjzcLv3KujqNfYcm118E+12H0nY6fQ==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.9.3.tgz", + "integrity": "sha512-hGfl/KTic/QY4tB9DkTbNuxy5cV4IeejpPD4zo+Lzt4iLlDWIeANL4Fkg67FiVceNJboqg48CUX+APhDHO5G1w==", "cpu": [ "arm64" ], @@ -5279,9 +4948,9 @@ } }, "node_modules/@swc/core-darwin-x64": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.3.49.tgz", - "integrity": "sha512-eSIxVX0YDw40Bre5sAx2BV3DzdIGzmQvCf2yiBvLqiiL6GC0mmuDeWbUCAzdUX6fJ6FUVEBMUVqNOc9oJ2/d5w==", + "version": "1.9.3", + "resolved": 
"https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.9.3.tgz", + "integrity": "sha512-IaRq05ZLdtgF5h9CzlcgaNHyg4VXuiStnOFpfNEMuI5fm5afP2S0FHq8WdakUz5WppsbddTdplL+vpeApt/WCQ==", "cpu": [ "x64" ], @@ -5295,9 +4964,9 @@ } }, "node_modules/@swc/core-linux-arm-gnueabihf": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.3.49.tgz", - "integrity": "sha512-8mj3IcRVr/OJY0mVITz6Z5osNAMJK5GiKDaZ+3QejPLbl6aiu4sH4GmTHDRN14RnaVXOpecsGcUoQmNoNa3u3w==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.9.3.tgz", + "integrity": "sha512-Pbwe7xYprj/nEnZrNBvZfjnTxlBIcfApAGdz2EROhjpPj+FBqBa3wOogqbsuGGBdCphf8S+KPprL1z+oDWkmSQ==", "cpu": [ "arm" ], @@ -5311,9 +4980,9 @@ } }, "node_modules/@swc/core-linux-arm64-gnu": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.3.49.tgz", - "integrity": "sha512-Rmg9xw6tmpOpf6GKKjpHQGmjfHzqSths5ebI2ahrHlhekzZF2HYmPkVw4bHda8Bja6mbaw8FVBgBHjPU8mMeDA==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.9.3.tgz", + "integrity": "sha512-AQ5JZiwNGVV/2K2TVulg0mw/3LYfqpjZO6jDPtR2evNbk9Yt57YsVzS+3vHSlUBQDRV9/jqMuZYVU3P13xrk+g==", "cpu": [ "arm64" ], @@ -5327,9 +4996,9 @@ } }, "node_modules/@swc/core-linux-arm64-musl": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.3.49.tgz", - "integrity": "sha512-nlKPYMogAI3Aak6Mlkag8/2AlHAZ/DpH7RjhfMazsaGhD/sQOmYdyY9Al69ejpa419YJuREeeeLoojFlSsd30g==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.9.3.tgz", + "integrity": "sha512-tzVH480RY6RbMl/QRgh5HK3zn1ZTFsThuxDGo6Iuk1MdwIbdFYUY034heWUTI4u3Db97ArKh0hNL0xhO3+PZdg==", "cpu": [ "arm64" ], @@ -5343,13 +5012,14 @@ } }, "node_modules/@swc/core-linux-x64-gnu": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.3.49.tgz", - "integrity": "sha512-QOyeJQ6NVi73SJcizbwvIZTiGA/N+BxX9liRrvibumaQmRh8fWjJiLNsv3ODSHeuonak7E8Bf7a7NnSTyu48Mw==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.9.3.tgz", + "integrity": "sha512-ivXXBRDXDc9k4cdv10R21ccBmGebVOwKXT/UdH1PhxUn9m/h8erAWjz5pcELwjiMf27WokqPgaWVfaclDbgE+w==", "cpu": [ "x64" ], "dev": true, + "license": "Apache-2.0 AND MIT", "optional": true, "os": [ "linux" @@ -5359,13 +5029,14 @@ } }, "node_modules/@swc/core-linux-x64-musl": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.3.49.tgz", - "integrity": "sha512-WlDMz+SOpYC9O/ZBUw1oiyWI7HyUCMlf/HS8Fy/kRI3eGoGCUxVTCJ1mP57GdQr4Wg32Y/ZpO2KSNQFWnT8mAw==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.9.3.tgz", + "integrity": "sha512-ILsGMgfnOz1HwdDz+ZgEuomIwkP1PHT6maigZxaCIuC6OPEhKE8uYna22uU63XvYcLQvZYDzpR3ms47WQPuNEg==", "cpu": [ "x64" ], "dev": true, + "license": "Apache-2.0 AND MIT", "optional": true, "os": [ "linux" @@ -5375,9 +5046,9 @@ } }, "node_modules/@swc/core-win32-arm64-msvc": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.3.49.tgz", - "integrity": "sha512-41LZOeI94Za3twib8KOIjnHYAZ+nkBFmboaREsFR1760S7jiMVywqWX8nFZvn/CXj15Fjjgdgyuig+zMREwXwQ==", + 
"version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.9.3.tgz", + "integrity": "sha512-e+XmltDVIHieUnNJHtspn6B+PCcFOMYXNJB1GqoCcyinkEIQNwC8KtWgMqUucUbEWJkPc35NHy9k8aCXRmw9Kg==", "cpu": [ "arm64" ], @@ -5391,9 +5062,9 @@ } }, "node_modules/@swc/core-win32-ia32-msvc": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.3.49.tgz", - "integrity": "sha512-IdqLPoMKssyAoOCZdNXmnAd6/uyx+Hb9KSfZUHepZaNfwMy6J5XXrOsbYs3v53FH8MtekUUdV+mMX4me9bcv9w==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.9.3.tgz", + "integrity": "sha512-rqpzNfpAooSL4UfQnHhkW8aL+oyjqJniDP0qwZfGnjDoJSbtPysHg2LpcOBEdSnEH+uIZq6J96qf0ZFD8AGfXA==", "cpu": [ "ia32" ], @@ -5407,9 +5078,9 @@ } }, "node_modules/@swc/core-win32-x64-msvc": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.3.49.tgz", - "integrity": "sha512-7Fqjo5pS3uIohhSbYSaR0+e/bJdxmQb4oG97FIh5qvlCCGQaQ9UiaEeYy4uK0Ad+Menum1IXCAEiG7RHcl6Eyw==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.9.3.tgz", + "integrity": "sha512-3YJJLQ5suIEHEKc1GHtqVq475guiyqisKSoUnoaRtxkDaW5g1yvPt9IoSLOe2mRs7+FFhGGU693RsBUSwOXSdQ==", "cpu": [ "x64" ], @@ -5422,10 +5093,24 @@ "node": ">=10" } }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "dev": true + }, + "node_modules/@swc/types": { + "version": "0.1.17", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.17.tgz", + "integrity": "sha512-V5gRru+aD8YVyCOMAjMpWR1Ui577DD5KSJsHP8RAxopAH22jFz6GZd/qxqjO6MJHQhcsjvjOFXyDhyLQUnMveQ==", + "dev": true, + "dependencies": { + "@swc/counter": "^0.1.3" + } + }, "node_modules/@szmarczak/http-timer": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", + "license": "MIT", "dependencies": { "defer-to-connect": "^1.0.1" }, @@ -5435,17 +5120,17 @@ }, "node_modules/@trysound/sax": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", - "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "license": "ISC", "engines": { "node": ">=10.13.0" } }, "node_modules/@types/babel__core": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.1.tgz", - "integrity": "sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==", + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", "dev": true, + "license": "MIT", "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", @@ -5455,37 +5140,39 @@ } }, "node_modules/@types/babel__generator": { - "version": "7.6.4", - "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.4.tgz", - "integrity": 
"sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==", + "version": "7.6.8", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz", + "integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/types": "^7.0.0" } }, "node_modules/@types/babel__template": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz", - "integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==", + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", "dev": true, + "license": "MIT", "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "node_modules/@types/babel__traverse": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.1.tgz", - "integrity": "sha512-MitHFXnhtgwsGZWtT68URpOvLN4EREih1u3QtQiN4VdAxWKRVvGCSvw/Qth0M0Qq3pJpnGOu5JaM/ydK7OGbqg==", + "version": "7.20.6", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.6.tgz", + "integrity": "sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/types": "^7.20.7" } }, "node_modules/@types/body-parser": { "version": "1.19.2", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", - "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "license": "MIT", "dependencies": { "@types/connect": "*", "@types/node": "*" @@ -5493,49 +5180,48 @@ }, "node_modules/@types/bonjour": { "version": "3.5.10", - "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", - "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/cheerio": { "version": "0.22.31", - "resolved": "https://registry.npmjs.org/@types/cheerio/-/cheerio-0.22.31.tgz", - "integrity": "sha512-Kt7Cdjjdi2XWSfrZ53v4Of0wG3ZcmaegFXjMmz9tfNrZSkzzo36G0AL1YqSdcIA78Etjt6E609pt5h1xnQkPUw==", + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/connect": { "version": "3.4.35", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", - "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/connect-history-api-fallback": { "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz", - "integrity": "sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==", + "license": "MIT", "dependencies": { "@types/express-serve-static-core": "*", "@types/node": "*" } }, + "node_modules/@types/css-font-loading-module": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/@types/css-font-loading-module/-/css-font-loading-module-0.0.7.tgz", + "integrity": 
"sha512-nl09VhutdjINdWyXxHWN/w9zlNCfr60JUqJbd24YXUuCwgeL0TpFSdElCwb6cxfB6ybE19Gjj4g0jsgkXxKv1Q==" + }, "node_modules/@types/debug": { "version": "4.1.12", - "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", - "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", "dependencies": { "@types/ms": "*" } }, "node_modules/@types/eslint": { "version": "8.4.1", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.4.1.tgz", - "integrity": "sha512-GE44+DNEyxxh2Kc6ro/VkIj+9ma0pO0bwv9+uHSyBrikYOHr8zYcdPvnBOp1aw8s+CjRvuSx7CyWqRrNFQ59mA==", + "license": "MIT", "dependencies": { "@types/estree": "*", "@types/json-schema": "*" @@ -5543,8 +5229,7 @@ }, "node_modules/@types/eslint-scope": { "version": "3.7.3", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.3.tgz", - "integrity": "sha512-PB3ldyrcnAicT35TWPs5IcwKD8S333HMaa2VVv4+wdvebJkjWuW/xESoB8IwRcog8HYVYamb1g/R31Qv5Bx03g==", + "license": "MIT", "dependencies": { "@types/eslint": "*", "@types/estree": "*" @@ -5552,13 +5237,11 @@ }, "node_modules/@types/estree": { "version": "0.0.51", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.51.tgz", - "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==" + "license": "MIT" }, "node_modules/@types/express": { "version": "4.17.13", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz", - "integrity": "sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA==", + "license": "MIT", "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^4.17.18", @@ -5568,8 +5251,7 @@ }, "node_modules/@types/express-serve-static-core": { "version": "4.17.30", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.30.tgz", - "integrity": "sha512-gstzbTWro2/nFed1WXtf+TtrpwxH7Ggs4RLYTLbeVgIkUQOI3WG/JKjgeOU1zXDvezllupjrf8OPIdvTbIaVOQ==", + "license": "MIT", "dependencies": { "@types/node": "*", "@types/qs": "*", @@ -5577,31 +5259,29 @@ } }, "node_modules/@types/graceful-fs": { - "version": "4.1.6", - "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz", - "integrity": "sha512-Sig0SNORX9fdW+bQuTEovKj3uHcUL6LQKbCrrqb1X7J6/ReAbhCXRAhc+SMejhLELFj2QcyuxmUooZ4bt5ReSw==", + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", "dev": true, + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/hast": { "version": "2.3.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.4.tgz", - "integrity": "sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g==", + "license": "MIT", "dependencies": { "@types/unist": "*" } }, "node_modules/@types/history": { "version": "4.7.11", - "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", - "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + "license": "MIT" }, "node_modules/@types/hoist-non-react-statics": { "version": "3.3.5", - "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.5.tgz", - "integrity": 
"sha512-SbcrWzkKBw2cdwRTwQAswfpB9g9LJWfjtUeW/jvNwbhC8cpmmNYVePa+ncbUe0rGTQ7G3Ff6mYUN2VMfLVr+Sg==", + "license": "MIT", "dependencies": { "@types/react": "*", "hoist-non-react-statics": "^3.3.0" @@ -5609,46 +5289,42 @@ }, "node_modules/@types/html-minifier-terser": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + "license": "MIT" }, "node_modules/@types/http-proxy": { "version": "1.17.9", - "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.9.tgz", - "integrity": "sha512-QsbSjA/fSk7xB+UXlCT3wHBy5ai9wOcNDWwZAtud+jXhwOM3l+EYZh8Lng4+/6n8uar0J7xILzqftJdJ/Wdfkw==", + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/istanbul-lib-coverage": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", - "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/istanbul-lib-report": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", "dev": true, + "license": "MIT", "dependencies": { "@types/istanbul-lib-coverage": "*" } }, "node_modules/@types/istanbul-reports": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", - "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", "dev": true, + "license": "MIT", "dependencies": { "@types/istanbul-lib-report": "*" } }, "node_modules/@types/jest": { - "version": "29.5.4", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.4.tgz", - "integrity": "sha512-PhglGmhWeD46FYOVLt3X7TiWjzwuVGW9wG/4qocPevXMjCmrIc5b6db9WjeGE4QYVpUAWMDv3v0IiBwObY289A==", + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", "dev": true, + "license": "MIT", "dependencies": { "expect": "^29.0.0", "pretty-format": "^29.0.0" @@ -5656,79 +5332,65 @@ }, "node_modules/@types/json-schema": { "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" + "license": "MIT" }, "node_modules/@types/lodash": { "version": "4.14.202", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.202.tgz", - "integrity": "sha512-OvlIYQK9tNneDlS0VN54LLd5uiPCBOp7gS5Z0f1mjoJYBrtStzgmJBxONW3U6OZqdtNzZPmn9BS/7WI7BFFcFQ==" + "license": "MIT" }, "node_modules/@types/lodash.clonedeep": { "version": "4.5.9", - "resolved": "https://registry.npmjs.org/@types/lodash.clonedeep/-/lodash.clonedeep-4.5.9.tgz", - "integrity": "sha512-19429mWC+FyaAhOLzsS8kZUsI+/GmBAQ0HFiCPsKGU+7pBXOQWhyrY6xNNDwUSX8SMZMJvuFVMF9O5dQOlQK9Q==", + "license": "MIT", "dependencies": { "@types/lodash": "*" } }, "node_modules/@types/mdast": { "version": "3.0.10", - "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.10.tgz", - "integrity": 
"sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA==", + "license": "MIT", "dependencies": { "@types/unist": "*" } }, "node_modules/@types/mime": { "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", - "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==" + "license": "MIT" }, "node_modules/@types/ms": { "version": "0.7.34", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", - "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + "license": "MIT" }, "node_modules/@types/node": { "version": "17.0.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.13.tgz", - "integrity": "sha512-Y86MAxASe25hNzlDbsviXl8jQHb0RDvKt4c40ZJQ1Don0AAL0STLZSs4N+6gLEO55pedy7r2cLwS+ZDxPm/2Bw==" + "license": "MIT" }, "node_modules/@types/parse-json": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==" + "license": "MIT" }, "node_modules/@types/parse5": { "version": "5.0.3", - "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz", - "integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw==" + "license": "MIT" }, "node_modules/@types/prop-types": { "version": "15.7.4", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.4.tgz", - "integrity": "sha512-rZ5drC/jWjrArrS8BR6SIr4cWpW09RNTYt9AMZo3Jwwif+iacXAqgVjm0B0Bv/S1jhDXKHqRVNCbACkJ89RAnQ==" + "license": "MIT" }, "node_modules/@types/q": { "version": "1.5.5", - "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.5.tgz", - "integrity": "sha512-L28j2FcJfSZOnL1WBjDYp2vUHCeIFlyYI/53EwD/rKUBQ7MtUUfbQWiyKJGpcnv4/WgrhWsFKrcPstcAt/J0tQ==" + "license": "MIT" }, "node_modules/@types/qs": { "version": "6.9.7", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", - "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==" + "license": "MIT" }, "node_modules/@types/range-parser": { "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", - "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" + "license": "MIT" }, "node_modules/@types/react": { "version": "17.0.38", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.38.tgz", - "integrity": "sha512-SI92X1IA+FMnP3qM5m4QReluXzhcmovhZnLNm3pyeQlooi02qI7sLiepEYqT678uNiyc25XfCqxREFpy3W7YhQ==", + "license": "MIT", "dependencies": { "@types/prop-types": "*", "@types/scheduler": "*", @@ -5737,8 +5399,7 @@ }, "node_modules/@types/react-redux": { "version": "7.1.33", - "resolved": "https://registry.npmjs.org/@types/react-redux/-/react-redux-7.1.33.tgz", - "integrity": "sha512-NF8m5AjWCkert+fosDsN3hAlHzpjSiXlVy9EgQEmLoBhaNXbmyeGs/aj5dQzKuF+/q+S7JQagorGDW8pJ28Hmg==", + "license": "MIT", "dependencies": { "@types/hoist-non-react-statics": "^3.3.0", "@types/react": "*", @@ -5748,8 +5409,7 @@ }, "node_modules/@types/react-router": { "version": "5.1.20", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", - "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + 
"license": "MIT", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*" @@ -5757,8 +5417,7 @@ }, "node_modules/@types/react-router-config": { "version": "5.0.7", - "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.7.tgz", - "integrity": "sha512-pFFVXUIydHlcJP6wJm7sDii5mD/bCmmAY0wQzq+M+uX7bqS95AQqHZWP1iNMKrWVQSuHIzj5qi9BvrtLX2/T4w==", + "license": "MIT", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*", @@ -5767,8 +5426,7 @@ }, "node_modules/@types/react-router-dom": { "version": "5.3.3", - "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", - "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "license": "MIT", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*", @@ -5777,34 +5435,29 @@ }, "node_modules/@types/retry": { "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + "license": "MIT" }, "node_modules/@types/sax": { "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-pSAff4IAxJjfAXUG6tFkO7dsSbTmf8CtUpfhhZ5VhkRpC4628tJhh3+V6H1E+/Gs9piSzYKT5yzHO5M4GG9jkw==", + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/scheduler": { "version": "0.16.2", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz", - "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==" + "license": "MIT" }, "node_modules/@types/serve-index": { "version": "1.9.1", - "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", + "license": "MIT", "dependencies": { "@types/express": "*" } }, "node_modules/@types/serve-static": { "version": "1.13.10", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz", - "integrity": "sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ==", + "license": "MIT", "dependencies": { "@types/mime": "^1", "@types/node": "*" @@ -5812,8 +5465,7 @@ }, "node_modules/@types/sockjs": { "version": "0.3.33", - "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", - "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", + "license": "MIT", "dependencies": { "@types/node": "*" } @@ -5824,38 +5476,39 @@ "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", "dev": true }, + "node_modules/@types/trusted-types": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "optional": true + }, "node_modules/@types/unist": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz", - "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==" + "license": "MIT" }, "node_modules/@types/ws": { "version": "8.5.3", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz", - 
"integrity": "sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==", + "license": "MIT", "dependencies": { "@types/node": "*" } }, "node_modules/@types/yargs": { "version": "17.0.24", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", - "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", "dev": true, + "license": "MIT", "dependencies": { "@types/yargs-parser": "*" } }, "node_modules/@types/yargs-parser": { "version": "21.0.0", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@webassemblyjs/ast": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", - "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", + "license": "MIT", "dependencies": { "@webassemblyjs/helper-numbers": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1" @@ -5863,23 +5516,19 @@ }, "node_modules/@webassemblyjs/floating-point-hex-parser": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", - "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==" + "license": "MIT" }, "node_modules/@webassemblyjs/helper-api-error": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", - "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==" + "license": "MIT" }, "node_modules/@webassemblyjs/helper-buffer": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", - "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==" + "license": "MIT" }, "node_modules/@webassemblyjs/helper-numbers": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", - "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", + "license": "MIT", "dependencies": { "@webassemblyjs/floating-point-hex-parser": "1.11.1", "@webassemblyjs/helper-api-error": "1.11.1", @@ -5888,13 +5537,11 @@ }, "node_modules/@webassemblyjs/helper-wasm-bytecode": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", - "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==" + "license": "MIT" }, "node_modules/@webassemblyjs/helper-wasm-section": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", - "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", + "license": "MIT", "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", @@ -5904,29 +5551,25 @@ }, "node_modules/@webassemblyjs/ieee754": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", - "integrity": 
"sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", + "license": "MIT", "dependencies": { "@xtuc/ieee754": "^1.2.0" } }, "node_modules/@webassemblyjs/leb128": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", - "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", + "license": "Apache-2.0", "dependencies": { "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/utf8": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", - "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==" + "license": "MIT" }, "node_modules/@webassemblyjs/wasm-edit": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", - "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", + "license": "MIT", "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", @@ -5940,8 +5583,7 @@ }, "node_modules/@webassemblyjs/wasm-gen": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", - "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", + "license": "MIT", "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1", @@ -5952,8 +5594,7 @@ }, "node_modules/@webassemblyjs/wasm-opt": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", - "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", + "license": "MIT", "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", @@ -5963,8 +5604,7 @@ }, "node_modules/@webassemblyjs/wasm-parser": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", - "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", + "license": "MIT", "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-api-error": "1.11.1", @@ -5976,27 +5616,28 @@ }, "node_modules/@webassemblyjs/wast-printer": { "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", - "integrity": "sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", + "license": "MIT", "dependencies": { "@webassemblyjs/ast": "1.11.1", "@xtuc/long": "4.2.2" } }, + "node_modules/@xstate/fsm": { + "version": "1.6.5", + "resolved": "https://registry.npmjs.org/@xstate/fsm/-/fsm-1.6.5.tgz", + "integrity": "sha512-b5o1I6aLNeYlU/3CPlj/Z91ybk1gUsKT+5NAJI+2W4UjvS5KLG28K9v5UvNoFVjHV8PajVZ00RH3vnjyQO7ZAw==" + }, "node_modules/@xtuc/ieee754": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + "license": "BSD-3-Clause" }, "node_modules/@xtuc/long": { "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + 
"license": "Apache-2.0" }, "node_modules/abort-controller": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "license": "MIT", "dependencies": { "event-target-shim": "^5.0.0" }, @@ -6006,8 +5647,7 @@ }, "node_modules/accepts": { "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", "dependencies": { "mime-types": "~2.1.34", "negotiator": "0.6.3" @@ -6018,8 +5658,7 @@ }, "node_modules/acorn": { "version": "8.8.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.0.tgz", - "integrity": "sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==", + "license": "MIT", "bin": { "acorn": "bin/acorn" }, @@ -6029,32 +5668,28 @@ }, "node_modules/acorn-import-assertions": { "version": "1.8.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", - "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", + "license": "MIT", "peerDependencies": { "acorn": "^8" } }, "node_modules/acorn-walk": { "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "license": "MIT", "engines": { "node": ">=0.4.0" } }, "node_modules/address": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/address/-/address-1.1.2.tgz", - "integrity": "sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==", + "license": "MIT", "engines": { "node": ">= 0.12.0" } }, "node_modules/aggregate-error": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "license": "MIT", "dependencies": { "clean-stack": "^2.0.0", "indent-string": "^4.0.0" @@ -6065,8 +5700,7 @@ }, "node_modules/ajv": { "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -6080,8 +5714,7 @@ }, "node_modules/ajv-formats": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", - "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "license": "MIT", "dependencies": { "ajv": "^8.0.0" }, @@ -6096,8 +5729,7 @@ }, "node_modules/ajv-formats/node_modules/ajv": { "version": "8.9.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.9.0.tgz", - "integrity": "sha512-qOKJyNj/h+OWx7s5DePL6Zu1KeM9jPZhwBqs+7DzP6bGOvqzVCSf0xueYmVuaC/oQ/VtS2zLMLHdQFbkka+XDQ==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -6111,21 +5743,18 @@ }, "node_modules/ajv-formats/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/ajv-keywords": { "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "license": "MIT", "peerDependencies": { "ajv": "^6.9.1" } }, "node_modules/algoliasearch": { "version": "4.19.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.19.1.tgz", - "integrity": "sha512-IJF5b93b2MgAzcE/tuzW0yOPnuUyRgGAtaPv5UUywXM8kzqfdwZTO4sPJBzoGz1eOy6H9uEchsJsBFTELZSu+g==", + "license": "MIT", "dependencies": { "@algolia/cache-browser-local-storage": "4.19.1", "@algolia/cache-common": "4.19.1", @@ -6145,8 +5774,7 @@ }, "node_modules/algoliasearch-helper": { "version": "3.14.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.14.0.tgz", - "integrity": "sha512-gXDXzsSS0YANn5dHr71CUXOo84cN4azhHKUbg71vAWnH+1JBiR4jf7to3t3JHXknXkbV0F7f055vUSBKrltHLQ==", + "license": "MIT", "dependencies": { "@algolia/events": "^4.0.1" }, @@ -6156,22 +5784,19 @@ }, "node_modules/alphanum-sort": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", - "integrity": "sha512-0FcBfdcmaumGPQ0qPn7Q5qTgz/ooXgIyp1rf8ik5bGX8mpE2YHjC0P/eyQvxu1GURYQgq9ozf2mteQ5ZD9YiyQ==" + "license": "MIT" }, "node_modules/ansi-align": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "license": "ISC", "dependencies": { "string-width": "^4.1.0" } }, "node_modules/ansi-escapes": { "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", "dev": true, + "license": "MIT", "dependencies": { "type-fest": "^0.21.3" }, @@ -6184,9 +5809,8 @@ }, "node_modules/ansi-escapes/node_modules/type-fest": { "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", "dev": true, + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -6196,19 +5820,17 @@ }, "node_modules/ansi-html-community": { "version": "0.0.8", - "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", - "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", "engines": [ "node >= 0.8.0" ], + "license": "Apache-2.0", "bin": { "ansi-html": "bin/ansi-html" } }, "node_modules/ansi-red": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/ansi-red/-/ansi-red-0.1.1.tgz", - "integrity": "sha512-ewaIr5y+9CUTGFwZfpECUbFlGcC0GCw1oqR9RI6h1gQCd9Aj2GxSckCnPsVJnmfMZbwFYE+leZGASgkWl06Jow==", + "license": "MIT", "dependencies": { "ansi-wrap": "0.1.0" }, @@ -6218,16 +5840,14 @@ }, "node_modules/ansi-regex": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", 
"engines": { "node": ">=8" } }, "node_modules/ansi-styles": { "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "license": "MIT", "dependencies": { "color-convert": "^1.9.0" }, @@ -6237,21 +5857,18 @@ }, "node_modules/ansi-wrap": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/ansi-wrap/-/ansi-wrap-0.1.0.tgz", - "integrity": "sha512-ZyznvL8k/FZeQHr2T6LzcJ/+vBApDnMNZvfVFy3At0knswWd6rJ3/0Hhmpu8oqa6C92npmozs890sX9Dl6q+Qw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/any-promise": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==" + "license": "MIT" }, "node_modules/anymatch": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", + "license": "ISC", "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -6262,8 +5879,6 @@ }, "node_modules/arch": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", - "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", "funding": [ { "type": "github", @@ -6277,12 +5892,12 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/archive-type": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/archive-type/-/archive-type-4.0.0.tgz", - "integrity": "sha512-zV4Ky0v1F8dBrdYElwTvQhweQ0P7Kwc1aluqJsYtOBP01jXcWCyW2IEfI1YiqsG+Iy7ZR+o5LF1N+PGECBxHWA==", + "license": "MIT", "dependencies": { "file-type": "^4.2.0" }, @@ -6292,50 +5907,43 @@ }, "node_modules/archive-type/node_modules/file-type": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-4.4.0.tgz", - "integrity": "sha512-f2UbFQEk7LXgWpi5ntcO86OeA/cC80fuDDDaX/fZ2ZGel+AF7leRQqBBW1eJNiiQkrZlAoM6P+VYP5P6bOlDEQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/arg": { "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + "license": "MIT" }, "node_modules/argparse": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + "license": "Python-2.0" }, "node_modules/arr-diff": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha512-YVIQ82gZPGBebQV/a8dar4AitzCQs0jjXwMPZllpXMaGjXPYVUawSxQrRsjhjupyVxEvbHgUmIhKVlND+j02kA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/arr-flatten": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/arr-union": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", - "integrity": 
"sha512-sKpyeERZ02v1FeCZT8lrfJq5u6goHCtpTAzPwJYe7c8SPFOboNjNg1vz2L4VTn9T4PQxEx13TbXLmYUcS6Ug7Q==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/array-buffer-byte-length": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz", - "integrity": "sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "is-array-buffer": "^3.0.1" @@ -6346,23 +5954,21 @@ }, "node_modules/array-find-index": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", - "integrity": "sha512-M1HQyIXcBGtVywBt8WVdim+lrNaK7VHp99Qt5pSNziXznKHViIBbXWtfRTpEFpF/c4FdfxNAsCCwPp5phBYJtw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/array-flatten": { "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" + "license": "MIT" }, "node_modules/array-move": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/array-move/-/array-move-3.0.1.tgz", - "integrity": "sha512-H3Of6NIn2nNU1gsVDqDnYKY/LCdWvCMMOWifNGhKcVQgiZ6nOek39aESOvro6zmueP07exSl93YLvkN4fZOkSg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/array-move/-/array-move-4.0.0.tgz", + "integrity": "sha512-+RY54S8OuVvg94THpneQvFRmqWdAHeqtMzgMW6JNurHxe8rsS07cHQdfGkXnTUXiBcyZ0j3SiDIxxj0RPiqCkQ==", "engines": { - "node": ">=10" + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -6370,32 +5976,28 @@ }, "node_modules/array-union": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/array-uniq": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", - "integrity": "sha512-MNha4BWQ6JbwhFhj03YK552f7cb3AzoE8SzeljgChvL1dl3IcvggXVz1DilzySZkCja+CXuZbdW7yATchWn8/Q==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/array-unique": { "version": "0.3.2", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha512-SleRWjh9JUud2wH1hPs9rZBZ33H6T9HOiL0uwGnGx9FpE6wKGyfWugmbkEOIs6qWrZhg0LWeLziLrEwQJhs5mQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/array.prototype.filter": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array.prototype.filter/-/array.prototype.filter-1.0.2.tgz", - "integrity": "sha512-us+UrmGOilqttSOgoWZTpOvHu68vZT2YCjc/H4vhu56vzZpaDFBhB+Se2UwqWzMKbDv7Myq5M5pcZLAtUvTQdQ==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -6412,8 +6014,7 @@ }, "node_modules/array.prototype.find": { "version": "2.2.1", - "resolved": "https://registry.npmjs.org/array.prototype.find/-/array.prototype.find-2.2.1.tgz", - "integrity": "sha512-I2ri5Z9uMpMvnsNrHre9l3PaX+z9D0/z6F7Yt2u15q7wt0I62g5kX6xUKR1SJiefgG+u2/gJUmM8B47XRvQR6w==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -6426,8 +6027,7 @@ }, "node_modules/array.prototype.flat": { "version": "1.3.1", - "resolved": 
"https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -6443,8 +6043,7 @@ }, "node_modules/array.prototype.reduce": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/array.prototype.reduce/-/array.prototype.reduce-1.0.5.tgz", - "integrity": "sha512-kDdugMl7id9COE8R7MHF5jWk7Dqt/fs4Pv+JXoICnYwqpjjjbUurz6w5fT5IG6brLdJhv6/VoHB0H7oyIBXd+Q==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -6461,29 +6060,25 @@ }, "node_modules/arrify": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/asap": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", - "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==" + "license": "MIT" }, "node_modules/asn1": { "version": "0.2.6", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", - "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "license": "MIT", "dependencies": { "safer-buffer": "~2.1.0" } }, "node_modules/asn1.js": { "version": "5.4.1", - "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", - "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", + "license": "MIT", "dependencies": { "bn.js": "^4.0.0", "inherits": "^2.0.1", @@ -6493,13 +6088,11 @@ }, "node_modules/asn1.js/node_modules/bn.js": { "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "license": "MIT" }, "node_modules/assert": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/assert/-/assert-2.1.0.tgz", - "integrity": "sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "is-nan": "^1.3.2", @@ -6510,50 +6103,43 @@ }, "node_modules/assert-plus": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "license": "MIT", "engines": { "node": ">=0.8" } }, "node_modules/assign-symbols": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", - "integrity": "sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/async": { "version": "2.6.4", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", - "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "license": "MIT", "dependencies": { "lodash": "^4.17.14" } }, "node_modules/asynckit": { "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": 
"sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + "license": "MIT" }, "node_modules/at-least-node": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "license": "ISC", "engines": { "node": ">= 4.0.0" } }, "node_modules/atoa": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/atoa/-/atoa-1.0.0.tgz", - "integrity": "sha512-VVE1H6cc4ai+ZXo/CRWoJiHXrA1qfA31DPnx6D20+kSI547hQN5Greh51LQ1baMRMfxO5K5M4ImMtZbZt2DODQ==" + "license": "MIT" }, "node_modules/atob": { "version": "2.1.2", - "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", + "license": "(MIT OR Apache-2.0)", "bin": { "atob": "bin/atob.js" }, @@ -6563,16 +6149,13 @@ }, "node_modules/autolinker": { "version": "3.16.2", - "resolved": "https://registry.npmjs.org/autolinker/-/autolinker-3.16.2.tgz", - "integrity": "sha512-JiYl7j2Z19F9NdTmirENSUUIIL/9MytEWtmzhfmsKPCp9E+G35Y0UNCMoM9tFigxT59qSc8Ml2dlZXOCVTYwuA==", + "license": "MIT", "dependencies": { "tslib": "^2.3.0" } }, "node_modules/autoprefixer": { "version": "10.4.15", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.15.tgz", - "integrity": "sha512-KCuPB8ZCIqFdA4HwKXsvz7j6gvSDNhDP7WnUjBleRkKjPdvCmHFuQ77ocavI8FT6NdvlBnE2UFr2H4Mycn8Vew==", "funding": [ { "type": "opencollective", @@ -6587,6 +6170,7 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { "browserslist": "^4.21.10", "caniuse-lite": "^1.0.30001520", @@ -6607,8 +6191,7 @@ }, "node_modules/available-typed-arrays": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", - "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -6618,32 +6201,30 @@ }, "node_modules/aws-sign2": { "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==", + "license": "Apache-2.0", "engines": { "node": "*" } }, "node_modules/aws4": { "version": "1.12.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz", - "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==" + "license": "MIT" }, "node_modules/axios": { "version": "0.25.0", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.25.0.tgz", - "integrity": "sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==", + "license": "MIT", "dependencies": { "follow-redirects": "^1.14.7" } }, "node_modules/babel-jest": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.6.4.tgz", - "integrity": "sha512-meLj23UlSLddj6PC+YTOFRgDAtjnZom8w/ACsrx0gtPtv5cJZk0A5Unk5bV4wixD7XaPCN1fQvpww8czkZURmw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/transform": "^29.6.4", + "@jest/transform": "^29.7.0", 
"@types/babel__core": "^7.1.14", "babel-plugin-istanbul": "^6.1.1", "babel-preset-jest": "^29.6.3", @@ -6660,9 +6241,8 @@ }, "node_modules/babel-jest/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -6675,9 +6255,8 @@ }, "node_modules/babel-jest/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -6691,9 +6270,8 @@ }, "node_modules/babel-jest/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -6703,24 +6281,21 @@ }, "node_modules/babel-jest/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/babel-jest/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/babel-jest/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -6730,8 +6305,7 @@ }, "node_modules/babel-loader": { "version": "8.2.5", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.5.tgz", - "integrity": "sha512-OSiFfH89LrEMiWd4pLNqGz4CwJDtbs2ZVc+iGu2HrkRfPxId9F2anQj38IxWpmRfsUY0aBZYi1EFcd3mhtRMLQ==", + "license": "MIT", "dependencies": { "find-cache-dir": "^3.3.1", "loader-utils": "^2.0.0", @@ -6748,8 +6322,7 @@ }, "node_modules/babel-loader/node_modules/schema-utils": { "version": "2.7.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", - "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.5", "ajv": "^6.12.4", @@ -6765,8 +6338,7 @@ }, "node_modules/babel-plugin-apply-mdx-type-prop": { "version": "1.6.22", - "resolved": "https://registry.npmjs.org/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz", - "integrity": "sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "7.10.4", "@mdx-js/util": "1.6.22" @@ -6781,21 +6353,18 @@ }, "node_modules/babel-plugin-apply-mdx-type-prop/node_modules/@babel/helper-plugin-utils": { 
"version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", - "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + "license": "MIT" }, "node_modules/babel-plugin-dynamic-import-node": { "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "license": "MIT", "dependencies": { "object.assign": "^4.1.0" } }, "node_modules/babel-plugin-extract-import-names": { "version": "1.6.22", - "resolved": "https://registry.npmjs.org/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz", - "integrity": "sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "7.10.4" }, @@ -6806,14 +6375,12 @@ }, "node_modules/babel-plugin-extract-import-names/node_modules/@babel/helper-plugin-utils": { "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", - "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + "license": "MIT" }, "node_modules/babel-plugin-istanbul": { "version": "6.1.1", - "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", - "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", @@ -6827,9 +6394,8 @@ }, "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", - "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "@babel/core": "^7.12.3", "@babel/parser": "^7.14.7", @@ -6843,18 +6409,16 @@ }, "node_modules/babel-plugin-istanbul/node_modules/semver": { "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/babel-plugin-jest-hoist": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", - "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/template": "^7.3.3", "@babel/types": "^7.3.3", @@ -6867,8 +6431,7 @@ }, "node_modules/babel-plugin-polyfill-corejs2": { "version": "0.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz", - "integrity": "sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q==", + "license": "MIT", "dependencies": { "@babel/compat-data": "^7.17.7", "@babel/helper-define-polyfill-provider": "^0.3.3", @@ -6880,16 +6443,14 @@ }, 
"node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/babel-plugin-polyfill-corejs3": { "version": "0.5.2", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz", - "integrity": "sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ==", + "license": "MIT", "dependencies": { "@babel/helper-define-polyfill-provider": "^0.3.1", "core-js-compat": "^3.21.0" @@ -6900,8 +6461,7 @@ }, "node_modules/babel-plugin-polyfill-regenerator": { "version": "0.3.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz", - "integrity": "sha512-Y2B06tvgHYt1x0yz17jGkGeeMr5FeKUu+ASJ+N6nB5lQ8Dapfg42i0OVrf8PNGJ3zKL4A23snMi1IRwrqqND7A==", + "license": "MIT", "dependencies": { "@babel/helper-define-polyfill-provider": "^0.3.1" }, @@ -6910,23 +6470,27 @@ } }, "node_modules/babel-preset-current-node-syntax": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", - "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.1.0.tgz", + "integrity": "sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-bigint": "^7.8.3", - "@babel/plugin-syntax-class-properties": "^7.8.3", - "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", "@babel/plugin-syntax-object-rest-spread": "^7.8.3", "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-top-level-await": "^7.8.3" + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" }, "peerDependencies": { "@babel/core": "^7.0.0" @@ -6934,9 +6498,8 @@ }, "node_modules/babel-preset-jest": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", - "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", "dev": true, + "license": "MIT", "dependencies": { "babel-plugin-jest-hoist": "^29.6.3", "babel-preset-current-node-syntax": "^1.0.0" @@ -6950,16 +6513,14 @@ }, "node_modules/babylon": { "version": "6.18.0", - "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": 
"sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", + "license": "MIT", "bin": { "babylon": "bin/babylon.js" } }, "node_modules/bail": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", - "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -6967,13 +6528,11 @@ }, "node_modules/balanced-match": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "license": "MIT" }, "node_modules/base": { "version": "0.11.2", - "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "license": "MIT", "dependencies": { "cache-base": "^1.0.1", "class-utils": "^0.3.5", @@ -6989,8 +6548,7 @@ }, "node_modules/base/node_modules/define-property": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", + "license": "MIT", "dependencies": { "is-descriptor": "^1.0.0" }, @@ -7000,21 +6558,17 @@ }, "node_modules/base16": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz", - "integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==" + "license": "MIT" }, "node_modules/base64-arraybuffer": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz", - "integrity": "sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==", + "license": "MIT", "engines": { "node": ">= 0.6.0" } }, "node_modules/base64-js": { "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", "funding": [ { "type": "github", @@ -7028,41 +6582,37 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/batch": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + "license": "MIT" }, "node_modules/bcrypt-pbkdf": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "license": "BSD-3-Clause", "dependencies": { "tweetnacl": "^0.14.3" } }, "node_modules/big-integer": { "version": "1.6.51", - "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz", - "integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==", + "license": "Unlicense", "engines": { "node": ">=0.6" } }, "node_modules/big.js": { "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + 
"license": "MIT", "engines": { "node": "*" } }, "node_modules/bin-build": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bin-build/-/bin-build-3.0.0.tgz", - "integrity": "sha512-jcUOof71/TNAI2uM5uoUaDq2ePcVBQ3R/qhxAz1rX7UfvduAL/RXD3jXzvn8cVcDJdGVkiR1shal3OH0ImpuhA==", + "license": "MIT", "dependencies": { "decompress": "^4.0.0", "download": "^6.2.2", @@ -7076,8 +6626,7 @@ }, "node_modules/bin-build/node_modules/cross-spawn": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", + "license": "MIT", "dependencies": { "lru-cache": "^4.0.1", "shebang-command": "^1.2.0", @@ -7086,8 +6635,7 @@ }, "node_modules/bin-build/node_modules/execa": { "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha512-RztN09XglpYI7aBBrJCPW95jEH7YF1UEPOoX9yDhUTPdp7mK+CQvnLTuD10BNXZ3byLTu2uehZ8EcKT/4CGiFw==", + "license": "MIT", "dependencies": { "cross-spawn": "^5.0.1", "get-stream": "^3.0.0", @@ -7103,24 +6651,21 @@ }, "node_modules/bin-build/node_modules/get-stream": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-build/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/bin-build/node_modules/lru-cache": { "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "license": "ISC", "dependencies": { "pseudomap": "^1.0.2", "yallist": "^2.1.2" @@ -7128,8 +6673,7 @@ }, "node_modules/bin-build/node_modules/npm-run-path": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", + "license": "MIT", "dependencies": { "path-key": "^2.0.0" }, @@ -7139,16 +6683,14 @@ }, "node_modules/bin-build/node_modules/path-key": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-build/node_modules/shebang-command": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "license": "MIT", "dependencies": { "shebang-regex": "^1.0.0" }, @@ -7158,16 +6700,14 @@ }, "node_modules/bin-build/node_modules/shebang-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, 
"node_modules/bin-build/node_modules/which": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -7177,13 +6717,11 @@ }, "node_modules/bin-build/node_modules/yallist": { "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==" + "license": "ISC" }, "node_modules/bin-check": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bin-check/-/bin-check-4.1.0.tgz", - "integrity": "sha512-b6weQyEUKsDGFlACWSIOfveEnImkJyK/FGW6FAG42loyoquvjdtOIqO6yBFzHyqyVVhNgNkQxxx09SFLK28YnA==", + "license": "MIT", "dependencies": { "execa": "^0.7.0", "executable": "^4.1.0" @@ -7194,8 +6732,7 @@ }, "node_modules/bin-check/node_modules/cross-spawn": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", + "license": "MIT", "dependencies": { "lru-cache": "^4.0.1", "shebang-command": "^1.2.0", @@ -7204,8 +6741,7 @@ }, "node_modules/bin-check/node_modules/execa": { "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha512-RztN09XglpYI7aBBrJCPW95jEH7YF1UEPOoX9yDhUTPdp7mK+CQvnLTuD10BNXZ3byLTu2uehZ8EcKT/4CGiFw==", + "license": "MIT", "dependencies": { "cross-spawn": "^5.0.1", "get-stream": "^3.0.0", @@ -7221,24 +6757,21 @@ }, "node_modules/bin-check/node_modules/get-stream": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-check/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/bin-check/node_modules/lru-cache": { "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "license": "ISC", "dependencies": { "pseudomap": "^1.0.2", "yallist": "^2.1.2" @@ -7246,8 +6779,7 @@ }, "node_modules/bin-check/node_modules/npm-run-path": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", + "license": "MIT", "dependencies": { "path-key": "^2.0.0" }, @@ -7257,16 +6789,14 @@ }, "node_modules/bin-check/node_modules/path-key": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-check/node_modules/shebang-command": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": 
"sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "license": "MIT", "dependencies": { "shebang-regex": "^1.0.0" }, @@ -7276,16 +6806,14 @@ }, "node_modules/bin-check/node_modules/shebang-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/bin-check/node_modules/which": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -7295,13 +6823,11 @@ }, "node_modules/bin-check/node_modules/yallist": { "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==" + "license": "ISC" }, "node_modules/bin-version": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bin-version/-/bin-version-3.1.0.tgz", - "integrity": "sha512-Mkfm4iE1VFt4xd4vH+gx+0/71esbfus2LsnCGe8Pi4mndSPyT+NGES/Eg99jx8/lUGWfu3z2yuB/bt5UB+iVbQ==", + "license": "MIT", "dependencies": { "execa": "^1.0.0", "find-versions": "^3.0.0" @@ -7312,8 +6838,7 @@ }, "node_modules/bin-version-check": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/bin-version-check/-/bin-version-check-4.0.0.tgz", - "integrity": "sha512-sR631OrhC+1f8Cvs8WyVWOA33Y8tgwjETNPyyD/myRBXLkfS/vl74FmH/lFcRl9KY3zwGh7jFhvyk9vV3/3ilQ==", + "license": "MIT", "dependencies": { "bin-version": "^3.0.0", "semver": "^5.6.0", @@ -7325,16 +6850,14 @@ }, "node_modules/bin-version-check/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/bin-version/node_modules/cross-spawn": { "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "license": "MIT", "dependencies": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -7348,8 +6871,7 @@ }, "node_modules/bin-version/node_modules/execa": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "license": "MIT", "dependencies": { "cross-spawn": "^6.0.0", "get-stream": "^4.0.0", @@ -7365,16 +6887,14 @@ }, "node_modules/bin-version/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/bin-version/node_modules/npm-run-path": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", + "license": "MIT", "dependencies": { 
"path-key": "^2.0.0" }, @@ -7384,24 +6904,21 @@ }, "node_modules/bin-version/node_modules/path-key": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-version/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/bin-version/node_modules/shebang-command": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "license": "MIT", "dependencies": { "shebang-regex": "^1.0.0" }, @@ -7411,16 +6928,14 @@ }, "node_modules/bin-version/node_modules/shebang-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/bin-version/node_modules/which": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -7430,8 +6945,7 @@ }, "node_modules/bin-wrapper": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bin-wrapper/-/bin-wrapper-4.1.0.tgz", - "integrity": "sha512-hfRmo7hWIXPkbpi0ZltboCMVrU+0ClXR/JgbCKKjlDjQf6igXa7OwdqNcFWQZPZTgiY7ZpzE3+LjjkLiTN2T7Q==", + "license": "MIT", "dependencies": { "bin-check": "^4.1.0", "bin-version-check": "^4.0.0", @@ -7446,16 +6960,14 @@ }, "node_modules/bin-wrapper/node_modules/@sindresorhus/is": { "version": "0.7.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.7.0.tgz", - "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-wrapper/node_modules/cacheable-request": { "version": "2.1.4", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-2.1.4.tgz", - "integrity": "sha512-vag0O2LKZ/najSoUwDbVlnlCFvhBE/7mGTY2B5FgCBDcRD+oVV1HYTOwM6JZfMg/hIcM6IwnTZ1uQQL5/X3xIQ==", + "license": "MIT", "dependencies": { "clone-response": "1.0.2", "get-stream": "3.0.0", @@ -7468,8 +6980,7 @@ }, "node_modules/bin-wrapper/node_modules/download": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/download/-/download-7.1.0.tgz", - "integrity": "sha512-xqnBTVd/E+GxJVrX5/eUJiLYjCGPwMpdL+jGhGU57BvtcA7wwhtHVbXBeUk51kOpW3S7Jn3BQbN9Q1R1Km2qDQ==", + "license": "MIT", "dependencies": { "archive-type": "^4.0.0", "caw": "^2.0.1", @@ -7490,32 +7001,28 @@ }, "node_modules/bin-wrapper/node_modules/download/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-wrapper/node_modules/file-type": { "version": 
"8.1.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-8.1.0.tgz", - "integrity": "sha512-qyQ0pzAy78gVoJsmYeNgl8uH8yKhr1lVhW7JbzJmnlRi0I4R2eEDEJZVKG8agpDnLpacwNbDhLNG/LMdxHD2YQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/bin-wrapper/node_modules/get-stream": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-wrapper/node_modules/got": { "version": "8.3.2", - "resolved": "https://registry.npmjs.org/got/-/got-8.3.2.tgz", - "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==", + "license": "MIT", "dependencies": { "@sindresorhus/is": "^0.7.0", "cacheable-request": "^2.1.1", @@ -7541,53 +7048,46 @@ }, "node_modules/bin-wrapper/node_modules/got/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-wrapper/node_modules/http-cache-semantics": { "version": "3.8.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz", - "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==" + "license": "BSD-2-Clause" }, "node_modules/bin-wrapper/node_modules/import-lazy": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-3.1.0.tgz", - "integrity": "sha512-8/gvXvX2JMn0F+CDlSC4l6kOmVaLOO3XLkksI7CI3Ud95KDYJuYur2b9P/PUt/i/pDAMd/DulQsNbbbmRRsDIQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/bin-wrapper/node_modules/is-plain-obj": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/bin-wrapper/node_modules/keyv": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz", - "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==", + "license": "MIT", "dependencies": { "json-buffer": "3.0.0" } }, "node_modules/bin-wrapper/node_modules/lowercase-keys": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz", - "integrity": "sha512-RPlX0+PHuvxVDZ7xX+EBVAp4RsVxP/TdDSN2mJYdiq1Lc4Hz7EUSjUI7RZrKKlmrIzVhf6Jo2stj7++gVarS0A==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/bin-wrapper/node_modules/make-dir": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "license": "MIT", "dependencies": { "pify": "^3.0.0" }, @@ -7597,16 +7097,14 @@ }, "node_modules/bin-wrapper/node_modules/make-dir/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, 
"node_modules/bin-wrapper/node_modules/normalize-url": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", - "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", + "license": "MIT", "dependencies": { "prepend-http": "^2.0.0", "query-string": "^5.0.1", @@ -7618,16 +7116,14 @@ }, "node_modules/bin-wrapper/node_modules/p-cancelable": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz", - "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/bin-wrapper/node_modules/p-event": { "version": "2.3.1", - "resolved": "https://registry.npmjs.org/p-event/-/p-event-2.3.1.tgz", - "integrity": "sha512-NQCqOFhbpVTMX4qMe8PF8lbGtzZ+LCiN7pcNrb/413Na7+TRoe1xkKUzuWa/YEJdGQ0FvKtj35EEbDoVPO2kbA==", + "license": "MIT", "dependencies": { "p-timeout": "^2.0.1" }, @@ -7637,8 +7133,7 @@ }, "node_modules/bin-wrapper/node_modules/p-timeout": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz", - "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==", + "license": "MIT", "dependencies": { "p-finally": "^1.0.0" }, @@ -7648,8 +7143,7 @@ }, "node_modules/bin-wrapper/node_modules/sort-keys": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", - "integrity": "sha512-/dPCrG1s3ePpWm6yBbxZq5Be1dXGLyLn9Z791chDC3NFrpkVbWGzkBwPN1knaciexFXgRJ7hzdnwZ4stHSDmjg==", + "license": "MIT", "dependencies": { "is-plain-obj": "^1.0.0" }, @@ -7659,28 +7153,22 @@ }, "node_modules/binary": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/binary/-/binary-0.3.0.tgz", - "integrity": "sha512-D4H1y5KYwpJgK8wk1Cue5LLPgmwHKYSChkbspQg5JtVuR5ulGckxfR62H3AE9UDkdMC8yyXlqYihuz3Aqg2XZg==", + "license": "MIT", "dependencies": { "buffers": "~0.1.1", "chainsaw": "~0.1.0" - }, - "engines": { - "node": "*" } }, "node_modules/binary-extensions": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/bl": { "version": "1.2.3", - "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", - "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", + "license": "MIT", "dependencies": { "readable-stream": "^2.3.5", "safe-buffer": "^5.1.1" @@ -7688,13 +7176,11 @@ }, "node_modules/bl/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/bl/node_modules/readable-stream": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -7707,26 +7193,21 @@ }, "node_modules/bl/node_modules/string_decoder": { "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/bluebird": { "version": "3.4.7", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.4.7.tgz", - "integrity": "sha512-iD3898SR7sWVRHbiQv+sHUtHnMvC1o3nW5rAcqnq3uOn07DSAppZYUkIGslDz6gXC7HfunPe7YVBgoEJASPcHA==" + "license": "MIT" }, "node_modules/bn.js": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", - "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==" + "license": "MIT" }, "node_modules/body": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/body/-/body-5.1.0.tgz", - "integrity": "sha512-chUsBxGRtuElD6fmw1gHLpvnKdVLK302peeFa9ZqAEk8TyzZ3fygLyUEDDPTJvL9+Bor0dIwn6ePOsRM2y0zQQ==", "dependencies": { "continuable-cache": "^0.3.1", "error": "^7.0.0", @@ -7736,8 +7217,7 @@ }, "node_modules/body-parser": { "version": "1.20.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz", - "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.4", @@ -7759,34 +7239,28 @@ }, "node_modules/body-parser/node_modules/bytes": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/body-parser/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/body-parser/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/body/node_modules/bytes": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz", - "integrity": "sha512-/x68VkHLeTl3/Ll8IvxdwzhrT+IyKc52e/oyHhA2RwqPqswSnjVbSddfPRwAsJtbilMAPSRWwAlpxdYsSWOTKQ==" + "version": "1.0.0" }, "node_modules/body/node_modules/raw-body": { "version": "1.1.7", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-1.1.7.tgz", - "integrity": "sha512-WmJJU2e9Y6M5UzTOkHaM7xJGAPQD8PNzx3bAd2+uhZAim6wDk6dAZxPVYLF67XhbR4hmKGh33Lpmh4XWrCH5Mg==", + "license": "MIT", "dependencies": { "bytes": "1", "string_decoder": "0.10" @@ -7797,13 +7271,11 @@ }, "node_modules/body/node_modules/string_decoder": { "version": "0.10.31", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", - "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==" + "license": "MIT" }, "node_modules/bonjour-service": { "version": "1.0.13", - "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.0.13.tgz", - "integrity": "sha512-LWKRU/7EqDUC9CTAQtuZl5HzBALoCYwtLhffW3et7vZMwv3bWLpJf8bRYlMD5OCcDpTfnPgNCV4yo9ZIaJGMiA==", + "license": "MIT", "dependencies": { "array-flatten": "^2.1.2", 
"dns-equal": "^1.0.0", @@ -7813,13 +7285,11 @@ }, "node_modules/boolbase": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=" + "license": "ISC" }, "node_modules/boxen": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", - "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", + "license": "MIT", "dependencies": { "ansi-align": "^3.0.0", "camelcase": "^6.2.0", @@ -7839,8 +7309,7 @@ }, "node_modules/boxen/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -7853,8 +7322,7 @@ }, "node_modules/boxen/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -7868,8 +7336,7 @@ }, "node_modules/boxen/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -7879,21 +7346,18 @@ }, "node_modules/boxen/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/boxen/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/boxen/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -7903,8 +7367,7 @@ }, "node_modules/brace-expansion": { "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -7912,8 +7375,7 @@ }, "node_modules/braces": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "license": "MIT", "dependencies": { "fill-range": "^7.0.1" }, @@ -7923,13 +7385,11 @@ }, "node_modules/brorand": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": "sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==" + "license": "MIT" }, 
"node_modules/browserify-aes": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "license": "MIT", "dependencies": { "buffer-xor": "^1.0.3", "cipher-base": "^1.0.0", @@ -7941,8 +7401,7 @@ }, "node_modules/browserify-cipher": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "license": "MIT", "dependencies": { "browserify-aes": "^1.0.4", "browserify-des": "^1.0.0", @@ -7951,8 +7410,7 @@ }, "node_modules/browserify-des": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", - "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", + "license": "MIT", "dependencies": { "cipher-base": "^1.0.1", "des.js": "^1.0.0", @@ -7962,8 +7420,7 @@ }, "node_modules/browserify-rsa": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz", - "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", + "license": "MIT", "dependencies": { "bn.js": "^5.0.0", "randombytes": "^2.0.1" @@ -7971,8 +7428,7 @@ }, "node_modules/browserify-sign": { "version": "4.2.2", - "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.2.tgz", - "integrity": "sha512-1rudGyeYY42Dk6texmv7c4VcQ0EsvVbLwZkA+AQB7SxvXxmcD93jcHie8bzecJ+ChDlmAm2Qyu0+Ccg5uhZXCg==", + "license": "ISC", "dependencies": { "bn.js": "^5.2.1", "browserify-rsa": "^4.1.0", @@ -7990,8 +7446,6 @@ }, "node_modules/browserify-sign/node_modules/safe-buffer": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "funding": [ { "type": "github", @@ -8005,20 +7459,20 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/browserify-zlib": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "license": "MIT", "dependencies": { "pako": "~1.0.5" } }, "node_modules/browserslist": { - "version": "4.21.10", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz", - "integrity": "sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==", + "version": "4.24.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz", + "integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==", "funding": [ { "type": "opencollective", @@ -8033,11 +7487,12 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001517", - "electron-to-chromium": "^1.4.477", - "node-releases": "^2.0.13", - "update-browserslist-db": "^1.0.11" + "caniuse-lite": "^1.0.30001669", + "electron-to-chromium": "^1.5.41", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.1" }, "bin": { "browserslist": "cli.js" @@ -8048,17 +7503,14 @@ }, "node_modules/bser": 
{ "version": "2.1.1", - "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", - "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", "dev": true, + "license": "Apache-2.0", "dependencies": { "node-int64": "^0.4.0" } }, "node_modules/buffer": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", "funding": [ { "type": "github", @@ -8073,6 +7525,7 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" @@ -8080,8 +7533,7 @@ }, "node_modules/buffer-alloc": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", - "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "license": "MIT", "dependencies": { "buffer-alloc-unsafe": "^1.1.0", "buffer-fill": "^1.0.0" @@ -8089,65 +7541,54 @@ }, "node_modules/buffer-alloc-unsafe": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", - "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" + "license": "MIT" }, "node_modules/buffer-crc32": { "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "license": "MIT", "engines": { "node": "*" } }, "node_modules/buffer-fill": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", - "integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ==" + "license": "MIT" }, "node_modules/buffer-from": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + "license": "MIT" }, "node_modules/buffer-indexof-polyfill": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/buffer-indexof-polyfill/-/buffer-indexof-polyfill-1.0.2.tgz", - "integrity": "sha512-I7wzHwA3t1/lwXQh+A5PbNvJxgfo5r3xulgpYDB5zckTu/Z9oUK9biouBKQUjEqzaz3HnAT6TYoovmE+GqSf7A==", + "license": "MIT", "engines": { "node": ">=0.10" } }, "node_modules/buffer-xor": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==" + "license": "MIT" }, "node_modules/buffers": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/buffers/-/buffers-0.1.1.tgz", - "integrity": "sha512-9q/rDEGSb/Qsvv2qvzIzdluL5k7AaJOTrw23z9reQthrbF7is4CtlT0DXyO1oei2DCp4uojjzQ7igaSHp1kAEQ==", "engines": { "node": ">=0.2.0" } }, "node_modules/builtin-status-codes": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", - "integrity": "sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ==" + "license": "MIT" }, "node_modules/bytes": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", + "license": "MIT", 
"engines": { "node": ">= 0.8" } }, "node_modules/cache-base": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "license": "MIT", "dependencies": { "collection-visit": "^1.0.0", "component-emitter": "^1.2.1", @@ -8165,8 +7606,7 @@ }, "node_modules/cacheable-request": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", + "license": "MIT", "dependencies": { "clone-response": "^1.0.2", "get-stream": "^5.1.0", @@ -8182,8 +7622,7 @@ }, "node_modules/cacheable-request/node_modules/get-stream": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "license": "MIT", "dependencies": { "pump": "^3.0.0" }, @@ -8196,24 +7635,21 @@ }, "node_modules/cacheable-request/node_modules/lowercase-keys": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/cacheable-request/node_modules/normalize-url": { "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/call-bind": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "license": "MIT", "dependencies": { "function-bind": "^1.1.1", "get-intrinsic": "^1.0.2" @@ -8224,13 +7660,11 @@ }, "node_modules/call-me-maybe": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz", - "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==" + "license": "MIT" }, "node_modules/caller-callsite": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz", - "integrity": "sha512-JuG3qI4QOftFsZyOn1qq87fq5grLIyk1JYd5lJmdA+fG7aQ9pA/i3JIJGcO3q0MrRcHlOt1U+ZeHW8Dq9axALQ==", + "license": "MIT", "dependencies": { "callsites": "^2.0.0" }, @@ -8240,16 +7674,14 @@ }, "node_modules/caller-callsite/node_modules/callsites": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz", - "integrity": "sha512-ksWePWBloaWPxJYQ8TL0JHvtci6G5QTKwQ95RcWAa/lzoAKuAOflGdAK92hpHXjkwb8zLxoLNUoNYZgVsaJzvQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/caller-path": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz", - "integrity": "sha512-MCL3sf6nCSXOwCTzvPKhN18TU7AHTvdtam8DAogxcrJ8Rjfbbg7Lgng64H9Iy+vUV6VGFClN/TyxBkAebLRR4A==", + "license": "MIT", "dependencies": { "caller-callsite": "^2.0.0" }, @@ -8259,16 +7691,14 @@ }, "node_modules/callsites": { "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/camel-case": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", - "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "license": "MIT", "dependencies": { "pascal-case": "^3.1.2", "tslib": "^2.0.3" @@ -8276,8 +7706,7 @@ }, "node_modules/camelcase": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -8287,16 +7716,14 @@ }, "node_modules/camelcase-css": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", - "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "license": "MIT", "engines": { "node": ">= 6" } }, "node_modules/camelcase-keys": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-2.1.0.tgz", - "integrity": "sha512-bA/Z/DERHKqoEOrp+qeGKw1QlvEQkGZSc0XaY6VnTxZr+Kv1G5zFwttpjv8qxZ/sBPT4nthwZaAcsAZTJlSKXQ==", + "license": "MIT", "dependencies": { "camelcase": "^2.0.0", "map-obj": "^1.0.0" @@ -8307,16 +7734,14 @@ }, "node_modules/camelcase-keys/node_modules/camelcase": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", - "integrity": "sha512-DLIsRzJVBQu72meAKPkWQOLcujdXT32hwdfnkI1frSiSRMK1MofjKHf+MEx0SB6fjEFXL8fBDv1dKymBlOp4Qw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/caniuse-api": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "license": "MIT", "dependencies": { "browserslist": "^4.0.0", "caniuse-lite": "^1.0.0", @@ -8325,9 +7750,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001525", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001525.tgz", - "integrity": "sha512-/3z+wB4icFt3r0USMwxujAqRvaD/B7rvGTsKhbhSQErVrJvkZCLhgNLJxU8MevahQVH6hCU9FsHdNUFbiwmE7Q==", + "version": "1.0.30001686", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001686.tgz", + "integrity": "sha512-Y7deg0Aergpa24M3qLC5xjNklnKnhsmSyR/V89dLZ1n0ucJIFNs7PgR2Yfa/Zf6W79SbBicgtGxZr2juHkEUIA==", "funding": [ { "type": "opencollective", @@ -8341,17 +7766,16 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ] + ], + "license": "CC-BY-4.0" }, "node_modules/caseless": { "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==" + "license": "Apache-2.0" }, "node_modules/caw": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/caw/-/caw-2.0.1.tgz", - "integrity": "sha512-Cg8/ZSBEa8ZVY9HspcGUYaK63d/bN7rqS3CYCzEGUxuYv6UlmcjzDUz2fCFFHyTvUW5Pk0I+3hkA3iXlIj6guA==", + "license": "MIT", "dependencies": { "get-proxy": "^2.0.0", "isurl": "^1.0.0-alpha5", @@ -8364,8 +7788,7 @@ }, "node_modules/ccount": { "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/ccount/-/ccount-1.1.0.tgz", - "integrity": "sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -8373,19 +7796,14 @@ }, "node_modules/chainsaw": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/chainsaw/-/chainsaw-0.1.0.tgz", - "integrity": "sha512-75kWfWt6MEKNC8xYXIdRpDehRYY/tNSgwKaJq+dbbDcxORuVrrQ+SEHoWsniVn9XPYfP4gmdWIeDk/4YNp1rNQ==", + "license": "MIT/X11", "dependencies": { "traverse": ">=0.3.0 <0.4" - }, - "engines": { - "node": "*" } }, "node_modules/chalk": { "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -8397,17 +7815,15 @@ }, "node_modules/char-regex": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", - "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/character-entities": { "version": "1.2.4", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", - "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -8415,8 +7831,7 @@ }, "node_modules/character-entities-legacy": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", - "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -8424,8 +7839,7 @@ }, "node_modules/character-reference-invalid": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", - "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -8433,16 +7847,14 @@ }, "node_modules/charset": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/charset/-/charset-1.0.1.tgz", - "integrity": "sha512-6dVyOOYjpfFcL1Y4qChrAoQLRHvj2ziyhcm0QJlhOcAhykL/k1kTUPbeo+87MNRTRdk2OIIsIXbuF3x2wi5EXg==", + "license": "MIT", "engines": { "node": ">=4.0.0" } }, "node_modules/cheerio": { "version": "1.0.0-rc.12", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", - "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "license": "MIT", "dependencies": { "cheerio-select": "^2.1.0", "dom-serializer": "^2.0.0", @@ -8461,8 +7873,7 @@ }, "node_modules/cheerio-select": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", - "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "^1.0.0", "css-select": "^5.1.0", @@ -8477,8 +7888,7 @@ }, 
"node_modules/cheerio-select/node_modules/css-select": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", - "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "^1.0.0", "css-what": "^6.1.0", @@ -8492,8 +7902,7 @@ }, "node_modules/cheerio-select/node_modules/dom-serializer": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "license": "MIT", "dependencies": { "domelementtype": "^2.3.0", "domhandler": "^5.0.2", @@ -8505,8 +7914,7 @@ }, "node_modules/cheerio-select/node_modules/domhandler": { "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "license": "BSD-2-Clause", "dependencies": { "domelementtype": "^2.3.0" }, @@ -8519,8 +7927,7 @@ }, "node_modules/cheerio-select/node_modules/domutils": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", + "license": "BSD-2-Clause", "dependencies": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", @@ -8532,8 +7939,7 @@ }, "node_modules/cheerio-select/node_modules/entities": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "license": "BSD-2-Clause", "engines": { "node": ">=0.12" }, @@ -8543,8 +7949,7 @@ }, "node_modules/cheerio/node_modules/dom-serializer": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "license": "MIT", "dependencies": { "domelementtype": "^2.3.0", "domhandler": "^5.0.2", @@ -8556,8 +7961,7 @@ }, "node_modules/cheerio/node_modules/domhandler": { "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "license": "BSD-2-Clause", "dependencies": { "domelementtype": "^2.3.0" }, @@ -8570,8 +7974,7 @@ }, "node_modules/cheerio/node_modules/domutils": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", + "license": "BSD-2-Clause", "dependencies": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", @@ -8583,8 +7986,7 @@ }, "node_modules/cheerio/node_modules/entities": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "license": "BSD-2-Clause", "engines": { "node": ">=0.12" }, @@ -8594,8 +7996,6 @@ }, "node_modules/cheerio/node_modules/htmlparser2": { "version": "8.0.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.1.tgz", - "integrity": 
"sha512-4lVbmc1diZC7GUJQtRQ5yBAeUCL1exyMwmForWkRLnwyzWBFxN633SALPMGYaWZvKe9j1pRZJpauvmxENSp/EA==", "funding": [ "https://github.com/fb55/htmlparser2?sponsor=1", { @@ -8603,6 +8003,7 @@ "url": "https://github.com/sponsors/fb55" } ], + "license": "MIT", "dependencies": { "domelementtype": "^2.3.0", "domhandler": "^5.0.2", @@ -8612,14 +8013,13 @@ }, "node_modules/chokidar": { "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", "funding": [ { "type": "individual", "url": "https://paulmillr.com/funding/" } ], + "license": "MIT", "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -8638,36 +8038,32 @@ }, "node_modules/chrome-trace-event": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", - "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", + "license": "MIT", "engines": { "node": ">=6.0" } }, "node_modules/ci-info": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" + "license": "MIT" }, "node_modules/cipher-base": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "license": "MIT", "dependencies": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" } }, "node_modules/cjs-module-lexer": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz", - "integrity": "sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==", + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.1.tgz", + "integrity": "sha512-cuSVIHi9/9E/+821Qjdvngor+xpnlwnuwIyZOaLmHBVdXL+gP+I6QQB9VkO7RI77YIcTV+S1W9AreJ5eN63JBA==", "dev": true }, "node_modules/class-utils": { "version": "0.3.6", - "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "license": "MIT", "dependencies": { "arr-union": "^3.1.0", "define-property": "^0.2.5", @@ -8680,8 +8076,7 @@ }, "node_modules/class-utils/node_modules/define-property": { "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "license": "MIT", "dependencies": { "is-descriptor": "^0.1.0" }, @@ -8691,8 +8086,7 @@ }, "node_modules/class-utils/node_modules/is-accessor-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -8702,8 +8096,7 @@ }, "node_modules/class-utils/node_modules/is-accessor-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": 
"sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -8713,13 +8106,11 @@ }, "node_modules/class-utils/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/class-utils/node_modules/is-data-descriptor": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -8729,8 +8120,7 @@ }, "node_modules/class-utils/node_modules/is-data-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -8740,8 +8130,7 @@ }, "node_modules/class-utils/node_modules/is-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "license": "MIT", "dependencies": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -8753,21 +8142,19 @@ }, "node_modules/class-utils/node_modules/kind-of": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/classnames": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz", - "integrity": "sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==" + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", + "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==" }, "node_modules/clean-css": { "version": "5.3.1", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.1.tgz", - "integrity": "sha512-lCr8OHhiWCTw4v8POJovCoh4T7I9U11yVsPjMWWnnMmp9ZowCxyad1Pathle/9HjaDp+fdQKjO9fQydE6RHTZg==", + "license": "MIT", "dependencies": { "source-map": "~0.6.0" }, @@ -8777,24 +8164,21 @@ }, "node_modules/clean-css/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/clean-stack": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/cli-boxes": { "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": 
"sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", + "license": "MIT", "engines": { "node": ">=6" }, @@ -8804,9 +8188,8 @@ }, "node_modules/cli-cursor": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-4.0.0.tgz", - "integrity": "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==", "dev": true, + "license": "MIT", "dependencies": { "restore-cursor": "^4.0.0" }, @@ -8819,8 +8202,7 @@ }, "node_modules/cli-table3": { "version": "0.6.2", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.2.tgz", - "integrity": "sha512-QyavHCaIC80cMivimWu4aWHilIpiDpfm3hGmqAmXVL1UsnbLuBSMd21hTX6VY4ZSDSM73ESLeF8TOYId3rBTbw==", + "license": "MIT", "dependencies": { "string-width": "^4.2.0" }, @@ -8833,9 +8215,8 @@ }, "node_modules/cli-truncate": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-3.1.0.tgz", - "integrity": "sha512-wfOBkjXteqSnI59oPcJkcPl/ZmwvMMOj340qUIY1SKZCv0B9Cf4D4fAucRkIKQmsIuYK3x1rrgU7MeGRruiuiA==", "dev": true, + "license": "MIT", "dependencies": { "slice-ansi": "^5.0.0", "string-width": "^5.0.0" @@ -8849,9 +8230,8 @@ }, "node_modules/cli-truncate/node_modules/ansi-regex": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -8861,15 +8241,13 @@ }, "node_modules/cli-truncate/node_modules/emoji-regex": { "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/cli-truncate/node_modules/string-width": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", "dev": true, + "license": "MIT", "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", @@ -8884,9 +8262,8 @@ }, "node_modules/cli-truncate/node_modules/strip-ansi": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", "dev": true, + "license": "MIT", "dependencies": { "ansi-regex": "^6.0.1" }, @@ -8899,8 +8276,7 @@ }, "node_modules/cliui": { "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "license": "ISC", "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", @@ -8912,8 +8288,7 @@ }, "node_modules/clone-deep": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", - "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4", "kind-of": "^6.0.2", @@ -8925,25 +8300,22 @@ }, "node_modules/clone-response": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": 
"sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", + "license": "MIT", "dependencies": { "mimic-response": "^1.0.0" } }, "node_modules/clsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", - "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "version": "2.1.1", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/co": { "version": "4.6.0", - "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", - "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", "dev": true, + "license": "MIT", "engines": { "iojs": ">= 1.0.0", "node": ">= 0.12.0" @@ -8951,8 +8323,7 @@ }, "node_modules/coa": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz", - "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==", + "license": "MIT", "dependencies": { "@types/q": "^1.5.1", "chalk": "^2.4.1", @@ -8964,8 +8335,7 @@ }, "node_modules/codemirror": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.1.tgz", - "integrity": "sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==", + "license": "MIT", "dependencies": { "@codemirror/autocomplete": "^6.0.0", "@codemirror/commands": "^6.0.0", @@ -8978,9 +8348,7 @@ }, "node_modules/coffee-script": { "version": "1.12.7", - "resolved": "https://registry.npmjs.org/coffee-script/-/coffee-script-1.12.7.tgz", - "integrity": "sha512-fLeEhqwymYat/MpTPUjSKHVYYl0ec2mOyALEMLmzr5i1isuG+6jfI2j2d5oBO3VIzgUXgBVIcOT9uH1TFxBckw==", - "deprecated": "CoffeeScript on NPM has moved to \"coffeescript\" (no hyphen)", + "license": "MIT", "bin": { "cake": "bin/cake", "coffee": "bin/coffee" @@ -8991,8 +8359,7 @@ }, "node_modules/collapse-white-space": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz", - "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -9000,14 +8367,12 @@ }, "node_modules/collect-v8-coverage": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", - "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/collection-visit": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", - "integrity": "sha512-lNkKvzEeMBBjUGHZ+q6z9pSJla0KWAQPvtzhEV9+iGyQYG+pBpl7xKDhxoNSOZH2hhv0v5k0y2yAM4o4SjoSkw==", + "license": "MIT", "dependencies": { "map-visit": "^1.0.0", "object-visit": "^1.0.0" @@ -9018,8 +8383,7 @@ }, "node_modules/color": { "version": "3.2.1", - "resolved": "https://registry.npmjs.org/color/-/color-3.2.1.tgz", - "integrity": "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==", + "license": "MIT", "dependencies": { "color-convert": "^1.9.3", "color-string": "^1.6.0" @@ -9027,21 +8391,18 @@ }, "node_modules/color-convert": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "license": 
"MIT", "dependencies": { "color-name": "1.1.3" } }, "node_modules/color-name": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + "license": "MIT" }, "node_modules/color-string": { "version": "1.9.1", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", - "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", "dependencies": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -9049,26 +8410,22 @@ }, "node_modules/colord": { "version": "2.9.2", - "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.2.tgz", - "integrity": "sha512-Uqbg+J445nc1TKn4FoDPS6ZZqAvEDnwrH42yo8B40JSOgSLxMZ/gt3h4nmCtPLQeXhjJJkqBx7SCY35WnIixaQ==" + "license": "MIT" }, "node_modules/colorette": { "version": "2.0.20", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", - "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + "license": "MIT" }, "node_modules/combine-promises": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.1.0.tgz", - "integrity": "sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg==", + "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/combined-stream": { "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", "dependencies": { "delayed-stream": "~1.0.0" }, @@ -9078,8 +8435,7 @@ }, "node_modules/comma-separated-tokens": { "version": "1.0.8", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", - "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -9087,31 +8443,26 @@ }, "node_modules/commander": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "license": "MIT", "engines": { "node": ">= 6" } }, "node_modules/commondir": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=" + "license": "MIT" }, "node_modules/component-emitter": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==" + "license": "MIT" }, "node_modules/component-event": { "version": "0.2.1", - "resolved": "https://registry.npmjs.org/component-event/-/component-event-0.2.1.tgz", - "integrity": "sha512-wGA++isMqiDq1jPYeyv2as/Bt/u+3iLW0rEa+8NQ82jAv3TgqMiCM+B2SaBdn2DfLilLjjq736YcezihRYhfxw==" + "license": "MIT" }, "node_modules/compressible": { "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + 
"license": "MIT", "dependencies": { "mime-db": ">= 1.43.0 < 2" }, @@ -9121,8 +8472,7 @@ }, "node_modules/compression": { "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "license": "MIT", "dependencies": { "accepts": "~1.3.5", "bytes": "3.0.0", @@ -9138,21 +8488,17 @@ }, "node_modules/compression/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/compression/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/compute-gcd": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/compute-gcd/-/compute-gcd-1.2.1.tgz", - "integrity": "sha512-TwMbxBNz0l71+8Sc4czv13h4kEqnchV9igQZBi6QUaz09dnz13juGnnaWWJTRsP3brxOoxeB4SA2WELLw1hCtg==", "dependencies": { "validate.io-array": "^1.0.3", "validate.io-function": "^1.0.2", @@ -9161,8 +8507,6 @@ }, "node_modules/compute-lcm": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/compute-lcm/-/compute-lcm-1.1.2.tgz", - "integrity": "sha512-OFNPdQAXnQhDSKioX8/XYT6sdUlXwpeMjfd6ApxMJfyZ4GxmLR1xvMERctlYhlHwIiz6CSpBc2+qYKjHGZw4TQ==", "dependencies": { "compute-gcd": "^1.2.1", "validate.io-array": "^1.0.3", @@ -9172,16 +8516,14 @@ }, "node_modules/concat-map": { "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + "license": "MIT" }, "node_modules/concat-stream": { "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", "engines": [ "node >= 0.8" ], + "license": "MIT", "dependencies": { "buffer-from": "^1.0.0", "inherits": "^2.0.3", @@ -9191,13 +8533,11 @@ }, "node_modules/concat-stream/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/concat-stream/node_modules/readable-stream": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -9210,32 +8550,28 @@ }, "node_modules/concat-stream/node_modules/string_decoder": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/concat-with-sourcemaps": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/concat-with-sourcemaps/-/concat-with-sourcemaps-1.1.0.tgz", - "integrity": 
"sha512-4gEjHJFT9e+2W/77h/DS5SGUgwDaOwprX8L/gl5+3ixnzkVJJsZWDSelmN3Oilw3LNDZjZV0yqH1hLG3k6nghg==", + "license": "ISC", "dependencies": { "source-map": "^0.6.1" } }, "node_modules/concat-with-sourcemaps/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/config-chain": { "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "license": "MIT", "dependencies": { "ini": "^1.3.4", "proto-list": "~1.2.1" @@ -9243,8 +8579,7 @@ }, "node_modules/configstore": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", + "license": "BSD-2-Clause", "dependencies": { "dot-prop": "^5.2.0", "graceful-fs": "^4.1.2", @@ -9259,57 +8594,45 @@ }, "node_modules/connect-history-api-fallback": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", - "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "license": "MIT", "engines": { "node": ">=0.8" } }, "node_modules/consola": { "version": "2.15.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", - "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + "license": "MIT" }, "node_modules/console-browserify": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", - "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==" + "version": "1.2.0" }, "node_modules/console-stream": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/console-stream/-/console-stream-0.1.1.tgz", - "integrity": "sha512-QC/8l9e6ofi6nqZ5PawlDgzmMw3OxIXtvolBzap/F4UDBJlDaZRSNbL/lb41C29FcbSJncBFlJFj2WJoNyZRfQ==" + "version": "0.1.1" }, "node_modules/constants-browserify": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", - "integrity": "sha512-xFxOwqIzR/e1k1gLiWEophSCMqXcwVHIH7akf7b/vxcUeGunlj3hvZaaqxwHsTgn+IndtkQJgSztIDWeumWJDQ==" + "license": "MIT" }, "node_modules/content-disposition": { "version": "0.5.2", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/content-type": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/continuable-cache": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/continuable-cache/-/continuable-cache-0.3.1.tgz", - "integrity": "sha512-TF30kpKhTH8AGCG3dut0rdd/19B7Z+qCnrMoBLpyQu/2drZdNrrpcjPEoJeSVsQM+8KmWG5O56oPDjSSUsuTyA==" + "version": "0.3.1" }, 
"node_modules/contra": { "version": "1.9.4", - "resolved": "https://registry.npmjs.org/contra/-/contra-1.9.4.tgz", - "integrity": "sha512-N9ArHAqwR/lhPq4OdIAwH4e1btn6EIZMAz4TazjnzCiVECcWUPTma+dRAM38ERImEJBh8NiCCpjoQruSZ+agYg==", + "license": "MIT", "dependencies": { "atoa": "1.0.0", "ticky": "1.0.1" @@ -9317,37 +8640,32 @@ }, "node_modules/convert-source-map": { "version": "1.8.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", - "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.1" } }, "node_modules/cookie": { "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/cookie-signature": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + "license": "MIT" }, "node_modules/copy-descriptor": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", - "integrity": "sha512-XgZ0pFcakEUlbwQEVNg3+QAis1FyTL3Qel9FYy8pSkQqoG3PNoT0bOCQtOXcOkur21r2Eq2kI+IE+gsmAEVlYw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/copy-text-to-clipboard": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", - "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -9357,8 +8675,7 @@ }, "node_modules/copy-webpack-plugin": { "version": "11.0.0", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", - "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "license": "MIT", "dependencies": { "fast-glob": "^3.2.11", "glob-parent": "^6.0.1", @@ -9380,8 +8697,7 @@ }, "node_modules/copy-webpack-plugin/node_modules/ajv": { "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -9395,8 +8711,7 @@ }, "node_modules/copy-webpack-plugin/node_modules/ajv-keywords": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3" }, @@ -9406,8 +8721,7 @@ }, "node_modules/copy-webpack-plugin/node_modules/glob-parent": { "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "license": "ISC", "dependencies": { "is-glob": "^4.0.3" }, @@ -9417,8 +8731,7 @@ }, "node_modules/copy-webpack-plugin/node_modules/globby": { "version": "13.1.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.2.tgz", - "integrity": 
"sha512-LKSDZXToac40u8Q1PQtZihbNdTYSNMuWe+K5l+oa6KgDzSvVrHXlJy40hUP522RjAIoNLJYBJi7ow+rbFpIhHQ==", + "license": "MIT", "dependencies": { "dir-glob": "^3.0.1", "fast-glob": "^3.2.11", @@ -9435,13 +8748,11 @@ }, "node_modules/copy-webpack-plugin/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/copy-webpack-plugin/node_modules/schema-utils": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.8.0", @@ -9458,8 +8769,7 @@ }, "node_modules/copy-webpack-plugin/node_modules/slash": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -9469,9 +8779,8 @@ }, "node_modules/core-js": { "version": "3.25.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.25.1.tgz", - "integrity": "sha512-sr0FY4lnO1hkQ4gLDr24K0DGnweGO1QwSj5BpfQjpSJPdqWalja4cTps29Y/PJVG/P7FYlPDkH3hO+Tr0CvDgQ==", "hasInstallScript": true, + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/core-js" @@ -9479,8 +8788,7 @@ }, "node_modules/core-js-compat": { "version": "3.26.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.26.1.tgz", - "integrity": "sha512-622/KzTudvXCDLRw70iHW4KKs1aGpcRcowGWyYJr2DEBfRrd6hNJybxSWJFuZYD4ma86xhrwDDHxmDaIq4EA8A==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4" }, @@ -9491,9 +8799,8 @@ }, "node_modules/core-js-pure": { "version": "3.24.0", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.24.0.tgz", - "integrity": "sha512-uzMmW8cRh7uYw4JQtzqvGWRyC2T5+4zipQLQdi2FmiRqP83k3d6F3stv2iAlNhOs6cXN401FCD5TL0vvleuHgA==", "hasInstallScript": true, + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/core-js" @@ -9501,21 +8808,18 @@ }, "node_modules/core-util-is": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", - "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + "license": "MIT" }, "node_modules/cose-base": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", - "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", + "license": "MIT", "dependencies": { "layout-base": "^1.0.0" } }, "node_modules/cosmiconfig": { "version": "7.0.1", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.1.tgz", - "integrity": "sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==", + "license": "MIT", "dependencies": { "@types/parse-json": "^4.0.0", "import-fresh": "^3.2.1", @@ -9529,8 +8833,7 @@ }, "node_modules/create-ecdh": { "version": "4.0.4", - "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", - "integrity": 
"sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", + "license": "MIT", "dependencies": { "bn.js": "^4.1.0", "elliptic": "^6.5.3" @@ -9538,13 +8841,11 @@ }, "node_modules/create-ecdh/node_modules/bn.js": { "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "license": "MIT" }, "node_modules/create-hash": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "license": "MIT", "dependencies": { "cipher-base": "^1.0.1", "inherits": "^2.0.1", @@ -9555,8 +8856,7 @@ }, "node_modules/create-hmac": { "version": "1.1.7", - "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "license": "MIT", "dependencies": { "cipher-base": "^1.0.3", "create-hash": "^1.1.0", @@ -9566,23 +8866,111 @@ "sha.js": "^2.4.8" } }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-jest/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/create-jest/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/create-jest/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/create-jest/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/create-jest/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + 
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/create-jest/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/crelt": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", - "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==" + "license": "MIT" }, "node_modules/cross-fetch": { "version": "3.1.8", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", - "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", + "license": "MIT", "dependencies": { "node-fetch": "^2.6.12" } }, "node_modules/cross-spawn": { "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "license": "MIT", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -9594,8 +8982,7 @@ }, "node_modules/crowdin-cli": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/crowdin-cli/-/crowdin-cli-0.3.0.tgz", - "integrity": "sha512-s1vSRqWalCqd+vW7nF4oZo1a2pMpEgwIiwVlPRD0HmGY3HjJwQKXqZ26NpX5qCDVN8UdEsScy+2jle0PPQBmAg==", + "license": "MIT", "dependencies": { "request": "^2.53.0", "yamljs": "^0.2.1", @@ -9607,16 +8994,14 @@ }, "node_modules/crowdin-cli/node_modules/yargs": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-2.3.0.tgz", - "integrity": "sha512-w48USdbTdaVMcE3CnXsEtSY9zYSN7dTyVnLBgrJF2quA5rLwobC9zixxfexereLGFaxjxtR3oWdydC0qoayakw==", + "license": "MIT/X11", "dependencies": { "wordwrap": "0.0.2" } }, "node_modules/crypto-browserify": { "version": "3.12.0", - "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "license": "MIT", "dependencies": { "browserify-cipher": "^1.0.0", "browserify-sign": "^4.0.0", @@ -9636,29 +9021,25 @@ }, "node_modules/crypto-js": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz", - "integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q==" + "license": "MIT" }, "node_modules/crypto-random-string": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/css-color-names": { "version": "0.0.4", - "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", - "integrity": "sha512-zj5D7X1U2h2zsXOAM8EyUREBnnts6H+Jm+d1M2DbiQQcUtnqgQsMrdo8JW9R80YFUmIdBZeMu5wvYM7hcgWP/Q==", + "license": "MIT", "engines": { "node": "*" } }, "node_modules/css-declaration-sorter": { "version": "6.3.1", - "resolved": 
"https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.3.1.tgz", - "integrity": "sha512-fBffmak0bPAnyqc/HO8C3n2sHrp9wcqQz6ES9koRF2/mLOVAx9zIQ3Y7R29sYCteTPqMCwns4WYQoCX91Xl3+w==", + "license": "ISC", "engines": { "node": "^10 || ^12 || >=14" }, @@ -9668,16 +9049,14 @@ }, "node_modules/css-line-break": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-line-break/-/css-line-break-2.1.0.tgz", - "integrity": "sha512-FHcKFCZcAha3LwfVBhCQbW2nCNbkZXn7KVUJcsT5/P8YmfsVja0FMPJr0B903j/E69HUphKiV9iQArX8SDYA4w==", + "license": "MIT", "dependencies": { "utrie": "^1.0.2" } }, "node_modules/css-loader": { "version": "6.7.1", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.7.1.tgz", - "integrity": "sha512-yB5CNFa14MbPJcomwNh3wLThtkZgcNyI2bNMRt8iE5Z8Vwl7f8vQXFAzn2HDOJvtDq2NTZBUGMSUNNyrv3/+cw==", + "license": "MIT", "dependencies": { "icss-utils": "^5.1.0", "postcss": "^8.4.7", @@ -9701,8 +9080,7 @@ }, "node_modules/css-minimizer-webpack-plugin": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.0.0.tgz", - "integrity": "sha512-7ZXXRzRHvofv3Uac5Y+RkWRNo0ZMlcg8e9/OtrqUYmwDWJo+qs67GvdeFrXLsFb7czKNwjQhPkM0avlIYl+1nA==", + "license": "MIT", "dependencies": { "cssnano": "^5.1.8", "jest-worker": "^27.5.1", @@ -9738,8 +9116,7 @@ }, "node_modules/css-minimizer-webpack-plugin/node_modules/ajv": { "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -9753,8 +9130,7 @@ }, "node_modules/css-minimizer-webpack-plugin/node_modules/ajv-keywords": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3" }, @@ -9764,13 +9140,11 @@ }, "node_modules/css-minimizer-webpack-plugin/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.8.0", @@ -9787,16 +9161,14 @@ }, "node_modules/css-minimizer-webpack-plugin/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/css-select": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", - "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "^1.0.0", "css-what": "^6.0.1", 
@@ -9810,13 +9182,11 @@ }, "node_modules/css-select-base-adapter": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz", - "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==" + "license": "MIT" }, "node_modules/css-tree": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", - "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "license": "MIT", "dependencies": { "mdn-data": "2.0.14", "source-map": "^0.6.1" @@ -9827,16 +9197,14 @@ }, "node_modules/css-tree/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/css-what": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", - "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "license": "BSD-2-Clause", "engines": { "node": ">= 6" }, @@ -9846,8 +9214,7 @@ }, "node_modules/cssesc": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", "bin": { "cssesc": "bin/cssesc" }, @@ -9857,8 +9224,7 @@ }, "node_modules/cssnano": { "version": "5.1.12", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.12.tgz", - "integrity": "sha512-TgvArbEZu0lk/dvg2ja+B7kYoD7BBCmn3+k58xD0qjrGHsFzXY/wKTo9M5egcUCabPol05e/PVoIu79s2JN4WQ==", + "license": "MIT", "dependencies": { "cssnano-preset-default": "^5.2.12", "lilconfig": "^2.0.3", @@ -9877,8 +9243,7 @@ }, "node_modules/cssnano-preset-advanced": { "version": "5.3.10", - "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", - "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", + "license": "MIT", "dependencies": { "autoprefixer": "^10.4.12", "cssnano-preset-default": "^5.2.14", @@ -9896,8 +9261,7 @@ }, "node_modules/cssnano-preset-default": { "version": "5.2.14", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", - "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", + "license": "MIT", "dependencies": { "css-declaration-sorter": "^6.3.1", "cssnano-utils": "^3.1.0", @@ -9938,24 +9302,21 @@ }, "node_modules/cssnano-util-get-arguments": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz", - "integrity": "sha512-6RIcwmV3/cBMG8Aj5gucQRsJb4vv4I4rn6YjPbVWd5+Pn/fuG+YseGvXGk00XLkoZkaj31QOD7vMUpNPC4FIuw==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/cssnano-util-get-match": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz", - "integrity": "sha512-JPMZ1TSMRUPVIqEalIBNoBtAYbi8okvcFns4O0YIhcdGebeYZK7dMyHJiQ6GqNBA9kE0Hym4Aqym5rPdsV/4Cw==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/cssnano-util-raw-cache": { "version": "4.0.1", - 
"resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz", - "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0" }, @@ -9965,13 +9326,11 @@ }, "node_modules/cssnano-util-raw-cache/node_modules/picocolors": { "version": "0.2.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", - "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" + "license": "ISC" }, "node_modules/cssnano-util-raw-cache/node_modules/postcss": { "version": "7.0.39", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", - "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", + "license": "MIT", "dependencies": { "picocolors": "^0.2.1", "source-map": "^0.6.1" @@ -9986,24 +9345,21 @@ }, "node_modules/cssnano-util-raw-cache/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/cssnano-util-same-parent": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz", - "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/cssnano-utils": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", - "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "license": "MIT", "engines": { "node": "^10 || ^12 || >=14.0" }, @@ -10013,8 +9369,7 @@ }, "node_modules/csso": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "license": "MIT", "dependencies": { "css-tree": "^1.1.2" }, @@ -10024,13 +9379,11 @@ }, "node_modules/csstype": { "version": "3.0.10", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.10.tgz", - "integrity": "sha512-2u44ZG2OcNUO9HDp/Jl8C07x6pU/eTR3ncV91SiK3dhG9TWvRVsCoJw14Ckx5DgWkzGA3waZWO3d7pgqpUI/XA==" + "license": "MIT" }, "node_modules/currently-unhandled": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", - "integrity": "sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==", + "license": "MIT", "dependencies": { "array-find-index": "^1.0.1" }, @@ -10040,8 +9393,7 @@ }, "node_modules/cytoscape": { "version": "3.28.1", - "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.28.1.tgz", - "integrity": "sha512-xyItz4O/4zp9/239wCcH8ZcFuuZooEeF8KHRmzjDfGdXsj3OG9MFSMA0pJE0uX3uCN/ygof6hHf4L7lst+JaDg==", + "license": "MIT", "dependencies": { "heap": "^0.2.6", "lodash": "^4.17.21" @@ -10052,8 +9404,7 @@ }, "node_modules/cytoscape-cose-bilkent": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", - "integrity": 
"sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", + "license": "MIT", "dependencies": { "cose-base": "^1.0.0" }, @@ -10063,8 +9414,7 @@ }, "node_modules/cytoscape-fcose": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", - "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", + "license": "MIT", "dependencies": { "cose-base": "^2.2.0" }, @@ -10074,21 +9424,18 @@ }, "node_modules/cytoscape-fcose/node_modules/cose-base": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", - "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", + "license": "MIT", "dependencies": { "layout-base": "^2.0.0" } }, "node_modules/cytoscape-fcose/node_modules/layout-base": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", - "integrity": "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==" + "license": "MIT" }, "node_modules/d3": { "version": "7.9.0", - "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", - "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", + "license": "ISC", "dependencies": { "d3-array": "3", "d3-axis": "3", @@ -10127,8 +9474,7 @@ }, "node_modules/d3-array": { "version": "3.2.4", - "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", - "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", "dependencies": { "internmap": "1 - 2" }, @@ -10138,16 +9484,14 @@ }, "node_modules/d3-axis": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", - "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-brush": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", - "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", + "license": "ISC", "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", @@ -10161,8 +9505,7 @@ }, "node_modules/d3-chord": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", - "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", + "license": "ISC", "dependencies": { "d3-path": "1 - 3" }, @@ -10172,16 +9515,14 @@ }, "node_modules/d3-color": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", - "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-contour": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", - "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", + "license": "ISC", "dependencies": { "d3-array": "^3.2.0" }, @@ -10191,8 +9532,7 @@ }, "node_modules/d3-delaunay": { "version": "6.0.4", - "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", - "integrity": 
"sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", + "license": "ISC", "dependencies": { "delaunator": "5" }, @@ -10202,16 +9542,14 @@ }, "node_modules/d3-dispatch": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", - "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-drag": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", - "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", "dependencies": { "d3-dispatch": "1 - 3", "d3-selection": "3" @@ -10222,8 +9560,7 @@ }, "node_modules/d3-dsv": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", - "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", + "license": "ISC", "dependencies": { "commander": "7", "iconv-lite": "0.6", @@ -10246,16 +9583,14 @@ }, "node_modules/d3-dsv/node_modules/commander": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", "engines": { "node": ">= 10" } }, "node_modules/d3-dsv/node_modules/iconv-lite": { "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" }, @@ -10265,16 +9600,14 @@ }, "node_modules/d3-ease": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", - "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", "engines": { "node": ">=12" } }, "node_modules/d3-fetch": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", - "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", + "license": "ISC", "dependencies": { "d3-dsv": "1 - 3" }, @@ -10284,8 +9617,7 @@ }, "node_modules/d3-force": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", - "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", + "license": "ISC", "dependencies": { "d3-dispatch": "1 - 3", "d3-quadtree": "1 - 3", @@ -10297,16 +9629,14 @@ }, "node_modules/d3-format": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", - "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-geo": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", - "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", + "license": "ISC", "dependencies": { "d3-array": "2.5.0 - 3" }, @@ -10316,16 +9646,14 @@ }, "node_modules/d3-hierarchy": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", - 
"integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-interpolate": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", - "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", "dependencies": { "d3-color": "1 - 3" }, @@ -10335,40 +9663,35 @@ }, "node_modules/d3-path": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", - "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-polygon": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", - "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-quadtree": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", - "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-random": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", - "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-scale": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", - "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", "dependencies": { "d3-array": "2.10.0 - 3", "d3-format": "1 - 3", @@ -10382,8 +9705,7 @@ }, "node_modules/d3-scale-chromatic": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", - "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", + "license": "ISC", "dependencies": { "d3-color": "1 - 3", "d3-interpolate": "1 - 3" @@ -10394,16 +9716,14 @@ }, "node_modules/d3-selection": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", - "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-shape": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", - "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", "dependencies": { "d3-path": "^3.1.0" }, @@ -10413,8 +9733,7 @@ }, "node_modules/d3-time": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", - "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", "dependencies": { "d3-array": "2 - 3" }, @@ -10424,8 +9743,7 @@ }, "node_modules/d3-time-format": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", - "integrity": 
"sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", "dependencies": { "d3-time": "1 - 3" }, @@ -10435,16 +9753,14 @@ }, "node_modules/d3-timer": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", - "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/d3-transition": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", - "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", "dependencies": { "d3-color": "1 - 3", "d3-dispatch": "1 - 3", @@ -10461,8 +9777,7 @@ }, "node_modules/d3-zoom": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", - "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", @@ -10476,8 +9791,7 @@ }, "node_modules/dagre-d3-es": { "version": "7.0.9", - "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.9.tgz", - "integrity": "sha512-rYR4QfVmy+sR44IBDvVtcAmOReGBvRCWDpO2QjYwqgh9yijw6eSHBqaPG/LIOEy7aBsniLvtMW6pg19qJhq60w==", + "license": "MIT", "dependencies": { "d3": "^7.8.2", "lodash-es": "^4.17.21" @@ -10485,8 +9799,7 @@ }, "node_modules/dashdash": { "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "license": "MIT", "dependencies": { "assert-plus": "^1.0.0" }, @@ -10496,13 +9809,11 @@ }, "node_modules/dayjs": { "version": "1.11.10", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.10.tgz", - "integrity": "sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ==" + "license": "MIT" }, "node_modules/debug": { "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "license": "MIT", "dependencies": { "ms": "2.1.2" }, @@ -10517,16 +9828,14 @@ }, "node_modules/decamelize": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/decode-named-character-reference": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", - "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "license": "MIT", "dependencies": { "character-entities": "^2.0.0" }, @@ -10537,8 +9846,7 @@ }, "node_modules/decode-named-character-reference/node_modules/character-entities": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", - "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -10546,16 +9854,14 @@ }, 
"node_modules/decode-uri-component": { "version": "0.2.2", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.2.tgz", - "integrity": "sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==", + "license": "MIT", "engines": { "node": ">=0.10" } }, "node_modules/decompress": { "version": "4.2.1", - "resolved": "https://registry.npmjs.org/decompress/-/decompress-4.2.1.tgz", - "integrity": "sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ==", + "license": "MIT", "dependencies": { "decompress-tar": "^4.0.0", "decompress-tarbz2": "^4.0.0", @@ -10572,8 +9878,7 @@ }, "node_modules/decompress-response": { "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", + "license": "MIT", "dependencies": { "mimic-response": "^1.0.0" }, @@ -10583,8 +9888,7 @@ }, "node_modules/decompress-tar": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/decompress-tar/-/decompress-tar-4.1.1.tgz", - "integrity": "sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ==", + "license": "MIT", "dependencies": { "file-type": "^5.2.0", "is-stream": "^1.1.0", @@ -10596,24 +9900,21 @@ }, "node_modules/decompress-tar/node_modules/file-type": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", - "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/decompress-tar/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/decompress-tarbz2": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/decompress-tarbz2/-/decompress-tarbz2-4.1.1.tgz", - "integrity": "sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A==", + "license": "MIT", "dependencies": { "decompress-tar": "^4.1.0", "file-type": "^6.1.0", @@ -10627,24 +9928,21 @@ }, "node_modules/decompress-tarbz2/node_modules/file-type": { "version": "6.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-6.2.0.tgz", - "integrity": "sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/decompress-tarbz2/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/decompress-targz": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/decompress-targz/-/decompress-targz-4.1.1.tgz", - "integrity": "sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w==", + "license": "MIT", "dependencies": { "decompress-tar": "^4.1.1", "file-type": "^5.2.0", @@ -10656,24 +9954,21 @@ }, "node_modules/decompress-targz/node_modules/file-type": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", - 
"integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/decompress-targz/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/decompress-unzip": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/decompress-unzip/-/decompress-unzip-4.0.1.tgz", - "integrity": "sha512-1fqeluvxgnn86MOh66u8FjbtJpAFv5wgCT9Iw8rcBqQcCo5tO8eiJw7NNTrvt9n4CRBVq7CstiS922oPgyGLrw==", + "license": "MIT", "dependencies": { "file-type": "^3.8.0", "get-stream": "^2.2.0", @@ -10686,16 +9981,14 @@ }, "node_modules/decompress-unzip/node_modules/file-type": { "version": "3.9.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-3.9.0.tgz", - "integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/decompress-unzip/node_modules/get-stream": { "version": "2.3.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz", - "integrity": "sha512-AUGhbbemXxrZJRD5cDvKtQxLuYaIbNtDTK8YqupCI393Q2KSTreEsLUN3ZxAWFGiKTzL6nKuzfcIvieflUX9qA==", + "license": "MIT", "dependencies": { "object-assign": "^4.0.1", "pinkie-promise": "^2.0.0" @@ -10706,16 +9999,14 @@ }, "node_modules/decompress-unzip/node_modules/pify": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/decompress/node_modules/make-dir": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "license": "MIT", "dependencies": { "pify": "^3.0.0" }, @@ -10725,25 +10016,24 @@ }, "node_modules/decompress/node_modules/make-dir/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/decompress/node_modules/pify": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/dedent": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz", - "integrity": "sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.3.tgz", + "integrity": "sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==", "dev": true, + "license": "MIT", "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, @@ -10755,29 +10045,25 @@ }, "node_modules/deep-extend": { "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - 
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "license": "MIT", "engines": { "node": ">=4.0.0" } }, "node_modules/deep-is": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" + "license": "MIT" }, "node_modules/deepmerge": { "version": "4.2.2", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz", - "integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/default-gateway": { "version": "6.0.3", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", - "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "license": "BSD-2-Clause", "dependencies": { "execa": "^5.0.0" }, @@ -10787,21 +10073,18 @@ }, "node_modules/defer-to-connect": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" + "license": "MIT" }, "node_modules/define-lazy-prop": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/define-properties": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.4.tgz", - "integrity": "sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==", + "license": "MIT", "dependencies": { "has-property-descriptors": "^1.0.0", "object-keys": "^1.1.1" @@ -10815,8 +10098,7 @@ }, "node_modules/define-property": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "license": "MIT", "dependencies": { "is-descriptor": "^1.0.2", "isobject": "^3.0.1" @@ -10827,8 +10109,7 @@ }, "node_modules/del": { "version": "6.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", - "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "license": "MIT", "dependencies": { "globby": "^11.0.1", "graceful-fs": "^4.2.4", @@ -10848,40 +10129,35 @@ }, "node_modules/delaunator": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", - "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", + "license": "ISC", "dependencies": { "robust-predicates": "^3.0.2" } }, "node_modules/delayed-stream": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", "engines": { "node": ">=0.4.0" } }, "node_modules/depd": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": 
"sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/dequal": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", - "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/des.js": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.1.0.tgz", - "integrity": "sha512-r17GxjhUCjSRy8aiJpr8/UadFIzMzJGexI3Nmz4ADi9LYSFx4gTBp80+NaX/YsXWWLhpZ7v/v/ubEc/bCNfKwg==", + "license": "MIT", "dependencies": { "inherits": "^2.0.1", "minimalistic-assert": "^1.0.0" @@ -10889,8 +10165,7 @@ }, "node_modules/destroy": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", "engines": { "node": ">= 0.8", "npm": "1.2.8000 || >= 1.4.16" @@ -10898,8 +10173,7 @@ }, "node_modules/detab": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/detab/-/detab-2.0.4.tgz", - "integrity": "sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==", + "license": "MIT", "dependencies": { "repeat-string": "^1.5.4" }, @@ -10910,22 +10184,19 @@ }, "node_modules/detect-newline": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", - "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/detect-node": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + "license": "MIT" }, "node_modules/detect-port": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.3.0.tgz", - "integrity": "sha512-E+B1gzkl2gqxt1IhUzwjrxBKRqx1UzC3WLONHinn8S3T6lwV/agVCyitiFOsGJ/eYuEUBvD71MZHy3Pv1G9doQ==", + "license": "MIT", "dependencies": { "address": "^1.0.1", "debug": "^2.6.0" @@ -10940,8 +10211,7 @@ }, "node_modules/detect-port-alt": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", - "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "license": "MIT", "dependencies": { "address": "^1.0.1", "debug": "^2.6.0" @@ -10956,67 +10226,59 @@ }, "node_modules/detect-port-alt/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/detect-port-alt/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/detect-port/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/detect-port/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + "license": "MIT" }, "node_modules/diacritics-map": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/diacritics-map/-/diacritics-map-0.1.0.tgz", - "integrity": "sha512-3omnDTYrGigU0i4cJjvaKwD52B8aoqyX/NEIkukFFkogBemsIbhSa1O414fpTp5nuszJG6lvQ5vBvDVNCbSsaQ==", + "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/didi": { - "version": "10.2.1", - "resolved": "https://registry.npmjs.org/didi/-/didi-10.2.1.tgz", - "integrity": "sha512-NaPoyMxu+78E2O6xE9JQkeTpmVhMcu8xneIKtSfqBuGUBU7LmNUaYtJXJQ2JWRx6iYY69oj4nerXVRWGXAw/IQ==", + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/didi/-/didi-10.2.2.tgz", + "integrity": "sha512-l8NYkYFXV1izHI65EyT8EXOjUZtKmQkHLTT89cSP7HU5J/G7AOj0dXKtLc04EXYlga99PBY18IPjOeZ+c3DI4w==", "engines": { "node": ">= 16" } }, "node_modules/diff": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz", - "integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.3.1" } }, "node_modules/diff-sequences": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", - "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", "dev": true, + "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/diffie-hellman": { "version": "5.0.3", - "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "license": "MIT", "dependencies": { "bn.js": "^4.1.0", "miller-rabin": "^4.0.0", @@ -11025,13 +10287,11 @@ }, "node_modules/diffie-hellman/node_modules/bn.js": { "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "license": "MIT" }, "node_modules/dir-glob": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "license": "MIT", "dependencies": { "path-type": "^4.0.0" }, @@ -11041,18 +10301,15 @@ }, "node_modules/discontinuous-range": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/discontinuous-range/-/discontinuous-range-1.0.0.tgz", - "integrity": "sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ==" + "license": "MIT" }, "node_modules/dns-equal": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" + "license": "MIT" }, "node_modules/dns-packet": { "version": "5.4.0", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.4.0.tgz", - "integrity": "sha512-EgqGeaBB8hLiHLZtp/IbaDQTL8pZ0+IvwzSHA6d7VyMDM+B9hgddEMa9xjK5oYnw0ci0JQ6g2XCD7/f6cafU6g==", + "license": "MIT", 
"dependencies": { "@leichtgewicht/ip-codec": "^2.0.1" }, @@ -11062,8 +10319,7 @@ }, "node_modules/docusaurus": { "version": "1.14.7", - "resolved": "https://registry.npmjs.org/docusaurus/-/docusaurus-1.14.7.tgz", - "integrity": "sha512-UWqar4ZX0lEcpLc5Tg+MwZ2jhF/1n1toCQRSeoxDON/D+E9ToLr+vTRFVMP/Tk84NXSVjZFRlrjWwM2pXzvLsQ==", + "license": "MIT", "dependencies": { "@babel/core": "^7.12.3", "@babel/plugin-proposal-class-properties": "^7.12.1", @@ -11126,8 +10382,7 @@ }, "node_modules/docusaurus-plugin-openapi-docs": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/docusaurus-plugin-openapi-docs/-/docusaurus-plugin-openapi-docs-2.0.4.tgz", - "integrity": "sha512-jLgEEbMsQ+Y6ihy4y7SmXthUMRDbqAL0OKrdtUaOAxxb/wkLXB28mX74xiZzL928DZJ84IJejHgbjFb2ITcKhA==", + "license": "MIT", "dependencies": { "@apidevtools/json-schema-ref-parser": "^10.1.0", "@docusaurus/plugin-content-docs": ">=2.4.1 <=2.4.3", @@ -11156,8 +10411,7 @@ }, "node_modules/docusaurus-plugin-openapi-docs/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -11170,8 +10424,7 @@ }, "node_modules/docusaurus-plugin-openapi-docs/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -11183,10 +10436,16 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/docusaurus-plugin-openapi-docs/node_modules/color-convert": { + "node_modules/docusaurus-plugin-openapi-docs/node_modules/clsx": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/docusaurus-plugin-openapi-docs/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -11196,13 +10455,11 @@ }, "node_modules/docusaurus-plugin-openapi-docs/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/docusaurus-plugin-openapi-docs/node_modules/fs-extra": { "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "license": "MIT", "dependencies": { "at-least-node": "^1.0.0", "graceful-fs": "^4.2.0", @@ -11215,16 +10472,14 @@ }, "node_modules/docusaurus-plugin-openapi-docs/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/docusaurus-plugin-openapi-docs/node_modules/supports-color": { "version": "7.2.0", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -11234,8 +10489,7 @@ }, "node_modules/docusaurus-theme-openapi-docs": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/docusaurus-theme-openapi-docs/-/docusaurus-theme-openapi-docs-2.0.4.tgz", - "integrity": "sha512-w4YodyfMuzvWg6DDDzqb+eNBt4D5ZkMB13343u4zREBdDBOeOK5ikLQuQ+735WqPbAbH4gonvInXLQTAYodCNw==", + "license": "MIT", "dependencies": { "@docusaurus/theme-common": ">=2.4.1 <=2.4.3", "@hookform/error-message": "^2.0.1", @@ -11271,10 +10525,16 @@ "react-dom": "^16.8.4 || ^17.0.0 || ^18.0.0" } }, + "node_modules/docusaurus-theme-openapi-docs/node_modules/clsx": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/docusaurus-theme-openapi-docs/node_modules/docusaurus-plugin-sass": { "version": "0.2.5", - "resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.5.tgz", - "integrity": "sha512-Z+D0fLFUKcFpM+bqSUmqKIU+vO+YF1xoEQh5hoFreg2eMf722+siwXDD+sqtwU8E4MvVpuvsQfaHwODNlxJAEg==", + "license": "MIT", "dependencies": { "sass-loader": "^10.1.1" }, @@ -11285,8 +10545,7 @@ }, "node_modules/docusaurus-theme-openapi-docs/node_modules/docusaurus-plugin-sass/node_modules/sass-loader": { "version": "10.5.2", - "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-10.5.2.tgz", - "integrity": "sha512-vMUoSNOUKJILHpcNCCyD23X34gve1TS7Rjd9uXHeKqhvBG39x6XbswFDtpbTElj6XdMFezoWhkh5vtKudf2cgQ==", + "license": "MIT", "dependencies": { "klona": "^2.0.4", "loader-utils": "^2.0.0", @@ -11321,16 +10580,14 @@ }, "node_modules/docusaurus/node_modules/@babel/code-frame": { "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", - "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", + "license": "MIT", "dependencies": { "@babel/highlight": "^7.10.4" } }, "node_modules/docusaurus/node_modules/airbnb-prop-types": { "version": "2.16.0", - "resolved": "https://registry.npmjs.org/airbnb-prop-types/-/airbnb-prop-types-2.16.0.tgz", - "integrity": "sha512-7WHOFolP/6cS96PhKNrslCLMYAI8yB1Pp6u6XmxozQOiZbsI5ycglZr5cHhBFfuRcQQjzCMith5ZPZdYiJCxUg==", + "license": "MIT", "dependencies": { "array.prototype.find": "^2.1.1", "function.prototype.name": "^1.1.2", @@ -11351,8 +10608,7 @@ }, "node_modules/docusaurus/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -11365,16 +10621,14 @@ }, "node_modules/docusaurus/node_modules/argparse": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/docusaurus/node_modules/autoprefixer": { "version": "9.8.8", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.8.tgz", - "integrity": "sha512-eM9d/swFopRt5gdJ7jrpCwgvEMIayITpojhkkSMRsFHYuH5bkSQ4p/9qTEHtmNudUZh22Tehu7I6CxAW0IXTKA==", + "license": "MIT", "dependencies": { 
"browserslist": "^4.12.0", "caniuse-lite": "^1.0.30001109", @@ -11394,8 +10648,7 @@ }, "node_modules/docusaurus/node_modules/braces": { "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "license": "MIT", "dependencies": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -11414,8 +10667,7 @@ }, "node_modules/docusaurus/node_modules/browserslist": { "version": "4.14.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.14.2.tgz", - "integrity": "sha512-HI4lPveGKUR0x2StIz+2FXfDk9SfVMrxn6PLh1JeGUwcuoDkdKZebWiyLRJ68iIPDpMI4JLVDf7S7XzslgWOhw==", + "license": "MIT", "dependencies": { "caniuse-lite": "^1.0.30001125", "electron-to-chromium": "^1.3.564", @@ -11435,8 +10687,7 @@ }, "node_modules/docusaurus/node_modules/chalk": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -11447,8 +10698,7 @@ }, "node_modules/docusaurus/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -11458,21 +10708,18 @@ }, "node_modules/docusaurus/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/commander": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "license": "MIT", "engines": { "node": ">= 6" } }, "node_modules/docusaurus/node_modules/cosmiconfig": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz", - "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==", + "license": "MIT", "dependencies": { "import-fresh": "^2.0.0", "is-directory": "^0.3.1", @@ -11485,8 +10732,7 @@ }, "node_modules/docusaurus/node_modules/css-declaration-sorter": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz", - "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==", + "license": "MIT", "dependencies": { "postcss": "^7.0.1", "timsort": "^0.3.0" @@ -11497,8 +10743,7 @@ }, "node_modules/docusaurus/node_modules/css-select": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "^1.0.0", "css-what": "^3.2.1", @@ -11508,8 +10753,7 @@ }, "node_modules/docusaurus/node_modules/css-tree": { "version": "1.0.0-alpha.37", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", - "integrity": 
"sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", + "license": "MIT", "dependencies": { "mdn-data": "2.0.4", "source-map": "^0.6.1" @@ -11520,8 +10764,7 @@ }, "node_modules/docusaurus/node_modules/css-what": { "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", + "license": "BSD-2-Clause", "engines": { "node": ">= 6" }, @@ -11531,8 +10774,7 @@ }, "node_modules/docusaurus/node_modules/cssnano": { "version": "4.1.11", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.11.tgz", - "integrity": "sha512-6gZm2htn7xIPJOHY824ERgj8cNPgPxyCSnkXc4v7YvNW+TdVfzgngHcEhy/8D11kUWRUMbke+tC+AUcUsnMz2g==", + "license": "MIT", "dependencies": { "cosmiconfig": "^5.0.0", "cssnano-preset-default": "^4.0.8", @@ -11545,8 +10787,7 @@ }, "node_modules/docusaurus/node_modules/cssnano-preset-default": { "version": "4.0.8", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.8.tgz", - "integrity": "sha512-LdAyHuq+VRyeVREFmuxUZR1TXjQm8QQU/ktoo/x7bz+SdOge1YKc5eMN6pRW7YWBmyq59CqYba1dJ5cUukEjLQ==", + "license": "MIT", "dependencies": { "css-declaration-sorter": "^4.0.1", "cssnano-util-raw-cache": "^4.0.1", @@ -11585,8 +10826,7 @@ }, "node_modules/docusaurus/node_modules/dom-serializer": { "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", + "license": "MIT", "dependencies": { "domelementtype": "^2.0.1", "entities": "^2.0.0" @@ -11594,8 +10834,7 @@ }, "node_modules/docusaurus/node_modules/domutils": { "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", + "license": "BSD-2-Clause", "dependencies": { "dom-serializer": "0", "domelementtype": "1" @@ -11603,13 +10842,11 @@ }, "node_modules/docusaurus/node_modules/domutils/node_modules/domelementtype": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" + "license": "BSD-2-Clause" }, "node_modules/docusaurus/node_modules/enzyme-adapter-react-16": { "version": "1.15.7", - "resolved": "https://registry.npmjs.org/enzyme-adapter-react-16/-/enzyme-adapter-react-16-1.15.7.tgz", - "integrity": "sha512-LtjKgvlTc/H7adyQcj+aq0P0H07LDL480WQl1gU512IUyaDo/sbOaNDdZsJXYW2XaoPqrLLE9KbZS+X2z6BASw==", + "license": "MIT", "dependencies": { "enzyme-adapter-utils": "^1.14.1", "enzyme-shallow-equal": "^1.0.5", @@ -11632,8 +10869,7 @@ }, "node_modules/docusaurus/node_modules/enzyme-adapter-utils": { "version": "1.14.1", - "resolved": "https://registry.npmjs.org/enzyme-adapter-utils/-/enzyme-adapter-utils-1.14.1.tgz", - "integrity": "sha512-JZgMPF1QOI7IzBj24EZoDpaeG/p8Os7WeBZWTJydpsH7JRStc7jYbHE4CmNQaLqazaGFyLM8ALWA3IIZvxW3PQ==", + "license": "MIT", "dependencies": { "airbnb-prop-types": "^2.16.0", "function.prototype.name": "^1.1.5", @@ -11652,24 +10888,21 @@ }, "node_modules/docusaurus/node_modules/escape-string-regexp": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": 
"sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/docusaurus/node_modules/filesize": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/filesize/-/filesize-6.1.0.tgz", - "integrity": "sha512-LpCHtPQ3sFx67z+uh2HnSyWSLLu5Jxo21795uRDuar/EOuYWXib5EmPaGIBuSnRqH2IODiKA2k5re/K9OnN/Yg==", + "license": "BSD-3-Clause", "engines": { "node": ">= 0.4.0" } }, "node_modules/docusaurus/node_modules/fill-range": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", - "integrity": "sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==", + "license": "MIT", "dependencies": { "extend-shallow": "^2.0.1", "is-number": "^3.0.0", @@ -11682,8 +10915,7 @@ }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin": { "version": "4.1.6", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-4.1.6.tgz", - "integrity": "sha512-DUxuQaKoqfNne8iikd14SAkh5uw4+8vNifp6gmA73yYNS6ywLIWSLD/n/mBzHQRpW3J7rbATEakmiA8JvkTyZw==", + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.5.5", "chalk": "^2.4.1", @@ -11700,8 +10932,7 @@ }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin/node_modules/ansi-styles": { "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "license": "MIT", "dependencies": { "color-convert": "^1.9.0" }, @@ -11711,8 +10942,7 @@ }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin/node_modules/chalk": { "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -11724,37 +10954,32 @@ }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin/node_modules/color-convert": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "license": "MIT", "dependencies": { "color-name": "1.1.3" } }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin/node_modules/color-name": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin/node_modules/escape-string-regexp": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin/node_modules/has-flag": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "license": "MIT", 
"engines": { "node": ">=4" } }, "node_modules/docusaurus/node_modules/fork-ts-checker-webpack-plugin/node_modules/supports-color": { "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "license": "MIT", "dependencies": { "has-flag": "^3.0.0" }, @@ -11764,8 +10989,7 @@ }, "node_modules/docusaurus/node_modules/fs-extra": { "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "license": "MIT", "dependencies": { "at-least-node": "^1.0.0", "graceful-fs": "^4.2.0", @@ -11778,8 +11002,7 @@ }, "node_modules/docusaurus/node_modules/globby": { "version": "11.0.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz", - "integrity": "sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==", + "license": "MIT", "dependencies": { "array-union": "^2.1.0", "dir-glob": "^3.0.1", @@ -11797,8 +11020,7 @@ }, "node_modules/docusaurus/node_modules/gzip-size": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-5.1.1.tgz", - "integrity": "sha512-FNHi6mmoHvs1mxZAds4PpdCS6QG8B4C1krxJsMutgxl5t3+GlRTzzI3NEkifXx2pVsOvJdOGSmIgDhQ55FwdPA==", + "license": "MIT", "dependencies": { "duplexer": "^0.1.1", "pify": "^4.0.1" @@ -11809,16 +11031,14 @@ }, "node_modules/docusaurus/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/docusaurus/node_modules/immer": { "version": "8.0.1", - "resolved": "https://registry.npmjs.org/immer/-/immer-8.0.1.tgz", - "integrity": "sha512-aqXhGP7//Gui2+UrEtvxZxSquQVXTpZ7KDxfCcKAF3Vysvw0CViVaW9RZ1j1xlIYqaaaipBoqdqeibkc18PNvA==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/immer" @@ -11826,8 +11046,7 @@ }, "node_modules/docusaurus/node_modules/import-fresh": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz", - "integrity": "sha512-eZ5H8rcgYazHbKC3PG4ClHNykCSxtAhxSSEM+2mb+7evD2CKF5V7c0dNum7AdpDh0ZdICwZY9sRSn8f+KH96sg==", + "license": "MIT", "dependencies": { "caller-path": "^2.0.0", "resolve-from": "^3.0.0" @@ -11838,13 +11057,11 @@ }, "node_modules/docusaurus/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/is-extendable": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4" }, @@ -11854,8 +11071,7 @@ }, "node_modules/docusaurus/node_modules/is-number": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + 
"license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -11865,8 +11081,7 @@ }, "node_modules/docusaurus/node_modules/is-number/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -11876,8 +11091,7 @@ }, "node_modules/docusaurus/node_modules/js-yaml": { "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -11888,8 +11102,7 @@ }, "node_modules/docusaurus/node_modules/loader-utils": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz", - "integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==", + "license": "MIT", "dependencies": { "big.js": "^5.2.2", "emojis-list": "^3.0.0", @@ -11901,13 +11114,11 @@ }, "node_modules/docusaurus/node_modules/mdn-data": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", - "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==" + "license": "CC0-1.0" }, "node_modules/docusaurus/node_modules/micromatch": { "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "license": "MIT", "dependencies": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -11929,8 +11140,7 @@ }, "node_modules/docusaurus/node_modules/micromatch/node_modules/extend-shallow": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "license": "MIT", "dependencies": { "assign-symbols": "^1.0.0", "is-extendable": "^1.0.1" @@ -11941,29 +11151,25 @@ }, "node_modules/docusaurus/node_modules/node-releases": { "version": "1.1.77", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.77.tgz", - "integrity": "sha512-rB1DUFUNAN4Gn9keO2K1efO35IDK7yKHCdCaIMvFO7yUYmmZYeDjnGKle26G4rwj+LKRQpjyUUvMkPglwGCYNQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/normalize-url": { "version": "3.3.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz", - "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/docusaurus/node_modules/nth-check": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "~1.0.0" } }, "node_modules/docusaurus/node_modules/open": { "version": "7.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", - "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", + "license": "MIT", "dependencies": { "is-docker": "^2.0.0", 
"is-wsl": "^2.1.1" @@ -11977,8 +11183,7 @@ }, "node_modules/docusaurus/node_modules/parse-json": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==", + "license": "MIT", "dependencies": { "error-ex": "^1.3.1", "json-parse-better-errors": "^1.0.1" @@ -11989,13 +11194,11 @@ }, "node_modules/docusaurus/node_modules/picocolors": { "version": "0.2.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", - "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" + "license": "ISC" }, "node_modules/docusaurus/node_modules/postcss": { "version": "7.0.39", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", - "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", + "license": "MIT", "dependencies": { "picocolors": "^0.2.1", "source-map": "^0.6.1" @@ -12010,8 +11213,7 @@ }, "node_modules/docusaurus/node_modules/postcss-calc": { "version": "7.0.5", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", - "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", + "license": "MIT", "dependencies": { "postcss": "^7.0.27", "postcss-selector-parser": "^6.0.2", @@ -12020,8 +11222,7 @@ }, "node_modules/docusaurus/node_modules/postcss-colormin": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz", - "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==", + "license": "MIT", "dependencies": { "browserslist": "^4.0.0", "color": "^3.0.0", @@ -12035,13 +11236,11 @@ }, "node_modules/docusaurus/node_modules/postcss-colormin/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-convert-values": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz", - "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0", "postcss-value-parser": "^3.0.0" @@ -12052,13 +11251,11 @@ }, "node_modules/docusaurus/node_modules/postcss-convert-values/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-discard-comments": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz", - "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0" }, @@ -12068,8 +11265,7 @@ }, "node_modules/docusaurus/node_modules/postcss-discard-duplicates": { "version": "4.0.2", - "resolved": 
"https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz", - "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0" }, @@ -12079,8 +11275,7 @@ }, "node_modules/docusaurus/node_modules/postcss-discard-empty": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz", - "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0" }, @@ -12090,8 +11285,7 @@ }, "node_modules/docusaurus/node_modules/postcss-discard-overridden": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz", - "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0" }, @@ -12101,8 +11295,7 @@ }, "node_modules/docusaurus/node_modules/postcss-merge-longhand": { "version": "4.0.11", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz", - "integrity": "sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==", + "license": "MIT", "dependencies": { "css-color-names": "0.0.4", "postcss": "^7.0.0", @@ -12115,13 +11308,11 @@ }, "node_modules/docusaurus/node_modules/postcss-merge-longhand/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-merge-rules": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz", - "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==", + "license": "MIT", "dependencies": { "browserslist": "^4.0.0", "caniuse-api": "^3.0.0", @@ -12136,8 +11327,7 @@ }, "node_modules/docusaurus/node_modules/postcss-merge-rules/node_modules/postcss-selector-parser": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", + "license": "MIT", "dependencies": { "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", @@ -12149,8 +11339,7 @@ }, "node_modules/docusaurus/node_modules/postcss-minify-font-values": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz", - "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0", "postcss-value-parser": "^3.0.0" @@ -12161,13 +11350,11 @@ }, "node_modules/docusaurus/node_modules/postcss-minify-font-values/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, 
"node_modules/docusaurus/node_modules/postcss-minify-gradients": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz", - "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==", + "license": "MIT", "dependencies": { "cssnano-util-get-arguments": "^4.0.0", "is-color-stop": "^1.0.0", @@ -12180,13 +11367,11 @@ }, "node_modules/docusaurus/node_modules/postcss-minify-gradients/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-minify-params": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz", - "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==", + "license": "MIT", "dependencies": { "alphanum-sort": "^1.0.0", "browserslist": "^4.0.0", @@ -12201,13 +11386,11 @@ }, "node_modules/docusaurus/node_modules/postcss-minify-params/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-minify-selectors": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz", - "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==", + "license": "MIT", "dependencies": { "alphanum-sort": "^1.0.0", "has": "^1.0.0", @@ -12220,8 +11403,7 @@ }, "node_modules/docusaurus/node_modules/postcss-minify-selectors/node_modules/postcss-selector-parser": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", + "license": "MIT", "dependencies": { "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", @@ -12233,8 +11415,7 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-charset": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz", - "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0" }, @@ -12244,8 +11425,7 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-display-values": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz", - "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==", + "license": "MIT", "dependencies": { "cssnano-util-get-match": "^4.0.0", "postcss": "^7.0.0", @@ -12257,13 +11437,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-display-values/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": 
"sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-normalize-positions": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz", - "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==", + "license": "MIT", "dependencies": { "cssnano-util-get-arguments": "^4.0.0", "has": "^1.0.0", @@ -12276,13 +11454,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-positions/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-normalize-repeat-style": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz", - "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==", + "license": "MIT", "dependencies": { "cssnano-util-get-arguments": "^4.0.0", "cssnano-util-get-match": "^4.0.0", @@ -12295,13 +11471,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-repeat-style/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-normalize-string": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz", - "integrity": "sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==", + "license": "MIT", "dependencies": { "has": "^1.0.0", "postcss": "^7.0.0", @@ -12313,13 +11487,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-string/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-normalize-timing-functions": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz", - "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==", + "license": "MIT", "dependencies": { "cssnano-util-get-match": "^4.0.0", "postcss": "^7.0.0", @@ -12331,13 +11503,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-timing-functions/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-normalize-unicode": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz", - "integrity": 
"sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==", + "license": "MIT", "dependencies": { "browserslist": "^4.0.0", "postcss": "^7.0.0", @@ -12349,13 +11519,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-unicode/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-normalize-url": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz", - "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==", + "license": "MIT", "dependencies": { "is-absolute-url": "^2.0.0", "normalize-url": "^3.0.0", @@ -12368,13 +11536,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-url/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-normalize-whitespace": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz", - "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0", "postcss-value-parser": "^3.0.0" @@ -12385,13 +11551,11 @@ }, "node_modules/docusaurus/node_modules/postcss-normalize-whitespace/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-ordered-values": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz", - "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==", + "license": "MIT", "dependencies": { "cssnano-util-get-arguments": "^4.0.0", "postcss": "^7.0.0", @@ -12403,13 +11567,11 @@ }, "node_modules/docusaurus/node_modules/postcss-ordered-values/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-reduce-initial": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz", - "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==", + "license": "MIT", "dependencies": { "browserslist": "^4.0.0", "caniuse-api": "^3.0.0", @@ -12422,8 +11584,7 @@ }, "node_modules/docusaurus/node_modules/postcss-reduce-transforms": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz", - "integrity": 
"sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==", + "license": "MIT", "dependencies": { "cssnano-util-get-match": "^4.0.0", "has": "^1.0.0", @@ -12436,13 +11597,11 @@ }, "node_modules/docusaurus/node_modules/postcss-reduce-transforms/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-svgo": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.3.tgz", - "integrity": "sha512-NoRbrcMWTtUghzuKSoIm6XV+sJdvZ7GZSc3wdBN0W19FTtp2ko8NqLsgoh/m9CzNhU3KLPvQmjIwtaNFkaFTvw==", + "license": "MIT", "dependencies": { "postcss": "^7.0.0", "postcss-value-parser": "^3.0.0", @@ -12454,13 +11613,11 @@ }, "node_modules/docusaurus/node_modules/postcss-svgo/node_modules/postcss-value-parser": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/postcss-unique-selectors": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz", - "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==", + "license": "MIT", "dependencies": { "alphanum-sort": "^1.0.0", "postcss": "^7.0.0", @@ -12472,8 +11629,7 @@ }, "node_modules/docusaurus/node_modules/prompts": { "version": "2.4.0", - "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.0.tgz", - "integrity": "sha512-awZAKrk3vN6CroQukBL+R9051a4R3zCZBlJm/HBfrSZ8iTpYix3VX1vU4mveiLpiwmOJT4wokTF9m6HUk4KqWQ==", + "license": "MIT", "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" @@ -12484,8 +11640,7 @@ }, "node_modules/docusaurus/node_modules/react": { "version": "16.14.0", - "resolved": "https://registry.npmjs.org/react/-/react-16.14.0.tgz", - "integrity": "sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1", @@ -12497,8 +11652,7 @@ }, "node_modules/docusaurus/node_modules/react-dev-utils": { "version": "11.0.4", - "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-11.0.4.tgz", - "integrity": "sha512-dx0LvIGHcOPtKbeiSUM4jqpBl3TcY7CDjZdfOIcKeznE7BWr9dg0iPG90G5yfVQ+p/rGNMXdbfStvzQZEVEi4A==", + "license": "MIT", "dependencies": { "@babel/code-frame": "7.10.4", "address": "1.1.2", @@ -12531,8 +11685,7 @@ }, "node_modules/docusaurus/node_modules/react-dev-utils/node_modules/ansi-styles": { "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "license": "MIT", "dependencies": { "color-convert": "^1.9.0" }, @@ -12542,8 +11695,7 @@ }, "node_modules/docusaurus/node_modules/react-dev-utils/node_modules/chalk": { "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "license": "MIT", 
"dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -12555,37 +11707,32 @@ }, "node_modules/docusaurus/node_modules/react-dev-utils/node_modules/chalk/node_modules/escape-string-regexp": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/docusaurus/node_modules/react-dev-utils/node_modules/color-convert": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "license": "MIT", "dependencies": { "color-name": "1.1.3" } }, "node_modules/docusaurus/node_modules/react-dev-utils/node_modules/color-name": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/react-dev-utils/node_modules/has-flag": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/docusaurus/node_modules/react-dev-utils/node_modules/supports-color": { "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "license": "MIT", "dependencies": { "has-flag": "^3.0.0" }, @@ -12595,8 +11742,7 @@ }, "node_modules/docusaurus/node_modules/react-dom": { "version": "16.14.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.14.0.tgz", - "integrity": "sha512-1gCeQXDLoIqMgqD3IO2Ah9bnf0w9kzhwN5q4FGnHZ67hBm9yePzB5JJAIQCc8x3pFnNlwFq4RidZggNAAkzWWw==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1", @@ -12609,8 +11755,7 @@ }, "node_modules/docusaurus/node_modules/react-test-renderer": { "version": "16.14.0", - "resolved": "https://registry.npmjs.org/react-test-renderer/-/react-test-renderer-16.14.0.tgz", - "integrity": "sha512-L8yPjqPE5CZO6rKsKXRO/rVPiaCOy0tQQJbC+UjPNlobl5mad59lvPjwFsQHTvL03caVDIVr9x9/OSgDe6I5Eg==", + "license": "MIT", "dependencies": { "object-assign": "^4.1.1", "prop-types": "^15.6.2", @@ -12623,16 +11768,14 @@ }, "node_modules/docusaurus/node_modules/resolve-from": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", - "integrity": "sha512-GnlH6vxLymXJNMBo7XP1fJIzBFbdYt49CuTwmB/6N53t+kMPRMFKz783LlQ4tv28XoQfMWinAJX6WCGf2IlaIw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/docusaurus/node_modules/scheduler": { "version": "0.19.1", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.19.1.tgz", - "integrity": "sha512-n/zwRWRYSUj0/3g/otKDRPMh6qv2SYMWNq85IEa8iZyAv8od9zDYpGSnpBEjNgcMNq6Scbu5KfIPxNF72R/2EA==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1" @@ -12640,21 +11783,18 @@ }, "node_modules/docusaurus/node_modules/semver": { "version": "5.7.1", - "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/docusaurus/node_modules/shell-quote": { "version": "1.7.2", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz", - "integrity": "sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/sitemap": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-3.2.2.tgz", - "integrity": "sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg==", + "license": "MIT", "dependencies": { "lodash.chunk": "^4.2.0", "lodash.padstart": "^4.6.1", @@ -12668,16 +11808,14 @@ }, "node_modules/docusaurus/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/docusaurus/node_modules/strip-ansi": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "license": "MIT", "dependencies": { "ansi-regex": "^5.0.0" }, @@ -12687,8 +11825,7 @@ }, "node_modules/docusaurus/node_modules/stylehacks": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz", - "integrity": "sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==", + "license": "MIT", "dependencies": { "browserslist": "^4.0.0", "postcss": "^7.0.0", @@ -12700,8 +11837,7 @@ }, "node_modules/docusaurus/node_modules/stylehacks/node_modules/postcss-selector-parser": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", + "license": "MIT", "dependencies": { "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", @@ -12713,8 +11849,7 @@ }, "node_modules/docusaurus/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -12724,9 +11859,7 @@ }, "node_modules/docusaurus/node_modules/svgo": { "version": "1.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", - "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", - "deprecated": "This SVGO version is no longer supported. 
Upgrade to v2.x.x.", + "license": "MIT", "dependencies": { "chalk": "^2.4.1", "coa": "^2.0.2", @@ -12751,8 +11884,7 @@ }, "node_modules/docusaurus/node_modules/svgo/node_modules/ansi-styles": { "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "license": "MIT", "dependencies": { "color-convert": "^1.9.0" }, @@ -12762,8 +11894,7 @@ }, "node_modules/docusaurus/node_modules/svgo/node_modules/chalk": { "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -12775,37 +11906,32 @@ }, "node_modules/docusaurus/node_modules/svgo/node_modules/color-convert": { "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "license": "MIT", "dependencies": { "color-name": "1.1.3" } }, "node_modules/docusaurus/node_modules/svgo/node_modules/color-name": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + "license": "MIT" }, "node_modules/docusaurus/node_modules/svgo/node_modules/escape-string-regexp": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/docusaurus/node_modules/svgo/node_modules/has-flag": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/docusaurus/node_modules/svgo/node_modules/supports-color": { "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "license": "MIT", "dependencies": { "has-flag": "^3.0.0" }, @@ -12815,16 +11941,14 @@ }, "node_modules/docusaurus/node_modules/tapable": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/docusaurus/node_modules/to-regex-range": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==", + "license": "MIT", "dependencies": { "is-number": "^3.0.0", "repeat-string": "^1.6.1" @@ -12835,21 +11959,18 @@ }, "node_modules/docusaurus/node_modules/tr46": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", - "integrity": 
"sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==", + "license": "MIT", "dependencies": { "punycode": "^2.1.0" } }, "node_modules/docusaurus/node_modules/webidl-conversions": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", - "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" + "license": "BSD-2-Clause" }, "node_modules/docusaurus/node_modules/whatwg-url": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", - "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", + "license": "MIT", "dependencies": { "lodash.sortby": "^4.7.0", "tr46": "^1.0.1", @@ -12858,16 +11979,14 @@ }, "node_modules/dom-converter": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "license": "MIT", "dependencies": { "utila": "~0.4" } }, "node_modules/dom-serializer": { "version": "1.3.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", - "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", + "license": "MIT", "dependencies": { "domelementtype": "^2.0.1", "domhandler": "^4.2.0", @@ -12879,8 +11998,7 @@ }, "node_modules/domain-browser": { "version": "4.23.0", - "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-4.23.0.tgz", - "integrity": "sha512-ArzcM/II1wCCujdCNyQjXrAFwS4mrLh4C7DZWlaI8mdh7h3BfKdNd3bKXITfl2PT9FtfQqaGvhi1vPRQPimjGA==", + "license": "Artistic-2.0", "engines": { "node": ">=10" }, @@ -12890,19 +12008,17 @@ }, "node_modules/domelementtype": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/fb55" } - ] + ], + "license": "BSD-2-Clause" }, "node_modules/domhandler": { "version": "4.3.1", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", - "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "license": "BSD-2-Clause", "dependencies": { "domelementtype": "^2.2.0" }, @@ -12915,21 +12031,22 @@ }, "node_modules/domify": { "version": "1.4.2", - "resolved": "https://registry.npmjs.org/domify/-/domify-1.4.2.tgz", - "integrity": "sha512-m4yreHcUWHBncGVV7U+yQzc12vIlq0jMrtHZ5mW6dQMiL/7skSYNVX9wqKwOtyO9SGCgevrAFEgOCAHmamHTUA==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/dompurify": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.9.tgz", - "integrity": "sha512-uyb4NDIvQ3hRn6NiC+SIFaP4mJ/MdXlvtunaqK9Bn6dD3RuB/1S/gasEjDHD8eiaqdSael2vBv+hOs7Y+jhYOQ==" + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.2.tgz", + "integrity": "sha512-YMM+erhdZ2nkZ4fTNRTSI94mb7VG7uVF5vj5Zde7tImgnhZE3R6YW/IACGIHb2ux+QkEXMhe591N+5jWOmL4Zw==", + "optionalDependencies": { + "@types/trusted-types": "^2.0.7" + } }, "node_modules/domutils": { "version": "2.8.0", - "resolved": 
"https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "license": "BSD-2-Clause", "dependencies": { "dom-serializer": "^1.0.1", "domelementtype": "^2.2.0", @@ -12941,8 +12058,7 @@ }, "node_modules/dot-case": { "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "license": "MIT", "dependencies": { "no-case": "^3.0.4", "tslib": "^2.0.3" @@ -12950,8 +12066,7 @@ }, "node_modules/dot-prop": { "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", + "license": "MIT", "dependencies": { "is-obj": "^2.0.0" }, @@ -12961,16 +12076,14 @@ }, "node_modules/dot-prop/node_modules/is-obj": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/download": { "version": "6.2.5", - "resolved": "https://registry.npmjs.org/download/-/download-6.2.5.tgz", - "integrity": "sha512-DpO9K1sXAST8Cpzb7kmEhogJxymyVUd5qz/vCOSyvwtp2Klj2XcDt5YUuasgxka44SxF0q5RriKIwJmQHG2AuA==", + "license": "MIT", "dependencies": { "caw": "^2.0.0", "content-disposition": "^0.5.2", @@ -12990,24 +12103,21 @@ }, "node_modules/download/node_modules/file-type": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", - "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/download/node_modules/get-stream": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/download/node_modules/got": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/got/-/got-7.1.0.tgz", - "integrity": "sha512-Y5WMo7xKKq1muPsxD+KmrR8DH5auG7fBdDVueZwETwV6VytKyU9OX/ddpq2/1hp1vIPvVb4T81dKQz3BivkNLw==", + "license": "MIT", "dependencies": { "decompress-response": "^3.2.0", "duplexer3": "^0.1.4", @@ -13030,24 +12140,21 @@ }, "node_modules/download/node_modules/is-plain-obj": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/download/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/download/node_modules/make-dir": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "license": "MIT", "dependencies": { 
"pify": "^3.0.0" }, @@ -13057,32 +12164,28 @@ }, "node_modules/download/node_modules/p-cancelable": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.3.0.tgz", - "integrity": "sha512-RVbZPLso8+jFeq1MfNvgXtCRED2raz/dKpacfTNxsx6pLEpEomM7gah6VeHSYV3+vo0OAi4MkArtQcWWXuQoyw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/download/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/download/node_modules/prepend-http": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz", - "integrity": "sha512-PhmXi5XmoyKw1Un4E+opM2KcsJInDvKyuOumcjjw3waw86ZNjHwVUOOWLc4bCzLdcKNaWBH9e99sbWzDQsVaYg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/download/node_modules/url-parse-lax": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz", - "integrity": "sha512-BVA4lR5PIviy2PMseNd2jbFQ+jwSwQGdJejf5ctd1rEXt0Ypd7yanUK9+lYechVlN5VaTJGsu2U/3MDDu6KgBA==", + "license": "MIT", "dependencies": { "prepend-http": "^1.0.1" }, @@ -13092,31 +12195,26 @@ }, "node_modules/downloadjs": { "version": "1.4.7", - "resolved": "https://registry.npmjs.org/downloadjs/-/downloadjs-1.4.7.tgz", - "integrity": "sha512-LN1gO7+u9xjU5oEScGFKvXhYf7Y/empUIIEAGBs1LzUq/rg5duiDrkuH5A2lQGd5jfMOb9X9usDa2oVXwJ0U/Q==" + "license": "MIT" }, "node_modules/duplexer": { "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + "license": "MIT" }, "node_modules/duplexer2": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", - "integrity": "sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==", + "license": "BSD-3-Clause", "dependencies": { "readable-stream": "^2.0.2" } }, "node_modules/duplexer2/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/duplexer2/node_modules/readable-stream": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -13129,26 +12227,22 @@ }, "node_modules/duplexer2/node_modules/string_decoder": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/duplexer3": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" + "license": "BSD-3-Clause" }, "node_modules/eastasianwidth": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", 
- "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + "license": "MIT" }, "node_modules/ecc-jsbn": { "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "license": "MIT", "dependencies": { "jsbn": "~0.1.0", "safer-buffer": "^2.1.0" @@ -13156,23 +12250,20 @@ }, "node_modules/ee-first": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.4.506", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.506.tgz", - "integrity": "sha512-xxGct4GPAKSRlrLBtJxJFYy74W11zX6PO9GyHgl/U+2s3Dp0ZEwAklDfNHXOWcvH7zWMpsmgbR0ggEuaYAVvHA==" + "version": "1.5.68", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.68.tgz", + "integrity": "sha512-FgMdJlma0OzUYlbrtZ4AeXjKxKPk6KT8WOP8BjcqxWtlg8qyJQjRzPJzUtUn5GBg1oQ26hFs7HOOHJMYiJRnvQ==" }, "node_modules/elkjs": { "version": "0.8.2", - "resolved": "https://registry.npmjs.org/elkjs/-/elkjs-0.8.2.tgz", - "integrity": "sha512-L6uRgvZTH+4OF5NE/MBbzQx/WYpru1xCBE9respNj6qznEewGUIfhzmm7horWWxbNO2M0WckQypGctR8lH79xQ==" + "license": "EPL-2.0" }, "node_modules/elliptic": { "version": "6.5.4", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz", - "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==", + "license": "MIT", "dependencies": { "bn.js": "^4.11.9", "brorand": "^1.1.0", @@ -13185,14 +12276,12 @@ }, "node_modules/elliptic/node_modules/bn.js": { "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "license": "MIT" }, "node_modules/emittery": { "version": "0.13.1", - "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", - "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -13202,21 +12291,18 @@ }, "node_modules/emoji-regex": { "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + "license": "MIT" }, "node_modules/emojis-list": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "license": "MIT", "engines": { "node": ">= 4" } }, "node_modules/emoticon": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz", - "integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -13224,24 +12310,21 @@ }, "node_modules/encodeurl": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": 
"sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/end-of-stream": { "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "license": "MIT", "dependencies": { "once": "^1.4.0" } }, "node_modules/enhanced-resolve": { "version": "5.10.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.10.0.tgz", - "integrity": "sha512-T0yTFjdpldGY8PmuXXR0PyQ1ufZpEGiHVrp7zHKB7jdR4qlmZHhONVM5AQOAWXuF/w3dnHbEQVrNptJgt7F+cQ==", + "license": "MIT", "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" @@ -13252,16 +12335,14 @@ }, "node_modules/entities": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "license": "BSD-2-Clause", "funding": { "url": "https://github.com/fb55/entities?sponsor=1" } }, "node_modules/enzyme": { "version": "3.11.0", - "resolved": "https://registry.npmjs.org/enzyme/-/enzyme-3.11.0.tgz", - "integrity": "sha512-Dw8/Gs4vRjxY6/6i9wU0V+utmQO9kvh9XLnz3LIudviOnVYDEe2ec+0k+NQoMamn1VrjKgCUOWj5jG/5M5M0Qw==", + "license": "MIT", "dependencies": { "array.prototype.flat": "^1.2.3", "cheerio": "^1.0.0-rc.3", @@ -13292,8 +12373,7 @@ }, "node_modules/enzyme-shallow-equal": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/enzyme-shallow-equal/-/enzyme-shallow-equal-1.0.5.tgz", - "integrity": "sha512-i6cwm7hN630JXenxxJFBKzgLC3hMTafFQXflvzHgPmDhOBhxUWDe8AeRv1qp2/uWJ2Y8z5yLWMzmAfkTOiOCZg==", + "license": "MIT", "dependencies": { "has": "^1.0.3", "object-is": "^1.1.5" @@ -13304,29 +12384,24 @@ }, "node_modules/error": { "version": "7.2.1", - "resolved": "https://registry.npmjs.org/error/-/error-7.2.1.tgz", - "integrity": "sha512-fo9HBvWnx3NGUKMvMwB/CBCMMrfEJgbDTVDEkPygA3Bdd3lM1OyCd+rbQ8BwnpF6GdVeOLDNmyL4N5Bg80ZvdA==", "dependencies": { "string-template": "~0.2.1" } }, "node_modules/error-ex": { "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "license": "MIT", "dependencies": { "is-arrayish": "^0.2.1" } }, "node_modules/error-ex/node_modules/is-arrayish": { "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" + "license": "MIT" }, "node_modules/es-abstract": { "version": "1.21.2", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.2.tgz", - "integrity": "sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==", + "license": "MIT", "dependencies": { "array-buffer-byte-length": "^1.0.0", "available-typed-arrays": "^1.0.5", @@ -13372,18 +12447,15 @@ }, "node_modules/es-array-method-boxes-properly": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz", - "integrity": "sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA==" + "license": "MIT" }, "node_modules/es-module-lexer": { "version": "0.9.3", - "resolved": 
"https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", - "integrity": "sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==" + "license": "MIT" }, "node_modules/es-set-tostringtag": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", - "integrity": "sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==", + "license": "MIT", "dependencies": { "get-intrinsic": "^1.1.3", "has": "^1.0.3", @@ -13395,16 +12467,14 @@ }, "node_modules/es-shim-unscopables": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", - "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", + "license": "MIT", "dependencies": { "has": "^1.0.3" } }, "node_modules/es-to-primitive": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "license": "MIT", "dependencies": { "is-callable": "^1.1.4", "is-date-object": "^1.0.1", @@ -13419,42 +12489,37 @@ }, "node_modules/es6-promise": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-3.3.1.tgz", - "integrity": "sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==" + "license": "MIT" }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "engines": { "node": ">=6" } }, "node_modules/escape-goat": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/escape-html": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" + "license": "MIT" }, "node_modules/escape-string-regexp": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/eslint-scope": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^4.1.1" @@ -13465,8 +12530,7 @@ }, "node_modules/esprima": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "license": "BSD-2-Clause", "bin": { "esparse": "bin/esparse.js", "esvalidate": 
"bin/esvalidate.js" @@ -13477,8 +12541,7 @@ }, "node_modules/esrecurse": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "license": "BSD-2-Clause", "dependencies": { "estraverse": "^5.2.0" }, @@ -13488,32 +12551,28 @@ }, "node_modules/esrecurse/node_modules/estraverse": { "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "license": "BSD-2-Clause", "engines": { "node": ">=4.0" } }, "node_modules/estraverse": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "license": "BSD-2-Clause", "engines": { "node": ">=4.0" } }, "node_modules/esutils": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "license": "BSD-2-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/eta": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/eta/-/eta-2.0.0.tgz", - "integrity": "sha512-NqE7S2VmVwgMS8yBxsH4VgNQjNjLq1gfGU0u9I6Cjh468nPRMoDfGdK9n1p/3Dvsw3ebklDkZsFAnKJ9sefjBA==", + "license": "MIT", "engines": { "node": ">=6.0.0" }, @@ -13523,16 +12582,13 @@ }, "node_modules/etag": { "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/eval": { "version": "0.1.8", - "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", - "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", "dependencies": { "@types/node": "*", "require-like": ">= 0.1.1" @@ -13543,29 +12599,25 @@ }, "node_modules/event-target-shim": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/eventemitter3": { "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + "license": "MIT" }, "node_modules/events": { "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", "engines": { "node": ">=0.8.x" } }, "node_modules/evp_bytestokey": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "license": "MIT", "dependencies": { "md5.js": "^1.3.4", "safe-buffer": "^5.1.1" @@ -13573,8 +12625,7 @@ }, "node_modules/exec-buffer": { "version": "3.2.0", - "resolved": 
"https://registry.npmjs.org/exec-buffer/-/exec-buffer-3.2.0.tgz", - "integrity": "sha512-wsiD+2Tp6BWHoVv3B+5Dcx6E7u5zky+hUwOHjuH2hKSLR3dvRmX8fk8UD8uqQixHs4Wk6eDmiegVrMPjKj7wpA==", + "license": "MIT", "dependencies": { "execa": "^0.7.0", "p-finally": "^1.0.0", @@ -13588,8 +12639,7 @@ }, "node_modules/exec-buffer/node_modules/cross-spawn": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", + "license": "MIT", "dependencies": { "lru-cache": "^4.0.1", "shebang-command": "^1.2.0", @@ -13598,8 +12648,7 @@ }, "node_modules/exec-buffer/node_modules/execa": { "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha512-RztN09XglpYI7aBBrJCPW95jEH7YF1UEPOoX9yDhUTPdp7mK+CQvnLTuD10BNXZ3byLTu2uehZ8EcKT/4CGiFw==", + "license": "MIT", "dependencies": { "cross-spawn": "^5.0.1", "get-stream": "^3.0.0", @@ -13615,24 +12664,21 @@ }, "node_modules/exec-buffer/node_modules/get-stream": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/exec-buffer/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/exec-buffer/node_modules/lru-cache": { "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "license": "ISC", "dependencies": { "pseudomap": "^1.0.2", "yallist": "^2.1.2" @@ -13640,8 +12686,7 @@ }, "node_modules/exec-buffer/node_modules/npm-run-path": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", + "license": "MIT", "dependencies": { "path-key": "^2.0.0" }, @@ -13651,24 +12696,21 @@ }, "node_modules/exec-buffer/node_modules/path-key": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/exec-buffer/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/exec-buffer/node_modules/rimraf": { "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "license": "ISC", "dependencies": { "glob": "^7.1.3" }, @@ -13678,8 +12720,7 @@ }, "node_modules/exec-buffer/node_modules/shebang-command": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": 
"sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "license": "MIT", "dependencies": { "shebang-regex": "^1.0.0" }, @@ -13689,16 +12730,14 @@ }, "node_modules/exec-buffer/node_modules/shebang-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/exec-buffer/node_modules/which": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -13708,13 +12747,11 @@ }, "node_modules/exec-buffer/node_modules/yallist": { "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==" + "license": "ISC" }, "node_modules/execa": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "license": "MIT", "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", @@ -13735,8 +12772,7 @@ }, "node_modules/execa/node_modules/get-stream": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -13746,8 +12782,7 @@ }, "node_modules/executable": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/executable/-/executable-4.1.1.tgz", - "integrity": "sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==", + "license": "MIT", "dependencies": { "pify": "^2.2.0" }, @@ -13757,21 +12792,17 @@ }, "node_modules/executable/node_modules/pify": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/exenv": { "version": "1.2.2", - "resolved": "https://registry.npmjs.org/exenv/-/exenv-1.2.2.tgz", - "integrity": "sha512-Z+ktTxTwv9ILfgKCk32OX3n/doe+OcLTRtqK9pcL+JsP3J1/VW8Uvl4ZjLlKqeW4rzK4oesDOGMEMRIZqtP4Iw==" + "license": "BSD-3-Clause" }, "node_modules/exit": { "version": "0.1.2", - "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", - "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", "dev": true, "engines": { "node": ">= 0.8.0" @@ -13779,8 +12810,7 @@ }, "node_modules/expand-brackets": { "version": "2.1.4", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", - "integrity": "sha512-w/ozOKR9Obk3qoWeY/WDi6MFta9AoMR+zud60mdnbniMcBxRuFJyDt2LdX/14A1UABeqk+Uk+LDfUpvoGKppZA==", + "license": "MIT", "dependencies": { "debug": "^2.3.3", "define-property": "^0.2.5", @@ -13796,16 +12826,14 @@ }, "node_modules/expand-brackets/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/expand-brackets/node_modules/define-property": { "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "license": "MIT", "dependencies": { "is-descriptor": "^0.1.0" }, @@ -13815,8 +12843,7 @@ }, "node_modules/expand-brackets/node_modules/is-accessor-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -13826,8 +12853,7 @@ }, "node_modules/expand-brackets/node_modules/is-accessor-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -13837,13 +12863,11 @@ }, "node_modules/expand-brackets/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/expand-brackets/node_modules/is-data-descriptor": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -13853,8 +12877,7 @@ }, "node_modules/expand-brackets/node_modules/is-data-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -13864,8 +12887,7 @@ }, "node_modules/expand-brackets/node_modules/is-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "license": "MIT", "dependencies": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -13877,21 +12899,18 @@ }, "node_modules/expand-brackets/node_modules/kind-of": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/expand-brackets/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/expand-range": { "version": "1.8.2", - "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", - "integrity": 
"sha512-AFASGfIlnIbkKPQwX1yHaDjFvh/1gyKJODme52V6IORh69uEYgZp0o9C+qsIGNVEiuuhQU0CSSl++Rlegg1qvA==", + "license": "MIT", "dependencies": { "fill-range": "^2.1.0" }, @@ -13901,8 +12920,7 @@ }, "node_modules/expand-range/node_modules/fill-range": { "version": "2.2.4", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", - "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", + "license": "MIT", "dependencies": { "is-number": "^2.1.0", "isobject": "^2.0.0", @@ -13916,13 +12934,11 @@ }, "node_modules/expand-range/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/expand-range/node_modules/is-number": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", - "integrity": "sha512-QUzH43Gfb9+5yckcrSA0VBDwEtDUchrk4F6tfJZQuNzDJbEDB9cZNzSfXGQ1jqmdDY/kl41lUOWM9syA8z8jlg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -13932,13 +12948,11 @@ }, "node_modules/expand-range/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/expand-range/node_modules/isobject": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha512-+OUdGJlgjOBZDfxnDjYYG6zp487z0JGNQq3cYQYg5f5hKR+syHMsaztzGeml/4kGG55CSpKSpWTY+jYGgsHLgA==", + "license": "MIT", "dependencies": { "isarray": "1.0.0" }, @@ -13948,8 +12962,7 @@ }, "node_modules/expand-range/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -13958,16 +12971,17 @@ } }, "node_modules/expect": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/expect/-/expect-29.6.4.tgz", - "integrity": "sha512-F2W2UyQ8XYyftHT57dtfg8Ue3X5qLgm2sSug0ivvLRH/VKNRL/pDxg/TH7zVzbQB0tu80clNFy6LU7OS/VSEKA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/expect-utils": "^29.6.4", + "@jest/expect-utils": "^29.7.0", "jest-get-type": "^29.6.3", - "jest-matcher-utils": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3" + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -13975,8 +12989,7 @@ }, "node_modules/express": { "version": "4.18.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz", - "integrity": "sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==", + "license": "MIT", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", @@ -14016,13 +13029,11 @@ }, "node_modules/express/node_modules/array-flatten": { "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + "license": "MIT" }, "node_modules/express/node_modules/content-disposition": { "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", "dependencies": { "safe-buffer": "5.2.1" }, @@ -14032,34 +13043,28 @@ }, "node_modules/express/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/express/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/express/node_modules/path-to-regexp": { "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "license": "MIT" }, "node_modules/express/node_modules/range-parser": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/express/node_modules/safe-buffer": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "funding": [ { "type": "github", @@ -14073,12 +13078,12 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/ext-list": { "version": "2.2.2", - "resolved": "https://registry.npmjs.org/ext-list/-/ext-list-2.2.2.tgz", - "integrity": "sha512-u+SQgsubraE6zItfVA0tBuCBhfU9ogSRnsvygI7wht9TS510oLkBRXBsqopeUG/GBOIQyKZO9wjTqIu/sf5zFA==", + "license": "MIT", "dependencies": { "mime-db": "^1.28.0" }, @@ -14088,8 +13093,7 @@ }, "node_modules/ext-name": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ext-name/-/ext-name-5.0.0.tgz", - "integrity": "sha512-yblEwXAbGv1VQDmow7s38W77hzAgJAO50ztBLMcUyUBfxv1HC+LGwtiEN+Co6LtlqT/5uwVOxsD4TNIilWhwdQ==", + "license": "MIT", "dependencies": { "ext-list": "^2.0.0", "sort-keys-length": "^1.0.0" @@ -14100,13 +13104,11 @@ }, "node_modules/extend": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + "license": "MIT" }, "node_modules/extend-shallow": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "license": "MIT", "dependencies": { "is-extendable": "^0.1.0" }, @@ -14116,8 +13118,7 @@ }, "node_modules/extglob": { "version": "2.0.4", - "resolved": 
"https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "license": "MIT", "dependencies": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -14134,8 +13135,7 @@ }, "node_modules/extglob/node_modules/define-property": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", + "license": "MIT", "dependencies": { "is-descriptor": "^1.0.0" }, @@ -14145,22 +13145,19 @@ }, "node_modules/extsprintf": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", "engines": [ "node >=0.6.0" - ] + ], + "license": "MIT" }, "node_modules/fast-deep-equal": { "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + "license": "MIT" }, "node_modules/fast-folder-size": { "version": "1.6.1", - "resolved": "https://registry.npmjs.org/fast-folder-size/-/fast-folder-size-1.6.1.tgz", - "integrity": "sha512-F3tRpfkAzb7TT2JNKaJUglyuRjRa+jelQD94s9OSqkfEeytLmupCqQiD+H2KoIXGtp4pB5m4zNmv5m2Ktcr+LA==", "hasInstallScript": true, + "license": "ISC", "dependencies": { "unzipper": "^0.10.11" }, @@ -14170,8 +13167,7 @@ }, "node_modules/fast-glob": { "version": "3.2.11", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz", - "integrity": "sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==", + "license": "MIT", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -14185,31 +13181,26 @@ }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + "license": "MIT" }, "node_modules/fast-safe-stringify": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", - "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==" + "license": "MIT" }, "node_modules/fast-url-parser": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", - "integrity": "sha1-9K8+qfNNiicc9YrSs3WfQx8LMY0=", + "license": "MIT", "dependencies": { "punycode": "^1.3.2" } }, "node_modules/fast-url-parser/node_modules/punycode": { "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" + "license": "MIT" }, "node_modules/fast-xml-parser": { "version": "4.1.3", - "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-4.1.3.tgz", - "integrity": "sha512-LsNDahCiCcJPe8NO7HijcnukHB24tKbfDDA5IILx9dmW3Frb52lhbeX6MPNUSvyGNfav2VTYpJ/OqkRoVLrh2Q==", + "license": "MIT", "dependencies": { "strnum": "^1.0.5" }, @@ -14223,16 +13214,14 @@ }, "node_modules/fastq": { "version": "1.13.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz", - "integrity": 
"sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==", + "license": "ISC", "dependencies": { "reusify": "^1.0.4" } }, "node_modules/faye-websocket": { "version": "0.11.4", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", - "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "license": "Apache-2.0", "dependencies": { "websocket-driver": ">=0.5.1" }, @@ -14242,25 +13231,22 @@ }, "node_modules/fb-watchman": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", - "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", "dev": true, + "license": "Apache-2.0", "dependencies": { "bser": "2.1.1" } }, "node_modules/fbemitter": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/fbemitter/-/fbemitter-3.0.0.tgz", - "integrity": "sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==", + "license": "BSD-3-Clause", "dependencies": { "fbjs": "^3.0.0" } }, "node_modules/fbjs": { "version": "3.0.5", - "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.5.tgz", - "integrity": "sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg==", + "license": "MIT", "dependencies": { "cross-fetch": "^3.1.5", "fbjs-css-vars": "^1.0.0", @@ -14273,21 +13259,18 @@ }, "node_modules/fbjs-css-vars": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz", - "integrity": "sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ==" + "license": "MIT" }, "node_modules/fd-slicer": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", + "license": "MIT", "dependencies": { "pend": "~1.2.0" } }, "node_modules/feed": { "version": "4.2.2", - "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", - "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "license": "MIT", "dependencies": { "xml-js": "^1.6.11" }, @@ -14296,9 +13279,9 @@ } }, "node_modules/feelers": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/feelers/-/feelers-1.3.1.tgz", - "integrity": "sha512-vynmIHhjttmT0wfzbI+Nmi84wLbLwUt83NXo5YTQMReIjRwgHhQpxs7koixX/flJIlTG8M4eukc1U1oQAYkhNw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/feelers/-/feelers-1.4.0.tgz", + "integrity": "sha512-CGa/7ILuqoqTaeYeoKsg/4tzu2es9sEEJTmSjdu0lousZBw4V9gcYhHYFNmbrSrKmbAVfOzj6/DsymGJWFIOeg==", "dependencies": { "@bpmn-io/cm-theme": "^0.1.0-alpha.2", "@bpmn-io/feel-lint": "^1.2.0", @@ -14314,29 +13297,53 @@ "@lezer/markdown": "^1.1.0", "feelin": "^3.0.1", "lezer-feel": "^1.2.4", - "min-dom": "^4.1.0" + "min-dom": "^5.0.0" }, "engines": { "node": "*" } }, + "node_modules/feelers/node_modules/domify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/domify/-/domify-2.0.0.tgz", + "integrity": "sha512-rmvrrmWQPD/X1A/nPBfIVg4r05792QdG9Z4Prk6oQG0F9zBMDkr0GKAdds1wjb2dq1rTz/ywc4ZxpZbgz0tttg==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/feelers/node_modules/min-dom": { + "version": "5.1.1", + "resolved": 
"https://registry.npmjs.org/min-dom/-/min-dom-5.1.1.tgz", + "integrity": "sha512-GaKUlguMAofd3OJsB0OkP17i5kucKqErgVCJxPawO9l5NwIPnr28SAr99zzlzMCWWljISBYrnZVWdE2Q92YGFQ==", + "dependencies": { + "domify": "^2.0.0", + "min-dash": "^4.2.1" + } + }, "node_modules/feelin": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/feelin/-/feelin-3.0.1.tgz", - "integrity": "sha512-aYXH3UYkM2eopg3scgNRNEo/ecwizKH6qTqkEu5nSLMMlMgfhLDhWrLl7ChG5iHspO9o4Q2YSP1o4wW8q0L2Qw==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/feelin/-/feelin-3.2.0.tgz", + "integrity": "sha512-GFDbHsTYk7YXO1tyw1dOjb7IODeAZvNIosdGZThUwPx5XcD/XhO0hnPZXsIbAzSsIdrgGlTEEdby9fZ2gixysA==", "dependencies": { - "@lezer/lr": "^1.3.9", - "lezer-feel": "^1.2.5", - "luxon": "^3.1.0" + "@lezer/lr": "^1.4.2", + "lezer-feel": "^1.4.0", + "luxon": "^3.5.0" }, "engines": { "node": "*" } }, + "node_modules/fflate": { + "version": "0.4.8", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.4.8.tgz", + "integrity": "sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==" + }, "node_modules/figures": { "version": "1.7.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", - "integrity": "sha512-UxKlfCRuCBxSXU4C6t9scbDyWZ4VlaFFdojKtzJuSkuOBQ5CNFum+zZXFwHjo+CxBC1t6zlYPgHIgFjL8ggoEQ==", + "license": "MIT", "dependencies": { "escape-string-regexp": "^1.0.5", "object-assign": "^4.1.0" @@ -14346,39 +13353,16 @@ } }, "node_modules/file-drops": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/file-drops/-/file-drops-0.4.0.tgz", - "integrity": "sha512-dPLRxrQ/sWHyU1DMf72doyyFuqeR/T8hJ97coJHXmdeHvqMTdOMJ/PLsHKjQzDHC8TBQO0rDUinDEXz3WGTnQA==", - "dependencies": { - "min-dom": "^3.1.1" - } - }, - "node_modules/file-drops/node_modules/component-event": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/component-event/-/component-event-0.1.4.tgz", - "integrity": "sha512-GMwOG8MnUHP1l8DZx1ztFO0SJTFnIzZnBDkXAj8RM2ntV2A6ALlDxgbMY1Fvxlg6WPQ+5IM/a6vg4PEYbjg/Rw==" - }, - "node_modules/file-drops/node_modules/min-dash": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/min-dash/-/min-dash-3.8.1.tgz", - "integrity": "sha512-evumdlmIlg9mbRVPbC4F5FuRhNmcMS5pvuBUbqb1G9v09Ro0ImPEgz5n3khir83lFok1inKqVDjnKEg3GpDxQg==" - }, - "node_modules/file-drops/node_modules/min-dom": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/min-dom/-/min-dom-3.2.1.tgz", - "integrity": "sha512-v6YCmnDzxk4rRJntWTUiwggLupPw/8ZSRqUq0PDaBwVZEO/wYzCH4SKVBV+KkEvf3u0XaWHly5JEosPtqRATZA==", + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/file-drops/-/file-drops-0.5.0.tgz", + "integrity": "sha512-ZaENKwVySae4RhEGjh1gEE1wMnIIPG6XqtOwHNQYSl7RNwUHoRGVVspe+BrW7cUFseHNIit3Oy9Z/HPIEU5XWA==", "dependencies": { - "component-event": "^0.1.4", - "domify": "^1.3.1", - "indexof": "0.0.1", - "matches-selector": "^1.2.0", - "min-dash": "^3.8.1" + "min-dom": "^4.0.3" } }, "node_modules/file-loader": { "version": "6.2.0", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", - "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "license": "MIT", "dependencies": { "loader-utils": "^2.0.0", "schema-utils": "^3.0.0" @@ -14396,29 +13380,25 @@ }, "node_modules/file-saver": { "version": "2.0.5", - "resolved": "https://registry.npmjs.org/file-saver/-/file-saver-2.0.5.tgz", - "integrity": 
"sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==" + "license": "MIT" }, "node_modules/file-type": { "version": "10.11.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-10.11.0.tgz", - "integrity": "sha512-uzk64HRpUZyTGZtVuvrjP0FYxzQrBf4rojot6J65YMEbwBLB0CWm0CLojVpwpmFmxcE/lkvYICgfcGozbBq6rw==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/filename-reserved-regex": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz", - "integrity": "sha512-lc1bnsSr4L4Bdif8Xb/qrtokGbq5zlsms/CYH8PP+WtCkGNF65DPiQY8vG3SakEdRn8Dlnm+gW/qWKKjS5sZzQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/filenamify": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/filenamify/-/filenamify-2.1.0.tgz", - "integrity": "sha512-ICw7NTT6RsDp2rnYKVd8Fu4cr6ITzGy3+u4vUujPkabyaz+03F24NWEX7fs5fp+kBonlaqPH8fAO2NM+SXt/JA==", + "license": "MIT", "dependencies": { "filename-reserved-regex": "^2.0.0", "strip-outer": "^1.0.0", @@ -14430,16 +13410,14 @@ }, "node_modules/filesize": { "version": "8.0.7", - "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", - "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "license": "BSD-3-Clause", "engines": { "node": ">= 0.4.0" } }, "node_modules/fill-range": { "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -14449,16 +13427,14 @@ }, "node_modules/filter-obj": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-2.0.2.tgz", - "integrity": "sha512-lO3ttPjHZRfjMcxWKb1j1eDhTFsu4meeR3lnMcnBFhk6RuLhvEiuALu2TlfL310ph4lCYYwgF/ElIjdP739tdg==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/finalhandler": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "license": "MIT", "dependencies": { "debug": "2.6.9", "encodeurl": "~1.0.2", @@ -14474,21 +13450,18 @@ }, "node_modules/finalhandler/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/finalhandler/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/find-cache-dir": { "version": "3.3.2", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", - "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", + "license": "MIT", "dependencies": { "commondir": "^1.0.1", "make-dir": "^3.0.2", @@ -14503,8 +13476,7 @@ }, "node_modules/find-up": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": 
"sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "license": "MIT", "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" @@ -14515,8 +13487,7 @@ }, "node_modules/find-versions": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-3.2.0.tgz", - "integrity": "sha512-P8WRou2S+oe222TOCHitLy8zj+SIsVJh52VP4lvXkaFVnOFFdoWv1H1Jjvel1aI6NCFOAaeAVm8qrI0odiLcww==", + "license": "MIT", "dependencies": { "semver-regex": "^2.0.0" }, @@ -14526,13 +13497,11 @@ }, "node_modules/flatpickr": { "version": "4.6.13", - "resolved": "https://registry.npmjs.org/flatpickr/-/flatpickr-4.6.13.tgz", - "integrity": "sha512-97PMG/aywoYpB4IvbvUJi0RQi8vearvU0oov1WW3k0WZPBMrTQVqekSX5CjSG/M4Q3i6A/0FKXC7RyAoAUUSPw==" + "license": "MIT" }, "node_modules/flux": { "version": "4.0.4", - "resolved": "https://registry.npmjs.org/flux/-/flux-4.0.4.tgz", - "integrity": "sha512-NCj3XlayA2UsapRpM7va6wU1+9rE5FIL7qoMcmxWHRzbp0yujihMBm9BBHZ1MDIk5h5o2Bl6eGiCe8rYELAmYw==", + "license": "BSD-3-Clause", "dependencies": { "fbemitter": "^3.0.0", "fbjs": "^3.0.1" @@ -14542,23 +13511,22 @@ } }, "node_modules/focus-trap": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/focus-trap/-/focus-trap-7.5.4.tgz", - "integrity": "sha512-N7kHdlgsO/v+iD/dMoJKtsSqs5Dz/dXZVebRgJw23LDk+jMi/974zyiOYDziY2JPp8xivq9BmUGwIJMiuSBi7w==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/focus-trap/-/focus-trap-7.6.2.tgz", + "integrity": "sha512-9FhUxK1hVju2+AiQIDJ5Dd//9R2n2RAfJ0qfhF4IHGHgcoEUTMpbTeG/zbEuwaiYXfuAH6XE0/aCyxDdRM+W5w==", "dependencies": { "tabbable": "^6.2.0" } }, "node_modules/follow-redirects": { "version": "1.14.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", - "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==", "funding": [ { "type": "individual", "url": "https://github.com/sponsors/RubenVerborgh" } ], + "license": "MIT", "engines": { "node": ">=4.0" }, @@ -14570,29 +13538,25 @@ }, "node_modules/for-each": { "version": "0.3.3", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", - "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", + "license": "MIT", "dependencies": { "is-callable": "^1.1.3" } }, "node_modules/for-in": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/foreach": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.6.tgz", - "integrity": "sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg==" + "license": "MIT" }, "node_modules/foreground-child": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", - "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "license": "ISC", "dependencies": { "cross-spawn": "^7.0.0", "signal-exit": "^4.0.1" @@ -14606,8 +13570,7 @@ }, "node_modules/foreground-child/node_modules/signal-exit": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": 
"sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "license": "ISC", "engines": { "node": ">=14" }, @@ -14617,16 +13580,14 @@ }, "node_modules/forever-agent": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==", + "license": "Apache-2.0", "engines": { "node": "*" } }, "node_modules/fork-ts-checker-webpack-plugin": { "version": "6.5.2", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.2.tgz", - "integrity": "sha512-m5cUmF30xkZ7h4tWUgTAcEaKmUW7tfyUyTqNNOz7OxWJ0v1VWKTcOvH8FWHUwSjlW/356Ijc9vi3XfcPstpQKA==", + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.8.3", "@types/json-schema": "^7.0.5", @@ -14663,8 +13624,7 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -14677,8 +13637,7 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -14692,8 +13651,7 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -14703,13 +13661,11 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", - "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "license": "MIT", "dependencies": { "@types/parse-json": "^4.0.0", "import-fresh": "^3.1.0", @@ -14723,8 +13679,7 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "license": "MIT", "dependencies": { "at-least-node": "^1.0.0", "graceful-fs": "^4.2.0", @@ -14737,16 +13692,14 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, 
"node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { "version": "2.7.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", - "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.4", "ajv": "^6.12.2", @@ -14762,8 +13715,7 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -14773,16 +13725,14 @@ }, "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/form-data": { "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.6", @@ -14794,16 +13744,14 @@ }, "node_modules/forwarded": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/fraction.js": { "version": "4.3.4", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.4.tgz", - "integrity": "sha512-pwiTgt0Q7t+GHZA4yaLjObx4vXmmdcS0iSJ19o8d/goUGgItX9UZWKWNnLHehxviD8wU2IWRsnR8cD5+yOJP2Q==", + "license": "MIT", "engines": { "node": "*" }, @@ -14814,8 +13762,7 @@ }, "node_modules/fragment-cache": { "version": "0.2.1", - "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", - "integrity": "sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==", + "license": "MIT", "dependencies": { "map-cache": "^0.2.2" }, @@ -14825,16 +13772,14 @@ }, "node_modules/fresh": { "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/from2": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", - "integrity": "sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g==", + "license": "MIT", "dependencies": { "inherits": "^2.0.1", "readable-stream": "^2.0.0" @@ -14842,13 +13787,11 @@ }, "node_modules/from2/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/from2/node_modules/readable-stream": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": 
"sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -14861,21 +13804,18 @@ }, "node_modules/from2/node_modules/string_decoder": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/fs-constants": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" + "license": "MIT" }, "node_modules/fs-extra": { "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "license": "MIT", "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", @@ -14887,31 +13827,15 @@ }, "node_modules/fs-monkey": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz", - "integrity": "sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q==" + "license": "Unlicense" }, "node_modules/fs.realpath": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } + "license": "ISC" }, "node_modules/fstream": { "version": "1.0.12", - "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.12.tgz", - "integrity": "sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==", + "license": "ISC", "dependencies": { "graceful-fs": "^4.1.2", "inherits": "~2.0.0", @@ -14924,8 +13848,7 @@ }, "node_modules/fstream/node_modules/rimraf": { "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "license": "ISC", "dependencies": { "glob": "^7.1.3" }, @@ -14935,13 +13858,11 @@ }, "node_modules/function-bind": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "license": "MIT" }, "node_modules/function.prototype.name": { "version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", @@ -14957,16 +13878,14 @@ }, "node_modules/functions-have-names": { "version": "1.2.3", - "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", - "integrity": 
"sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/gaze": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/gaze/-/gaze-1.1.3.tgz", - "integrity": "sha512-BRdNm8hbWzFzWHERTrejLqwHDfS4GibPoq5wjTPIoJHoBtKGPg3xAFfxmM+9ztbXelxcf2hwQcaz1PtmFeue8g==", + "license": "MIT", "dependencies": { "globule": "^1.0.0" }, @@ -14976,24 +13895,21 @@ }, "node_modules/gensync": { "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/get-caller-file": { "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "license": "ISC", "engines": { "node": "6.* || 8.* || >= 10.*" } }, "node_modules/get-intrinsic": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "license": "MIT", "dependencies": { "function-bind": "^1.1.1", "has": "^1.0.3", @@ -15005,22 +13921,19 @@ }, "node_modules/get-own-enumerable-property-symbols": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", - "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + "license": "ISC" }, "node_modules/get-package-type": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", - "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=8.0.0" } }, "node_modules/get-proxy": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/get-proxy/-/get-proxy-2.1.0.tgz", - "integrity": "sha512-zmZIaQTWnNQb4R4fJUEp/FC51eZsc6EkErspy3xtIYStaq8EB/hDIWipxsal+E8rz0qD7f2sL/NA9Xee4RInJw==", + "license": "MIT", "dependencies": { "npm-conf": "^1.1.0" }, @@ -15030,16 +13943,14 @@ }, "node_modules/get-stdin": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", - "integrity": "sha512-F5aQMywwJ2n85s4hJPTT9RPxGmubonuB10MNYo17/xph174n2MIR33HRguhzVag10O/npM7SPk73LMZNP+FaWw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/get-stream": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "license": "MIT", "dependencies": { "pump": "^3.0.0" }, @@ -15049,8 +13960,7 @@ }, "node_modules/get-symbol-description": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", - "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "get-intrinsic": "^1.1.1" @@ -15064,25 +13974,22 @@ }, "node_modules/get-value": { "version": "2.0.6", - "resolved": 
"https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", - "integrity": "sha512-Ln0UQDlxH1BapMu3GPtf7CuYNwRZf2gwCuPqbyG6pB8WfmFpzqcy4xtAaAMUhnNqjMKTiCPZG2oMT3YSx8U2NA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/getpass": { "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "license": "MIT", "dependencies": { "assert-plus": "^1.0.0" } }, "node_modules/gifsicle": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/gifsicle/-/gifsicle-4.0.1.tgz", - "integrity": "sha512-A/kiCLfDdV+ERV/UB+2O41mifd+RxH8jlRG8DMxZO84Bma/Fw0htqZ+hY2iaalLRNyUu7tYZQslqUBJxBggxbg==", "hasInstallScript": true, + "license": "MIT", "dependencies": { "bin-build": "^3.0.0", "bin-wrapper": "^4.0.0", @@ -15098,8 +14005,7 @@ }, "node_modules/gifsicle/node_modules/cross-spawn": { "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "license": "MIT", "dependencies": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -15113,8 +14019,7 @@ }, "node_modules/gifsicle/node_modules/execa": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "license": "MIT", "dependencies": { "cross-spawn": "^6.0.0", "get-stream": "^4.0.0", @@ -15130,16 +14035,14 @@ }, "node_modules/gifsicle/node_modules/is-stream": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/gifsicle/node_modules/npm-run-path": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", + "license": "MIT", "dependencies": { "path-key": "^2.0.0" }, @@ -15149,24 +14052,21 @@ }, "node_modules/gifsicle/node_modules/path-key": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/gifsicle/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/gifsicle/node_modules/shebang-command": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", + "license": "MIT", "dependencies": { "shebang-regex": "^1.0.0" }, @@ -15176,16 +14076,14 @@ }, "node_modules/gifsicle/node_modules/shebang-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": 
"sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/gifsicle/node_modules/which": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -15195,13 +14093,11 @@ }, "node_modules/github-slugger": { "version": "1.5.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", - "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + "license": "ISC" }, "node_modules/glob": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", - "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -15219,8 +14115,7 @@ }, "node_modules/glob-parent": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", "dependencies": { "is-glob": "^4.0.1" }, @@ -15230,13 +14125,11 @@ }, "node_modules/glob-to-regexp": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + "license": "BSD-2-Clause" }, "node_modules/global-dirs": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.0.tgz", - "integrity": "sha512-v8ho2DS5RiCjftj1nD9NmnfaOzTdud7RRnVd9kFNOjqZbISlx5DQ+OrTkywgd0dIt7oFCvKetZSHoHcP3sDdiA==", + "license": "MIT", "dependencies": { "ini": "2.0.0" }, @@ -15249,16 +14142,14 @@ }, "node_modules/global-dirs/node_modules/ini": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "license": "ISC", "engines": { "node": ">=10" } }, "node_modules/global-modules": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "license": "MIT", "dependencies": { "global-prefix": "^3.0.0" }, @@ -15268,8 +14159,7 @@ }, "node_modules/global-prefix": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "license": "MIT", "dependencies": { "ini": "^1.3.5", "kind-of": "^6.0.2", @@ -15281,8 +14171,7 @@ }, "node_modules/global-prefix/node_modules/which": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -15292,16 +14181,14 @@ }, "node_modules/globals": { "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": 
"sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/globalthis": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", + "license": "MIT", "dependencies": { "define-properties": "^1.1.3" }, @@ -15314,8 +14201,7 @@ }, "node_modules/globby": { "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "license": "MIT", "dependencies": { "array-union": "^2.1.0", "dir-glob": "^3.0.1", @@ -15333,8 +14219,7 @@ }, "node_modules/globule": { "version": "1.3.4", - "resolved": "https://registry.npmjs.org/globule/-/globule-1.3.4.tgz", - "integrity": "sha512-OPTIfhMBh7JbBYDpa5b+Q5ptmMWKwcNcFSR/0c6t8V4f3ZAVBEsKNY37QdVqmLRYSMhOUGYrY0QhSoEpzGr/Eg==", + "license": "MIT", "dependencies": { "glob": "~7.1.1", "lodash": "^4.17.21", @@ -15346,8 +14231,7 @@ }, "node_modules/globule/node_modules/glob": { "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -15365,8 +14249,7 @@ }, "node_modules/gopd": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "license": "MIT", "dependencies": { "get-intrinsic": "^1.1.3" }, @@ -15376,8 +14259,7 @@ }, "node_modules/got": { "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", + "license": "MIT", "dependencies": { "@sindresorhus/is": "^0.14.0", "@szmarczak/http-timer": "^1.1.2", @@ -15397,13 +14279,11 @@ }, "node_modules/graceful-fs": { "version": "4.2.9", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", - "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" + "license": "ISC" }, "node_modules/gray-matter": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", - "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "license": "MIT", "dependencies": { "js-yaml": "^3.13.1", "kind-of": "^6.0.2", @@ -15416,16 +14296,14 @@ }, "node_modules/gray-matter/node_modules/argparse": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/gray-matter/node_modules/js-yaml": { "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -15436,9 +14314,7 @@ }, "node_modules/gulp-header": { "version": 
"1.8.12", - "resolved": "https://registry.npmjs.org/gulp-header/-/gulp-header-1.8.12.tgz", - "integrity": "sha512-lh9HLdb53sC7XIZOYzTXM4lFuXElv3EVkSDhsd7DoJBj7hm+Ni7D3qYbb+Rr8DuM8nRanBvkVO9d7askreXGnQ==", - "deprecated": "Removed event-stream from gulp-header", + "license": "MIT", "dependencies": { "concat-with-sourcemaps": "*", "lodash.template": "^4.4.0", @@ -15447,8 +14323,7 @@ }, "node_modules/gzip-size": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", - "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "license": "MIT", "dependencies": { "duplexer": "^0.1.2" }, @@ -15461,22 +14336,18 @@ }, "node_modules/handle-thing": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + "license": "MIT" }, "node_modules/har-schema": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==", + "license": "ISC", "engines": { "node": ">=4" } }, "node_modules/har-validator": { "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "deprecated": "this library is no longer supported", + "license": "MIT", "dependencies": { "ajv": "^6.12.3", "har-schema": "^2.0.0" @@ -15487,8 +14358,7 @@ }, "node_modules/has": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "license": "MIT", "dependencies": { "function-bind": "^1.1.1" }, @@ -15498,8 +14368,7 @@ }, "node_modules/has-ansi": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg==", + "license": "MIT", "dependencies": { "ansi-regex": "^2.0.0" }, @@ -15509,32 +14378,28 @@ }, "node_modules/has-ansi/node_modules/ansi-regex": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/has-bigints": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", - "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-flag": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/has-property-descriptors": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + 
"license": "MIT", "dependencies": { "get-intrinsic": "^1.1.1" }, @@ -15544,8 +14409,7 @@ }, "node_modules/has-proto": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -15555,16 +14419,14 @@ }, "node_modules/has-symbol-support-x": { "version": "1.4.2", - "resolved": "https://registry.npmjs.org/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz", - "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==", + "license": "MIT", "engines": { "node": "*" } }, "node_modules/has-symbols": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -15574,8 +14436,7 @@ }, "node_modules/has-to-string-tag-x": { "version": "1.4.1", - "resolved": "https://registry.npmjs.org/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz", - "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==", + "license": "MIT", "dependencies": { "has-symbol-support-x": "^1.4.1" }, @@ -15585,8 +14446,7 @@ }, "node_modules/has-tostringtag": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "license": "MIT", "dependencies": { "has-symbols": "^1.0.2" }, @@ -15599,8 +14459,7 @@ }, "node_modules/has-value": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", - "integrity": "sha512-IBXk4GTsLYdQ7Rvt+GRBrFSVEkmuOUy4re0Xjd9kJSUQpnTrWR4/y9RpfexN9vkAPMFuQoeWKwqzPozRTlasGw==", + "license": "MIT", "dependencies": { "get-value": "^2.0.6", "has-values": "^1.0.0", @@ -15612,8 +14471,7 @@ }, "node_modules/has-values": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", - "integrity": "sha512-ODYZC64uqzmtfGMEAX/FvZiRyWLpAC3vYnNunURUnkGVTS+mI0smVsWaPydRBsE3g+ok7h960jChO8mFcWlHaQ==", + "license": "MIT", "dependencies": { "is-number": "^3.0.0", "kind-of": "^4.0.0" @@ -15624,13 +14482,11 @@ }, "node_modules/has-values/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/has-values/node_modules/is-number": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -15640,8 +14496,7 @@ }, "node_modules/has-values/node_modules/is-number/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -15651,8 +14506,7 @@ }, "node_modules/has-values/node_modules/kind-of": { "version": "4.0.0", - 
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", - "integrity": "sha512-24XsCxmEbRwEDbz/qz3stgin8TTzZ1ESR56OMCN0ujYg+vRutNSiOj9bHH9u85DKgXguraugV5sFuvbD4FW/hw==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -15662,16 +14516,14 @@ }, "node_modules/has-yarn": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/hash-base": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", + "license": "MIT", "dependencies": { "inherits": "^2.0.4", "readable-stream": "^3.6.0", @@ -15683,8 +14535,6 @@ }, "node_modules/hash-base/node_modules/safe-buffer": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "funding": [ { "type": "github", @@ -15698,12 +14548,12 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/hash.js": { "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "license": "MIT", "dependencies": { "inherits": "^2.0.3", "minimalistic-assert": "^1.0.1" @@ -15711,8 +14561,7 @@ }, "node_modules/hast-to-hyperscript": { "version": "9.0.1", - "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz", - "integrity": "sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.3", "comma-separated-tokens": "^1.0.0", @@ -15729,8 +14578,7 @@ }, "node_modules/hast-util-from-parse5": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz", - "integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==", + "license": "MIT", "dependencies": { "@types/parse5": "^5.0.0", "hastscript": "^6.0.0", @@ -15746,8 +14594,7 @@ }, "node_modules/hast-util-parse-selector": { "version": "2.2.5", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", - "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -15755,8 +14602,7 @@ }, "node_modules/hast-util-raw": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-6.0.1.tgz", - "integrity": "sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "hast-util-from-parse5": "^6.0.0", @@ -15776,13 +14622,11 @@ }, "node_modules/hast-util-raw/node_modules/parse5": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" + "license": "MIT" }, 
"node_modules/hast-util-to-parse5": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz", - "integrity": "sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==", + "license": "MIT", "dependencies": { "hast-to-hyperscript": "^9.0.0", "property-information": "^5.0.0", @@ -15797,8 +14641,7 @@ }, "node_modules/hast-util-whitespace": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-2.0.1.tgz", - "integrity": "sha512-nAxA0v8+vXSBDt3AnRUNjyRIQ0rD+ntpbAp4LnPkumc5M9yUbSMa4XDU9Q6etY4f1Wp4bNgvc1yjiZtsTTrSng==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -15806,8 +14649,7 @@ }, "node_modules/hastscript": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", - "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "comma-separated-tokens": "^1.0.0", @@ -15822,36 +14664,30 @@ }, "node_modules/he": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "license": "MIT", "bin": { "he": "bin/he" } }, "node_modules/heap": { "version": "0.2.7", - "resolved": "https://registry.npmjs.org/heap/-/heap-0.2.7.tgz", - "integrity": "sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg==" + "license": "MIT" }, "node_modules/hex-color-regex": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" + "license": "MIT" }, "node_modules/highlight.js": { "version": "9.18.5", - "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-9.18.5.tgz", - "integrity": "sha512-a5bFyofd/BHCX52/8i8uJkjr9DYwXIPnM/plwI6W7ezItLGqzt7X2G2nXuYSfsIJdkwwj/g9DG1LkcGJI/dDoA==", - "deprecated": "Support has ended for 9.x series. 
Upgrade to @latest", "hasInstallScript": true, + "license": "BSD-3-Clause", "engines": { "node": "*" } }, "node_modules/history": { "version": "4.10.1", - "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", - "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.1.2", "loose-envify": "^1.2.0", @@ -15863,8 +14699,7 @@ }, "node_modules/hmac-drbg": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==", + "license": "MIT", "dependencies": { "hash.js": "^1.0.3", "minimalistic-assert": "^1.0.0", @@ -15873,21 +14708,18 @@ }, "node_modules/hoist-non-react-statics": { "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "license": "BSD-3-Clause", "dependencies": { "react-is": "^16.7.0" } }, "node_modules/hosted-git-info": { "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==" + "license": "ISC" }, "node_modules/hpack.js": { "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "license": "MIT", "dependencies": { "inherits": "^2.0.1", "obuf": "^1.0.0", @@ -15897,13 +14729,11 @@ }, "node_modules/hpack.js/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/hpack.js/node_modules/readable-stream": { "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -15916,26 +14746,22 @@ }, "node_modules/hpack.js/node_modules/string_decoder": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/hsl-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz", - "integrity": "sha512-M5ezZw4LzXbBKMruP+BNANf0k+19hDQMgpzBIYnya//Al+fjNct9Wf3b1WedLqdEs2hKBvxq/jh+DsHJLj0F9A==" + "license": "MIT" }, "node_modules/hsla-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz", - "integrity": "sha512-7Wn5GMLuHBjZCb2bTmnDOycho0p/7UVaAeqXZGbHrBCl6Yd/xDhQJAXe6Ga9AXJH2I5zY1dEdYw2u1UptnSBJA==" + "license": "MIT" }, "node_modules/html-element-map": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/html-element-map/-/html-element-map-1.3.1.tgz", - "integrity": 
"sha512-6XMlxrAFX4UEEGxctfFnmrFaaZFNf9i5fNuV5wZ3WWQ4FVaNP1aX1LkX9j2mfEx1NpjeE/rL3nmgEn23GdFmrg==", + "license": "MIT", "dependencies": { "array.prototype.filter": "^1.0.0", "call-bind": "^1.0.2" @@ -15946,19 +14772,16 @@ }, "node_modules/html-entities": { "version": "2.3.3", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.3.tgz", - "integrity": "sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA==" + "license": "MIT" }, "node_modules/html-escaper": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/html-minifier-terser": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "license": "MIT", "dependencies": { "camel-case": "^4.1.2", "clean-css": "^5.2.2", @@ -15977,16 +14800,14 @@ }, "node_modules/html-minifier-terser/node_modules/commander": { "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", "engines": { "node": ">= 12" } }, "node_modules/html-tags": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.2.0.tgz", - "integrity": "sha512-vy7ClnArOZwCnqZgvv+ddgHgJiAFXe3Ge9ML5/mBctVJoUoYPCdxVucOywjDARn6CVoh3dRSFdPHy2sX80L0Wg==", + "license": "MIT", "engines": { "node": ">=8" }, @@ -15996,8 +14817,7 @@ }, "node_modules/html-void-elements": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-1.0.5.tgz", - "integrity": "sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -16005,8 +14825,7 @@ }, "node_modules/html-webpack-plugin": { "version": "5.5.0", - "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.0.tgz", - "integrity": "sha512-sy88PC2cRTVxvETRgUHFrL4No3UxvcH8G1NepGhqaTT+GXN2kTamqasot0inS5hXeg1cMbFDt27zzo9p35lZVw==", + "license": "MIT", "dependencies": { "@types/html-minifier-terser": "^6.0.0", "html-minifier-terser": "^6.0.2", @@ -16027,8 +14846,7 @@ }, "node_modules/html2canvas": { "version": "1.4.1", - "resolved": "https://registry.npmjs.org/html2canvas/-/html2canvas-1.4.1.tgz", - "integrity": "sha512-fPU6BHNpsyIhr8yyMpTLLxAbkaK8ArIBcmZIRiBLiDhjeqvXolaEmDGmELFuX9I4xDcaKKcJl+TKZLqruBbmWA==", + "license": "MIT", "dependencies": { "css-line-break": "^2.1.0", "text-segmentation": "^1.0.3" @@ -16039,8 +14857,6 @@ }, "node_modules/htmlparser2": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", - "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", "funding": [ "https://github.com/fb55/htmlparser2?sponsor=1", { @@ -16048,6 +14864,7 @@ "url": "https://github.com/sponsors/fb55" } ], + "license": "MIT", "dependencies": { "domelementtype": "^2.0.1", "domhandler": "^4.0.0", @@ -16057,18 +14874,15 @@ }, "node_modules/http-cache-semantics": { "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" + "license": "BSD-2-Clause" }, "node_modules/http-deceiver": { "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + "license": "MIT" }, "node_modules/http-errors": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", @@ -16082,13 +14896,11 @@ }, "node_modules/http-parser-js": { "version": "0.5.8", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", - "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + "license": "MIT" }, "node_modules/http-proxy": { "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "license": "MIT", "dependencies": { "eventemitter3": "^4.0.0", "follow-redirects": "^1.0.0", @@ -16100,8 +14912,7 @@ }, "node_modules/http-proxy-middleware": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", - "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "license": "MIT", "dependencies": { "@types/http-proxy": "^1.17.8", "http-proxy": "^1.18.1", @@ -16123,8 +14934,7 @@ }, "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", - "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -16134,13 +14944,11 @@ }, "node_modules/http-reasons": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/http-reasons/-/http-reasons-0.1.0.tgz", - "integrity": "sha512-P6kYh0lKZ+y29T2Gqz+RlC9WBLhKe8kDmcJ+A+611jFfxdPsbMRQ5aNmFRM3lENqFkK+HTTL+tlQviAiv0AbLQ==" + "license": "Apache-2.0" }, "node_modules/http-signature": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", + "license": "MIT", "dependencies": { "assert-plus": "^1.0.0", "jsprim": "^1.2.2", @@ -16153,27 +14961,23 @@ }, "node_modules/http2-client": { "version": "1.3.5", - "resolved": "https://registry.npmjs.org/http2-client/-/http2-client-1.3.5.tgz", - "integrity": "sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==" + "license": "MIT" }, "node_modules/https-browserify": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", - "integrity": "sha512-J+FkSdyD+0mA0N+81tMotaRMfSL9SGi+xpD3T6YApKsc3bGSXJlfXri3VyFOeYkfLRQisDk1W+jIFFKBeUBbBg==" + "license": "MIT" }, "node_modules/human-signals": { "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "license": "Apache-2.0", "engines": { "node": ">=10.17.0" } }, "node_modules/husky": { "version": "8.0.3", - "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.3.tgz", - "integrity": "sha512-+dQSyqPh4x1hlO1swXBiNb2HzTDN1I2IGLQx1GrBuiqFJfoMrnZWwVmatvSiO+Iz8fBUnf+lekwNo4c2LlXItg==", "dev": true, + "license": "MIT", "bin": { "husky": "lib/bin.js" }, @@ -16186,8 +14990,7 @@ }, "node_modules/iconv-lite": { "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -16197,8 +15000,7 @@ }, "node_modules/icss-utils": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", - "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "license": "ISC", "engines": { "node": "^10 || ^12 || >= 14" }, @@ -16208,13 +15010,10 @@ }, "node_modules/ids": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/ids/-/ids-1.0.5.tgz", - "integrity": "sha512-XQ0yom/4KWTL29sLG+tyuycy7UmeaM/79GRtSJq6IG9cJGIPeBz5kwDCguie3TwxaMNIc3WtPi0cTa1XYHicpw==" + "license": "MIT" }, "node_modules/ieee754": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", "funding": [ { "type": "github", @@ -16228,20 +15027,19 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "BSD-3-Clause" }, "node_modules/ignore": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", - "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==", + "license": "MIT", "engines": { "node": ">= 4" } }, "node_modules/image-size": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", - "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "license": "MIT", "dependencies": { "queue": "6.0.2" }, @@ -16254,8 +15052,7 @@ }, "node_modules/imagemin": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/imagemin/-/imagemin-6.1.0.tgz", - "integrity": "sha512-8ryJBL1CN5uSHpiBMX0rJw79C9F9aJqMnjGnrd/1CafegpNuA81RBAAru/jQQEOWlOJJlpRnlcVFF6wq+Ist0A==", + "license": "MIT", "dependencies": { "file-type": "^10.7.0", "globby": "^8.0.1", @@ -16270,8 +15067,7 @@ }, "node_modules/imagemin-gifsicle": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/imagemin-gifsicle/-/imagemin-gifsicle-6.0.1.tgz", - "integrity": "sha512-kuu47c6iKDQ6R9J10xCwL0lgs0+sMz3LRHqRcJ2CRBWdcNmo3T5hUaM8hSZfksptZXJLGKk8heSAvwtSdB1Fng==", + "license": "MIT", "dependencies": { "exec-buffer": "^3.0.0", "gifsicle": "^4.0.0", @@ -16283,8 +15079,7 @@ }, "node_modules/imagemin-jpegtran": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/imagemin-jpegtran/-/imagemin-jpegtran-6.0.0.tgz", - "integrity": "sha512-Ih+NgThzqYfEWv9t58EItncaaXIHR0u9RuhKa8CtVBlMBvY0dCIxgQJQCfwImA4AV1PMfmUKlkyIHJjb7V4z1g==", + "license": "MIT", "dependencies": { "exec-buffer": "^3.0.0", "is-jpg": "^2.0.0", @@ 
-16296,8 +15091,7 @@ }, "node_modules/imagemin-optipng": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/imagemin-optipng/-/imagemin-optipng-6.0.0.tgz", - "integrity": "sha512-FoD2sMXvmoNm/zKPOWdhKpWdFdF9qiJmKC17MxZJPH42VMAp17/QENI/lIuP7LCUnLVAloO3AUoTSNzfhpyd8A==", + "license": "MIT", "dependencies": { "exec-buffer": "^3.0.0", "is-png": "^1.0.0", @@ -16309,8 +15103,7 @@ }, "node_modules/imagemin-svgo": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/imagemin-svgo/-/imagemin-svgo-7.1.0.tgz", - "integrity": "sha512-0JlIZNWP0Luasn1HT82uB9nU9aa+vUj6kpT+MjPW11LbprXC+iC4HDwn1r4Q2/91qj4iy9tRZNsFySMlEpLdpg==", + "license": "MIT", "dependencies": { "is-svg": "^4.2.1", "svgo": "^1.3.2" @@ -16324,16 +15117,14 @@ }, "node_modules/imagemin-svgo/node_modules/argparse": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/imagemin-svgo/node_modules/css-select": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "^1.0.0", "css-what": "^3.2.1", @@ -16343,8 +15134,7 @@ }, "node_modules/imagemin-svgo/node_modules/css-tree": { "version": "1.0.0-alpha.37", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", - "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", + "license": "MIT", "dependencies": { "mdn-data": "2.0.4", "source-map": "^0.6.1" @@ -16355,8 +15145,7 @@ }, "node_modules/imagemin-svgo/node_modules/css-what": { "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", + "license": "BSD-2-Clause", "engines": { "node": ">= 6" }, @@ -16366,8 +15155,7 @@ }, "node_modules/imagemin-svgo/node_modules/dom-serializer": { "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", + "license": "MIT", "dependencies": { "domelementtype": "^2.0.1", "entities": "^2.0.0" @@ -16375,8 +15163,7 @@ }, "node_modules/imagemin-svgo/node_modules/domutils": { "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", + "license": "BSD-2-Clause", "dependencies": { "dom-serializer": "0", "domelementtype": "1" @@ -16384,13 +15171,11 @@ }, "node_modules/imagemin-svgo/node_modules/domutils/node_modules/domelementtype": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" + "license": "BSD-2-Clause" }, "node_modules/imagemin-svgo/node_modules/js-yaml": { "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": 
"sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -16401,30 +15186,25 @@ }, "node_modules/imagemin-svgo/node_modules/mdn-data": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", - "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==" + "license": "CC0-1.0" }, "node_modules/imagemin-svgo/node_modules/nth-check": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "~1.0.0" } }, "node_modules/imagemin-svgo/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/imagemin-svgo/node_modules/svgo": { "version": "1.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", - "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", - "deprecated": "This SVGO version is no longer supported. Upgrade to v2.x.x.", + "license": "MIT", "dependencies": { "chalk": "^2.4.1", "coa": "^2.0.2", @@ -16449,16 +15229,14 @@ }, "node_modules/imagemin/node_modules/@nodelib/fs.stat": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz", - "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==", + "license": "MIT", "engines": { "node": ">= 6" } }, "node_modules/imagemin/node_modules/array-union": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", - "integrity": "sha512-Dxr6QJj/RdU/hCaBjOfxW+q6lyuVE6JFWIrAUpuOOhoJJoQ99cUn3igRaHVB5P9WrgFVN0FfArM3x0cueOU8ng==", + "license": "MIT", "dependencies": { "array-uniq": "^1.0.1" }, @@ -16468,8 +15246,7 @@ }, "node_modules/imagemin/node_modules/braces": { "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "license": "MIT", "dependencies": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -16488,8 +15265,7 @@ }, "node_modules/imagemin/node_modules/dir-glob": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.0.0.tgz", - "integrity": "sha512-37qirFDz8cA5fimp9feo43fSuRo2gHwaIn6dXL8Ber1dGwUosDrGZeCCXq57WnIqE4aQ+u3eQZzsk1yOzhdwag==", + "license": "MIT", "dependencies": { "arrify": "^1.0.1", "path-type": "^3.0.0" @@ -16500,8 +15276,7 @@ }, "node_modules/imagemin/node_modules/fast-glob": { "version": "2.2.7", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz", - "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==", + "license": "MIT", "dependencies": { "@mrmlnc/readdir-enhanced": "^2.2.1", "@nodelib/fs.stat": "^1.1.2", @@ -16516,8 +15291,7 @@ }, "node_modules/imagemin/node_modules/fill-range": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", 
- "integrity": "sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==", + "license": "MIT", "dependencies": { "extend-shallow": "^2.0.1", "is-number": "^3.0.0", @@ -16530,8 +15304,7 @@ }, "node_modules/imagemin/node_modules/glob-parent": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", - "integrity": "sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==", + "license": "ISC", "dependencies": { "is-glob": "^3.1.0", "path-dirname": "^1.0.0" @@ -16539,8 +15312,7 @@ }, "node_modules/imagemin/node_modules/glob-parent/node_modules/is-glob": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", - "integrity": "sha512-UFpDDrPgM6qpnFNI+rh/p3bUaq9hKLZN8bMUWzxmcnZVS3omf4IPK+BrewlnWjO1WmUsMYuSjKh4UJuV4+Lqmw==", + "license": "MIT", "dependencies": { "is-extglob": "^2.1.0" }, @@ -16550,8 +15322,7 @@ }, "node_modules/imagemin/node_modules/globby": { "version": "8.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-8.0.2.tgz", - "integrity": "sha512-yTzMmKygLp8RUpG1Ymu2VXPSJQZjNAZPD4ywgYEaG7e4tBJeUQBO8OpXrf1RCNcEs5alsoJYPAMiIHP0cmeC7w==", + "license": "MIT", "dependencies": { "array-union": "^1.0.1", "dir-glob": "2.0.0", @@ -16567,26 +15338,22 @@ }, "node_modules/imagemin/node_modules/globby/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/imagemin/node_modules/ignore": { "version": "3.3.10", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", - "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" + "license": "MIT" }, "node_modules/imagemin/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/imagemin/node_modules/is-extendable": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4" }, @@ -16596,8 +15363,7 @@ }, "node_modules/imagemin/node_modules/is-number": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -16607,8 +15373,7 @@ }, "node_modules/imagemin/node_modules/is-number/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -16618,8 +15383,7 @@ }, "node_modules/imagemin/node_modules/make-dir": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + 
"license": "MIT", "dependencies": { "pify": "^3.0.0" }, @@ -16629,16 +15393,14 @@ }, "node_modules/imagemin/node_modules/make-dir/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/imagemin/node_modules/micromatch": { "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "license": "MIT", "dependencies": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -16660,8 +15422,7 @@ }, "node_modules/imagemin/node_modules/micromatch/node_modules/extend-shallow": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "license": "MIT", "dependencies": { "assign-symbols": "^1.0.0", "is-extendable": "^1.0.1" @@ -16672,8 +15433,7 @@ }, "node_modules/imagemin/node_modules/path-type": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", + "license": "MIT", "dependencies": { "pify": "^3.0.0" }, @@ -16683,24 +15443,21 @@ }, "node_modules/imagemin/node_modules/path-type/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/imagemin/node_modules/slash": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": "sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/imagemin/node_modules/to-regex-range": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==", + "license": "MIT", "dependencies": { "is-number": "^3.0.0", "repeat-string": "^1.6.1" @@ -16711,8 +15468,7 @@ }, "node_modules/immer": { "version": "9.0.21", - "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", - "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/immer" @@ -16720,13 +15476,11 @@ }, "node_modules/immutable": { "version": "4.3.4", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.4.tgz", - "integrity": "sha512-fsXeu4J4i6WNWSikpI88v/PcVflZz+6kMhUfIwc5SY+poQRPnaf5V7qds6SUyUN3cVxEzuCab7QIoLOQ+DQ1wA==" + "license": "MIT" }, "node_modules/import-fresh": { "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "license": "MIT", "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -16740,17 +15494,17 @@ }, 
"node_modules/import-lazy": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/import-local": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", - "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", "dev": true, + "license": "MIT", "dependencies": { "pkg-dir": "^4.2.0", "resolve-cwd": "^3.0.0" @@ -16767,42 +15521,32 @@ }, "node_modules/imurmurhash": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "license": "MIT", "engines": { "node": ">=0.8.19" } }, "node_modules/indent-string": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/indexes-of": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", - "integrity": "sha512-bup+4tap3Hympa+JBJUG7XuOsdNQ6fxt0MHyXMKuLBKn0OqsTfvUxkUrroEX1+B2VsSHvCjiIcZVxRtYa4nllA==" - }, - "node_modules/indexof": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/indexof/-/indexof-0.0.1.tgz", - "integrity": "sha512-i0G7hLJ1z0DE8dsqJa2rycj9dBmNKgXBvotXtZYXakU9oivfB9Uj2ZBC27qqef2U58/ZLwalxa1X/RDCdkHtVg==" + "license": "MIT" }, "node_modules/infima": { "version": "0.2.0-alpha.43", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", - "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", + "license": "MIT", "engines": { "node": ">=12" } }, "node_modules/inflight": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "license": "ISC", "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -16810,23 +15554,19 @@ }, "node_modules/inherits": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + "license": "ISC" }, "node_modules/ini": { "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + "license": "ISC" }, "node_modules/inline-style-parser": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", - "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + "license": "MIT" }, "node_modules/internal-slot": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz", - "integrity": "sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==", + "license": "MIT", "dependencies": { "get-intrinsic": "^1.2.0", "has": 
"^1.0.3", @@ -16838,24 +15578,21 @@ }, "node_modules/internmap": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", - "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/interpret": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "license": "MIT", "engines": { "node": ">= 0.10" } }, "node_modules/into-stream": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz", - "integrity": "sha512-TcdjPibTksa1NQximqep2r17ISRiNE9fwlfbg3F8ANdvP5/yrFTew86VcO//jk4QTaMlbjypPBq76HN2zaKfZQ==", + "license": "MIT", "dependencies": { "from2": "^2.1.1", "p-is-promise": "^1.1.0" @@ -16866,40 +15603,35 @@ }, "node_modules/invariant": { "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "license": "MIT", "dependencies": { "loose-envify": "^1.0.0" } }, "node_modules/ip-regex": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.3.0.tgz", - "integrity": "sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/ipaddr.js": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.0.1.tgz", - "integrity": "sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng==", + "license": "MIT", "engines": { "node": ">= 10" } }, "node_modules/is-absolute-url": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", - "integrity": "sha512-vOx7VprsKyllwjSkLV79NIhpyLfr3jAp7VaTCMXOJHu4m0Ew1CZ2fcjASwmV1jI3BWuWHB013M48eyeldk9gYg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-accessor-descriptor": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "license": "MIT", "dependencies": { "kind-of": "^6.0.0" }, @@ -16909,8 +15641,7 @@ }, "node_modules/is-alphabetical": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", - "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -16918,8 +15649,7 @@ }, "node_modules/is-alphanumerical": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", - "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "license": "MIT", "dependencies": { "is-alphabetical": "^1.0.0", "is-decimal": "^1.0.0" @@ -16931,8 +15661,7 @@ }, "node_modules/is-arguments": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + 
"license": "MIT", "dependencies": { "call-bind": "^1.0.2", "has-tostringtag": "^1.0.0" @@ -16946,8 +15675,7 @@ }, "node_modules/is-array-buffer": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz", - "integrity": "sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "get-intrinsic": "^1.2.0", @@ -16959,13 +15687,11 @@ }, "node_modules/is-arrayish": { "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + "license": "MIT" }, "node_modules/is-bigint": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", - "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", + "license": "MIT", "dependencies": { "has-bigints": "^1.0.1" }, @@ -16975,8 +15701,7 @@ }, "node_modules/is-binary-path": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "license": "MIT", "dependencies": { "binary-extensions": "^2.0.0" }, @@ -16986,8 +15711,7 @@ }, "node_modules/is-boolean-object": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", - "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "has-tostringtag": "^1.0.0" @@ -17001,8 +15725,6 @@ }, "node_modules/is-buffer": { "version": "2.0.5", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", - "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", "funding": [ { "type": "github", @@ -17017,14 +15739,14 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/is-callable": { "version": "1.2.7", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", - "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -17034,8 +15756,7 @@ }, "node_modules/is-ci": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", + "license": "MIT", "dependencies": { "ci-info": "^2.0.0" }, @@ -17045,8 +15766,7 @@ }, "node_modules/is-color-stop": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz", - "integrity": "sha512-H1U8Vz0cfXNujrJzEcvvwMDW9Ra+biSYA3ThdQvAnMLJkEHQXn6bWzLkxHtVYJ+Sdbx0b6finn3jZiaVe7MAHA==", + "license": "MIT", "dependencies": { "css-color-names": "^0.0.4", "hex-color-regex": "^1.1.0", @@ -17058,8 +15778,7 @@ }, "node_modules/is-core-module": { "version": "2.9.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.9.0.tgz", - "integrity": "sha512-+5FPy5PnwmO3lvfMb0AsoPaBG+5KHUI0wYFXOtYPnVVVspTFUuMZNfNaNVRt3FZadstu2c8x23vykRW/NBoU6A==", + "license": "MIT", "dependencies": { "has": "^1.0.3" }, @@ -17069,8 +15788,7 @@ }, 
"node_modules/is-data-descriptor": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "license": "MIT", "dependencies": { "kind-of": "^6.0.0" }, @@ -17080,8 +15798,7 @@ }, "node_modules/is-date-object": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", - "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "license": "MIT", "dependencies": { "has-tostringtag": "^1.0.0" }, @@ -17094,8 +15811,7 @@ }, "node_modules/is-decimal": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", - "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -17103,8 +15819,7 @@ }, "node_modules/is-descriptor": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "license": "MIT", "dependencies": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -17116,16 +15831,14 @@ }, "node_modules/is-directory": { "version": "0.3.1", - "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz", - "integrity": "sha512-yVChGzahRFvbkscn2MlwGismPO12i9+znNruC5gVEntG3qu0xQMzsGg/JFbrsqDOHtHFPci+V5aP5T9I+yeKqw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-docker": { "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "license": "MIT", "bin": { "is-docker": "cli.js" }, @@ -17138,24 +15851,21 @@ }, "node_modules/is-extendable": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-extglob": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-finite": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz", - "integrity": "sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==", + "license": "MIT", "engines": { "node": ">=0.10.0" }, @@ -17165,25 +15875,22 @@ }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/is-generator-fn": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", - "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", "dev": true, + "license": 
"MIT", "engines": { "node": ">=6" } }, "node_modules/is-generator-function": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", - "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "license": "MIT", "dependencies": { "has-tostringtag": "^1.0.0" }, @@ -17196,8 +15903,7 @@ }, "node_modules/is-gif": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-gif/-/is-gif-3.0.0.tgz", - "integrity": "sha512-IqJ/jlbw5WJSNfwQ/lHEDXF8rxhRgF6ythk2oiEvhpG29F704eX9NO6TvPfMiq9DrbwgcEDnETYNcZDPewQoVw==", + "license": "MIT", "dependencies": { "file-type": "^10.4.0" }, @@ -17207,8 +15913,7 @@ }, "node_modules/is-glob": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", "dependencies": { "is-extglob": "^2.1.1" }, @@ -17218,8 +15923,7 @@ }, "node_modules/is-hexadecimal": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", - "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -17227,8 +15931,7 @@ }, "node_modules/is-installed-globally": { "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "license": "MIT", "dependencies": { "global-dirs": "^3.0.0", "is-path-inside": "^3.0.2" @@ -17242,16 +15945,14 @@ }, "node_modules/is-jpg": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-jpg/-/is-jpg-2.0.0.tgz", - "integrity": "sha512-ODlO0ruzhkzD3sdynIainVP5eoOFNN85rxA1+cwwnPe4dKyX0r5+hxNO5XpCrxlHcmb9vkOit9mhRD2JVuimHg==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/is-nan": { "version": "1.3.2", - "resolved": "https://registry.npmjs.org/is-nan/-/is-nan-1.3.2.tgz", - "integrity": "sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.0", "define-properties": "^1.1.3" @@ -17265,13 +15966,11 @@ }, "node_modules/is-natural-number": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-natural-number/-/is-natural-number-4.0.1.tgz", - "integrity": "sha512-Y4LTamMe0DDQIIAlaer9eKebAlDSV6huy+TWhJVPlzZh2o4tRP5SQWFlLn5N0To4mDD22/qdOq+veo1cSISLgQ==" + "license": "MIT" }, "node_modules/is-negative-zero": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", - "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -17281,8 +15980,7 @@ }, "node_modules/is-npm": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz", - "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -17292,16 +15990,14 @@ }, "node_modules/is-number": { "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": 
"sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", "engines": { "node": ">=0.12.0" } }, "node_modules/is-number-object": { "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", - "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", + "license": "MIT", "dependencies": { "has-tostringtag": "^1.0.0" }, @@ -17314,48 +16010,42 @@ }, "node_modules/is-obj": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-object": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.2.tgz", - "integrity": "sha512-2rRIahhZr2UWb45fIOuvZGpFtz0TyOZLf32KxBbSoUCeZR495zCKlWUKKUByk3geS2eAs7ZAABt0Y/Rx0GiQGA==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-path-cwd": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/is-path-inside": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/is-plain-obj": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/is-plain-object": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "license": "MIT", "dependencies": { "isobject": "^3.0.1" }, @@ -17365,16 +16055,14 @@ }, "node_modules/is-png": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-png/-/is-png-1.1.0.tgz", - "integrity": "sha512-23Rmps8UEx3Bzqr0JqAtQo0tYP6sDfIfMt1rL9rzlla/zbteftI9LSJoqsIoGgL06sJboDGdVns4RTakAW/WTw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-regex": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", - "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "has-tostringtag": "^1.0.0" @@ -17388,37 +16076,32 @@ }, "node_modules/is-regexp": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", - "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-resolvable": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" + 
"license": "ISC" }, "node_modules/is-retry-allowed": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", - "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-root": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", - "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/is-shared-array-buffer": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", - "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2" }, @@ -17428,8 +16111,7 @@ }, "node_modules/is-stream": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", "engines": { "node": ">=8" }, @@ -17439,8 +16121,7 @@ }, "node_modules/is-string": { "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", - "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "license": "MIT", "dependencies": { "has-tostringtag": "^1.0.0" }, @@ -17453,13 +16134,11 @@ }, "node_modules/is-subset": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-subset/-/is-subset-0.1.1.tgz", - "integrity": "sha512-6Ybun0IkarhmEqxXCNw/C0bna6Zb/TkfUX9UbwJtK6ObwAVCxmAP308WWTHviM/zAqXk05cdhYsUsZeGQh99iw==" + "license": "MIT" }, "node_modules/is-svg": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-4.4.0.tgz", - "integrity": "sha512-v+AgVwiK5DsGtT9ng+m4mClp6zDAmwrW8nZi6Gg15qzvBnRWWdfWA1TGaXyCDnWq5g5asofIgMVl3PjKxvk1ug==", + "license": "MIT", "dependencies": { "fast-xml-parser": "^4.1.3" }, @@ -17472,8 +16151,7 @@ }, "node_modules/is-symbol": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", - "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", + "license": "MIT", "dependencies": { "has-symbols": "^1.0.2" }, @@ -17486,8 +16164,7 @@ }, "node_modules/is-typed-array": { "version": "1.1.10", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "license": "MIT", "dependencies": { "available-typed-arrays": "^1.0.5", "call-bind": "^1.0.2", @@ -17504,23 +16181,19 @@ }, "node_modules/is-typedarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + "license": "MIT" }, "node_modules/is-url": { "version": "1.2.4", - "resolved": "https://registry.npmjs.org/is-url/-/is-url-1.2.4.tgz", - "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==" + "license": "MIT" }, "node_modules/is-utf8": { "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", - "integrity": 
"sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==" + "license": "MIT" }, "node_modules/is-weakref": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", - "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2" }, @@ -17530,8 +16203,7 @@ }, "node_modules/is-whitespace-character": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz", - "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -17539,16 +16211,14 @@ }, "node_modules/is-windows": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-word-character": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz", - "integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -17556,8 +16226,7 @@ }, "node_modules/is-wsl": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "license": "MIT", "dependencies": { "is-docker": "^2.0.0" }, @@ -17567,13 +16236,11 @@ }, "node_modules/is-yarn-global": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==" + "license": "MIT" }, "node_modules/is2": { "version": "2.0.9", - "resolved": "https://registry.npmjs.org/is2/-/is2-2.0.9.tgz", - "integrity": "sha512-rZkHeBn9Zzq52sd9IUIV3a5mfwBY+o2HePMh0wkGBM4z4qjvy2GwVxQ6nNXSfw6MmVP6gf1QIlWjiOavhM3x5g==", + "license": "MIT", "dependencies": { "deep-is": "^0.1.3", "ip-regex": "^4.1.0", @@ -17585,45 +16252,43 @@ }, "node_modules/isarray": { "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + "license": "MIT" }, "node_modules/isexe": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" + "license": "ISC" }, "node_modules/isobject": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/isstream": { "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==" + "license": "MIT" }, "node_modules/istanbul-lib-coverage": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", - 
"integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=8" } }, "node_modules/istanbul-lib-instrument": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.0.tgz", - "integrity": "sha512-x58orMzEVfzPUKqlbLd1hXCnySCxKdDKa6Rjg97CwuLLRI4g3FHTdnExu1OqffVFay6zeMW+T6/DowFLndWnIw==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { - "@babel/core": "^7.12.3", - "@babel/parser": "^7.14.7", - "@istanbuljs/schema": "^0.1.2", + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" }, @@ -17633,9 +16298,8 @@ }, "node_modules/istanbul-lib-report": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", - "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "istanbul-lib-coverage": "^3.0.0", "make-dir": "^4.0.0", @@ -17647,18 +16311,16 @@ }, "node_modules/istanbul-lib-report/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/istanbul-lib-report/node_modules/make-dir": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", - "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", "dev": true, + "license": "MIT", "dependencies": { "semver": "^7.5.3" }, @@ -17671,9 +16333,8 @@ }, "node_modules/istanbul-lib-report/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -17683,9 +16344,8 @@ }, "node_modules/istanbul-lib-source-maps": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", - "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "debug": "^4.1.1", "istanbul-lib-coverage": "^3.0.0", @@ -17697,18 +16357,18 @@ }, "node_modules/istanbul-lib-source-maps/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" 
} }, "node_modules/istanbul-reports": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz", - "integrity": "sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==", + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "html-escaper": "^2.0.0", "istanbul-lib-report": "^3.0.0" @@ -17719,8 +16379,7 @@ }, "node_modules/isurl": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isurl/-/isurl-1.0.0.tgz", - "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==", + "license": "MIT", "dependencies": { "has-to-string-tag-x": "^1.2.0", "is-object": "^1.0.1" @@ -17731,8 +16390,7 @@ }, "node_modules/jackspeak": { "version": "2.3.6", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", - "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "license": "BlueOak-1.0.0", "dependencies": { "@isaacs/cliui": "^8.0.2" }, @@ -17747,15 +16405,16 @@ } }, "node_modules/jest": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest/-/jest-29.6.4.tgz", - "integrity": "sha512-tEFhVQFF/bzoYV1YuGyzLPZ6vlPrdfvDmmAxudA1dLEuiztqg2Rkx20vkKY32xiDROcD2KXlgZ7Cu8RPeEHRKw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/core": "^29.6.4", + "@jest/core": "^29.7.0", "@jest/types": "^29.6.3", "import-local": "^3.0.2", - "jest-cli": "^29.6.4" + "jest-cli": "^29.7.0" }, "bin": { "jest": "bin/jest.js" @@ -17773,13 +16432,14 @@ } }, "node_modules/jest-changed-files": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.6.3.tgz", - "integrity": "sha512-G5wDnElqLa4/c66ma5PG9eRjE342lIbF6SUnTJi26C3J28Fv2TVY2rOyKB9YGbSA5ogwevgmxc4j4aVjrEK6Yg==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", "dev": true, + "license": "MIT", "dependencies": { "execa": "^5.0.0", - "jest-util": "^29.6.3", + "jest-util": "^29.7.0", "p-limit": "^3.1.0" }, "engines": { @@ -17788,9 +16448,8 @@ }, "node_modules/jest-changed-files/node_modules/p-limit": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -17802,28 +16461,29 @@ } }, "node_modules/jest-circus": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.6.4.tgz", - "integrity": "sha512-YXNrRyntVUgDfZbjXWBMPslX1mQ8MrSG0oM/Y06j9EYubODIyHWP8hMUbjbZ19M3M+zamqEur7O80HODwACoJw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", 
"dev": true, + "license": "MIT", "dependencies": { - "@jest/environment": "^29.6.4", - "@jest/expect": "^29.6.4", - "@jest/test-result": "^29.6.4", + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", "co": "^4.6.0", "dedent": "^1.0.0", "is-generator-fn": "^2.0.0", - "jest-each": "^29.6.3", - "jest-matcher-utils": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-runtime": "^29.6.4", - "jest-snapshot": "^29.6.4", - "jest-util": "^29.6.3", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", "p-limit": "^3.1.0", - "pretty-format": "^29.6.3", + "pretty-format": "^29.7.0", "pure-rand": "^6.0.0", "slash": "^3.0.0", "stack-utils": "^2.0.3" @@ -17834,9 +16494,8 @@ }, "node_modules/jest-circus/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -17849,9 +16508,8 @@ }, "node_modules/jest-circus/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -17865,9 +16523,8 @@ }, "node_modules/jest-circus/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -17877,24 +16534,21 @@ }, "node_modules/jest-circus/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-circus/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-circus/node_modules/p-limit": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -17907,9 +16561,8 @@ }, "node_modules/jest-circus/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -17918,22 +16571,22 @@ } }, "node_modules/jest-cli": { - "version": "29.6.4", - "resolved": 
"https://registry.npmjs.org/jest-cli/-/jest-cli-29.6.4.tgz", - "integrity": "sha512-+uMCQ7oizMmh8ZwRfZzKIEszFY9ksjjEQnTEMTaL7fYiL3Kw4XhqT9bYh+A4DQKUb67hZn2KbtEnDuHvcgK4pQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/core": "^29.6.4", - "@jest/test-result": "^29.6.4", + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", "@jest/types": "^29.6.3", "chalk": "^4.0.0", + "create-jest": "^29.7.0", "exit": "^0.1.2", - "graceful-fs": "^4.2.9", "import-local": "^3.0.2", - "jest-config": "^29.6.4", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", - "prompts": "^2.0.1", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", "yargs": "^17.3.1" }, "bin": { @@ -17953,9 +16606,8 @@ }, "node_modules/jest-cli/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -17968,9 +16620,8 @@ }, "node_modules/jest-cli/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -17984,9 +16635,8 @@ }, "node_modules/jest-cli/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -17996,24 +16646,21 @@ }, "node_modules/jest-cli/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-cli/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-cli/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18022,31 +16669,32 @@ } }, "node_modules/jest-config": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.6.4.tgz", - "integrity": "sha512-JWohr3i9m2cVpBumQFv2akMEnFEPVOh+9L2xIBJhJ0zOaci2ZXuKJj0tgMKQCBZAKA09H049IR4HVS/43Qb19A==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": 
"sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", "dev": true, + "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", - "@jest/test-sequencer": "^29.6.4", + "@jest/test-sequencer": "^29.7.0", "@jest/types": "^29.6.3", - "babel-jest": "^29.6.4", + "babel-jest": "^29.7.0", "chalk": "^4.0.0", "ci-info": "^3.2.0", "deepmerge": "^4.2.2", "glob": "^7.1.3", "graceful-fs": "^4.2.9", - "jest-circus": "^29.6.4", - "jest-environment-node": "^29.6.4", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", "jest-get-type": "^29.6.3", "jest-regex-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-runner": "^29.6.4", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", "micromatch": "^4.0.4", "parse-json": "^5.2.0", - "pretty-format": "^29.6.3", + "pretty-format": "^29.7.0", "slash": "^3.0.0", "strip-json-comments": "^3.1.1" }, @@ -18068,9 +16716,8 @@ }, "node_modules/jest-config/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18083,9 +16730,8 @@ }, "node_modules/jest-config/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18098,9 +16744,9 @@ } }, "node_modules/jest-config/node_modules/ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", "dev": true, "funding": [ { @@ -18108,15 +16754,15 @@ "url": "https://github.com/sponsors/sibiraj-s" } ], + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-config/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -18126,24 +16772,21 @@ }, "node_modules/jest-config/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-config/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-config/node_modules/strip-json-comments": { "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" }, @@ -18153,9 +16796,8 @@ }, "node_modules/jest-config/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18164,15 +16806,16 @@ } }, "node_modules/jest-diff": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.6.4.tgz", - "integrity": "sha512-9F48UxR9e4XOEZvoUXEHSWY4qC4zERJaOfrbBg9JpbJOO43R1vN76REt/aMGZoY6GD5g84nnJiBIVlscegefpw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "^4.0.0", "diff-sequences": "^29.6.3", "jest-get-type": "^29.6.3", - "pretty-format": "^29.6.3" + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -18180,9 +16823,8 @@ }, "node_modules/jest-diff/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18195,9 +16837,8 @@ }, "node_modules/jest-diff/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18211,9 +16852,8 @@ }, "node_modules/jest-diff/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -18223,24 +16863,21 @@ }, "node_modules/jest-diff/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-diff/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-diff/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18249,10 +16886,11 @@ } }, 
"node_modules/jest-docblock": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.6.3.tgz", - "integrity": "sha512-2+H+GOTQBEm2+qFSQ7Ma+BvyV+waiIFxmZF5LdpBsAEjWX8QYjSCa4FrkIYtbfXUJJJnFCYrOtt6TZ+IAiTjBQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", "dev": true, + "license": "MIT", "dependencies": { "detect-newline": "^3.0.0" }, @@ -18261,16 +16899,17 @@ } }, "node_modules/jest-each": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.6.3.tgz", - "integrity": "sha512-KoXfJ42k8cqbkfshW7sSHcdfnv5agDdHCPA87ZBdmHP+zJstTJc0ttQaJ/x7zK6noAL76hOuTIJ6ZkQRS5dcyg==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "chalk": "^4.0.0", "jest-get-type": "^29.6.3", - "jest-util": "^29.6.3", - "pretty-format": "^29.6.3" + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -18278,9 +16917,8 @@ }, "node_modules/jest-each/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18293,9 +16931,8 @@ }, "node_modules/jest-each/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18309,9 +16946,8 @@ }, "node_modules/jest-each/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -18321,24 +16957,21 @@ }, "node_modules/jest-each/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-each/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-each/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18347,17 +16980,18 @@ } }, "node_modules/jest-environment-node": 
{ - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.6.4.tgz", - "integrity": "sha512-i7SbpH2dEIFGNmxGCpSc2w9cA4qVD+wfvg2ZnfQ7XVrKL0NA5uDVBIiGH8SR4F0dKEv/0qI5r+aDomDf04DpEQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/environment": "^29.6.4", - "@jest/fake-timers": "^29.6.4", + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", "@jest/types": "^29.6.3", "@types/node": "*", - "jest-mock": "^29.6.3", - "jest-util": "^29.6.3" + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -18365,18 +16999,18 @@ }, "node_modules/jest-get-type": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", - "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", "dev": true, + "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-haste-map": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.6.4.tgz", - "integrity": "sha512-12Ad+VNTDHxKf7k+M65sviyynRoZYuL1/GTuhEVb8RYsNSNln71nANRb/faSyWvx0j+gHcivChXHIoMJrGYjog==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/graceful-fs": "^4.1.3", @@ -18385,8 +17019,8 @@ "fb-watchman": "^2.0.0", "graceful-fs": "^4.2.9", "jest-regex-util": "^29.6.3", - "jest-util": "^29.6.3", - "jest-worker": "^29.6.4", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", "micromatch": "^4.0.4", "walker": "^1.0.8" }, @@ -18399,21 +17033,21 @@ }, "node_modules/jest-haste-map/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-haste-map/node_modules/jest-worker": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.4.tgz", - "integrity": "sha512-6dpvFV4WjcWbDVGgHTWo/aupl8/LbBx2NSKfiwqf79xC/yeJjKHT1+StcKy/2KTmW16hE68ccKVOtXf+WZGz7Q==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", "dev": true, + "license": "MIT", "dependencies": { "@types/node": "*", - "jest-util": "^29.6.3", + "jest-util": "^29.7.0", "merge-stream": "^2.0.0", "supports-color": "^8.0.0" }, @@ -18423,9 +17057,8 @@ }, "node_modules/jest-haste-map/node_modules/supports-color": { "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18437,28 +17070,30 @@ } }, 
"node_modules/jest-leak-detector": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.6.3.tgz", - "integrity": "sha512-0kfbESIHXYdhAdpLsW7xdwmYhLf1BRu4AA118/OxFm0Ho1b2RcTmO4oF6aAMaxpxdxnJ3zve2rgwzNBD4Zbm7Q==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", "dev": true, + "license": "MIT", "dependencies": { "jest-get-type": "^29.6.3", - "pretty-format": "^29.6.3" + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-matcher-utils": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.6.4.tgz", - "integrity": "sha512-KSzwyzGvK4HcfnserYqJHYi7sZVqdREJ9DMPAKVbS98JsIAvumihaNUbjrWw0St7p9IY7A9UskCW5MYlGmBQFQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "^4.0.0", - "jest-diff": "^29.6.4", + "jest-diff": "^29.7.0", "jest-get-type": "^29.6.3", - "pretty-format": "^29.6.3" + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -18466,9 +17101,8 @@ }, "node_modules/jest-matcher-utils/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18481,9 +17115,8 @@ }, "node_modules/jest-matcher-utils/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18497,9 +17130,8 @@ }, "node_modules/jest-matcher-utils/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -18509,24 +17141,21 @@ }, "node_modules/jest-matcher-utils/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-matcher-utils/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-matcher-utils/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18535,10 +17164,11 @@ } }, "node_modules/jest-message-util": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.6.3.tgz", - "integrity": "sha512-FtzaEEHzjDpQp51HX4UMkPZjy46ati4T5pEMyM6Ik48ztu4T9LQplZ6OsimHx7EuM9dfEh5HJa6D3trEftu3dA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", "dev": true, + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.12.13", "@jest/types": "^29.6.3", @@ -18546,7 +17176,7 @@ "chalk": "^4.0.0", "graceful-fs": "^4.2.9", "micromatch": "^4.0.4", - "pretty-format": "^29.6.3", + "pretty-format": "^29.7.0", "slash": "^3.0.0", "stack-utils": "^2.0.3" }, @@ -18556,9 +17186,8 @@ }, "node_modules/jest-message-util/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18571,9 +17200,8 @@ }, "node_modules/jest-message-util/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18587,9 +17215,8 @@ }, "node_modules/jest-message-util/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -18599,24 +17226,21 @@ }, "node_modules/jest-message-util/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-message-util/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-message-util/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18625,14 +17249,15 @@ } }, "node_modules/jest-mock": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.6.3.tgz", - "integrity": "sha512-Z7Gs/mOyTSR4yPsaZ72a/MtuK6RnC3JYqWONe48oLaoEcYwEDxqvbXz85G4SJrm2Z5Ar9zp6MiHF4AlFlRM4Pg==", + "version": "29.7.0", + "resolved": 
"https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", - "jest-util": "^29.6.3" + "jest-util": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -18640,9 +17265,8 @@ }, "node_modules/jest-pnp-resolver": { "version": "1.2.3", - "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", - "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" }, @@ -18657,25 +17281,25 @@ }, "node_modules/jest-regex-util": { "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", - "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", "dev": true, + "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-resolve": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.6.4.tgz", - "integrity": "sha512-fPRq+0vcxsuGlG0O3gyoqGTAxasagOxEuyoxHeyxaZbc9QNek0AmJWSkhjlMG+mTsj+8knc/mWb3fXlRNVih7Q==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "^4.0.0", "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", + "jest-haste-map": "^29.7.0", "jest-pnp-resolver": "^1.2.2", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", "resolve": "^1.20.0", "resolve.exports": "^2.0.0", "slash": "^3.0.0" @@ -18685,13 +17309,14 @@ } }, "node_modules/jest-resolve-dependencies": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.6.4.tgz", - "integrity": "sha512-7+6eAmr1ZBF3vOAJVsfLj1QdqeXG+WYhidfLHBRZqGN24MFRIiKG20ItpLw2qRAsW/D2ZUUmCNf6irUr/v6KHA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", "dev": true, + "license": "MIT", "dependencies": { "jest-regex-util": "^29.6.3", - "jest-snapshot": "^29.6.4" + "jest-snapshot": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -18699,9 +17324,8 @@ }, "node_modules/jest-resolve/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18714,9 +17338,8 @@ }, "node_modules/jest-resolve/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18730,9 +17353,8 @@ }, 
"node_modules/jest-resolve/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -18742,24 +17364,21 @@ }, "node_modules/jest-resolve/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-resolve/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-resolve/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18768,30 +17387,31 @@ } }, "node_modules/jest-runner": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.6.4.tgz", - "integrity": "sha512-SDaLrMmtVlQYDuG0iSPYLycG8P9jLI+fRm8AF/xPKhYDB2g6xDWjXBrR5M8gEWsK6KVFlebpZ4QsrxdyIX1Jaw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/console": "^29.6.4", - "@jest/environment": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", "emittery": "^0.13.1", "graceful-fs": "^4.2.9", - "jest-docblock": "^29.6.3", - "jest-environment-node": "^29.6.4", - "jest-haste-map": "^29.6.4", - "jest-leak-detector": "^29.6.3", - "jest-message-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-runtime": "^29.6.4", - "jest-util": "^29.6.3", - "jest-watcher": "^29.6.4", - "jest-worker": "^29.6.4", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", "p-limit": "^3.1.0", "source-map-support": "0.5.13" }, @@ -18801,9 +17421,8 @@ }, "node_modules/jest-runner/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18816,9 +17435,8 @@ }, "node_modules/jest-runner/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18832,9 +17450,8 @@ }, "node_modules/jest-runner/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -18844,27 +17461,26 @@ }, "node_modules/jest-runner/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-runner/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-runner/node_modules/jest-worker": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.4.tgz", - "integrity": "sha512-6dpvFV4WjcWbDVGgHTWo/aupl8/LbBx2NSKfiwqf79xC/yeJjKHT1+StcKy/2KTmW16hE68ccKVOtXf+WZGz7Q==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", "dev": true, + "license": "MIT", "dependencies": { "@types/node": "*", - "jest-util": "^29.6.3", + "jest-util": "^29.7.0", "merge-stream": "^2.0.0", "supports-color": "^8.0.0" }, @@ -18874,9 +17490,8 @@ }, "node_modules/jest-runner/node_modules/jest-worker/node_modules/supports-color": { "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18889,9 +17504,8 @@ }, "node_modules/jest-runner/node_modules/p-limit": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -18904,18 +17518,16 @@ }, "node_modules/jest-runner/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/jest-runner/node_modules/source-map-support": { "version": "0.5.13", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", - "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", "dev": true, + "license": "MIT", "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -18923,9 +17535,8 @@ }, "node_modules/jest-runner/node_modules/supports-color": { "version": 
"7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -18934,17 +17545,18 @@ } }, "node_modules/jest-runtime": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.6.4.tgz", - "integrity": "sha512-s/QxMBLvmwLdchKEjcLfwzP7h+jsHvNEtxGP5P+Fl1FMaJX2jMiIqe4rJw4tFprzCwuSvVUo9bn0uj4gNRXsbA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/environment": "^29.6.4", - "@jest/fake-timers": "^29.6.4", - "@jest/globals": "^29.6.4", + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", "@jest/source-map": "^29.6.3", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", @@ -18952,13 +17564,13 @@ "collect-v8-coverage": "^1.0.0", "glob": "^7.1.3", "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-mock": "^29.6.3", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", "jest-regex-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-snapshot": "^29.6.4", - "jest-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", "slash": "^3.0.0", "strip-bom": "^4.0.0" }, @@ -18968,9 +17580,8 @@ }, "node_modules/jest-runtime/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -18983,9 +17594,8 @@ }, "node_modules/jest-runtime/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -18999,9 +17609,8 @@ }, "node_modules/jest-runtime/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -19011,24 +17620,21 @@ }, "node_modules/jest-runtime/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-runtime/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + 
"license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-runtime/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -19037,30 +17643,31 @@ } }, "node_modules/jest-snapshot": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.6.4.tgz", - "integrity": "sha512-VC1N8ED7+4uboUKGIDsbvNAZb6LakgIPgAF4RSpF13dN6YaMokfRqO+BaqK4zIh6X3JffgwbzuGqDEjHm/MrvA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", "dev": true, + "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@babel/generator": "^7.7.2", "@babel/plugin-syntax-jsx": "^7.7.2", "@babel/plugin-syntax-typescript": "^7.7.2", "@babel/types": "^7.3.3", - "@jest/expect-utils": "^29.6.4", - "@jest/transform": "^29.6.4", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", "@jest/types": "^29.6.3", "babel-preset-current-node-syntax": "^1.0.0", "chalk": "^4.0.0", - "expect": "^29.6.4", + "expect": "^29.7.0", "graceful-fs": "^4.2.9", - "jest-diff": "^29.6.4", + "jest-diff": "^29.7.0", "jest-get-type": "^29.6.3", - "jest-matcher-utils": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", "natural-compare": "^1.4.0", - "pretty-format": "^29.6.3", + "pretty-format": "^29.7.0", "semver": "^7.5.3" }, "engines": { @@ -19069,9 +17676,8 @@ }, "node_modules/jest-snapshot/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -19084,9 +17690,8 @@ }, "node_modules/jest-snapshot/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -19100,9 +17705,8 @@ }, "node_modules/jest-snapshot/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -19112,24 +17716,21 @@ }, "node_modules/jest-snapshot/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-snapshot/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-snapshot/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -19138,10 +17739,11 @@ } }, "node_modules/jest-util": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.6.3.tgz", - "integrity": "sha512-QUjna/xSy4B32fzcKTSz1w7YYzgiHrjjJjevdRf61HYk998R5vVMMNmrHESYZVDS5DSWs+1srPLPKxXPkeSDOA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", @@ -19156,9 +17758,8 @@ }, "node_modules/jest-util/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -19171,9 +17772,8 @@ }, "node_modules/jest-util/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -19187,8 +17787,6 @@ }, "node_modules/jest-util/node_modules/ci-info": { "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", "dev": true, "funding": [ { @@ -19196,15 +17794,15 @@ "url": "https://github.com/sponsors/sibiraj-s" } ], + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-util/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -19214,24 +17812,21 @@ }, "node_modules/jest-util/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-util/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-util/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -19240,17 +17835,18 @@ } }, "node_modules/jest-validate": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.6.3.tgz", - "integrity": "sha512-e7KWZcAIX+2W1o3cHfnqpGajdCs1jSM3DkXjGeLSNmCazv1EeI1ggTeK5wdZhF+7N+g44JI2Od3veojoaumlfg==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", "dev": true, + "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "camelcase": "^6.2.0", "chalk": "^4.0.0", "jest-get-type": "^29.6.3", "leven": "^3.1.0", - "pretty-format": "^29.6.3" + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -19258,9 +17854,8 @@ }, "node_modules/jest-validate/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -19273,9 +17868,8 @@ }, "node_modules/jest-validate/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -19289,9 +17883,8 @@ }, "node_modules/jest-validate/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -19301,24 +17894,21 @@ }, "node_modules/jest-validate/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-validate/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-validate/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -19327,18 +17917,19 @@ } }, "node_modules/jest-watcher": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.6.4.tgz", - "integrity": "sha512-oqUWvx6+On04ShsT00Ir9T4/FvBeEh2M9PTubgITPxDa739p4hoQweWPRGyYeaojgT0xTpZKF0Y/rSY1UgMxvQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": 
"sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", "dev": true, + "license": "MIT", "dependencies": { - "@jest/test-result": "^29.6.4", + "@jest/test-result": "^29.7.0", "@jest/types": "^29.6.3", "@types/node": "*", "ansi-escapes": "^4.2.1", "chalk": "^4.0.0", "emittery": "^0.13.1", - "jest-util": "^29.6.3", + "jest-util": "^29.7.0", "string-length": "^4.0.1" }, "engines": { @@ -19347,9 +17938,8 @@ }, "node_modules/jest-watcher/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -19362,9 +17952,8 @@ }, "node_modules/jest-watcher/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -19378,9 +17967,8 @@ }, "node_modules/jest-watcher/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -19390,24 +17978,21 @@ }, "node_modules/jest-watcher/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/jest-watcher/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-watcher/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -19417,8 +18002,7 @@ }, "node_modules/jest-worker": { "version": "27.5.1", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", - "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "license": "MIT", "dependencies": { "@types/node": "*", "merge-stream": "^2.0.0", @@ -19430,16 +18014,14 @@ }, "node_modules/jest-worker/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/jest-worker/node_modules/supports-color": { "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": 
"sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -19452,8 +18034,7 @@ }, "node_modules/joi": { "version": "17.6.0", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.6.0.tgz", - "integrity": "sha512-OX5dG6DTbcr/kbMFj0KGYxuew69HPcAE3K/sZpEV2nP6e/j/C0HV+HNiBPCASxdx5T7DMoa0s8UeHWMnb6n2zw==", + "license": "BSD-3-Clause", "dependencies": { "@hapi/hoek": "^9.0.0", "@hapi/topo": "^5.0.0", @@ -19464,9 +18045,8 @@ }, "node_modules/jpegtran-bin": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jpegtran-bin/-/jpegtran-bin-4.0.0.tgz", - "integrity": "sha512-2cRl1ism+wJUoYAYFt6O/rLBfpXNWG2dUWbgcEkTt5WGMnqI46eEro8T4C5zGROxKRqyKpCBSdHPvt5UYCtxaQ==", "hasInstallScript": true, + "license": "MIT", "dependencies": { "bin-build": "^3.0.0", "bin-wrapper": "^4.0.0", @@ -19481,21 +18061,18 @@ }, "node_modules/js-levenshtein": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz", - "integrity": "sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/js-tokens": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + "license": "MIT" }, "node_modules/js-yaml": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", "dependencies": { "argparse": "^2.0.1" }, @@ -19505,60 +18082,52 @@ }, "node_modules/jsbn": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" + "license": "MIT" }, "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz", + "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==", "bin": { "jsesc": "bin/jsesc" }, "engines": { - "node": ">=4" + "node": ">=6" } }, "node_modules/json-buffer": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" + "license": "MIT" }, "node_modules/json-parse-better-errors": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" + "license": "MIT" }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + "license": "MIT" }, "node_modules/json-pointer": { "version": "0.6.2", - "resolved": "https://registry.npmjs.org/json-pointer/-/json-pointer-0.6.2.tgz", - "integrity": 
"sha512-vLWcKbOaXlO+jvRy4qNd+TI1QUPZzfJj1tpJ3vAXDych5XJf93ftpUKe5pKCrzyIIwgBJcOcCVRUfqQP25afBw==", + "license": "MIT", "dependencies": { "foreach": "^2.0.4" } }, "node_modules/json-schema": { "version": "0.4.0", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", - "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" + "license": "(AFL-2.1 OR BSD-3-Clause)" }, "node_modules/json-schema-compare": { "version": "0.2.2", - "resolved": "https://registry.npmjs.org/json-schema-compare/-/json-schema-compare-0.2.2.tgz", - "integrity": "sha512-c4WYmDKyJXhs7WWvAWm3uIYnfyWFoIp+JEoX34rctVvEkMYCPGhXtvmFFXiffBbxfZsvQ0RNnV5H7GvDF5HCqQ==", + "license": "MIT", "dependencies": { "lodash": "^4.17.4" } }, "node_modules/json-schema-merge-allof": { "version": "0.8.1", - "resolved": "https://registry.npmjs.org/json-schema-merge-allof/-/json-schema-merge-allof-0.8.1.tgz", - "integrity": "sha512-CTUKmIlPJbsWfzRRnOXz+0MjIqvnleIXwFTzz+t9T86HnYX/Rozria6ZVGLktAU9e+NygNljveP+yxqtQp/Q4w==", + "license": "MIT", "dependencies": { "compute-lcm": "^1.1.2", "json-schema-compare": "^0.2.2", @@ -19570,18 +18139,16 @@ }, "node_modules/json-schema-traverse": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + "license": "MIT" }, "node_modules/json-stringify-safe": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" + "license": "ISC" }, "node_modules/json5": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz", - "integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "bin": { "json5": "lib/cli.js" }, @@ -19591,8 +18158,7 @@ }, "node_modules/jsonfile": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "license": "MIT", "dependencies": { "universalify": "^2.0.0" }, @@ -19602,8 +18168,7 @@ }, "node_modules/jsprim": { "version": "1.4.2", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", - "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", + "license": "MIT", "dependencies": { "assert-plus": "1.0.0", "extsprintf": "1.3.0", @@ -19616,52 +18181,46 @@ }, "node_modules/keyv": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", + "license": "MIT", "dependencies": { "json-buffer": "3.0.0" } }, "node_modules/khroma": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", - "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" + "version": "2.1.0" }, "node_modules/kind-of": { "version": "6.0.3", - "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/kleur": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", - "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/klona": { "version": "2.0.5", - "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz", - "integrity": "sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==", + "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/lang-feel": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lang-feel/-/lang-feel-2.0.0.tgz", - "integrity": "sha512-cMD6EIhb7vyXLs4kXmaphfZZNr5SkbRxmkfsZUjUJzOV5YxyKBF73VI/8fC3GDUifzs0lVo2DruVszk5igrddg==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/lang-feel/-/lang-feel-2.2.0.tgz", + "integrity": "sha512-Ebo5nftYsMfJzB3Ny8Oy4oaDXZXb5x61qtVVmKv6aImvAZUbT76mD60ZbEilizjZQzsR2CcU1iMK5sacIa1NVA==", "dependencies": { - "@codemirror/autocomplete": "^6.9.1", - "@codemirror/language": "^6.9.1", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.21.0", - "@lezer/common": "^1.1.2", - "lezer-feel": "^1.2.0" + "@codemirror/autocomplete": "^6.16.2", + "@codemirror/language": "^6.10.2", + "@codemirror/state": "^6.4.1", + "@codemirror/view": "^6.28.1", + "@lezer/common": "^1.2.1", + "lezer-feel": "^1.3.0" }, "engines": { "node": "*" @@ -19669,8 +18228,7 @@ }, "node_modules/latest-version": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", + "license": "MIT", "dependencies": { "package-json": "^6.3.0" }, @@ -19680,13 +18238,11 @@ }, "node_modules/layout-base": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", - "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==" + "license": "MIT" }, "node_modules/lazy-cache": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-2.0.2.tgz", - "integrity": "sha512-7vp2Acd2+Kz4XkzxGxaB1FWOi8KjWIWsgdfD5MCb86DWvlLqhRPM+d6Pro3iNEL5VT9mstz5hKAlcd+QR6H3aA==", + "license": "MIT", "dependencies": { "set-getter": "^0.1.0" }, @@ -19696,19 +18252,19 @@ }, "node_modules/leven": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/lezer-feel": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/lezer-feel/-/lezer-feel-1.2.8.tgz", - "integrity": "sha512-CO5JEpwNhH1p8mmRRcqMjJrYxO3vNx0nEsF9Ak4OPa1pNHEqvJ2rwYwM9LjZ7jh/Sl5FxbTJT/teF9a+zWmflg==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/lezer-feel/-/lezer-feel-1.4.0.tgz", + "integrity": "sha512-kNxG7O38gwpuYy+C3JCRxQNTCE2qu9uTuH5dE3EGVnRhIQMe6rPDz0S8t3urLEOsMud6HI795m6zX2ujfUaqTw==", "dependencies": { - "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.0" + "@lezer/highlight": "^1.2.1", + "@lezer/lr": "^1.4.2", + "min-dash": "^4.2.1" }, "engines": 
{ "node": "*" @@ -19716,22 +18272,19 @@ }, "node_modules/lilconfig": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", - "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/lines-and-columns": { "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + "license": "MIT" }, "node_modules/lint-staged": { "version": "14.0.1", - "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-14.0.1.tgz", - "integrity": "sha512-Mw0cL6HXnHN1ag0mN/Dg4g6sr8uf8sn98w2Oc1ECtFto9tvRF7nkXGJRbx8gPlHyoR0pLyBr2lQHbWwmUHe1Sw==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "5.3.0", "commander": "11.0.0", @@ -19756,9 +18309,8 @@ }, "node_modules/lint-staged/node_modules/chalk": { "version": "5.3.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", - "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", "dev": true, + "license": "MIT", "engines": { "node": "^12.17.0 || ^14.13 || >=16.0.0" }, @@ -19768,18 +18320,16 @@ }, "node_modules/lint-staged/node_modules/commander": { "version": "11.0.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-11.0.0.tgz", - "integrity": "sha512-9HMlXtt/BNoYr8ooyjjNRdIilOTkVJXB+GhxMTtOKwk0R4j4lS4NpjuqmRxroBfnfTSHQIHQB7wryHhXarNjmQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=16" } }, "node_modules/lint-staged/node_modules/execa": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-7.2.0.tgz", - "integrity": "sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==", "dev": true, + "license": "MIT", "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.1", @@ -19800,9 +18350,8 @@ }, "node_modules/lint-staged/node_modules/get-stream": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -19812,18 +18361,16 @@ }, "node_modules/lint-staged/node_modules/human-signals": { "version": "4.3.1", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-4.3.1.tgz", - "integrity": "sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==", "dev": true, + "license": "Apache-2.0", "engines": { "node": ">=14.18.0" } }, "node_modules/lint-staged/node_modules/is-stream": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", - "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", "dev": true, + "license": "MIT", "engines": { "node": "^12.20.0 || ^14.13.1 || >=16.0.0" }, @@ -19833,9 +18380,8 @@ }, "node_modules/lint-staged/node_modules/mimic-fn": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", - "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -19845,9 +18391,8 @@ }, 
"node_modules/lint-staged/node_modules/npm-run-path": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz", - "integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==", "dev": true, + "license": "MIT", "dependencies": { "path-key": "^4.0.0" }, @@ -19860,9 +18405,8 @@ }, "node_modules/lint-staged/node_modules/onetime": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", - "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", "dev": true, + "license": "MIT", "dependencies": { "mimic-fn": "^4.0.0" }, @@ -19875,9 +18419,8 @@ }, "node_modules/lint-staged/node_modules/path-key": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -19887,9 +18430,8 @@ }, "node_modules/lint-staged/node_modules/strip-final-newline": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", - "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -19899,25 +18441,22 @@ }, "node_modules/lint-staged/node_modules/yaml": { "version": "2.3.1", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz", - "integrity": "sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==", "dev": true, + "license": "ISC", "engines": { "node": ">= 14" } }, "node_modules/liquid-json": { "version": "0.3.1", - "resolved": "https://registry.npmjs.org/liquid-json/-/liquid-json-0.3.1.tgz", - "integrity": "sha512-wUayTU8MS827Dam6MxgD72Ui+KOSF+u/eIqpatOtjnvgJ0+mnDq33uC2M7J0tPK+upe/DpUAuK4JUU89iBoNKQ==", + "license": "Apache-2.0", "engines": { "node": ">=4" } }, "node_modules/list-item": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/list-item/-/list-item-1.1.1.tgz", - "integrity": "sha512-S3D0WZ4J6hyM8o5SNKWaMYB1ALSacPZ2nHGEuCjmHZ+dc03gFeNZoNDcqfcnO4vDhTZmNrqrpYZCdXsRh22bzw==", + "license": "MIT", "dependencies": { "expand-range": "^1.8.1", "extend-shallow": "^2.0.1", @@ -19930,13 +18469,11 @@ }, "node_modules/list-item/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/list-item/node_modules/is-number": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", - "integrity": "sha512-QUzH43Gfb9+5yckcrSA0VBDwEtDUchrk4F6tfJZQuNzDJbEDB9cZNzSfXGQ1jqmdDY/kl41lUOWM9syA8z8jlg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -19946,8 +18483,7 @@ }, "node_modules/list-item/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -19957,14 +18493,12 @@ }, "node_modules/listenercount": { "version": "1.0.1", - "resolved": 
"https://registry.npmjs.org/listenercount/-/listenercount-1.0.1.tgz", - "integrity": "sha512-3mk/Zag0+IJxeDrxSgaDPy4zZ3w05PRZeJNnlWhzFz5OkX49J4krc+A8X2d2M69vGMBEX0uyl8M+W+8gH+kBqQ==" + "license": "ISC" }, "node_modules/listr2": { "version": "6.6.1", - "resolved": "https://registry.npmjs.org/listr2/-/listr2-6.6.1.tgz", - "integrity": "sha512-+rAXGHh0fkEWdXBmX+L6mmfmXmXvDGEKzkjxO+8mP3+nI/r/CWznVBvsibXdxda9Zz0OW2e2ikphN3OwCT/jSg==", "dev": true, + "license": "MIT", "dependencies": { "cli-truncate": "^3.1.0", "colorette": "^2.0.20", @@ -19987,9 +18521,8 @@ }, "node_modules/listr2/node_modules/ansi-regex": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -19999,9 +18532,8 @@ }, "node_modules/listr2/node_modules/ansi-styles": { "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -20011,21 +18543,18 @@ }, "node_modules/listr2/node_modules/emoji-regex": { "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/listr2/node_modules/eventemitter3": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz", - "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/listr2/node_modules/string-width": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", "dev": true, + "license": "MIT", "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", @@ -20040,9 +18569,8 @@ }, "node_modules/listr2/node_modules/strip-ansi": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", "dev": true, + "license": "MIT", "dependencies": { "ansi-regex": "^6.0.1" }, @@ -20055,9 +18583,8 @@ }, "node_modules/listr2/node_modules/wrap-ansi": { "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", @@ -20072,13 +18599,11 @@ }, "node_modules/livereload-js": { "version": "2.4.0", - "resolved": "https://registry.npmjs.org/livereload-js/-/livereload-js-2.4.0.tgz", - "integrity": "sha512-XPQH8Z2GDP/Hwz2PCDrh2mth4yFejwA1OZ/81Ti3LgKyhDcEjsSsqFWZojHG0va/duGd+WyosY7eXLDoOyqcPw==" + "license": "MIT" }, "node_modules/load-json-file": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", - "integrity": 
"sha512-cy7ZdNRXdablkXYNI049pthVeXFurRyb9+hA/dZzerZ0pGTx42z+y+ssxBaVV2l70t1muq5IdKhn4UtcoGUY9A==", + "license": "MIT", "dependencies": { "graceful-fs": "^4.1.2", "parse-json": "^2.2.0", @@ -20092,8 +18617,7 @@ }, "node_modules/load-json-file/node_modules/parse-json": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==", + "license": "MIT", "dependencies": { "error-ex": "^1.2.0" }, @@ -20103,16 +18627,14 @@ }, "node_modules/load-json-file/node_modules/pify": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/load-json-file/node_modules/strip-bom": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", - "integrity": "sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==", + "license": "MIT", "dependencies": { "is-utf8": "^0.2.0" }, @@ -20122,21 +18644,18 @@ }, "node_modules/load-script": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", - "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ=" + "license": "MIT" }, "node_modules/loader-runner": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.2.0.tgz", - "integrity": "sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw==", + "license": "MIT", "engines": { "node": ">=6.11.5" } }, "node_modules/loader-utils": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz", - "integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==", + "license": "MIT", "dependencies": { "big.js": "^5.2.2", "emojis-list": "^3.0.0", @@ -20148,8 +18667,7 @@ }, "node_modules/locate-path": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "license": "MIT", "dependencies": { "p-locate": "^4.1.0" }, @@ -20159,138 +18677,111 @@ }, "node_modules/lodash": { "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + "license": "MIT" }, "node_modules/lodash-es": { "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", - "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" + "license": "MIT" }, "node_modules/lodash._reinterpolate": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", - "integrity": "sha512-xYHt68QRoYGjeeM/XOE1uJtvXQAgvszfBhjV4yvsQH0u2i9I6cI6c6/eG4Hh3UAOVn0y/xAXwmTzEay49Q//HA==" + "license": "MIT" }, "node_modules/lodash.assignin": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.assignin/-/lodash.assignin-4.2.0.tgz", - "integrity": "sha512-yX/rx6d/UTVh7sSVWVSIMjfnz95evAgDFdb1ZozC35I9mSFCkmzptOzevxjgbQUsc78NR44LVHWjsoMQXy9FDg==" + "license": "MIT" }, 
"node_modules/lodash.bind": { "version": "4.2.1", - "resolved": "https://registry.npmjs.org/lodash.bind/-/lodash.bind-4.2.1.tgz", - "integrity": "sha512-lxdsn7xxlCymgLYo1gGvVrfHmkjDiyqVv62FAeF2i5ta72BipE1SLxw8hPEPLhD4/247Ijw07UQH7Hq/chT5LA==" + "license": "MIT" }, "node_modules/lodash.chunk": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz", - "integrity": "sha512-ZzydJKfUHJwHa+hF5X66zLFCBrWn5GeF28OHEr4WVWtNDXlQ/IjWKPBiikqKo2ne0+v6JgCgJ0GzJp8k8bHC7w==" + "license": "MIT" }, "node_modules/lodash.clonedeep": { "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", - "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" + "license": "MIT" }, "node_modules/lodash.curry": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.curry/-/lodash.curry-4.1.1.tgz", - "integrity": "sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==" + "license": "MIT" }, "node_modules/lodash.debounce": { "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + "license": "MIT" }, "node_modules/lodash.defaults": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz", - "integrity": "sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==" + "license": "MIT" }, "node_modules/lodash.escape": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/lodash.escape/-/lodash.escape-4.0.1.tgz", - "integrity": "sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw==" + "license": "MIT" }, "node_modules/lodash.filter": { "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.filter/-/lodash.filter-4.6.0.tgz", - "integrity": "sha512-pXYUy7PR8BCLwX5mgJ/aNtyOvuJTdZAo9EQFUvMIYugqmJxnrYaANvTbgndOzHSCSR0wnlBBfRXJL5SbWxo3FQ==" + "license": "MIT" }, "node_modules/lodash.flatten": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz", - "integrity": "sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g==" + "license": "MIT" }, "node_modules/lodash.flattendeep": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", - "integrity": "sha512-uHaJFihxmJcEX3kT4I23ABqKKalJ/zDrDg0lsFtc1h+3uw49SIJ5beyhx5ExVRti3AvKoOJngIj7xz3oylPdWQ==" + "license": "MIT" }, "node_modules/lodash.flow": { "version": "3.5.0", - "resolved": "https://registry.npmjs.org/lodash.flow/-/lodash.flow-3.5.0.tgz", - "integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==" + "license": "MIT" }, "node_modules/lodash.foreach": { "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.foreach/-/lodash.foreach-4.5.0.tgz", - "integrity": "sha512-aEXTF4d+m05rVOAUG3z4vZZ4xVexLKZGF0lIxuHZ1Hplpk/3B6Z1+/ICICYRLm7c41Z2xiejbkCkJoTlypoXhQ==" + "license": "MIT" }, "node_modules/lodash.isequal": { "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", - "integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==" + "license": "MIT" }, 
"node_modules/lodash.map": { "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.map/-/lodash.map-4.6.0.tgz", - "integrity": "sha512-worNHGKLDetmcEYDvh2stPCrrQRkP20E4l0iIS7F8EvzMqBBi7ltvFN5m1HvTf1P7Jk1txKhvFcmYsCr8O2F1Q==" + "license": "MIT" }, "node_modules/lodash.memoize": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + "license": "MIT" }, "node_modules/lodash.merge": { "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" + "license": "MIT" }, "node_modules/lodash.padstart": { "version": "4.6.1", - "resolved": "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz", - "integrity": "sha512-sW73O6S8+Tg66eY56DBk85aQzzUJDtpoXFBgELMd5P/SotAguo+1kYO6RuYgXxA4HJH3LFTFPASX6ET6bjfriw==" + "license": "MIT" }, "node_modules/lodash.pick": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.pick/-/lodash.pick-4.4.0.tgz", - "integrity": "sha512-hXt6Ul/5yWjfklSGvLQl8vM//l3FtyHZeuelpzK6mm99pNvN9yTDruNZPEJZD1oWrqo+izBmB7oUfWgcCX7s4Q==" + "license": "MIT" }, "node_modules/lodash.reduce": { "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.reduce/-/lodash.reduce-4.6.0.tgz", - "integrity": "sha512-6raRe2vxCYBhpBu+B+TtNGUzah+hQjVdu3E17wfusjyrXBka2nBS8OH/gjVZ5PvHOhWmIZTYri09Z6n/QfnNMw==" + "license": "MIT" }, "node_modules/lodash.reject": { "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.reject/-/lodash.reject-4.6.0.tgz", - "integrity": "sha512-qkTuvgEzYdyhiJBx42YPzPo71R1aEr0z79kAv7Ixg8wPFEjgRgJdUsGMG3Hf3OYSF/kHI79XhNlt+5Ar6OzwxQ==" + "license": "MIT" }, "node_modules/lodash.some": { "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.some/-/lodash.some-4.6.0.tgz", - "integrity": "sha512-j7MJE+TuT51q9ggt4fSgVqro163BEFjAt3u97IqU+JA2DkWl80nFTrowzLpZ/BnpN7rrl0JA/593NAdd8p/scQ==" + "license": "MIT" }, "node_modules/lodash.sortby": { "version": "4.7.0", - "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", - "integrity": "sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==" + "license": "MIT" }, "node_modules/lodash.template": { "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz", - "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==", + "license": "MIT", "dependencies": { "lodash._reinterpolate": "^3.0.0", "lodash.templatesettings": "^4.0.0" @@ -20298,22 +18789,19 @@ }, "node_modules/lodash.templatesettings": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz", - "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==", + "license": "MIT", "dependencies": { "lodash._reinterpolate": "^3.0.0" } }, "node_modules/lodash.uniq": { "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" + "license": "MIT" }, "node_modules/log-update": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/log-update/-/log-update-5.0.1.tgz", - "integrity": 
"sha512-5UtUDQ/6edw4ofyljDNcOVJQ4c7OjDro4h3y8e1GQL5iYElYclVHJ3zeWchylvMaKnDbDilC8irOVyexnA/Slw==", "dev": true, + "license": "MIT", "dependencies": { "ansi-escapes": "^5.0.0", "cli-cursor": "^4.0.0", @@ -20330,9 +18818,8 @@ }, "node_modules/log-update/node_modules/ansi-escapes": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-5.0.0.tgz", - "integrity": "sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==", "dev": true, + "license": "MIT", "dependencies": { "type-fest": "^1.0.2" }, @@ -20345,9 +18832,8 @@ }, "node_modules/log-update/node_modules/ansi-regex": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -20357,9 +18843,8 @@ }, "node_modules/log-update/node_modules/ansi-styles": { "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -20369,15 +18854,13 @@ }, "node_modules/log-update/node_modules/emoji-regex": { "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/log-update/node_modules/string-width": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", "dev": true, + "license": "MIT", "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", @@ -20392,9 +18875,8 @@ }, "node_modules/log-update/node_modules/strip-ansi": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", "dev": true, + "license": "MIT", "dependencies": { "ansi-regex": "^6.0.1" }, @@ -20407,9 +18889,8 @@ }, "node_modules/log-update/node_modules/type-fest": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", - "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", "dev": true, + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -20419,9 +18900,8 @@ }, "node_modules/log-update/node_modules/wrap-ansi": { "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", @@ -20436,8 +18916,7 @@ }, "node_modules/logalot": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/logalot/-/logalot-2.1.0.tgz", - "integrity": "sha512-Ah4CgdSRfeCJagxQhcVNMi9BfGYyEKLa6d7OA6xSbld/Hg3Cf2QiOa1mDpmG7Ve8LOH6DN3mdttzjQAvWTyVkw==", + "license": "MIT", "dependencies": { "figures": "^1.3.5", "squeak": "^1.0.0" @@ -20448,16 +18927,14 @@ }, "node_modules/longest": { "version": "1.0.1", - 
"resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", - "integrity": "sha512-k+yt5n3l48JU4k8ftnKG6V7u32wyH2NfKzeMto9F/QRE0amxy/LayxwlvjjkZEIzqR+19IrtFO8p5kB9QaYUFg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/loose-envify": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, @@ -20467,8 +18944,7 @@ }, "node_modules/loud-rejection": { "version": "1.6.0", - "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", - "integrity": "sha512-RPNliZOFkqFumDhvYqOaNY4Uz9oJM2K9tC6JWsJJsNdhuONW4LQHRBpb0qf4pJApVffI5N39SwzWZJuEhfd7eQ==", + "license": "MIT", "dependencies": { "currently-unhandled": "^0.4.1", "signal-exit": "^3.0.0" @@ -20479,24 +18955,21 @@ }, "node_modules/lower-case": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "license": "MIT", "dependencies": { "tslib": "^2.0.3" } }, "node_modules/lowercase-keys": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/lpad-align": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/lpad-align/-/lpad-align-1.1.2.tgz", - "integrity": "sha512-MMIcFmmR9zlGZtBcFOows6c2COMekHCIFJz3ew/rRpKZ1wR4mXDPzvcVqLarux8M33X4TPSq2Jdw8WJj0q0KbQ==", + "license": "MIT", "dependencies": { "get-stdin": "^4.0.1", "indent-string": "^2.1.0", @@ -20512,8 +18985,7 @@ }, "node_modules/lpad-align/node_modules/indent-string": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", - "integrity": "sha512-aqwDFWSgSgfRaEwao5lg5KEcVd/2a+D1rvoG7NdilmYz0NwRk6StWpWdz/Hpk34MKPpx7s8XxUqimfcQK6gGlg==", + "license": "MIT", "dependencies": { "repeating": "^2.0.0" }, @@ -20523,8 +18995,7 @@ }, "node_modules/lru-cache": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "license": "ISC", "dependencies": { "yallist": "^4.0.0" }, @@ -20533,17 +19004,16 @@ } }, "node_modules/luxon": { - "version": "3.4.4", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.4.4.tgz", - "integrity": "sha512-zobTr7akeGHnv7eBOXcRgMeCP6+uyYsczwmeRCauvpvaAltgNyTbLH/+VaEAPUeWBT+1GuNmz4wC/6jtQzbbVA==", + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.5.0.tgz", + "integrity": "sha512-rh+Zjr6DNfUYR3bPwJEnuwDdqMbxZW7LOQfUN4B54+Cl+0o5zaU9RJ6bcidfDtC1cWCZXQ+nvX8bf6bAji37QQ==", "engines": { "node": ">=12" } }, "node_modules/make-dir": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "license": "MIT", "dependencies": { "semver": "^6.0.0" }, @@ -20556,41 +19026,36 @@ }, "node_modules/make-dir/node_modules/semver": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - 
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/makeerror": { "version": "1.0.12", - "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", - "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "tmpl": "1.0.5" } }, "node_modules/map-cache": { "version": "0.2.2", - "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", - "integrity": "sha512-8y/eV9QQZCiyn1SprXSrCmqJN0yNRATe+PO8ztwqrvrbdRLA3eYJF0yaR0YayLWkMbsQSKWS9N2gPcGEc4UsZg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/map-obj": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", - "integrity": "sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/map-visit": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", - "integrity": "sha512-4y7uGv8bd2WdM9vpQsiQNo41Ln1NvhvDRuVt0k2JZQ+ezN2uaQes7lZeZ+QQUHOLQAtDaBJ+7wCbi+ab/KFs+w==", + "license": "MIT", "dependencies": { "object-visit": "^1.0.0" }, @@ -20600,8 +19065,7 @@ }, "node_modules/markdown-escapes": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz", - "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -20609,16 +19073,14 @@ }, "node_modules/markdown-link": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/markdown-link/-/markdown-link-0.1.1.tgz", - "integrity": "sha512-TurLymbyLyo+kAUUAV9ggR9EPcDjP/ctlv9QAFiqUH7c+t6FlsbivPo9OKTU8xdOx9oNd2drW/Fi5RRElQbUqA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/markdown-toc": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/markdown-toc/-/markdown-toc-1.2.0.tgz", - "integrity": "sha512-eOsq7EGd3asV0oBfmyqngeEIhrbkc7XVP63OwcJBIhH2EpG2PzFcbZdhy1jutXSlRBBVMNXHvMtSr5LAxSUvUg==", + "license": "MIT", "dependencies": { "concat-stream": "^1.5.2", "diacritics-map": "^0.1.0", @@ -20642,24 +19104,21 @@ }, "node_modules/markdown-toc/node_modules/argparse": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/markdown-toc/node_modules/autolinker": { "version": "0.28.1", - "resolved": "https://registry.npmjs.org/autolinker/-/autolinker-0.28.1.tgz", - "integrity": "sha512-zQAFO1Dlsn69eXaO6+7YZc+v84aquQKbwpzCE3L0stj56ERn9hutFxPopViLjo9G+rWwjozRhgS5KJ25Xy19cQ==", + "license": "MIT", "dependencies": { "gulp-header": "^1.7.1" } }, "node_modules/markdown-toc/node_modules/gray-matter": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-2.1.1.tgz", - "integrity": "sha512-vbmvP1Fe/fxuT2QuLVcqb2BfK7upGhhbLIt9/owWEvPYrZZEkelLcq2HqzxosV+PQ67dUFLaAeNpH7C4hhICAA==", + "license": "MIT", "dependencies": { "ansi-red": "^0.1.1", "coffee-script": "^1.12.4", @@ -20673,8 +19132,7 @@ }, 
"node_modules/markdown-toc/node_modules/js-yaml": { "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -20685,8 +19143,7 @@ }, "node_modules/markdown-toc/node_modules/remarkable": { "version": "1.7.4", - "resolved": "https://registry.npmjs.org/remarkable/-/remarkable-1.7.4.tgz", - "integrity": "sha512-e6NKUXgX95whv7IgddywbeN/ItCkWbISmc2DiqHJb0wTrqZIexqdco5b8Z3XZoo/48IdNVKM9ZCvTPJ4F5uvhg==", + "license": "MIT", "dependencies": { "argparse": "^1.0.10", "autolinker": "~0.28.0" @@ -20698,20 +19155,24 @@ "node": ">= 0.10.0" } }, - "node_modules/matches-selector": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/matches-selector/-/matches-selector-1.2.0.tgz", - "integrity": "sha512-c4vLwYWyl+Ji+U43eU/G5FwxWd4ZH0ePUsFs5y0uwD9HUEFBXUQ1zUUan+78IpRD+y4pUfG0nAzNM292K7ItvA==" + "node_modules/marked": { + "version": "15.0.3", + "resolved": "https://registry.npmjs.org/marked/-/marked-15.0.3.tgz", + "integrity": "sha512-Ai0cepvl2NHnTcO9jYDtcOEtVBNVYR31XnEA3BndO7f5As1wzpcOceSUM8FDkNLJNIODcLpDTWay/qQhqbuMvg==", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 18" + } }, "node_modules/math-random": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.4.tgz", - "integrity": "sha512-rUxjysqif/BZQH2yhd5Aaq7vXMSx9NdEsQcyA07uEzIvxgI7zIr33gGsh+RU0/XjmQpCW7RsVof1vlkvQVCK5A==" + "license": "MIT" }, "node_modules/md5.js": { "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", + "license": "MIT", "dependencies": { "hash-base": "^3.0.0", "inherits": "^2.0.1", @@ -20720,8 +19181,7 @@ }, "node_modules/mdast-squeeze-paragraphs": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz", - "integrity": "sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==", + "license": "MIT", "dependencies": { "unist-util-remove": "^2.0.0" }, @@ -20732,8 +19192,7 @@ }, "node_modules/mdast-util-definitions": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz", - "integrity": "sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==", + "license": "MIT", "dependencies": { "unist-util-visit": "^2.0.0" }, @@ -20744,8 +19203,7 @@ }, "node_modules/mdast-util-definitions/node_modules/unist-util-visit": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0", @@ -20758,8 +19216,7 @@ }, "node_modules/mdast-util-definitions/node_modules/unist-util-visit-parents": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0" @@ -20771,8 +19228,7 
@@ }, "node_modules/mdast-util-from-markdown": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz", - "integrity": "sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==", + "license": "MIT", "dependencies": { "@types/mdast": "^3.0.0", "@types/unist": "^2.0.0", @@ -20794,8 +19250,7 @@ }, "node_modules/mdast-util-from-markdown/node_modules/mdast-util-to-string": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz", - "integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==", + "license": "MIT", "dependencies": { "@types/mdast": "^3.0.0" }, @@ -20806,8 +19261,7 @@ }, "node_modules/mdast-util-from-markdown/node_modules/unist-util-stringify-position": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -20818,8 +19272,7 @@ }, "node_modules/mdast-util-to-hast": { "version": "10.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz", - "integrity": "sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==", + "license": "MIT", "dependencies": { "@types/mdast": "^3.0.0", "@types/unist": "^2.0.0", @@ -20837,8 +19290,7 @@ }, "node_modules/mdast-util-to-hast/node_modules/unist-util-visit": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0", @@ -20851,8 +19303,7 @@ }, "node_modules/mdast-util-to-hast/node_modules/unist-util-visit-parents": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0" @@ -20864,8 +19315,7 @@ }, "node_modules/mdast-util-to-string": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", - "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -20873,26 +19323,22 @@ }, "node_modules/mdn-data": { "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" + "license": "CC0-1.0" }, "node_modules/mdurl": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" + "license": "MIT" }, "node_modules/media-typer": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": 
"sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/memfs": { "version": "3.4.7", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.4.7.tgz", - "integrity": "sha512-ygaiUSNalBX85388uskeCyhSAoOSgzBbtVCr9jA2RROssFL9Q19/ZXFqS+2Th2sr1ewNIWgFdLzLC3Yl1Zv+lw==", + "license": "Unlicense", "dependencies": { "fs-monkey": "^1.0.3" }, @@ -20902,13 +19348,11 @@ }, "node_modules/memoize-one": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-5.1.1.tgz", - "integrity": "sha512-HKeeBpWvqiVJD57ZUAsJNm71eHTykffzcLZVYWiVfQeI1rJtuEaS7hQiEpWfVVk18donPwJEcFKIkCmPJNOhHA==" + "license": "MIT" }, "node_modules/meow": { "version": "3.7.0", - "resolved": "https://registry.npmjs.org/meow/-/meow-3.7.0.tgz", - "integrity": "sha512-TNdwZs0skRlpPpCUK25StC4VH+tP5GgeY1HQOOGP+lQ2xtdkN2VtT/5tiX9k3IWpkBPV9b3LsAWXn4GGi/PrSA==", + "license": "MIT", "dependencies": { "camelcase-keys": "^2.0.0", "decamelize": "^1.1.2", @@ -20927,26 +19371,22 @@ }, "node_modules/merge-descriptors": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "license": "MIT" }, "node_modules/merge-stream": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + "license": "MIT" }, "node_modules/merge2": { "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/mermaid": { "version": "9.4.3", - "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-9.4.3.tgz", - "integrity": "sha512-TLkQEtqhRSuEHSE34lh5bCa94KATCyluAXmFnNI2PRZwOpXFeqiJWwZl+d2CcemE1RS6QbbueSSq9QIg8Uxcyw==", + "license": "MIT", "dependencies": { "@braintree/sanitize-url": "^6.0.0", "cytoscape": "^3.23.0", @@ -20968,38 +19408,32 @@ }, "node_modules/mermaid/node_modules/dompurify": { "version": "2.4.3", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-2.4.3.tgz", - "integrity": "sha512-q6QaLcakcRjebxjg8/+NP+h0rPfatOgOzc46Fst9VAA3jF2ApfKBNKMzdP4DYTqtUMXSCd5pRS/8Po/OmoCHZQ==" + "license": "(MPL-2.0 OR Apache-2.0)" }, "node_modules/mermaid/node_modules/uuid": { "version": "9.0.1", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", - "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", "funding": [ "https://github.com/sponsors/broofa", "https://github.com/sponsors/ctavan" ], + "license": "MIT", "bin": { "uuid": "dist/bin/uuid" } }, "node_modules/methods": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/microevent.ts": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/microevent.ts/-/microevent.ts-0.1.1.tgz", - "integrity": "sha512-jo1OfR4TaEwd5HOrt5+tAZ9mqT4jmpNAusXtyfNzqVm9uiSYFZlKM1wYL4oU7azZW/PxQW53wM0S6OR1JHNa2g==" + "license": "MIT" }, "node_modules/micromark": 
{ "version": "3.2.0", - "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.2.0.tgz", - "integrity": "sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==", "funding": [ { "type": "GitHub Sponsors", @@ -21010,6 +19444,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", @@ -21032,8 +19467,6 @@ }, "node_modules/micromark-core-commonmark": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz", - "integrity": "sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==", "funding": [ { "type": "GitHub Sponsors", @@ -21044,6 +19477,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-factory-destination": "^1.0.0", @@ -21065,8 +19499,6 @@ }, "node_modules/micromark-factory-destination": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz", - "integrity": "sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==", "funding": [ { "type": "GitHub Sponsors", @@ -21077,6 +19509,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-character": "^1.0.0", "micromark-util-symbol": "^1.0.0", @@ -21085,8 +19518,6 @@ }, "node_modules/micromark-factory-label": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz", - "integrity": "sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==", "funding": [ { "type": "GitHub Sponsors", @@ -21097,6 +19528,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-character": "^1.0.0", "micromark-util-symbol": "^1.0.0", @@ -21106,8 +19538,6 @@ }, "node_modules/micromark-factory-space": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", - "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", "funding": [ { "type": "GitHub Sponsors", @@ -21118,6 +19548,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-character": "^1.0.0", "micromark-util-types": "^1.0.0" @@ -21125,8 +19556,6 @@ }, "node_modules/micromark-factory-title": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz", - "integrity": "sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==", "funding": [ { "type": "GitHub Sponsors", @@ -21137,6 +19566,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-factory-space": "^1.0.0", "micromark-util-character": "^1.0.0", @@ -21146,8 +19576,6 @@ }, "node_modules/micromark-factory-whitespace": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz", - "integrity": "sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==", "funding": [ { "type": "GitHub Sponsors", @@ -21158,6 +19586,7 @@ "url": "https://opencollective.com/unified" } ], + 
"license": "MIT", "dependencies": { "micromark-factory-space": "^1.0.0", "micromark-util-character": "^1.0.0", @@ -21167,8 +19596,6 @@ }, "node_modules/micromark-util-character": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", - "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", "funding": [ { "type": "GitHub Sponsors", @@ -21179,6 +19606,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-symbol": "^1.0.0", "micromark-util-types": "^1.0.0" @@ -21186,8 +19614,6 @@ }, "node_modules/micromark-util-chunked": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz", - "integrity": "sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==", "funding": [ { "type": "GitHub Sponsors", @@ -21198,14 +19624,13 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-symbol": "^1.0.0" } }, "node_modules/micromark-util-classify-character": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz", - "integrity": "sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==", "funding": [ { "type": "GitHub Sponsors", @@ -21216,6 +19641,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-character": "^1.0.0", "micromark-util-symbol": "^1.0.0", @@ -21224,8 +19650,6 @@ }, "node_modules/micromark-util-combine-extensions": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz", - "integrity": "sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==", "funding": [ { "type": "GitHub Sponsors", @@ -21236,6 +19660,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-chunked": "^1.0.0", "micromark-util-types": "^1.0.0" @@ -21243,8 +19668,6 @@ }, "node_modules/micromark-util-decode-numeric-character-reference": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz", - "integrity": "sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==", "funding": [ { "type": "GitHub Sponsors", @@ -21255,14 +19678,13 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-symbol": "^1.0.0" } }, "node_modules/micromark-util-decode-string": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz", - "integrity": "sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==", "funding": [ { "type": "GitHub Sponsors", @@ -21273,6 +19695,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-util-character": "^1.0.0", @@ -21282,8 +19705,6 @@ }, "node_modules/micromark-util-encode": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz", - "integrity": 
"sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==", "funding": [ { "type": "GitHub Sponsors", @@ -21293,12 +19714,11 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT" }, "node_modules/micromark-util-html-tag-name": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz", - "integrity": "sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==", "funding": [ { "type": "GitHub Sponsors", @@ -21308,12 +19728,11 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT" }, "node_modules/micromark-util-normalize-identifier": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz", - "integrity": "sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==", "funding": [ { "type": "GitHub Sponsors", @@ -21324,14 +19743,13 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-symbol": "^1.0.0" } }, "node_modules/micromark-util-resolve-all": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz", - "integrity": "sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==", "funding": [ { "type": "GitHub Sponsors", @@ -21342,14 +19760,13 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-types": "^1.0.0" } }, "node_modules/micromark-util-sanitize-uri": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz", - "integrity": "sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==", "funding": [ { "type": "GitHub Sponsors", @@ -21360,6 +19777,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-character": "^1.0.0", "micromark-util-encode": "^1.0.0", @@ -21368,8 +19786,6 @@ }, "node_modules/micromark-util-subtokenize": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz", - "integrity": "sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==", "funding": [ { "type": "GitHub Sponsors", @@ -21380,6 +19796,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-chunked": "^1.0.0", "micromark-util-symbol": "^1.0.0", @@ -21389,8 +19806,6 @@ }, "node_modules/micromark-util-symbol": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", - "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", "funding": [ { "type": "GitHub Sponsors", @@ -21400,12 +19815,11 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT" }, "node_modules/micromark-util-types": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", - "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", "funding": [ { "type": 
"GitHub Sponsors", @@ -21415,12 +19829,12 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT" }, "node_modules/micromatch": { "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "license": "MIT", "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" @@ -21431,8 +19845,7 @@ }, "node_modules/miller-rabin": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "license": "MIT", "dependencies": { "bn.js": "^4.0.0", "brorand": "^1.0.1" @@ -21443,13 +19856,11 @@ }, "node_modules/miller-rabin/node_modules/bn.js": { "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "license": "MIT" }, "node_modules/mime": { "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", "bin": { "mime": "cli.js" }, @@ -21459,24 +19870,21 @@ }, "node_modules/mime-db": { "version": "1.51.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.51.0.tgz", - "integrity": "sha512-5y8A56jg7XVQx2mbv1lu49NR4dokRnhZYTtL+KGfaa27uq4pSTXkwQkFJl4pkRMyNFz/EtYDSkiiEHx3F7UN6g==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/mime-format": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mime-format/-/mime-format-2.0.1.tgz", - "integrity": "sha512-XxU3ngPbEnrYnNbIX+lYSaYg0M01v6p2ntd2YaFksTu0vayaw5OJvbdRyWs07EYRlLED5qadUZ+xo+XhOvFhwg==", + "license": "Apache-2.0", "dependencies": { "charset": "^1.0.0" } }, "node_modules/mime-types": { "version": "2.1.34", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.34.tgz", - "integrity": "sha512-6cP692WwGIs9XXdOO4++N+7qjqv0rqxxVvJ3VHPh/Sc9mVZcQP+ZGhkKiTvWMQRr2tbHkJP/Yn7Y0npb3ZBs4A==", + "license": "MIT", "dependencies": { "mime-db": "1.51.0" }, @@ -21486,39 +19894,36 @@ }, "node_modules/mimic-fn": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/mimic-response": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/min-dash": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/min-dash/-/min-dash-4.2.1.tgz", - "integrity": "sha512-to+unsToePnm7cUeR9TrMzFlETHd/UXmU+ELTRfWZj5XGT41KF6X3L233o3E/GdEs3sk2Tbw/lOLD1avmWkg8A==" + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/min-dash/-/min-dash-4.2.2.tgz", + "integrity": "sha512-qbhSYUxk6mBaF096B3JOQSumXbKWHenmT97cSpdNzgkWwGjhjhE/KZODCoDNhI2I4C9Cb6R/Q13S4BYkUSXoXQ==" }, "node_modules/min-dom": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/min-dom/-/min-dom-4.1.0.tgz", - "integrity": 
"sha512-1lj1EyoSwY/UmTeT/hhPiZTsq+vK9D+8FAJ/53iK5jT1otkG9rJTixSKdjmTieEvdfES+sKbbTptzaQJhnacjA==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/min-dom/-/min-dom-4.2.1.tgz", + "integrity": "sha512-TMoL8SEEIhUWYgkj7XMSgxmwSyGI+4fP2KFFGnN3FbHfbGHVdsLYSz8LoIsgPhz4dWRmLvxWWSMgzZMJW5sZuA==", "dependencies": { "component-event": "^0.2.1", "domify": "^1.4.1", - "min-dash": "^4.0.0" + "min-dash": "^4.2.1" } }, "node_modules/mini-create-react-context": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/mini-create-react-context/-/mini-create-react-context-0.4.1.tgz", - "integrity": "sha512-YWCYEmd5CQeHGSAKrYvXgmzzkrvssZcuuQDDeqkT+PziKGMgE+0MCCtcKbROzocGBG1meBLl2FotlRwf4gAzbQ==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.12.1", "tiny-warning": "^1.0.3" @@ -21530,8 +19935,7 @@ }, "node_modules/mini-css-extract-plugin": { "version": "2.6.1", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.6.1.tgz", - "integrity": "sha512-wd+SD57/K6DiV7jIR34P+s3uckTRuQvx0tKPcvjFlrEylk6P4mQ2KSWk1hblj1Kxaqok7LogKOieygXqBczNlg==", + "license": "MIT", "dependencies": { "schema-utils": "^4.0.0" }, @@ -21548,8 +19952,7 @@ }, "node_modules/mini-css-extract-plugin/node_modules/ajv": { "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -21563,8 +19966,7 @@ }, "node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3" }, @@ -21574,13 +19976,11 @@ }, "node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.8.0", @@ -21597,18 +19997,15 @@ }, "node_modules/minimalistic-assert": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + "license": "ISC" }, "node_modules/minimalistic-crypto-utils": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==" + "license": "MIT" }, "node_modules/minimatch": { "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "license": "ISC", 
"dependencies": { "brace-expansion": "^1.1.7" }, @@ -21618,26 +20015,23 @@ }, "node_modules/minimist": { "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" + "license": "MIT" }, "node_modules/minipass": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", "engines": { "node": ">=16 || 14 >=14.17" } }, "node_modules/mitt": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.1.tgz", - "integrity": "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==" + "license": "MIT" }, "node_modules/mixin-deep": { "version": "1.3.2", - "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", - "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", + "license": "MIT", "dependencies": { "for-in": "^1.0.2", "is-extendable": "^1.0.1" @@ -21648,8 +20042,7 @@ }, "node_modules/mixin-deep/node_modules/is-extendable": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4" }, @@ -21658,14 +20051,16 @@ } }, "node_modules/mixpanel-browser": { - "version": "2.47.0", - "resolved": "https://registry.npmjs.org/mixpanel-browser/-/mixpanel-browser-2.47.0.tgz", - "integrity": "sha512-Ldrva0fRBEIFWmEibBQO1PulfpJVF3pf28Guk09lDirDaSQqqU/xs9zQLwN2rL5VwVtsP1aD3JaCgaa98EjojQ==" + "version": "2.56.0", + "resolved": "https://registry.npmjs.org/mixpanel-browser/-/mixpanel-browser-2.56.0.tgz", + "integrity": "sha512-GYeEz58pV2M9MZtK8vSPL4oJmCwGS08FDDRZvZwr5VJpWdT4Lgyg6zXhmNfCmSTEIw2coaarm7HZ4FL9dAVvnA==", + "dependencies": { + "rrweb": "2.0.0-alpha.13" + } }, "node_modules/mkdirp": { "version": "0.5.6", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", - "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "license": "MIT", "dependencies": { "minimist": "^1.2.6" }, @@ -21675,34 +20070,29 @@ }, "node_modules/moo": { "version": "0.5.2", - "resolved": "https://registry.npmjs.org/moo/-/moo-0.5.2.tgz", - "integrity": "sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==" + "license": "BSD-3-Clause" }, "node_modules/mri": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", - "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/mrmime": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.0.tgz", - "integrity": "sha512-a70zx7zFfVO7XpnQ2IX1Myh9yY4UYvfld/dikWRnsXxbyvMcfz+u6UfgNAtH+k2QqtJuzVpv6eLTx1G2+WKZbQ==", + "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/ms": { "version": "2.1.2", - "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "license": "MIT" }, "node_modules/multicast-dns": { "version": "7.2.5", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", - "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "license": "MIT", "dependencies": { "dns-packet": "^5.2.2", "thunky": "^1.0.2" @@ -21713,16 +20103,14 @@ }, "node_modules/mustache": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", - "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==", + "license": "MIT", "bin": { "mustache": "bin/mustache" } }, "node_modules/mz": { "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "license": "MIT", "dependencies": { "any-promise": "^1.0.0", "object-assign": "^4.0.1", @@ -21730,9 +20118,15 @@ } }, "node_modules/nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==", + "version": "3.3.8", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -21742,8 +20136,7 @@ }, "node_modules/nanomatch": { "version": "1.2.13", - "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "license": "MIT", "dependencies": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -21763,8 +20156,7 @@ }, "node_modules/nanomatch/node_modules/extend-shallow": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "license": "MIT", "dependencies": { "assign-symbols": "^1.0.0", "is-extendable": "^1.0.1" @@ -21775,8 +20167,7 @@ }, "node_modules/nanomatch/node_modules/is-extendable": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4" }, @@ -21786,14 +20177,12 @@ }, "node_modules/natural-compare": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/nearley": { "version": "2.20.1", - "resolved": "https://registry.npmjs.org/nearley/-/nearley-2.20.1.tgz", - "integrity": "sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==", + "license": "MIT", "dependencies": { "commander": "^2.19.0", "moo": "^0.5.0", @@ -21813,31 +20202,26 @@ }, 
"node_modules/nearley/node_modules/commander": { "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + "license": "MIT" }, "node_modules/negotiator": { "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/neo-async": { "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + "license": "MIT" }, "node_modules/nice-try": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" + "license": "MIT" }, "node_modules/no-case": { "version": "3.0.4", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", - "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "license": "MIT", "dependencies": { "lower-case": "^2.0.2", "tslib": "^2.0.3" @@ -21845,16 +20229,14 @@ }, "node_modules/node-emoji": { "version": "1.11.0", - "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", - "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==", + "license": "MIT", "dependencies": { "lodash": "^4.17.21" } }, "node_modules/node-fetch": { "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", "dependencies": { "whatwg-url": "^5.0.0" }, @@ -21872,8 +20254,7 @@ }, "node_modules/node-fetch-h2": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/node-fetch-h2/-/node-fetch-h2-2.3.0.tgz", - "integrity": "sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==", + "license": "MIT", "dependencies": { "http2-client": "^1.2.5" }, @@ -21883,22 +20264,19 @@ }, "node_modules/node-forge": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", - "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "license": "(BSD-3-Clause OR GPL-2.0)", "engines": { "node": ">= 6.13.0" } }, "node_modules/node-int64": { "version": "0.4.0", - "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", - "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/node-polyfill-webpack-plugin": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/node-polyfill-webpack-plugin/-/node-polyfill-webpack-plugin-2.0.1.tgz", - "integrity": "sha512-ZUMiCnZkP1LF0Th2caY6J/eKKoA0TefpoVa68m/LQU1I/mE8rGt4fNYGgNuCcK+aG8P8P43nbeJ2RqJMOL/Y1A==", + "license": "MIT", "dependencies": { "assert": "^2.0.0", "browserify-zlib": "^0.2.0", @@ -21935,8 +20313,6 @@ }, "node_modules/node-polyfill-webpack-plugin/node_modules/buffer": { "version": "6.0.3", - "resolved": 
"https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", "funding": [ { "type": "github", @@ -21951,6 +20327,7 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.2.1" @@ -21958,8 +20335,7 @@ }, "node_modules/node-polyfill-webpack-plugin/node_modules/readable-stream": { "version": "4.5.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.5.2.tgz", - "integrity": "sha512-yjavECdqeZ3GLXNgRXgeQEdz9fvDDkNKyHnbHRFtOr7/LcfgBcmct7t/ET+HaCTqfh06OzoAxrkN/IfjJBVe+g==", + "license": "MIT", "dependencies": { "abort-controller": "^3.0.0", "buffer": "^6.0.3", @@ -21973,8 +20349,7 @@ }, "node_modules/node-polyfill-webpack-plugin/node_modules/type-fest": { "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=12.20" }, @@ -21984,26 +20359,23 @@ }, "node_modules/node-readfiles": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/node-readfiles/-/node-readfiles-0.2.0.tgz", - "integrity": "sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA==", + "license": "MIT", "dependencies": { "es6-promise": "^3.2.1" } }, "node_modules/node-releases": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", - "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==" + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" }, "node_modules/non-layered-tidy-tree-layout": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz", - "integrity": "sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw==" + "license": "MIT" }, "node_modules/normalize-package-data": { "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "license": "BSD-2-Clause", "dependencies": { "hosted-git-info": "^2.1.4", "resolve": "^1.10.0", @@ -22013,32 +20385,28 @@ }, "node_modules/normalize-package-data/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/normalize-path": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/normalize-range": { "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": 
"sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/normalize-url": { "version": "6.1.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", - "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -22048,8 +20416,7 @@ }, "node_modules/npm-conf": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/npm-conf/-/npm-conf-1.1.3.tgz", - "integrity": "sha512-Yic4bZHJOt9RCFbRP3GgpqhScOY4HH3V2P8yBj6CeYq118Qr+BLXqT2JvpJ00mryLESpgOxf5XlFv4ZjXxLScw==", + "license": "MIT", "dependencies": { "config-chain": "^1.1.11", "pify": "^3.0.0" @@ -22060,16 +20427,14 @@ }, "node_modules/npm-conf/node_modules/pify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/npm-run-path": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "license": "MIT", "dependencies": { "path-key": "^3.0.0" }, @@ -22079,13 +20444,11 @@ }, "node_modules/nprogress": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", - "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" + "license": "MIT" }, "node_modules/nth-check": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz", - "integrity": "sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "^1.0.0" }, @@ -22095,21 +20458,18 @@ }, "node_modules/num2fraction": { "version": "1.2.2", - "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", - "integrity": "sha512-Y1wZESM7VUThYY+4W+X4ySH2maqcA+p7UR+w8VWNWVAd6lwuXXWz/w/Cz43J/dI2I+PS6wD5N+bJUF+gjWvIqg==" + "license": "MIT" }, "node_modules/oas-kit-common": { "version": "1.0.8", - "resolved": "https://registry.npmjs.org/oas-kit-common/-/oas-kit-common-1.0.8.tgz", - "integrity": "sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ==", + "license": "BSD-3-Clause", "dependencies": { "fast-safe-stringify": "^2.0.7" } }, "node_modules/oas-linter": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/oas-linter/-/oas-linter-3.2.2.tgz", - "integrity": "sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ==", + "license": "BSD-3-Clause", "dependencies": { "@exodus/schemasafe": "^1.0.0-rc.2", "should": "^13.2.1", @@ -22121,8 +20481,7 @@ }, "node_modules/oas-resolver": { "version": "2.5.6", - "resolved": "https://registry.npmjs.org/oas-resolver/-/oas-resolver-2.5.6.tgz", - "integrity": "sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ==", + "license": "BSD-3-Clause", "dependencies": { "node-fetch-h2": "^2.3.0", "oas-kit-common": "^1.0.8", @@ -22139,8 +20498,7 @@ }, "node_modules/oas-resolver-browser": { "version": "2.5.2", - "resolved": 
"https://registry.npmjs.org/oas-resolver-browser/-/oas-resolver-browser-2.5.2.tgz", - "integrity": "sha512-L3ugWyBHOpKLT+lb+pFXCOpk3byh6usis5T9u9mfu92jH5bR6YK8MA2bebUTIjY7I4415PzDeZcmcc+i7X05MA==", + "license": "BSD-3-Clause", "dependencies": { "node-fetch-h2": "^2.3.0", "oas-kit-common": "^1.0.8", @@ -22158,8 +20516,7 @@ }, "node_modules/oas-resolver-browser/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -22172,16 +20529,14 @@ }, "node_modules/oas-resolver-browser/node_modules/camelcase": { "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/oas-resolver-browser/node_modules/cliui": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", - "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "license": "ISC", "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", @@ -22190,8 +20545,7 @@ }, "node_modules/oas-resolver-browser/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -22201,13 +20555,11 @@ }, "node_modules/oas-resolver-browser/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/oas-resolver-browser/node_modules/wrap-ansi": { "version": "6.2.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -22219,13 +20571,11 @@ }, "node_modules/oas-resolver-browser/node_modules/y18n": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + "license": "ISC" }, "node_modules/oas-resolver-browser/node_modules/yargs": { "version": "15.4.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", - "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "license": "MIT", "dependencies": { "cliui": "^6.0.0", "decamelize": "^1.2.0", @@ -22245,8 +20595,7 @@ }, "node_modules/oas-resolver-browser/node_modules/yargs-parser": { "version": "18.1.3", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", - "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "license": "ISC", "dependencies": { "camelcase": "^5.0.0", "decamelize": "^1.2.0" @@ -22257,16 +20606,14 @@ }, "node_modules/oas-schema-walker": { "version": 
"1.1.5", - "resolved": "https://registry.npmjs.org/oas-schema-walker/-/oas-schema-walker-1.1.5.tgz", - "integrity": "sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ==", + "license": "BSD-3-Clause", "funding": { "url": "https://github.com/Mermade/oas-kit?sponsor=1" } }, "node_modules/oas-validator": { "version": "5.0.8", - "resolved": "https://registry.npmjs.org/oas-validator/-/oas-validator-5.0.8.tgz", - "integrity": "sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw==", + "license": "BSD-3-Clause", "dependencies": { "call-me-maybe": "^1.0.1", "oas-kit-common": "^1.0.8", @@ -22283,24 +20630,21 @@ }, "node_modules/oauth-sign": { "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", + "license": "Apache-2.0", "engines": { "node": "*" } }, "node_modules/object-assign": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/object-copy": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", - "integrity": "sha512-79LYn6VAb63zgtmAteVOWo9Vdj71ZVBy3Pbse+VqxDpEP83XuujMrGqHIwAXJ5I/aM0zU7dIyIAhifVTPrNItQ==", + "license": "MIT", "dependencies": { "copy-descriptor": "^0.1.0", "define-property": "^0.2.5", @@ -22312,8 +20656,7 @@ }, "node_modules/object-copy/node_modules/define-property": { "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "license": "MIT", "dependencies": { "is-descriptor": "^0.1.0" }, @@ -22323,8 +20666,7 @@ }, "node_modules/object-copy/node_modules/is-accessor-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -22334,13 +20676,11 @@ }, "node_modules/object-copy/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/object-copy/node_modules/is-data-descriptor": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -22350,8 +20690,7 @@ }, "node_modules/object-copy/node_modules/is-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "license": "MIT", "dependencies": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -22363,16 +20702,14 @@ }, "node_modules/object-copy/node_modules/is-descriptor/node_modules/kind-of": { "version": "5.1.0", - 
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/object-copy/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -22382,16 +20719,14 @@ }, "node_modules/object-inspect": { "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/object-is": { "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3" @@ -22405,16 +20740,14 @@ }, "node_modules/object-keys": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "license": "MIT", "engines": { "node": ">= 0.4" } }, "node_modules/object-visit": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", - "integrity": "sha512-GBaMwwAVK9qbQN3Scdo0OyvgPW7l3lnaVMj84uTOZlswkX0KpF6fyDBJhtTthf7pymztoN36/KEr1DyhF96zEA==", + "license": "MIT", "dependencies": { "isobject": "^3.0.0" }, @@ -22424,8 +20757,7 @@ }, "node_modules/object.assign": { "version": "4.1.4", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", - "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -22441,8 +20773,7 @@ }, "node_modules/object.entries": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -22454,8 +20785,7 @@ }, "node_modules/object.fromentries": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -22470,8 +20800,7 @@ }, "node_modules/object.getownpropertydescriptors": { "version": "2.1.5", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.5.tgz", - "integrity": "sha512-yDNzckpM6ntyQiGTik1fKV1DcVDRS+w8bvpWNCBanvH5LfRX9O8WTHqQzG4RZwRAM4I0oU7TV11Lj5v0g20ibw==", + "license": "MIT", "dependencies": { "array.prototype.reduce": "^1.0.5", "call-bind": "^1.0.2", @@ -22487,8 +20816,7 @@ }, "node_modules/object.pick": { "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", - "integrity": "sha512-tqa/UMy/CCoYmj+H5qc07qvSL9dqcs/WZENZ1JbtWBlATP+iVOe778gE6MSijnyCnORzDuX6hU+LA4SZ09YjFQ==", + "license": "MIT", "dependencies": { "isobject": "^3.0.1" }, @@ -22498,8 +20826,7 @@ }, "node_modules/object.values": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -22514,13 +20841,11 @@ }, "node_modules/obuf": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + "license": "MIT" }, "node_modules/on-finished": { "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", "dependencies": { "ee-first": "1.1.1" }, @@ -22530,24 +20855,21 @@ }, "node_modules/on-headers": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/once": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "license": "ISC", "dependencies": { "wrappy": "1" } }, "node_modules/onetime": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", "dependencies": { "mimic-fn": "^2.1.0" }, @@ -22560,8 +20882,7 @@ }, "node_modules/open": { "version": "8.4.0", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.0.tgz", - "integrity": "sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==", + "license": "MIT", "dependencies": { "define-lazy-prop": "^2.0.0", "is-docker": "^2.1.1", @@ -22576,17 +20897,15 @@ }, "node_modules/opener": { "version": "1.5.2", - "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", - "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "license": "(WTFPL OR MIT)", "bin": { "opener": "bin/opener-bin.js" } }, "node_modules/optipng-bin": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/optipng-bin/-/optipng-bin-5.1.0.tgz", - "integrity": "sha512-9baoqZTNNmXQjq/PQTWEXbVV3AMO2sI/GaaqZJZ8SExfAzjijeAP7FEeT+TtyumSw7gr0PZtSUYB/Ke7iHQVKA==", "hasInstallScript": true, + "license": "MIT", "dependencies": { "bin-build": "^3.0.0", "bin-wrapper": "^4.0.0", @@ -22601,13 +20920,11 @@ }, "node_modules/os-browserify": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", - "integrity": "sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==" + "license": "MIT" }, "node_modules/os-filter-obj": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/os-filter-obj/-/os-filter-obj-2.0.0.tgz", - "integrity": 
"sha512-uksVLsqG3pVdzzPvmAHpBK0wKxYItuzZr7SziusRPoz67tGV8rL1szZ6IdeUrbqLjGDwApBtN29eEE3IqGHOjg==", + "license": "MIT", "dependencies": { "arch": "^2.1.0" }, @@ -22617,16 +20934,14 @@ }, "node_modules/p-cancelable": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/p-event": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-event/-/p-event-1.3.0.tgz", - "integrity": "sha512-hV1zbA7gwqPVFcapfeATaNjQ3J0NuzorHPyG8GPL9g/Y/TplWVBVoCKCXL6Ej2zscrCEv195QNWJXuBH6XZuzA==", + "license": "MIT", "dependencies": { "p-timeout": "^1.1.1" }, @@ -22636,24 +20951,21 @@ }, "node_modules/p-finally": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/p-is-promise": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz", - "integrity": "sha512-zL7VE4JVS2IFSkR2GQKDSPEVxkoH43/p7oEnwpdCndKYJO0HVeRB7fA8TJwuLOTBREtK0ea8eHaxdwcpob5dmg==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/p-limit": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "license": "MIT", "dependencies": { "p-try": "^2.0.0" }, @@ -22666,8 +20978,7 @@ }, "node_modules/p-locate": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "license": "MIT", "dependencies": { "p-limit": "^2.2.0" }, @@ -22677,8 +20988,7 @@ }, "node_modules/p-map": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "license": "MIT", "dependencies": { "aggregate-error": "^3.0.0" }, @@ -22691,8 +21001,7 @@ }, "node_modules/p-map-series": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-map-series/-/p-map-series-1.0.0.tgz", - "integrity": "sha512-4k9LlvY6Bo/1FcIdV33wqZQES0Py+iKISU9Uc8p8AjWoZPnFKMpVIVD3s0EYn4jzLh1I+WeUZkJ0Yoa4Qfw3Kg==", + "license": "MIT", "dependencies": { "p-reduce": "^1.0.0" }, @@ -22702,24 +21011,21 @@ }, "node_modules/p-pipe": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/p-pipe/-/p-pipe-1.2.0.tgz", - "integrity": "sha512-IA8SqjIGA8l9qOksXJvsvkeQ+VGb0TAzNCzvKvz9wt5wWLqfWbV6fXy43gpR2L4Te8sOq3S+Ql9biAaMKPdbtw==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/p-reduce": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-1.0.0.tgz", - "integrity": "sha512-3Tx1T3oM1xO/Y8Gj0sWyE78EIJZ+t+aEmXUdvQgvGmSMri7aPTHoovbXEreWKkL5j21Er60XAWLTzKbAKYOujQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/p-retry": { "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "license": "MIT", "dependencies": { "@types/retry": 
"0.12.0", "retry": "^0.13.1" @@ -22730,8 +21036,7 @@ }, "node_modules/p-timeout": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-1.2.1.tgz", - "integrity": "sha512-gb0ryzr+K2qFqFv6qi3khoeqMZF/+ajxQipEF6NteZVnvz9tzdsfAVj3lYtn1gAXvH5lfLwfxEII799gt/mRIA==", + "license": "MIT", "dependencies": { "p-finally": "^1.0.0" }, @@ -22741,16 +21046,14 @@ }, "node_modules/p-try": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/package-json": { "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", + "license": "MIT", "dependencies": { "got": "^9.6.0", "registry-auth-token": "^4.0.0", @@ -22763,21 +21066,18 @@ }, "node_modules/package-json/node_modules/semver": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/pako": { "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" + "license": "(MIT AND Zlib)" }, "node_modules/param-case": { "version": "3.0.4", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", - "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "license": "MIT", "dependencies": { "dot-case": "^3.0.4", "tslib": "^2.0.3" @@ -22785,8 +21085,7 @@ }, "node_modules/parent-module": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "license": "MIT", "dependencies": { "callsites": "^3.0.0" }, @@ -22796,8 +21095,7 @@ }, "node_modules/parse-asn1": { "version": "5.1.6", - "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz", - "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==", + "license": "ISC", "dependencies": { "asn1.js": "^5.2.0", "browserify-aes": "^1.0.0", @@ -22808,8 +21106,7 @@ }, "node_modules/parse-entities": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", - "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "license": "MIT", "dependencies": { "character-entities": "^1.0.0", "character-entities-legacy": "^1.0.0", @@ -22825,8 +21122,7 @@ }, "node_modules/parse-json": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", @@ -22842,13 +21138,11 @@ }, "node_modules/parse-numeric-range": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", - 
"integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" + "license": "ISC" }, "node_modules/parse5": { "version": "7.1.2", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", - "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "license": "MIT", "dependencies": { "entities": "^4.4.0" }, @@ -22858,8 +21152,7 @@ }, "node_modules/parse5-htmlparser2-tree-adapter": { "version": "7.0.0", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", - "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", + "license": "MIT", "dependencies": { "domhandler": "^5.0.2", "parse5": "^7.0.0" @@ -22870,8 +21163,7 @@ }, "node_modules/parse5-htmlparser2-tree-adapter/node_modules/domhandler": { "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "license": "BSD-2-Clause", "dependencies": { "domelementtype": "^2.3.0" }, @@ -22884,8 +21176,7 @@ }, "node_modules/parse5/node_modules/entities": { "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "license": "BSD-2-Clause", "engines": { "node": ">=0.12" }, @@ -22895,16 +21186,14 @@ }, "node_modules/parseurl": { "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/pascal-case": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", - "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "license": "MIT", "dependencies": { "no-case": "^3.0.4", "tslib": "^2.0.3" @@ -22912,16 +21201,14 @@ }, "node_modules/pascalcase": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", - "integrity": "sha512-XHXfu/yOQRy9vYOtUDVMN60OEJjW013GoObG1o+xwQTpB9eYJX/BjXMsdW13ZDPruFhYYn0AG22w0xgQMwl3Nw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/path": { "version": "0.12.7", - "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", - "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", + "license": "MIT", "dependencies": { "process": "^0.11.1", "util": "^0.10.3" @@ -22929,104 +21216,89 @@ }, "node_modules/path-browserify": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", - "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==" + "license": "MIT" }, "node_modules/path-dirname": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", - "integrity": "sha512-ALzNPpyNq9AqXMBjeymIjFDAkAFH06mHJH/cSBHAgU0s4vfpBn6b2nf8tiRLvagKD8RbTpq2FKTBg7cl9l3c7Q==" + "license": "MIT" }, "node_modules/path-exists": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - 
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/path-is-absolute": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/path-is-inside": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=" + "license": "(WTFPL OR MIT)" }, "node_modules/path-key": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/path-parse": { "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + "license": "MIT" }, "node_modules/path-scurry": { - "version": "1.10.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz", - "integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==", + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", "dependencies": { - "lru-cache": "^9.1.1 || ^10.0.0", + "lru-cache": "^10.2.0", "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">=16 || 14 >=14.18" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/path-scurry/node_modules/lru-cache": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.1.0.tgz", - "integrity": "sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag==", - "engines": { - "node": "14 || >=16.14" - } + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==" }, "node_modules/path-to-regexp": { "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "license": "MIT", "dependencies": { "isarray": "0.0.1" } }, "node_modules/path-type": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/path/node_modules/inherits": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + "license": "ISC" }, "node_modules/path/node_modules/util": { "version": "0.10.4", - "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", - "integrity": 
"sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "license": "MIT", "dependencies": { "inherits": "2.0.3" } }, "node_modules/pbkdf2": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz", - "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==", + "license": "MIT", "dependencies": { "create-hash": "^1.1.2", "create-hmac": "^1.1.4", @@ -23040,23 +21312,20 @@ }, "node_modules/pend": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==" + "license": "MIT" }, "node_modules/performance-now": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==" + "license": "MIT" }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==" }, "node_modules/picomatch": { "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", "engines": { "node": ">=8.6" }, @@ -23066,9 +21335,8 @@ }, "node_modules/pidtree": { "version": "0.6.0", - "resolved": "https://registry.npmjs.org/pidtree/-/pidtree-0.6.0.tgz", - "integrity": "sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==", "dev": true, + "license": "MIT", "bin": { "pidtree": "bin/pidtree.js" }, @@ -23078,24 +21346,21 @@ }, "node_modules/pify": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/pinkie": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", - "integrity": "sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/pinkie-promise": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", - "integrity": "sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==", + "license": "MIT", "dependencies": { "pinkie": "^2.0.0" }, @@ -23105,16 +21370,14 @@ }, "node_modules/pirates": { "version": "4.0.5", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz", - "integrity": "sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==", + "license": "MIT", "engines": { "node": ">= 6" } }, "node_modules/pkg-dir": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": 
"sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "license": "MIT", "dependencies": { "find-up": "^4.0.0" }, @@ -23124,8 +21387,7 @@ }, "node_modules/pkg-up": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", - "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "license": "MIT", "dependencies": { "find-up": "^3.0.0" }, @@ -23135,8 +21397,7 @@ }, "node_modules/pkg-up/node_modules/find-up": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "license": "MIT", "dependencies": { "locate-path": "^3.0.0" }, @@ -23146,8 +21407,7 @@ }, "node_modules/pkg-up/node_modules/locate-path": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "license": "MIT", "dependencies": { "p-locate": "^3.0.0", "path-exists": "^3.0.0" @@ -23158,8 +21418,7 @@ }, "node_modules/pkg-up/node_modules/p-locate": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "license": "MIT", "dependencies": { "p-limit": "^2.0.0" }, @@ -23169,52 +21428,49 @@ }, "node_modules/pkg-up/node_modules/path-exists": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/playwright": { - "version": "1.32.2", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.32.2.tgz", - "integrity": "sha512-jHVnXJke0PXpuPszKtk9y1zZSlzO5+2a+aockT/AND0oeXx46FiJEFrafthurglLygVZA+1gEbtUM1C7qtTV+Q==", + "version": "1.49.0", "dev": true, - "hasInstallScript": true, + "license": "Apache-2.0", "dependencies": { - "playwright-core": "1.32.2" + "playwright-core": "1.49.0" }, "bin": { "playwright": "cli.js" }, "engines": { - "node": ">=14" + "node": ">=18" + }, + "optionalDependencies": { + "fsevents": "2.3.2" } }, - "node_modules/playwright-core": { - "version": "1.32.2", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.32.2.tgz", - "integrity": "sha512-zD7aonO+07kOTthsrCR3YCVnDcqSHIJpdFUtZEMOb6//1Rc7/6mZDRdw+nlzcQiQltOOsiqI3rrSyn/SlyjnJQ==", + "node_modules/playwright/node_modules/playwright-core": { + "version": "1.49.0", "dev": true, + "license": "Apache-2.0", "bin": { - "playwright": "cli.js" + "playwright-core": "cli.js" }, "engines": { - "node": ">=14" + "node": ">=18" } }, "node_modules/pluralize": { "version": "8.0.0", - "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", - "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/portfinder": { "version": "1.0.32", - "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.32.tgz", - "integrity": "sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==", + "license": "MIT", "dependencies": { "async": "^2.6.4", "debug": 
"^3.2.7", @@ -23226,24 +21482,22 @@ }, "node_modules/portfinder/node_modules/debug": { "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "license": "MIT", "dependencies": { "ms": "^2.1.1" } }, "node_modules/posix-character-classes": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", - "integrity": "sha512-xTgYBc3fuo7Yt7JbiuFxSYGToMoz8fLoE6TC9Wx1P/u+LfeThMOAqmuyECnlBaaJb+u1m9hHiXUEtwW4OzfUJg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/postcss": { - "version": "8.4.16", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.16.tgz", - "integrity": "sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==", + "version": "8.4.49", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", + "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", "funding": [ { "type": "opencollective", @@ -23252,12 +21506,17 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "nanoid": "^3.3.4", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" + "nanoid": "^3.3.7", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" @@ -23265,8 +21524,7 @@ }, "node_modules/postcss-calc": { "version": "8.2.4", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", - "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", + "license": "MIT", "dependencies": { "postcss-selector-parser": "^6.0.9", "postcss-value-parser": "^4.2.0" @@ -23277,8 +21535,7 @@ }, "node_modules/postcss-colormin": { "version": "5.3.1", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", - "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4", "caniuse-api": "^3.0.0", @@ -23294,8 +21551,7 @@ }, "node_modules/postcss-convert-values": { "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", - "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4", "postcss-value-parser": "^4.2.0" @@ -23309,8 +21565,7 @@ }, "node_modules/postcss-discard-comments": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", - "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", + "license": "MIT", "engines": { "node": "^10 || ^12 || >=14.0" }, @@ -23320,8 +21575,7 @@ }, "node_modules/postcss-discard-duplicates": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", - "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "license": "MIT", "engines": { "node": "^10 || ^12 || >=14.0" }, @@ -23331,8 +21585,7 @@ }, 
"node_modules/postcss-discard-empty": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", - "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", + "license": "MIT", "engines": { "node": "^10 || ^12 || >=14.0" }, @@ -23342,8 +21595,7 @@ }, "node_modules/postcss-discard-overridden": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", - "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "license": "MIT", "engines": { "node": "^10 || ^12 || >=14.0" }, @@ -23353,8 +21605,7 @@ }, "node_modules/postcss-discard-unused": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz", - "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==", + "license": "MIT", "dependencies": { "postcss-selector-parser": "^6.0.5" }, @@ -23367,8 +21618,7 @@ }, "node_modules/postcss-loader": { "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.0.1.tgz", - "integrity": "sha512-VRviFEyYlLjctSM93gAZtcJJ/iSkPZ79zWbN/1fSH+NisBByEiVLqpdVDrPLVSi8DX0oJo12kL/GppTBdKVXiQ==", + "license": "MIT", "dependencies": { "cosmiconfig": "^7.0.0", "klona": "^2.0.5", @@ -23388,8 +21638,7 @@ }, "node_modules/postcss-merge-idents": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", - "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==", + "license": "MIT", "dependencies": { "cssnano-utils": "^3.1.0", "postcss-value-parser": "^4.2.0" @@ -23403,8 +21652,7 @@ }, "node_modules/postcss-merge-longhand": { "version": "5.1.7", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", - "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0", "stylehacks": "^5.1.1" @@ -23418,8 +21666,7 @@ }, "node_modules/postcss-merge-rules": { "version": "5.1.4", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", - "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4", "caniuse-api": "^3.0.0", @@ -23435,8 +21682,7 @@ }, "node_modules/postcss-minify-font-values": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", - "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23449,8 +21695,7 @@ }, "node_modules/postcss-minify-gradients": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", - "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", + "license": "MIT", "dependencies": { "colord": "^2.9.1", "cssnano-utils": "^3.1.0", @@ -23465,8 +21710,7 @@ }, "node_modules/postcss-minify-params": { "version": "5.1.4", - "resolved": 
"https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", - "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4", "cssnano-utils": "^3.1.0", @@ -23481,8 +21725,7 @@ }, "node_modules/postcss-minify-selectors": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", - "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", + "license": "MIT", "dependencies": { "postcss-selector-parser": "^6.0.5" }, @@ -23495,8 +21738,7 @@ }, "node_modules/postcss-modules-extract-imports": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", - "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", + "license": "ISC", "engines": { "node": "^10 || ^12 || >= 14" }, @@ -23506,8 +21748,7 @@ }, "node_modules/postcss-modules-local-by-default": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz", - "integrity": "sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ==", + "license": "MIT", "dependencies": { "icss-utils": "^5.0.0", "postcss-selector-parser": "^6.0.2", @@ -23522,8 +21763,7 @@ }, "node_modules/postcss-modules-scope": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", - "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", + "license": "ISC", "dependencies": { "postcss-selector-parser": "^6.0.4" }, @@ -23536,8 +21776,7 @@ }, "node_modules/postcss-modules-values": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", - "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "license": "ISC", "dependencies": { "icss-utils": "^5.0.0" }, @@ -23550,8 +21789,7 @@ }, "node_modules/postcss-normalize-charset": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", - "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "license": "MIT", "engines": { "node": "^10 || ^12 || >=14.0" }, @@ -23561,8 +21799,7 @@ }, "node_modules/postcss-normalize-display-values": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", - "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23575,8 +21812,7 @@ }, "node_modules/postcss-normalize-positions": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", - "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23589,8 +21825,7 @@ }, "node_modules/postcss-normalize-repeat-style": { "version": "5.1.1", - "resolved": 
"https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", - "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23603,8 +21838,7 @@ }, "node_modules/postcss-normalize-string": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", - "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23617,8 +21851,7 @@ }, "node_modules/postcss-normalize-timing-functions": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", - "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23631,8 +21864,7 @@ }, "node_modules/postcss-normalize-unicode": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", - "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4", "postcss-value-parser": "^4.2.0" @@ -23646,8 +21878,7 @@ }, "node_modules/postcss-normalize-url": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", - "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "license": "MIT", "dependencies": { "normalize-url": "^6.0.1", "postcss-value-parser": "^4.2.0" @@ -23661,8 +21892,7 @@ }, "node_modules/postcss-normalize-whitespace": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", - "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23675,8 +21905,7 @@ }, "node_modules/postcss-ordered-values": { "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", - "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", + "license": "MIT", "dependencies": { "cssnano-utils": "^3.1.0", "postcss-value-parser": "^4.2.0" @@ -23690,8 +21919,7 @@ }, "node_modules/postcss-reduce-idents": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz", - "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23704,8 +21932,7 @@ }, "node_modules/postcss-reduce-initial": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", - "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4", "caniuse-api": "^3.0.0" @@ -23719,8 +21946,7 @@ }, "node_modules/postcss-reduce-transforms": { "version": 
"5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", - "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0" }, @@ -23733,8 +21959,7 @@ }, "node_modules/postcss-selector-parser": { "version": "6.0.10", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", - "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "license": "MIT", "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -23745,8 +21970,7 @@ }, "node_modules/postcss-sort-media-queries": { "version": "4.4.1", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", - "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", + "license": "MIT", "dependencies": { "sort-css-media-queries": "2.1.0" }, @@ -23759,8 +21983,7 @@ }, "node_modules/postcss-svgo": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", - "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "license": "MIT", "dependencies": { "postcss-value-parser": "^4.2.0", "svgo": "^2.7.0" @@ -23774,8 +21997,7 @@ }, "node_modules/postcss-unique-selectors": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", - "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", + "license": "MIT", "dependencies": { "postcss-selector-parser": "^6.0.5" }, @@ -23788,13 +22010,11 @@ }, "node_modules/postcss-value-parser": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + "license": "MIT" }, "node_modules/postcss-zindex": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz", - "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", + "license": "MIT", "engines": { "node": "^10 || ^12 || >=14.0" }, @@ -23804,8 +22024,7 @@ }, "node_modules/postman-url-encoder": { "version": "3.0.5", - "resolved": "https://registry.npmjs.org/postman-url-encoder/-/postman-url-encoder-3.0.5.tgz", - "integrity": "sha512-jOrdVvzUXBC7C+9gkIkpDJ3HIxOHTIqjpQ4C1EMt1ZGeMvSEpbFCKq23DEfgsj46vMnDgyQf+1ZLp2Wm+bKSsA==", + "license": "Apache-2.0", "dependencies": { "punycode": "^2.1.1" }, @@ -23814,9 +22033,9 @@ } }, "node_modules/preact": { - "version": "10.19.6", - "resolved": "https://registry.npmjs.org/preact/-/preact-10.19.6.tgz", - "integrity": "sha512-gympg+T2Z1fG1unB8NH29yHJwnEaCH37Z32diPDku316OTnRPeMbiRV9kTrfZpocXjdfnWuFUl/Mj4BHaf6gnw==", + "version": "10.25.1", + "resolved": "https://registry.npmjs.org/preact/-/preact-10.25.1.tgz", + "integrity": "sha512-frxeZV2vhQSohQwJ7FvlqC40ze89+8friponWUFeVEkaCfhC6Eu4V0iND5C9CXz8JLndV07QRDeXzH1+Anz5Og==", "funding": { "type": "opencollective", "url": "https://opencollective.com/preact" @@ -23824,17 +22043,15 @@ }, "node_modules/prepend-http": { "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/prettier": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.0.tgz", - "integrity": "sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==", + "version": "3.3.3", "dev": true, + "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" }, @@ -23847,8 +22064,7 @@ }, "node_modules/pretty-bytes": { "version": "5.6.0", - "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", - "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", + "license": "MIT", "engines": { "node": ">=6" }, @@ -23858,18 +22074,18 @@ }, "node_modules/pretty-error": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", - "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "license": "MIT", "dependencies": { "lodash": "^4.17.20", "renderkid": "^3.0.0" } }, "node_modules/pretty-format": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.6.3.tgz", - "integrity": "sha512-ZsBgjVhFAj5KeK+nHfF1305/By3lechHQSMWCTl8iHSbfOm2TN5nHEtFc/+W7fAyUeCs2n5iow72gld4gW0xDw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", @@ -23881,9 +22097,8 @@ }, "node_modules/pretty-format/node_modules/ansi-styles": { "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -23893,59 +22108,51 @@ }, "node_modules/pretty-format/node_modules/react-is": { "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/pretty-time": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", - "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/prism-react-renderer": { "version": "1.3.5", - "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-1.3.5.tgz", - "integrity": "sha512-IJ+MSwBWKG+SM3b2SUfdrhC+gu01QkV2KmRQgREThBfSQRoufqRfxfHUxpG1WcaFjP+kojcFyO9Qqtpgt3qLCg==", + "license": "MIT", "peerDependencies": { "react": ">=0.14.9" } }, "node_modules/prismjs": { "version": "1.29.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", - "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/process": { "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": 
"sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "license": "MIT", "engines": { "node": ">= 0.6.0" } }, "node_modules/process-nextick-args": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + "license": "MIT" }, "node_modules/promise": { "version": "7.3.1", - "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", - "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", + "license": "MIT", "dependencies": { "asap": "~2.0.3" } }, "node_modules/prompts": { "version": "2.4.2", - "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", - "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "license": "MIT", "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" @@ -23956,8 +22163,7 @@ }, "node_modules/prop-types": { "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "license": "MIT", "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", @@ -23966,8 +22172,7 @@ }, "node_modules/prop-types-exact": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/prop-types-exact/-/prop-types-exact-1.2.0.tgz", - "integrity": "sha512-K+Tk3Kd9V0odiXFP9fwDHUYRyvK3Nun3GVyPapSIs5OBkITAm15W0CPFD/YKTkMUAbc0b9CUwRQp2ybiBIq+eA==", + "license": "MIT", "dependencies": { "has": "^1.0.3", "object.assign": "^4.1.0", @@ -23976,8 +22181,7 @@ }, "node_modules/property-information": { "version": "5.6.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", - "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "license": "MIT", "dependencies": { "xtend": "^4.0.0" }, @@ -23988,13 +22192,11 @@ }, "node_modules/proto-list": { "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" + "license": "ISC" }, "node_modules/proxy-addr": { "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" @@ -24005,26 +22207,22 @@ }, "node_modules/proxy-addr/node_modules/ipaddr.js": { "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", "engines": { "node": ">= 0.10" } }, "node_modules/pseudomap": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==" + "license": "ISC" }, "node_modules/psl": { "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": 
"sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==" + "license": "MIT" }, "node_modules/public-encrypt": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", - "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", + "license": "MIT", "dependencies": { "bn.js": "^4.1.0", "browserify-rsa": "^4.0.0", @@ -24036,13 +22234,11 @@ }, "node_modules/public-encrypt/node_modules/bn.js": { "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "license": "MIT" }, "node_modules/pump": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "license": "MIT", "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" @@ -24050,16 +22246,14 @@ }, "node_modules/punycode": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/pupa": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", - "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", + "license": "MIT", "dependencies": { "escape-goat": "^2.0.0" }, @@ -24069,13 +22263,12 @@ }, "node_modules/pure-color": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/pure-color/-/pure-color-1.3.0.tgz", - "integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==" + "license": "MIT" }, "node_modules/pure-rand": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.2.tgz", - "integrity": "sha512-6Yg0ekpKICSjPswYOuC5sku/TSWaRYlA0qsXqJgM/d/4pLPHPuTxK7Nbf7jFKzAeedUhR8C7K9Uv63FBsSo8xQ==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", "dev": true, "funding": [ { @@ -24086,12 +22279,12 @@ "type": "opencollective", "url": "https://opencollective.com/fast-check" } - ] + ], + "license": "MIT" }, "node_modules/pushfeedback": { "version": "0.1.39", - "resolved": "https://registry.npmjs.org/pushfeedback/-/pushfeedback-0.1.39.tgz", - "integrity": "sha512-/sZR2Sqdi1MgxVxVFup3c8GMW1vKVDykCNYIzJH4ic+whTG4a1VFR/uhsx6iHDP3CNxVO/mcxH+pMRSv9voypQ==", + "license": "MIT", "dependencies": { "@stencil/core": "^2.13.0", "html2canvas": "^1.4.1" @@ -24099,16 +22292,14 @@ }, "node_modules/pushfeedback-react": { "version": "0.1.30", - "resolved": "https://registry.npmjs.org/pushfeedback-react/-/pushfeedback-react-0.1.30.tgz", - "integrity": "sha512-rHLfkmHSL8NLHdZ9xPlAoes4cor8LGzsCwYBvlajlIFggsajdO+F5GdUAi+cXvXJaBM+Us4EmZlD9naBljHMZA==", + "license": "ISC", "dependencies": { "pushfeedback": "^0.1.39" } }, "node_modules/q": { "version": "1.5.1", - "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", - "integrity": "sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw==", + "license": "MIT", "engines": { "node": ">=0.6.0", 
"teleport": ">=0.2.0" @@ -24116,8 +22307,7 @@ }, "node_modules/qs": { "version": "6.10.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz", - "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==", + "license": "BSD-3-Clause", "dependencies": { "side-channel": "^1.0.4" }, @@ -24130,8 +22320,7 @@ }, "node_modules/query-string": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", - "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", + "license": "MIT", "dependencies": { "decode-uri-component": "^0.2.0", "object-assign": "^4.1.0", @@ -24143,24 +22332,19 @@ }, "node_modules/querystring-es3": { "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", - "integrity": "sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==", "engines": { "node": ">=0.4.x" } }, "node_modules/queue": { "version": "6.0.2", - "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", - "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "license": "MIT", "dependencies": { "inherits": "~2.0.3" } }, "node_modules/queue-microtask": { "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "funding": [ { "type": "github", @@ -24174,25 +22358,23 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/raf": { "version": "3.4.1", - "resolved": "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz", - "integrity": "sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA==", + "license": "MIT", "dependencies": { "performance-now": "^2.1.0" } }, "node_modules/railroad-diagrams": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz", - "integrity": "sha512-cz93DjNeLY0idrCNOH6PviZGRN9GJhsdm9hpn1YCS879fj4W+x5IFJhhkRZcwVgMmFF7R82UA/7Oh+R8lLZg6A==" + "license": "CC0-1.0" }, "node_modules/randexp": { "version": "0.4.6", - "resolved": "https://registry.npmjs.org/randexp/-/randexp-0.4.6.tgz", - "integrity": "sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ==", + "license": "MIT", "dependencies": { "discontinuous-range": "1.0.0", "ret": "~0.1.10" @@ -24203,8 +22385,7 @@ }, "node_modules/randomatic": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.1.1.tgz", - "integrity": "sha512-TuDE5KxZ0J461RVjrJZCJc+J+zCkTb1MbH9AQUq68sMhOMcy9jLcb3BrZKgp9q9Ncltdg4QVqWrH02W2EFFVYw==", + "license": "MIT", "dependencies": { "is-number": "^4.0.0", "kind-of": "^6.0.0", @@ -24216,24 +22397,21 @@ }, "node_modules/randomatic/node_modules/is-number": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", - "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/randombytes": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": 
"sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "license": "MIT", "dependencies": { "safe-buffer": "^5.1.0" } }, "node_modules/randomfill": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "license": "MIT", "dependencies": { "randombytes": "^2.0.5", "safe-buffer": "^5.1.0" @@ -24241,16 +22419,14 @@ }, "node_modules/range-parser": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", - "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/raw-body": { "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -24263,16 +22439,14 @@ }, "node_modules/raw-body/node_modules/bytes": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/rc": { "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", "dependencies": { "deep-extend": "^0.6.0", "ini": "~1.3.0", @@ -24285,8 +22459,7 @@ }, "node_modules/react": { "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", - "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1" @@ -24297,8 +22470,7 @@ }, "node_modules/react-base16-styling": { "version": "0.6.0", - "resolved": "https://registry.npmjs.org/react-base16-styling/-/react-base16-styling-0.6.0.tgz", - "integrity": "sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==", + "license": "MIT", "dependencies": { "base16": "^1.0.0", "lodash.curry": "^4.0.1", @@ -24308,8 +22480,7 @@ }, "node_modules/react-dev-utils": { "version": "12.0.1", - "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", - "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.16.0", "address": "^1.1.2", @@ -24342,8 +22513,7 @@ }, "node_modules/react-dev-utils/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -24356,8 +22526,7 @@ }, "node_modules/react-dev-utils/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", 
"supports-color": "^7.1.0" @@ -24371,8 +22540,7 @@ }, "node_modules/react-dev-utils/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -24382,13 +22550,11 @@ }, "node_modules/react-dev-utils/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/react-dev-utils/node_modules/escape-string-regexp": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -24398,8 +22564,7 @@ }, "node_modules/react-dev-utils/node_modules/find-up": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "license": "MIT", "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -24413,24 +22578,21 @@ }, "node_modules/react-dev-utils/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/react-dev-utils/node_modules/loader-utils": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.0.tgz", - "integrity": "sha512-HVl9ZqccQihZ7JM85dco1MvO9G+ONvxoGa9rkhzFsneGLKSUg1gJf9bWzhRhcvm2qChhWpebQhP44qxjKIUCaQ==", + "license": "MIT", "engines": { "node": ">= 12.13.0" } }, "node_modules/react-dev-utils/node_modules/locate-path": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "license": "MIT", "dependencies": { "p-locate": "^5.0.0" }, @@ -24443,8 +22605,7 @@ }, "node_modules/react-dev-utils/node_modules/p-limit": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -24457,8 +22618,7 @@ }, "node_modules/react-dev-utils/node_modules/p-locate": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "license": "MIT", "dependencies": { "p-limit": "^3.0.2" }, @@ -24471,8 +22631,7 @@ }, "node_modules/react-dev-utils/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -24482,8 +22641,7 
@@ }, "node_modules/react-dom": { "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", - "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1", @@ -24495,18 +22653,15 @@ }, "node_modules/react-error-overlay": { "version": "6.0.11", - "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", - "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + "license": "MIT" }, "node_modules/react-fast-compare": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.0.tgz", - "integrity": "sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==" + "license": "MIT" }, "node_modules/react-helmet-async": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", - "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "license": "Apache-2.0", "dependencies": { "@babel/runtime": "^7.12.5", "invariant": "^2.2.4", @@ -24521,8 +22676,7 @@ }, "node_modules/react-hook-form": { "version": "7.49.3", - "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.49.3.tgz", - "integrity": "sha512-foD6r3juidAT1cOZzpmD/gOKt7fRsDhXXZ0y28+Al1CHgX+AY1qIN9VSIIItXRq1dN68QrRwl1ORFlwjBaAqeQ==", + "license": "MIT", "engines": { "node": ">=18", "pnpm": "8" @@ -24537,13 +22691,11 @@ }, "node_modules/react-is": { "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + "license": "MIT" }, "node_modules/react-json-view": { "version": "1.21.3", - "resolved": "https://registry.npmjs.org/react-json-view/-/react-json-view-1.21.3.tgz", - "integrity": "sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw==", + "license": "MIT", "dependencies": { "flux": "^4.0.1", "react-base16-styling": "^0.6.0", @@ -24557,13 +22709,11 @@ }, "node_modules/react-lifecycles-compat": { "version": "3.0.4", - "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz", - "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==" + "license": "MIT" }, "node_modules/react-live": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/react-live/-/react-live-4.0.1.tgz", - "integrity": "sha512-ndRYxgJYdcfVibnM0zublvEdwArbIwplhLxpOf3dsRtVh8BId0nOnblticIwhl24D5RcmIHf8siCErtgGN4zLw==", + "license": "MIT", "dependencies": { "prism-react-renderer": "^1.3.1", "sucrase": "^3.31.0", @@ -24577,17 +22727,18 @@ "node_modules/react-loadable": { "name": "@docusaurus/react-loadable", "version": "5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "license": "MIT", "dependencies": { "@types/react": "*", "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" } }, "node_modules/react-loadable-ssr-addon-v5-slorber": { "version": "1.0.1", - "resolved": 
"https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", - "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.10.3" }, @@ -24601,13 +22752,11 @@ }, "node_modules/react-magic-dropzone": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/react-magic-dropzone/-/react-magic-dropzone-1.0.1.tgz", - "integrity": "sha512-0BIROPARmXHpk4AS3eWBOsewxoM5ndk2psYP/JmbCq8tz3uR2LIV1XiroZ9PKrmDRMctpW+TvsBCtWasuS8vFA==" + "license": "MIT" }, "node_modules/react-markdown": { "version": "8.0.7", - "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-8.0.7.tgz", - "integrity": "sha512-bvWbzG4MtOU62XqBx3Xx+zB2raaFFsq4mYiAzfjXJMEz2sixgeAfraA3tvzULF02ZdOMUOKTBFFaZJDDrq+BJQ==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "@types/prop-types": "^15.0.0", @@ -24636,8 +22785,7 @@ }, "node_modules/react-markdown/node_modules/bail": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -24645,8 +22793,7 @@ }, "node_modules/react-markdown/node_modules/comma-separated-tokens": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", - "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -24654,8 +22801,7 @@ }, "node_modules/react-markdown/node_modules/is-plain-obj": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -24665,8 +22811,7 @@ }, "node_modules/react-markdown/node_modules/property-information": { "version": "6.4.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.4.0.tgz", - "integrity": "sha512-9t5qARVofg2xQqKtytzt+lZ4d1Qvj8t5B8fEwXK6qOfgRLgH/b13QlgEyDh033NOS31nXeFbYv7CLUDG1CeifQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -24674,13 +22819,11 @@ }, "node_modules/react-markdown/node_modules/react-is": { "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + "license": "MIT" }, "node_modules/react-markdown/node_modules/remark-parse": { "version": "10.0.2", - "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.2.tgz", - "integrity": "sha512-3ydxgHa/ZQzG8LvC7jTXccARYDcRld3VfcgIIFs7bI6vbRSxJJmzgLEIIoYKyrfhaY+ujuWaf/PJiMZXoiCXgw==", + "license": "MIT", "dependencies": { "@types/mdast": "^3.0.0", "mdast-util-from-markdown": "^1.0.0", @@ -24693,8 +22836,7 @@ }, "node_modules/react-markdown/node_modules/space-separated-tokens": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", - "integrity": 
"sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -24702,16 +22844,14 @@ }, "node_modules/react-markdown/node_modules/style-to-object": { "version": "0.4.4", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", - "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", + "license": "MIT", "dependencies": { "inline-style-parser": "0.1.1" } }, "node_modules/react-markdown/node_modules/trough": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", - "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -24719,8 +22859,7 @@ }, "node_modules/react-markdown/node_modules/unified": { "version": "10.1.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", - "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "bail": "^2.0.0", @@ -24737,8 +22876,7 @@ }, "node_modules/react-markdown/node_modules/unist-util-is": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", - "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -24749,8 +22887,7 @@ }, "node_modules/react-markdown/node_modules/unist-util-stringify-position": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -24761,8 +22898,7 @@ }, "node_modules/react-markdown/node_modules/unist-util-visit": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", - "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0", @@ -24775,8 +22911,7 @@ }, "node_modules/react-markdown/node_modules/unist-util-visit-parents": { "version": "5.1.3", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", - "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0" @@ -24788,8 +22923,7 @@ }, "node_modules/react-markdown/node_modules/vfile": { "version": "5.3.7", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", - "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "is-buffer": "^2.0.0", @@ -24803,8 +22937,7 @@ }, "node_modules/react-markdown/node_modules/vfile-message": { "version": "3.1.4", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", - "integrity": 
"sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-stringify-position": "^3.0.0" @@ -24816,8 +22949,7 @@ }, "node_modules/react-modal": { "version": "3.16.1", - "resolved": "https://registry.npmjs.org/react-modal/-/react-modal-3.16.1.tgz", - "integrity": "sha512-VStHgI3BVcGo7OXczvnJN7yT2TWHJPDXZWyI/a0ssFNhGZWsPmB8cF0z33ewDXq4VfYMO1vXgiv/g8Nj9NDyWg==", + "license": "MIT", "dependencies": { "exenv": "^1.2.0", "prop-types": "^15.7.2", @@ -24833,9 +22965,9 @@ } }, "node_modules/react-player": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/react-player/-/react-player-2.11.0.tgz", - "integrity": "sha512-fIrwpuXOBXdEg1FiyV9isKevZOaaIsAAtZy5fcjkQK9Nhmk1I2NXzY/hkPos8V0zb/ZX416LFy8gv7l/1k3a5w==", + "version": "2.16.0", + "resolved": "https://registry.npmjs.org/react-player/-/react-player-2.16.0.tgz", + "integrity": "sha512-mAIPHfioD7yxO0GNYVFD1303QFtI3lyyQZLY229UEAp/a10cSW+hPcakg0Keq8uWJxT2OiT/4Gt+Lc9bD6bJmQ==", "dependencies": { "deepmerge": "^4.0.0", "load-script": "^1.0.0", @@ -24849,8 +22981,7 @@ }, "node_modules/react-redux": { "version": "7.2.9", - "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-7.2.9.tgz", - "integrity": "sha512-Gx4L3uM182jEEayZfRbI/G11ZpYdNAnBs70lFVMNdHJI76XYtR+7m0MN+eAs7UHBPhWXcnFPaS+9owSCJQHNpQ==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.15.4", "@types/react-redux": "^7.1.20", @@ -24873,13 +23004,11 @@ }, "node_modules/react-redux/node_modules/react-is": { "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", - "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==" + "license": "MIT" }, "node_modules/react-router": { "version": "5.3.3", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.3.tgz", - "integrity": "sha512-mzQGUvS3bM84TnbtMYR8ZjKnuPJ71IjSzR+DE6UkUqvN4czWIqEs17yLL8xkAycv4ev0AiN+IGrWu88vJs/p2w==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.12.13", "history": "^4.9.0", @@ -24898,8 +23027,7 @@ }, "node_modules/react-router-config": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", - "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.1.2" }, @@ -24910,8 +23038,7 @@ }, "node_modules/react-router-dom": { "version": "5.3.3", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.3.tgz", - "integrity": "sha512-Ov0tGPMBgqmbu5CDmN++tv2HQ9HlWDuWIIqn4b88gjlAN5IHI+4ZUZRcpz9Hl0azFIwihbLDYw1OiHGRo7ZIng==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.12.13", "history": "^4.9.0", @@ -24927,8 +23054,7 @@ }, "node_modules/react-textarea-autosize": { "version": "8.5.3", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", - "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.20.13", "use-composed-ref": "^1.3.0", @@ -24943,8 +23069,7 @@ }, "node_modules/read-pkg": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", - "integrity": "sha512-7BGwRHqt4s/uVbuyoeejRn4YmFnYZiFl4AuaeXHlgZf3sONF0SOGlxs2Pw8g6hCKupo08RafIO5YXFNOKTfwsQ==", + 
"license": "MIT", "dependencies": { "load-json-file": "^1.0.0", "normalize-package-data": "^2.3.2", @@ -24956,8 +23081,7 @@ }, "node_modules/read-pkg-up": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz", - "integrity": "sha512-WD9MTlNtI55IwYUS27iHh9tK3YoIVhxis8yKhLpTqWtml739uXc9NWTpxoHkfZf3+DkCCsXox94/VWZniuZm6A==", + "license": "MIT", "dependencies": { "find-up": "^1.0.0", "read-pkg": "^1.0.0" @@ -24968,8 +23092,7 @@ }, "node_modules/read-pkg-up/node_modules/find-up": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz", - "integrity": "sha512-jvElSjyuo4EMQGoTwo1uJU5pQMwTW5lS1x05zzfJuTIyLR3zwO27LYrxNg+dlvKpGOuGy/MzBdXh80g0ve5+HA==", + "license": "MIT", "dependencies": { "path-exists": "^2.0.0", "pinkie-promise": "^2.0.0" @@ -24980,8 +23103,7 @@ }, "node_modules/read-pkg-up/node_modules/path-exists": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz", - "integrity": "sha512-yTltuKuhtNeFJKa1PiRzfLAU5182q1y4Eb4XCJ3PBqyzEDkAZRzBrKKBct682ls9reBVHf9udYLN5Nd+K1B9BQ==", + "license": "MIT", "dependencies": { "pinkie-promise": "^2.0.0" }, @@ -24991,8 +23113,7 @@ }, "node_modules/read-pkg/node_modules/path-type": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", - "integrity": "sha512-S4eENJz1pkiQn9Znv33Q+deTOKmbl+jj1Fl+qiP/vYezj+S8x+J3Uo0ISrx/QoEvIlOaDWJhPaRd1flJ9HXZqg==", + "license": "MIT", "dependencies": { "graceful-fs": "^4.1.2", "pify": "^2.0.0", @@ -25004,16 +23125,14 @@ }, "node_modules/read-pkg/node_modules/pify": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/readable-stream": { "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -25025,8 +23144,7 @@ }, "node_modules/readdirp": { "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "license": "MIT", "dependencies": { "picomatch": "^2.2.1" }, @@ -25036,13 +23154,10 @@ }, "node_modules/reading-time": { "version": "1.5.0", - "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", - "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" + "license": "MIT" }, "node_modules/rechoir": { "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", "dependencies": { "resolve": "^1.1.6" }, @@ -25052,8 +23167,7 @@ }, "node_modules/recursive-readdir": { "version": "2.2.2", - "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz", - "integrity": "sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==", + "license": "MIT", "dependencies": { "minimatch": "3.0.4" }, @@ -25063,8 +23177,7 @@ }, "node_modules/redent": { "version": "1.0.0", - 
"resolved": "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz", - "integrity": "sha512-qtW5hKzGQZqKoh6JNSD+4lfitfPKGz42e6QwiRmPM5mmKtR0N41AbJRYu0xJi7nhOJ4WDgRkKvAk6tw4WIwR4g==", + "license": "MIT", "dependencies": { "indent-string": "^2.1.0", "strip-indent": "^1.0.1" @@ -25075,8 +23188,7 @@ }, "node_modules/redent/node_modules/indent-string": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", - "integrity": "sha512-aqwDFWSgSgfRaEwao5lg5KEcVd/2a+D1rvoG7NdilmYz0NwRk6StWpWdz/Hpk34MKPpx7s8XxUqimfcQK6gGlg==", + "license": "MIT", "dependencies": { "repeating": "^2.0.0" }, @@ -25086,42 +23198,36 @@ }, "node_modules/redux": { "version": "4.2.1", - "resolved": "https://registry.npmjs.org/redux/-/redux-4.2.1.tgz", - "integrity": "sha512-LAUYz4lc+Do8/g7aeRa8JkyDErK6ekstQaqWQrNRW//MY1TvCEpMtpTWvlQ+FPbWCx+Xixu/6SHt5N0HR+SB4w==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.9.2" } }, "node_modules/redux-thunk": { "version": "2.4.2", - "resolved": "https://registry.npmjs.org/redux-thunk/-/redux-thunk-2.4.2.tgz", - "integrity": "sha512-+P3TjtnP0k/FEjcBL5FZpoovtvrTNT/UXd4/sluaSyrURlSlhLSzEdfsTBW7WsKB6yPvgd7q/iZPICFjW4o57Q==", + "license": "MIT", "peerDependencies": { "redux": "^4" } }, "node_modules/reflect.ownkeys": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/reflect.ownkeys/-/reflect.ownkeys-0.2.0.tgz", - "integrity": "sha512-qOLsBKHCpSOFKK1NUOCGC5VyeufB6lEsFe92AL2bhIJsacZS1qdoOZSbPk3MYKuT2cFlRDnulKXuuElIrMjGUg==" + "license": "MIT" }, "node_modules/reftools": { "version": "1.1.9", - "resolved": "https://registry.npmjs.org/reftools/-/reftools-1.1.9.tgz", - "integrity": "sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w==", + "license": "BSD-3-Clause", "funding": { "url": "https://github.com/Mermade/oas-kit?sponsor=1" } }, "node_modules/regenerate": { "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + "license": "MIT" }, "node_modules/regenerate-unicode-properties": { "version": "10.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", - "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", + "license": "MIT", "dependencies": { "regenerate": "^1.4.2" }, @@ -25131,21 +23237,18 @@ }, "node_modules/regenerator-runtime": { "version": "0.13.9", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", - "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" + "license": "MIT" }, "node_modules/regenerator-transform": { "version": "0.15.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", - "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.8.4" } }, "node_modules/regex-not": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "license": "MIT", "dependencies": { "extend-shallow": "^3.0.2", "safe-regex": "^1.1.0" @@ -25156,8 +23259,7 @@ }, 
"node_modules/regex-not/node_modules/extend-shallow": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "license": "MIT", "dependencies": { "assign-symbols": "^1.0.0", "is-extendable": "^1.0.1" @@ -25168,8 +23270,7 @@ }, "node_modules/regex-not/node_modules/is-extendable": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4" }, @@ -25179,8 +23280,7 @@ }, "node_modules/regexp.prototype.flags": { "version": "1.4.3", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz", - "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", @@ -25195,8 +23295,7 @@ }, "node_modules/regexpu-core": { "version": "5.2.2", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.2.2.tgz", - "integrity": "sha512-T0+1Zp2wjF/juXMrMxHxidqGYn8U4R+zleSJhX9tQ1PUsS8a9UtYfbsF9LdiVgNX3kiX8RNaKM42nfSgvFJjmw==", + "license": "MIT", "dependencies": { "regenerate": "^1.4.2", "regenerate-unicode-properties": "^10.1.0", @@ -25211,8 +23310,7 @@ }, "node_modules/registry-auth-token": { "version": "4.2.1", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz", - "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==", + "license": "MIT", "dependencies": { "rc": "^1.2.8" }, @@ -25222,8 +23320,7 @@ }, "node_modules/registry-url": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", + "license": "MIT", "dependencies": { "rc": "^1.2.8" }, @@ -25233,13 +23330,11 @@ }, "node_modules/regjsgen": { "version": "0.7.1", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.7.1.tgz", - "integrity": "sha512-RAt+8H2ZEzHeYWxZ3H2z6tF18zyyOnlcdaafLrm21Bguj7uZy6ULibiAFdXEtKQY4Sy7wDTwDiOazasMLc4KPA==" + "license": "MIT" }, "node_modules/regjsparser": { "version": "0.9.1", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", - "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "license": "BSD-2-Clause", "dependencies": { "jsesc": "~0.5.0" }, @@ -25249,16 +23344,13 @@ }, "node_modules/regjsparser/node_modules/jsesc": { "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", "bin": { "jsesc": "bin/jsesc" } }, "node_modules/rehype-raw": { "version": "6.1.1", - "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-6.1.1.tgz", - "integrity": "sha512-d6AKtisSRtDRX4aSPsJGTfnzrX2ZkHQLE5kiUuGOeEoLpbEulFF4hj0mLPbsa+7vmguDKOVVEQdHKDSwoaIDsQ==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "hast-util-raw": "^7.2.0", @@ -25271,13 +23363,11 @@ }, "node_modules/rehype-raw/node_modules/@types/parse5": { "version": "6.0.3", - 
"resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-6.0.3.tgz", - "integrity": "sha512-SuT16Q1K51EAVPz1K29DJ/sXjhSQ0zjvsypYJ6tlwVsRV9jwW5Adq2ch8Dq8kDBCkYnELS7N7VNCSB5nC56t/g==" + "license": "MIT" }, "node_modules/rehype-raw/node_modules/bail": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25285,8 +23375,7 @@ }, "node_modules/rehype-raw/node_modules/comma-separated-tokens": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", - "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25294,8 +23383,7 @@ }, "node_modules/rehype-raw/node_modules/hast-util-from-parse5": { "version": "7.1.2", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-7.1.2.tgz", - "integrity": "sha512-Nz7FfPBuljzsN3tCQ4kCBKqdNhQE2l0Tn+X1ubgKBPRoiDIu1mL08Cfw4k7q71+Duyaw7DXDN+VTAp4Vh3oCOw==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "@types/unist": "^2.0.0", @@ -25312,8 +23400,7 @@ }, "node_modules/rehype-raw/node_modules/hast-util-parse-selector": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-3.1.1.tgz", - "integrity": "sha512-jdlwBjEexy1oGz0aJ2f4GKMaVKkA9jwjr4MjAAI22E5fM/TXVZHuS5OpONtdeIkRKqAaryQ2E9xNQxijoThSZA==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0" }, @@ -25324,8 +23411,7 @@ }, "node_modules/rehype-raw/node_modules/hast-util-raw": { "version": "7.2.3", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-7.2.3.tgz", - "integrity": "sha512-RujVQfVsOrxzPOPSzZFiwofMArbQke6DJjnFfceiEbFh7S05CbPt0cYN+A5YeD3pso0JQk6O1aHBnx9+Pm2uqg==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "@types/parse5": "^6.0.0", @@ -25346,8 +23432,7 @@ }, "node_modules/rehype-raw/node_modules/hast-util-to-parse5": { "version": "7.1.0", - "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-7.1.0.tgz", - "integrity": "sha512-YNRgAJkH2Jky5ySkIqFXTQiaqcAtJyVE+D5lkN6CdtOqrnkLfGYYrEcKuHOJZlp+MwjSwuD3fZuawI+sic/RBw==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "comma-separated-tokens": "^2.0.0", @@ -25363,8 +23448,7 @@ }, "node_modules/rehype-raw/node_modules/hastscript": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-7.2.0.tgz", - "integrity": "sha512-TtYPq24IldU8iKoJQqvZOuhi5CyCQRAbvDOX0x1eW6rsHSxa/1i2CCiptNTotGHJ3VoHRGmqiv6/D3q113ikkw==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "comma-separated-tokens": "^2.0.0", @@ -25379,8 +23463,7 @@ }, "node_modules/rehype-raw/node_modules/html-void-elements": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-2.0.1.tgz", - "integrity": "sha512-0quDb7s97CfemeJAnW9wC0hw78MtW7NU3hqtCD75g2vFlDLt36llsYD7uB7SUzojLMP24N5IatXf7ylGXiGG9A==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25388,8 +23471,7 @@ }, "node_modules/rehype-raw/node_modules/is-plain-obj": { "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -25399,13 +23481,11 @@ }, "node_modules/rehype-raw/node_modules/parse5": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" + "license": "MIT" }, "node_modules/rehype-raw/node_modules/property-information": { "version": "6.4.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.4.0.tgz", - "integrity": "sha512-9t5qARVofg2xQqKtytzt+lZ4d1Qvj8t5B8fEwXK6qOfgRLgH/b13QlgEyDh033NOS31nXeFbYv7CLUDG1CeifQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25413,8 +23493,7 @@ }, "node_modules/rehype-raw/node_modules/space-separated-tokens": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", - "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25422,8 +23501,7 @@ }, "node_modules/rehype-raw/node_modules/trough": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", - "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25431,8 +23509,7 @@ }, "node_modules/rehype-raw/node_modules/unified": { "version": "10.1.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", - "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "bail": "^2.0.0", @@ -25449,8 +23526,7 @@ }, "node_modules/rehype-raw/node_modules/unist-util-is": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", - "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -25461,8 +23537,7 @@ }, "node_modules/rehype-raw/node_modules/unist-util-position": { "version": "4.0.4", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz", - "integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -25473,8 +23548,7 @@ }, "node_modules/rehype-raw/node_modules/unist-util-stringify-position": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -25485,8 +23559,7 @@ }, "node_modules/rehype-raw/node_modules/unist-util-visit": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", - "integrity": 
"sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0", @@ -25499,8 +23572,7 @@ }, "node_modules/rehype-raw/node_modules/unist-util-visit-parents": { "version": "5.1.3", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", - "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0" @@ -25512,8 +23584,7 @@ }, "node_modules/rehype-raw/node_modules/vfile": { "version": "5.3.7", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", - "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "is-buffer": "^2.0.0", @@ -25527,8 +23598,7 @@ }, "node_modules/rehype-raw/node_modules/vfile-location": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-4.1.0.tgz", - "integrity": "sha512-YF23YMyASIIJXpktBa4vIGLJ5Gs88UB/XePgqPmTa7cDA+JeO3yclbpheQYCHjVHBn/yePzrXuygIL+xbvRYHw==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "vfile": "^5.0.0" @@ -25540,8 +23610,7 @@ }, "node_modules/rehype-raw/node_modules/vfile-message": { "version": "3.1.4", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", - "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-stringify-position": "^3.0.0" @@ -25553,8 +23622,7 @@ }, "node_modules/rehype-raw/node_modules/web-namespaces": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", - "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25562,8 +23630,7 @@ }, "node_modules/rehype-raw/node_modules/zwitch": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", - "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25571,16 +23638,14 @@ }, "node_modules/relateurl": { "version": "0.2.7", - "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", - "integrity": "sha1-VNvzd+UUQKypCkzSdGANP/LYiKk=", + "license": "MIT", "engines": { "node": ">= 0.10" } }, "node_modules/remark-emoji": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz", - "integrity": "sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==", + "license": "MIT", "dependencies": { "emoticon": "^3.2.0", "node-emoji": "^1.10.0", @@ -25589,8 +23654,7 @@ }, "node_modules/remark-emoji/node_modules/unist-util-visit": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0", @@ -25603,8 
+23667,7 @@ }, "node_modules/remark-emoji/node_modules/unist-util-visit-parents": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0" @@ -25616,8 +23679,7 @@ }, "node_modules/remark-footnotes": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/remark-footnotes/-/remark-footnotes-2.0.0.tgz", - "integrity": "sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -25625,8 +23687,7 @@ }, "node_modules/remark-mdx": { "version": "1.6.22", - "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-1.6.22.tgz", - "integrity": "sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==", + "license": "MIT", "dependencies": { "@babel/core": "7.12.9", "@babel/helper-plugin-utils": "7.10.4", @@ -25644,8 +23705,7 @@ }, "node_modules/remark-mdx/node_modules/@babel/core": { "version": "7.12.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", - "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "license": "MIT", "dependencies": { "@babel/code-frame": "^7.10.4", "@babel/generator": "^7.12.5", @@ -25674,13 +23734,11 @@ }, "node_modules/remark-mdx/node_modules/@babel/helper-plugin-utils": { "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", - "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" + "license": "MIT" }, "node_modules/remark-mdx/node_modules/@babel/plugin-proposal-object-rest-spread": { "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz", - "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-object-rest-spread": "^7.8.0", @@ -25692,8 +23750,7 @@ }, "node_modules/remark-mdx/node_modules/@babel/plugin-syntax-jsx": { "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", - "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -25703,16 +23760,14 @@ }, "node_modules/remark-mdx/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/remark-mdx/node_modules/unified": { "version": "9.2.0", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", - "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "license": "MIT", "dependencies": { "bail": "^1.0.0", "extend": "^3.0.0", @@ -25728,8 +23783,7 @@ }, "node_modules/remark-parse": { 
"version": "8.0.3", - "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz", - "integrity": "sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==", + "license": "MIT", "dependencies": { "ccount": "^1.0.0", "collapse-white-space": "^1.0.2", @@ -25755,8 +23809,7 @@ }, "node_modules/remark-rehype": { "version": "10.1.0", - "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-10.1.0.tgz", - "integrity": "sha512-EFmR5zppdBp0WQeDVZ/b66CWJipB2q2VLNFMabzDSGR66Z2fQii83G5gTBbgGEnEEA0QRussvrFHxk1HWGJskw==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "@types/mdast": "^3.0.0", @@ -25770,8 +23823,7 @@ }, "node_modules/remark-rehype/node_modules/bail": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25779,8 +23831,7 @@ }, "node_modules/remark-rehype/node_modules/is-plain-obj": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", "engines": { "node": ">=12" }, @@ -25790,8 +23841,7 @@ }, "node_modules/remark-rehype/node_modules/mdast-util-definitions": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-5.1.2.tgz", - "integrity": "sha512-8SVPMuHqlPME/z3gqVwWY4zVXn8lqKv/pAhC57FuJ40ImXyBpmO5ukh98zB2v7Blql2FiHjHv9LVztSIqjY+MA==", + "license": "MIT", "dependencies": { "@types/mdast": "^3.0.0", "@types/unist": "^2.0.0", @@ -25804,8 +23854,7 @@ }, "node_modules/remark-rehype/node_modules/mdast-util-to-hast": { "version": "12.3.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-12.3.0.tgz", - "integrity": "sha512-pits93r8PhnIoU4Vy9bjW39M2jJ6/tdHyja9rrot9uujkN7UTU9SDnE6WNJz/IGyQk3XHX6yNNtrBH6cQzm8Hw==", + "license": "MIT", "dependencies": { "@types/hast": "^2.0.0", "@types/mdast": "^3.0.0", @@ -25823,8 +23872,7 @@ }, "node_modules/remark-rehype/node_modules/trough": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", - "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -25832,8 +23880,7 @@ }, "node_modules/remark-rehype/node_modules/unified": { "version": "10.1.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", - "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "bail": "^2.0.0", @@ -25850,8 +23897,7 @@ }, "node_modules/remark-rehype/node_modules/unist-util-generated": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz", - "integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -25859,8 +23905,7 @@ }, "node_modules/remark-rehype/node_modules/unist-util-is": { "version": "5.2.1", - "resolved": 
"https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", - "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -25871,8 +23916,7 @@ }, "node_modules/remark-rehype/node_modules/unist-util-position": { "version": "4.0.4", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz", - "integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -25883,8 +23927,7 @@ }, "node_modules/remark-rehype/node_modules/unist-util-stringify-position": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0" }, @@ -25895,8 +23938,7 @@ }, "node_modules/remark-rehype/node_modules/unist-util-visit": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", - "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0", @@ -25909,8 +23951,7 @@ }, "node_modules/remark-rehype/node_modules/unist-util-visit-parents": { "version": "5.1.3", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", - "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0" @@ -25922,8 +23963,7 @@ }, "node_modules/remark-rehype/node_modules/vfile": { "version": "5.3.7", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", - "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "is-buffer": "^2.0.0", @@ -25937,8 +23977,7 @@ }, "node_modules/remark-rehype/node_modules/vfile-message": { "version": "3.1.4", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", - "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-stringify-position": "^3.0.0" @@ -25950,8 +23989,7 @@ }, "node_modules/remark-squeeze-paragraphs": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz", - "integrity": "sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==", + "license": "MIT", "dependencies": { "mdast-squeeze-paragraphs": "^4.0.0" }, @@ -25962,8 +24000,7 @@ }, "node_modules/remarkable": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/remarkable/-/remarkable-2.0.1.tgz", - "integrity": "sha512-YJyMcOH5lrR+kZdmB0aJJ4+93bEojRZ1HGDn9Eagu6ibg7aVZhc3OWbbShRid+Q5eAfsEqWxpe+g5W5nYNfNiA==", + "license": "MIT", "dependencies": { "argparse": "^1.0.10", "autolinker": "^3.11.0" @@ -25977,16 +24014,14 @@ }, "node_modules/remarkable/node_modules/argparse": { "version": "1.0.10", - "resolved": 
"https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/renderkid": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", - "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "license": "MIT", "dependencies": { "css-select": "^4.1.3", "dom-converter": "^0.2.0", @@ -25997,24 +24032,21 @@ }, "node_modules/repeat-element": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz", - "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/repeat-string": { "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "license": "MIT", "engines": { "node": ">=0.10" } }, "node_modules/repeating": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", - "integrity": "sha512-ZqtSMuVybkISo2OWvqvm7iHSWngvdaW3IpsT9/uP8v4gMi591LY6h35wdOfvQdWCKFWZWm2Y1Opp4kV7vQKT6A==", + "license": "MIT", "dependencies": { "is-finite": "^1.0.0" }, @@ -26024,17 +24056,17 @@ }, "node_modules/replace-ext": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.1.tgz", - "integrity": "sha512-yD5BHCe7quCgBph4rMQ+0KkIRKwWCrHDOX1p1Gp6HwjPM5kVoCdKGNhN7ydqqsX6lJEnQDKZ/tFMiEdQ1dvPEw==", + "license": "MIT", "engines": { "node": ">= 0.10" } }, "node_modules/replace-in-file": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/replace-in-file/-/replace-in-file-7.1.0.tgz", - "integrity": "sha512-1uZmJ78WtqNYCSuPC9IWbweXkGxPOtk2rKuar8diTw7naVIQZiE3Tm8ACx2PCMXDtVH6N+XxwaRY2qZ2xHPqXw==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/replace-in-file/-/replace-in-file-7.2.0.tgz", + "integrity": "sha512-CiLXVop3o8/h2Kd1PwKPPimmS9wUV0Ki6Fl8+1ITD35nB3Gl/PrW5IONpTE0AXk0z4v8WYcpEpdeZqMXvSnWpg==", "dev": true, + "license": "MIT", "dependencies": { "chalk": "^4.1.2", "glob": "^8.1.0", @@ -26052,6 +24084,7 @@ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -26064,9 +24097,8 @@ }, "node_modules/replace-in-file/node_modules/brace-expansion": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "dev": true, + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0" } @@ -26076,6 +24108,7 @@ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -26092,6 +24125,7 @@ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -26103,13 +24137,16 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/replace-in-file/node_modules/glob": { "version": "8.1.0", "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, + "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -26129,6 +24166,7 @@ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -26138,6 +24176,7 @@ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", "dev": true, + "license": "ISC", "dependencies": { "brace-expansion": "^2.0.1" }, @@ -26150,6 +24189,7 @@ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -26159,9 +24199,7 @@ }, "node_modules/request": { "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142", + "license": "Apache-2.0", "dependencies": { "aws-sign2": "~0.7.0", "aws4": "^1.8.0", @@ -26190,64 +24228,53 @@ }, "node_modules/request/node_modules/qs": { "version": "6.5.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", - "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.6" } }, "node_modules/request/node_modules/uuid": { "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. 
See https://v8.dev/blog/math-random for details.", + "license": "MIT", "bin": { "uuid": "bin/uuid" } }, "node_modules/require-directory": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/require-from-string": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/require-like": { "version": "0.1.2", - "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", - "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", "engines": { "node": "*" } }, "node_modules/require-main-filename": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + "license": "ISC" }, "node_modules/requires-port": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + "license": "MIT" }, "node_modules/reselect": { "version": "4.1.8", - "resolved": "https://registry.npmjs.org/reselect/-/reselect-4.1.8.tgz", - "integrity": "sha512-ab9EmR80F/zQTMNeneUr4cv+jSwPJgIlvEmVwLerwrWVbpLlBuls9XHzIeTFy4cegU2NHBp3va0LKOzU5qFEYQ==" + "license": "MIT" }, "node_modules/resolve": { "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", + "license": "MIT", "dependencies": { "is-core-module": "^2.9.0", "path-parse": "^1.0.7", @@ -26262,9 +24289,8 @@ }, "node_modules/resolve-cwd": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", - "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", "dev": true, + "license": "MIT", "dependencies": { "resolve-from": "^5.0.0" }, @@ -26274,54 +24300,48 @@ }, "node_modules/resolve-cwd/node_modules/resolve-from": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/resolve-from": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/resolve-pathname": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", - "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + "license": "MIT" }, "node_modules/resolve-url": { "version": "0.2.1", - "resolved": 
"https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", - "integrity": "sha512-ZuF55hVUQaaczgOIwqWzkEcEidmlD/xl44x1UZnhOXcYuFN2S6+rcxpG+C1N3So0wvNI3DmJICUFfu2SxhBmvg==", - "deprecated": "https://github.com/lydell/resolve-url#deprecated" + "license": "MIT" }, "node_modules/resolve.exports": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", - "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/responselike": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", + "license": "MIT", "dependencies": { "lowercase-keys": "^1.0.0" } }, "node_modules/restore-cursor": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz", - "integrity": "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==", "dev": true, + "license": "MIT", "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" @@ -26335,24 +24355,21 @@ }, "node_modules/ret": { "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", + "license": "MIT", "engines": { "node": ">=0.12" } }, "node_modules/retry": { "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "license": "MIT", "engines": { "node": ">= 4" } }, "node_modules/reusify": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "license": "MIT", "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" @@ -26360,24 +24377,20 @@ }, "node_modules/rfdc": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.3.0.tgz", - "integrity": "sha512-V2hovdzFbOi77/WajaSMXk2OLm+xNIeQdMMuB7icj7bk6zi2F8GGAxigcnDFpJHbNyNcgyJDiP+8nOrY5cZGrA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/rgb-regex": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", - "integrity": "sha512-gDK5mkALDFER2YLqH6imYvK6g02gpNGM4ILDZ472EwWfXZnC2ZEpoB2ECXTyOVUKuk/bPJZMzwQPBYICzP+D3w==" + "license": "MIT" }, "node_modules/rgba-regex": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz", - "integrity": "sha512-zgn5OjNQXLUTdq8m17KdaicF6w89TZs8ZU8y0AYENIU6wG8GG6LLm0yLSiPY8DmaYmHdgRW8rnApjoT0fQRfMg==" + "license": "MIT" }, "node_modules/rimraf": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "license": "ISC", "dependencies": { "glob": "^7.1.3" }, @@ -26390,8 +24403,7 @@ }, "node_modules/ripemd160": { "version": "2.0.2", - "resolved": 
"https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "license": "MIT", "dependencies": { "hash-base": "^3.0.0", "inherits": "^2.0.1" @@ -26402,10 +24414,40 @@ "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==" }, + "node_modules/rrdom": { + "version": "2.0.0-alpha.17", + "resolved": "https://registry.npmjs.org/rrdom/-/rrdom-2.0.0-alpha.17.tgz", + "integrity": "sha512-b6caDiNcFO96Opp7TGdcVd4OLGSXu5dJe+A0IDiAu8mk7OmhqZCSDlgQdTKmdO5wMf4zPsUTgb8H/aNvR3kDHA==", + "dependencies": { + "rrweb-snapshot": "^2.0.0-alpha.17" + } + }, + "node_modules/rrweb": { + "version": "2.0.0-alpha.13", + "resolved": "https://registry.npmjs.org/rrweb/-/rrweb-2.0.0-alpha.13.tgz", + "integrity": "sha512-a8GXOCnzWHNaVZPa7hsrLZtNZ3CGjiL+YrkpLo0TfmxGLhjNZbWY2r7pE06p+FcjFNlgUVTmFrSJbK3kO7yxvw==", + "dependencies": { + "@rrweb/types": "^2.0.0-alpha.13", + "@types/css-font-loading-module": "0.0.7", + "@xstate/fsm": "^1.4.0", + "base64-arraybuffer": "^1.0.1", + "fflate": "^0.4.4", + "mitt": "^3.0.0", + "rrdom": "^2.0.0-alpha.13", + "rrweb-snapshot": "^2.0.0-alpha.13" + } + }, + "node_modules/rrweb-snapshot": { + "version": "2.0.0-alpha.17", + "resolved": "https://registry.npmjs.org/rrweb-snapshot/-/rrweb-snapshot-2.0.0-alpha.17.tgz", + "integrity": "sha512-GBg5pV8LHOTbeVmH2VHLEFR0mc2QpQMzAvcoxEGfPNWgWHc8UvKCyq7pqN1vA+fDZ+yXXbixeO0kB2pzVvFCBw==", + "dependencies": { + "postcss": "^8.4.38" + } + }, "node_modules/rst-selector-parser": { "version": "2.2.3", - "resolved": "https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz", - "integrity": "sha512-nDG1rZeP6oFTLN6yNDV/uiAvs1+FS/KlrEwh7+y7dpuApDBy6bI2HTBcc0/V8lv9OTqfyD34eF7au2pm8aBbhA==", + "license": "BSD-3-Clause", "dependencies": { "lodash.flattendeep": "^4.4.0", "nearley": "^2.7.10" @@ -26413,13 +24455,11 @@ }, "node_modules/rtl-detect": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz", - "integrity": "sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ==" + "license": "BSD-3-Clause" }, "node_modules/rtlcss": { "version": "3.5.0", - "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-3.5.0.tgz", - "integrity": "sha512-wzgMaMFHQTnyi9YOwsx9LjOxYXJPzS8sYnFaKm6R5ysvTkwzHiB0vxnbHwchHQT65PTdBjDG21/kQBWI7q9O7A==", + "license": "MIT", "dependencies": { "find-up": "^5.0.0", "picocolors": "^1.0.0", @@ -26432,8 +24472,7 @@ }, "node_modules/rtlcss/node_modules/find-up": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "license": "MIT", "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -26447,8 +24486,7 @@ }, "node_modules/rtlcss/node_modules/locate-path": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "license": "MIT", "dependencies": { "p-locate": "^5.0.0" }, @@ -26461,8 +24499,7 @@ }, "node_modules/rtlcss/node_modules/p-limit": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": 
"sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -26475,8 +24512,7 @@ }, "node_modules/rtlcss/node_modules/p-locate": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "license": "MIT", "dependencies": { "p-limit": "^3.0.2" }, @@ -26489,8 +24525,7 @@ }, "node_modules/rtlcss/node_modules/strip-json-comments": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "license": "MIT", "engines": { "node": ">=8" }, @@ -26500,8 +24535,6 @@ }, "node_modules/run-parallel": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", "funding": [ { "type": "github", @@ -26516,27 +24549,25 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "dependencies": { "queue-microtask": "^1.2.2" } }, "node_modules/rw": { "version": "1.3.3", - "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", - "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==" + "license": "BSD-3-Clause" }, "node_modules/rxjs": { "version": "7.6.0", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.6.0.tgz", - "integrity": "sha512-DDa7d8TFNUalGC9VqXvQ1euWNN7sc63TrUCuM9J998+ViviahMIjKSOU7rfcgFOF+FCD71BhDRv4hrFz+ImDLQ==", + "license": "Apache-2.0", "dependencies": { "tslib": "^2.1.0" } }, "node_modules/sade": { "version": "1.8.1", - "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz", - "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "license": "MIT", "dependencies": { "mri": "^1.1.0" }, @@ -26546,26 +24577,21 @@ }, "node_modules/safe-buffer": { "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + "license": "MIT" }, "node_modules/safe-json-parse": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/safe-json-parse/-/safe-json-parse-1.0.1.tgz", - "integrity": "sha512-o0JmTu17WGUaUOHa1l0FPGXKBfijbxK6qoHzlkihsDXxzBHvJcA7zgviKR92Xs841rX9pK16unfphLq0/KqX7A==" + "version": "1.0.1" }, "node_modules/safe-regex": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha512-aJXcif4xnaNUzvUuC5gcb46oTS7zvg4jpMTnuqtrEPlR3vFr4pxtdTwaF1Qs3Enjn9HK+ZlwQui+a7z0SywIzg==", + "license": "MIT", "dependencies": { "ret": "~0.1.10" } }, "node_modules/safe-regex-test": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz", - "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "get-intrinsic": "^1.1.3", @@ -26577,13 +24603,11 @@ }, "node_modules/safer-buffer": { "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": 
"sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + "license": "MIT" }, "node_modules/sass": { "version": "1.70.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.70.0.tgz", - "integrity": "sha512-uUxNQ3zAHeAx5nRFskBnrWzDUJrrvpCPD5FNAoRvTi0WwremlheES3tg+56PaVtCs5QDRX5CBLxxKMDJMEa1WQ==", + "license": "MIT", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -26598,8 +24622,7 @@ }, "node_modules/sass-loader": { "version": "13.3.3", - "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-13.3.3.tgz", - "integrity": "sha512-mt5YN2F1MOZr3d/wBRcZxeFgwgkH44wVc2zohO2YF6JiOMkiXe4BYRZpSu2sO1g71mo/j16txzUhsKZlqjVGzA==", + "license": "MIT", "dependencies": { "neo-async": "^2.6.2" }, @@ -26634,13 +24657,11 @@ }, "node_modules/sax": { "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" + "license": "ISC" }, "node_modules/scheduler": { "version": "0.20.2", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz", - "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==", + "license": "MIT", "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1" @@ -26648,8 +24669,7 @@ }, "node_modules/schema-utils": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", - "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.8", "ajv": "^6.12.5", @@ -26665,8 +24685,7 @@ }, "node_modules/search-insights": { "version": "2.8.0", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.8.0.tgz", - "integrity": "sha512-VzI4PMktJbydkbrF3/n40vFfRxdwg+o3CkQt0F3mHRSXVuv0PsVxQvB6mQQq/e9MCXAemcmp/GP9CNHpayFoCw==", + "license": "MIT", "peer": true, "engines": { "node": ">=16.0.0" @@ -26674,8 +24693,7 @@ }, "node_modules/section-matter": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "license": "MIT", "dependencies": { "extend-shallow": "^2.0.1", "kind-of": "^6.0.0" @@ -26686,8 +24704,7 @@ }, "node_modules/seek-bzip": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/seek-bzip/-/seek-bzip-1.0.6.tgz", - "integrity": "sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==", + "license": "MIT", "dependencies": { "commander": "^2.8.1" }, @@ -26698,18 +24715,15 @@ }, "node_modules/seek-bzip/node_modules/commander": { "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + "license": "MIT" }, "node_modules/select-hose": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + "license": "MIT" }, "node_modules/selfsigned": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.0.1.tgz", - "integrity": 
"sha512-LmME957M1zOsUhG+67rAjKfiWFox3SBxE/yymatMZsAx+oMrJ0YQ8AToOnyCm7xbeg2ep37IHLxdu0o2MavQOQ==", + "license": "MIT", "dependencies": { "node-forge": "^1" }, @@ -26719,8 +24733,7 @@ }, "node_modules/semver": { "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "license": "ISC", "dependencies": { "lru-cache": "^6.0.0" }, @@ -26733,8 +24746,7 @@ }, "node_modules/semver-diff": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", + "license": "MIT", "dependencies": { "semver": "^6.3.0" }, @@ -26744,24 +24756,21 @@ }, "node_modules/semver-diff/node_modules/semver": { "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/semver-regex": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-2.0.0.tgz", - "integrity": "sha512-mUdIBBvdn0PLOeP3TEkMH7HHeUP3GjsXCwKarjv/kGmUFOYg1VqEemKhoQpWMu6X2I8kHeuVdGibLGkVK+/5Qw==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/semver-truncate": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/semver-truncate/-/semver-truncate-1.1.2.tgz", - "integrity": "sha512-V1fGg9i4CL3qesB6U0L6XAm4xOJiHmt4QAacazumuasc03BvtFGIMCduv01JWQ69Nv+JST9TqhSCiJoxoY031w==", + "license": "MIT", "dependencies": { "semver": "^5.3.0" }, @@ -26771,16 +24780,14 @@ }, "node_modules/semver-truncate/node_modules/semver": { "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "license": "ISC", "bin": { "semver": "bin/semver" } }, "node_modules/send": { "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "license": "MIT", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -26802,42 +24809,36 @@ }, "node_modules/send/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/send/node_modules/debug/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/send/node_modules/ms": { "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + "license": "MIT" }, "node_modules/send/node_modules/range-parser": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", 
"engines": { "node": ">= 0.6" } }, "node_modules/serialize-javascript": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", - "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "license": "BSD-3-Clause", "dependencies": { "randombytes": "^2.1.0" } }, "node_modules/serve-handler": { "version": "6.1.3", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.3.tgz", - "integrity": "sha512-FosMqFBNrLyeiIDvP1zgO6YoTzFYHxLDEIavhlmQ+knB2Z7l1t+kGLHkZIDN7UVWqQAmKI3D20A6F6jo3nDd4w==", + "license": "MIT", "dependencies": { "bytes": "3.0.0", "content-disposition": "0.5.2", @@ -26851,16 +24852,14 @@ }, "node_modules/serve-handler/node_modules/mime-db": { "version": "1.33.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/serve-handler/node_modules/mime-types": { "version": "2.1.18", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "license": "MIT", "dependencies": { "mime-db": "~1.33.0" }, @@ -26870,13 +24869,11 @@ }, "node_modules/serve-handler/node_modules/path-to-regexp": { "version": "2.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", - "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + "license": "MIT" }, "node_modules/serve-index": { "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "license": "MIT", "dependencies": { "accepts": "~1.3.4", "batch": "0.6.1", @@ -26892,24 +24889,21 @@ }, "node_modules/serve-index/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/serve-index/node_modules/depd": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/serve-index/node_modules/http-errors": { "version": "1.6.3", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "license": "MIT", "dependencies": { "depd": "~1.1.2", "inherits": "2.0.3", @@ -26922,31 +24916,26 @@ }, "node_modules/serve-index/node_modules/inherits": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + "license": "ISC" }, "node_modules/serve-index/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": 
"sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/serve-index/node_modules/setprototypeof": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + "license": "ISC" }, "node_modules/serve-index/node_modules/statuses": { "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/serve-static": { "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "license": "MIT", "dependencies": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -26959,13 +24948,11 @@ }, "node_modules/set-blocking": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + "license": "ISC" }, "node_modules/set-getter": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/set-getter/-/set-getter-0.1.1.tgz", - "integrity": "sha512-9sVWOy+gthr+0G9DzqqLaYNA7+5OKkSmcqjL9cBpDEaZrr3ShQlyX2cZ/O/ozE41oxn/Tt0LGEM/w4Rub3A3gw==", + "license": "MIT", "dependencies": { "to-object-path": "^0.3.0" }, @@ -26975,8 +24962,7 @@ }, "node_modules/set-value": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", - "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", + "license": "MIT", "dependencies": { "extend-shallow": "^2.0.1", "is-extendable": "^0.1.1", @@ -26989,18 +24975,15 @@ }, "node_modules/setimmediate": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", - "integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==" + "license": "MIT" }, "node_modules/setprototypeof": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + "license": "ISC" }, "node_modules/sha.js": { "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "license": "(MIT AND BSD-3-Clause)", "dependencies": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -27011,8 +24994,7 @@ }, "node_modules/shallow-clone": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", - "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "license": "MIT", "dependencies": { "kind-of": "^6.0.2" }, @@ -27022,13 +25004,11 @@ }, "node_modules/shallowequal": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", - "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + 
"license": "MIT" }, "node_modules/shebang-command": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" }, @@ -27038,21 +25018,18 @@ }, "node_modules/shebang-regex": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/shell-quote": { "version": "1.7.3", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.3.tgz", - "integrity": "sha512-Vpfqwm4EnqGdlsBFNmHhxhElJYrdfcxPThu+ryKS5J8L/fhAwLazFZtq+S+TWZ9ANj2piSQLGj6NQg+lKPmxrw==" + "license": "MIT" }, "node_modules/shelljs": { "version": "0.8.5", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", - "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "license": "BSD-3-Clause", "dependencies": { "glob": "^7.0.0", "interpret": "^1.0.0", @@ -27067,8 +25044,7 @@ }, "node_modules/should": { "version": "13.2.3", - "resolved": "https://registry.npmjs.org/should/-/should-13.2.3.tgz", - "integrity": "sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ==", + "license": "MIT", "dependencies": { "should-equal": "^2.0.0", "should-format": "^3.0.3", @@ -27079,16 +25055,14 @@ }, "node_modules/should-equal": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/should-equal/-/should-equal-2.0.0.tgz", - "integrity": "sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==", + "license": "MIT", "dependencies": { "should-type": "^1.4.0" } }, "node_modules/should-format": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/should-format/-/should-format-3.0.3.tgz", - "integrity": "sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q==", + "license": "MIT", "dependencies": { "should-type": "^1.3.0", "should-type-adaptors": "^1.0.1" @@ -27096,13 +25070,11 @@ }, "node_modules/should-type": { "version": "1.4.0", - "resolved": "https://registry.npmjs.org/should-type/-/should-type-1.4.0.tgz", - "integrity": "sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ==" + "license": "MIT" }, "node_modules/should-type-adaptors": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/should-type-adaptors/-/should-type-adaptors-1.1.0.tgz", - "integrity": "sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA==", + "license": "MIT", "dependencies": { "should-type": "^1.3.0", "should-util": "^1.0.0" @@ -27110,36 +25082,11 @@ }, "node_modules/should-util": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/should-util/-/should-util-1.0.1.tgz", - "integrity": "sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g==" - }, - "node_modules/showdown": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/showdown/-/showdown-2.1.0.tgz", - "integrity": "sha512-/6NVYu4U819R2pUIk79n67SYgJHWCce0a5xTP979WbNp0FL9MN1I1QK662IDU1b6JzKTvmhgI7T7JYIxBi3kMQ==", - "dependencies": { - "commander": "^9.0.0" - }, - "bin": { - "showdown": 
"bin/showdown.js" - }, - "funding": { - "type": "individual", - "url": "https://www.paypal.me/tiviesantos" - } - }, - "node_modules/showdown/node_modules/commander": { - "version": "9.5.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", - "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", - "engines": { - "node": "^12.20.0 || >=14" - } + "license": "MIT" }, "node_modules/side-channel": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.0", "get-intrinsic": "^1.0.2", @@ -27151,21 +25098,18 @@ }, "node_modules/signal-exit": { "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + "license": "ISC" }, "node_modules/simple-swizzle": { "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", + "license": "MIT", "dependencies": { "is-arrayish": "^0.3.1" } }, "node_modules/sirv": { "version": "1.0.19", - "resolved": "https://registry.npmjs.org/sirv/-/sirv-1.0.19.tgz", - "integrity": "sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ==", + "license": "MIT", "dependencies": { "@polka/url": "^1.0.0-next.20", "mrmime": "^1.0.0", @@ -27177,13 +25121,11 @@ }, "node_modules/sisteransi": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", - "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + "license": "MIT" }, "node_modules/sitemap": { "version": "7.1.1", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", - "integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", + "license": "MIT", "dependencies": { "@types/node": "^17.0.5", "@types/sax": "^1.2.1", @@ -27200,17 +25142,15 @@ }, "node_modules/slash": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/slice-ansi": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz", - "integrity": "sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^6.0.0", "is-fullwidth-code-point": "^4.0.0" @@ -27224,9 +25164,8 @@ }, "node_modules/slice-ansi/node_modules/ansi-styles": { "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -27236,9 +25175,8 @@ }, "node_modules/slice-ansi/node_modules/is-fullwidth-code-point": { "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", - "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" }, @@ -27248,16 +25186,14 @@ }, "node_modules/slugify": { "version": "1.6.6", - "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.6.tgz", - "integrity": "sha512-h+z7HKHYXj6wJU+AnS/+IH8Uh9fdcX1Lrhg1/VMdf9PwoBQXFcXiAdsy2tSK0P6gKwJLXp02r90ahUCqHk9rrw==", + "license": "MIT", "engines": { "node": ">=8.0.0" } }, "node_modules/snapdragon": { "version": "0.8.2", - "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "license": "MIT", "dependencies": { "base": "^0.11.1", "debug": "^2.2.0", @@ -27274,8 +25210,7 @@ }, "node_modules/snapdragon-node": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "license": "MIT", "dependencies": { "define-property": "^1.0.0", "isobject": "^3.0.0", @@ -27287,8 +25222,7 @@ }, "node_modules/snapdragon-node/node_modules/define-property": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", + "license": "MIT", "dependencies": { "is-descriptor": "^1.0.0" }, @@ -27298,8 +25232,7 @@ }, "node_modules/snapdragon-util": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "license": "MIT", "dependencies": { "kind-of": "^3.2.0" }, @@ -27309,13 +25242,11 @@ }, "node_modules/snapdragon-util/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/snapdragon-util/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -27325,16 +25256,14 @@ }, "node_modules/snapdragon/node_modules/debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } }, "node_modules/snapdragon/node_modules/define-property": { "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "license": "MIT", "dependencies": { "is-descriptor": "^0.1.0" }, @@ -27344,8 +25273,7 @@ }, "node_modules/snapdragon/node_modules/is-accessor-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": 
"sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -27355,8 +25283,7 @@ }, "node_modules/snapdragon/node_modules/is-accessor-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -27366,13 +25293,11 @@ }, "node_modules/snapdragon/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/snapdragon/node_modules/is-data-descriptor": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -27382,8 +25307,7 @@ }, "node_modules/snapdragon/node_modules/is-data-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -27393,8 +25317,7 @@ }, "node_modules/snapdragon/node_modules/is-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "license": "MIT", "dependencies": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -27406,21 +25329,18 @@ }, "node_modules/snapdragon/node_modules/kind-of": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/snapdragon/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "license": "MIT" }, "node_modules/sockjs": { "version": "0.3.24", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", - "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "license": "MIT", "dependencies": { "faye-websocket": "^0.11.3", "uuid": "^8.3.2", @@ -27429,16 +25349,14 @@ }, "node_modules/sort-css-media-queries": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz", - "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==", + "license": "MIT", "engines": { "node": ">= 6.3.0" } }, "node_modules/sort-keys": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-1.1.2.tgz", - "integrity": "sha512-vzn8aSqKgytVik0iwdBEi+zevbTYZogewTUM6dtpmGwEcdzbub/TX4bCzRhebDCRC3QzXgJsLRKB2V/Oof7HXg==", + "license": "MIT", "dependencies": { 
"is-plain-obj": "^1.0.0" }, @@ -27448,8 +25366,7 @@ }, "node_modules/sort-keys-length": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/sort-keys-length/-/sort-keys-length-1.0.1.tgz", - "integrity": "sha512-GRbEOUqCxemTAk/b32F2xa8wDTs+Z1QHOkbhJDQTvv/6G3ZkbJ+frYWsTcc7cBB3Fu4wy4XlLCuNtJuMn7Gsvw==", + "license": "MIT", "dependencies": { "sort-keys": "^1.0.0" }, @@ -27459,33 +25376,29 @@ }, "node_modules/sort-keys/node_modules/is-plain-obj": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/source-map": { "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "engines": { "node": ">=0.10.0" } }, "node_modules/source-map-resolve": { "version": "0.5.3", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", - "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", - "deprecated": "See https://github.com/lydell/source-map-resolve#deprecated", + "license": "MIT", "dependencies": { "atob": "^2.1.2", "decode-uri-component": "^0.2.0", @@ -27496,8 +25409,7 @@ }, "node_modules/source-map-support": { "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "license": "MIT", "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -27505,22 +25417,18 @@ }, "node_modules/source-map-support/node_modules/source-map": { "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/source-map-url": { "version": "0.4.1", - "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz", - "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==", - "deprecated": "See https://github.com/lydell/source-map-url#deprecated" + "license": "MIT" }, "node_modules/space-separated-tokens": { "version": "1.1.5", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", - "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -27528,8 +25436,7 @@ }, "node_modules/spdx-correct": { "version": "3.2.0", - "resolved": 
"https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", - "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "license": "Apache-2.0", "dependencies": { "spdx-expression-parse": "^3.0.0", "spdx-license-ids": "^3.0.0" @@ -27537,13 +25444,11 @@ }, "node_modules/spdx-exceptions": { "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==" + "license": "CC-BY-3.0" }, "node_modules/spdx-expression-parse": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "license": "MIT", "dependencies": { "spdx-exceptions": "^2.1.0", "spdx-license-ids": "^3.0.0" @@ -27551,13 +25456,11 @@ }, "node_modules/spdx-license-ids": { "version": "3.0.13", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.13.tgz", - "integrity": "sha512-XkD+zwiqXHikFZm4AX/7JSCXA98U5Db4AFd5XUg/+9UNtnH75+Z9KxtpYiJZx36mUDVOwH83pl7yvCer6ewM3w==" + "license": "CC0-1.0" }, "node_modules/spdy": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "license": "MIT", "dependencies": { "debug": "^4.1.0", "handle-thing": "^2.0.0", @@ -27571,8 +25474,7 @@ }, "node_modules/spdy-transport": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "license": "MIT", "dependencies": { "debug": "^4.1.0", "detect-node": "^2.0.4", @@ -27584,8 +25486,7 @@ }, "node_modules/split-string": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "license": "MIT", "dependencies": { "extend-shallow": "^3.0.0" }, @@ -27595,8 +25496,7 @@ }, "node_modules/split-string/node_modules/extend-shallow": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "license": "MIT", "dependencies": { "assign-symbols": "^1.0.0", "is-extendable": "^1.0.1" @@ -27607,8 +25507,7 @@ }, "node_modules/split-string/node_modules/is-extendable": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4" }, @@ -27618,13 +25517,11 @@ }, "node_modules/sprintf-js": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + "license": "BSD-3-Clause" }, "node_modules/squeak": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/squeak/-/squeak-1.3.0.tgz", - "integrity": 
"sha512-YQL1ulInM+ev8nXX7vfXsCsDh6IqXlrremc1hzi77776BtpWgYJUMto3UM05GSAaGzJgWekszjoKDrVNB5XG+A==", + "license": "MIT", "dependencies": { "chalk": "^1.0.0", "console-stream": "^0.1.1", @@ -27636,24 +25533,21 @@ }, "node_modules/squeak/node_modules/ansi-regex": { "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/squeak/node_modules/ansi-styles": { "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha512-kmCevFghRiWM7HB5zTPULl4r9bVFSWjz62MhqizDGUrq2NWuNMQyuv4tHHoKJHs69M/MF64lEcHdYIocrdWQYA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/squeak/node_modules/chalk": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==", + "license": "MIT", "dependencies": { "ansi-styles": "^2.2.1", "escape-string-regexp": "^1.0.2", @@ -27667,8 +25561,7 @@ }, "node_modules/squeak/node_modules/strip-ansi": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg==", + "license": "MIT", "dependencies": { "ansi-regex": "^2.0.0" }, @@ -27678,16 +25571,14 @@ }, "node_modules/squeak/node_modules/supports-color": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g==", + "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/sshpk": { "version": "1.17.0", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz", - "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==", + "license": "MIT", "dependencies": { "asn1": "~0.2.3", "assert-plus": "^1.0.0", @@ -27710,14 +25601,12 @@ }, "node_modules/stable": { "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" + "license": "MIT" }, "node_modules/stack-utils": { "version": "2.0.6", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", - "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", "dev": true, + "license": "MIT", "dependencies": { "escape-string-regexp": "^2.0.0" }, @@ -27727,17 +25616,15 @@ }, "node_modules/stack-utils/node_modules/escape-string-regexp": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/state-toggle": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz", - "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==", + "license": "MIT", "funding": { "type": "github", "url": 
"https://github.com/sponsors/wooorm" @@ -27745,8 +25632,7 @@ }, "node_modules/static-extend": { "version": "0.1.2", - "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", - "integrity": "sha512-72E9+uLc27Mt718pMHt9VMNiAL4LMsmDbBva8mxWUCkT07fSzEGMYUCk0XWY6lp0j6RBAG4cJ3mWuZv2OE3s0g==", + "license": "MIT", "dependencies": { "define-property": "^0.2.5", "object-copy": "^0.1.0" @@ -27757,8 +25643,7 @@ }, "node_modules/static-extend/node_modules/define-property": { "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", + "license": "MIT", "dependencies": { "is-descriptor": "^0.1.0" }, @@ -27768,8 +25653,7 @@ }, "node_modules/static-extend/node_modules/is-accessor-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -27779,8 +25663,7 @@ }, "node_modules/static-extend/node_modules/is-accessor-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -27790,13 +25673,11 @@ }, "node_modules/static-extend/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/static-extend/node_modules/is-data-descriptor": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -27806,8 +25687,7 @@ }, "node_modules/static-extend/node_modules/is-data-descriptor/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -27817,8 +25697,7 @@ }, "node_modules/static-extend/node_modules/is-descriptor": { "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "license": "MIT", "dependencies": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -27830,29 +25709,25 @@ }, "node_modules/static-extend/node_modules/kind-of": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/statuses": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": 
"sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/std-env": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.0.1.tgz", - "integrity": "sha512-mC1Ps9l77/97qeOZc+HrOL7TIaOboHqMZ24dGVQrlxFcpPpfCHpH+qfUT7Dz+6mlG8+JPA1KfBQo19iC/+Ngcw==" + "license": "MIT" }, "node_modules/stream-browserify": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-3.0.0.tgz", - "integrity": "sha512-H73RAHsVBapbim0tU2JwwOiXUj+fikfiaoYAKHF3VJfA0pe2BCzkhAHBlLG6REzE+2WNZcxOXjK7lkso+9euLA==", + "license": "MIT", "dependencies": { "inherits": "~2.0.4", "readable-stream": "^3.5.0" @@ -27860,8 +25735,7 @@ }, "node_modules/stream-http": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-3.2.0.tgz", - "integrity": "sha512-Oq1bLqisTyK3TSCXpPbT4sdeYNdmyZJv1LxpEm2vu1ZhK89kSE5YXwZc3cWk0MagGaKriBh9mCFbVGtO+vY29A==", + "license": "MIT", "dependencies": { "builtin-status-codes": "^3.0.0", "inherits": "^2.0.4", @@ -27871,24 +25745,20 @@ }, "node_modules/strict-uri-encode": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha512-R3f198pcvnB+5IpnBlRkphuE9n46WyVl8I39W/ZUTZLz4nqSP/oLYUrcnJrw462Ds8he4YKMov2efsTIw1BDGQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/string_decoder": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.2.0" } }, "node_modules/string_decoder/node_modules/safe-buffer": { "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "funding": [ { "type": "github", @@ -27902,22 +25772,21 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/string-argv": { "version": "0.3.2", - "resolved": "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz", - "integrity": "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.6.19" } }, "node_modules/string-length": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", - "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", "dev": true, + "license": "MIT", "dependencies": { "char-regex": "^1.0.2", "strip-ansi": "^6.0.0" @@ -27927,14 +25796,11 @@ } }, "node_modules/string-template": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/string-template/-/string-template-0.2.1.tgz", - "integrity": "sha512-Yptehjogou2xm4UJbxJ4CxgZx12HBfeystp0y3x7s4Dj32ltVVG1Gg8YhKjHZkHicuKpZX/ffilA8505VbUbpw==" + "version": "0.2.1" }, "node_modules/string-width": { "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -27947,8 +25813,7 
@@ "node_modules/string-width-cjs": { "name": "string-width", "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -27960,8 +25825,7 @@ }, "node_modules/string.prototype.trim": { "version": "1.2.7", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz", - "integrity": "sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -27976,8 +25840,7 @@ }, "node_modules/string.prototype.trimend": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -27989,8 +25852,7 @@ }, "node_modules/string.prototype.trimstart": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -28002,8 +25864,7 @@ }, "node_modules/stringify-object": { "version": "3.3.0", - "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", - "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "license": "BSD-2-Clause", "dependencies": { "get-own-enumerable-property-symbols": "^3.0.0", "is-obj": "^1.0.1", @@ -28015,8 +25876,7 @@ }, "node_modules/strip-ansi": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" }, @@ -28027,8 +25887,7 @@ "node_modules/strip-ansi-cjs": { "name": "strip-ansi", "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" }, @@ -28038,57 +25897,50 @@ }, "node_modules/strip-bom": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", - "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/strip-bom-string": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/strip-color": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/strip-color/-/strip-color-0.1.0.tgz", - "integrity": 
"sha512-p9LsUieSjWNNAxVCXLeilaDlmuUOrDS5/dF9znM1nZc7EGX5+zEFC0bEevsNIaldjlks+2jns5Siz6F9iK6jwA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/strip-dirs": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/strip-dirs/-/strip-dirs-2.1.0.tgz", - "integrity": "sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g==", + "license": "MIT", "dependencies": { "is-natural-number": "^4.0.1" } }, "node_modules/strip-eof": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha512-7FCwGGmx8mD5xQd3RPUvnSpUXHM3BWuzjtpD4TXsfcZ9EL4azvVVUscFYwD9nx8Kh+uCBC00XBtAykoMHwTh8Q==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/strip-final-newline": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/strip-indent": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-1.0.1.tgz", - "integrity": "sha512-I5iQq6aFMM62fBEAIB/hXzwJD6EEZ0xEGCX2t7oXqaKPIRgt4WruAQ285BISgdkP+HLGWyeGmNJcpIwFeRYRUA==", + "license": "MIT", "dependencies": { "get-stdin": "^4.0.1" }, @@ -28101,16 +25953,14 @@ }, "node_modules/strip-json-comments": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/strip-outer": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-outer/-/strip-outer-1.0.1.tgz", - "integrity": "sha512-k55yxKHwaXnpYGsOzg4Vl8+tDrWylxDEpknGjhTiZB8dFRU5rTo9CAzeycivxV3s+zlTKwrs6WxMxR95n26kwg==", + "license": "MIT", "dependencies": { "escape-string-regexp": "^1.0.2" }, @@ -28120,26 +25970,22 @@ }, "node_modules/strnum": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/strnum/-/strnum-1.0.5.tgz", - "integrity": "sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA==" + "license": "MIT" }, "node_modules/style-mod": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz", - "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==" + "license": "MIT" }, "node_modules/style-to-object": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz", - "integrity": "sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==", + "license": "MIT", "dependencies": { "inline-style-parser": "0.1.1" } }, "node_modules/stylehacks": { "version": "5.1.1", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", - "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", + "license": "MIT", "dependencies": { "browserslist": "^4.21.4", "postcss-selector-parser": "^6.0.4" @@ -28153,13 +25999,11 @@ }, "node_modules/stylis": { "version": "4.3.1", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.1.tgz", - "integrity": "sha512-EQepAV+wMsIaGVGX1RECzgrcqRRU/0sYOHkeLsZ3fzHaHXZy4DaOOX0vOlGQdlsjkh3mFHAIlVimpwAs4dslyQ==" + "license": "MIT" }, "node_modules/sucrase": { "version": "3.35.0", - "resolved": 
"https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", - "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "license": "MIT", "dependencies": { "@jridgewell/gen-mapping": "^0.3.2", "commander": "^4.0.0", @@ -28179,24 +26023,21 @@ }, "node_modules/sucrase/node_modules/brace-expansion": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0" } }, "node_modules/sucrase/node_modules/commander": { "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "license": "MIT", "engines": { "node": ">= 6" } }, "node_modules/sucrase/node_modules/glob": { "version": "10.3.10", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", - "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", + "license": "ISC", "dependencies": { "foreground-child": "^3.1.0", "jackspeak": "^2.3.5", @@ -28216,8 +26057,7 @@ }, "node_modules/sucrase/node_modules/minimatch": { "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "license": "ISC", "dependencies": { "brace-expansion": "^2.0.1" }, @@ -28230,8 +26070,7 @@ }, "node_modules/supports-color": { "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "license": "MIT", "dependencies": { "has-flag": "^3.0.0" }, @@ -28241,8 +26080,7 @@ }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -28252,13 +26090,11 @@ }, "node_modules/svg-parser": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", - "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + "license": "MIT" }, "node_modules/svgo": { "version": "2.8.0", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", - "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "license": "MIT", "dependencies": { "@trysound/sax": "0.2.0", "commander": "^7.2.0", @@ -28277,16 +26113,14 @@ }, "node_modules/svgo/node_modules/commander": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", "engines": { "node": ">= 10" } }, "node_modules/swagger2openapi": { "version": "7.0.8", - "resolved": "https://registry.npmjs.org/swagger2openapi/-/swagger2openapi-7.0.8.tgz", - "integrity": 
"sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==", + "license": "BSD-3-Clause", "dependencies": { "call-me-maybe": "^1.0.1", "node-fetch": "^2.6.1", @@ -28311,9 +26145,8 @@ }, "node_modules/swc-loader": { "version": "0.2.3", - "resolved": "https://registry.npmjs.org/swc-loader/-/swc-loader-0.2.3.tgz", - "integrity": "sha512-D1p6XXURfSPleZZA/Lipb3A8pZ17fP4NObZvFCDjK/OKljroqDpPmsBdTraWhVBqUNpcWBQY1imWdoPScRlQ7A==", "dev": true, + "license": "MIT", "peerDependencies": { "@swc/core": "^1.2.147", "webpack": ">=2" @@ -28321,21 +26154,18 @@ }, "node_modules/tabbable": { "version": "6.2.0", - "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", - "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==" + "license": "MIT" }, "node_modules/tapable": { "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", - "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/tar-stream": { "version": "1.6.2", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", - "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", + "license": "MIT", "dependencies": { "bl": "^1.0.0", "buffer-alloc": "^1.2.0", @@ -28351,13 +26181,11 @@ }, "node_modules/tar-stream/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/tar-stream/node_modules/readable-stream": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -28370,16 +26198,14 @@ }, "node_modules/tar-stream/node_modules/string_decoder": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/tcp-port-used": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/tcp-port-used/-/tcp-port-used-1.0.2.tgz", - "integrity": "sha512-l7ar8lLUD3XS1V2lfoJlCBaeoaWo/2xfYt81hM7VlvR4RrMVFqfmzfhLVk40hAb368uitje5gPtBRL1m/DGvLA==", + "license": "MIT", "dependencies": { "debug": "4.3.1", "is2": "^2.0.6" @@ -28387,8 +26213,7 @@ }, "node_modules/tcp-port-used/node_modules/debug": { "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "license": "MIT", "dependencies": { "ms": "2.1.2" }, @@ -28403,16 +26228,14 @@ }, "node_modules/temp-dir": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-1.0.0.tgz", - "integrity": "sha512-xZFXEGbG7SNC3itwBzI3RYjq/cEhBkx2hJuKGIUOcEULmkQExXiHat2z/qkISYsuR+IKumhEfKKbV5qXmhICFQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/tempfile": { "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/tempfile/-/tempfile-2.0.0.tgz", - "integrity": "sha512-ZOn6nJUgvgC09+doCEF3oB+r3ag7kUvlsXEGX069QRD60p+P3uP7XG9N2/at+EyIRGSN//ZY3LyEotA1YpmjuA==", + "license": "MIT", "dependencies": { "temp-dir": "^1.0.0", "uuid": "^3.0.1" @@ -28423,17 +26246,14 @@ }, "node_modules/tempfile/node_modules/uuid": { "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", + "license": "MIT", "bin": { "uuid": "bin/uuid" } }, "node_modules/terser": { "version": "5.10.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.10.0.tgz", - "integrity": "sha512-AMmF99DMfEDiRJfxfY5jj5wNH/bYO09cniSqhfoyxc8sFoYIgkJy86G04UoZU5VjlpnplVu0K6Tx6E9b5+DlHA==", + "license": "BSD-2-Clause", "dependencies": { "commander": "^2.20.0", "source-map": "~0.7.2", @@ -28456,8 +26276,7 @@ }, "node_modules/terser-webpack-plugin": { "version": "5.3.3", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.3.tgz", - "integrity": "sha512-Fx60G5HNYknNTNQnzQ1VePRuu89ZVYWfjRAeT5rITuCY/1b08s49e5kSQwHDirKZWuoKOBRFS98EUUoZ9kLEwQ==", + "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "^0.3.7", "jest-worker": "^27.4.5", @@ -28489,22 +26308,19 @@ }, "node_modules/terser/node_modules/commander": { "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + "license": "MIT" }, "node_modules/terser/node_modules/source-map": { "version": "0.7.3", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", - "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", + "license": "BSD-3-Clause", "engines": { "node": ">= 8" } }, "node_modules/test-exclude": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", - "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", "dev": true, + "license": "ISC", "dependencies": { "@istanbuljs/schema": "^0.1.2", "glob": "^7.1.4", @@ -28516,29 +26332,25 @@ }, "node_modules/text-segmentation": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/text-segmentation/-/text-segmentation-1.0.3.tgz", - "integrity": "sha512-iOiPUo/BGnZ6+54OsWxZidGCsdU8YbE4PSpdPinp7DeMtUJNJBoJ/ouUSTJjHkh1KntHaltHl/gDs2FC4i5+Nw==", + "license": "MIT", "dependencies": { "utrie": "^1.0.2" } }, "node_modules/text-table": { "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + "license": "MIT" }, "node_modules/thenify": { "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "license": "MIT", "dependencies": { "any-promise": "^1.0.0" } }, "node_modules/thenify-all": { "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": 
"sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "license": "MIT", "dependencies": { "thenify": ">= 3.1.0 < 4" }, @@ -28548,13 +26360,11 @@ }, "node_modules/through": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" + "license": "MIT" }, "node_modules/through2": { "version": "2.0.5", - "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", - "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "license": "MIT", "dependencies": { "readable-stream": "~2.3.6", "xtend": "~4.0.1" @@ -28562,13 +26372,11 @@ }, "node_modules/through2/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/through2/node_modules/readable-stream": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -28581,34 +26389,29 @@ }, "node_modules/through2/node_modules/string_decoder": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/thunky": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + "license": "MIT" }, "node_modules/ticky": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ticky/-/ticky-1.0.1.tgz", - "integrity": "sha512-RX35iq/D+lrsqhcPWIazM9ELkjOe30MSeoBHQHSsRwd1YuhJO5ui1K1/R0r7N3mFvbLBs33idw+eR6j+w6i/DA==" + "license": "MIT" }, "node_modules/timed-out": { "version": "4.0.1", - "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz", - "integrity": "sha512-G7r3AhovYtr5YKOWQkta8RKAPb+J9IsO4uVmzjl8AZwfhs8UcUwTiD6gcJYSgOtzyjvQKrKYn41syHbUWMkafA==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/timers-browserify": { "version": "2.0.12", - "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", - "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", + "license": "MIT", "dependencies": { "setimmediate": "^1.0.4" }, @@ -28618,18 +26421,15 @@ }, "node_modules/timsort": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha512-qsdtZH+vMoCARQtyod4imc2nIJwg9Cc7lPRrw9CzF8ZKR0khdr8+2nX80PBhET3tcyTtJDxAffGh2rXH4tyU8A==" + "license": "MIT" }, "node_modules/tiny-invariant": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.2.0.tgz", - "integrity": "sha512-1Uhn/aqw5C6RI4KejVeTg6mIS7IqxnLJ8Mv2tV5rTc0qWobay7pDUz6Wi392Cnc8ak1H0F2cjoRzb2/AW4+Fvg==" + "license": "MIT" }, "node_modules/tiny-lr": { "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/tiny-lr/-/tiny-lr-1.1.1.tgz", - "integrity": "sha512-44yhA3tsaRoMOjQQ+5v5mVdqef+kH6Qze9jTpqtVufgYjYt08zyZAwNwwVBj3i1rJMnR52IxOW0LK0vBzgAkuA==", + "license": "MIT", "dependencies": { "body": "^5.1.0", "debug": "^3.1.0", @@ -28641,16 +26441,14 @@ }, "node_modules/tiny-lr/node_modules/debug": { "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "license": "MIT", "dependencies": { "ms": "^2.1.1" } }, "node_modules/tiny-lr/node_modules/faye-websocket": { "version": "0.10.0", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.10.0.tgz", - "integrity": "sha512-Xhj93RXbMSq8urNCUq4p9l0P6hnySJ/7YNRhYNug0bLOuii7pKO7xQFb5mx9xZXWCar88pLPb805PvUkwrLZpQ==", + "license": "MIT", "dependencies": { "websocket-driver": ">=0.5.1" }, @@ -28660,32 +26458,20 @@ }, "node_modules/tiny-warning": { "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", - "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + "license": "MIT" }, "node_modules/tmpl": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", - "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", - "dev": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/to-buffer": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", - "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "engines": { - "node": ">=4" - } + "license": "MIT" }, "node_modules/to-object-path": { "version": "0.3.0", - "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", - "integrity": "sha512-9mWHdnGRuh3onocaHzukyvCZhzvr6tiflAy/JRFXcJX0TjgfWA9pk9t8CMbzmBE4Jfw58pXbkngtBtqYxzNEyg==", + "license": "MIT", "dependencies": { "kind-of": "^3.0.2" }, @@ -28695,13 +26481,11 @@ }, "node_modules/to-object-path/node_modules/is-buffer": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "license": "MIT" }, "node_modules/to-object-path/node_modules/kind-of": { "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "license": "MIT", "dependencies": { "is-buffer": "^1.1.5" }, @@ -28711,16 +26495,14 @@ }, "node_modules/to-readable-stream": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/to-regex": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": 
"sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "license": "MIT", "dependencies": { "define-property": "^2.0.2", "extend-shallow": "^3.0.2", @@ -28733,8 +26515,7 @@ }, "node_modules/to-regex-range": { "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -28744,8 +26525,7 @@ }, "node_modules/to-regex/node_modules/extend-shallow": { "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "license": "MIT", "dependencies": { "assign-symbols": "^1.0.0", "is-extendable": "^1.0.1" @@ -28756,8 +26536,7 @@ }, "node_modules/to-regex/node_modules/is-extendable": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", "dependencies": { "is-plain-object": "^2.0.4" }, @@ -28767,29 +26546,25 @@ }, "node_modules/toidentifier": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", "engines": { "node": ">=0.6" } }, "node_modules/toml": { "version": "2.3.6", - "resolved": "https://registry.npmjs.org/toml/-/toml-2.3.6.tgz", - "integrity": "sha512-gVweAectJU3ebq//Ferr2JUY4WKSDe5N+z0FvjDncLGyHmIDoxgY/2Ie4qfEIDm4IS7OA6Rmdm7pdEEdMcV/xQ==" + "license": "MIT" }, "node_modules/totalist": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/totalist/-/totalist-1.1.0.tgz", - "integrity": "sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/tough-cookie": { "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "license": "BSD-3-Clause", "dependencies": { "psl": "^1.1.28", "punycode": "^2.1.1" @@ -28800,21 +26575,15 @@ }, "node_modules/tr46": { "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + "license": "MIT" }, "node_modules/traverse": { "version": "0.3.9", - "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.3.9.tgz", - "integrity": "sha512-iawgk0hLP3SxGKDfnDJf8wTz4p2qImnyihM5Hh/sGvQ3K37dPi/w8sRhdNIxYA1TwFwc5mDhIJq+O0RsvXBKdQ==", - "engines": { - "node": "*" - } + "license": "MIT/X11" }, "node_modules/tree-node-cli": { "version": "1.6.0", - "resolved": "https://registry.npmjs.org/tree-node-cli/-/tree-node-cli-1.6.0.tgz", - "integrity": "sha512-M8um5Lbl76rWU5aC8oOeEhruiCM29lFCKnwpxrwMjpRicHXJx+bb9Cak11G3zYLrMb6Glsrhnn90rHIzDJrjvg==", + "license": "MIT", "dependencies": { "commander": "^5.0.0", "fast-folder-size": "1.6.1", @@ -28826,14 +26595,11 @@ } }, "node_modules/trim": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz", - 
"integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==" + "version": "0.0.1" }, "node_modules/trim-lines": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", - "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -28841,16 +26607,14 @@ }, "node_modules/trim-newlines": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", - "integrity": "sha512-Nm4cF79FhSTzrLKGDMi3I4utBtFv8qKy4sq1enftf2gMdpqI8oVQTAfySkTz5r49giVzDj88SVZXP4CeYQwjaw==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/trim-repeated": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/trim-repeated/-/trim-repeated-1.0.0.tgz", - "integrity": "sha512-pkonvlKk8/ZuR0D5tLW8ljt5I8kmxp2XKymhepUeOdCEfKpZaktSArkLHZt76OB1ZvO9bssUsDty4SWhLvZpLg==", + "license": "MIT", "dependencies": { "escape-string-regexp": "^1.0.2" }, @@ -28860,8 +26624,7 @@ }, "node_modules/trim-trailing-lines": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz", - "integrity": "sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -28869,8 +26632,7 @@ }, "node_modules/trough": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", - "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -28878,8 +26640,7 @@ }, "node_modules/truncate-html": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/truncate-html/-/truncate-html-1.0.4.tgz", - "integrity": "sha512-FpDAlPzpJ3jlZiNEahRs584FS3jOSQafgj4cC9DmAYPct6uMZDLY625+eErRd43G35vGDrNq3i7b4aYUQ/Bxqw==", + "license": "MIT", "dependencies": { "@types/cheerio": "^0.22.8", "cheerio": "0.22.0" @@ -28887,8 +26648,7 @@ }, "node_modules/truncate-html/node_modules/cheerio": { "version": "0.22.0", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-0.22.0.tgz", - "integrity": "sha512-8/MzidM6G/TgRelkzDG13y3Y9LxBjCb+8yOEZ9+wwq5gVF2w2pV0wmHvjfT0RvuxGyR7UEuK36r+yYMbT4uKgA==", + "license": "MIT", "dependencies": { "css-select": "~1.2.0", "dom-serializer": "~0.1.0", @@ -28913,8 +26673,7 @@ }, "node_modules/truncate-html/node_modules/css-select": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-1.2.0.tgz", - "integrity": "sha512-dUQOBoqdR7QwV90WysXPLXG5LO7nhYBgiWVfxF80DKPF8zx1t/pUd2FYy73emg3zrjtM6dzmYgbHKfV2rxiHQA==", + "license": "BSD-like", "dependencies": { "boolbase": "~1.0.0", "css-what": "2.1", @@ -28924,16 +26683,14 @@ }, "node_modules/truncate-html/node_modules/css-what": { "version": "2.1.3", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz", - "integrity": "sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg==", + "license": "BSD-2-Clause", "engines": { "node": "*" } }, "node_modules/truncate-html/node_modules/dom-serializer": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.1.1.tgz", - "integrity": 
"sha512-l0IU0pPzLWSHBcieZbpOKgkIn3ts3vAh7ZuFyXNwJxJXk/c4Gwj9xaTJwIDVQCXawWD0qb3IzMGH5rglQaO0XA==", + "license": "MIT", "dependencies": { "domelementtype": "^1.3.0", "entities": "^1.1.1" @@ -28941,21 +26698,17 @@ }, "node_modules/truncate-html/node_modules/domelementtype": { "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" + "license": "BSD-2-Clause" }, "node_modules/truncate-html/node_modules/domhandler": { "version": "2.4.2", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", + "license": "BSD-2-Clause", "dependencies": { "domelementtype": "1" } }, "node_modules/truncate-html/node_modules/domutils": { "version": "1.5.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", - "integrity": "sha512-gSu5Oi/I+3wDENBsOWBiRK1eoGxcywYSqg3rR960/+EfY0CF4EX1VPkgHOZ3WiS/Jg2DtliF6BhWcHlfpYUcGw==", "dependencies": { "dom-serializer": "0", "domelementtype": "1" @@ -28963,13 +26716,11 @@ }, "node_modules/truncate-html/node_modules/entities": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" + "license": "BSD-2-Clause" }, "node_modules/truncate-html/node_modules/htmlparser2": { "version": "3.10.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz", - "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==", + "license": "MIT", "dependencies": { "domelementtype": "^1.3.1", "domhandler": "^2.3.0", @@ -28981,39 +26732,33 @@ }, "node_modules/truncate-html/node_modules/nth-check": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", + "license": "BSD-2-Clause", "dependencies": { "boolbase": "~1.0.0" } }, "node_modules/ts-dedent": { "version": "2.2.0", - "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", - "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "license": "MIT", "engines": { "node": ">=6.10" } }, "node_modules/ts-interface-checker": { "version": "0.1.13", - "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", - "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==" + "license": "Apache-2.0" }, "node_modules/tslib": { "version": "2.4.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", - "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" + "license": "0BSD" }, "node_modules/tty-browserify": { "version": "0.0.1", - "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.1.tgz", - "integrity": "sha512-C3TaO7K81YvjCgQH9Q1S3R3P3BtN3RIM8n+OvX4il1K1zgE8ZhI0op7kClgkxtutIE8hQrcrHBXvIheqKUUCxw==" + "license": "MIT" }, "node_modules/tunnel-agent": { "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": 
"sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "license": "Apache-2.0", "dependencies": { "safe-buffer": "^5.0.1" }, @@ -29023,22 +26768,19 @@ }, "node_modules/tweetnacl": { "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" + "license": "Unlicense" }, "node_modules/type-detect": { "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", "dev": true, + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/type-fest": { "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -29048,8 +26790,7 @@ }, "node_modules/type-is": { "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -29060,8 +26801,7 @@ }, "node_modules/typed-array-length": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", - "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "for-each": "^0.3.3", @@ -29073,21 +26813,18 @@ }, "node_modules/typedarray": { "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==" + "license": "MIT" }, "node_modules/typedarray-to-buffer": { "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "license": "MIT", "dependencies": { "is-typedarray": "^1.0.0" } }, "node_modules/typescript": { "version": "4.7.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.7.4.tgz", - "integrity": "sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==", + "license": "Apache-2.0", "peer": true, "bin": { "tsc": "bin/tsc", @@ -29099,8 +26836,6 @@ }, "node_modules/ua-parser-js": { "version": "1.0.35", - "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.35.tgz", - "integrity": "sha512-fKnGuqmTBnIE+/KXSzCn4db8RTigUzw1AN0DmdU6hJovUTbYJKyqj+8Mt1c4VfRDnOVJnENmfYkIPZ946UrSAA==", "funding": [ { "type": "opencollective", @@ -29111,14 +26846,14 @@ "url": "https://paypal.me/faisalman" } ], + "license": "MIT", "engines": { "node": "*" } }, "node_modules/unbox-primitive": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", - "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==", + "license": "MIT", "dependencies": { "call-bind": "^1.0.2", "has-bigints": "^1.0.2", @@ -29131,8 +26866,7 @@ }, 
"node_modules/unbzip2-stream": { "version": "1.4.3", - "resolved": "https://registry.npmjs.org/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz", - "integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==", + "license": "MIT", "dependencies": { "buffer": "^5.2.1", "through": "^2.3.8" @@ -29140,8 +26874,7 @@ }, "node_modules/unherit": { "version": "1.1.3", - "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz", - "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==", + "license": "MIT", "dependencies": { "inherits": "^2.0.0", "xtend": "^4.0.0" @@ -29153,16 +26886,14 @@ }, "node_modules/unicode-canonical-property-names-ecmascript": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/unicode-match-property-ecmascript": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "license": "MIT", "dependencies": { "unicode-canonical-property-names-ecmascript": "^2.0.0", "unicode-property-aliases-ecmascript": "^2.0.0" @@ -29173,24 +26904,21 @@ }, "node_modules/unicode-match-property-value-ecmascript": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", - "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/unicode-property-aliases-ecmascript": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", - "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/unified": { "version": "9.2.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", - "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", + "license": "MIT", "dependencies": { "bail": "^1.0.0", "extend": "^3.0.0", @@ -29206,8 +26934,7 @@ }, "node_modules/union-value": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", - "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", + "license": "MIT", "dependencies": { "arr-union": "^3.1.0", "get-value": "^2.0.6", @@ -29220,18 +26947,15 @@ }, "node_modules/uniq": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha512-Gw+zz50YNKPDKXs+9d+aKAjVwpjNwqzvNpLigIruT4HA9lMZNdMqs9x07kKHB/L9WRzqp4+DlTU5s4wG2esdoA==" + "license": "MIT" }, "node_modules/uniqs": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz", - "integrity": "sha512-mZdDpf3vBV5Efh29kMw5tXoup/buMgxLzOt/XKFKcVmi+15ManNQWr6HfZ2aiZTYlYixbdNJ0KFmIZIv52tHSQ==" + "license": "MIT" }, "node_modules/unique-string": { "version": "2.0.0", 
- "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", + "license": "MIT", "dependencies": { "crypto-random-string": "^2.0.0" }, @@ -29241,8 +26965,7 @@ }, "node_modules/unist-builder": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz", - "integrity": "sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -29250,8 +26973,7 @@ }, "node_modules/unist-util-generated": { "version": "1.1.6", - "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.6.tgz", - "integrity": "sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -29259,8 +26981,7 @@ }, "node_modules/unist-util-is": { "version": "4.1.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", - "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -29268,8 +26989,7 @@ }, "node_modules/unist-util-position": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz", - "integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -29277,8 +26997,7 @@ }, "node_modules/unist-util-remove": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-2.1.0.tgz", - "integrity": "sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==", + "license": "MIT", "dependencies": { "unist-util-is": "^4.0.0" }, @@ -29289,8 +27008,7 @@ }, "node_modules/unist-util-remove-position": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz", - "integrity": "sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==", + "license": "MIT", "dependencies": { "unist-util-visit": "^2.0.0" }, @@ -29301,8 +27019,7 @@ }, "node_modules/unist-util-remove-position/node_modules/unist-util-visit": { "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0", @@ -29315,8 +27032,7 @@ }, "node_modules/unist-util-remove-position/node_modules/unist-util-visit-parents": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^4.0.0" @@ -29328,8 +27044,7 @@ }, "node_modules/unist-util-stringify-position": { "version": "2.0.3", - "resolved": 
"https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", - "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.2" }, @@ -29340,8 +27055,7 @@ }, "node_modules/unist-util-visit": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", - "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", @@ -29354,8 +27068,7 @@ }, "node_modules/unist-util-visit-parents": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", - "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" @@ -29367,13 +27080,11 @@ }, "node_modules/unist-util-visit-parents/node_modules/@types/unist": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.0.tgz", - "integrity": "sha512-MFETx3tbTjE7Uk6vvnWINA/1iJ7LuMdO4fcq8UfF0pRbj01aGLduVvQcRyswuACJdpnHgg8E3rQLhaRdNEJS0w==" + "license": "MIT" }, "node_modules/unist-util-visit-parents/node_modules/unist-util-is": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", - "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0" }, @@ -29384,13 +27095,11 @@ }, "node_modules/unist-util-visit/node_modules/@types/unist": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.0.tgz", - "integrity": "sha512-MFETx3tbTjE7Uk6vvnWINA/1iJ7LuMdO4fcq8UfF0pRbj01aGLduVvQcRyswuACJdpnHgg8E3rQLhaRdNEJS0w==" + "license": "MIT" }, "node_modules/unist-util-visit/node_modules/unist-util-is": { "version": "6.0.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", - "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0" }, @@ -29401,29 +27110,25 @@ }, "node_modules/universalify": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "license": "MIT", "engines": { "node": ">= 10.0.0" } }, "node_modules/unpipe": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/unquote": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", - "integrity": "sha512-vRCqFv6UhXpWxZPyGDh/F3ZpNv8/qo7w6iufLpQg9aKnQ71qM4B5KiI7Mia9COcjEhrO9LueHpMYjYzsWH3OIg==" + "license": "MIT" }, "node_modules/unset-value": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", - "integrity": "sha512-PcA2tsuGSF9cnySLHTLSh2qrQiJ70mn+r+Glzxv2TWZblxsxCC52BDlZoPCsz7STd9pN7EZetkWZBAvk4cgZdQ==", + "license": "MIT", 
"dependencies": { "has-value": "^0.3.1", "isobject": "^3.0.0" @@ -29434,8 +27139,7 @@ }, "node_modules/unset-value/node_modules/has-value": { "version": "0.3.1", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", - "integrity": "sha512-gpG936j8/MzaeID5Yif+577c17TxaDmhuyVgSwtnL/q8UUTySg8Mecb+8Cf1otgLoD7DDH75axp86ER7LFsf3Q==", + "license": "MIT", "dependencies": { "get-value": "^2.0.3", "has-values": "^0.1.4", @@ -29447,8 +27151,7 @@ }, "node_modules/unset-value/node_modules/has-value/node_modules/isobject": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha512-+OUdGJlgjOBZDfxnDjYYG6zp487z0JGNQq3cYQYg5f5hKR+syHMsaztzGeml/4kGG55CSpKSpWTY+jYGgsHLgA==", + "license": "MIT", "dependencies": { "isarray": "1.0.0" }, @@ -29458,21 +27161,18 @@ }, "node_modules/unset-value/node_modules/has-values": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", - "integrity": "sha512-J8S0cEdWuQbqD9//tlZxiMuMNmxB8PlEwvYwuxsTmR1G5RXUePEX/SJn7aD0GMLieuZYSwNH0cQuJGwnYunXRQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/unset-value/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/unzipper": { "version": "0.10.11", - "resolved": "https://registry.npmjs.org/unzipper/-/unzipper-0.10.11.tgz", - "integrity": "sha512-+BrAq2oFqWod5IESRjL3S8baohbevGcVA+teAIOYWM3pDVdseogqbzhhvvmiyQrUNKFUnDMtELW3X8ykbyDCJw==", + "license": "MIT", "dependencies": { "big-integer": "^1.6.17", "binary": "~0.3.0", @@ -29488,13 +27188,11 @@ }, "node_modules/unzipper/node_modules/isarray": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + "license": "MIT" }, "node_modules/unzipper/node_modules/readable-stream": { "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -29507,16 +27205,15 @@ }, "node_modules/unzipper/node_modules/string_decoder": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/update-browserslist-db": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", - "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz", + "integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==", "funding": [ { "type": "opencollective", @@ -29531,9 +27228,10 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.2.0", + 
"picocolors": "^1.1.0" }, "bin": { "update-browserslist-db": "cli.js" @@ -29544,8 +27242,7 @@ }, "node_modules/update-notifier": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz", - "integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==", + "license": "BSD-2-Clause", "dependencies": { "boxen": "^5.0.0", "chalk": "^4.1.0", @@ -29571,8 +27268,7 @@ }, "node_modules/update-notifier/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -29585,8 +27281,7 @@ }, "node_modules/update-notifier/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -29600,8 +27295,7 @@ }, "node_modules/update-notifier/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -29611,21 +27305,18 @@ }, "node_modules/update-notifier/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/update-notifier/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/update-notifier/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -29635,22 +27326,18 @@ }, "node_modules/uri-js": { "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "license": "BSD-2-Clause", "dependencies": { "punycode": "^2.1.0" } }, "node_modules/urix": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", - "integrity": "sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==", - "deprecated": "Please see https://github.com/lydell/urix#deprecated" + "license": "MIT" }, "node_modules/url": { "version": "0.11.3", - "resolved": "https://registry.npmjs.org/url/-/url-0.11.3.tgz", - "integrity": "sha512-6hxOLGfZASQK/cijlZnZJTq8OXAkt/3YGfQX45vvMYXpZoo8NdWZcY73K108Jf759lS1Bv/8wXnHDTSz17dSRw==", + "license": "MIT", "dependencies": { "punycode": "^1.4.1", "qs": "^6.11.2" @@ -29658,8 +27345,7 @@ }, "node_modules/url-loader": { "version": "4.1.1", - 
"resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", - "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "license": "MIT", "dependencies": { "loader-utils": "^2.0.0", "mime-types": "^2.1.27", @@ -29684,8 +27370,7 @@ }, "node_modules/url-parse-lax": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", + "license": "MIT", "dependencies": { "prepend-http": "^2.0.0" }, @@ -29695,21 +27380,18 @@ }, "node_modules/url-to-options": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/url-to-options/-/url-to-options-1.0.1.tgz", - "integrity": "sha512-0kQLIzG4fdk/G5NONku64rSH/x32NOA39LVQqlK8Le6lvTF6GGRJpqaQFGgU+CLwySIqBSMdwYM0sYcW9f6P4A==", + "license": "MIT", "engines": { "node": ">= 4" } }, "node_modules/url/node_modules/punycode": { "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + "license": "MIT" }, "node_modules/url/node_modules/qs": { "version": "6.11.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.2.tgz", - "integrity": "sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==", + "license": "BSD-3-Clause", "dependencies": { "side-channel": "^1.0.4" }, @@ -29722,32 +27404,28 @@ }, "node_modules/use": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/use-composed-ref": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", - "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "license": "MIT", "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0" } }, "node_modules/use-editable": { "version": "2.3.3", - "resolved": "https://registry.npmjs.org/use-editable/-/use-editable-2.3.3.tgz", - "integrity": "sha512-7wVD2JbfAFJ3DK0vITvXBdpd9JAz5BcKAAolsnLBuBn6UDDwBGuCIAGvR3yA2BNKm578vAMVHFCWaOcA+BhhiA==", + "license": "MIT", "peerDependencies": { "react": ">= 16.8.0" } }, "node_modules/use-isomorphic-layout-effect": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", - "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "license": "MIT", "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0" }, @@ -29759,8 +27437,7 @@ }, "node_modules/use-latest": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", - "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "license": "MIT", "dependencies": { "use-isomorphic-layout-effect": "^1.1.1" }, @@ -29775,16 +27452,14 @@ }, "node_modules/use-sync-external-store": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", - "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==", + "license": "MIT", "peerDependencies": { "react": "^16.8.0 || 
^17.0.0 || ^18.0.0" } }, "node_modules/util": { "version": "0.12.5", - "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", - "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "license": "MIT", "dependencies": { "inherits": "^2.0.3", "is-arguments": "^1.0.4", @@ -29795,13 +27470,11 @@ }, "node_modules/util-deprecate": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + "license": "MIT" }, "node_modules/util.promisify": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz", - "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==", + "license": "MIT", "dependencies": { "define-properties": "^1.1.3", "es-abstract": "^1.17.2", @@ -29814,45 +27487,39 @@ }, "node_modules/utila": { "version": "0.4.0", - "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", - "integrity": "sha1-ihagXURWV6Oupe7MWxKk+lN5dyw=" + "license": "MIT" }, "node_modules/utility-types": { "version": "3.10.0", - "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", - "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==", + "license": "MIT", "engines": { "node": ">= 4" } }, "node_modules/utils-merge": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", "engines": { "node": ">= 0.4.0" } }, "node_modules/utrie": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/utrie/-/utrie-1.0.2.tgz", - "integrity": "sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw==", + "license": "MIT", "dependencies": { "base64-arraybuffer": "^1.0.2" } }, "node_modules/uuid": { "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "license": "MIT", "bin": { "uuid": "dist/bin/uuid" } }, "node_modules/uvu": { "version": "0.5.6", - "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz", - "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==", + "license": "MIT", "dependencies": { "dequal": "^2.0.0", "diff": "^5.0.0", @@ -29868,30 +27535,35 @@ }, "node_modules/uvu/node_modules/kleur": { "version": "4.1.5", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", - "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/v8-to-istanbul": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.0.tgz", - "integrity": "sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==", + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", "dev": true, + "license": "ISC", "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", "@types/istanbul-lib-coverage": 
"^2.0.1", - "convert-source-map": "^1.6.0" + "convert-source-map": "^2.0.0" }, "engines": { "node": ">=10.12.0" } }, + "node_modules/v8-to-istanbul/node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, "node_modules/validate-npm-package-license": { "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "license": "Apache-2.0", "dependencies": { "spdx-correct": "^3.0.0", "spdx-expression-parse": "^3.0.0" @@ -29899,53 +27571,41 @@ }, "node_modules/validate.io-array": { "version": "1.0.6", - "resolved": "https://registry.npmjs.org/validate.io-array/-/validate.io-array-1.0.6.tgz", - "integrity": "sha512-DeOy7CnPEziggrOO5CZhVKJw6S3Yi7e9e65R1Nl/RTN1vTQKnzjfvks0/8kQ40FP/dsjRAOd4hxmJ7uLa6vxkg==" + "license": "MIT" }, "node_modules/validate.io-function": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/validate.io-function/-/validate.io-function-1.0.2.tgz", - "integrity": "sha512-LlFybRJEriSuBnUhQyG5bwglhh50EpTL2ul23MPIuR1odjO7XaMLFV8vHGwp7AZciFxtYOeiSCT5st+XSPONiQ==" + "version": "1.0.2" }, "node_modules/validate.io-integer": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/validate.io-integer/-/validate.io-integer-1.0.5.tgz", - "integrity": "sha512-22izsYSLojN/P6bppBqhgUDjCkr5RY2jd+N2a3DCAUey8ydvrZ/OkGvFPR7qfOpwR2LC5p4Ngzxz36g5Vgr/hQ==", "dependencies": { "validate.io-number": "^1.0.3" } }, "node_modules/validate.io-integer-array": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/validate.io-integer-array/-/validate.io-integer-array-1.0.0.tgz", - "integrity": "sha512-mTrMk/1ytQHtCY0oNO3dztafHYyGU88KL+jRxWuzfOmQb+4qqnWmI+gykvGp8usKZOM0H7keJHEbRaFiYA0VrA==", "dependencies": { "validate.io-array": "^1.0.3", "validate.io-integer": "^1.0.4" } }, "node_modules/validate.io-number": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/validate.io-number/-/validate.io-number-1.0.3.tgz", - "integrity": "sha512-kRAyotcbNaSYoDnXvb4MHg/0a1egJdLwS6oJ38TJY7aw9n93Fl/3blIXdyYvPOp55CNxywooG/3BcrwNrBpcSg==" + "version": "1.0.3" }, "node_modules/value-equal": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", - "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + "license": "MIT" }, "node_modules/vary": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/vendors": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz", - "integrity": "sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -29953,11 +27613,10 @@ }, "node_modules/verror": { "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", 
"engines": [ "node >=0.6.0" ], + "license": "MIT", "dependencies": { "assert-plus": "^1.0.0", "core-util-is": "1.0.2", @@ -29966,13 +27625,11 @@ }, "node_modules/verror/node_modules/core-util-is": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==" + "license": "MIT" }, "node_modules/vfile": { "version": "4.2.1", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", - "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "is-buffer": "^2.0.0", @@ -29986,8 +27643,7 @@ }, "node_modules/vfile-location": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz", - "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" @@ -29995,8 +27651,7 @@ }, "node_modules/vfile-message": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", - "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "license": "MIT", "dependencies": { "@types/unist": "^2.0.0", "unist-util-stringify-position": "^2.0.0" @@ -30008,18 +27663,15 @@ }, "node_modules/vm-browserify": { "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", - "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==" + "license": "MIT" }, "node_modules/w3c-keyname": { "version": "2.2.8", - "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", - "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==" + "license": "MIT" }, "node_modules/wait-on": { "version": "6.0.1", - "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-6.0.1.tgz", - "integrity": "sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==", + "license": "MIT", "dependencies": { "axios": "^0.25.0", "joi": "^17.6.0", @@ -30036,25 +27688,22 @@ }, "node_modules/walker": { "version": "1.0.8", - "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", - "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", "dev": true, + "license": "Apache-2.0", "dependencies": { "makeerror": "1.0.12" } }, "node_modules/warning": { "version": "4.0.3", - "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", - "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "license": "MIT", "dependencies": { "loose-envify": "^1.0.0" } }, "node_modules/watchpack": { "version": "2.4.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "license": "MIT", "dependencies": { "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.1.2" @@ -30065,16 +27714,14 @@ }, "node_modules/wbuf": { "version": "1.7.3", - "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": 
"sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "license": "MIT", "dependencies": { "minimalistic-assert": "^1.0.0" } }, "node_modules/web-namespaces": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz", - "integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -30082,18 +27729,15 @@ }, "node_modules/web-worker": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.3.0.tgz", - "integrity": "sha512-BSR9wyRsy/KOValMgd5kMyr3JzpdeoR9KVId8u5GVlTTAtNChlsE4yTxeY7zMdNSyOmoKBv8NH2qeRY9Tg+IaA==" + "license": "Apache-2.0" }, "node_modules/webidl-conversions": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + "license": "BSD-2-Clause" }, "node_modules/webpack": { "version": "5.74.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.74.0.tgz", - "integrity": "sha512-A2InDwnhhGN4LYctJj6M1JEaGL7Luj6LOmyBHjcI8529cm5p6VXiTIW2sn6ffvEAKmveLzvu4jrihwXtPojlAA==", + "license": "MIT", "dependencies": { "@types/eslint-scope": "^3.7.3", "@types/estree": "^0.0.51", @@ -30138,8 +27782,7 @@ }, "node_modules/webpack-bundle-analyzer": { "version": "4.5.0", - "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.5.0.tgz", - "integrity": "sha512-GUMZlM3SKwS8Z+CKeIFx7CVoHn3dXFcUAjT/dcZQQmfSZGvitPfMob2ipjai7ovFFqPvTqkEZ/leL4O0YOdAYQ==", + "license": "MIT", "dependencies": { "acorn": "^8.0.4", "acorn-walk": "^8.0.0", @@ -30160,8 +27803,7 @@ }, "node_modules/webpack-bundle-analyzer/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -30174,8 +27816,7 @@ }, "node_modules/webpack-bundle-analyzer/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -30189,8 +27830,7 @@ }, "node_modules/webpack-bundle-analyzer/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -30200,29 +27840,25 @@ }, "node_modules/webpack-bundle-analyzer/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/webpack-bundle-analyzer/node_modules/commander": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": 
"sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", "engines": { "node": ">= 10" } }, "node_modules/webpack-bundle-analyzer/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/webpack-bundle-analyzer/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -30232,8 +27868,7 @@ }, "node_modules/webpack-dev-middleware": { "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "license": "MIT", "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", @@ -30254,8 +27889,7 @@ }, "node_modules/webpack-dev-middleware/node_modules/ajv": { "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -30269,8 +27903,7 @@ }, "node_modules/webpack-dev-middleware/node_modules/ajv-keywords": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3" }, @@ -30280,21 +27913,18 @@ }, "node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/webpack-dev-middleware/node_modules/range-parser": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/webpack-dev-middleware/node_modules/schema-utils": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.8.0", @@ -30311,8 +27941,7 @@ }, "node_modules/webpack-dev-server": { "version": "4.9.3", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.9.3.tgz", - "integrity": "sha512-3qp/eoboZG5/6QgiZ3llN8TUzkSpYg1Ko9khWX1h40MIEUNS2mDoIa8aXsPfskER+GbTvs/IJZ1QTBBhhuetSw==", + "license": "MIT", "dependencies": { "@types/bonjour": "^3.5.9", "@types/connect-history-api-fallback": "^1.3.5", @@ -30365,8 +27994,7 @@ }, "node_modules/webpack-dev-server/node_modules/ajv": { "version": "8.11.0", - 
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", @@ -30380,8 +28008,7 @@ }, "node_modules/webpack-dev-server/node_modules/ajv-keywords": { "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3" }, @@ -30391,13 +28018,11 @@ }, "node_modules/webpack-dev-server/node_modules/json-schema-traverse": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "license": "MIT" }, "node_modules/webpack-dev-server/node_modules/schema-utils": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.8.0", @@ -30414,8 +28039,7 @@ }, "node_modules/webpack-dev-server/node_modules/ws": { "version": "8.8.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.8.1.tgz", - "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==", + "license": "MIT", "engines": { "node": ">=10.0.0" }, @@ -30434,8 +28058,7 @@ }, "node_modules/webpack-merge": { "version": "5.8.0", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz", - "integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==", + "license": "MIT", "dependencies": { "clone-deep": "^4.0.1", "wildcard": "^2.0.0" @@ -30446,16 +28069,14 @@ }, "node_modules/webpack-sources": { "version": "3.2.3", - "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", - "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "license": "MIT", "engines": { "node": ">=10.13.0" } }, "node_modules/webpackbar": { "version": "5.0.2", - "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", - "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "license": "MIT", "dependencies": { "chalk": "^4.1.0", "consola": "^2.15.3", @@ -30471,8 +28092,7 @@ }, "node_modules/webpackbar/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -30485,8 +28105,7 @@ }, "node_modules/webpackbar/node_modules/chalk": { "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -30500,8 +28119,7 @@ }, "node_modules/webpackbar/node_modules/color-convert": { "version": 
"2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -30511,21 +28129,18 @@ }, "node_modules/webpackbar/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/webpackbar/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/webpackbar/node_modules/supports-color": { "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -30535,8 +28150,7 @@ }, "node_modules/websocket-driver": { "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "license": "Apache-2.0", "dependencies": { "http-parser-js": ">=0.5.1", "safe-buffer": ">=5.1.0", @@ -30548,16 +28162,14 @@ }, "node_modules/websocket-extensions": { "version": "0.1.4", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "license": "Apache-2.0", "engines": { "node": ">=0.8.0" } }, "node_modules/whatwg-url": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" @@ -30565,8 +28177,7 @@ }, "node_modules/which": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -30579,8 +28190,7 @@ }, "node_modules/which-boxed-primitive": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", + "license": "MIT", "dependencies": { "is-bigint": "^1.0.1", "is-boolean-object": "^1.1.0", @@ -30594,13 +28204,11 @@ }, "node_modules/which-module": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", - "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==" + "license": "ISC" }, "node_modules/which-typed-array": { "version": "1.1.9", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": 
"sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "license": "MIT", "dependencies": { "available-typed-arrays": "^1.0.5", "call-bind": "^1.0.2", @@ -30618,8 +28226,7 @@ }, "node_modules/widest-line": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", + "license": "MIT", "dependencies": { "string-width": "^4.0.0" }, @@ -30629,29 +28236,25 @@ }, "node_modules/wildcard": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz", - "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==" + "license": "MIT" }, "node_modules/wordwrap": { "version": "0.0.2", - "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", - "integrity": "sha512-xSBsCeh+g+dinoBv3GAOWM4LcVVO68wLXRanibtBSdUvkGWQRGeE9P7IwU9EmDDi4jA6L44lz15CGMwdw9N5+Q==", + "license": "MIT/X11", "engines": { "node": ">=0.4.0" } }, "node_modules/worker-rpc": { "version": "0.1.1", - "resolved": "https://registry.npmjs.org/worker-rpc/-/worker-rpc-0.1.1.tgz", - "integrity": "sha512-P1WjMrUB3qgJNI9jfmpZ/htmBEjFh//6l/5y8SD9hg1Ef5zTTVVoRjTrTEzPrNBQvmhMxkoTsjOXN10GWU7aCg==", + "license": "MIT", "dependencies": { "microevent.ts": "~0.1.1" } }, "node_modules/wrap-ansi": { "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -30667,8 +28270,7 @@ "node_modules/wrap-ansi-cjs": { "name": "wrap-ansi", "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -30683,8 +28285,7 @@ }, "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -30697,8 +28298,7 @@ }, "node_modules/wrap-ansi-cjs/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -30708,13 +28308,11 @@ }, "node_modules/wrap-ansi-cjs/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/wrap-ansi/node_modules/ansi-styles": { "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -30727,8 +28325,7 @@ }, 
"node_modules/wrap-ansi/node_modules/color-convert": { "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -30738,18 +28335,15 @@ }, "node_modules/wrap-ansi/node_modules/color-name": { "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "license": "MIT" }, "node_modules/wrappy": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + "license": "ISC" }, "node_modules/write-file-atomic": { "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4", "is-typedarray": "^1.0.0", @@ -30759,8 +28353,7 @@ }, "node_modules/ws": { "version": "7.5.6", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.6.tgz", - "integrity": "sha512-6GLgCqo2cy2A2rjCNFlxQS6ZljG/coZfZXclldI8FB/1G3CCI36Zd8xy2HrFVACi8tfk5XrgLQEk+P0Tnz9UcA==", + "license": "MIT", "engines": { "node": ">=8.3.0" }, @@ -30779,16 +28372,14 @@ }, "node_modules/xdg-basedir": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", + "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/xml-formatter": { "version": "2.6.1", - "resolved": "https://registry.npmjs.org/xml-formatter/-/xml-formatter-2.6.1.tgz", - "integrity": "sha512-dOiGwoqm8y22QdTNI7A+N03tyVfBlQ0/oehAzxIZtwnFAHGeSlrfjF73YQvzSsa/Kt6+YZasKsrdu6OIpuBggw==", + "license": "MIT", "dependencies": { "xml-parser-xo": "^3.2.0" }, @@ -30798,8 +28389,7 @@ }, "node_modules/xml-js": { "version": "1.6.11", - "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", - "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "license": "MIT", "dependencies": { "sax": "^1.2.4" }, @@ -30809,58 +28399,50 @@ }, "node_modules/xml-parser-xo": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/xml-parser-xo/-/xml-parser-xo-3.2.0.tgz", - "integrity": "sha512-8LRU6cq+d7mVsoDaMhnkkt3CTtAs4153p49fRo+HIB3I1FD1o5CeXRjRH29sQevIfVJIcPjKSsPU/+Ujhq09Rg==", + "license": "MIT", "engines": { "node": ">= 10" } }, "node_modules/xmlbuilder": { "version": "13.0.2", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-13.0.2.tgz", - "integrity": "sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ==", + "license": "MIT", "engines": { "node": ">=6.0" } }, "node_modules/xtend": { "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "license": "MIT", "engines": { "node": ">=0.4" } }, "node_modules/y18n": { "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "license": "ISC", "engines": { "node": ">=10" } }, "node_modules/yallist": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + "license": "ISC" }, "node_modules/yaml": { "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "license": "ISC", "engines": { "node": ">= 6" } }, "node_modules/yaml-ast-parser": { "version": "0.0.43", - "resolved": "https://registry.npmjs.org/yaml-ast-parser/-/yaml-ast-parser-0.0.43.tgz", - "integrity": "sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==" + "license": "Apache-2.0" }, "node_modules/yamljs": { "version": "0.2.10", - "resolved": "https://registry.npmjs.org/yamljs/-/yamljs-0.2.10.tgz", - "integrity": "sha512-sbkbOosewjeRmJ23Hjee1RgTxn+xa7mt4sew3tfD0SdH0LTcswnZC9dhSNq4PIz15roQMzb84DjECyQo5DWIww==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "glob": "^7.0.5" @@ -30872,16 +28454,14 @@ }, "node_modules/yamljs/node_modules/argparse": { "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } }, "node_modules/yargs": { "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "license": "MIT", "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", @@ -30897,16 +28477,14 @@ }, "node_modules/yargs-parser": { "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/yauzl": { "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", + "license": "MIT", "dependencies": { "buffer-crc32": "~0.2.3", "fd-slicer": "~1.1.0" @@ -30914,8 +28492,7 @@ }, "node_modules/yocto-queue": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "license": "MIT", "engines": { "node": ">=10" }, @@ -30925,23025 +28502,11 @@ }, "node_modules/zwitch": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", - "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" } } - }, - "dependencies": { - "@algolia/autocomplete-core": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", - "integrity": 
"sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", - "requires": { - "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", - "@algolia/autocomplete-shared": "1.9.3" - } - }, - "@algolia/autocomplete-plugin-algolia-insights": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", - "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", - "requires": { - "@algolia/autocomplete-shared": "1.9.3" - } - }, - "@algolia/autocomplete-preset-algolia": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", - "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", - "requires": { - "@algolia/autocomplete-shared": "1.9.3" - } - }, - "@algolia/autocomplete-shared": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", - "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", - "requires": {} - }, - "@algolia/cache-browser-local-storage": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.19.1.tgz", - "integrity": "sha512-FYAZWcGsFTTaSAwj9Std8UML3Bu8dyWDncM7Ls8g+58UOe4XYdlgzXWbrIgjaguP63pCCbMoExKr61B+ztK3tw==", - "requires": { - "@algolia/cache-common": "4.19.1" - } - }, - "@algolia/cache-common": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.19.1.tgz", - "integrity": "sha512-XGghi3l0qA38HiqdoUY+wvGyBsGvKZ6U3vTiMBT4hArhP3fOGLXpIINgMiiGjTe4FVlTa5a/7Zf2bwlIHfRqqg==" - }, - "@algolia/cache-in-memory": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.19.1.tgz", - "integrity": "sha512-+PDWL+XALGvIginigzu8oU6eWw+o76Z8zHbBovWYcrtWOEtinbl7a7UTt3x3lthv+wNuFr/YD1Gf+B+A9V8n5w==", - "requires": { - "@algolia/cache-common": "4.19.1" - } - }, - "@algolia/client-account": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.19.1.tgz", - "integrity": "sha512-Oy0ritA2k7AMxQ2JwNpfaEcgXEDgeyKu0V7E7xt/ZJRdXfEpZcwp9TOg4TJHC7Ia62gIeT2Y/ynzsxccPw92GA==", - "requires": { - "@algolia/client-common": "4.19.1", - "@algolia/client-search": "4.19.1", - "@algolia/transporter": "4.19.1" - } - }, - "@algolia/client-analytics": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.19.1.tgz", - "integrity": "sha512-5QCq2zmgdZLIQhHqwl55ZvKVpLM3DNWjFI4T+bHr3rGu23ew2bLO4YtyxaZeChmDb85jUdPDouDlCumGfk6wOg==", - "requires": { - "@algolia/client-common": "4.19.1", - "@algolia/client-search": "4.19.1", - "@algolia/requester-common": "4.19.1", - "@algolia/transporter": "4.19.1" - } - }, - "@algolia/client-common": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.19.1.tgz", - "integrity": "sha512-3kAIVqTcPrjfS389KQvKzliC559x+BDRxtWamVJt8IVp7LGnjq+aVAXg4Xogkur1MUrScTZ59/AaUd5EdpyXgA==", - "requires": { - "@algolia/requester-common": "4.19.1", - "@algolia/transporter": "4.19.1" - } - }, - "@algolia/client-personalization": { - "version": "4.19.1", - "resolved": 
"https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.19.1.tgz", - "integrity": "sha512-8CWz4/H5FA+krm9HMw2HUQenizC/DxUtsI5oYC0Jxxyce1vsr8cb1aEiSJArQT6IzMynrERif1RVWLac1m36xw==", - "requires": { - "@algolia/client-common": "4.19.1", - "@algolia/requester-common": "4.19.1", - "@algolia/transporter": "4.19.1" - } - }, - "@algolia/client-search": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.19.1.tgz", - "integrity": "sha512-mBecfMFS4N+yK/p0ZbK53vrZbL6OtWMk8YmnOv1i0LXx4pelY8TFhqKoTit3NPVPwoSNN0vdSN9dTu1xr1XOVw==", - "requires": { - "@algolia/client-common": "4.19.1", - "@algolia/requester-common": "4.19.1", - "@algolia/transporter": "4.19.1" - } - }, - "@algolia/events": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", - "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" - }, - "@algolia/logger-common": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.19.1.tgz", - "integrity": "sha512-i6pLPZW/+/YXKis8gpmSiNk1lOmYCmRI6+x6d2Qk1OdfvX051nRVdalRbEcVTpSQX6FQAoyeaui0cUfLYW5Elw==" - }, - "@algolia/logger-console": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.19.1.tgz", - "integrity": "sha512-jj72k9GKb9W0c7TyC3cuZtTr0CngLBLmc8trzZlXdfvQiigpUdvTi1KoWIb2ZMcRBG7Tl8hSb81zEY3zI2RlXg==", - "requires": { - "@algolia/logger-common": "4.19.1" - } - }, - "@algolia/requester-browser-xhr": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.19.1.tgz", - "integrity": "sha512-09K/+t7lptsweRTueHnSnmPqIxbHMowejAkn9XIcJMLdseS3zl8ObnS5GWea86mu3vy4+8H+ZBKkUN82Zsq/zg==", - "requires": { - "@algolia/requester-common": "4.19.1" - } - }, - "@algolia/requester-common": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.19.1.tgz", - "integrity": "sha512-BisRkcWVxrDzF1YPhAckmi2CFYK+jdMT60q10d7z3PX+w6fPPukxHRnZwooiTUrzFe50UBmLItGizWHP5bDzVQ==" - }, - "@algolia/requester-node-http": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.19.1.tgz", - "integrity": "sha512-6DK52DHviBHTG2BK/Vv2GIlEw7i+vxm7ypZW0Z7vybGCNDeWzADx+/TmxjkES2h15+FZOqVf/Ja677gePsVItA==", - "requires": { - "@algolia/requester-common": "4.19.1" - } - }, - "@algolia/transporter": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.19.1.tgz", - "integrity": "sha512-nkpvPWbpuzxo1flEYqNIbGz7xhfhGOKGAZS7tzC+TELgEmi7z99qRyTfNSUlW7LZmB3ACdnqAo+9A9KFBENviQ==", - "requires": { - "@algolia/cache-common": "4.19.1", - "@algolia/logger-common": "4.19.1", - "@algolia/requester-common": "4.19.1" - } - }, - "@ampproject/remapping": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.1.2.tgz", - "integrity": "sha512-hoyByceqwKirw7w3Z7gnIIZC3Wx3J484Y3L/cMpXFbr7d9ZQj2mODrirNzcJa+SM3UlpWXYvKV4RlRpFXlWgXg==", - "requires": { - "@jridgewell/trace-mapping": "^0.3.0" - } - }, - "@apidevtools/json-schema-ref-parser": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-10.1.0.tgz", - "integrity": 
"sha512-3e+viyMuXdrcK8v5pvP+SDoAQ77FH6OyRmuK48SZKmdHJRFm87RsSs8qm6kP39a/pOPURByJw+OXzQIqcfmKtA==", - "requires": { - "@jsdevtools/ono": "^7.1.3", - "@types/json-schema": "^7.0.11", - "@types/lodash.clonedeep": "^4.5.7", - "js-yaml": "^4.1.0", - "lodash.clonedeep": "^4.5.0" - } - }, - "@auth0/auth0-react": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@auth0/auth0-react/-/auth0-react-2.2.1.tgz", - "integrity": "sha512-4L4FZvSqIwzVk5mwWFbWzfJ4Zq11dgS0v4KIGKro5tL9dgOnBGq+Ino/1mzexPV1LJHBkfwXG4+IaPiQNz5CGg==", - "requires": { - "@auth0/auth0-spa-js": "^2.1.2" - } - }, - "@auth0/auth0-spa-js": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@auth0/auth0-spa-js/-/auth0-spa-js-2.1.2.tgz", - "integrity": "sha512-xdA65Z/U7++Y7L9Uwh8Q8OVOs6qgFz+fb7GAzHFjpr1icO37B//xdzLXm7ZRgA19RWrsNe1nme3h896igJSvvw==" - }, - "@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", - "requires": { - "@babel/highlight": "^7.18.6" - } - }, - "@babel/compat-data": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.20.1.tgz", - "integrity": "sha512-EWZ4mE2diW3QALKvDMiXnbZpRvlj+nayZ112nK93SnhqOtpdsbVD4W+2tEoT3YNBAG9RBR0ISY758ZkOgsn6pQ==" - }, - "@babel/core": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.18.9.tgz", - "integrity": "sha512-1LIb1eL8APMy91/IMW+31ckrfBM4yCoLaVzoDhZUKSM4cu1L1nIidyxkCgzPAgrC5WEz36IPEr/eSeSF9pIn+g==", - "requires": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.18.9", - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-module-transforms": "^7.18.9", - "@babel/helpers": "^7.18.9", - "@babel/parser": "^7.18.9", - "@babel/template": "^7.18.6", - "@babel/traverse": "^7.18.9", - "@babel/types": "^7.18.9", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@babel/generator": { - "version": "7.20.4", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.20.4.tgz", - "integrity": "sha512-luCf7yk/cm7yab6CAW1aiFnmEfBJplb/JojV56MYEK7ziWfGmFlTfmL9Ehwfy4gFhbjBfWO1wj7/TuSbVNEEtA==", - "requires": { - "@babel/types": "^7.20.2", - "@jridgewell/gen-mapping": "^0.3.2", - "jsesc": "^2.5.1" - } - }, - "@babel/helper-annotate-as-pure": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz", - "integrity": "sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA==", - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz", - "integrity": "sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw==", - "requires": { - "@babel/helper-explode-assignable-expression": "^7.18.6", - "@babel/types": 
"^7.18.9" - } - }, - "@babel/helper-compilation-targets": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.20.0.tgz", - "integrity": "sha512-0jp//vDGp9e8hZzBc6N/KwA5ZK3Wsm/pfm4CrY7vzegkVxc65SgSn6wYOnwHe9Js9HRQ1YTCKLGPzDtaS3RoLQ==", - "requires": { - "@babel/compat-data": "^7.20.0", - "@babel/helper-validator-option": "^7.18.6", - "browserslist": "^4.21.3", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@babel/helper-create-class-features-plugin": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.20.2.tgz", - "integrity": "sha512-k22GoYRAHPYr9I+Gvy2ZQlAe5mGy8BqWst2wRt8cwIufWTxrsVshhIBvYNqC80N0GSFWTsqRVexOtfzlgOEDvA==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-member-expression-to-functions": "^7.18.9", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-replace-supers": "^7.19.1", - "@babel/helper-split-export-declaration": "^7.18.6" - } - }, - "@babel/helper-create-regexp-features-plugin": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.19.0.tgz", - "integrity": "sha512-htnV+mHX32DF81amCDrwIDr8nrp1PTm+3wfBN9/v8QJOLEioOCOG7qNyq0nHeFiWbT3Eb7gsPwEmV64UCQ1jzw==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "regexpu-core": "^5.1.0" - } - }, - "@babel/helper-define-polyfill-provider": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz", - "integrity": "sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww==", - "requires": { - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==" - }, - "@babel/helper-explode-assignable-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz", - "integrity": "sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg==", - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": 
"sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", - "requires": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" - } - }, - "@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-member-expression-to-functions": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.18.9.tgz", - "integrity": "sha512-RxifAh2ZoVU67PyKIO4AMi1wTenGfMR/O/ae0CCRqwgBAt5v7xjdtRw7UoSbsreKrQn5t7r89eruK/9JjYHuDg==", - "requires": { - "@babel/types": "^7.18.9" - } - }, - "@babel/helper-module-imports": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz", - "integrity": "sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==", - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-module-transforms": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.20.2.tgz", - "integrity": "sha512-zvBKyJXRbmK07XhMuujYoJ48B5yvvmM6+wcpv6Ivj4Yg6qO7NOZOSnvZN9CRl1zz1Z4cKf8YejmCMh8clOoOeA==", - "requires": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-simple-access": "^7.20.2", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/helper-validator-identifier": "^7.19.1", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2" - } - }, - "@babel/helper-optimise-call-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz", - "integrity": "sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA==", - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-plugin-utils": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.20.2.tgz", - "integrity": "sha512-8RvlJG2mj4huQ4pZ+rU9lqKi9ZKiRmuvGuM2HlWmkmgOhbs6zEAw6IEiJ5cQqGbDzGZOhwuOQNtZMi/ENLjZoQ==" - }, - "@babel/helper-remap-async-to-generator": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz", - "integrity": "sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-wrap-function": "^7.18.9", - "@babel/types": "^7.18.9" - } - }, - "@babel/helper-replace-supers": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.19.1.tgz", - "integrity": "sha512-T7ahH7wV0Hfs46SFh5Jz3s0B6+o8g3c+7TMxu7xKfmHikg7EAZ3I2Qk9LFhjxXq8sL7UkP5JflezNwoZa8WvWw==", - "requires": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-member-expression-to-functions": "^7.18.9", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/traverse": "^7.19.1", - "@babel/types": 
"^7.19.0" - } - }, - "@babel/helper-simple-access": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.20.2.tgz", - "integrity": "sha512-+0woI/WPq59IrqDYbVGfshjT5Dmk/nnbdpcF8SnMhhXObpTq2KNBdLFRFrkVdbDOyUmHBCxzm5FHV1rACIkIbA==", - "requires": { - "@babel/types": "^7.20.2" - } - }, - "@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.20.0.tgz", - "integrity": "sha512-5y1JYeNKfvnT8sZcK9DVRtpTbGiomYIHviSP3OQWmDPU3DeH4a1ZlT/N2lyQ5P8egjcRaT/Y9aNqUxK0WsnIIg==", - "requires": { - "@babel/types": "^7.20.0" - } - }, - "@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-string-parser": { - "version": "7.19.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz", - "integrity": "sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==" - }, - "@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==" - }, - "@babel/helper-validator-option": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz", - "integrity": "sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==" - }, - "@babel/helper-wrap-function": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.19.0.tgz", - "integrity": "sha512-txX8aN8CZyYGTwcLhlk87KRqncAzhh5TpQamZUa0/u3an36NtDpUP6bQgBCBcLeBs09R/OwQu3OjK0k/HwfNDg==", - "requires": { - "@babel/helper-function-name": "^7.19.0", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.19.0", - "@babel/types": "^7.19.0" - } - }, - "@babel/helpers": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.20.1.tgz", - "integrity": "sha512-J77mUVaDTUJFZ5BpP6mMn6OIl3rEWymk2ZxDBQJUG3P+PbmyMcF3bYWvz0ma69Af1oobDqT/iAsvzhB58xhQUg==", - "requires": { - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.0" - } - }, - "@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", - "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, - "@babel/parser": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.21.3.tgz", - "integrity": "sha512-lobG0d7aOfQRXh8AyklEAgZGvA4FShxo6xQbUrrT/cNBPUdIDojlokwJsQyCC/eKia7ifqM0yP+2DRZ4WKw2RQ==" - }, - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz", - "integrity": "sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.18.9.tgz", - "integrity": "sha512-AHrP9jadvH7qlOj6PINbgSuphjQUAK7AOT7DPjBo9EHoLhQTnnK5u45e1Hd4DbSQEO9nqPWtQ89r+XEOWFScKg==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", - "@babel/plugin-proposal-optional-chaining": "^7.18.9" - } - }, - "@babel/plugin-proposal-async-generator-functions": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.1.tgz", - "integrity": "sha512-Gh5rchzSwE4kC+o/6T8waD0WHEQIsDmjltY8WnWRXHUdH8axZhuH86Ov9M72YhJfDrZseQwuuWaaIT/TmePp3g==", - "requires": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-remap-async-to-generator": "^7.18.9", - "@babel/plugin-syntax-async-generators": "^7.8.4" - } - }, - "@babel/plugin-proposal-class-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", - "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-proposal-class-static-block": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.6.tgz", - "integrity": "sha512-+I3oIiNxrCpup3Gi8n5IGMwj0gOCAjcJUSQEcotNnCCPMEnixawOQ+KeJPlgfjzx+FKQ1QSyZOWe7wmoJp7vhw==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - } - }, - "@babel/plugin-proposal-dynamic-import": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz", - "integrity": "sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - } - }, - "@babel/plugin-proposal-export-namespace-from": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz", - "integrity": "sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - } - }, - "@babel/plugin-proposal-json-strings": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz", - "integrity": 
"sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-json-strings": "^7.8.3" - } - }, - "@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.18.9.tgz", - "integrity": "sha512-128YbMpjCrP35IOExw2Fq+x55LMP42DzhOhX2aNNIdI9avSWl2PI0yuBWarr3RYpZBSPtabfadkH2yeRiMD61Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - } - }, - "@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", - "integrity": "sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - } - }, - "@babel/plugin-proposal-numeric-separator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz", - "integrity": "sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - } - }, - "@babel/plugin-proposal-object-rest-spread": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.2.tgz", - "integrity": "sha512-Ks6uej9WFK+fvIMesSqbAto5dD8Dz4VuuFvGJFKgIGSkJuRGcrwGECPA1fDgQK3/DbExBJpEkTeYeB8geIFCSQ==", - "requires": { - "@babel/compat-data": "^7.20.1", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.20.1" - } - }, - "@babel/plugin-proposal-optional-catch-binding": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz", - "integrity": "sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - } - }, - "@babel/plugin-proposal-optional-chaining": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.18.9.tgz", - "integrity": "sha512-v5nwt4IqBXihxGsW2QmCWMDS3B3bzGIk/EQVZz2ei7f3NJl8NzAJVvUmpDW5q1CRNY+Beb/k58UAH1Km1N411w==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - } - }, - "@babel/plugin-proposal-private-methods": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz", - "integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - 
"@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-proposal-private-property-in-object": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.18.6.tgz", - "integrity": "sha512-9Rysx7FOctvT5ouj5JODjAFAkgGoudQuLPamZb0v1TGLpapdNaftzifU8NTWQm0IRjqoYypdrSmyWgkocDQ8Dw==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - } - }, - "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", - "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-bigint": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", - "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "requires": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.3" - } - }, - "@babel/plugin-syntax-import-assertions": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.20.0.tgz", - "integrity": 
"sha512-IUh1vakzNoWalR8ch/areW7qFopR2AEw03JlG7BbrDqmQ4X3q9uuipQwSGrUn7oGiemKjtSLDhNtQHzMHr1JdQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.19.0" - } - }, - "@babel/plugin-syntax-import-meta": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", - "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-jsx": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.18.6.tgz", - "integrity": "sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "requires": { - 
"@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-typescript": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.20.0.tgz", - "integrity": "sha512-rd9TkG+u1CExzS4SM1BlMEhMXwFLKVjOAFFCDx9PbX5ycJWDoWMcwdJH9RhkPu1dOgn5TrxLot/Gx6lWFuAUNQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.19.0" - } - }, - "@babel/plugin-transform-arrow-functions": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.18.6.tgz", - "integrity": "sha512-9S9X9RUefzrsHZmKMbDXxweEH+YlE8JJEuat9FdvW9Qh1cw7W64jELCtWNkPBPX5En45uy28KGvA/AySqUh8CQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-async-to-generator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.18.6.tgz", - "integrity": "sha512-ARE5wZLKnTgPW7/1ftQmSi1CmkqqHo2DNmtztFhvgtOWSDfq0Cq9/9L+KnZNYSNrydBekhW3rwShduf59RoXag==", - "requires": { - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-remap-async-to-generator": "^7.18.6" - } - }, - "@babel/plugin-transform-block-scoped-functions": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz", - "integrity": "sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-block-scoping": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.20.2.tgz", - "integrity": "sha512-y5V15+04ry69OV2wULmwhEA6jwSWXO1TwAtIwiPXcvHcoOQUqpyMVd2bDsQJMW8AurjulIyUV8kDqtjSwHy1uQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-classes": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.20.2.tgz", - "integrity": "sha512-9rbPp0lCVVoagvtEyQKSo5L8oo0nQS/iif+lwlAz29MccX2642vWDlSZK+2T2buxbopotId2ld7zZAzRfz9j1g==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-replace-supers": "^7.19.1", - "@babel/helper-split-export-declaration": "^7.18.6", - "globals": "^11.1.0" - } - 
}, - "@babel/plugin-transform-computed-properties": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.18.9.tgz", - "integrity": "sha512-+i0ZU1bCDymKakLxn5srGHrsAPRELC2WIbzwjLhHW9SIE1cPYkLCL0NlnXMZaM1vhfgA2+M7hySk42VBvrkBRw==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-destructuring": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.20.2.tgz", - "integrity": "sha512-mENM+ZHrvEgxLTBXUiQ621rRXZes3KWUv6NdQlrnr1TkWVw+hUjQBZuP2X32qKlrlG2BzgR95gkuCRSkJl8vIw==", - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-dotall-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz", - "integrity": "sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-duplicate-keys": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz", - "integrity": "sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-exponentiation-operator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz", - "integrity": "sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw==", - "requires": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-for-of": { - "version": "7.18.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.8.tgz", - "integrity": "sha512-yEfTRnjuskWYo0k1mHUqrVWaZwrdq8AYbfrpqULOJOaucGSp4mNMVps+YtA8byoevxS/urwU75vyhQIxcCgiBQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-function-name": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz", - "integrity": "sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ==", - "requires": { - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-function-name": "^7.18.9", - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-literals": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz", - "integrity": "sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-member-expression-literals": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz", - "integrity": 
"sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-modules-amd": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.19.6.tgz", - "integrity": "sha512-uG3od2mXvAtIFQIh0xrpLH6r5fpSQN04gIVovl+ODLdUMANokxQLZnPBHcjmv3GxRjnqwLuHvppjjcelqUFZvg==", - "requires": { - "@babel/helper-module-transforms": "^7.19.6", - "@babel/helper-plugin-utils": "^7.19.0" - } - }, - "@babel/plugin-transform-modules-commonjs": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.19.6.tgz", - "integrity": "sha512-8PIa1ym4XRTKuSsOUXqDG0YaOlEuTVvHMe5JCfgBMOtHvJKw/4NGovEGN33viISshG/rZNVrACiBmPQLvWN8xQ==", - "requires": { - "@babel/helper-module-transforms": "^7.19.6", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-simple-access": "^7.19.4" - } - }, - "@babel/plugin-transform-modules-systemjs": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.19.6.tgz", - "integrity": "sha512-fqGLBepcc3kErfR9R3DnVpURmckXP7gj7bAlrTQyBxrigFqszZCkFkcoxzCp2v32XmwXLvbw+8Yq9/b+QqksjQ==", - "requires": { - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-module-transforms": "^7.19.6", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-validator-identifier": "^7.19.1" - } - }, - "@babel/plugin-transform-modules-umd": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz", - "integrity": "sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ==", - "requires": { - "@babel/helper-module-transforms": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.19.1.tgz", - "integrity": "sha512-oWk9l9WItWBQYS4FgXD4Uyy5kq898lvkXpXQxoJEY1RnvPk4R/Dvu2ebXU9q8lP+rlMwUQTFf2Ok6d78ODa0kw==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.19.0", - "@babel/helper-plugin-utils": "^7.19.0" - } - }, - "@babel/plugin-transform-new-target": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz", - "integrity": "sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-object-super": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz", - "integrity": "sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-replace-supers": "^7.18.6" - } - }, - "@babel/plugin-transform-parameters": { - "version": "7.20.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.20.3.tgz", - "integrity": 
"sha512-oZg/Fpx0YDrj13KsLyO8I/CX3Zdw7z0O9qOd95SqcoIzuqy/WTGWvePeHAnZCN54SfdyjHcb1S30gc8zlzlHcA==", - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-property-literals": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz", - "integrity": "sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-react-constant-elements": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.20.2.tgz", - "integrity": "sha512-KS/G8YI8uwMGKErLFOHS/ekhqdHhpEloxs43NecQHVgo2QuQSyJhGIY1fL8UGl9wy5ItVwwoUL4YxVqsplGq2g==", - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-react-display-name": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.18.6.tgz", - "integrity": "sha512-TV4sQ+T013n61uMoygyMRm+xf04Bd5oqFpv2jAEQwSZ8NwQA7zeRPg1LMVg2PWi3zWBz+CLKD+v5bcpZ/BS0aA==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-react-jsx": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.19.0.tgz", - "integrity": "sha512-UVEvX3tXie3Szm3emi1+G63jyw1w5IcMY0FSKM+CRnKRI5Mr1YbCNgsSTwoTwKphQEG9P+QqmuRFneJPZuHNhg==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/plugin-syntax-jsx": "^7.18.6", - "@babel/types": "^7.19.0" - } - }, - "@babel/plugin-transform-react-jsx-development": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.18.6.tgz", - "integrity": "sha512-SA6HEjwYFKF7WDjWcMcMGUimmw/nhNRDWxr+KaLSCrkD/LMDBvWRmHAYgE1HDeF8KUuI8OAu+RT6EOtKxSW2qA==", - "requires": { - "@babel/plugin-transform-react-jsx": "^7.18.6" - } - }, - "@babel/plugin-transform-react-pure-annotations": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.18.6.tgz", - "integrity": "sha512-I8VfEPg9r2TRDdvnHgPepTKvuRomzA8+u+nhY7qSI1fR2hRNebasZEETLyM5mAUr0Ku56OkXJ0I7NHJnO6cJiQ==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-regenerator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.6.tgz", - "integrity": "sha512-poqRI2+qiSdeldcz4wTSTXBRryoq3Gc70ye7m7UD5Ww0nE29IXqMl6r7Nd15WBgRd74vloEMlShtH6CKxVzfmQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "regenerator-transform": "^0.15.0" - } - }, - "@babel/plugin-transform-reserved-words": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz", - "integrity": "sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-runtime": { - 
"version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.18.9.tgz", - "integrity": "sha512-wS8uJwBt7/b/mzE13ktsJdmS4JP/j7PQSaADtnb4I2wL0zK51MQ0pmF8/Jy0wUIS96fr+fXT6S/ifiPXnvrlSg==", - "requires": { - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.9", - "babel-plugin-polyfill-corejs2": "^0.3.1", - "babel-plugin-polyfill-corejs3": "^0.5.2", - "babel-plugin-polyfill-regenerator": "^0.3.1", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@babel/plugin-transform-shorthand-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz", - "integrity": "sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-spread": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.19.0.tgz", - "integrity": "sha512-RsuMk7j6n+r752EtzyScnWkQyuJdli6LdO5Klv8Yx0OfPVTcQkIUfS8clx5e9yHXzlnhOZF3CbQ8C2uP5j074w==", - "requires": { - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9" - } - }, - "@babel/plugin-transform-sticky-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz", - "integrity": "sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-template-literals": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz", - "integrity": "sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-typeof-symbol": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz", - "integrity": "sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-typescript": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.20.2.tgz", - "integrity": "sha512-jvS+ngBfrnTUBfOQq8NfGnSbF9BrqlR6hjJ2yVxMkmO5nL/cdifNbI30EfjRlN4g5wYWNnMPyj5Sa6R1pbLeag==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.20.2", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-typescript": "^7.20.0" - } - }, - "@babel/plugin-transform-unicode-escapes": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz", - "integrity": "sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ==", - "requires": { - 
"@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-unicode-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz", - "integrity": "sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/polyfill": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/polyfill/-/polyfill-7.12.1.tgz", - "integrity": "sha512-X0pi0V6gxLi6lFZpGmeNa4zxtwEmCs42isWLNjZZDE0Y8yVfgu0T2OAHlzBbdYlqbW/YXVvoBHpATEM+goCj8g==", - "requires": { - "core-js": "^2.6.5", - "regenerator-runtime": "^0.13.4" - }, - "dependencies": { - "core-js": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.12.tgz", - "integrity": "sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ==" - } - } - }, - "@babel/preset-env": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.20.2.tgz", - "integrity": "sha512-1G0efQEWR1EHkKvKHqbG+IN/QdgwfByUpM5V5QroDzGV2t3S/WXNQd693cHiHTlCFMpr9B6FkPFXDA2lQcKoDg==", - "requires": { - "@babel/compat-data": "^7.20.1", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.18.6", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.18.9", - "@babel/plugin-proposal-async-generator-functions": "^7.20.1", - "@babel/plugin-proposal-class-properties": "^7.18.6", - "@babel/plugin-proposal-class-static-block": "^7.18.6", - "@babel/plugin-proposal-dynamic-import": "^7.18.6", - "@babel/plugin-proposal-export-namespace-from": "^7.18.9", - "@babel/plugin-proposal-json-strings": "^7.18.6", - "@babel/plugin-proposal-logical-assignment-operators": "^7.18.9", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6", - "@babel/plugin-proposal-numeric-separator": "^7.18.6", - "@babel/plugin-proposal-object-rest-spread": "^7.20.2", - "@babel/plugin-proposal-optional-catch-binding": "^7.18.6", - "@babel/plugin-proposal-optional-chaining": "^7.18.9", - "@babel/plugin-proposal-private-methods": "^7.18.6", - "@babel/plugin-proposal-private-property-in-object": "^7.18.6", - "@babel/plugin-proposal-unicode-property-regex": "^7.18.6", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.20.0", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-transform-arrow-functions": "^7.18.6", - "@babel/plugin-transform-async-to-generator": "^7.18.6", - 
"@babel/plugin-transform-block-scoped-functions": "^7.18.6", - "@babel/plugin-transform-block-scoping": "^7.20.2", - "@babel/plugin-transform-classes": "^7.20.2", - "@babel/plugin-transform-computed-properties": "^7.18.9", - "@babel/plugin-transform-destructuring": "^7.20.2", - "@babel/plugin-transform-dotall-regex": "^7.18.6", - "@babel/plugin-transform-duplicate-keys": "^7.18.9", - "@babel/plugin-transform-exponentiation-operator": "^7.18.6", - "@babel/plugin-transform-for-of": "^7.18.8", - "@babel/plugin-transform-function-name": "^7.18.9", - "@babel/plugin-transform-literals": "^7.18.9", - "@babel/plugin-transform-member-expression-literals": "^7.18.6", - "@babel/plugin-transform-modules-amd": "^7.19.6", - "@babel/plugin-transform-modules-commonjs": "^7.19.6", - "@babel/plugin-transform-modules-systemjs": "^7.19.6", - "@babel/plugin-transform-modules-umd": "^7.18.6", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.19.1", - "@babel/plugin-transform-new-target": "^7.18.6", - "@babel/plugin-transform-object-super": "^7.18.6", - "@babel/plugin-transform-parameters": "^7.20.1", - "@babel/plugin-transform-property-literals": "^7.18.6", - "@babel/plugin-transform-regenerator": "^7.18.6", - "@babel/plugin-transform-reserved-words": "^7.18.6", - "@babel/plugin-transform-shorthand-properties": "^7.18.6", - "@babel/plugin-transform-spread": "^7.19.0", - "@babel/plugin-transform-sticky-regex": "^7.18.6", - "@babel/plugin-transform-template-literals": "^7.18.9", - "@babel/plugin-transform-typeof-symbol": "^7.18.9", - "@babel/plugin-transform-unicode-escapes": "^7.18.10", - "@babel/plugin-transform-unicode-regex": "^7.18.6", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.20.2", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "core-js-compat": "^3.25.1", - "semver": "^6.3.0" - }, - "dependencies": { - "babel-plugin-polyfill-corejs3": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.6.0.tgz", - "integrity": "sha512-+eHqR6OPcBhJOGgsIar7xoAB1GcSwVUA3XjAd7HJNzOXT4wv6/H7KIdA/Nc60cvUlDbKApmqNvD1B1bzOt4nyA==", - "requires": { - "@babel/helper-define-polyfill-provider": "^0.3.3", - "core-js-compat": "^3.25.1" - } - }, - "babel-plugin-polyfill-regenerator": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.1.tgz", - "integrity": "sha512-NtQGmyQDXjQqQ+IzRkBVwEOz9lQ4zxAQZgoAYEtU9dJjnl1Oc98qnN7jcp+bE7O7aYzVpavXE3/VKXNzUbh7aw==", - "requires": { - "@babel/helper-define-polyfill-provider": "^0.3.3" - } - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@babel/preset-modules": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", - "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", - "requires": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - } - }, - "@babel/preset-react": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.18.6.tgz", - "integrity": "sha512-zXr6atUmyYdiWRVLOZahakYmOBHtWc2WGCkP8PYTgZi0iJXDY2CN180TdrIW4OGOAdLc7TifzDIvtx6izaRIzg==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-transform-react-display-name": "^7.18.6", - "@babel/plugin-transform-react-jsx": "^7.18.6", - "@babel/plugin-transform-react-jsx-development": "^7.18.6", - "@babel/plugin-transform-react-pure-annotations": "^7.18.6" - } - }, - "@babel/preset-typescript": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.18.6.tgz", - "integrity": "sha512-s9ik86kXBAnD760aybBucdpnLsAt0jK1xqJn2juOn9lkOvSHV60os5hxoVJsPzMQxvnUJFAlkont2DvvaYEBtQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-transform-typescript": "^7.18.6" - } - }, - "@babel/register": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/register/-/register-7.21.0.tgz", - "integrity": "sha512-9nKsPmYDi5DidAqJaQooxIhsLJiNMkGr8ypQ8Uic7cIox7UCDsM7HuUGxdGT7mSDTYbqzIdsOWzfBton/YJrMw==", - "requires": { - "clone-deep": "^4.0.1", - "find-cache-dir": "^2.0.0", - "make-dir": "^2.1.0", - "pirates": "^4.0.5", - "source-map-support": "^0.5.16" - }, - "dependencies": { - "find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "requires": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - } - }, - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==" - }, - "pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "requires": { - "find-up": "^3.0.0" - } - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": 
"sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } - } - }, - "@babel/runtime": { - "version": "7.22.11", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.11.tgz", - "integrity": "sha512-ee7jVNlWN09+KftVOu9n7S8gQzD/Z6hN/I8VBRXW4P1+Xe7kJGXMwu8vds4aGIMHZnNbdpSWCfZZtinytpcAvA==", - "requires": { - "regenerator-runtime": "^0.14.0" - }, - "dependencies": { - "regenerator-runtime": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", - "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" - } - } - }, - "@babel/runtime-corejs3": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.18.9.tgz", - "integrity": "sha512-qZEWeccZCrHA2Au4/X05QW5CMdm4VjUDCrGq5gf1ZDcM4hRqreKrtwAn7yci9zfgAS9apvnsFXiGBHBAxZdK9A==", - "requires": { - "core-js-pure": "^3.20.2", - "regenerator-runtime": "^0.13.4" - } - }, - "@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", - "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" - } - }, - "@babel/traverse": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.20.1.tgz", - "integrity": "sha512-d3tN8fkVJwFLkHkBN479SOsw4DMZnz8cdbL/gvuDuzy3TS6Nfw80HuQqhw1pITbIruHyh7d1fMA47kWzmcUEGA==", - "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.1", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.20.1", - "@babel/types": "^7.20.0", - "debug": "^4.1.0", - "globals": "^11.1.0" - } - }, - "@babel/types": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.21.3.tgz", - "integrity": "sha512-sBGdETxC+/M4o/zKC0sl6sjWv62WFR/uzxrJ6uYyMLZOUlPnwzw0tKgVHOXxaAd5l2g8pEDM5RZ495GPQI77kg==", - "requires": { - "@babel/helper-string-parser": "^7.19.4", - "@babel/helper-validator-identifier": "^7.19.1", - "to-fast-properties": "^2.0.0" - } - }, - "@bcoe/v8-coverage": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", - "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", - "dev": true - }, - "@bpmn-io/cm-theme": { - "version": "0.1.0-alpha.2", - "resolved": "https://registry.npmjs.org/@bpmn-io/cm-theme/-/cm-theme-0.1.0-alpha.2.tgz", - "integrity": "sha512-ZILgiYzxk3KMvxplUXmdRFQo45/JehDPg5k9tWfehmzUOSE13ssyLPil8uCloMQnb3yyzyOWTjb/wzKXTHlFQw==", - "requires": { - "@codemirror/language": "^6.3.1", - "@codemirror/view": "^6.5.1", - "@lezer/highlight": "^1.1.4" - } - }, - "@bpmn-io/draggle": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@bpmn-io/draggle/-/draggle-4.1.0.tgz", - "integrity": "sha512-gHRjQGJEpEwVxspNwNhnqHHAt8cE1l1cObFEf5YSuSXVxTLZcNAQOgmEDJ+QMk1UPDKfnQwvbeDdv5ytCnksfw==", - "requires": { - "contra": "^1.9.4" - } - }, - "@bpmn-io/feel-editor": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@bpmn-io/feel-editor/-/feel-editor-1.2.0.tgz", - "integrity": 
"sha512-402yrNL+a58d9AiNE48IScTiWDGmB+8Fpiq9eqg/sKCzhdHahl5fZyl+cksfcyJjzJF1byUOhYy3UxL3/tbLmQ==", - "requires": { - "@bpmn-io/feel-lint": "^1.2.0", - "@codemirror/autocomplete": "^6.12.0", - "@codemirror/commands": "^6.3.3", - "@codemirror/language": "^6.10.0", - "@codemirror/lint": "^6.4.2", - "@codemirror/state": "^6.4.0", - "@codemirror/view": "^6.23.0", - "@lezer/highlight": "^1.2.0", - "lang-feel": "^2.0.0", - "min-dom": "^4.1.0" - } - }, - "@bpmn-io/feel-lint": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@bpmn-io/feel-lint/-/feel-lint-1.2.0.tgz", - "integrity": "sha512-nsvAYxiSbWyjpd3gNnJd+60aTWrZvngYnZfe+GpmkM/pQoOgtF17GhD/p4fgaeAd/uUP3q9sO6EWRX+OU/p9dw==", - "requires": { - "@codemirror/language": "^6.8.0", - "lezer-feel": "^1.2.3" - } - }, - "@bpmn-io/form-js": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js/-/form-js-1.7.3.tgz", - "integrity": "sha512-CPDUwS3lftH/lHG08o4kBthgO3Qz918jwr6KnQ0O6Vtm6KjNLsjwZrbf3RPmLuAJzLfQMxh9oA8dTTZJM4s1UQ==", - "requires": { - "@bpmn-io/form-js-carbon-styles": "^1.7.3", - "@bpmn-io/form-js-editor": "^1.7.3", - "@bpmn-io/form-js-playground": "^1.7.3", - "@bpmn-io/form-js-viewer": "^1.7.3" - } - }, - "@bpmn-io/form-js-carbon-styles": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-carbon-styles/-/form-js-carbon-styles-1.7.3.tgz", - "integrity": "sha512-5W3zoa4VxY8eaKr4mLu/yRdugzhan8fTKsJGXfJR+iW0ErGdAo969rIxEbINsAKBqNiWxdipoWsEgvPxKbh/VQ==" - }, - "@bpmn-io/form-js-editor": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-editor/-/form-js-editor-1.7.3.tgz", - "integrity": "sha512-6kHZQJslKavw1M8JgZpOXyeIRj3wk+2TjosT81iWqreVnDS434cUpd8HTgXZBlrPugYmGjDd1e4Oa2CVVxixWw==", - "requires": { - "@bpmn-io/draggle": "^4.0.0", - "@bpmn-io/form-js-viewer": "^1.7.3", - "@bpmn-io/properties-panel": "^3.18.1", - "array-move": "^3.0.1", - "big.js": "^6.2.1", - "ids": "^1.0.0", - "min-dash": "^4.2.1", - "min-dom": "^4.1.0", - "preact": "^10.5.14" - }, - "dependencies": { - "big.js": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.1.tgz", - "integrity": "sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ==" - } - } - }, - "@bpmn-io/form-js-playground": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-playground/-/form-js-playground-1.7.3.tgz", - "integrity": "sha512-l87drk8rA3/yQa6RhOwRsVIp0kKGSg+asOXLZppNePh1lcaw0iF4taSa2PQeiI2d1GwnA3fjht+NhzzE/r+2/Q==", - "requires": { - "@bpmn-io/form-js-editor": "^1.7.3", - "@bpmn-io/form-js-viewer": "^1.7.3", - "@codemirror/autocomplete": "^6.12.0", - "@codemirror/commands": "^6.1.2", - "@codemirror/lang-json": "^6.0.1", - "@codemirror/language": "^6.10.0", - "@codemirror/lint": "^6.0.0", - "@codemirror/state": "^6.1.1", - "@codemirror/view": "^6.23.1", - "classnames": "^2.3.1", - "codemirror": "^6.0.1", - "downloadjs": "^1.4.7", - "file-drops": "^0.4.0", - "mitt": "^3.0.0", - "preact": "^10.5.14" - } - }, - "@bpmn-io/form-js-viewer": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@bpmn-io/form-js-viewer/-/form-js-viewer-1.7.3.tgz", - "integrity": "sha512-/XEDHzZbxmYXAp10ClPQu8h/4CoYqPdUYkppD/fL+UXFTO9ZJFBMn2TGgJEwXZP3H6/m6fSFqPFuWnVudWkCYg==", - "requires": { - "@carbon/grid": "^11.11.0", - "big.js": "^6.2.1", - "classnames": "^2.3.1", - "didi": "^10.0.1", - "dompurify": "^3.0.8", - "feelers": "^1.3.0", - "feelin": "^3.0.0", - "flatpickr": "^4.6.13", - "ids": "^1.0.0", - 
"lodash": "^4.5.0", - "min-dash": "^4.2.1", - "preact": "^10.5.14", - "showdown": "^2.1.0" - }, - "dependencies": { - "big.js": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.1.tgz", - "integrity": "sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ==" - } - } - }, - "@bpmn-io/properties-panel": { - "version": "3.18.1", - "resolved": "https://registry.npmjs.org/@bpmn-io/properties-panel/-/properties-panel-3.18.1.tgz", - "integrity": "sha512-ygBhVH99IFG1VbMlbvInXQUeqHwQH4uaajFaUi3OsjELpM1WcmHQ72fXPd1tC/OtJJhZoQajHiTI8SdL38t9ug==", - "requires": { - "@bpmn-io/feel-editor": "^1.2.0", - "@codemirror/view": "^6.14.0", - "classnames": "^2.3.1", - "feelers": "^1.3.0", - "focus-trap": "^7.5.2", - "min-dash": "^4.1.1", - "min-dom": "^4.0.3" - } - }, - "@braintree/sanitize-url": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz", - "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==" - }, - "@carbon/grid": { - "version": "11.21.0", - "resolved": "https://registry.npmjs.org/@carbon/grid/-/grid-11.21.0.tgz", - "integrity": "sha512-Zzhos2we+HqM0obdQgma+OvLoM9dNGq07YcLxFxrc/vEOn/D01sner6dyMMqS2y8036zIaoqVMGArSzPfoxrLA==", - "requires": { - "@carbon/layout": "^11.20.0" - } - }, - "@carbon/layout": { - "version": "11.20.0", - "resolved": "https://registry.npmjs.org/@carbon/layout/-/layout-11.20.0.tgz", - "integrity": "sha512-G9eJE3xb/J98Id9VvTA/b4v+2i/c+IiHAhxNPc0PPpPN6C/r6U4gJsG4yPgQnbuIU42cP9L8OvCrQr0mbrCMlA==" - }, - "@codemirror/autocomplete": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.13.0.tgz", - "integrity": "sha512-SuDrho1klTINfbcMPnyro1ZxU9xJtwDMtb62R8TjL/tOl71IoOsvBo1a9x+hDvHhIzkTcJHy2VC+rmpGgYkRSw==", - "requires": { - "@codemirror/language": "^6.0.0", - "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.17.0", - "@lezer/common": "^1.0.0" - } - }, - "@codemirror/commands": { - "version": "6.3.3", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.3.3.tgz", - "integrity": "sha512-dO4hcF0fGT9tu1Pj1D2PvGvxjeGkbC6RGcZw6Qs74TH+Ed1gw98jmUgd2axWvIZEqTeTuFrg1lEB1KV6cK9h1A==", - "requires": { - "@codemirror/language": "^6.0.0", - "@codemirror/state": "^6.4.0", - "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.1.0" - } - }, - "@codemirror/lang-json": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@codemirror/lang-json/-/lang-json-6.0.1.tgz", - "integrity": "sha512-+T1flHdgpqDDlJZ2Lkil/rLiRy684WMLc74xUnjJH48GQdfJo/pudlTRreZmKwzP8/tGdKf83wlbAdOCzlJOGQ==", - "requires": { - "@codemirror/language": "^6.0.0", - "@lezer/json": "^1.0.0" - } - }, - "@codemirror/language": { - "version": "6.10.1", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.1.tgz", - "integrity": "sha512-5GrXzrhq6k+gL5fjkAwt90nYDmjlzTIJV8THnxNFtNKWotMIlzzN+CpqxqwXOECnUdOndmSeWntVrVcv5axWRQ==", - "requires": { - "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.23.0", - "@lezer/common": "^1.1.0", - "@lezer/highlight": "^1.0.0", - "@lezer/lr": "^1.0.0", - "style-mod": "^4.0.0" - } - }, - "@codemirror/lint": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.5.0.tgz", - "integrity": "sha512-+5YyicIaaAZKU8K43IQi8TBy6mF6giGeWAH7N96Z5LC30Wm5JMjqxOYIE9mxwMG1NbhT2mA3l9hA4uuKUM3E5g==", - "requires": { - "@codemirror/state": "^6.0.0", - 
"@codemirror/view": "^6.0.0", - "crelt": "^1.0.5" - } - }, - "@codemirror/search": { - "version": "6.5.6", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.6.tgz", - "integrity": "sha512-rpMgcsh7o0GuCDUXKPvww+muLA1pDJaFrpq/CCHtpQJYz8xopu4D1hPcKRoDD0YlF8gZaqTNIRa4VRBWyhyy7Q==", - "requires": { - "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0", - "crelt": "^1.0.5" - } - }, - "@codemirror/state": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", - "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==" - }, - "@codemirror/view": { - "version": "6.25.1", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.25.1.tgz", - "integrity": "sha512-2LXLxsQnHDdfGzDvjzAwZh2ZviNJm7im6tGpa0IONIDnFd8RZ80D2SNi8PDi6YjKcMoMRK20v6OmKIdsrwsyoQ==", - "requires": { - "@codemirror/state": "^6.4.0", - "style-mod": "^4.1.0", - "w3c-keyname": "^2.2.4" - } - }, - "@colors/colors": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", - "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", - "optional": true - }, - "@docsearch/css": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.5.2.tgz", - "integrity": "sha512-SPiDHaWKQZpwR2siD0KQUwlStvIAnEyK6tAE2h2Wuoq8ue9skzhlyVQ1ddzOxX6khULnAALDiR/isSF3bnuciA==" - }, - "@docsearch/react": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.5.2.tgz", - "integrity": "sha512-9Ahcrs5z2jq/DcAvYtvlqEBHImbm4YJI8M9y0x6Tqg598P40HTEkX7hsMcIuThI+hTFxRGZ9hll0Wygm2yEjng==", - "requires": { - "@algolia/autocomplete-core": "1.9.3", - "@algolia/autocomplete-preset-algolia": "1.9.3", - "@docsearch/css": "3.5.2", - "algoliasearch": "^4.19.1" - } - }, - "@docusaurus/core": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz", - "integrity": "sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g==", - "requires": { - "@babel/core": "^7.18.6", - "@babel/generator": "^7.18.7", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.18.6", - "@babel/preset-env": "^7.18.6", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.18.6", - "@babel/runtime": "^7.18.6", - "@babel/runtime-corejs3": "^7.18.6", - "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.4.1", - "@docusaurus/logger": "2.4.1", - "@docusaurus/mdx-loader": "2.4.1", - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-common": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "@slorber/static-site-generator-webpack-plugin": "^4.0.7", - "@svgr/webpack": "^6.2.1", - "autoprefixer": "^10.4.7", - "babel-loader": "^8.2.5", - "babel-plugin-dynamic-import-node": "^2.3.3", - "boxen": "^6.2.1", - "chalk": "^4.1.2", - "chokidar": "^3.5.3", - "clean-css": "^5.3.0", - "cli-table3": "^0.6.2", - "combine-promises": "^1.1.0", - "commander": "^5.1.0", - "copy-webpack-plugin": "^11.0.0", - "core-js": "^3.23.3", - "css-loader": "^6.7.1", - "css-minimizer-webpack-plugin": "^4.0.0", - "cssnano": "^5.1.12", - "del": "^6.1.1", - "detect-port": "^1.3.0", - "escape-html": "^1.0.3", - "eta": "^2.0.0", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "html-minifier-terser": "^6.1.0", - "html-tags": "^3.2.0", - 
"html-webpack-plugin": "^5.5.0", - "import-fresh": "^3.3.0", - "leven": "^3.1.0", - "lodash": "^4.17.21", - "mini-css-extract-plugin": "^2.6.1", - "postcss": "^8.4.14", - "postcss-loader": "^7.0.0", - "prompts": "^2.4.2", - "react-dev-utils": "^12.0.1", - "react-helmet-async": "^1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", - "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.3.3", - "react-router-config": "^5.1.1", - "react-router-dom": "^5.3.3", - "rtl-detect": "^1.0.4", - "semver": "^7.3.7", - "serve-handler": "^6.1.3", - "shelljs": "^0.8.5", - "terser-webpack-plugin": "^5.3.3", - "tslib": "^2.4.0", - "update-notifier": "^5.1.0", - "url-loader": "^4.1.1", - "wait-on": "^6.0.1", - "webpack": "^5.73.0", - "webpack-bundle-analyzer": "^4.5.0", - "webpack-dev-server": "^4.9.3", - "webpack-merge": "^5.8.0", - "webpackbar": "^5.0.2" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==" - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "boxen": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", - "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", - "requires": { - "ansi-align": "^3.0.1", - "camelcase": "^6.2.0", - "chalk": "^4.1.2", - "cli-boxes": "^3.0.0", - "string-width": "^5.0.1", - "type-fest": "^2.5.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "cli-boxes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", - "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==" - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": 
"sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", - "requires": { - "ansi-regex": "^6.0.1" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - }, - "type-fest": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.15.1.tgz", - "integrity": "sha512-LYSjcIz3NmoQksXq/3/B7Nfad+T8mkaI628agAAnHCpXPTBRMK2ygt3eABpzII8CbZZM8dLdVQ4Gr8ousjFjMw==" - }, - "widest-line": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", - "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", - "requires": { - "string-width": "^5.0.1" - } - }, - "wrap-ansi": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.0.1.tgz", - "integrity": "sha512-QFF+ufAqhoYHvoHdajT/Po7KoXVBPXS2bgjIam5isfWJPfIOnQZ50JtUiVvCv/sjgacf3yRrt2ZKUZ/V4itN4g==", - "requires": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.1.0.tgz", - "integrity": "sha512-VbqNsoz55SYGczauuup0MFUyXNQviSpFTj1RQtFzmQLk18qbVSpTFFGMT293rmDaQuKCT6InmbuEyUne4mTuxQ==" - } - } - } - } - }, - "@docusaurus/cssnano-preset": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.1.tgz", - "integrity": "sha512-ka+vqXwtcW1NbXxWsh6yA1Ckii1klY9E53cJ4O9J09nkMBgrNX3iEFED1fWdv8wf4mJjvGi5RLZ2p9hJNjsLyQ==", - "requires": { - "cssnano-preset-advanced": "^5.3.8", - "postcss": "^8.4.14", - "postcss-sort-media-queries": "^4.2.1", - "tslib": "^2.4.0" - } - }, - "@docusaurus/logger": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.1.tgz", - "integrity": "sha512-5h5ysIIWYIDHyTVd8BjheZmQZmEgWDR54aQ1BX9pjFfpyzFo5puKXKYrYJXbjEHGyVhEzmB9UXwbxGfaZhOjcg==", - "requires": { - "chalk": "^4.1.2", - "tslib": "^2.4.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - 
"version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "@docusaurus/mdx-loader": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.1.tgz", - "integrity": "sha512-4KhUhEavteIAmbBj7LVFnrVYDiU51H5YWW1zY6SmBSte/YLhDutztLTBE0PQl1Grux1jzUJeaSvAzHpTn6JJDQ==", - "requires": { - "@babel/parser": "^7.18.8", - "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@mdx-js/mdx": "^1.6.22", - "escape-html": "^1.0.3", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "image-size": "^1.0.1", - "mdast-util-to-string": "^2.0.0", - "remark-emoji": "^2.2.0", - "stringify-object": "^3.3.0", - "tslib": "^2.4.0", - "unified": "^9.2.2", - "unist-util-visit": "^2.0.3", - "url-loader": "^4.1.1", - "webpack": "^5.73.0" - }, - "dependencies": { - "unist-util-visit": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - } - }, - "unist-util-visit-parents": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - } - } - } - }, - "@docusaurus/module-type-aliases": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.1.tgz", - "integrity": "sha512-gLBuIFM8Dp2XOCWffUDSjtxY7jQgKvYujt7Mx5s4FCTfoL5dN1EVbnrn+O2Wvh8b0a77D57qoIDY7ghgmatR1A==", - "requires": { - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.4.1", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "@types/react-router-dom": "*", - "react-helmet-async": "*", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" - } - }, - "@docusaurus/plugin-content-blog": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.1.tgz", - "integrity": "sha512-E2i7Knz5YIbE1XELI6RlTnZnGgS52cUO4BlCiCUCvQHbR+s1xeIWz4C6BtaVnlug0Ccz7nFSksfwDpVlkujg5Q==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/logger": "2.4.1", - "@docusaurus/mdx-loader": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-common": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "cheerio": "^1.0.0-rc.12", - "feed": "^4.2.2", - "fs-extra": "^10.1.0", - "lodash": "^4.17.21", - "reading-time": "^1.5.0", - "tslib": "^2.4.0", - 
"unist-util-visit": "^2.0.3", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" - }, - "dependencies": { - "unist-util-visit": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - } - }, - "unist-util-visit-parents": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - } - } - } - }, - "@docusaurus/plugin-content-docs": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.1.tgz", - "integrity": "sha512-Lo7lSIcpswa2Kv4HEeUcGYqaasMUQNpjTXpV0N8G6jXgZaQurqp7E8NGYeGbDXnb48czmHWbzDL4S3+BbK0VzA==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/logger": "2.4.1", - "@docusaurus/mdx-loader": "2.4.1", - "@docusaurus/module-type-aliases": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "@types/react-router-config": "^5.0.6", - "combine-promises": "^1.1.0", - "fs-extra": "^10.1.0", - "import-fresh": "^3.3.0", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "tslib": "^2.4.0", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" - } - }, - "@docusaurus/plugin-content-pages": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.1.tgz", - "integrity": "sha512-/UjuH/76KLaUlL+o1OvyORynv6FURzjurSjvn2lbWTFc4tpYY2qLYTlKpTCBVPhlLUQsfyFnshEJDLmPneq2oA==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/mdx-loader": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "fs-extra": "^10.1.0", - "tslib": "^2.4.0", - "webpack": "^5.73.0" - } - }, - "@docusaurus/plugin-debug": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.1.tgz", - "integrity": "sha512-7Yu9UPzRShlrH/G8btOpR0e6INFZr0EegWplMjOqelIwAcx3PKyR8mgPTxGTxcqiYj6hxSCRN0D8R7YrzImwNA==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils": "2.4.1", - "fs-extra": "^10.1.0", - "react-json-view": "^1.21.3", - "tslib": "^2.4.0" - } - }, - "@docusaurus/plugin-google-analytics": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.1.tgz", - "integrity": "sha512-dyZJdJiCoL+rcfnm0RPkLt/o732HvLiEwmtoNzOoz9MSZz117UH2J6U2vUDtzUzwtFLIf32KkeyzisbwUCgcaQ==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "tslib": "^2.4.0" - } - }, - "@docusaurus/plugin-google-gtag": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.1.tgz", - "integrity": "sha512-mKIefK+2kGTQBYvloNEKtDmnRD7bxHLsBcxgnbt4oZwzi2nxCGjPX6+9SQO2KCN5HZbNrYmGo5GJfMgoRvy6uA==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "tslib": "^2.4.0" - } - }, - 
"@docusaurus/plugin-google-tag-manager": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.1.tgz", - "integrity": "sha512-Zg4Ii9CMOLfpeV2nG74lVTWNtisFaH9QNtEw48R5QE1KIwDBdTVaiSA18G1EujZjrzJJzXN79VhINSbOJO/r3g==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "tslib": "^2.4.0" - } - }, - "@docusaurus/plugin-sitemap": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.1.tgz", - "integrity": "sha512-lZx+ijt/+atQ3FVE8FOHV/+X3kuok688OydDXrqKRJyXBJZKgGjA2Qa8RjQ4f27V2woaXhtnyrdPop/+OjVMRg==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/logger": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-common": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "fs-extra": "^10.1.0", - "sitemap": "^7.1.1", - "tslib": "^2.4.0" - } - }, - "@docusaurus/preset-classic": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.1.tgz", - "integrity": "sha512-P4//+I4zDqQJ+UDgoFrjIFaQ1MeS9UD1cvxVQaI6O7iBmiHQm0MGROP1TbE7HlxlDPXFJjZUK3x3cAoK63smGQ==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/plugin-content-blog": "2.4.1", - "@docusaurus/plugin-content-docs": "2.4.1", - "@docusaurus/plugin-content-pages": "2.4.1", - "@docusaurus/plugin-debug": "2.4.1", - "@docusaurus/plugin-google-analytics": "2.4.1", - "@docusaurus/plugin-google-gtag": "2.4.1", - "@docusaurus/plugin-google-tag-manager": "2.4.1", - "@docusaurus/plugin-sitemap": "2.4.1", - "@docusaurus/theme-classic": "2.4.1", - "@docusaurus/theme-common": "2.4.1", - "@docusaurus/theme-search-algolia": "2.4.1", - "@docusaurus/types": "2.4.1" - } - }, - "@docusaurus/react-loadable": { - "version": "5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", - "requires": { - "@types/react": "*", - "prop-types": "^15.6.2" - } - }, - "@docusaurus/theme-classic": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.1.tgz", - "integrity": "sha512-Rz0wKUa+LTW1PLXmwnf8mn85EBzaGSt6qamqtmnh9Hflkc+EqiYMhtUJeLdV+wsgYq4aG0ANc+bpUDpsUhdnwg==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/mdx-loader": "2.4.1", - "@docusaurus/module-type-aliases": "2.4.1", - "@docusaurus/plugin-content-blog": "2.4.1", - "@docusaurus/plugin-content-docs": "2.4.1", - "@docusaurus/plugin-content-pages": "2.4.1", - "@docusaurus/theme-common": "2.4.1", - "@docusaurus/theme-translations": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-common": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "@mdx-js/react": "^1.6.22", - "clsx": "^1.2.1", - "copy-text-to-clipboard": "^3.0.1", - "infima": "0.2.0-alpha.43", - "lodash": "^4.17.21", - "nprogress": "^0.2.0", - "postcss": "^8.4.14", - "prism-react-renderer": "^1.3.5", - "prismjs": "^1.28.0", - "react-router-dom": "^5.3.3", - "rtlcss": "^3.5.0", - "tslib": "^2.4.0", - "utility-types": "^3.10.0" - } - }, - "@docusaurus/theme-common": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.1.tgz", - "integrity": 
"sha512-G7Zau1W5rQTaFFB3x3soQoZpkgMbl/SYNG8PfMFIjKa3M3q8n0m/GRf5/H/e5BqOvt8c+ZWIXGCiz+kUCSHovA==", - "requires": { - "@docusaurus/mdx-loader": "2.4.1", - "@docusaurus/module-type-aliases": "2.4.1", - "@docusaurus/plugin-content-blog": "2.4.1", - "@docusaurus/plugin-content-docs": "2.4.1", - "@docusaurus/plugin-content-pages": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-common": "2.4.1", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "clsx": "^1.2.1", - "parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^1.3.5", - "tslib": "^2.4.0", - "use-sync-external-store": "^1.2.0", - "utility-types": "^3.10.0" - } - }, - "@docusaurus/theme-mermaid": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-mermaid/-/theme-mermaid-2.4.1.tgz", - "integrity": "sha512-cM0ImKIqZfjmlaC+uAjep39kNBvb1bjz429QBHGs32maob4+UnRzVPPpCUCltyPVb4xjG5h1Tyq4pHzhtIikqA==", - "requires": { - "@docusaurus/core": "2.4.1", - "@docusaurus/module-type-aliases": "2.4.1", - "@docusaurus/theme-common": "2.4.1", - "@docusaurus/types": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "@mdx-js/react": "^1.6.22", - "mermaid": "^9.2.2", - "tslib": "^2.4.0" - } - }, - "@docusaurus/theme-search-algolia": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.1.tgz", - "integrity": "sha512-6BcqW2lnLhZCXuMAvPRezFs1DpmEKzXFKlYjruuas+Xy3AQeFzDJKTJFIm49N77WFCTyxff8d3E4Q9pi/+5McQ==", - "requires": { - "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.4.1", - "@docusaurus/logger": "2.4.1", - "@docusaurus/plugin-content-docs": "2.4.1", - "@docusaurus/theme-common": "2.4.1", - "@docusaurus/theme-translations": "2.4.1", - "@docusaurus/utils": "2.4.1", - "@docusaurus/utils-validation": "2.4.1", - "algoliasearch": "^4.13.1", - "algoliasearch-helper": "^3.10.0", - "clsx": "^1.2.1", - "eta": "^2.0.0", - "fs-extra": "^10.1.0", - "lodash": "^4.17.21", - "tslib": "^2.4.0", - "utility-types": "^3.10.0" - } - }, - "@docusaurus/theme-translations": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.1.tgz", - "integrity": "sha512-T1RAGP+f86CA1kfE8ejZ3T3pUU3XcyvrGMfC/zxCtc2BsnoexuNI9Vk2CmuKCb+Tacvhxjv5unhxXce0+NKyvA==", - "requires": { - "fs-extra": "^10.1.0", - "tslib": "^2.4.0" - } - }, - "@docusaurus/types": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz", - "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==", - "requires": { - "@types/history": "^4.7.11", - "@types/react": "*", - "commander": "^5.1.0", - "joi": "^17.6.0", - "react-helmet-async": "^1.3.0", - "utility-types": "^3.10.0", - "webpack": "^5.73.0", - "webpack-merge": "^5.8.0" - } - }, - "@docusaurus/utils": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.1.tgz", - "integrity": "sha512-1lvEZdAQhKNht9aPXPoh69eeKnV0/62ROhQeFKKxmzd0zkcuE/Oc5Gpnt00y/f5bIsmOsYMY7Pqfm/5rteT5GA==", - "requires": { - "@docusaurus/logger": "2.4.1", - "@svgr/webpack": "^6.2.1", - "escape-string-regexp": "^4.0.0", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "github-slugger": "^1.4.0", - "globby": "^11.1.0", - "gray-matter": "^4.0.3", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "micromatch": "^4.0.5", - "resolve-pathname": "^3.0.0", - "shelljs": "^0.8.5", - "tslib": "^2.4.0", - "url-loader": "^4.1.1", - 
"webpack": "^5.73.0" - }, - "dependencies": { - "escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==" - } - } - }, - "@docusaurus/utils-common": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.1.tgz", - "integrity": "sha512-bCVGdZU+z/qVcIiEQdyx0K13OC5mYwxhSuDUR95oFbKVuXYRrTVrwZIqQljuo1fyJvFTKHiL9L9skQOPokuFNQ==", - "requires": { - "tslib": "^2.4.0" - } - }, - "@docusaurus/utils-validation": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.1.tgz", - "integrity": "sha512-unII3hlJlDwZ3w8U+pMO3Lx3RhI4YEbY3YNsQj4yzrkZzlpqZOLuAiZK2JyULnD+TKbceKU0WyWkQXtYbLNDFA==", - "requires": { - "@docusaurus/logger": "2.4.1", - "@docusaurus/utils": "2.4.1", - "joi": "^17.6.0", - "js-yaml": "^4.1.0", - "tslib": "^2.4.0" - } - }, - "@exodus/schemasafe": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@exodus/schemasafe/-/schemasafe-1.3.0.tgz", - "integrity": "sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw==" - }, - "@hapi/hoek": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.2.1.tgz", - "integrity": "sha512-gfta+H8aziZsm8pZa0vj04KO6biEiisppNgA1kbJvFrrWu9Vm7eaUEy76DIxsuTaWvti5fkJVhllWc6ZTE+Mdw==" - }, - "@hapi/topo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", - "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", - "requires": { - "@hapi/hoek": "^9.0.0" - } - }, - "@hookform/error-message": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@hookform/error-message/-/error-message-2.0.1.tgz", - "integrity": "sha512-U410sAr92xgxT1idlu9WWOVjndxLdgPUHEB8Schr27C9eh7/xUnITWpCMF93s+lGiG++D4JnbSnrb5A21AdSNg==", - "requires": {} - }, - "@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "requires": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==" - }, - "ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==" - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "requires": { - 
"eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "requires": { - "ansi-regex": "^6.0.1" - } - }, - "wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "requires": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - } - } - } - }, - "@istanbuljs/load-nyc-config": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", - "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", - "dev": true, - "requires": { - "camelcase": "^5.3.1", - "find-up": "^4.1.0", - "get-package-type": "^0.1.0", - "js-yaml": "^3.13.1", - "resolve-from": "^5.0.0" - }, - "dependencies": { - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", - "dev": true - } - } - }, - "@istanbuljs/schema": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", - "dev": true - }, - "@jest/console": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.6.4.tgz", - "integrity": "sha512-wNK6gC0Ha9QeEPSkeJedQuTQqxZYnDPuDcDhVuVatRvMkL4D0VTvFVZj+Yuh6caG2aOfzkUZ36KtCmLNtR02hw==", - "dev": true, - "requires": { - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3", - "slash": "^3.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - 
"ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "@jest/core": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.6.4.tgz", - "integrity": "sha512-U/vq5ccNTSVgYH7mHnodHmCffGWHJnz/E1BEWlLuK5pM4FZmGfBn/nrJGLjUsSmyx3otCeqc1T31F4y08AMDLg==", - "dev": true, - "requires": { - "@jest/console": "^29.6.4", - "@jest/reporters": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", - "@jest/types": "^29.6.3", - "@types/node": "*", - "ansi-escapes": "^4.2.1", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "exit": "^0.1.2", - "graceful-fs": "^4.2.9", - "jest-changed-files": "^29.6.3", - "jest-config": "^29.6.4", - "jest-haste-map": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-regex-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-resolve-dependencies": "^29.6.4", - "jest-runner": "^29.6.4", - "jest-runtime": "^29.6.4", - "jest-snapshot": "^29.6.4", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", - "jest-watcher": "^29.6.4", - "micromatch": "^4.0.4", - "pretty-format": "^29.6.3", - "slash": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", - "dev": true - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - 
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "@jest/environment": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.6.4.tgz", - "integrity": "sha512-sQ0SULEjA1XUTHmkBRl7A1dyITM9yb1yb3ZNKPX3KlTd6IG7mWUe3e2yfExtC2Zz1Q+mMckOLHmL/qLiuQJrBQ==", - "dev": true, - "requires": { - "@jest/fake-timers": "^29.6.4", - "@jest/types": "^29.6.3", - "@types/node": "*", - "jest-mock": "^29.6.3" - } - }, - "@jest/expect": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.6.4.tgz", - "integrity": "sha512-Warhsa7d23+3X5bLbrbYvaehcgX5TLYhI03JKoedTiI8uJU4IhqYBWF7OSSgUyz4IgLpUYPkK0AehA5/fRclAA==", - "dev": true, - "requires": { - "expect": "^29.6.4", - "jest-snapshot": "^29.6.4" - } - }, - "@jest/expect-utils": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.6.4.tgz", - "integrity": "sha512-FEhkJhqtvBwgSpiTrocquJCdXPsyvNKcl/n7A3u7X4pVoF4bswm11c9d4AV+kfq2Gpv/mM8x7E7DsRvH+djkrg==", - "dev": true, - "requires": { - "jest-get-type": "^29.6.3" - } - }, - "@jest/fake-timers": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.6.4.tgz", - "integrity": "sha512-6UkCwzoBK60edXIIWb0/KWkuj7R7Qq91vVInOe3De6DSpaEiqjKcJw4F7XUet24Wupahj9J6PlR09JqJ5ySDHw==", - "dev": true, - "requires": { - "@jest/types": "^29.6.3", - "@sinonjs/fake-timers": "^10.0.2", - "@types/node": "*", - "jest-message-util": "^29.6.3", - "jest-mock": "^29.6.3", - "jest-util": "^29.6.3" - } - }, - "@jest/globals": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.6.4.tgz", - "integrity": "sha512-wVIn5bdtjlChhXAzVXavcY/3PEjf4VqM174BM3eGL5kMxLiZD5CLnbmkEyA1Dwh9q8XjP6E8RwjBsY/iCWrWsA==", - "dev": true, - "requires": { - "@jest/environment": "^29.6.4", - "@jest/expect": "^29.6.4", - "@jest/types": "^29.6.3", - "jest-mock": "^29.6.3" - } - }, - "@jest/reporters": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.6.4.tgz", - "integrity": "sha512-sxUjWxm7QdchdrD3NfWKrL8FBsortZeibSJv4XLjESOOjSUOkjQcb0ZHJwfhEGIvBvTluTzfG2yZWZhkrXJu8g==", - "dev": true, - "requires": { - "@bcoe/v8-coverage": "^0.2.3", - "@jest/console": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", - "@jest/types": "^29.6.3", - "@jridgewell/trace-mapping": "^0.3.18", - "@types/node": "*", - "chalk": "^4.0.0", - "collect-v8-coverage": "^1.0.0", - "exit": "^0.1.2", - "glob": "^7.1.3", - "graceful-fs": "^4.2.9", - "istanbul-lib-coverage": "^3.0.0", - "istanbul-lib-instrument": "^6.0.0", - "istanbul-lib-report": "^3.0.0", - "istanbul-lib-source-maps": "^4.0.0", - "istanbul-reports": "^3.1.3", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3", - "jest-worker": "^29.6.4", - "slash": "^3.0.0", - "string-length": "^4.0.1", - "strip-ansi": "^6.0.0", 
- "v8-to-istanbul": "^9.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "jest-worker": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.4.tgz", - "integrity": "sha512-6dpvFV4WjcWbDVGgHTWo/aupl8/LbBx2NSKfiwqf79xC/yeJjKHT1+StcKy/2KTmW16hE68ccKVOtXf+WZGz7Q==", - "dev": true, - "requires": { - "@types/node": "*", - "jest-util": "^29.6.3", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "dependencies": { - "supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "@jest/schemas": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", - "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", - "dev": true, - "requires": { - "@sinclair/typebox": "^0.27.8" - } - }, - "@jest/source-map": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", - "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", - "dev": true, - "requires": { - "@jridgewell/trace-mapping": "^0.3.18", - "callsites": "^3.0.0", - "graceful-fs": "^4.2.9" - } - }, - "@jest/test-result": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.6.4.tgz", - "integrity": "sha512-uQ1C0AUEN90/dsyEirgMLlouROgSY+Wc/JanVVk0OiUKa5UFh7sJpMEM3aoUBAz2BRNvUJ8j3d294WFuRxSyOQ==", - "dev": true, - "requires": { - "@jest/console": "^29.6.4", - "@jest/types": "^29.6.3", - 
"@types/istanbul-lib-coverage": "^2.0.0", - "collect-v8-coverage": "^1.0.0" - } - }, - "@jest/test-sequencer": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.6.4.tgz", - "integrity": "sha512-E84M6LbpcRq3fT4ckfKs9ryVanwkaIB0Ws9bw3/yP4seRLg/VaCZ/LgW0MCq5wwk4/iP/qnilD41aj2fsw2RMg==", - "dev": true, - "requires": { - "@jest/test-result": "^29.6.4", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", - "slash": "^3.0.0" - } - }, - "@jest/transform": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.6.4.tgz", - "integrity": "sha512-8thgRSiXUqtr/pPGY/OsyHuMjGyhVnWrFAwoxmIemlBuiMyU1WFs0tXoNxzcr4A4uErs/ABre76SGmrr5ab/AA==", - "dev": true, - "requires": { - "@babel/core": "^7.11.6", - "@jest/types": "^29.6.3", - "@jridgewell/trace-mapping": "^0.3.18", - "babel-plugin-istanbul": "^6.1.1", - "chalk": "^4.0.0", - "convert-source-map": "^2.0.0", - "fast-json-stable-stringify": "^2.1.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", - "jest-regex-util": "^29.6.3", - "jest-util": "^29.6.3", - "micromatch": "^4.0.4", - "pirates": "^4.0.4", - "slash": "^3.0.0", - "write-file-atomic": "^4.0.2" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "convert-source-map": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", - "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "write-file-atomic": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", - "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", - "dev": true, - "requires": { - "imurmurhash": "^0.1.4", 
- "signal-exit": "^3.0.7" - } - } - } - }, - "@jest/types": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", - "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", - "dev": true, - "requires": { - "@jest/schemas": "^29.6.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^17.0.8", - "chalk": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "@jridgewell/gen-mapping": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", - "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", - "requires": { - "@jridgewell/set-array": "^1.0.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" - } - }, - "@jridgewell/resolve-uri": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", - "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==" - }, - "@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==" - }, - "@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" - }, - "@jridgewell/trace-mapping": { - "version": "0.3.19", - "resolved": 
"https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", - "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", - "requires": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" - } - }, - "@jsdevtools/ono": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/@jsdevtools/ono/-/ono-7.1.3.tgz", - "integrity": "sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==" - }, - "@leichtgewicht/ip-codec": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", - "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==" - }, - "@lezer/common": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", - "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==" - }, - "@lezer/highlight": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", - "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", - "requires": { - "@lezer/common": "^1.0.0" - } - }, - "@lezer/json": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@lezer/json/-/json-1.0.2.tgz", - "integrity": "sha512-xHT2P4S5eeCYECyKNPhr4cbEL9tc8w83SPwRC373o9uEdrvGKTZoJVAGxpOsZckMlEh9W23Pc72ew918RWQOBQ==", - "requires": { - "@lezer/common": "^1.2.0", - "@lezer/highlight": "^1.0.0", - "@lezer/lr": "^1.0.0" - } - }, - "@lezer/lr": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.0.tgz", - "integrity": "sha512-Wst46p51km8gH0ZUmeNrtpRYmdlRHUpN1DQd3GFAyKANi8WVz8c2jHYTf1CVScFaCjQw1iO3ZZdqGDxQPRErTg==", - "requires": { - "@lezer/common": "^1.0.0" - } - }, - "@lezer/markdown": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.2.0.tgz", - "integrity": "sha512-d7MwsfAukZJo1GpPrcPGa3MxaFFOqNp0gbqF+3F7pTeNDOgeJN1muXzx1XXDPt+Ac+/voCzsH7qXqnn+xReG/g==", - "requires": { - "@lezer/common": "^1.0.0", - "@lezer/highlight": "^1.0.0" - } - }, - "@mdx-js/mdx": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz", - "integrity": "sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==", - "requires": { - "@babel/core": "7.12.9", - "@babel/plugin-syntax-jsx": "7.12.1", - "@babel/plugin-syntax-object-rest-spread": "7.8.3", - "@mdx-js/util": "1.6.22", - "babel-plugin-apply-mdx-type-prop": "1.6.22", - "babel-plugin-extract-import-names": "1.6.22", - "camelcase-css": "2.0.1", - "detab": "2.0.4", - "hast-util-raw": "6.0.1", - "lodash.uniq": "4.5.0", - "mdast-util-to-hast": "10.0.1", - "remark-footnotes": "2.0.0", - "remark-mdx": "1.6.22", - "remark-parse": "8.0.3", - "remark-squeeze-paragraphs": "4.0.0", - "style-to-object": "0.3.0", - "unified": "9.2.0", - "unist-builder": "2.0.3", - "unist-util-visit": "2.0.3" - }, - "dependencies": { - "@babel/core": { - "version": "7.12.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", - "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", - "requires": { - "@babel/code-frame": "^7.10.4", - "@babel/generator": "^7.12.5", - "@babel/helper-module-transforms": "^7.12.1", - "@babel/helpers": "^7.12.5", - 
"@babel/parser": "^7.12.7", - "@babel/template": "^7.12.7", - "@babel/traverse": "^7.12.9", - "@babel/types": "^7.12.7", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.1", - "json5": "^2.1.2", - "lodash": "^4.17.19", - "resolve": "^1.3.2", - "semver": "^5.4.1", - "source-map": "^0.5.0" - } - }, - "@babel/plugin-syntax-jsx": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", - "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, - "unified": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", - "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", - "requires": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - } - }, - "unist-util-visit": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - } - }, - "unist-util-visit-parents": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - } - } - } - }, - "@mdx-js/react": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-1.6.22.tgz", - "integrity": "sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg==", - "requires": {} - }, - "@mdx-js/util": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz", - "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==" - }, - "@mrmlnc/readdir-enhanced": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", - "requires": { - "call-me-maybe": "^1.0.1", - "glob-to-regexp": "^0.3.0" - }, - "dependencies": { - "glob-to-regexp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", - "integrity": "sha512-Iozmtbqv0noj0uDDqoL0zNq0VBEfK2YFoMAZoxJe4cwphvLR+JskfF30QhXHOR4m3KrE6NLRYw+U9MRXvifyig==" - } - } - }, - "@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "requires": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - } - }, - "@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": 
"https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==" - }, - "@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "requires": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - } - }, - "@paloaltonetworks/openapi-to-postmanv2": { - "version": "3.1.0-hotfix.1", - "resolved": "https://registry.npmjs.org/@paloaltonetworks/openapi-to-postmanv2/-/openapi-to-postmanv2-3.1.0-hotfix.1.tgz", - "integrity": "sha512-0bdaPCEyQbnUo4xpOu7EzxXXkDx4BAXqc8QSbVBlzlVB5KoTLJiKKB4c3fa4BXbK+3u/OqfLbeNCebc2EC8ngA==", - "requires": { - "@paloaltonetworks/postman-collection": "^4.1.0", - "ajv": "8.1.0", - "ajv-formats": "2.1.1", - "async": "3.2.1", - "commander": "2.20.3", - "js-yaml": "3.14.1", - "json-schema-merge-allof": "0.8.1", - "lodash": "4.17.21", - "oas-resolver-browser": "2.5.2", - "path-browserify": "1.0.1", - "yaml": "1.10.2" - }, - "dependencies": { - "ajv": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.1.0.tgz", - "integrity": "sha512-B/Sk2Ix7A36fs/ZkuGLIR86EdjbgR6fsAcbx9lOP/QBSXujDNbVmIS/U4Itz5k8fPFDeVZl/zQ/gJW4Jrq6XjQ==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "async": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.1.tgz", - "integrity": "sha512-XdD5lRO/87udXCMC9meWdYiR+Nq6ZjUfXidViUZGu2F1MO4T3XwZ1et0hb2++BgLfhyJwy44BGB/yx80ABx8hg==" - }, - "commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - } - } - }, - "@paloaltonetworks/postman-code-generators": { - "version": "1.1.15-patch.2", - "resolved": "https://registry.npmjs.org/@paloaltonetworks/postman-code-generators/-/postman-code-generators-1.1.15-patch.2.tgz", - "integrity": "sha512-tRnAKtV4M8wLxcVnAx6ZCjCqbrR1xiqJNQkf1A71K8UxEP3N/+EspT82N5c0555w02oYFk21ViHuzuhm4gaGLw==", - "requires": { - "@paloaltonetworks/postman-collection": "^4.1.0", - "async": "^3.2.4", - "path": "^0.12.7", - "shelljs": "^0.8.5" - }, - "dependencies": { - "async": { - "version": "3.2.5", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.5.tgz", - "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==" - } - } - }, - 
"@paloaltonetworks/postman-collection": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/@paloaltonetworks/postman-collection/-/postman-collection-4.1.1.tgz", - "integrity": "sha512-9JHHkkD8Xb4rvdKob7TDPRfqfmdG3KU0aO5gJyyjvMFbOVysam5I0d8/9HPOuJXWkUHGo3Sn+ov2Fcm2bnJ52Q==", - "requires": { - "file-type": "3.9.0", - "http-reasons": "0.1.0", - "iconv-lite": "0.6.3", - "liquid-json": "0.3.1", - "lodash": "4.17.21", - "mime-format": "2.0.1", - "mime-types": "2.1.34", - "postman-url-encoder": "3.0.5", - "semver": "7.3.5", - "uuid": "8.3.2" - }, - "dependencies": { - "file-type": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-3.9.0.tgz", - "integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA==" - }, - "iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - } - }, - "semver": { - "version": "7.3.5", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", - "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==", - "requires": { - "lru-cache": "^6.0.0" - } - } - } - }, - "@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "optional": true - }, - "@playwright/test": { - "version": "1.32.2", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.32.2.tgz", - "integrity": "sha512-nhaTSDpEdTTttdkDE8Z6K3icuG1DVRxrl98Qq0Lfc63SS9a2sjc9+x8ezysh7MzCKz6Y+nArml3/mmt+gqRmQQ==", - "dev": true, - "requires": { - "@types/node": "*", - "fsevents": "2.3.2", - "playwright-core": "1.32.2" - } - }, - "@polka/url": { - "version": "1.0.0-next.21", - "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.21.tgz", - "integrity": "sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g==" - }, - "@redocly/ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@redocly/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-9GWx27t7xWhDIR02PA18nzBdLcKQRgc46xNQvjFkrYk4UOmvKhJ/dawwiX0cCOeetN5LcaaiqQbVOWYK62SGHw==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "dependencies": { - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - } - } - }, - "@redocly/openapi-core": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.7.0.tgz", - "integrity": "sha512-mDl9tq96WjMElX4RX+oyqfTiquBNXzFRWres/JN6AlWhBbhFOz2nXnCCIILcjZkRchKFDKShU+pqHpvPJ7xVDQ==", - "requires": { - "@redocly/ajv": "^8.11.0", - "colorette": "^1.2.0", - "js-levenshtein": "^1.1.6", - "js-yaml": "^4.1.0", - "lodash.isequal": "^4.5.0", - "minimatch": "^5.0.1", - "node-fetch": "^2.6.1", - "pluralize": "^8.0.0", - "yaml-ast-parser": "0.0.43" - }, - "dependencies": { - "brace-expansion": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "requires": { - "balanced-match": "^1.0.0" - } - }, - "colorette": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.4.0.tgz", - "integrity": "sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g==" - }, - "minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "requires": { - "brace-expansion": "^2.0.1" - } - } - } - }, - "@reduxjs/toolkit": { - "version": "1.9.7", - "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-1.9.7.tgz", - "integrity": "sha512-t7v8ZPxhhKgOKtU+uyJT13lu4vL7az5aFi4IdoDs/eS548edn2M8Ik9h8fxgvMjGoAUVFSt6ZC1P5cWmQ014QQ==", - "requires": { - "immer": "^9.0.21", - "redux": "^4.2.1", - "redux-thunk": "^2.4.2", - "reselect": "^4.1.8" - } - }, - "@saucelabs/theme-github-codeblock": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@saucelabs/theme-github-codeblock/-/theme-github-codeblock-0.2.3.tgz", - "integrity": "sha512-GSl3Lr/jOWm4OP3BPX2vXxc8FMSOXj1mJnls6cUqMwlGOfKQ1Ia9pq1O9/ES+5TrZHIzAws/n5FFSn1OkGJw/Q==" - }, - "@sideway/address": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", - "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", - "requires": { - "@hapi/hoek": "^9.0.0" - } - }, - "@sideway/formula": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.0.tgz", - "integrity": "sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg==" - }, - "@sideway/pinpoint": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", - "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" - }, - "@sinclair/typebox": { - "version": "0.27.8", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", - "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", - "dev": true - }, - "@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==" - }, - "@sinonjs/commons": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz", - "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==", - "dev": true, - "requires": { - "type-detect": "4.0.8" - } - }, - "@sinonjs/fake-timers": { - "version": "10.3.0", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", - "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", - "dev": true, - "requires": { - "@sinonjs/commons": "^3.0.0" - } - }, - "@slorber/static-site-generator-webpack-plugin": { - "version": "4.0.7", - "resolved": 
"https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz", - "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==", - "requires": { - "eval": "^0.1.8", - "p-map": "^4.0.0", - "webpack-sources": "^3.2.2" - } - }, - "@stencil/core": { - "version": "2.22.3", - "resolved": "https://registry.npmjs.org/@stencil/core/-/core-2.22.3.tgz", - "integrity": "sha512-kmVA0M/HojwsfkeHsifvHVIYe4l5tin7J5+DLgtl8h6WWfiMClND5K3ifCXXI2ETDNKiEk21p6jql3Fx9o2rng==" - }, - "@svgr/babel-plugin-add-jsx-attribute": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", - "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", - "requires": {} - }, - "@svgr/babel-plugin-remove-jsx-attribute": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-6.5.0.tgz", - "integrity": "sha512-8zYdkym7qNyfXpWvu4yq46k41pyNM9SOstoWhKlm+IfdCE1DdnRKeMUPsWIEO/DEkaWxJ8T9esNdG3QwQ93jBA==", - "requires": {} - }, - "@svgr/babel-plugin-remove-jsx-empty-expression": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-6.5.0.tgz", - "integrity": "sha512-NFdxMq3xA42Kb1UbzCVxplUc0iqSyM9X8kopImvFnB+uSDdzIHOdbs1op8ofAvVRtbg4oZiyRl3fTYeKcOe9Iw==", - "requires": {} - }, - "@svgr/babel-plugin-replace-jsx-attribute-value": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", - "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", - "requires": {} - }, - "@svgr/babel-plugin-svg-dynamic-title": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", - "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", - "requires": {} - }, - "@svgr/babel-plugin-svg-em-dimensions": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", - "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", - "requires": {} - }, - "@svgr/babel-plugin-transform-react-native-svg": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", - "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", - "requires": {} - }, - "@svgr/babel-plugin-transform-svg-component": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", - "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", - "requires": {} - }, - "@svgr/babel-preset": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", - "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", - 
"requires": { - "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", - "@svgr/babel-plugin-remove-jsx-attribute": "*", - "@svgr/babel-plugin-remove-jsx-empty-expression": "*", - "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", - "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", - "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", - "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", - "@svgr/babel-plugin-transform-svg-component": "^6.5.1" - } - }, - "@svgr/core": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", - "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", - "requires": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/plugin-jsx": "^6.5.1", - "camelcase": "^6.2.0", - "cosmiconfig": "^7.0.1" - }, - "dependencies": { - "@babel/core": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.20.2.tgz", - "integrity": "sha512-w7DbG8DtMrJcFOi4VrLm+8QM4az8Mo+PuLBKLp2zrYRCow8W/f9xiXm5sN53C8HksCyDQwCKha9JiDoIyPjT2g==", - "requires": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.2", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-module-transforms": "^7.20.2", - "@babel/helpers": "^7.20.1", - "@babel/parser": "^7.20.2", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" - } - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@svgr/hast-util-to-babel-ast": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", - "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", - "requires": { - "@babel/types": "^7.20.0", - "entities": "^4.4.0" - }, - "dependencies": { - "entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" - } - } - }, - "@svgr/plugin-jsx": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", - "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", - "requires": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/hast-util-to-babel-ast": "^6.5.1", - "svg-parser": "^2.0.4" - }, - "dependencies": { - "@babel/core": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.20.2.tgz", - "integrity": "sha512-w7DbG8DtMrJcFOi4VrLm+8QM4az8Mo+PuLBKLp2zrYRCow8W/f9xiXm5sN53C8HksCyDQwCKha9JiDoIyPjT2g==", - "requires": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.2", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-module-transforms": "^7.20.2", - "@babel/helpers": "^7.20.1", - "@babel/parser": "^7.20.2", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": 
"^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" - } - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@svgr/plugin-svgo": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz", - "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==", - "requires": { - "cosmiconfig": "^7.0.1", - "deepmerge": "^4.2.2", - "svgo": "^2.8.0" - } - }, - "@svgr/webpack": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz", - "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==", - "requires": { - "@babel/core": "^7.19.6", - "@babel/plugin-transform-react-constant-elements": "^7.18.12", - "@babel/preset-env": "^7.19.4", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.18.6", - "@svgr/core": "^6.5.1", - "@svgr/plugin-jsx": "^6.5.1", - "@svgr/plugin-svgo": "^6.5.1" - }, - "dependencies": { - "@babel/core": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.20.2.tgz", - "integrity": "sha512-w7DbG8DtMrJcFOi4VrLm+8QM4az8Mo+PuLBKLp2zrYRCow8W/f9xiXm5sN53C8HksCyDQwCKha9JiDoIyPjT2g==", - "requires": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.20.2", - "@babel/helper-compilation-targets": "^7.20.0", - "@babel/helper-module-transforms": "^7.20.2", - "@babel/helpers": "^7.20.1", - "@babel/parser": "^7.20.2", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.1", - "@babel/types": "^7.20.2", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" - } - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "@swc/core": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.49.tgz", - "integrity": "sha512-br44ZHOfE9YyRGcORSLkHFQHTvhwRcaithBJ1Q5y5iMGpLbH0Wai3GN49L60RvmGwxNJfWzT+E7+rNNR7ewKgA==", - "dev": true, - "requires": { - "@swc/core-darwin-arm64": "1.3.49", - "@swc/core-darwin-x64": "1.3.49", - "@swc/core-linux-arm-gnueabihf": "1.3.49", - "@swc/core-linux-arm64-gnu": "1.3.49", - "@swc/core-linux-arm64-musl": "1.3.49", - "@swc/core-linux-x64-gnu": "1.3.49", - "@swc/core-linux-x64-musl": "1.3.49", - "@swc/core-win32-arm64-msvc": "1.3.49", - "@swc/core-win32-ia32-msvc": "1.3.49", - "@swc/core-win32-x64-msvc": "1.3.49" - } - }, - "@swc/core-darwin-arm64": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.49.tgz", - "integrity": "sha512-g7aIfXh6uPHmhLXdjXQq5t3HAyS/EdvujasW1DIS5k8UqOBaSoCcSGtLIjzcLv3KujqNfYcm118E+12H0nY6fQ==", - "dev": true, - "optional": true - }, - "@swc/core-darwin-x64": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.3.49.tgz", - "integrity": "sha512-eSIxVX0YDw40Bre5sAx2BV3DzdIGzmQvCf2yiBvLqiiL6GC0mmuDeWbUCAzdUX6fJ6FUVEBMUVqNOc9oJ2/d5w==", - "dev": true, - "optional": true - }, - "@swc/core-linux-arm-gnueabihf": { - "version": "1.3.49", - "resolved": 
"https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.3.49.tgz", - "integrity": "sha512-8mj3IcRVr/OJY0mVITz6Z5osNAMJK5GiKDaZ+3QejPLbl6aiu4sH4GmTHDRN14RnaVXOpecsGcUoQmNoNa3u3w==", - "dev": true, - "optional": true - }, - "@swc/core-linux-arm64-gnu": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.3.49.tgz", - "integrity": "sha512-Rmg9xw6tmpOpf6GKKjpHQGmjfHzqSths5ebI2ahrHlhekzZF2HYmPkVw4bHda8Bja6mbaw8FVBgBHjPU8mMeDA==", - "dev": true, - "optional": true - }, - "@swc/core-linux-arm64-musl": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.3.49.tgz", - "integrity": "sha512-nlKPYMogAI3Aak6Mlkag8/2AlHAZ/DpH7RjhfMazsaGhD/sQOmYdyY9Al69ejpa419YJuREeeeLoojFlSsd30g==", - "dev": true, - "optional": true - }, - "@swc/core-linux-x64-gnu": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.3.49.tgz", - "integrity": "sha512-QOyeJQ6NVi73SJcizbwvIZTiGA/N+BxX9liRrvibumaQmRh8fWjJiLNsv3ODSHeuonak7E8Bf7a7NnSTyu48Mw==", - "dev": true, - "optional": true - }, - "@swc/core-linux-x64-musl": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.3.49.tgz", - "integrity": "sha512-WlDMz+SOpYC9O/ZBUw1oiyWI7HyUCMlf/HS8Fy/kRI3eGoGCUxVTCJ1mP57GdQr4Wg32Y/ZpO2KSNQFWnT8mAw==", - "dev": true, - "optional": true - }, - "@swc/core-win32-arm64-msvc": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.3.49.tgz", - "integrity": "sha512-41LZOeI94Za3twib8KOIjnHYAZ+nkBFmboaREsFR1760S7jiMVywqWX8nFZvn/CXj15Fjjgdgyuig+zMREwXwQ==", - "dev": true, - "optional": true - }, - "@swc/core-win32-ia32-msvc": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.3.49.tgz", - "integrity": "sha512-IdqLPoMKssyAoOCZdNXmnAd6/uyx+Hb9KSfZUHepZaNfwMy6J5XXrOsbYs3v53FH8MtekUUdV+mMX4me9bcv9w==", - "dev": true, - "optional": true - }, - "@swc/core-win32-x64-msvc": { - "version": "1.3.49", - "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.3.49.tgz", - "integrity": "sha512-7Fqjo5pS3uIohhSbYSaR0+e/bJdxmQb4oG97FIh5qvlCCGQaQ9UiaEeYy4uK0Ad+Menum1IXCAEiG7RHcl6Eyw==", - "dev": true, - "optional": true - }, - "@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "requires": { - "defer-to-connect": "^1.0.1" - } - }, - "@trysound/sax": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", - "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==" - }, - "@types/babel__core": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.1.tgz", - "integrity": "sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==", - "dev": true, - "requires": { - "@babel/parser": "^7.20.7", - "@babel/types": "^7.20.7", - "@types/babel__generator": "*", - "@types/babel__template": "*", - "@types/babel__traverse": "*" - } - }, - "@types/babel__generator": { - "version": "7.6.4", - "resolved": 
"https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.4.tgz", - "integrity": "sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==", - "dev": true, - "requires": { - "@babel/types": "^7.0.0" - } - }, - "@types/babel__template": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz", - "integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==", - "dev": true, - "requires": { - "@babel/parser": "^7.1.0", - "@babel/types": "^7.0.0" - } - }, - "@types/babel__traverse": { - "version": "7.20.1", - "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.1.tgz", - "integrity": "sha512-MitHFXnhtgwsGZWtT68URpOvLN4EREih1u3QtQiN4VdAxWKRVvGCSvw/Qth0M0Qq3pJpnGOu5JaM/ydK7OGbqg==", - "dev": true, - "requires": { - "@babel/types": "^7.20.7" - } - }, - "@types/body-parser": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", - "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", - "requires": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "@types/bonjour": { - "version": "3.5.10", - "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", - "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", - "requires": { - "@types/node": "*" - } - }, - "@types/cheerio": { - "version": "0.22.31", - "resolved": "https://registry.npmjs.org/@types/cheerio/-/cheerio-0.22.31.tgz", - "integrity": "sha512-Kt7Cdjjdi2XWSfrZ53v4Of0wG3ZcmaegFXjMmz9tfNrZSkzzo36G0AL1YqSdcIA78Etjt6E609pt5h1xnQkPUw==", - "requires": { - "@types/node": "*" - } - }, - "@types/connect": { - "version": "3.4.35", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", - "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", - "requires": { - "@types/node": "*" - } - }, - "@types/connect-history-api-fallback": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz", - "integrity": "sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==", - "requires": { - "@types/express-serve-static-core": "*", - "@types/node": "*" - } - }, - "@types/debug": { - "version": "4.1.12", - "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", - "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", - "requires": { - "@types/ms": "*" - } - }, - "@types/eslint": { - "version": "8.4.1", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.4.1.tgz", - "integrity": "sha512-GE44+DNEyxxh2Kc6ro/VkIj+9ma0pO0bwv9+uHSyBrikYOHr8zYcdPvnBOp1aw8s+CjRvuSx7CyWqRrNFQ59mA==", - "requires": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "@types/eslint-scope": { - "version": "3.7.3", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.3.tgz", - "integrity": "sha512-PB3ldyrcnAicT35TWPs5IcwKD8S333HMaa2VVv4+wdvebJkjWuW/xESoB8IwRcog8HYVYamb1g/R31Qv5Bx03g==", - "requires": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, - "@types/estree": { - "version": "0.0.51", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.51.tgz", 
- "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==" - }, - "@types/express": { - "version": "4.17.13", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz", - "integrity": "sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA==", - "requires": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.18", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "@types/express-serve-static-core": { - "version": "4.17.30", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.30.tgz", - "integrity": "sha512-gstzbTWro2/nFed1WXtf+TtrpwxH7Ggs4RLYTLbeVgIkUQOI3WG/JKjgeOU1zXDvezllupjrf8OPIdvTbIaVOQ==", - "requires": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*" - } - }, - "@types/graceful-fs": { - "version": "4.1.6", - "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz", - "integrity": "sha512-Sig0SNORX9fdW+bQuTEovKj3uHcUL6LQKbCrrqb1X7J6/ReAbhCXRAhc+SMejhLELFj2QcyuxmUooZ4bt5ReSw==", - "dev": true, - "requires": { - "@types/node": "*" - } - }, - "@types/hast": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.4.tgz", - "integrity": "sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g==", - "requires": { - "@types/unist": "*" - } - }, - "@types/history": { - "version": "4.7.11", - "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", - "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" - }, - "@types/hoist-non-react-statics": { - "version": "3.3.5", - "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.5.tgz", - "integrity": "sha512-SbcrWzkKBw2cdwRTwQAswfpB9g9LJWfjtUeW/jvNwbhC8cpmmNYVePa+ncbUe0rGTQ7G3Ff6mYUN2VMfLVr+Sg==", - "requires": { - "@types/react": "*", - "hoist-non-react-statics": "^3.3.0" - } - }, - "@types/html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" - }, - "@types/http-proxy": { - "version": "1.17.9", - "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.9.tgz", - "integrity": "sha512-QsbSjA/fSk7xB+UXlCT3wHBy5ai9wOcNDWwZAtud+jXhwOM3l+EYZh8Lng4+/6n8uar0J7xILzqftJdJ/Wdfkw==", - "requires": { - "@types/node": "*" - } - }, - "@types/istanbul-lib-coverage": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", - "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", - "dev": true - }, - "@types/istanbul-lib-report": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", - "dev": true, - "requires": { - "@types/istanbul-lib-coverage": "*" - } - }, - "@types/istanbul-reports": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", - "integrity": 
"sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", - "dev": true, - "requires": { - "@types/istanbul-lib-report": "*" - } - }, - "@types/jest": { - "version": "29.5.4", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.4.tgz", - "integrity": "sha512-PhglGmhWeD46FYOVLt3X7TiWjzwuVGW9wG/4qocPevXMjCmrIc5b6db9WjeGE4QYVpUAWMDv3v0IiBwObY289A==", - "dev": true, - "requires": { - "expect": "^29.0.0", - "pretty-format": "^29.0.0" - } - }, - "@types/json-schema": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" - }, - "@types/lodash": { - "version": "4.14.202", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.202.tgz", - "integrity": "sha512-OvlIYQK9tNneDlS0VN54LLd5uiPCBOp7gS5Z0f1mjoJYBrtStzgmJBxONW3U6OZqdtNzZPmn9BS/7WI7BFFcFQ==" - }, - "@types/lodash.clonedeep": { - "version": "4.5.9", - "resolved": "https://registry.npmjs.org/@types/lodash.clonedeep/-/lodash.clonedeep-4.5.9.tgz", - "integrity": "sha512-19429mWC+FyaAhOLzsS8kZUsI+/GmBAQ0HFiCPsKGU+7pBXOQWhyrY6xNNDwUSX8SMZMJvuFVMF9O5dQOlQK9Q==", - "requires": { - "@types/lodash": "*" - } - }, - "@types/mdast": { - "version": "3.0.10", - "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.10.tgz", - "integrity": "sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA==", - "requires": { - "@types/unist": "*" - } - }, - "@types/mime": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", - "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==" - }, - "@types/ms": { - "version": "0.7.34", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", - "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" - }, - "@types/node": { - "version": "17.0.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.13.tgz", - "integrity": "sha512-Y86MAxASe25hNzlDbsviXl8jQHb0RDvKt4c40ZJQ1Don0AAL0STLZSs4N+6gLEO55pedy7r2cLwS+ZDxPm/2Bw==" - }, - "@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==" - }, - "@types/parse5": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz", - "integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw==" - }, - "@types/prop-types": { - "version": "15.7.4", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.4.tgz", - "integrity": "sha512-rZ5drC/jWjrArrS8BR6SIr4cWpW09RNTYt9AMZo3Jwwif+iacXAqgVjm0B0Bv/S1jhDXKHqRVNCbACkJ89RAnQ==" - }, - "@types/q": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.5.tgz", - "integrity": "sha512-L28j2FcJfSZOnL1WBjDYp2vUHCeIFlyYI/53EwD/rKUBQ7MtUUfbQWiyKJGpcnv4/WgrhWsFKrcPstcAt/J0tQ==" - }, - "@types/qs": { - "version": "6.9.7", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", - "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==" - }, - "@types/range-parser": { - "version": "1.2.4", - "resolved": 
"https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", - "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" - }, - "@types/react": { - "version": "17.0.38", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.38.tgz", - "integrity": "sha512-SI92X1IA+FMnP3qM5m4QReluXzhcmovhZnLNm3pyeQlooi02qI7sLiepEYqT678uNiyc25XfCqxREFpy3W7YhQ==", - "requires": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - }, - "@types/react-redux": { - "version": "7.1.33", - "resolved": "https://registry.npmjs.org/@types/react-redux/-/react-redux-7.1.33.tgz", - "integrity": "sha512-NF8m5AjWCkert+fosDsN3hAlHzpjSiXlVy9EgQEmLoBhaNXbmyeGs/aj5dQzKuF+/q+S7JQagorGDW8pJ28Hmg==", - "requires": { - "@types/hoist-non-react-statics": "^3.3.0", - "@types/react": "*", - "hoist-non-react-statics": "^3.3.0", - "redux": "^4.0.0" - } - }, - "@types/react-router": { - "version": "5.1.20", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", - "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", - "requires": { - "@types/history": "^4.7.11", - "@types/react": "*" - } - }, - "@types/react-router-config": { - "version": "5.0.7", - "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.7.tgz", - "integrity": "sha512-pFFVXUIydHlcJP6wJm7sDii5mD/bCmmAY0wQzq+M+uX7bqS95AQqHZWP1iNMKrWVQSuHIzj5qi9BvrtLX2/T4w==", - "requires": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "^5.1.0" - } - }, - "@types/react-router-dom": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", - "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", - "requires": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "*" - } - }, - "@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" - }, - "@types/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-pSAff4IAxJjfAXUG6tFkO7dsSbTmf8CtUpfhhZ5VhkRpC4628tJhh3+V6H1E+/Gs9piSzYKT5yzHO5M4GG9jkw==", - "requires": { - "@types/node": "*" - } - }, - "@types/scheduler": { - "version": "0.16.2", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz", - "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==" - }, - "@types/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", - "requires": { - "@types/express": "*" - } - }, - "@types/serve-static": { - "version": "1.13.10", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz", - "integrity": "sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ==", - "requires": { - "@types/mime": "^1", - "@types/node": "*" - } - }, - "@types/sockjs": { - "version": "0.3.33", - "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", - "integrity": 
"sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", - "requires": { - "@types/node": "*" - } - }, - "@types/stack-utils": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz", - "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", - "dev": true - }, - "@types/unist": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz", - "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==" - }, - "@types/ws": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz", - "integrity": "sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==", - "requires": { - "@types/node": "*" - } - }, - "@types/yargs": { - "version": "17.0.24", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", - "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", - "dev": true, - "requires": { - "@types/yargs-parser": "*" - } - }, - "@types/yargs-parser": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==", - "dev": true - }, - "@webassemblyjs/ast": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", - "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", - "requires": { - "@webassemblyjs/helper-numbers": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1" - } - }, - "@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", - "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==" - }, - "@webassemblyjs/helper-api-error": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", - "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==" - }, - "@webassemblyjs/helper-buffer": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", - "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==" - }, - "@webassemblyjs/helper-numbers": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", - "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", - "requires": { - "@webassemblyjs/floating-point-hex-parser": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", - "@xtuc/long": "4.2.2" - } - }, - "@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", - "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==" - }, - "@webassemblyjs/helper-wasm-section": { - "version": "1.11.1", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", - "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", - "requires": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1" - } - }, - "@webassemblyjs/ieee754": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", - "integrity": "sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", - "requires": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "@webassemblyjs/leb128": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", - "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", - "requires": { - "@xtuc/long": "4.2.2" - } - }, - "@webassemblyjs/utf8": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", - "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==" - }, - "@webassemblyjs/wasm-edit": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", - "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", - "requires": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/helper-wasm-section": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-opt": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "@webassemblyjs/wast-printer": "1.11.1" - } - }, - "@webassemblyjs/wasm-gen": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", - "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", - "requires": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" - } - }, - "@webassemblyjs/wasm-opt": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", - "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", - "requires": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1" - } - }, - "@webassemblyjs/wasm-parser": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", - "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", - "requires": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" - } - }, - "@webassemblyjs/wast-printer": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", - "integrity": 
"sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", - "requires": { - "@webassemblyjs/ast": "1.11.1", - "@xtuc/long": "4.2.2" - } - }, - "@xtuc/ieee754": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" - }, - "@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" - }, - "abort-controller": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", - "requires": { - "event-target-shim": "^5.0.0" - } - }, - "accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "requires": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - } - }, - "acorn": { - "version": "8.8.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.0.tgz", - "integrity": "sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==" - }, - "acorn-import-assertions": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", - "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", - "requires": {} - }, - "acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==" - }, - "address": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/address/-/address-1.1.2.tgz", - "integrity": "sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==" - }, - "aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "requires": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - } - }, - "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "ajv-formats": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", - "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", - "requires": { - "ajv": "^8.0.0" - }, - "dependencies": { - "ajv": { - "version": "8.9.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.9.0.tgz", - "integrity": "sha512-qOKJyNj/h+OWx7s5DePL6Zu1KeM9jPZhwBqs+7DzP6bGOvqzVCSf0xueYmVuaC/oQ/VtS2zLMLHdQFbkka+XDQ==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - 
"uri-js": "^4.2.2" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - } - } - }, - "ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "requires": {} - }, - "algoliasearch": { - "version": "4.19.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.19.1.tgz", - "integrity": "sha512-IJF5b93b2MgAzcE/tuzW0yOPnuUyRgGAtaPv5UUywXM8kzqfdwZTO4sPJBzoGz1eOy6H9uEchsJsBFTELZSu+g==", - "requires": { - "@algolia/cache-browser-local-storage": "4.19.1", - "@algolia/cache-common": "4.19.1", - "@algolia/cache-in-memory": "4.19.1", - "@algolia/client-account": "4.19.1", - "@algolia/client-analytics": "4.19.1", - "@algolia/client-common": "4.19.1", - "@algolia/client-personalization": "4.19.1", - "@algolia/client-search": "4.19.1", - "@algolia/logger-common": "4.19.1", - "@algolia/logger-console": "4.19.1", - "@algolia/requester-browser-xhr": "4.19.1", - "@algolia/requester-common": "4.19.1", - "@algolia/requester-node-http": "4.19.1", - "@algolia/transporter": "4.19.1" - } - }, - "algoliasearch-helper": { - "version": "3.14.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.14.0.tgz", - "integrity": "sha512-gXDXzsSS0YANn5dHr71CUXOo84cN4azhHKUbg71vAWnH+1JBiR4jf7to3t3JHXknXkbV0F7f055vUSBKrltHLQ==", - "requires": { - "@algolia/events": "^4.0.1" - } - }, - "alphanum-sort": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", - "integrity": "sha512-0FcBfdcmaumGPQ0qPn7Q5qTgz/ooXgIyp1rf8ik5bGX8mpE2YHjC0P/eyQvxu1GURYQgq9ozf2mteQ5ZD9YiyQ==" - }, - "ansi-align": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", - "requires": { - "string-width": "^4.1.0" - } - }, - "ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dev": true, - "requires": { - "type-fest": "^0.21.3" - }, - "dependencies": { - "type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "dev": true - } - } - }, - "ansi-html-community": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", - "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==" - }, - "ansi-red": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/ansi-red/-/ansi-red-0.1.1.tgz", - "integrity": "sha512-ewaIr5y+9CUTGFwZfpECUbFlGcC0GCw1oqR9RI6h1gQCd9Aj2GxSckCnPsVJnmfMZbwFYE+leZGASgkWl06Jow==", - "requires": { - "ansi-wrap": "0.1.0" - } - }, - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "ansi-wrap": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/ansi-wrap/-/ansi-wrap-0.1.0.tgz", - "integrity": "sha512-ZyznvL8k/FZeQHr2T6LzcJ/+vBApDnMNZvfVFy3At0knswWd6rJ3/0Hhmpu8oqa6C92npmozs890sX9Dl6q+Qw==" - }, - "any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==" - }, - "anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - } - }, - "arch": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", - "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==" - }, - "archive-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/archive-type/-/archive-type-4.0.0.tgz", - "integrity": "sha512-zV4Ky0v1F8dBrdYElwTvQhweQ0P7Kwc1aluqJsYtOBP01jXcWCyW2IEfI1YiqsG+Iy7ZR+o5LF1N+PGECBxHWA==", - "requires": { - "file-type": "^4.2.0" - }, - "dependencies": { - "file-type": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-4.4.0.tgz", - "integrity": "sha512-f2UbFQEk7LXgWpi5ntcO86OeA/cC80fuDDDaX/fZ2ZGel+AF7leRQqBBW1eJNiiQkrZlAoM6P+VYP5P6bOlDEQ==" - } - } - }, - "arg": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" - }, - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "arr-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha512-YVIQ82gZPGBebQV/a8dar4AitzCQs0jjXwMPZllpXMaGjXPYVUawSxQrRsjhjupyVxEvbHgUmIhKVlND+j02kA==" - }, - "arr-flatten": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==" - }, - "arr-union": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", - "integrity": "sha512-sKpyeERZ02v1FeCZT8lrfJq5u6goHCtpTAzPwJYe7c8SPFOboNjNg1vz2L4VTn9T4PQxEx13TbXLmYUcS6Ug7Q==" - }, - "array-buffer-byte-length": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz", - "integrity": "sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==", - "requires": { - "call-bind": "^1.0.2", - "is-array-buffer": "^3.0.1" - } - }, - "array-find-index": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", - "integrity": "sha512-M1HQyIXcBGtVywBt8WVdim+lrNaK7VHp99Qt5pSNziXznKHViIBbXWtfRTpEFpF/c4FdfxNAsCCwPp5phBYJtw==" - }, - "array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" - }, - "array-move": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/array-move/-/array-move-3.0.1.tgz", - "integrity": "sha512-H3Of6NIn2nNU1gsVDqDnYKY/LCdWvCMMOWifNGhKcVQgiZ6nOek39aESOvro6zmueP07exSl93YLvkN4fZOkSg==" - }, - "array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==" - }, - "array-uniq": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", - "integrity": "sha512-MNha4BWQ6JbwhFhj03YK552f7cb3AzoE8SzeljgChvL1dl3IcvggXVz1DilzySZkCja+CXuZbdW7yATchWn8/Q==" - }, - "array-unique": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha512-SleRWjh9JUud2wH1hPs9rZBZ33H6T9HOiL0uwGnGx9FpE6wKGyfWugmbkEOIs6qWrZhg0LWeLziLrEwQJhs5mQ==" - }, - "array.prototype.filter": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array.prototype.filter/-/array.prototype.filter-1.0.2.tgz", - "integrity": "sha512-us+UrmGOilqttSOgoWZTpOvHu68vZT2YCjc/H4vhu56vzZpaDFBhB+Se2UwqWzMKbDv7Myq5M5pcZLAtUvTQdQ==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-array-method-boxes-properly": "^1.0.0", - "is-string": "^1.0.7" - } - }, - "array.prototype.find": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/array.prototype.find/-/array.prototype.find-2.2.1.tgz", - "integrity": "sha512-I2ri5Z9uMpMvnsNrHre9l3PaX+z9D0/z6F7Yt2u15q7wt0I62g5kX6xUKR1SJiefgG+u2/gJUmM8B47XRvQR6w==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0" - } - }, - "array.prototype.flat": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0" - } - }, - "array.prototype.reduce": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/array.prototype.reduce/-/array.prototype.reduce-1.0.5.tgz", - "integrity": "sha512-kDdugMl7id9COE8R7MHF5jWk7Dqt/fs4Pv+JXoICnYwqpjjjbUurz6w5fT5IG6brLdJhv6/VoHB0H7oyIBXd+Q==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-array-method-boxes-properly": "^1.0.0", - "is-string": "^1.0.7" - } - }, - "arrify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==" - }, - "asap": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", - "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==" - }, - 
"asn1": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", - "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", - "requires": { - "safer-buffer": "~2.1.0" - } - }, - "asn1.js": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", - "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", - "requires": { - "bn.js": "^4.0.0", - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0", - "safer-buffer": "^2.1.0" - }, - "dependencies": { - "bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - } - } - }, - "assert": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/assert/-/assert-2.1.0.tgz", - "integrity": "sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw==", - "requires": { - "call-bind": "^1.0.2", - "is-nan": "^1.3.2", - "object-is": "^1.1.5", - "object.assign": "^4.1.4", - "util": "^0.12.5" - } - }, - "assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==" - }, - "assign-symbols": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", - "integrity": "sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==" - }, - "async": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", - "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", - "requires": { - "lodash": "^4.17.14" - } - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==" - }, - "atoa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/atoa/-/atoa-1.0.0.tgz", - "integrity": "sha512-VVE1H6cc4ai+ZXo/CRWoJiHXrA1qfA31DPnx6D20+kSI547hQN5Greh51LQ1baMRMfxO5K5M4ImMtZbZt2DODQ==" - }, - "atob": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==" - }, - "autolinker": { - "version": "3.16.2", - "resolved": "https://registry.npmjs.org/autolinker/-/autolinker-3.16.2.tgz", - "integrity": "sha512-JiYl7j2Z19F9NdTmirENSUUIIL/9MytEWtmzhfmsKPCp9E+G35Y0UNCMoM9tFigxT59qSc8Ml2dlZXOCVTYwuA==", - "requires": { - "tslib": "^2.3.0" - } - }, - "autoprefixer": { - "version": "10.4.15", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.15.tgz", - "integrity": "sha512-KCuPB8ZCIqFdA4HwKXsvz7j6gvSDNhDP7WnUjBleRkKjPdvCmHFuQ77ocavI8FT6NdvlBnE2UFr2H4Mycn8Vew==", - "requires": { - "browserslist": "^4.21.10", - "caniuse-lite": "^1.0.30001520", - "fraction.js": "^4.2.0", - "normalize-range": "^0.1.2", 
- "picocolors": "^1.0.0", - "postcss-value-parser": "^4.2.0" - } - }, - "available-typed-arrays": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", - "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==" - }, - "aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==" - }, - "aws4": { - "version": "1.12.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz", - "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==" - }, - "axios": { - "version": "0.25.0", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.25.0.tgz", - "integrity": "sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==", - "requires": { - "follow-redirects": "^1.14.7" - } - }, - "babel-jest": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.6.4.tgz", - "integrity": "sha512-meLj23UlSLddj6PC+YTOFRgDAtjnZom8w/ACsrx0gtPtv5cJZk0A5Unk5bV4wixD7XaPCN1fQvpww8czkZURmw==", - "dev": true, - "requires": { - "@jest/transform": "^29.6.4", - "@types/babel__core": "^7.1.14", - "babel-plugin-istanbul": "^6.1.1", - "babel-preset-jest": "^29.6.3", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "slash": "^3.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "babel-loader": { - "version": "8.2.5", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.5.tgz", - "integrity": "sha512-OSiFfH89LrEMiWd4pLNqGz4CwJDtbs2ZVc+iGu2HrkRfPxId9F2anQj38IxWpmRfsUY0aBZYi1EFcd3mhtRMLQ==", - "requires": { 
- "find-cache-dir": "^3.3.1", - "loader-utils": "^2.0.0", - "make-dir": "^3.1.0", - "schema-utils": "^2.6.5" - }, - "dependencies": { - "schema-utils": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", - "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", - "requires": { - "@types/json-schema": "^7.0.5", - "ajv": "^6.12.4", - "ajv-keywords": "^3.5.2" - } - } - } - }, - "babel-plugin-apply-mdx-type-prop": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz", - "integrity": "sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==", - "requires": { - "@babel/helper-plugin-utils": "7.10.4", - "@mdx-js/util": "1.6.22" - }, - "dependencies": { - "@babel/helper-plugin-utils": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", - "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" - } - } - }, - "babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "requires": { - "object.assign": "^4.1.0" - } - }, - "babel-plugin-extract-import-names": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz", - "integrity": "sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==", - "requires": { - "@babel/helper-plugin-utils": "7.10.4" - }, - "dependencies": { - "@babel/helper-plugin-utils": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", - "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" - } - } - }, - "babel-plugin-istanbul": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", - "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.0.0", - "@istanbuljs/load-nyc-config": "^1.0.0", - "@istanbuljs/schema": "^0.1.2", - "istanbul-lib-instrument": "^5.0.4", - "test-exclude": "^6.0.0" - }, - "dependencies": { - "istanbul-lib-instrument": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", - "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", - "dev": true, - "requires": { - "@babel/core": "^7.12.3", - "@babel/parser": "^7.14.7", - "@istanbuljs/schema": "^0.1.2", - "istanbul-lib-coverage": "^3.2.0", - "semver": "^6.3.0" - } - }, - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true - } - } - }, - "babel-plugin-jest-hoist": { - "version": "29.6.3", - "resolved": 
"https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", - "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", - "dev": true, - "requires": { - "@babel/template": "^7.3.3", - "@babel/types": "^7.3.3", - "@types/babel__core": "^7.1.14", - "@types/babel__traverse": "^7.0.6" - } - }, - "babel-plugin-polyfill-corejs2": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz", - "integrity": "sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q==", - "requires": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-define-polyfill-provider": "^0.3.3", - "semver": "^6.1.1" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "babel-plugin-polyfill-corejs3": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz", - "integrity": "sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ==", - "requires": { - "@babel/helper-define-polyfill-provider": "^0.3.1", - "core-js-compat": "^3.21.0" - } - }, - "babel-plugin-polyfill-regenerator": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz", - "integrity": "sha512-Y2B06tvgHYt1x0yz17jGkGeeMr5FeKUu+ASJ+N6nB5lQ8Dapfg42i0OVrf8PNGJ3zKL4A23snMi1IRwrqqND7A==", - "requires": { - "@babel/helper-define-polyfill-provider": "^0.3.1" - } - }, - "babel-preset-current-node-syntax": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", - "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", - "dev": true, - "requires": { - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-bigint": "^7.8.3", - "@babel/plugin-syntax-class-properties": "^7.8.3", - "@babel/plugin-syntax-import-meta": "^7.8.3", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.8.3", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-top-level-await": "^7.8.3" - } - }, - "babel-preset-jest": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", - "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", - "dev": true, - "requires": { - "babel-plugin-jest-hoist": "^29.6.3", - "babel-preset-current-node-syntax": "^1.0.0" - } - }, - "babylon": { - "version": "6.18.0", - "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==" - }, - "bail": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", - "integrity": 
"sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==" - }, - "balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "base": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", - "requires": { - "cache-base": "^1.0.1", - "class-utils": "^0.3.5", - "component-emitter": "^1.2.1", - "define-property": "^1.0.0", - "isobject": "^3.0.1", - "mixin-deep": "^1.2.0", - "pascalcase": "^0.1.1" - }, - "dependencies": { - "define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", - "requires": { - "is-descriptor": "^1.0.0" - } - } - } - }, - "base16": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz", - "integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==" - }, - "base64-arraybuffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz", - "integrity": "sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==" - }, - "base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "batch": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" - }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", - "requires": { - "tweetnacl": "^0.14.3" - } - }, - "big-integer": { - "version": "1.6.51", - "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz", - "integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==" - }, - "big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==" - }, - "bin-build": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bin-build/-/bin-build-3.0.0.tgz", - "integrity": "sha512-jcUOof71/TNAI2uM5uoUaDq2ePcVBQ3R/qhxAz1rX7UfvduAL/RXD3jXzvn8cVcDJdGVkiR1shal3OH0ImpuhA==", - "requires": { - "decompress": "^4.0.0", - "download": "^6.2.2", - "execa": "^0.7.0", - "p-map-series": "^1.0.0", - "tempfile": "^2.0.0" - }, - "dependencies": { - "cross-spawn": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", - "requires": { - "lru-cache": "^4.0.1", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "execa": { - "version": 
"0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha512-RztN09XglpYI7aBBrJCPW95jEH7YF1UEPOoX9yDhUTPdp7mK+CQvnLTuD10BNXZ3byLTu2uehZ8EcKT/4CGiFw==", - "requires": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", - "requires": { - "path-key": "^2.0.0" - } - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==" - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==" - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==" - } - } - }, - "bin-check": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bin-check/-/bin-check-4.1.0.tgz", - "integrity": "sha512-b6weQyEUKsDGFlACWSIOfveEnImkJyK/FGW6FAG42loyoquvjdtOIqO6yBFzHyqyVVhNgNkQxxx09SFLK28YnA==", - "requires": { - "execa": "^0.7.0", - "executable": "^4.1.0" - }, - "dependencies": { - "cross-spawn": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", - "requires": { - "lru-cache": "^4.0.1", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "execa": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": 
"sha512-RztN09XglpYI7aBBrJCPW95jEH7YF1UEPOoX9yDhUTPdp7mK+CQvnLTuD10BNXZ3byLTu2uehZ8EcKT/4CGiFw==", - "requires": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", - "requires": { - "path-key": "^2.0.0" - } - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==" - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==" - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==" - } - } - }, - "bin-version": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bin-version/-/bin-version-3.1.0.tgz", - "integrity": "sha512-Mkfm4iE1VFt4xd4vH+gx+0/71esbfus2LsnCGe8Pi4mndSPyT+NGES/Eg99jx8/lUGWfu3z2yuB/bt5UB+iVbQ==", - "requires": { - "execa": "^1.0.0", - "find-versions": "^3.0.0" - }, - "dependencies": { - "cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "requires": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": 
"sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", - "requires": { - "path-key": "^2.0.0" - } - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==" - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==" - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "bin-version-check": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/bin-version-check/-/bin-version-check-4.0.0.tgz", - "integrity": "sha512-sR631OrhC+1f8Cvs8WyVWOA33Y8tgwjETNPyyD/myRBXLkfS/vl74FmH/lFcRl9KY3zwGh7jFhvyk9vV3/3ilQ==", - "requires": { - "bin-version": "^3.0.0", - "semver": "^5.6.0", - "semver-truncate": "^1.1.2" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } - } - }, - "bin-wrapper": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bin-wrapper/-/bin-wrapper-4.1.0.tgz", - "integrity": "sha512-hfRmo7hWIXPkbpi0ZltboCMVrU+0ClXR/JgbCKKjlDjQf6igXa7OwdqNcFWQZPZTgiY7ZpzE3+LjjkLiTN2T7Q==", - "requires": { - "bin-check": "^4.1.0", - "bin-version-check": "^4.0.0", - "download": "^7.1.0", - "import-lazy": "^3.1.0", - "os-filter-obj": "^2.0.0", - "pify": "^4.0.1" - }, - "dependencies": { - "@sindresorhus/is": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.7.0.tgz", - "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow==" - }, - "cacheable-request": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-2.1.4.tgz", - "integrity": 
"sha512-vag0O2LKZ/najSoUwDbVlnlCFvhBE/7mGTY2B5FgCBDcRD+oVV1HYTOwM6JZfMg/hIcM6IwnTZ1uQQL5/X3xIQ==", - "requires": { - "clone-response": "1.0.2", - "get-stream": "3.0.0", - "http-cache-semantics": "3.8.1", - "keyv": "3.0.0", - "lowercase-keys": "1.0.0", - "normalize-url": "2.0.1", - "responselike": "1.0.2" - } - }, - "download": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/download/-/download-7.1.0.tgz", - "integrity": "sha512-xqnBTVd/E+GxJVrX5/eUJiLYjCGPwMpdL+jGhGU57BvtcA7wwhtHVbXBeUk51kOpW3S7Jn3BQbN9Q1R1Km2qDQ==", - "requires": { - "archive-type": "^4.0.0", - "caw": "^2.0.1", - "content-disposition": "^0.5.2", - "decompress": "^4.2.0", - "ext-name": "^5.0.0", - "file-type": "^8.1.0", - "filenamify": "^2.0.0", - "get-stream": "^3.0.0", - "got": "^8.3.1", - "make-dir": "^1.2.0", - "p-event": "^2.1.0", - "pify": "^3.0.0" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "file-type": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-8.1.0.tgz", - "integrity": "sha512-qyQ0pzAy78gVoJsmYeNgl8uH8yKhr1lVhW7JbzJmnlRi0I4R2eEDEJZVKG8agpDnLpacwNbDhLNG/LMdxHD2YQ==" - }, - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==" - }, - "got": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/got/-/got-8.3.2.tgz", - "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==", - "requires": { - "@sindresorhus/is": "^0.7.0", - "cacheable-request": "^2.1.1", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^3.0.0", - "into-stream": "^3.1.0", - "is-retry-allowed": "^1.1.0", - "isurl": "^1.0.0-alpha5", - "lowercase-keys": "^1.0.0", - "mimic-response": "^1.0.0", - "p-cancelable": "^0.4.0", - "p-timeout": "^2.0.1", - "pify": "^3.0.0", - "safe-buffer": "^5.1.1", - "timed-out": "^4.0.1", - "url-parse-lax": "^3.0.0", - "url-to-options": "^1.0.1" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "http-cache-semantics": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz", - "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==" - }, - "import-lazy": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-3.1.0.tgz", - "integrity": "sha512-8/gvXvX2JMn0F+CDlSC4l6kOmVaLOO3XLkksI7CI3Ud95KDYJuYur2b9P/PUt/i/pDAMd/DulQsNbbbmRRsDIQ==" - }, - "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==" - }, - "keyv": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz", - "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==", - "requires": { - "json-buffer": "3.0.0" - } - }, - 
"lowercase-keys": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz", - "integrity": "sha512-RPlX0+PHuvxVDZ7xX+EBVAp4RsVxP/TdDSN2mJYdiq1Lc4Hz7EUSjUI7RZrKKlmrIzVhf6Jo2stj7++gVarS0A==" - }, - "make-dir": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", - "requires": { - "pify": "^3.0.0" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "normalize-url": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", - "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", - "requires": { - "prepend-http": "^2.0.0", - "query-string": "^5.0.1", - "sort-keys": "^2.0.0" - } - }, - "p-cancelable": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz", - "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ==" - }, - "p-event": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/p-event/-/p-event-2.3.1.tgz", - "integrity": "sha512-NQCqOFhbpVTMX4qMe8PF8lbGtzZ+LCiN7pcNrb/413Na7+TRoe1xkKUzuWa/YEJdGQ0FvKtj35EEbDoVPO2kbA==", - "requires": { - "p-timeout": "^2.0.1" - } - }, - "p-timeout": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz", - "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==", - "requires": { - "p-finally": "^1.0.0" - } - }, - "sort-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", - "integrity": "sha512-/dPCrG1s3ePpWm6yBbxZq5Be1dXGLyLn9Z791chDC3NFrpkVbWGzkBwPN1knaciexFXgRJ7hzdnwZ4stHSDmjg==", - "requires": { - "is-plain-obj": "^1.0.0" - } - } - } - }, - "binary": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/binary/-/binary-0.3.0.tgz", - "integrity": "sha512-D4H1y5KYwpJgK8wk1Cue5LLPgmwHKYSChkbspQg5JtVuR5ulGckxfR62H3AE9UDkdMC8yyXlqYihuz3Aqg2XZg==", - "requires": { - "buffers": "~0.1.1", - "chainsaw": "~0.1.0" - } - }, - "binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==" - }, - "bl": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", - "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", - "requires": { - "readable-stream": "^2.3.5", - "safe-buffer": "^5.1.1" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "requires": { - "core-util-is": 
"~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "bluebird": { - "version": "3.4.7", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.4.7.tgz", - "integrity": "sha512-iD3898SR7sWVRHbiQv+sHUtHnMvC1o3nW5rAcqnq3uOn07DSAppZYUkIGslDz6gXC7HfunPe7YVBgoEJASPcHA==" - }, - "bn.js": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", - "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==" - }, - "body": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/body/-/body-5.1.0.tgz", - "integrity": "sha512-chUsBxGRtuElD6fmw1gHLpvnKdVLK302peeFa9ZqAEk8TyzZ3fygLyUEDDPTJvL9+Bor0dIwn6ePOsRM2y0zQQ==", - "requires": { - "continuable-cache": "^0.3.1", - "error": "^7.0.0", - "raw-body": "~1.1.0", - "safe-json-parse": "~1.0.1" - }, - "dependencies": { - "bytes": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz", - "integrity": "sha512-/x68VkHLeTl3/Ll8IvxdwzhrT+IyKc52e/oyHhA2RwqPqswSnjVbSddfPRwAsJtbilMAPSRWwAlpxdYsSWOTKQ==" - }, - "raw-body": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-1.1.7.tgz", - "integrity": "sha512-WmJJU2e9Y6M5UzTOkHaM7xJGAPQD8PNzx3bAd2+uhZAim6wDk6dAZxPVYLF67XhbR4hmKGh33Lpmh4XWrCH5Mg==", - "requires": { - "bytes": "1", - "string_decoder": "0.10" - } - }, - "string_decoder": { - "version": "0.10.31", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", - "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==" - } - } - }, - "body-parser": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz", - "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==", - "requires": { - "bytes": "3.1.2", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.10.3", - "raw-body": "2.5.1", - "type-is": "~1.6.18", - "unpipe": "1.0.0" - }, - "dependencies": { - "bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" - }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - } - } - }, - "bonjour-service": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.0.13.tgz", - "integrity": 
"sha512-LWKRU/7EqDUC9CTAQtuZl5HzBALoCYwtLhffW3et7vZMwv3bWLpJf8bRYlMD5OCcDpTfnPgNCV4yo9ZIaJGMiA==", - "requires": { - "array-flatten": "^2.1.2", - "dns-equal": "^1.0.0", - "fast-deep-equal": "^3.1.3", - "multicast-dns": "^7.2.5" - } - }, - "boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=" - }, - "boxen": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", - "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", - "requires": { - "ansi-align": "^3.0.0", - "camelcase": "^6.2.0", - "chalk": "^4.1.0", - "cli-boxes": "^2.2.1", - "string-width": "^4.2.2", - "type-fest": "^0.20.2", - "widest-line": "^3.1.0", - "wrap-ansi": "^7.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "requires": { - "fill-range": "^7.0.1" - } - }, - "brorand": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": "sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==" - }, - "browserify-aes": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": 
"sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", - "requires": { - "buffer-xor": "^1.0.3", - "cipher-base": "^1.0.0", - "create-hash": "^1.1.0", - "evp_bytestokey": "^1.0.3", - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "browserify-cipher": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", - "requires": { - "browserify-aes": "^1.0.4", - "browserify-des": "^1.0.0", - "evp_bytestokey": "^1.0.0" - } - }, - "browserify-des": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", - "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", - "requires": { - "cipher-base": "^1.0.1", - "des.js": "^1.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "browserify-rsa": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz", - "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", - "requires": { - "bn.js": "^5.0.0", - "randombytes": "^2.0.1" - } - }, - "browserify-sign": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.2.tgz", - "integrity": "sha512-1rudGyeYY42Dk6texmv7c4VcQ0EsvVbLwZkA+AQB7SxvXxmcD93jcHie8bzecJ+ChDlmAm2Qyu0+Ccg5uhZXCg==", - "requires": { - "bn.js": "^5.2.1", - "browserify-rsa": "^4.1.0", - "create-hash": "^1.2.0", - "create-hmac": "^1.1.7", - "elliptic": "^6.5.4", - "inherits": "^2.0.4", - "parse-asn1": "^5.1.6", - "readable-stream": "^3.6.2", - "safe-buffer": "^5.2.1" - }, - "dependencies": { - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - } - } - }, - "browserify-zlib": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "requires": { - "pako": "~1.0.5" - } - }, - "browserslist": { - "version": "4.21.10", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz", - "integrity": "sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==", - "requires": { - "caniuse-lite": "^1.0.30001517", - "electron-to-chromium": "^1.4.477", - "node-releases": "^2.0.13", - "update-browserslist-db": "^1.0.11" - } - }, - "bser": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", - "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", - "dev": true, - "requires": { - "node-int64": "^0.4.0" - } - }, - "buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "buffer-alloc": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", - "integrity": 
"sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", - "requires": { - "buffer-alloc-unsafe": "^1.1.0", - "buffer-fill": "^1.0.0" - } - }, - "buffer-alloc-unsafe": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", - "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" - }, - "buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==" - }, - "buffer-fill": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", - "integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ==" - }, - "buffer-from": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" - }, - "buffer-indexof-polyfill": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/buffer-indexof-polyfill/-/buffer-indexof-polyfill-1.0.2.tgz", - "integrity": "sha512-I7wzHwA3t1/lwXQh+A5PbNvJxgfo5r3xulgpYDB5zckTu/Z9oUK9biouBKQUjEqzaz3HnAT6TYoovmE+GqSf7A==" - }, - "buffer-xor": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==" - }, - "buffers": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/buffers/-/buffers-0.1.1.tgz", - "integrity": "sha512-9q/rDEGSb/Qsvv2qvzIzdluL5k7AaJOTrw23z9reQthrbF7is4CtlT0DXyO1oei2DCp4uojjzQ7igaSHp1kAEQ==" - }, - "builtin-status-codes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", - "integrity": "sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ==" - }, - "bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" - }, - "cache-base": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", - "requires": { - "collection-visit": "^1.0.0", - "component-emitter": "^1.2.1", - "get-value": "^2.0.6", - "has-value": "^1.0.0", - "isobject": "^3.0.1", - "set-value": "^2.0.0", - "to-object-path": "^0.3.0", - "union-value": "^1.0.0", - "unset-value": "^1.0.0" - } - }, - "cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "requires": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "dependencies": { - "get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": 
"sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "requires": { - "pump": "^3.0.0" - } - }, - "lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==" - }, - "normalize-url": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==" - } - } - }, - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "call-me-maybe": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz", - "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==" - }, - "caller-callsite": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz", - "integrity": "sha512-JuG3qI4QOftFsZyOn1qq87fq5grLIyk1JYd5lJmdA+fG7aQ9pA/i3JIJGcO3q0MrRcHlOt1U+ZeHW8Dq9axALQ==", - "requires": { - "callsites": "^2.0.0" - }, - "dependencies": { - "callsites": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz", - "integrity": "sha512-ksWePWBloaWPxJYQ8TL0JHvtci6G5QTKwQ95RcWAa/lzoAKuAOflGdAK92hpHXjkwb8zLxoLNUoNYZgVsaJzvQ==" - } - } - }, - "caller-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz", - "integrity": "sha512-MCL3sf6nCSXOwCTzvPKhN18TU7AHTvdtam8DAogxcrJ8Rjfbbg7Lgng64H9Iy+vUV6VGFClN/TyxBkAebLRR4A==", - "requires": { - "caller-callsite": "^2.0.0" - } - }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==" - }, - "camel-case": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", - "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", - "requires": { - "pascal-case": "^3.1.2", - "tslib": "^2.0.3" - } - }, - "camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==" - }, - "camelcase-css": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", - "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==" - }, - "camelcase-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-2.1.0.tgz", - "integrity": "sha512-bA/Z/DERHKqoEOrp+qeGKw1QlvEQkGZSc0XaY6VnTxZr+Kv1G5zFwttpjv8qxZ/sBPT4nthwZaAcsAZTJlSKXQ==", - "requires": { - "camelcase": "^2.0.0", - "map-obj": "^1.0.0" - }, - "dependencies": { - "camelcase": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", - "integrity": 
"sha512-DLIsRzJVBQu72meAKPkWQOLcujdXT32hwdfnkI1frSiSRMK1MofjKHf+MEx0SB6fjEFXL8fBDv1dKymBlOp4Qw==" - } - } - }, - "caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "requires": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, - "caniuse-lite": { - "version": "1.0.30001525", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001525.tgz", - "integrity": "sha512-/3z+wB4icFt3r0USMwxujAqRvaD/B7rvGTsKhbhSQErVrJvkZCLhgNLJxU8MevahQVH6hCU9FsHdNUFbiwmE7Q==" - }, - "caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==" - }, - "caw": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/caw/-/caw-2.0.1.tgz", - "integrity": "sha512-Cg8/ZSBEa8ZVY9HspcGUYaK63d/bN7rqS3CYCzEGUxuYv6UlmcjzDUz2fCFFHyTvUW5Pk0I+3hkA3iXlIj6guA==", - "requires": { - "get-proxy": "^2.0.0", - "isurl": "^1.0.0-alpha5", - "tunnel-agent": "^0.6.0", - "url-to-options": "^1.0.1" - } - }, - "ccount": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/ccount/-/ccount-1.1.0.tgz", - "integrity": "sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==" - }, - "chainsaw": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/chainsaw/-/chainsaw-0.1.0.tgz", - "integrity": "sha512-75kWfWt6MEKNC8xYXIdRpDehRYY/tNSgwKaJq+dbbDcxORuVrrQ+SEHoWsniVn9XPYfP4gmdWIeDk/4YNp1rNQ==", - "requires": { - "traverse": ">=0.3.0 <0.4" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "char-regex": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", - "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", - "dev": true - }, - "character-entities": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", - "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==" - }, - "character-entities-legacy": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", - "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==" - }, - "character-reference-invalid": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", - "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==" - }, - "charset": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/charset/-/charset-1.0.1.tgz", - "integrity": "sha512-6dVyOOYjpfFcL1Y4qChrAoQLRHvj2ziyhcm0QJlhOcAhykL/k1kTUPbeo+87MNRTRdk2OIIsIXbuF3x2wi5EXg==" - }, - "cheerio": { - "version": "1.0.0-rc.12", - "resolved": 
"https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", - "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", - "requires": { - "cheerio-select": "^2.1.0", - "dom-serializer": "^2.0.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "htmlparser2": "^8.0.1", - "parse5": "^7.0.0", - "parse5-htmlparser2-tree-adapter": "^7.0.0" - }, - "dependencies": { - "dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", - "requires": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - } - }, - "domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "requires": { - "domelementtype": "^2.3.0" - } - }, - "domutils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", - "requires": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" - } - }, - "entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" - }, - "htmlparser2": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.1.tgz", - "integrity": "sha512-4lVbmc1diZC7GUJQtRQ5yBAeUCL1exyMwmForWkRLnwyzWBFxN633SALPMGYaWZvKe9j1pRZJpauvmxENSp/EA==", - "requires": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "entities": "^4.3.0" - } - } - } - }, - "cheerio-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", - "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", - "requires": { - "boolbase": "^1.0.0", - "css-select": "^5.1.0", - "css-what": "^6.1.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1" - }, - "dependencies": { - "css-select": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", - "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", - "requires": { - "boolbase": "^1.0.0", - "css-what": "^6.1.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "nth-check": "^2.0.1" - } - }, - "dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", - "requires": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - } - }, - "domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "requires": { - "domelementtype": "^2.3.0" - } - }, - "domutils": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", - "requires": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" - } - }, - "entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" - } - } - }, - "chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", - "requires": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "fsevents": "~2.3.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - } - }, - "chrome-trace-event": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", - "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==" - }, - "ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" - }, - "cipher-base": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "cjs-module-lexer": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz", - "integrity": "sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==", - "dev": true - }, - "class-utils": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", - "requires": { - "arr-union": "^3.1.0", - "define-property": "^0.2.5", - "isobject": "^3.0.0", - "static-extend": "^0.1.1" - }, - "dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": 
"sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "requires": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - } - }, - "kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" - } - } - }, - "classnames": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz", - "integrity": "sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==" - }, - "clean-css": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.1.tgz", - "integrity": "sha512-lCr8OHhiWCTw4v8POJovCoh4T7I9U11yVsPjMWWnnMmp9ZowCxyad1Pathle/9HjaDp+fdQKjO9fQydE6RHTZg==", - "requires": { - "source-map": "~0.6.0" - }, - "dependencies": { - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - } - } - }, - "clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" - }, - "cli-boxes": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==" - }, - "cli-cursor": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-4.0.0.tgz", - "integrity": "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==", - "dev": true, - "requires": { - "restore-cursor": "^4.0.0" - } - }, - "cli-table3": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.2.tgz", - "integrity": "sha512-QyavHCaIC80cMivimWu4aWHilIpiDpfm3hGmqAmXVL1UsnbLuBSMd21hTX6VY4ZSDSM73ESLeF8TOYId3rBTbw==", - "requires": { - "@colors/colors": "1.5.0", - "string-width": "^4.2.0" - } - }, - "cli-truncate": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-3.1.0.tgz", - "integrity": "sha512-wfOBkjXteqSnI59oPcJkcPl/ZmwvMMOj340qUIY1SKZCv0B9Cf4D4fAucRkIKQmsIuYK3x1rrgU7MeGRruiuiA==", - "dev": true, - "requires": { - "slice-ansi": "^5.0.0", - "string-width": "^5.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - 
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - } - } - }, - "cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - } - }, - "clone-deep": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", - "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", - "requires": { - "is-plain-object": "^2.0.4", - "kind-of": "^6.0.2", - "shallow-clone": "^3.0.0" - } - }, - "clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "requires": { - "mimic-response": "^1.0.0" - } - }, - "clsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", - "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==" - }, - "co": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", - "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", - "dev": true - }, - "coa": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz", - "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==", - "requires": { - "@types/q": "^1.5.1", - "chalk": "^2.4.1", - "q": "^1.1.2" - } - }, - "codemirror": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.1.tgz", - "integrity": "sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==", - "requires": { - "@codemirror/autocomplete": "^6.0.0", - "@codemirror/commands": "^6.0.0", - "@codemirror/language": "^6.0.0", - "@codemirror/lint": "^6.0.0", - "@codemirror/search": "^6.0.0", - "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0" - } - }, - "coffee-script": { - "version": "1.12.7", - "resolved": "https://registry.npmjs.org/coffee-script/-/coffee-script-1.12.7.tgz", - "integrity": "sha512-fLeEhqwymYat/MpTPUjSKHVYYl0ec2mOyALEMLmzr5i1isuG+6jfI2j2d5oBO3VIzgUXgBVIcOT9uH1TFxBckw==" - }, - "collapse-white-space": { - "version": 
"1.0.6", - "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz", - "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==" - }, - "collect-v8-coverage": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", - "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", - "dev": true - }, - "collection-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", - "integrity": "sha512-lNkKvzEeMBBjUGHZ+q6z9pSJla0KWAQPvtzhEV9+iGyQYG+pBpl7xKDhxoNSOZH2hhv0v5k0y2yAM4o4SjoSkw==", - "requires": { - "map-visit": "^1.0.0", - "object-visit": "^1.0.0" - } - }, - "color": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/color/-/color-3.2.1.tgz", - "integrity": "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==", - "requires": { - "color-convert": "^1.9.3", - "color-string": "^1.6.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "color-string": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", - "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", - "requires": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - }, - "colord": { - "version": "2.9.2", - "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.2.tgz", - "integrity": "sha512-Uqbg+J445nc1TKn4FoDPS6ZZqAvEDnwrH42yo8B40JSOgSLxMZ/gt3h4nmCtPLQeXhjJJkqBx7SCY35WnIixaQ==" - }, - "colorette": { - "version": "2.0.20", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", - "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" - }, - "combine-promises": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.1.0.tgz", - "integrity": "sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg==" - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "comma-separated-tokens": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", - "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==" - }, - "commander": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==" - }, - "commondir": { - "version": "1.0.1", - "resolved": 
"https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=" - }, - "component-emitter": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==" - }, - "component-event": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/component-event/-/component-event-0.2.1.tgz", - "integrity": "sha512-wGA++isMqiDq1jPYeyv2as/Bt/u+3iLW0rEa+8NQ82jAv3TgqMiCM+B2SaBdn2DfLilLjjq736YcezihRYhfxw==" - }, - "compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "requires": { - "mime-db": ">= 1.43.0 < 2" - } - }, - "compression": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", - "requires": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - } - } - }, - "compute-gcd": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/compute-gcd/-/compute-gcd-1.2.1.tgz", - "integrity": "sha512-TwMbxBNz0l71+8Sc4czv13h4kEqnchV9igQZBi6QUaz09dnz13juGnnaWWJTRsP3brxOoxeB4SA2WELLw1hCtg==", - "requires": { - "validate.io-array": "^1.0.3", - "validate.io-function": "^1.0.2", - "validate.io-integer-array": "^1.0.0" - } - }, - "compute-lcm": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/compute-lcm/-/compute-lcm-1.1.2.tgz", - "integrity": "sha512-OFNPdQAXnQhDSKioX8/XYT6sdUlXwpeMjfd6ApxMJfyZ4GxmLR1xvMERctlYhlHwIiz6CSpBc2+qYKjHGZw4TQ==", - "requires": { - "compute-gcd": "^1.2.1", - "validate.io-array": "^1.0.3", - "validate.io-function": "^1.0.2", - "validate.io-integer-array": "^1.0.0" - } - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "requires": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": 
"sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "concat-with-sourcemaps": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/concat-with-sourcemaps/-/concat-with-sourcemaps-1.1.0.tgz", - "integrity": "sha512-4gEjHJFT9e+2W/77h/DS5SGUgwDaOwprX8L/gl5+3ixnzkVJJsZWDSelmN3Oilw3LNDZjZV0yqH1hLG3k6nghg==", - "requires": { - "source-map": "^0.6.1" - }, - "dependencies": { - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - } - } - }, - "config-chain": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", - "requires": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "configstore": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", - "requires": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - "make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - } - }, - "connect-history-api-fallback": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", - "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==" - }, - "consola": { - "version": "2.15.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", - "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" - }, - "console-browserify": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", - "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==" - }, - "console-stream": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/console-stream/-/console-stream-0.1.1.tgz", - "integrity": "sha512-QC/8l9e6ofi6nqZ5PawlDgzmMw3OxIXtvolBzap/F4UDBJlDaZRSNbL/lb41C29FcbSJncBFlJFj2WJoNyZRfQ==" - }, - "constants-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", - "integrity": "sha512-xFxOwqIzR/e1k1gLiWEophSCMqXcwVHIH7akf7b/vxcUeGunlj3hvZaaqxwHsTgn+IndtkQJgSztIDWeumWJDQ==" - }, - "content-disposition": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=" - }, - "content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - 
"integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" - }, - "continuable-cache": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/continuable-cache/-/continuable-cache-0.3.1.tgz", - "integrity": "sha512-TF30kpKhTH8AGCG3dut0rdd/19B7Z+qCnrMoBLpyQu/2drZdNrrpcjPEoJeSVsQM+8KmWG5O56oPDjSSUsuTyA==" - }, - "contra": { - "version": "1.9.4", - "resolved": "https://registry.npmjs.org/contra/-/contra-1.9.4.tgz", - "integrity": "sha512-N9ArHAqwR/lhPq4OdIAwH4e1btn6EIZMAz4TazjnzCiVECcWUPTma+dRAM38ERImEJBh8NiCCpjoQruSZ+agYg==", - "requires": { - "atoa": "1.0.0", - "ticky": "1.0.1" - } - }, - "convert-source-map": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", - "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", - "requires": { - "safe-buffer": "~5.1.1" - } - }, - "cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==" - }, - "cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" - }, - "copy-descriptor": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", - "integrity": "sha512-XgZ0pFcakEUlbwQEVNg3+QAis1FyTL3Qel9FYy8pSkQqoG3PNoT0bOCQtOXcOkur21r2Eq2kI+IE+gsmAEVlYw==" - }, - "copy-text-to-clipboard": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", - "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==" - }, - "copy-webpack-plugin": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", - "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", - "requires": { - "fast-glob": "^3.2.11", - "glob-parent": "^6.0.1", - "globby": "^13.1.1", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0" - }, - "dependencies": { - "ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "requires": { - "fast-deep-equal": "^3.1.3" - } - }, - "glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "requires": { - "is-glob": "^4.0.3" - } - }, - "globby": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.2.tgz", - "integrity": 
"sha512-LKSDZXToac40u8Q1PQtZihbNdTYSNMuWe+K5l+oa6KgDzSvVrHXlJy40hUP522RjAIoNLJYBJi7ow+rbFpIhHQ==", - "requires": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, - "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "requires": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - } - }, - "slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==" - } - } - }, - "core-js": { - "version": "3.25.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.25.1.tgz", - "integrity": "sha512-sr0FY4lnO1hkQ4gLDr24K0DGnweGO1QwSj5BpfQjpSJPdqWalja4cTps29Y/PJVG/P7FYlPDkH3hO+Tr0CvDgQ==" - }, - "core-js-compat": { - "version": "3.26.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.26.1.tgz", - "integrity": "sha512-622/KzTudvXCDLRw70iHW4KKs1aGpcRcowGWyYJr2DEBfRrd6hNJybxSWJFuZYD4ma86xhrwDDHxmDaIq4EA8A==", - "requires": { - "browserslist": "^4.21.4" - } - }, - "core-js-pure": { - "version": "3.24.0", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.24.0.tgz", - "integrity": "sha512-uzMmW8cRh7uYw4JQtzqvGWRyC2T5+4zipQLQdi2FmiRqP83k3d6F3stv2iAlNhOs6cXN401FCD5TL0vvleuHgA==" - }, - "core-util-is": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", - "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" - }, - "cose-base": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", - "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", - "requires": { - "layout-base": "^1.0.0" - } - }, - "cosmiconfig": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.1.tgz", - "integrity": "sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==", - "requires": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" - } - }, - "create-ecdh": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", - "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", - "requires": { - "bn.js": "^4.1.0", - "elliptic": "^6.5.3" - }, - "dependencies": { - "bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - } - } - }, - "create-hash": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": 
"sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "requires": { - "cipher-base": "^1.0.1", - "inherits": "^2.0.1", - "md5.js": "^1.3.4", - "ripemd160": "^2.0.1", - "sha.js": "^2.4.0" - } - }, - "create-hmac": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", - "requires": { - "cipher-base": "^1.0.3", - "create-hash": "^1.1.0", - "inherits": "^2.0.1", - "ripemd160": "^2.0.0", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "crelt": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", - "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==" - }, - "cross-fetch": { - "version": "3.1.8", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", - "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", - "requires": { - "node-fetch": "^2.6.12" - } - }, - "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "crowdin-cli": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/crowdin-cli/-/crowdin-cli-0.3.0.tgz", - "integrity": "sha512-s1vSRqWalCqd+vW7nF4oZo1a2pMpEgwIiwVlPRD0HmGY3HjJwQKXqZ26NpX5qCDVN8UdEsScy+2jle0PPQBmAg==", - "requires": { - "request": "^2.53.0", - "yamljs": "^0.2.1", - "yargs": "^2.3.0" - }, - "dependencies": { - "yargs": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-2.3.0.tgz", - "integrity": "sha512-w48USdbTdaVMcE3CnXsEtSY9zYSN7dTyVnLBgrJF2quA5rLwobC9zixxfexereLGFaxjxtR3oWdydC0qoayakw==", - "requires": { - "wordwrap": "0.0.2" - } - } - } - }, - "crypto-browserify": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", - "requires": { - "browserify-cipher": "^1.0.0", - "browserify-sign": "^4.0.0", - "create-ecdh": "^4.0.0", - "create-hash": "^1.1.0", - "create-hmac": "^1.1.0", - "diffie-hellman": "^5.0.0", - "inherits": "^2.0.1", - "pbkdf2": "^3.0.3", - "public-encrypt": "^4.0.0", - "randombytes": "^2.0.0", - "randomfill": "^1.0.3" - } - }, - "crypto-js": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz", - "integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q==" - }, - "crypto-random-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==" - }, - "css-color-names": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", - "integrity": "sha512-zj5D7X1U2h2zsXOAM8EyUREBnnts6H+Jm+d1M2DbiQQcUtnqgQsMrdo8JW9R80YFUmIdBZeMu5wvYM7hcgWP/Q==" - }, - "css-declaration-sorter": { - "version": "6.3.1", - "resolved": 
"https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.3.1.tgz", - "integrity": "sha512-fBffmak0bPAnyqc/HO8C3n2sHrp9wcqQz6ES9koRF2/mLOVAx9zIQ3Y7R29sYCteTPqMCwns4WYQoCX91Xl3+w==", - "requires": {} - }, - "css-line-break": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-line-break/-/css-line-break-2.1.0.tgz", - "integrity": "sha512-FHcKFCZcAha3LwfVBhCQbW2nCNbkZXn7KVUJcsT5/P8YmfsVja0FMPJr0B903j/E69HUphKiV9iQArX8SDYA4w==", - "requires": { - "utrie": "^1.0.2" - } - }, - "css-loader": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.7.1.tgz", - "integrity": "sha512-yB5CNFa14MbPJcomwNh3wLThtkZgcNyI2bNMRt8iE5Z8Vwl7f8vQXFAzn2HDOJvtDq2NTZBUGMSUNNyrv3/+cw==", - "requires": { - "icss-utils": "^5.1.0", - "postcss": "^8.4.7", - "postcss-modules-extract-imports": "^3.0.0", - "postcss-modules-local-by-default": "^4.0.0", - "postcss-modules-scope": "^3.0.0", - "postcss-modules-values": "^4.0.0", - "postcss-value-parser": "^4.2.0", - "semver": "^7.3.5" - } - }, - "css-minimizer-webpack-plugin": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.0.0.tgz", - "integrity": "sha512-7ZXXRzRHvofv3Uac5Y+RkWRNo0ZMlcg8e9/OtrqUYmwDWJo+qs67GvdeFrXLsFb7czKNwjQhPkM0avlIYl+1nA==", - "requires": { - "cssnano": "^5.1.8", - "jest-worker": "^27.5.1", - "postcss": "^8.4.13", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0", - "source-map": "^0.6.1" - }, - "dependencies": { - "ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "requires": { - "fast-deep-equal": "^3.1.3" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, - "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "requires": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - } - } - }, - "css-select": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", - "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", - "requires": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - } - }, - "css-select-base-adapter": { - "version": "0.1.1", - "resolved": 
"https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz", - "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==" - }, - "css-tree": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", - "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", - "requires": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" - }, - "dependencies": { - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - } - } - }, - "css-what": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", - "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==" - }, - "cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==" - }, - "cssnano": { - "version": "5.1.12", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.12.tgz", - "integrity": "sha512-TgvArbEZu0lk/dvg2ja+B7kYoD7BBCmn3+k58xD0qjrGHsFzXY/wKTo9M5egcUCabPol05e/PVoIu79s2JN4WQ==", - "requires": { - "cssnano-preset-default": "^5.2.12", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" - } - }, - "cssnano-preset-advanced": { - "version": "5.3.10", - "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", - "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", - "requires": { - "autoprefixer": "^10.4.12", - "cssnano-preset-default": "^5.2.14", - "postcss-discard-unused": "^5.1.0", - "postcss-merge-idents": "^5.1.1", - "postcss-reduce-idents": "^5.2.0", - "postcss-zindex": "^5.1.0" - } - }, - "cssnano-preset-default": { - "version": "5.2.14", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", - "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", - "requires": { - "css-declaration-sorter": "^6.3.1", - "cssnano-utils": "^3.1.0", - "postcss-calc": "^8.2.3", - "postcss-colormin": "^5.3.1", - "postcss-convert-values": "^5.1.3", - "postcss-discard-comments": "^5.1.2", - "postcss-discard-duplicates": "^5.1.0", - "postcss-discard-empty": "^5.1.1", - "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.7", - "postcss-merge-rules": "^5.1.4", - "postcss-minify-font-values": "^5.1.0", - "postcss-minify-gradients": "^5.1.1", - "postcss-minify-params": "^5.1.4", - "postcss-minify-selectors": "^5.2.1", - "postcss-normalize-charset": "^5.1.0", - "postcss-normalize-display-values": "^5.1.0", - "postcss-normalize-positions": "^5.1.1", - "postcss-normalize-repeat-style": "^5.1.1", - "postcss-normalize-string": "^5.1.0", - "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.1", - "postcss-normalize-url": "^5.1.0", - "postcss-normalize-whitespace": "^5.1.1", - "postcss-ordered-values": "^5.1.3", - "postcss-reduce-initial": "^5.1.2", - "postcss-reduce-transforms": "^5.1.0", - "postcss-svgo": "^5.1.0", - "postcss-unique-selectors": "^5.1.1" - } - }, - "cssnano-util-get-arguments": { - "version": "4.0.0", - 
"resolved": "https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz", - "integrity": "sha512-6RIcwmV3/cBMG8Aj5gucQRsJb4vv4I4rn6YjPbVWd5+Pn/fuG+YseGvXGk00XLkoZkaj31QOD7vMUpNPC4FIuw==" - }, - "cssnano-util-get-match": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz", - "integrity": "sha512-JPMZ1TSMRUPVIqEalIBNoBtAYbi8okvcFns4O0YIhcdGebeYZK7dMyHJiQ6GqNBA9kE0Hym4Aqym5rPdsV/4Cw==" - }, - "cssnano-util-raw-cache": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz", - "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==", - "requires": { - "postcss": "^7.0.0" - }, - "dependencies": { - "picocolors": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", - "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" - }, - "postcss": { - "version": "7.0.39", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", - "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", - "requires": { - "picocolors": "^0.2.1", - "source-map": "^0.6.1" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - } - } - }, - "cssnano-util-same-parent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz", - "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==" - }, - "cssnano-utils": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", - "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", - "requires": {} - }, - "csso": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", - "requires": { - "css-tree": "^1.1.2" - } - }, - "csstype": { - "version": "3.0.10", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.10.tgz", - "integrity": "sha512-2u44ZG2OcNUO9HDp/Jl8C07x6pU/eTR3ncV91SiK3dhG9TWvRVsCoJw14Ckx5DgWkzGA3waZWO3d7pgqpUI/XA==" - }, - "currently-unhandled": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", - "integrity": "sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==", - "requires": { - "array-find-index": "^1.0.1" - } - }, - "cytoscape": { - "version": "3.28.1", - "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.28.1.tgz", - "integrity": "sha512-xyItz4O/4zp9/239wCcH8ZcFuuZooEeF8KHRmzjDfGdXsj3OG9MFSMA0pJE0uX3uCN/ygof6hHf4L7lst+JaDg==", - "requires": { - "heap": "^0.2.6", - "lodash": "^4.17.21" - } - }, - "cytoscape-cose-bilkent": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", - "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", - "requires": { - "cose-base": 
"^1.0.0" - } - }, - "cytoscape-fcose": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", - "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", - "requires": { - "cose-base": "^2.2.0" - }, - "dependencies": { - "cose-base": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", - "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", - "requires": { - "layout-base": "^2.0.0" - } - }, - "layout-base": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", - "integrity": "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==" - } - } - }, - "d3": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", - "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", - "requires": { - "d3-array": "3", - "d3-axis": "3", - "d3-brush": "3", - "d3-chord": "3", - "d3-color": "3", - "d3-contour": "4", - "d3-delaunay": "6", - "d3-dispatch": "3", - "d3-drag": "3", - "d3-dsv": "3", - "d3-ease": "3", - "d3-fetch": "3", - "d3-force": "3", - "d3-format": "3", - "d3-geo": "3", - "d3-hierarchy": "3", - "d3-interpolate": "3", - "d3-path": "3", - "d3-polygon": "3", - "d3-quadtree": "3", - "d3-random": "3", - "d3-scale": "4", - "d3-scale-chromatic": "3", - "d3-selection": "3", - "d3-shape": "3", - "d3-time": "3", - "d3-time-format": "4", - "d3-timer": "3", - "d3-transition": "3", - "d3-zoom": "3" - } - }, - "d3-array": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", - "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", - "requires": { - "internmap": "1 - 2" - } - }, - "d3-axis": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", - "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==" - }, - "d3-brush": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", - "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", - "requires": { - "d3-dispatch": "1 - 3", - "d3-drag": "2 - 3", - "d3-interpolate": "1 - 3", - "d3-selection": "3", - "d3-transition": "3" - } - }, - "d3-chord": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", - "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", - "requires": { - "d3-path": "1 - 3" - } - }, - "d3-color": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", - "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==" - }, - "d3-contour": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", - "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", - "requires": { - "d3-array": "^3.2.0" - } - }, - "d3-delaunay": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", - "integrity": 
"sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", - "requires": { - "delaunator": "5" - } - }, - "d3-dispatch": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", - "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==" - }, - "d3-drag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", - "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", - "requires": { - "d3-dispatch": "1 - 3", - "d3-selection": "3" - } - }, - "d3-dsv": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", - "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", - "requires": { - "commander": "7", - "iconv-lite": "0.6", - "rw": "1" - }, - "dependencies": { - "commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==" - }, - "iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - } - } - } - }, - "d3-ease": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", - "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==" - }, - "d3-fetch": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", - "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", - "requires": { - "d3-dsv": "1 - 3" - } - }, - "d3-force": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", - "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", - "requires": { - "d3-dispatch": "1 - 3", - "d3-quadtree": "1 - 3", - "d3-timer": "1 - 3" - } - }, - "d3-format": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", - "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==" - }, - "d3-geo": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", - "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", - "requires": { - "d3-array": "2.5.0 - 3" - } - }, - "d3-hierarchy": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", - "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==" - }, - "d3-interpolate": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", - "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", - "requires": { - "d3-color": "1 - 3" - } - }, - "d3-path": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", - "integrity": 
"sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==" - }, - "d3-polygon": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", - "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==" - }, - "d3-quadtree": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", - "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==" - }, - "d3-random": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", - "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==" - }, - "d3-scale": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", - "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", - "requires": { - "d3-array": "2.10.0 - 3", - "d3-format": "1 - 3", - "d3-interpolate": "1.2.0 - 3", - "d3-time": "2.1.1 - 3", - "d3-time-format": "2 - 4" - } - }, - "d3-scale-chromatic": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", - "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", - "requires": { - "d3-color": "1 - 3", - "d3-interpolate": "1 - 3" - } - }, - "d3-selection": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", - "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==" - }, - "d3-shape": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", - "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", - "requires": { - "d3-path": "^3.1.0" - } - }, - "d3-time": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", - "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", - "requires": { - "d3-array": "2 - 3" - } - }, - "d3-time-format": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", - "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", - "requires": { - "d3-time": "1 - 3" - } - }, - "d3-timer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", - "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==" - }, - "d3-transition": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", - "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", - "requires": { - "d3-color": "1 - 3", - "d3-dispatch": "1 - 3", - "d3-ease": "1 - 3", - "d3-interpolate": "1 - 3", - "d3-timer": "1 - 3" - } - }, - "d3-zoom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", - "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", - "requires": { - "d3-dispatch": "1 - 3", - "d3-drag": "2 - 3", - "d3-interpolate": "1 - 3", - 
"d3-selection": "2 - 3", - "d3-transition": "2 - 3" - } - }, - "dagre-d3-es": { - "version": "7.0.9", - "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.9.tgz", - "integrity": "sha512-rYR4QfVmy+sR44IBDvVtcAmOReGBvRCWDpO2QjYwqgh9yijw6eSHBqaPG/LIOEy7aBsniLvtMW6pg19qJhq60w==", - "requires": { - "d3": "^7.8.2", - "lodash-es": "^4.17.21" - } - }, - "dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "dayjs": { - "version": "1.11.10", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.10.tgz", - "integrity": "sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ==" - }, - "debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "requires": { - "ms": "2.1.2" - } - }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==" - }, - "decode-named-character-reference": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", - "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", - "requires": { - "character-entities": "^2.0.0" - }, - "dependencies": { - "character-entities": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", - "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==" - } - } - }, - "decode-uri-component": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.2.tgz", - "integrity": "sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==" - }, - "decompress": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/decompress/-/decompress-4.2.1.tgz", - "integrity": "sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ==", - "requires": { - "decompress-tar": "^4.0.0", - "decompress-tarbz2": "^4.0.0", - "decompress-targz": "^4.0.0", - "decompress-unzip": "^4.0.1", - "graceful-fs": "^4.1.10", - "make-dir": "^1.0.0", - "pify": "^2.3.0", - "strip-dirs": "^2.0.0" - }, - "dependencies": { - "make-dir": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", - "requires": { - "pify": "^3.0.0" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==" - } - } - }, - "decompress-response": { - "version": "3.3.0", - 
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "requires": { - "mimic-response": "^1.0.0" - } - }, - "decompress-tar": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/decompress-tar/-/decompress-tar-4.1.1.tgz", - "integrity": "sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ==", - "requires": { - "file-type": "^5.2.0", - "is-stream": "^1.1.0", - "tar-stream": "^1.5.2" - }, - "dependencies": { - "file-type": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", - "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - } - } - }, - "decompress-tarbz2": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/decompress-tarbz2/-/decompress-tarbz2-4.1.1.tgz", - "integrity": "sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A==", - "requires": { - "decompress-tar": "^4.1.0", - "file-type": "^6.1.0", - "is-stream": "^1.1.0", - "seek-bzip": "^1.0.5", - "unbzip2-stream": "^1.0.9" - }, - "dependencies": { - "file-type": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-6.2.0.tgz", - "integrity": "sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - } - } - }, - "decompress-targz": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/decompress-targz/-/decompress-targz-4.1.1.tgz", - "integrity": "sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w==", - "requires": { - "decompress-tar": "^4.1.1", - "file-type": "^5.2.0", - "is-stream": "^1.1.0" - }, - "dependencies": { - "file-type": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", - "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - } - } - }, - "decompress-unzip": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/decompress-unzip/-/decompress-unzip-4.0.1.tgz", - "integrity": "sha512-1fqeluvxgnn86MOh66u8FjbtJpAFv5wgCT9Iw8rcBqQcCo5tO8eiJw7NNTrvt9n4CRBVq7CstiS922oPgyGLrw==", - "requires": { - "file-type": "^3.8.0", - "get-stream": "^2.2.0", - "pify": "^2.3.0", - "yauzl": "^2.4.2" - }, - "dependencies": { - "file-type": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-3.9.0.tgz", - "integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA==" - }, - "get-stream": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz", - "integrity": 
"sha512-AUGhbbemXxrZJRD5cDvKtQxLuYaIbNtDTK8YqupCI393Q2KSTreEsLUN3ZxAWFGiKTzL6nKuzfcIvieflUX9qA==", - "requires": { - "object-assign": "^4.0.1", - "pinkie-promise": "^2.0.0" - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==" - } - } - }, - "dedent": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz", - "integrity": "sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==", - "dev": true, - "requires": {} - }, - "deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==" - }, - "deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" - }, - "deepmerge": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz", - "integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==" - }, - "default-gateway": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", - "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", - "requires": { - "execa": "^5.0.0" - } - }, - "defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" - }, - "define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==" - }, - "define-properties": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.4.tgz", - "integrity": "sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==", - "requires": { - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - } - }, - "define-property": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", - "requires": { - "is-descriptor": "^1.0.2", - "isobject": "^3.0.1" - } - }, - "del": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", - "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", - "requires": { - "globby": "^11.0.1", - "graceful-fs": "^4.2.4", - "is-glob": "^4.0.1", - "is-path-cwd": "^2.2.0", - "is-path-inside": "^3.0.2", - "p-map": "^4.0.0", - "rimraf": "^3.0.2", - "slash": "^3.0.0" - } - }, - "delaunator": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", - "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", - "requires": { - "robust-predicates": "^3.0.2" - } - }, - 
"delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" - }, - "depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==" - }, - "dequal": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", - "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==" - }, - "des.js": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.1.0.tgz", - "integrity": "sha512-r17GxjhUCjSRy8aiJpr8/UadFIzMzJGexI3Nmz4ADi9LYSFx4gTBp80+NaX/YsXWWLhpZ7v/v/ubEc/bCNfKwg==", - "requires": { - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0" - } - }, - "destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==" - }, - "detab": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/detab/-/detab-2.0.4.tgz", - "integrity": "sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==", - "requires": { - "repeat-string": "^1.5.4" - } - }, - "detect-newline": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", - "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", - "dev": true - }, - "detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" - }, - "detect-port": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.3.0.tgz", - "integrity": "sha512-E+B1gzkl2gqxt1IhUzwjrxBKRqx1UzC3WLONHinn8S3T6lwV/agVCyitiFOsGJ/eYuEUBvD71MZHy3Pv1G9doQ==", - "requires": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - } - } - }, - "detect-port-alt": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", - "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", - "requires": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - } - } - }, - "diacritics-map": 
{ - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/diacritics-map/-/diacritics-map-0.1.0.tgz", - "integrity": "sha512-3omnDTYrGigU0i4cJjvaKwD52B8aoqyX/NEIkukFFkogBemsIbhSa1O414fpTp5nuszJG6lvQ5vBvDVNCbSsaQ==" - }, - "didi": { - "version": "10.2.1", - "resolved": "https://registry.npmjs.org/didi/-/didi-10.2.1.tgz", - "integrity": "sha512-NaPoyMxu+78E2O6xE9JQkeTpmVhMcu8xneIKtSfqBuGUBU7LmNUaYtJXJQ2JWRx6iYY69oj4nerXVRWGXAw/IQ==" - }, - "diff": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz", - "integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==" - }, - "diff-sequences": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", - "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", - "dev": true - }, - "diffie-hellman": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", - "requires": { - "bn.js": "^4.1.0", - "miller-rabin": "^4.0.0", - "randombytes": "^2.0.0" - }, - "dependencies": { - "bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - } - } - }, - "dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "requires": { - "path-type": "^4.0.0" - } - }, - "discontinuous-range": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/discontinuous-range/-/discontinuous-range-1.0.0.tgz", - "integrity": "sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ==" - }, - "dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" - }, - "dns-packet": { - "version": "5.4.0", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.4.0.tgz", - "integrity": "sha512-EgqGeaBB8hLiHLZtp/IbaDQTL8pZ0+IvwzSHA6d7VyMDM+B9hgddEMa9xjK5oYnw0ci0JQ6g2XCD7/f6cafU6g==", - "requires": { - "@leichtgewicht/ip-codec": "^2.0.1" - } - }, - "docusaurus": { - "version": "1.14.7", - "resolved": "https://registry.npmjs.org/docusaurus/-/docusaurus-1.14.7.tgz", - "integrity": "sha512-UWqar4ZX0lEcpLc5Tg+MwZ2jhF/1n1toCQRSeoxDON/D+E9ToLr+vTRFVMP/Tk84NXSVjZFRlrjWwM2pXzvLsQ==", - "requires": { - "@babel/core": "^7.12.3", - "@babel/plugin-proposal-class-properties": "^7.12.1", - "@babel/plugin-proposal-object-rest-spread": "^7.12.1", - "@babel/polyfill": "^7.12.1", - "@babel/preset-env": "^7.12.1", - "@babel/preset-react": "^7.12.5", - "@babel/register": "^7.12.1", - "@babel/traverse": "^7.12.5", - "@babel/types": "^7.12.6", - "autoprefixer": "^9.7.5", - "babylon": "^6.18.0", - "chalk": "^3.0.0", - "classnames": "^2.2.6", - "commander": "^4.0.1", - "crowdin-cli": "^0.3.0", - "cssnano": "^4.1.10", - "enzyme": "^3.10.0", - "enzyme-adapter-react-16": "^1.15.1", - "escape-string-regexp": "^2.0.0", - "express": "^4.17.1", - "feed": "^4.2.1", - "fs-extra": "^9.0.1", - "gaze": "^1.1.3", - 
"github-slugger": "^1.3.0", - "glob": "^7.1.6", - "highlight.js": "^9.16.2", - "imagemin": "^6.0.0", - "imagemin-gifsicle": "^6.0.1", - "imagemin-jpegtran": "^6.0.0", - "imagemin-optipng": "^6.0.0", - "imagemin-svgo": "^7.0.0", - "lodash": "^4.17.20", - "markdown-toc": "^1.2.0", - "mkdirp": "^0.5.1", - "portfinder": "^1.0.28", - "postcss": "^7.0.23", - "prismjs": "^1.22.0", - "react": "^16.8.4", - "react-dev-utils": "^11.0.1", - "react-dom": "^16.8.4", - "remarkable": "^2.0.0", - "request": "^2.88.0", - "shelljs": "^0.8.4", - "sitemap": "^3.2.2", - "tcp-port-used": "^1.0.1", - "tiny-lr": "^1.1.1", - "tree-node-cli": "^1.2.5", - "truncate-html": "^1.0.3" - }, - "dependencies": { - "@babel/code-frame": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", - "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", - "requires": { - "@babel/highlight": "^7.10.4" - } - }, - "airbnb-prop-types": { - "version": "2.16.0", - "resolved": "https://registry.npmjs.org/airbnb-prop-types/-/airbnb-prop-types-2.16.0.tgz", - "integrity": "sha512-7WHOFolP/6cS96PhKNrslCLMYAI8yB1Pp6u6XmxozQOiZbsI5ycglZr5cHhBFfuRcQQjzCMith5ZPZdYiJCxUg==", - "requires": { - "array.prototype.find": "^2.1.1", - "function.prototype.name": "^1.1.2", - "is-regex": "^1.1.0", - "object-is": "^1.1.2", - "object.assign": "^4.1.0", - "object.entries": "^1.1.2", - "prop-types": "^15.7.2", - "prop-types-exact": "^1.2.0", - "react-is": "^16.13.1" - } - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "autoprefixer": { - "version": "9.8.8", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.8.tgz", - "integrity": "sha512-eM9d/swFopRt5gdJ7jrpCwgvEMIayITpojhkkSMRsFHYuH5bkSQ4p/9qTEHtmNudUZh22Tehu7I6CxAW0IXTKA==", - "requires": { - "browserslist": "^4.12.0", - "caniuse-lite": "^1.0.30001109", - "normalize-range": "^0.1.2", - "num2fraction": "^1.2.2", - "picocolors": "^0.2.1", - "postcss": "^7.0.32", - "postcss-value-parser": "^4.1.0" - } - }, - "braces": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "requires": { - "arr-flatten": "^1.1.0", - "array-unique": "^0.3.2", - "extend-shallow": "^2.0.1", - "fill-range": "^4.0.0", - "isobject": "^3.0.1", - "repeat-element": "^1.1.2", - "snapdragon": "^0.8.1", - "snapdragon-node": "^2.0.1", - "split-string": "^3.0.2", - "to-regex": "^3.0.1" - } - }, - "browserslist": { - "version": "4.14.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.14.2.tgz", - "integrity": "sha512-HI4lPveGKUR0x2StIz+2FXfDk9SfVMrxn6PLh1JeGUwcuoDkdKZebWiyLRJ68iIPDpMI4JLVDf7S7XzslgWOhw==", - "requires": { - "caniuse-lite": "^1.0.30001125", - "electron-to-chromium": "^1.3.564", - "escalade": "^3.0.2", - "node-releases": "^1.1.61" - } - }, - "chalk": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==" - }, - "cosmiconfig": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz", - "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==", - "requires": { - "import-fresh": "^2.0.0", - "is-directory": "^0.3.1", - "js-yaml": "^3.13.1", - "parse-json": "^4.0.0" - } - }, - "css-declaration-sorter": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz", - "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==", - "requires": { - "postcss": "^7.0.1", - "timsort": "^0.3.0" - } - }, - "css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "requires": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "css-tree": { - "version": "1.0.0-alpha.37", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", - "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", - "requires": { - "mdn-data": "2.0.4", - "source-map": "^0.6.1" - } - }, - "css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==" - }, - "cssnano": { - "version": "4.1.11", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.11.tgz", - "integrity": "sha512-6gZm2htn7xIPJOHY824ERgj8cNPgPxyCSnkXc4v7YvNW+TdVfzgngHcEhy/8D11kUWRUMbke+tC+AUcUsnMz2g==", - "requires": { - "cosmiconfig": "^5.0.0", - "cssnano-preset-default": "^4.0.8", - "is-resolvable": "^1.0.0", - "postcss": "^7.0.0" - } - }, - "cssnano-preset-default": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.8.tgz", - "integrity": "sha512-LdAyHuq+VRyeVREFmuxUZR1TXjQm8QQU/ktoo/x7bz+SdOge1YKc5eMN6pRW7YWBmyq59CqYba1dJ5cUukEjLQ==", - "requires": { - "css-declaration-sorter": "^4.0.1", - "cssnano-util-raw-cache": "^4.0.1", - "postcss": "^7.0.0", - "postcss-calc": "^7.0.1", - "postcss-colormin": "^4.0.3", - "postcss-convert-values": "^4.0.1", - "postcss-discard-comments": "^4.0.2", - "postcss-discard-duplicates": "^4.0.2", - 
"postcss-discard-empty": "^4.0.1", - "postcss-discard-overridden": "^4.0.1", - "postcss-merge-longhand": "^4.0.11", - "postcss-merge-rules": "^4.0.3", - "postcss-minify-font-values": "^4.0.2", - "postcss-minify-gradients": "^4.0.2", - "postcss-minify-params": "^4.0.2", - "postcss-minify-selectors": "^4.0.2", - "postcss-normalize-charset": "^4.0.1", - "postcss-normalize-display-values": "^4.0.2", - "postcss-normalize-positions": "^4.0.2", - "postcss-normalize-repeat-style": "^4.0.2", - "postcss-normalize-string": "^4.0.2", - "postcss-normalize-timing-functions": "^4.0.2", - "postcss-normalize-unicode": "^4.0.1", - "postcss-normalize-url": "^4.0.1", - "postcss-normalize-whitespace": "^4.0.2", - "postcss-ordered-values": "^4.1.2", - "postcss-reduce-initial": "^4.0.3", - "postcss-reduce-transforms": "^4.0.2", - "postcss-svgo": "^4.0.3", - "postcss-unique-selectors": "^4.0.1" - } - }, - "dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "requires": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "requires": { - "dom-serializer": "0", - "domelementtype": "1" - }, - "dependencies": { - "domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - } - } - }, - "enzyme-adapter-react-16": { - "version": "1.15.7", - "resolved": "https://registry.npmjs.org/enzyme-adapter-react-16/-/enzyme-adapter-react-16-1.15.7.tgz", - "integrity": "sha512-LtjKgvlTc/H7adyQcj+aq0P0H07LDL480WQl1gU512IUyaDo/sbOaNDdZsJXYW2XaoPqrLLE9KbZS+X2z6BASw==", - "requires": { - "enzyme-adapter-utils": "^1.14.1", - "enzyme-shallow-equal": "^1.0.5", - "has": "^1.0.3", - "object.assign": "^4.1.4", - "object.values": "^1.1.5", - "prop-types": "^15.8.1", - "react-is": "^16.13.1", - "react-test-renderer": "^16.0.0-0", - "semver": "^5.7.0" - } - }, - "enzyme-adapter-utils": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/enzyme-adapter-utils/-/enzyme-adapter-utils-1.14.1.tgz", - "integrity": "sha512-JZgMPF1QOI7IzBj24EZoDpaeG/p8Os7WeBZWTJydpsH7JRStc7jYbHE4CmNQaLqazaGFyLM8ALWA3IIZvxW3PQ==", - "requires": { - "airbnb-prop-types": "^2.16.0", - "function.prototype.name": "^1.1.5", - "has": "^1.0.3", - "object.assign": "^4.1.4", - "object.fromentries": "^2.0.5", - "prop-types": "^15.8.1", - "semver": "^5.7.1" - } - }, - "escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==" - }, - "filesize": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/filesize/-/filesize-6.1.0.tgz", - "integrity": "sha512-LpCHtPQ3sFx67z+uh2HnSyWSLLu5Jxo21795uRDuar/EOuYWXib5EmPaGIBuSnRqH2IODiKA2k5re/K9OnN/Yg==" - }, - "fill-range": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", - "integrity": 
"sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==", - "requires": { - "extend-shallow": "^2.0.1", - "is-number": "^3.0.0", - "repeat-string": "^1.6.1", - "to-regex-range": "^2.1.0" - } - }, - "fork-ts-checker-webpack-plugin": { - "version": "4.1.6", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-4.1.6.tgz", - "integrity": "sha512-DUxuQaKoqfNne8iikd14SAkh5uw4+8vNifp6gmA73yYNS6ywLIWSLD/n/mBzHQRpW3J7rbATEakmiA8JvkTyZw==", - "requires": { - "@babel/code-frame": "^7.5.5", - "chalk": "^2.4.1", - "micromatch": "^3.1.10", - "minimatch": "^3.0.4", - "semver": "^5.6.0", - "tapable": "^1.0.0", - "worker-rpc": "^0.1.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "requires": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "globby": { - "version": "11.0.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz", - "integrity": "sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==", - "requires": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.1.1", - "ignore": "^5.1.4", - "merge2": "^1.3.0", - "slash": "^3.0.0" - } - }, - "gzip-size": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-5.1.1.tgz", - 
"integrity": "sha512-FNHi6mmoHvs1mxZAds4PpdCS6QG8B4C1krxJsMutgxl5t3+GlRTzzI3NEkifXx2pVsOvJdOGSmIgDhQ55FwdPA==", - "requires": { - "duplexer": "^0.1.1", - "pify": "^4.0.1" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "immer": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/immer/-/immer-8.0.1.tgz", - "integrity": "sha512-aqXhGP7//Gui2+UrEtvxZxSquQVXTpZ7KDxfCcKAF3Vysvw0CViVaW9RZ1j1xlIYqaaaipBoqdqeibkc18PNvA==" - }, - "import-fresh": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz", - "integrity": "sha512-eZ5H8rcgYazHbKC3PG4ClHNykCSxtAhxSSEM+2mb+7evD2CKF5V7c0dNum7AdpDh0ZdICwZY9sRSn8f+KH96sg==", - "requires": { - "caller-path": "^2.0.0", - "resolve-from": "^3.0.0" - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "requires": { - "is-plain-object": "^2.0.4" - } - }, - "is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "loader-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz", - "integrity": "sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==", - "requires": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - } - }, - "mdn-data": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", - "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==" - }, - "micromatch": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "requires": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "braces": "^2.3.1", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "extglob": "^2.0.4", - "fragment-cache": "^0.2.1", - "kind-of": "^6.0.2", - "nanomatch": "^1.2.9", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.2" - }, - "dependencies": { - "extend-shallow": { - "version": 
"3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", - "requires": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - } - } - } - }, - "node-releases": { - "version": "1.1.77", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.77.tgz", - "integrity": "sha512-rB1DUFUNAN4Gn9keO2K1efO35IDK7yKHCdCaIMvFO7yUYmmZYeDjnGKle26G4rwj+LKRQpjyUUvMkPglwGCYNQ==" - }, - "normalize-url": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz", - "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==" - }, - "nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "requires": { - "boolbase": "~1.0.0" - } - }, - "open": { - "version": "7.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", - "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", - "requires": { - "is-docker": "^2.0.0", - "is-wsl": "^2.1.1" - } - }, - "parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==", - "requires": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - } - }, - "picocolors": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", - "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" - }, - "postcss": { - "version": "7.0.39", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", - "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", - "requires": { - "picocolors": "^0.2.1", - "source-map": "^0.6.1" - } - }, - "postcss-calc": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", - "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", - "requires": { - "postcss": "^7.0.27", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.0.2" - } - }, - "postcss-colormin": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz", - "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==", - "requires": { - "browserslist": "^4.0.0", - "color": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-convert-values": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz", - "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==", - "requires": { - 
"postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-discard-comments": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz", - "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==", - "requires": { - "postcss": "^7.0.0" - } - }, - "postcss-discard-duplicates": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz", - "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==", - "requires": { - "postcss": "^7.0.0" - } - }, - "postcss-discard-empty": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz", - "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==", - "requires": { - "postcss": "^7.0.0" - } - }, - "postcss-discard-overridden": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz", - "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==", - "requires": { - "postcss": "^7.0.0" - } - }, - "postcss-merge-longhand": { - "version": "4.0.11", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz", - "integrity": "sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==", - "requires": { - "css-color-names": "0.0.4", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "stylehacks": "^4.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-merge-rules": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz", - "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==", - "requires": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "cssnano-util-same-parent": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0", - "vendors": "^1.0.0" - }, - "dependencies": { - "postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "requires": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - } - } - } - }, - "postcss-minify-font-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz", - "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==", - "requires": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - 
"dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-minify-gradients": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz", - "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==", - "requires": { - "cssnano-util-get-arguments": "^4.0.0", - "is-color-stop": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-minify-params": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz", - "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==", - "requires": { - "alphanum-sort": "^1.0.0", - "browserslist": "^4.0.0", - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "uniqs": "^2.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-minify-selectors": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz", - "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==", - "requires": { - "alphanum-sort": "^1.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "dependencies": { - "postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "requires": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - } - } - } - }, - "postcss-normalize-charset": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz", - "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==", - "requires": { - "postcss": "^7.0.0" - } - }, - "postcss-normalize-display-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz", - "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==", - "requires": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": 
"sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-normalize-positions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz", - "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==", - "requires": { - "cssnano-util-get-arguments": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-normalize-repeat-style": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz", - "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==", - "requires": { - "cssnano-util-get-arguments": "^4.0.0", - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-normalize-string": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz", - "integrity": "sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==", - "requires": { - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-normalize-timing-functions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz", - "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==", - "requires": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-normalize-unicode": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz", - "integrity": "sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==", - "requires": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": 
"sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-normalize-url": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz", - "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==", - "requires": { - "is-absolute-url": "^2.0.0", - "normalize-url": "^3.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-normalize-whitespace": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz", - "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==", - "requires": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-ordered-values": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz", - "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==", - "requires": { - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-reduce-initial": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz", - "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==", - "requires": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0" - } - }, - "postcss-reduce-transforms": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz", - "integrity": "sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==", - "requires": { - "cssnano-util-get-match": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-svgo": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.3.tgz", - "integrity": "sha512-NoRbrcMWTtUghzuKSoIm6XV+sJdvZ7GZSc3wdBN0W19FTtp2ko8NqLsgoh/m9CzNhU3KLPvQmjIwtaNFkaFTvw==", - "requires": { - "postcss": "^7.0.0", - "postcss-value-parser": 
"^3.0.0", - "svgo": "^1.0.0" - }, - "dependencies": { - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } - } - }, - "postcss-unique-selectors": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz", - "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==", - "requires": { - "alphanum-sort": "^1.0.0", - "postcss": "^7.0.0", - "uniqs": "^2.0.0" - } - }, - "prompts": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.0.tgz", - "integrity": "sha512-awZAKrk3vN6CroQukBL+R9051a4R3zCZBlJm/HBfrSZ8iTpYix3VX1vU4mveiLpiwmOJT4wokTF9m6HUk4KqWQ==", - "requires": { - "kleur": "^3.0.3", - "sisteransi": "^1.0.5" - } - }, - "react": { - "version": "16.14.0", - "resolved": "https://registry.npmjs.org/react/-/react-16.14.0.tgz", - "integrity": "sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "prop-types": "^15.6.2" - } - }, - "react-dev-utils": { - "version": "11.0.4", - "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-11.0.4.tgz", - "integrity": "sha512-dx0LvIGHcOPtKbeiSUM4jqpBl3TcY7CDjZdfOIcKeznE7BWr9dg0iPG90G5yfVQ+p/rGNMXdbfStvzQZEVEi4A==", - "requires": { - "@babel/code-frame": "7.10.4", - "address": "1.1.2", - "browserslist": "4.14.2", - "chalk": "2.4.2", - "cross-spawn": "7.0.3", - "detect-port-alt": "1.1.6", - "escape-string-regexp": "2.0.0", - "filesize": "6.1.0", - "find-up": "4.1.0", - "fork-ts-checker-webpack-plugin": "4.1.6", - "global-modules": "2.0.0", - "globby": "11.0.1", - "gzip-size": "5.1.1", - "immer": "8.0.1", - "is-root": "2.1.0", - "loader-utils": "2.0.0", - "open": "^7.0.2", - "pkg-up": "3.1.0", - "prompts": "2.4.0", - "react-error-overlay": "^6.0.9", - "recursive-readdir": "2.2.2", - "shell-quote": "1.7.2", - "strip-ansi": "6.0.0", - "text-table": "0.2.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "dependencies": { - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" - } - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "react-dom": { - "version": "16.14.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.14.0.tgz", - "integrity": "sha512-1gCeQXDLoIqMgqD3IO2Ah9bnf0w9kzhwN5q4FGnHZ67hBm9yePzB5JJAIQCc8x3pFnNlwFq4RidZggNAAkzWWw==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "prop-types": "^15.6.2", - "scheduler": "^0.19.1" - } - }, - "react-test-renderer": { - "version": "16.14.0", - "resolved": "https://registry.npmjs.org/react-test-renderer/-/react-test-renderer-16.14.0.tgz", - "integrity": "sha512-L8yPjqPE5CZO6rKsKXRO/rVPiaCOy0tQQJbC+UjPNlobl5mad59lvPjwFsQHTvL03caVDIVr9x9/OSgDe6I5Eg==", - "requires": { - "object-assign": "^4.1.1", - "prop-types": "^15.6.2", - "react-is": "^16.8.6", - "scheduler": "^0.19.1" - } - }, - "resolve-from": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", - "integrity": "sha512-GnlH6vxLymXJNMBo7XP1fJIzBFbdYt49CuTwmB/6N53t+kMPRMFKz783LlQ4tv28XoQfMWinAJX6WCGf2IlaIw==" - }, - "scheduler": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.19.1.tgz", - "integrity": "sha512-n/zwRWRYSUj0/3g/otKDRPMh6qv2SYMWNq85IEa8iZyAv8od9zDYpGSnpBEjNgcMNq6Scbu5KfIPxNF72R/2EA==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, - "shell-quote": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz", - "integrity": "sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==" - }, - "sitemap": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-3.2.2.tgz", - "integrity": "sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg==", - "requires": { - "lodash.chunk": "^4.2.0", - "lodash.padstart": "^4.6.1", - "whatwg-url": "^7.0.0", - "xmlbuilder": "^13.0.0" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "stylehacks": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz", - "integrity": 
"sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==", - "requires": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "dependencies": { - "postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "requires": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - } - } - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - }, - "svgo": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", - "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", - "requires": { - "chalk": "^2.4.1", - "coa": "^2.0.2", - "css-select": "^2.0.0", - "css-select-base-adapter": "^0.1.1", - "css-tree": "1.0.0-alpha.37", - "csso": "^4.0.2", - "js-yaml": "^3.13.1", - "mkdirp": "~0.5.1", - "object.values": "^1.1.0", - "sax": "~1.2.4", - "stable": "^0.1.8", - "unquote": "~1.1.1", - "util.promisify": "~1.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": 
"sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==" - }, - "to-regex-range": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==", - "requires": { - "is-number": "^3.0.0", - "repeat-string": "^1.6.1" - } - }, - "tr46": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", - "integrity": "sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==", - "requires": { - "punycode": "^2.1.0" - } - }, - "webidl-conversions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", - "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" - }, - "whatwg-url": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", - "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", - "requires": { - "lodash.sortby": "^4.7.0", - "tr46": "^1.0.1", - "webidl-conversions": "^4.0.2" - } - } - } - }, - "docusaurus-plugin-openapi-docs": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/docusaurus-plugin-openapi-docs/-/docusaurus-plugin-openapi-docs-2.0.4.tgz", - "integrity": "sha512-jLgEEbMsQ+Y6ihy4y7SmXthUMRDbqAL0OKrdtUaOAxxb/wkLXB28mX74xiZzL928DZJ84IJejHgbjFb2ITcKhA==", - "requires": { - "@apidevtools/json-schema-ref-parser": "^10.1.0", - "@docusaurus/plugin-content-docs": ">=2.4.1 <=2.4.3", - "@docusaurus/utils": ">=2.4.1 <=2.4.3", - "@docusaurus/utils-validation": ">=2.4.1 <=2.4.3", - "@paloaltonetworks/openapi-to-postmanv2": "3.1.0-hotfix.1", - "@paloaltonetworks/postman-collection": "^4.1.0", - "@redocly/openapi-core": "^1.0.0-beta.125", - "chalk": "^4.1.2", - "clsx": "^1.1.1", - "fs-extra": "^9.0.1", - "json-pointer": "^0.6.2", - "json-schema-merge-allof": "^0.8.1", - "lodash": "^4.17.20", - "mustache": "^4.2.0", - "slugify": "^1.6.5", - "swagger2openapi": "^7.0.8", - "xml-formatter": "^2.6.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": 
"sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "requires": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "docusaurus-theme-openapi-docs": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/docusaurus-theme-openapi-docs/-/docusaurus-theme-openapi-docs-2.0.4.tgz", - "integrity": "sha512-w4YodyfMuzvWg6DDDzqb+eNBt4D5ZkMB13343u4zREBdDBOeOK5ikLQuQ+735WqPbAbH4gonvInXLQTAYodCNw==", - "requires": { - "@docusaurus/theme-common": ">=2.4.1 <=2.4.3", - "@hookform/error-message": "^2.0.1", - "@paloaltonetworks/postman-code-generators": "1.1.15-patch.2", - "@paloaltonetworks/postman-collection": "^4.1.0", - "@reduxjs/toolkit": "^1.7.1", - "clsx": "^1.1.1", - "copy-text-to-clipboard": "^3.1.0", - "crypto-js": "^4.1.1", - "docusaurus-plugin-openapi-docs": "^2.0.4", - "docusaurus-plugin-sass": "^0.2.3", - "file-saver": "^2.0.5", - "lodash": "^4.17.20", - "node-polyfill-webpack-plugin": "^2.0.1", - "prism-react-renderer": "^1.3.5", - "react-hook-form": "^7.43.8", - "react-live": ">=3.1.1 <4.1.0", - "react-magic-dropzone": "^1.0.1", - "react-markdown": "^8.0.1", - "react-modal": "^3.15.1", - "react-redux": "^7.2.0", - "rehype-raw": "^6.1.1", - "sass": "^1.58.1", - "sass-loader": "^13.3.2", - "webpack": "^5.61.0", - "xml-formatter": "^2.6.1" - }, - "dependencies": { - "docusaurus-plugin-sass": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.5.tgz", - "integrity": "sha512-Z+D0fLFUKcFpM+bqSUmqKIU+vO+YF1xoEQh5hoFreg2eMf722+siwXDD+sqtwU8E4MvVpuvsQfaHwODNlxJAEg==", - "requires": { - "sass-loader": "^10.1.1" - }, - "dependencies": { - "sass-loader": { - "version": "10.5.2", - "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-10.5.2.tgz", - "integrity": "sha512-vMUoSNOUKJILHpcNCCyD23X34gve1TS7Rjd9uXHeKqhvBG39x6XbswFDtpbTElj6XdMFezoWhkh5vtKudf2cgQ==", - "requires": { - "klona": "^2.0.4", - "loader-utils": "^2.0.0", - "neo-async": "^2.6.2", - "schema-utils": "^3.0.0", - "semver": "^7.3.2" - } - } - } - } - } - }, - "dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", - "requires": { - "utila": "~0.4" - } - }, - "dom-serializer": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", - "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", - "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - } - }, - "domain-browser": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-4.23.0.tgz", - "integrity": "sha512-ArzcM/II1wCCujdCNyQjXrAFwS4mrLh4C7DZWlaI8mdh7h3BfKdNd3bKXITfl2PT9FtfQqaGvhi1vPRQPimjGA==" - 
}, - "domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==" - }, - "domhandler": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", - "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", - "requires": { - "domelementtype": "^2.2.0" - } - }, - "domify": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/domify/-/domify-1.4.2.tgz", - "integrity": "sha512-m4yreHcUWHBncGVV7U+yQzc12vIlq0jMrtHZ5mW6dQMiL/7skSYNVX9wqKwOtyO9SGCgevrAFEgOCAHmamHTUA==" - }, - "dompurify": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.9.tgz", - "integrity": "sha512-uyb4NDIvQ3hRn6NiC+SIFaP4mJ/MdXlvtunaqK9Bn6dD3RuB/1S/gasEjDHD8eiaqdSael2vBv+hOs7Y+jhYOQ==" - }, - "domutils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", - "requires": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - } - }, - "dot-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", - "requires": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "dot-prop": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", - "requires": { - "is-obj": "^2.0.0" - }, - "dependencies": { - "is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==" - } - } - }, - "download": { - "version": "6.2.5", - "resolved": "https://registry.npmjs.org/download/-/download-6.2.5.tgz", - "integrity": "sha512-DpO9K1sXAST8Cpzb7kmEhogJxymyVUd5qz/vCOSyvwtp2Klj2XcDt5YUuasgxka44SxF0q5RriKIwJmQHG2AuA==", - "requires": { - "caw": "^2.0.0", - "content-disposition": "^0.5.2", - "decompress": "^4.0.0", - "ext-name": "^5.0.0", - "file-type": "5.2.0", - "filenamify": "^2.0.0", - "get-stream": "^3.0.0", - "got": "^7.0.0", - "make-dir": "^1.0.0", - "p-event": "^1.0.0", - "pify": "^3.0.0" - }, - "dependencies": { - "file-type": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", - "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==" - }, - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==" - }, - "got": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/got/-/got-7.1.0.tgz", - "integrity": "sha512-Y5WMo7xKKq1muPsxD+KmrR8DH5auG7fBdDVueZwETwV6VytKyU9OX/ddpq2/1hp1vIPvVb4T81dKQz3BivkNLw==", - "requires": { - "decompress-response": "^3.2.0", - "duplexer3": "^0.1.4", - "get-stream": "^3.0.0", - "is-plain-obj": "^1.1.0", - "is-retry-allowed": "^1.0.0", - "is-stream": "^1.0.0", - "isurl": 
"^1.0.0-alpha5", - "lowercase-keys": "^1.0.0", - "p-cancelable": "^0.3.0", - "p-timeout": "^1.1.1", - "safe-buffer": "^5.0.1", - "timed-out": "^4.0.0", - "url-parse-lax": "^1.0.0", - "url-to-options": "^1.0.1" - } - }, - "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - }, - "make-dir": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", - "requires": { - "pify": "^3.0.0" - } - }, - "p-cancelable": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.3.0.tgz", - "integrity": "sha512-RVbZPLso8+jFeq1MfNvgXtCRED2raz/dKpacfTNxsx6pLEpEomM7gah6VeHSYV3+vo0OAi4MkArtQcWWXuQoyw==" - }, - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - }, - "prepend-http": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz", - "integrity": "sha512-PhmXi5XmoyKw1Un4E+opM2KcsJInDvKyuOumcjjw3waw86ZNjHwVUOOWLc4bCzLdcKNaWBH9e99sbWzDQsVaYg==" - }, - "url-parse-lax": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz", - "integrity": "sha512-BVA4lR5PIviy2PMseNd2jbFQ+jwSwQGdJejf5ctd1rEXt0Ypd7yanUK9+lYechVlN5VaTJGsu2U/3MDDu6KgBA==", - "requires": { - "prepend-http": "^1.0.1" - } - } - } - }, - "downloadjs": { - "version": "1.4.7", - "resolved": "https://registry.npmjs.org/downloadjs/-/downloadjs-1.4.7.tgz", - "integrity": "sha512-LN1gO7+u9xjU5oEScGFKvXhYf7Y/empUIIEAGBs1LzUq/rg5duiDrkuH5A2lQGd5jfMOb9X9usDa2oVXwJ0U/Q==" - }, - "duplexer": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" - }, - "duplexer2": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", - "integrity": "sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==", - "requires": { - "readable-stream": "^2.0.2" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" - }, - "eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" - }, - "ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" - }, - "electron-to-chromium": { - "version": "1.4.506", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.506.tgz", - "integrity": "sha512-xxGct4GPAKSRlrLBtJxJFYy74W11zX6PO9GyHgl/U+2s3Dp0ZEwAklDfNHXOWcvH7zWMpsmgbR0ggEuaYAVvHA==" - }, - "elkjs": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/elkjs/-/elkjs-0.8.2.tgz", - "integrity": "sha512-L6uRgvZTH+4OF5NE/MBbzQx/WYpru1xCBE9respNj6qznEewGUIfhzmm7horWWxbNO2M0WckQypGctR8lH79xQ==" - }, - "elliptic": { - "version": "6.5.4", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz", - "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==", - "requires": { - "bn.js": "^4.11.9", - "brorand": "^1.1.0", - "hash.js": "^1.0.0", - "hmac-drbg": "^1.0.1", - "inherits": "^2.0.4", - "minimalistic-assert": "^1.0.1", - "minimalistic-crypto-utils": "^1.0.1" - }, - "dependencies": { - "bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - } - } - }, - "emittery": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", - "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==" - }, - "emoticon": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz", - "integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==" - }, - "encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": 
"sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==" - }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "requires": { - "once": "^1.4.0" - } - }, - "enhanced-resolve": { - "version": "5.10.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.10.0.tgz", - "integrity": "sha512-T0yTFjdpldGY8PmuXXR0PyQ1ufZpEGiHVrp7zHKB7jdR4qlmZHhONVM5AQOAWXuF/w3dnHbEQVrNptJgt7F+cQ==", - "requires": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" - } - }, - "entities": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" - }, - "enzyme": { - "version": "3.11.0", - "resolved": "https://registry.npmjs.org/enzyme/-/enzyme-3.11.0.tgz", - "integrity": "sha512-Dw8/Gs4vRjxY6/6i9wU0V+utmQO9kvh9XLnz3LIudviOnVYDEe2ec+0k+NQoMamn1VrjKgCUOWj5jG/5M5M0Qw==", - "requires": { - "array.prototype.flat": "^1.2.3", - "cheerio": "^1.0.0-rc.3", - "enzyme-shallow-equal": "^1.0.1", - "function.prototype.name": "^1.1.2", - "has": "^1.0.3", - "html-element-map": "^1.2.0", - "is-boolean-object": "^1.0.1", - "is-callable": "^1.1.5", - "is-number-object": "^1.0.4", - "is-regex": "^1.0.5", - "is-string": "^1.0.5", - "is-subset": "^0.1.1", - "lodash.escape": "^4.0.1", - "lodash.isequal": "^4.5.0", - "object-inspect": "^1.7.0", - "object-is": "^1.0.2", - "object.assign": "^4.1.0", - "object.entries": "^1.1.1", - "object.values": "^1.1.1", - "raf": "^3.4.1", - "rst-selector-parser": "^2.2.3", - "string.prototype.trim": "^1.2.1" - } - }, - "enzyme-shallow-equal": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/enzyme-shallow-equal/-/enzyme-shallow-equal-1.0.5.tgz", - "integrity": "sha512-i6cwm7hN630JXenxxJFBKzgLC3hMTafFQXflvzHgPmDhOBhxUWDe8AeRv1qp2/uWJ2Y8z5yLWMzmAfkTOiOCZg==", - "requires": { - "has": "^1.0.3", - "object-is": "^1.1.5" - } - }, - "error": { - "version": "7.2.1", - "resolved": "https://registry.npmjs.org/error/-/error-7.2.1.tgz", - "integrity": "sha512-fo9HBvWnx3NGUKMvMwB/CBCMMrfEJgbDTVDEkPygA3Bdd3lM1OyCd+rbQ8BwnpF6GdVeOLDNmyL4N5Bg80ZvdA==", - "requires": { - "string-template": "~0.2.1" - } - }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "requires": { - "is-arrayish": "^0.2.1" - }, - "dependencies": { - "is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" - } - } - }, - "es-abstract": { - "version": "1.21.2", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.2.tgz", - "integrity": "sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==", - "requires": { - "array-buffer-byte-length": "^1.0.0", - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "es-set-tostringtag": "^2.0.1", - "es-to-primitive": "^1.2.1", - "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.2.0", - "get-symbol-description": "^1.0.0", - "globalthis": "^1.0.3", - "gopd": "^1.0.1", - "has": "^1.0.3", - "has-property-descriptors": "^1.0.0", - 
"has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "internal-slot": "^1.0.5", - "is-array-buffer": "^3.0.2", - "is-callable": "^1.2.7", - "is-negative-zero": "^2.0.2", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "is-string": "^1.0.7", - "is-typed-array": "^1.1.10", - "is-weakref": "^1.0.2", - "object-inspect": "^1.12.3", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "safe-regex-test": "^1.0.0", - "string.prototype.trim": "^1.2.7", - "string.prototype.trimend": "^1.0.6", - "string.prototype.trimstart": "^1.0.6", - "typed-array-length": "^1.0.4", - "unbox-primitive": "^1.0.2", - "which-typed-array": "^1.1.9" - } - }, - "es-array-method-boxes-properly": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz", - "integrity": "sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA==" - }, - "es-module-lexer": { - "version": "0.9.3", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", - "integrity": "sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==" - }, - "es-set-tostringtag": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", - "integrity": "sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==", - "requires": { - "get-intrinsic": "^1.1.3", - "has": "^1.0.3", - "has-tostringtag": "^1.0.0" - } - }, - "es-shim-unscopables": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", - "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", - "requires": { - "has": "^1.0.3" - } - }, - "es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "requires": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - } - }, - "es6-promise": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-3.3.1.tgz", - "integrity": "sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==" - }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" - }, - "escape-goat": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==" - }, - "escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" - }, - "eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": 
"sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "requires": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - } - }, - "esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" - }, - "esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "requires": { - "estraverse": "^5.2.0" - }, - "dependencies": { - "estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==" - } - } - }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==" - }, - "esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" - }, - "eta": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/eta/-/eta-2.0.0.tgz", - "integrity": "sha512-NqE7S2VmVwgMS8yBxsH4VgNQjNjLq1gfGU0u9I6Cjh468nPRMoDfGdK9n1p/3Dvsw3ebklDkZsFAnKJ9sefjBA==" - }, - "etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==" - }, - "eval": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", - "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", - "requires": { - "@types/node": "*", - "require-like": ">= 0.1.1" - } - }, - "event-target-shim": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==" - }, - "eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==" - }, - "evp_bytestokey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", - "requires": { - "md5.js": "^1.3.4", - "safe-buffer": "^5.1.1" - } - }, - "exec-buffer": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/exec-buffer/-/exec-buffer-3.2.0.tgz", - "integrity": "sha512-wsiD+2Tp6BWHoVv3B+5Dcx6E7u5zky+hUwOHjuH2hKSLR3dvRmX8fk8UD8uqQixHs4Wk6eDmiegVrMPjKj7wpA==", - "requires": { - "execa": "^0.7.0", - "p-finally": "^1.0.0", - "pify": "^3.0.0", - "rimraf": "^2.5.4", - "tempfile": "^2.0.0" - }, - "dependencies": 
{ - "cross-spawn": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==", - "requires": { - "lru-cache": "^4.0.1", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "execa": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha512-RztN09XglpYI7aBBrJCPW95jEH7YF1UEPOoX9yDhUTPdp7mK+CQvnLTuD10BNXZ3byLTu2uehZ8EcKT/4CGiFw==", - "requires": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", - "requires": { - "path-key": "^2.0.0" - } - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==" - }, - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - }, - "rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "requires": { - "glob": "^7.1.3" - } - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==" - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": 
"sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==" - } - } - }, - "execa": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", - "requires": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "dependencies": { - "get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==" - } - } - }, - "executable": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/executable/-/executable-4.1.1.tgz", - "integrity": "sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==", - "requires": { - "pify": "^2.2.0" - }, - "dependencies": { - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==" - } - } - }, - "exenv": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/exenv/-/exenv-1.2.2.tgz", - "integrity": "sha512-Z+ktTxTwv9ILfgKCk32OX3n/doe+OcLTRtqK9pcL+JsP3J1/VW8Uvl4ZjLlKqeW4rzK4oesDOGMEMRIZqtP4Iw==" - }, - "exit": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", - "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", - "dev": true - }, - "expand-brackets": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", - "integrity": "sha512-w/ozOKR9Obk3qoWeY/WDi6MFta9AoMR+zud60mdnbniMcBxRuFJyDt2LdX/14A1UABeqk+Uk+LDfUpvoGKppZA==", - "requires": { - "debug": "^2.3.3", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "posix-character-classes": "^0.1.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": 
"https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "requires": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - } - }, - "kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - } - } - }, - "expand-range": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", - "integrity": "sha512-AFASGfIlnIbkKPQwX1yHaDjFvh/1gyKJODme52V6IORh69uEYgZp0o9C+qsIGNVEiuuhQU0CSSl++Rlegg1qvA==", - "requires": { - "fill-range": "^2.1.0" - }, - "dependencies": { - "fill-range": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", - "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", - "requires": { - "is-number": "^2.1.0", - "isobject": "^2.0.0", - "randomatic": "^3.0.0", - "repeat-element": "^1.1.2", - "repeat-string": "^1.5.2" - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-number": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", - "integrity": "sha512-QUzH43Gfb9+5yckcrSA0VBDwEtDUchrk4F6tfJZQuNzDJbEDB9cZNzSfXGQ1jqmdDY/kl41lUOWM9syA8z8jlg==", - "requires": { - "kind-of": "^3.0.2" - } - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha512-+OUdGJlgjOBZDfxnDjYYG6zp487z0JGNQq3cYQYg5f5hKR+syHMsaztzGeml/4kGG55CSpKSpWTY+jYGgsHLgA==", - "requires": { - "isarray": "1.0.0" - } - }, - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } 
- } - } - }, - "expect": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/expect/-/expect-29.6.4.tgz", - "integrity": "sha512-F2W2UyQ8XYyftHT57dtfg8Ue3X5qLgm2sSug0ivvLRH/VKNRL/pDxg/TH7zVzbQB0tu80clNFy6LU7OS/VSEKA==", - "dev": true, - "requires": { - "@jest/expect-utils": "^29.6.4", - "jest-get-type": "^29.6.3", - "jest-matcher-utils": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3" - } - }, - "express": { - "version": "4.18.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz", - "integrity": "sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==", - "requires": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.20.0", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.5.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.2.0", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.7", - "qs": "6.10.3", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "dependencies": { - "array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" - }, - "content-disposition": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "requires": { - "safe-buffer": "5.2.1" - } - }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" - }, - "range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - } - } - }, - "ext-list": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/ext-list/-/ext-list-2.2.2.tgz", - "integrity": "sha512-u+SQgsubraE6zItfVA0tBuCBhfU9ogSRnsvygI7wht9TS510oLkBRXBsqopeUG/GBOIQyKZO9wjTqIu/sf5zFA==", - "requires": { - "mime-db": "^1.28.0" - } - }, - "ext-name": { - "version": "5.0.0", - 
"resolved": "https://registry.npmjs.org/ext-name/-/ext-name-5.0.0.tgz", - "integrity": "sha512-yblEwXAbGv1VQDmow7s38W77hzAgJAO50ztBLMcUyUBfxv1HC+LGwtiEN+Co6LtlqT/5uwVOxsD4TNIilWhwdQ==", - "requires": { - "ext-list": "^2.0.0", - "sort-keys-length": "^1.0.0" - } - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", - "requires": { - "is-extendable": "^0.1.0" - } - }, - "extglob": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "requires": { - "array-unique": "^0.3.2", - "define-property": "^1.0.0", - "expand-brackets": "^2.1.4", - "extend-shallow": "^2.0.1", - "fragment-cache": "^0.2.1", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "dependencies": { - "define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", - "requires": { - "is-descriptor": "^1.0.0" - } - } - } - }, - "extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==" - }, - "fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "fast-folder-size": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/fast-folder-size/-/fast-folder-size-1.6.1.tgz", - "integrity": "sha512-F3tRpfkAzb7TT2JNKaJUglyuRjRa+jelQD94s9OSqkfEeytLmupCqQiD+H2KoIXGtp4pB5m4zNmv5m2Ktcr+LA==", - "requires": { - "unzipper": "^0.10.11" - } - }, - "fast-glob": { - "version": "3.2.11", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz", - "integrity": "sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==", - "requires": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - } - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "fast-safe-stringify": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", - "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==" - }, - "fast-url-parser": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", - "integrity": "sha1-9K8+qfNNiicc9YrSs3WfQx8LMY0=", - "requires": { - "punycode": "^1.3.2" - }, - 
"dependencies": { - "punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" - } - } - }, - "fast-xml-parser": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-4.1.3.tgz", - "integrity": "sha512-LsNDahCiCcJPe8NO7HijcnukHB24tKbfDDA5IILx9dmW3Frb52lhbeX6MPNUSvyGNfav2VTYpJ/OqkRoVLrh2Q==", - "requires": { - "strnum": "^1.0.5" - } - }, - "fastq": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz", - "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==", - "requires": { - "reusify": "^1.0.4" - } - }, - "faye-websocket": { - "version": "0.11.4", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", - "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", - "requires": { - "websocket-driver": ">=0.5.1" - } - }, - "fb-watchman": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", - "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", - "dev": true, - "requires": { - "bser": "2.1.1" - } - }, - "fbemitter": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/fbemitter/-/fbemitter-3.0.0.tgz", - "integrity": "sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==", - "requires": { - "fbjs": "^3.0.0" - } - }, - "fbjs": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.5.tgz", - "integrity": "sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg==", - "requires": { - "cross-fetch": "^3.1.5", - "fbjs-css-vars": "^1.0.0", - "loose-envify": "^1.0.0", - "object-assign": "^4.1.0", - "promise": "^7.1.1", - "setimmediate": "^1.0.5", - "ua-parser-js": "^1.0.35" - } - }, - "fbjs-css-vars": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz", - "integrity": "sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ==" - }, - "fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", - "requires": { - "pend": "~1.2.0" - } - }, - "feed": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", - "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", - "requires": { - "xml-js": "^1.6.11" - } - }, - "feelers": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/feelers/-/feelers-1.3.1.tgz", - "integrity": "sha512-vynmIHhjttmT0wfzbI+Nmi84wLbLwUt83NXo5YTQMReIjRwgHhQpxs7koixX/flJIlTG8M4eukc1U1oQAYkhNw==", - "requires": { - "@bpmn-io/cm-theme": "^0.1.0-alpha.2", - "@bpmn-io/feel-lint": "^1.2.0", - "@codemirror/autocomplete": "^6.10.1", - "@codemirror/commands": "^6.3.0", - "@codemirror/language": "^6.9.1", - "@codemirror/lint": "^6.4.2", - "@codemirror/state": "^6.3.0", - "@codemirror/view": "^6.21.3", - "@lezer/common": "^1.1.0", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.13", - "@lezer/markdown": "^1.1.0", - "feelin": "^3.0.1", - "lezer-feel": "^1.2.4", - "min-dom": "^4.1.0" - } - }, 
- "feelin": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/feelin/-/feelin-3.0.1.tgz", - "integrity": "sha512-aYXH3UYkM2eopg3scgNRNEo/ecwizKH6qTqkEu5nSLMMlMgfhLDhWrLl7ChG5iHspO9o4Q2YSP1o4wW8q0L2Qw==", - "requires": { - "@lezer/lr": "^1.3.9", - "lezer-feel": "^1.2.5", - "luxon": "^3.1.0" - } - }, - "figures": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", - "integrity": "sha512-UxKlfCRuCBxSXU4C6t9scbDyWZ4VlaFFdojKtzJuSkuOBQ5CNFum+zZXFwHjo+CxBC1t6zlYPgHIgFjL8ggoEQ==", - "requires": { - "escape-string-regexp": "^1.0.5", - "object-assign": "^4.1.0" - } - }, - "file-drops": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/file-drops/-/file-drops-0.4.0.tgz", - "integrity": "sha512-dPLRxrQ/sWHyU1DMf72doyyFuqeR/T8hJ97coJHXmdeHvqMTdOMJ/PLsHKjQzDHC8TBQO0rDUinDEXz3WGTnQA==", - "requires": { - "min-dom": "^3.1.1" - }, - "dependencies": { - "component-event": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/component-event/-/component-event-0.1.4.tgz", - "integrity": "sha512-GMwOG8MnUHP1l8DZx1ztFO0SJTFnIzZnBDkXAj8RM2ntV2A6ALlDxgbMY1Fvxlg6WPQ+5IM/a6vg4PEYbjg/Rw==" - }, - "min-dash": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/min-dash/-/min-dash-3.8.1.tgz", - "integrity": "sha512-evumdlmIlg9mbRVPbC4F5FuRhNmcMS5pvuBUbqb1G9v09Ro0ImPEgz5n3khir83lFok1inKqVDjnKEg3GpDxQg==" - }, - "min-dom": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/min-dom/-/min-dom-3.2.1.tgz", - "integrity": "sha512-v6YCmnDzxk4rRJntWTUiwggLupPw/8ZSRqUq0PDaBwVZEO/wYzCH4SKVBV+KkEvf3u0XaWHly5JEosPtqRATZA==", - "requires": { - "component-event": "^0.1.4", - "domify": "^1.3.1", - "indexof": "0.0.1", - "matches-selector": "^1.2.0", - "min-dash": "^3.8.1" - } - } - } - }, - "file-loader": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", - "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", - "requires": { - "loader-utils": "^2.0.0", - "schema-utils": "^3.0.0" - } - }, - "file-saver": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/file-saver/-/file-saver-2.0.5.tgz", - "integrity": "sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==" - }, - "file-type": { - "version": "10.11.0", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-10.11.0.tgz", - "integrity": "sha512-uzk64HRpUZyTGZtVuvrjP0FYxzQrBf4rojot6J65YMEbwBLB0CWm0CLojVpwpmFmxcE/lkvYICgfcGozbBq6rw==" - }, - "filename-reserved-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz", - "integrity": "sha512-lc1bnsSr4L4Bdif8Xb/qrtokGbq5zlsms/CYH8PP+WtCkGNF65DPiQY8vG3SakEdRn8Dlnm+gW/qWKKjS5sZzQ==" - }, - "filenamify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/filenamify/-/filenamify-2.1.0.tgz", - "integrity": "sha512-ICw7NTT6RsDp2rnYKVd8Fu4cr6ITzGy3+u4vUujPkabyaz+03F24NWEX7fs5fp+kBonlaqPH8fAO2NM+SXt/JA==", - "requires": { - "filename-reserved-regex": "^2.0.0", - "strip-outer": "^1.0.0", - "trim-repeated": "^1.0.0" - } - }, - "filesize": { - "version": "8.0.7", - "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", - "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==" - }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - 
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "filter-obj": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-2.0.2.tgz", - "integrity": "sha512-lO3ttPjHZRfjMcxWKb1j1eDhTFsu4meeR3lnMcnBFhk6RuLhvEiuALu2TlfL310ph4lCYYwgF/ElIjdP739tdg==" - }, - "finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", - "requires": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "statuses": "2.0.1", - "unpipe": "~1.0.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - } - } - }, - "find-cache-dir": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", - "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", - "requires": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - } - }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, - "find-versions": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-3.2.0.tgz", - "integrity": "sha512-P8WRou2S+oe222TOCHitLy8zj+SIsVJh52VP4lvXkaFVnOFFdoWv1H1Jjvel1aI6NCFOAaeAVm8qrI0odiLcww==", - "requires": { - "semver-regex": "^2.0.0" - } - }, - "flatpickr": { - "version": "4.6.13", - "resolved": "https://registry.npmjs.org/flatpickr/-/flatpickr-4.6.13.tgz", - "integrity": "sha512-97PMG/aywoYpB4IvbvUJi0RQi8vearvU0oov1WW3k0WZPBMrTQVqekSX5CjSG/M4Q3i6A/0FKXC7RyAoAUUSPw==" - }, - "flux": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/flux/-/flux-4.0.4.tgz", - "integrity": "sha512-NCj3XlayA2UsapRpM7va6wU1+9rE5FIL7qoMcmxWHRzbp0yujihMBm9BBHZ1MDIk5h5o2Bl6eGiCe8rYELAmYw==", - "requires": { - "fbemitter": "^3.0.0", - "fbjs": "^3.0.1" - } - }, - "focus-trap": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/focus-trap/-/focus-trap-7.5.4.tgz", - "integrity": "sha512-N7kHdlgsO/v+iD/dMoJKtsSqs5Dz/dXZVebRgJw23LDk+jMi/974zyiOYDziY2JPp8xivq9BmUGwIJMiuSBi7w==", - "requires": { - "tabbable": "^6.2.0" - } - }, - "follow-redirects": { - "version": "1.14.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", - "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==" - }, - "for-each": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", - "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", - "requires": { 
- "is-callable": "^1.1.3" - } - }, - "for-in": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==" - }, - "foreach": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.6.tgz", - "integrity": "sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg==" - }, - "foreground-child": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", - "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", - "requires": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" - }, - "dependencies": { - "signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==" - } - } - }, - "forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==" - }, - "fork-ts-checker-webpack-plugin": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.2.tgz", - "integrity": "sha512-m5cUmF30xkZ7h4tWUgTAcEaKmUW7tfyUyTqNNOz7OxWJ0v1VWKTcOvH8FWHUwSjlW/356Ijc9vi3XfcPstpQKA==", - "requires": { - "@babel/code-frame": "^7.8.3", - "@types/json-schema": "^7.0.5", - "chalk": "^4.1.0", - "chokidar": "^3.4.2", - "cosmiconfig": "^6.0.0", - "deepmerge": "^4.2.2", - "fs-extra": "^9.0.0", - "glob": "^7.1.6", - "memfs": "^3.1.2", - "minimatch": "^3.0.4", - "schema-utils": "2.7.0", - "semver": "^7.3.2", - "tapable": "^1.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "cosmiconfig": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", - "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", - "requires": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.7.2" - } - }, - "fs-extra": { - "version": "9.1.0", - "resolved": 
"https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "requires": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "schema-utils": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", - "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", - "requires": { - "@types/json-schema": "^7.0.4", - "ajv": "^6.12.2", - "ajv-keywords": "^3.4.1" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - }, - "tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==" - } - } - }, - "form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - }, - "forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==" - }, - "fraction.js": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.4.tgz", - "integrity": "sha512-pwiTgt0Q7t+GHZA4yaLjObx4vXmmdcS0iSJ19o8d/goUGgItX9UZWKWNnLHehxviD8wU2IWRsnR8cD5+yOJP2Q==" - }, - "fragment-cache": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", - "integrity": "sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==", - "requires": { - "map-cache": "^0.2.2" - } - }, - "fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==" - }, - "from2": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", - "integrity": "sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g==", - "requires": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.0" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "requires": { - "core-util-is": 
"~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" - }, - "fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "requires": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "fs-monkey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz", - "integrity": "sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q==" - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true - }, - "fstream": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.12.tgz", - "integrity": "sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==", - "requires": { - "graceful-fs": "^4.1.2", - "inherits": "~2.0.0", - "mkdirp": ">=0.5 0", - "rimraf": "2" - }, - "dependencies": { - "rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "requires": { - "glob": "^7.1.3" - } - } - } - }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "function.prototype.name": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" - } - }, - "functions-have-names": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", - "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==" - }, - "gaze": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/gaze/-/gaze-1.1.3.tgz", - "integrity": "sha512-BRdNm8hbWzFzWHERTrejLqwHDfS4GibPoq5wjTPIoJHoBtKGPg3xAFfxmM+9ztbXelxcf2hwQcaz1PtmFeue8g==", - "requires": { - "globule": "^1.0.0" - } - }, - "gensync": 
{ - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==" - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" - }, - "get-intrinsic": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - } - }, - "get-own-enumerable-property-symbols": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", - "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" - }, - "get-package-type": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", - "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", - "dev": true - }, - "get-proxy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/get-proxy/-/get-proxy-2.1.0.tgz", - "integrity": "sha512-zmZIaQTWnNQb4R4fJUEp/FC51eZsc6EkErspy3xtIYStaq8EB/hDIWipxsal+E8rz0qD7f2sL/NA9Xee4RInJw==", - "requires": { - "npm-conf": "^1.1.0" - } - }, - "get-stdin": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", - "integrity": "sha512-F5aQMywwJ2n85s4hJPTT9RPxGmubonuB10MNYo17/xph174n2MIR33HRguhzVag10O/npM7SPk73LMZNP+FaWw==" - }, - "get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "requires": { - "pump": "^3.0.0" - } - }, - "get-symbol-description": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", - "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" - } - }, - "get-value": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", - "integrity": "sha512-Ln0UQDlxH1BapMu3GPtf7CuYNwRZf2gwCuPqbyG6pB8WfmFpzqcy4xtAaAMUhnNqjMKTiCPZG2oMT3YSx8U2NA==" - }, - "getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "gifsicle": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/gifsicle/-/gifsicle-4.0.1.tgz", - "integrity": "sha512-A/kiCLfDdV+ERV/UB+2O41mifd+RxH8jlRG8DMxZO84Bma/Fw0htqZ+hY2iaalLRNyUu7tYZQslqUBJxBggxbg==", - "requires": { - "bin-build": "^3.0.0", - "bin-wrapper": "^4.0.0", - "execa": "^1.0.0", - "logalot": "^2.0.0" - }, - "dependencies": { - "cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": 
"sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "requires": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==" - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw==", - "requires": { - "path-key": "^2.0.0" - } - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==" - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==" - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "github-slugger": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", - "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" - }, - "glob": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", - "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "requires": { - "is-glob": "^4.0.1" - } - }, - "glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": 
"sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" - }, - "global-dirs": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.0.tgz", - "integrity": "sha512-v8ho2DS5RiCjftj1nD9NmnfaOzTdud7RRnVd9kFNOjqZbISlx5DQ+OrTkywgd0dIt7oFCvKetZSHoHcP3sDdiA==", - "requires": { - "ini": "2.0.0" - }, - "dependencies": { - "ini": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==" - } - } - }, - "global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", - "requires": { - "global-prefix": "^3.0.0" - } - }, - "global-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", - "requires": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" - }, - "dependencies": { - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==" - }, - "globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", - "requires": { - "define-properties": "^1.1.3" - } - }, - "globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "requires": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - } - }, - "globule": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/globule/-/globule-1.3.4.tgz", - "integrity": "sha512-OPTIfhMBh7JbBYDpa5b+Q5ptmMWKwcNcFSR/0c6t8V4f3ZAVBEsKNY37QdVqmLRYSMhOUGYrY0QhSoEpzGr/Eg==", - "requires": { - "glob": "~7.1.1", - "lodash": "^4.17.21", - "minimatch": "~3.0.2" - }, - "dependencies": { - "glob": { - "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - } - } - }, - "gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "requires": { - "get-intrinsic": "^1.1.3" - } - }, - "got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": 
"sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "requires": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - } - }, - "graceful-fs": { - "version": "4.2.9", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", - "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" - }, - "gray-matter": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", - "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", - "requires": { - "js-yaml": "^3.13.1", - "kind-of": "^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "dependencies": { - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - } - } - }, - "gulp-header": { - "version": "1.8.12", - "resolved": "https://registry.npmjs.org/gulp-header/-/gulp-header-1.8.12.tgz", - "integrity": "sha512-lh9HLdb53sC7XIZOYzTXM4lFuXElv3EVkSDhsd7DoJBj7hm+Ni7D3qYbb+Rr8DuM8nRanBvkVO9d7askreXGnQ==", - "requires": { - "concat-with-sourcemaps": "*", - "lodash.template": "^4.4.0", - "through2": "^2.0.0" - } - }, - "gzip-size": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", - "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", - "requires": { - "duplexer": "^0.1.2" - } - }, - "handle-thing": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" - }, - "har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==" - }, - "har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "requires": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - } - }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": 
"sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg==", - "requires": { - "ansi-regex": "^2.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==" - } - } - }, - "has-bigints": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", - "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", - "requires": { - "get-intrinsic": "^1.1.1" - } - }, - "has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==" - }, - "has-symbol-support-x": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz", - "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==" - }, - "has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" - }, - "has-to-string-tag-x": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz", - "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==", - "requires": { - "has-symbol-support-x": "^1.4.1" - } - }, - "has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", - "requires": { - "has-symbols": "^1.0.2" - } - }, - "has-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", - "integrity": "sha512-IBXk4GTsLYdQ7Rvt+GRBrFSVEkmuOUy4re0Xjd9kJSUQpnTrWR4/y9RpfexN9vkAPMFuQoeWKwqzPozRTlasGw==", - "requires": { - "get-value": "^2.0.6", - "has-values": "^1.0.0", - "isobject": "^3.0.0" - } - }, - "has-values": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", - "integrity": "sha512-ODYZC64uqzmtfGMEAX/FvZiRyWLpAC3vYnNunURUnkGVTS+mI0smVsWaPydRBsE3g+ok7h960jChO8mFcWlHaQ==", - "requires": { - "is-number": "^3.0.0", - "kind-of": "^4.0.0" - }, - "dependencies": { - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-number": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "kind-of": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", - "integrity": "sha512-24XsCxmEbRwEDbz/qz3stgin8TTzZ1ESR56OMCN0ujYg+vRutNSiOj9bHH9u85DKgXguraugV5sFuvbD4FW/hw==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "has-yarn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==" - }, - "hash-base": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "requires": { - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - } - } - }, - "hash.js": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "requires": { - "inherits": "^2.0.3", - "minimalistic-assert": "^1.0.1" - } - }, - "hast-to-hyperscript": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz", - "integrity": "sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==", - "requires": { - "@types/unist": "^2.0.3", - "comma-separated-tokens": "^1.0.0", - "property-information": "^5.3.0", - "space-separated-tokens": "^1.0.0", - "style-to-object": "^0.3.0", - "unist-util-is": "^4.0.0", - "web-namespaces": "^1.0.0" - } - }, - "hast-util-from-parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz", - "integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==", - "requires": { - "@types/parse5": "^5.0.0", - "hastscript": "^6.0.0", - "property-information": "^5.0.0", - "vfile": "^4.0.0", - "vfile-location": "^3.2.0", - "web-namespaces": "^1.0.0" - } - }, - "hast-util-parse-selector": { - "version": "2.2.5", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", - "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==" - }, - "hast-util-raw": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-6.0.1.tgz", - "integrity": "sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==", - "requires": { - "@types/hast": "^2.0.0", - "hast-util-from-parse5": "^6.0.0", - "hast-util-to-parse5": "^6.0.0", - 
"html-void-elements": "^1.0.0", - "parse5": "^6.0.0", - "unist-util-position": "^3.0.0", - "vfile": "^4.0.0", - "web-namespaces": "^1.0.0", - "xtend": "^4.0.0", - "zwitch": "^1.0.0" - }, - "dependencies": { - "parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" - } - } - }, - "hast-util-to-parse5": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz", - "integrity": "sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==", - "requires": { - "hast-to-hyperscript": "^9.0.0", - "property-information": "^5.0.0", - "web-namespaces": "^1.0.0", - "xtend": "^4.0.0", - "zwitch": "^1.0.0" - } - }, - "hast-util-whitespace": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-2.0.1.tgz", - "integrity": "sha512-nAxA0v8+vXSBDt3AnRUNjyRIQ0rD+ntpbAp4LnPkumc5M9yUbSMa4XDU9Q6etY4f1Wp4bNgvc1yjiZtsTTrSng==" - }, - "hastscript": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", - "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", - "requires": { - "@types/hast": "^2.0.0", - "comma-separated-tokens": "^1.0.0", - "hast-util-parse-selector": "^2.0.0", - "property-information": "^5.0.0", - "space-separated-tokens": "^1.0.0" - } - }, - "he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==" - }, - "heap": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/heap/-/heap-0.2.7.tgz", - "integrity": "sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg==" - }, - "hex-color-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" - }, - "highlight.js": { - "version": "9.18.5", - "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-9.18.5.tgz", - "integrity": "sha512-a5bFyofd/BHCX52/8i8uJkjr9DYwXIPnM/plwI6W7ezItLGqzt7X2G2nXuYSfsIJdkwwj/g9DG1LkcGJI/dDoA==" - }, - "history": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", - "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", - "requires": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" - } - }, - "hmac-drbg": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==", - "requires": { - "hash.js": "^1.0.3", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "hoist-non-react-statics": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", - "requires": { 
- "react-is": "^16.7.0" - } - }, - "hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==" - }, - "hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", - "requires": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "hsl-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz", - "integrity": "sha512-M5ezZw4LzXbBKMruP+BNANf0k+19hDQMgpzBIYnya//Al+fjNct9Wf3b1WedLqdEs2hKBvxq/jh+DsHJLj0F9A==" - }, - "hsla-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz", - "integrity": "sha512-7Wn5GMLuHBjZCb2bTmnDOycho0p/7UVaAeqXZGbHrBCl6Yd/xDhQJAXe6Ga9AXJH2I5zY1dEdYw2u1UptnSBJA==" - }, - "html-element-map": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/html-element-map/-/html-element-map-1.3.1.tgz", - "integrity": "sha512-6XMlxrAFX4UEEGxctfFnmrFaaZFNf9i5fNuV5wZ3WWQ4FVaNP1aX1LkX9j2mfEx1NpjeE/rL3nmgEn23GdFmrg==", - "requires": { - "array.prototype.filter": "^1.0.0", - "call-bind": "^1.0.2" - } - }, - "html-entities": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.3.tgz", - "integrity": "sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA==" - }, - "html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true - }, - "html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", - "requires": { - "camel-case": "^4.1.2", - "clean-css": "^5.2.2", - "commander": "^8.3.0", - "he": "^1.2.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.10.0" - }, - "dependencies": { - "commander": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": 
"sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==" - } - } - }, - "html-tags": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.2.0.tgz", - "integrity": "sha512-vy7ClnArOZwCnqZgvv+ddgHgJiAFXe3Ge9ML5/mBctVJoUoYPCdxVucOywjDARn6CVoh3dRSFdPHy2sX80L0Wg==" - }, - "html-void-elements": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-1.0.5.tgz", - "integrity": "sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w==" - }, - "html-webpack-plugin": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.0.tgz", - "integrity": "sha512-sy88PC2cRTVxvETRgUHFrL4No3UxvcH8G1NepGhqaTT+GXN2kTamqasot0inS5hXeg1cMbFDt27zzo9p35lZVw==", - "requires": { - "@types/html-minifier-terser": "^6.0.0", - "html-minifier-terser": "^6.0.2", - "lodash": "^4.17.21", - "pretty-error": "^4.0.0", - "tapable": "^2.0.0" - } - }, - "html2canvas": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/html2canvas/-/html2canvas-1.4.1.tgz", - "integrity": "sha512-fPU6BHNpsyIhr8yyMpTLLxAbkaK8ArIBcmZIRiBLiDhjeqvXolaEmDGmELFuX9I4xDcaKKcJl+TKZLqruBbmWA==", - "requires": { - "css-line-break": "^2.1.0", - "text-segmentation": "^1.0.3" - } - }, - "htmlparser2": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", - "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", - "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" - }, - "http-deceiver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" - }, - "http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", - "requires": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - } - }, - "http-parser-js": { - "version": "0.5.8", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", - "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" - }, - "http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "requires": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - } - }, - "http-proxy-middleware": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", - "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", - "requires": { - "@types/http-proxy": "^1.17.8", - "http-proxy": "^1.18.1", 
- "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.2" - }, - "dependencies": { - "is-plain-obj": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", - "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==" - } - } - }, - "http-reasons": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/http-reasons/-/http-reasons-0.1.0.tgz", - "integrity": "sha512-P6kYh0lKZ+y29T2Gqz+RlC9WBLhKe8kDmcJ+A+611jFfxdPsbMRQ5aNmFRM3lENqFkK+HTTL+tlQviAiv0AbLQ==" - }, - "http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", - "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - } - }, - "http2-client": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/http2-client/-/http2-client-1.3.5.tgz", - "integrity": "sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==" - }, - "https-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", - "integrity": "sha512-J+FkSdyD+0mA0N+81tMotaRMfSL9SGi+xpD3T6YApKsc3bGSXJlfXri3VyFOeYkfLRQisDk1W+jIFFKBeUBbBg==" - }, - "human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==" - }, - "husky": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.3.tgz", - "integrity": "sha512-+dQSyqPh4x1hlO1swXBiNb2HzTDN1I2IGLQx1GrBuiqFJfoMrnZWwVmatvSiO+Iz8fBUnf+lekwNo4c2LlXItg==", - "dev": true - }, - "iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "icss-utils": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", - "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", - "requires": {} - }, - "ids": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/ids/-/ids-1.0.5.tgz", - "integrity": "sha512-XQ0yom/4KWTL29sLG+tyuycy7UmeaM/79GRtSJq6IG9cJGIPeBz5kwDCguie3TwxaMNIc3WtPi0cTa1XYHicpw==" - }, - "ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" - }, - "ignore": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", - "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==" - }, - "image-size": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", - "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", - "requires": { - "queue": "6.0.2" - } - }, - "imagemin": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/imagemin/-/imagemin-6.1.0.tgz", - "integrity": 
"sha512-8ryJBL1CN5uSHpiBMX0rJw79C9F9aJqMnjGnrd/1CafegpNuA81RBAAru/jQQEOWlOJJlpRnlcVFF6wq+Ist0A==", - "requires": { - "file-type": "^10.7.0", - "globby": "^8.0.1", - "make-dir": "^1.0.0", - "p-pipe": "^1.1.0", - "pify": "^4.0.1", - "replace-ext": "^1.0.0" - }, - "dependencies": { - "@nodelib/fs.stat": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz", - "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==" - }, - "array-union": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", - "integrity": "sha512-Dxr6QJj/RdU/hCaBjOfxW+q6lyuVE6JFWIrAUpuOOhoJJoQ99cUn3igRaHVB5P9WrgFVN0FfArM3x0cueOU8ng==", - "requires": { - "array-uniq": "^1.0.1" - } - }, - "braces": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "requires": { - "arr-flatten": "^1.1.0", - "array-unique": "^0.3.2", - "extend-shallow": "^2.0.1", - "fill-range": "^4.0.0", - "isobject": "^3.0.1", - "repeat-element": "^1.1.2", - "snapdragon": "^0.8.1", - "snapdragon-node": "^2.0.1", - "split-string": "^3.0.2", - "to-regex": "^3.0.1" - } - }, - "dir-glob": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.0.0.tgz", - "integrity": "sha512-37qirFDz8cA5fimp9feo43fSuRo2gHwaIn6dXL8Ber1dGwUosDrGZeCCXq57WnIqE4aQ+u3eQZzsk1yOzhdwag==", - "requires": { - "arrify": "^1.0.1", - "path-type": "^3.0.0" - } - }, - "fast-glob": { - "version": "2.2.7", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz", - "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==", - "requires": { - "@mrmlnc/readdir-enhanced": "^2.2.1", - "@nodelib/fs.stat": "^1.1.2", - "glob-parent": "^3.1.0", - "is-glob": "^4.0.0", - "merge2": "^1.2.3", - "micromatch": "^3.1.10" - } - }, - "fill-range": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", - "integrity": "sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==", - "requires": { - "extend-shallow": "^2.0.1", - "is-number": "^3.0.0", - "repeat-string": "^1.6.1", - "to-regex-range": "^2.1.0" - } - }, - "glob-parent": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", - "integrity": "sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==", - "requires": { - "is-glob": "^3.1.0", - "path-dirname": "^1.0.0" - }, - "dependencies": { - "is-glob": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", - "integrity": "sha512-UFpDDrPgM6qpnFNI+rh/p3bUaq9hKLZN8bMUWzxmcnZVS3omf4IPK+BrewlnWjO1WmUsMYuSjKh4UJuV4+Lqmw==", - "requires": { - "is-extglob": "^2.1.0" - } - } - } - }, - "globby": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-8.0.2.tgz", - "integrity": "sha512-yTzMmKygLp8RUpG1Ymu2VXPSJQZjNAZPD4ywgYEaG7e4tBJeUQBO8OpXrf1RCNcEs5alsoJYPAMiIHP0cmeC7w==", - "requires": { - "array-union": "^1.0.1", - "dir-glob": "2.0.0", - "fast-glob": "^2.0.2", - "glob": "^7.1.2", - "ignore": "^3.3.5", - "pify": "^3.0.0", - "slash": "^1.0.0" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - 
"integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "ignore": { - "version": "3.3.10", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", - "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "requires": { - "is-plain-object": "^2.0.4" - } - }, - "is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "make-dir": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", - "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", - "requires": { - "pify": "^3.0.0" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "micromatch": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "requires": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "braces": "^2.3.1", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "extglob": "^2.0.4", - "fragment-cache": "^0.2.1", - "kind-of": "^6.0.2", - "nanomatch": "^1.2.9", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.2" - }, - "dependencies": { - "extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", - "requires": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - } - } - } - }, - "path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "requires": { - "pify": "^3.0.0" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": 
"sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==" - }, - "to-regex-range": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==", - "requires": { - "is-number": "^3.0.0", - "repeat-string": "^1.6.1" - } - } - } - }, - "imagemin-gifsicle": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/imagemin-gifsicle/-/imagemin-gifsicle-6.0.1.tgz", - "integrity": "sha512-kuu47c6iKDQ6R9J10xCwL0lgs0+sMz3LRHqRcJ2CRBWdcNmo3T5hUaM8hSZfksptZXJLGKk8heSAvwtSdB1Fng==", - "requires": { - "exec-buffer": "^3.0.0", - "gifsicle": "^4.0.0", - "is-gif": "^3.0.0" - } - }, - "imagemin-jpegtran": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/imagemin-jpegtran/-/imagemin-jpegtran-6.0.0.tgz", - "integrity": "sha512-Ih+NgThzqYfEWv9t58EItncaaXIHR0u9RuhKa8CtVBlMBvY0dCIxgQJQCfwImA4AV1PMfmUKlkyIHJjb7V4z1g==", - "requires": { - "exec-buffer": "^3.0.0", - "is-jpg": "^2.0.0", - "jpegtran-bin": "^4.0.0" - } - }, - "imagemin-optipng": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/imagemin-optipng/-/imagemin-optipng-6.0.0.tgz", - "integrity": "sha512-FoD2sMXvmoNm/zKPOWdhKpWdFdF9qiJmKC17MxZJPH42VMAp17/QENI/lIuP7LCUnLVAloO3AUoTSNzfhpyd8A==", - "requires": { - "exec-buffer": "^3.0.0", - "is-png": "^1.0.0", - "optipng-bin": "^5.0.0" - } - }, - "imagemin-svgo": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/imagemin-svgo/-/imagemin-svgo-7.1.0.tgz", - "integrity": "sha512-0JlIZNWP0Luasn1HT82uB9nU9aa+vUj6kpT+MjPW11LbprXC+iC4HDwn1r4Q2/91qj4iy9tRZNsFySMlEpLdpg==", - "requires": { - "is-svg": "^4.2.1", - "svgo": "^1.3.2" - }, - "dependencies": { - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "requires": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "css-tree": { - "version": "1.0.0-alpha.37", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", - "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", - "requires": { - "mdn-data": "2.0.4", - "source-map": "^0.6.1" - } - }, - "css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==" - }, - "dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "requires": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "requires": { - 
"dom-serializer": "0", - "domelementtype": "1" - }, - "dependencies": { - "domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - } - } - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "mdn-data": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", - "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==" - }, - "nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "requires": { - "boolbase": "~1.0.0" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "svgo": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", - "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", - "requires": { - "chalk": "^2.4.1", - "coa": "^2.0.2", - "css-select": "^2.0.0", - "css-select-base-adapter": "^0.1.1", - "css-tree": "1.0.0-alpha.37", - "csso": "^4.0.2", - "js-yaml": "^3.13.1", - "mkdirp": "~0.5.1", - "object.values": "^1.1.0", - "sax": "~1.2.4", - "stable": "^0.1.8", - "unquote": "~1.1.1", - "util.promisify": "~1.0.0" - } - } - } - }, - "immer": { - "version": "9.0.21", - "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", - "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==" - }, - "immutable": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.4.tgz", - "integrity": "sha512-fsXeu4J4i6WNWSikpI88v/PcVflZz+6kMhUfIwc5SY+poQRPnaf5V7qds6SUyUN3cVxEzuCab7QIoLOQ+DQ1wA==" - }, - "import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "requires": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - } - }, - "import-lazy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=" - }, - "import-local": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", - "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", - "dev": true, - "requires": { - "pkg-dir": "^4.2.0", - "resolve-cwd": "^3.0.0" - } - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=" - }, - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": 
"sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" - }, - "indexes-of": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", - "integrity": "sha512-bup+4tap3Hympa+JBJUG7XuOsdNQ6fxt0MHyXMKuLBKn0OqsTfvUxkUrroEX1+B2VsSHvCjiIcZVxRtYa4nllA==" - }, - "indexof": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/indexof/-/indexof-0.0.1.tgz", - "integrity": "sha512-i0G7hLJ1z0DE8dsqJa2rycj9dBmNKgXBvotXtZYXakU9oivfB9Uj2ZBC27qqef2U58/ZLwalxa1X/RDCdkHtVg==" - }, - "infima": { - "version": "0.2.0-alpha.43", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", - "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==" - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" - }, - "inline-style-parser": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", - "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" - }, - "internal-slot": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz", - "integrity": "sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==", - "requires": { - "get-intrinsic": "^1.2.0", - "has": "^1.0.3", - "side-channel": "^1.0.4" - } - }, - "internmap": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", - "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==" - }, - "interpret": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==" - }, - "into-stream": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz", - "integrity": "sha512-TcdjPibTksa1NQximqep2r17ISRiNE9fwlfbg3F8ANdvP5/yrFTew86VcO//jk4QTaMlbjypPBq76HN2zaKfZQ==", - "requires": { - "from2": "^2.1.1", - "p-is-promise": "^1.1.0" - } - }, - "invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "requires": { - "loose-envify": "^1.0.0" - } - }, - "ip-regex": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.3.0.tgz", - "integrity": "sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q==" - }, - "ipaddr.js": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.0.1.tgz", - "integrity": 
"sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng==" - }, - "is-absolute-url": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", - "integrity": "sha512-vOx7VprsKyllwjSkLV79NIhpyLfr3jAp7VaTCMXOJHu4m0Ew1CZ2fcjASwmV1jI3BWuWHB013M48eyeldk9gYg==" - }, - "is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-alphabetical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", - "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==" - }, - "is-alphanumerical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", - "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", - "requires": { - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0" - } - }, - "is-arguments": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-array-buffer": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz", - "integrity": "sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==", - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.2.0", - "is-typed-array": "^1.1.10" - } - }, - "is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - }, - "is-bigint": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", - "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", - "requires": { - "has-bigints": "^1.0.1" - } - }, - "is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "requires": { - "binary-extensions": "^2.0.0" - } - }, - "is-boolean-object": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", - "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-buffer": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", - "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==" - }, - "is-callable": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", - "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==" - }, - 
"is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "requires": { - "ci-info": "^2.0.0" - } - }, - "is-color-stop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz", - "integrity": "sha512-H1U8Vz0cfXNujrJzEcvvwMDW9Ra+biSYA3ThdQvAnMLJkEHQXn6bWzLkxHtVYJ+Sdbx0b6finn3jZiaVe7MAHA==", - "requires": { - "css-color-names": "^0.0.4", - "hex-color-regex": "^1.1.0", - "hsl-regex": "^1.0.0", - "hsla-regex": "^1.0.0", - "rgb-regex": "^1.0.1", - "rgba-regex": "^1.0.0" - } - }, - "is-core-module": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.9.0.tgz", - "integrity": "sha512-+5FPy5PnwmO3lvfMb0AsoPaBG+5KHUI0wYFXOtYPnVVVspTFUuMZNfNaNVRt3FZadstu2c8x23vykRW/NBoU6A==", - "requires": { - "has": "^1.0.3" - } - }, - "is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-date-object": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", - "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-decimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", - "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==" - }, - "is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "requires": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - } - }, - "is-directory": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz", - "integrity": "sha512-yVChGzahRFvbkscn2MlwGismPO12i9+znNruC5gVEntG3qu0xQMzsGg/JFbrsqDOHtHFPci+V5aP5T9I+yeKqw==" - }, - "is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==" - }, - "is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==" - }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=" - }, - "is-finite": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz", - "integrity": "sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==" - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": 
"sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, - "is-generator-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", - "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", - "dev": true - }, - "is-generator-function": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", - "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-gif": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-gif/-/is-gif-3.0.0.tgz", - "integrity": "sha512-IqJ/jlbw5WJSNfwQ/lHEDXF8rxhRgF6ythk2oiEvhpG29F704eX9NO6TvPfMiq9DrbwgcEDnETYNcZDPewQoVw==", - "requires": { - "file-type": "^10.4.0" - } - }, - "is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-hexadecimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", - "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==" - }, - "is-installed-globally": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", - "requires": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - } - }, - "is-jpg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-jpg/-/is-jpg-2.0.0.tgz", - "integrity": "sha512-ODlO0ruzhkzD3sdynIainVP5eoOFNN85rxA1+cwwnPe4dKyX0r5+hxNO5XpCrxlHcmb9vkOit9mhRD2JVuimHg==" - }, - "is-nan": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/is-nan/-/is-nan-1.3.2.tgz", - "integrity": "sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==", - "requires": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3" - } - }, - "is-natural-number": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-natural-number/-/is-natural-number-4.0.1.tgz", - "integrity": "sha512-Y4LTamMe0DDQIIAlaer9eKebAlDSV6huy+TWhJVPlzZh2o4tRP5SQWFlLn5N0To4mDD22/qdOq+veo1cSISLgQ==" - }, - "is-negative-zero": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", - "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==" - }, - "is-npm": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz", - "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==" - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" - }, - "is-number-object": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", - "integrity": 
"sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==" - }, - "is-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.2.tgz", - "integrity": "sha512-2rRIahhZr2UWb45fIOuvZGpFtz0TyOZLf32KxBbSoUCeZR495zCKlWUKKUByk3geS2eAs7ZAABt0Y/Rx0GiQGA==" - }, - "is-path-cwd": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==" - }, - "is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==" - }, - "is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==" - }, - "is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "requires": { - "isobject": "^3.0.1" - } - }, - "is-png": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-png/-/is-png-1.1.0.tgz", - "integrity": "sha512-23Rmps8UEx3Bzqr0JqAtQo0tYP6sDfIfMt1rL9rzlla/zbteftI9LSJoqsIoGgL06sJboDGdVns4RTakAW/WTw==" - }, - "is-regex": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", - "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-regexp": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", - "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==" - }, - "is-resolvable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" - }, - "is-retry-allowed": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", - "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==" - }, - "is-root": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", - "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==" - }, - "is-shared-array-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", - "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==", - "requires": { - "call-bind": "^1.0.2" - } - }, - "is-stream": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==" - }, - "is-string": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", - "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-subset": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-subset/-/is-subset-0.1.1.tgz", - "integrity": "sha512-6Ybun0IkarhmEqxXCNw/C0bna6Zb/TkfUX9UbwJtK6ObwAVCxmAP308WWTHviM/zAqXk05cdhYsUsZeGQh99iw==" - }, - "is-svg": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-4.4.0.tgz", - "integrity": "sha512-v+AgVwiK5DsGtT9ng+m4mClp6zDAmwrW8nZi6Gg15qzvBnRWWdfWA1TGaXyCDnWq5g5asofIgMVl3PjKxvk1ug==", - "requires": { - "fast-xml-parser": "^4.1.3" - } - }, - "is-symbol": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", - "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", - "requires": { - "has-symbols": "^1.0.2" - } - }, - "is-typed-array": { - "version": "1.1.10", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", - "requires": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0" - } - }, - "is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "is-url": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/is-url/-/is-url-1.2.4.tgz", - "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==" - }, - "is-utf8": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", - "integrity": "sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==" - }, - "is-weakref": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", - "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", - "requires": { - "call-bind": "^1.0.2" - } - }, - "is-whitespace-character": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz", - "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==" - }, - "is-windows": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==" - }, - "is-word-character": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz", - "integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==" - }, - "is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": 
"sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "requires": { - "is-docker": "^2.0.0" - } - }, - "is-yarn-global": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==" - }, - "is2": { - "version": "2.0.9", - "resolved": "https://registry.npmjs.org/is2/-/is2-2.0.9.tgz", - "integrity": "sha512-rZkHeBn9Zzq52sd9IUIV3a5mfwBY+o2HePMh0wkGBM4z4qjvy2GwVxQ6nNXSfw6MmVP6gf1QIlWjiOavhM3x5g==", - "requires": { - "deep-is": "^0.1.3", - "ip-regex": "^4.1.0", - "is-url": "^1.2.4" - } - }, - "isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==" - }, - "isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==" - }, - "istanbul-lib-coverage": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", - "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", - "dev": true - }, - "istanbul-lib-instrument": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.0.tgz", - "integrity": "sha512-x58orMzEVfzPUKqlbLd1hXCnySCxKdDKa6Rjg97CwuLLRI4g3FHTdnExu1OqffVFay6zeMW+T6/DowFLndWnIw==", - "dev": true, - "requires": { - "@babel/core": "^7.12.3", - "@babel/parser": "^7.14.7", - "@istanbuljs/schema": "^0.1.2", - "istanbul-lib-coverage": "^3.2.0", - "semver": "^7.5.4" - } - }, - "istanbul-lib-report": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", - "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", - "dev": true, - "requires": { - "istanbul-lib-coverage": "^3.0.0", - "make-dir": "^4.0.0", - "supports-color": "^7.1.0" - }, - "dependencies": { - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "make-dir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", - "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", - "dev": true, - "requires": { - "semver": "^7.5.3" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "istanbul-lib-source-maps": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", - "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", - "dev": true, - "requires": { - "debug": "^4.1.1", - "istanbul-lib-coverage": "^3.0.0", - "source-map": "^0.6.1" - }, - "dependencies": { - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - } - } - }, - "istanbul-reports": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz", - "integrity": "sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==", - "dev": true, - "requires": { - "html-escaper": "^2.0.0", - "istanbul-lib-report": "^3.0.0" - } - }, - "isurl": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isurl/-/isurl-1.0.0.tgz", - "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==", - "requires": { - "has-to-string-tag-x": "^1.2.0", - "is-object": "^1.0.1" - } - }, - "jackspeak": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", - "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", - "requires": { - "@isaacs/cliui": "^8.0.2", - "@pkgjs/parseargs": "^0.11.0" - } - }, - "jest": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest/-/jest-29.6.4.tgz", - "integrity": "sha512-tEFhVQFF/bzoYV1YuGyzLPZ6vlPrdfvDmmAxudA1dLEuiztqg2Rkx20vkKY32xiDROcD2KXlgZ7Cu8RPeEHRKw==", - "dev": true, - "requires": { - "@jest/core": "^29.6.4", - "@jest/types": "^29.6.3", - "import-local": "^3.0.2", - "jest-cli": "^29.6.4" - } - }, - "jest-changed-files": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.6.3.tgz", - "integrity": "sha512-G5wDnElqLa4/c66ma5PG9eRjE342lIbF6SUnTJi26C3J28Fv2TVY2rOyKB9YGbSA5ogwevgmxc4j4aVjrEK6Yg==", - "dev": true, - "requires": { - "execa": "^5.0.0", - "jest-util": "^29.6.3", - "p-limit": "^3.1.0" - }, - "dependencies": { - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "requires": { - "yocto-queue": "^0.1.0" - } - } - } - }, - "jest-circus": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.6.4.tgz", - "integrity": "sha512-YXNrRyntVUgDfZbjXWBMPslX1mQ8MrSG0oM/Y06j9EYubODIyHWP8hMUbjbZ19M3M+zamqEur7O80HODwACoJw==", - "dev": true, - "requires": { - "@jest/environment": "^29.6.4", - "@jest/expect": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "co": "^4.6.0", - "dedent": "^1.0.0", - "is-generator-fn": "^2.0.0", - "jest-each": "^29.6.3", - "jest-matcher-utils": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-runtime": "^29.6.4", - "jest-snapshot": "^29.6.4", - "jest-util": "^29.6.3", - "p-limit": "^3.1.0", - "pretty-format": "^29.6.3", - "pure-rand": "^6.0.0", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-cli": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.6.4.tgz", - "integrity": "sha512-+uMCQ7oizMmh8ZwRfZzKIEszFY9ksjjEQnTEMTaL7fYiL3Kw4XhqT9bYh+A4DQKUb67hZn2KbtEnDuHvcgK4pQ==", - "dev": true, - "requires": { - "@jest/core": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/types": "^29.6.3", - "chalk": "^4.0.0", - "exit": "^0.1.2", - "graceful-fs": "^4.2.9", - "import-local": "^3.0.2", - "jest-config": "^29.6.4", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", - "prompts": "^2.0.1", - "yargs": "^17.3.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-config": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.6.4.tgz", - "integrity": "sha512-JWohr3i9m2cVpBumQFv2akMEnFEPVOh+9L2xIBJhJ0zOaci2ZXuKJj0tgMKQCBZAKA09H049IR4HVS/43Qb19A==", - "dev": true, - "requires": { - "@babel/core": "^7.11.6", - "@jest/test-sequencer": "^29.6.4", - "@jest/types": "^29.6.3", - "babel-jest": "^29.6.4", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "deepmerge": "^4.2.2", - "glob": "^7.1.3", - "graceful-fs": "^4.2.9", - "jest-circus": "^29.6.4", - "jest-environment-node": "^29.6.4", - "jest-get-type": "^29.6.3", - "jest-regex-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-runner": "^29.6.4", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", - "micromatch": "^4.0.4", - "parse-json": "^5.2.0", - "pretty-format": "^29.6.3", - "slash": "^3.0.0", - "strip-json-comments": "^3.1.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", - "dev": true - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": 
"sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-diff": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.6.4.tgz", - "integrity": "sha512-9F48UxR9e4XOEZvoUXEHSWY4qC4zERJaOfrbBg9JpbJOO43R1vN76REt/aMGZoY6GD5g84nnJiBIVlscegefpw==", - "dev": true, - "requires": { - "chalk": "^4.0.0", - "diff-sequences": "^29.6.3", - "jest-get-type": "^29.6.3", - "pretty-format": "^29.6.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-docblock": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.6.3.tgz", - "integrity": "sha512-2+H+GOTQBEm2+qFSQ7Ma+BvyV+waiIFxmZF5LdpBsAEjWX8QYjSCa4FrkIYtbfXUJJJnFCYrOtt6TZ+IAiTjBQ==", - "dev": true, - "requires": { - "detect-newline": "^3.0.0" - } - }, - "jest-each": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.6.3.tgz", - "integrity": "sha512-KoXfJ42k8cqbkfshW7sSHcdfnv5agDdHCPA87ZBdmHP+zJstTJc0ttQaJ/x7zK6noAL76hOuTIJ6ZkQRS5dcyg==", - "dev": true, - "requires": { - "@jest/types": "^29.6.3", - "chalk": "^4.0.0", - "jest-get-type": "^29.6.3", - "jest-util": "^29.6.3", - "pretty-format": "^29.6.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - 
"requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-environment-node": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.6.4.tgz", - "integrity": "sha512-i7SbpH2dEIFGNmxGCpSc2w9cA4qVD+wfvg2ZnfQ7XVrKL0NA5uDVBIiGH8SR4F0dKEv/0qI5r+aDomDf04DpEQ==", - "dev": true, - "requires": { - "@jest/environment": "^29.6.4", - "@jest/fake-timers": "^29.6.4", - "@jest/types": "^29.6.3", - "@types/node": "*", - "jest-mock": "^29.6.3", - "jest-util": "^29.6.3" - } - }, - "jest-get-type": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", - "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", - "dev": true - }, - "jest-haste-map": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.6.4.tgz", - "integrity": "sha512-12Ad+VNTDHxKf7k+M65sviyynRoZYuL1/GTuhEVb8RYsNSNln71nANRb/faSyWvx0j+gHcivChXHIoMJrGYjog==", - "dev": true, - "requires": { - "@jest/types": "^29.6.3", - "@types/graceful-fs": "^4.1.3", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "fsevents": "^2.3.2", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.6.3", - "jest-util": "^29.6.3", - "jest-worker": "^29.6.4", - "micromatch": "^4.0.4", - "walker": "^1.0.8" - }, - "dependencies": { - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "jest-worker": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.4.tgz", - "integrity": "sha512-6dpvFV4WjcWbDVGgHTWo/aupl8/LbBx2NSKfiwqf79xC/yeJjKHT1+StcKy/2KTmW16hE68ccKVOtXf+WZGz7Q==", - "dev": true, - "requires": { - "@types/node": "*", - "jest-util": "^29.6.3", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - } - }, - "supports-color": { - "version": "8.1.1", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-leak-detector": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.6.3.tgz", - "integrity": "sha512-0kfbESIHXYdhAdpLsW7xdwmYhLf1BRu4AA118/OxFm0Ho1b2RcTmO4oF6aAMaxpxdxnJ3zve2rgwzNBD4Zbm7Q==", - "dev": true, - "requires": { - "jest-get-type": "^29.6.3", - "pretty-format": "^29.6.3" - } - }, - "jest-matcher-utils": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.6.4.tgz", - "integrity": "sha512-KSzwyzGvK4HcfnserYqJHYi7sZVqdREJ9DMPAKVbS98JsIAvumihaNUbjrWw0St7p9IY7A9UskCW5MYlGmBQFQ==", - "dev": true, - "requires": { - "chalk": "^4.0.0", - "jest-diff": "^29.6.4", - "jest-get-type": "^29.6.3", - "pretty-format": "^29.6.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-message-util": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.6.3.tgz", - "integrity": "sha512-FtzaEEHzjDpQp51HX4UMkPZjy46ati4T5pEMyM6Ik48ztu4T9LQplZ6OsimHx7EuM9dfEh5HJa6D3trEftu3dA==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.12.13", - "@jest/types": "^29.6.3", - "@types/stack-utils": "^2.0.0", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "micromatch": "^4.0.4", - "pretty-format": "^29.6.3", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-mock": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.6.3.tgz", - "integrity": "sha512-Z7Gs/mOyTSR4yPsaZ72a/MtuK6RnC3JYqWONe48oLaoEcYwEDxqvbXz85G4SJrm2Z5Ar9zp6MiHF4AlFlRM4Pg==", - "dev": true, - "requires": { - "@jest/types": "^29.6.3", - "@types/node": "*", - "jest-util": "^29.6.3" - } - }, - "jest-pnp-resolver": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", - "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", - "dev": true, - "requires": {} - }, - "jest-regex-util": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", - "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", - "dev": true - }, - "jest-resolve": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.6.4.tgz", - "integrity": "sha512-fPRq+0vcxsuGlG0O3gyoqGTAxasagOxEuyoxHeyxaZbc9QNek0AmJWSkhjlMG+mTsj+8knc/mWb3fXlRNVih7Q==", - "dev": true, - "requires": { - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", - "jest-pnp-resolver": "^1.2.2", - "jest-util": "^29.6.3", - "jest-validate": "^29.6.3", - "resolve": "^1.20.0", - "resolve.exports": "^2.0.0", - "slash": "^3.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-resolve-dependencies": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.6.4.tgz", - "integrity": "sha512-7+6eAmr1ZBF3vOAJVsfLj1QdqeXG+WYhidfLHBRZqGN24MFRIiKG20ItpLw2qRAsW/D2ZUUmCNf6irUr/v6KHA==", - "dev": true, - "requires": { - "jest-regex-util": "^29.6.3", - "jest-snapshot": "^29.6.4" - } - }, - "jest-runner": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.6.4.tgz", - "integrity": "sha512-SDaLrMmtVlQYDuG0iSPYLycG8P9jLI+fRm8AF/xPKhYDB2g6xDWjXBrR5M8gEWsK6KVFlebpZ4QsrxdyIX1Jaw==", - "dev": true, - "requires": { - "@jest/console": "^29.6.4", - "@jest/environment": "^29.6.4", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "emittery": "^0.13.1", - "graceful-fs": "^4.2.9", - "jest-docblock": "^29.6.3", - "jest-environment-node": "^29.6.4", - "jest-haste-map": "^29.6.4", - "jest-leak-detector": "^29.6.3", - "jest-message-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-runtime": "^29.6.4", - "jest-util": "^29.6.3", - "jest-watcher": "^29.6.4", - "jest-worker": "^29.6.4", - "p-limit": "^3.1.0", - "source-map-support": "0.5.13" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - 
"version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "jest-worker": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.4.tgz", - "integrity": "sha512-6dpvFV4WjcWbDVGgHTWo/aupl8/LbBx2NSKfiwqf79xC/yeJjKHT1+StcKy/2KTmW16hE68ccKVOtXf+WZGz7Q==", - "dev": true, - "requires": { - "@types/node": "*", - "jest-util": "^29.6.3", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "dependencies": { - "supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - }, - "source-map-support": { - "version": "0.5.13", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", - "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", - "dev": true, - "requires": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-runtime": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.6.4.tgz", - "integrity": "sha512-s/QxMBLvmwLdchKEjcLfwzP7h+jsHvNEtxGP5P+Fl1FMaJX2jMiIqe4rJw4tFprzCwuSvVUo9bn0uj4gNRXsbA==", - "dev": true, - "requires": { - "@jest/environment": "^29.6.4", - "@jest/fake-timers": "^29.6.4", - "@jest/globals": "^29.6.4", - "@jest/source-map": "^29.6.3", - "@jest/test-result": "^29.6.4", - "@jest/transform": "^29.6.4", - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "cjs-module-lexer": "^1.0.0", - "collect-v8-coverage": "^1.0.0", - "glob": "^7.1.3", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-mock": "^29.6.3", - "jest-regex-util": "^29.6.3", - "jest-resolve": "^29.6.4", - "jest-snapshot": "^29.6.4", - "jest-util": "^29.6.3", - "slash": "^3.0.0", - "strip-bom": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-snapshot": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.6.4.tgz", - "integrity": "sha512-VC1N8ED7+4uboUKGIDsbvNAZb6LakgIPgAF4RSpF13dN6YaMokfRqO+BaqK4zIh6X3JffgwbzuGqDEjHm/MrvA==", - "dev": true, - "requires": { - "@babel/core": "^7.11.6", - "@babel/generator": "^7.7.2", - "@babel/plugin-syntax-jsx": "^7.7.2", - "@babel/plugin-syntax-typescript": "^7.7.2", - "@babel/types": "^7.3.3", - "@jest/expect-utils": "^29.6.4", - "@jest/transform": "^29.6.4", - "@jest/types": "^29.6.3", - "babel-preset-current-node-syntax": "^1.0.0", - "chalk": "^4.0.0", - "expect": "^29.6.4", - "graceful-fs": "^4.2.9", - "jest-diff": "^29.6.4", - "jest-get-type": "^29.6.3", - "jest-matcher-utils": "^29.6.4", - "jest-message-util": "^29.6.3", - "jest-util": "^29.6.3", - "natural-compare": "^1.4.0", - "pretty-format": "^29.6.3", - "semver": "^7.5.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-util": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.6.3.tgz", - "integrity": "sha512-QUjna/xSy4B32fzcKTSz1w7YYzgiHrjjJjevdRf61HYk998R5vVMMNmrHESYZVDS5DSWs+1srPLPKxXPkeSDOA==", - "dev": true, - "requires": { - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", - "dev": true - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-validate": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.6.3.tgz", - "integrity": "sha512-e7KWZcAIX+2W1o3cHfnqpGajdCs1jSM3DkXjGeLSNmCazv1EeI1ggTeK5wdZhF+7N+g44JI2Od3veojoaumlfg==", - "dev": true, - "requires": { - "@jest/types": "^29.6.3", - "camelcase": "^6.2.0", - "chalk": "^4.0.0", - "jest-get-type": "^29.6.3", - "leven": 
"^3.1.0", - "pretty-format": "^29.6.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-watcher": { - "version": "29.6.4", - "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.6.4.tgz", - "integrity": "sha512-oqUWvx6+On04ShsT00Ir9T4/FvBeEh2M9PTubgITPxDa739p4hoQweWPRGyYeaojgT0xTpZKF0Y/rSY1UgMxvQ==", - "dev": true, - "requires": { - "@jest/test-result": "^29.6.4", - "@jest/types": "^29.6.3", - "@types/node": "*", - "ansi-escapes": "^4.2.1", - "chalk": "^4.0.0", - "emittery": "^0.13.1", - "jest-util": "^29.6.3", - "string-length": "^4.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "jest-worker": { - "version": "27.5.1", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", - "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", - "requires": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "dependencies": { - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "joi": { - "version": "17.6.0", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.6.0.tgz", - "integrity": "sha512-OX5dG6DTbcr/kbMFj0KGYxuew69HPcAE3K/sZpEV2nP6e/j/C0HV+HNiBPCASxdx5T7DMoa0s8UeHWMnb6n2zw==", - "requires": { - "@hapi/hoek": "^9.0.0", - "@hapi/topo": "^5.0.0", - "@sideway/address": "^4.1.3", - "@sideway/formula": "^3.0.0", - "@sideway/pinpoint": "^2.0.0" - } - }, - "jpegtran-bin": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jpegtran-bin/-/jpegtran-bin-4.0.0.tgz", - "integrity": "sha512-2cRl1ism+wJUoYAYFt6O/rLBfpXNWG2dUWbgcEkTt5WGMnqI46eEro8T4C5zGROxKRqyKpCBSdHPvt5UYCtxaQ==", - "requires": { - "bin-build": "^3.0.0", - "bin-wrapper": "^4.0.0", - "logalot": "^2.0.0" - } - }, - "js-levenshtein": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz", - "integrity": "sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==" - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" - }, - "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "requires": { - "argparse": "^2.0.1" - } - }, - "jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" - }, - "jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==" - }, - "json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" - }, - "json-parse-better-errors": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" - }, - "json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" - }, - "json-pointer": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/json-pointer/-/json-pointer-0.6.2.tgz", - "integrity": "sha512-vLWcKbOaXlO+jvRy4qNd+TI1QUPZzfJj1tpJ3vAXDych5XJf93ftpUKe5pKCrzyIIwgBJcOcCVRUfqQP25afBw==", - "requires": { - "foreach": "^2.0.4" - } - }, - "json-schema": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", - "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" - }, - "json-schema-compare": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/json-schema-compare/-/json-schema-compare-0.2.2.tgz", - "integrity": "sha512-c4WYmDKyJXhs7WWvAWm3uIYnfyWFoIp+JEoX34rctVvEkMYCPGhXtvmFFXiffBbxfZsvQ0RNnV5H7GvDF5HCqQ==", - "requires": { - "lodash": "^4.17.4" - } - }, - "json-schema-merge-allof": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/json-schema-merge-allof/-/json-schema-merge-allof-0.8.1.tgz", - "integrity": "sha512-CTUKmIlPJbsWfzRRnOXz+0MjIqvnleIXwFTzz+t9T86HnYX/Rozria6ZVGLktAU9e+NygNljveP+yxqtQp/Q4w==", - "requires": { - "compute-lcm": "^1.1.2", - "json-schema-compare": "^0.2.2", - "lodash": "^4.17.20" - } - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" - }, - "json5": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz", - "integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==" - }, - "jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "requires": { - "graceful-fs": "^4.1.6", - "universalify": "^2.0.0" - } - }, - "jsprim": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", - "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.4.0", - "verror": "1.10.0" - } - }, - "keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "requires": { - "json-buffer": "3.0.0" - } - }, - "khroma": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", - "integrity": 
"sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" - }, - "kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" - }, - "kleur": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", - "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==" - }, - "klona": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz", - "integrity": "sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==" - }, - "lang-feel": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lang-feel/-/lang-feel-2.0.0.tgz", - "integrity": "sha512-cMD6EIhb7vyXLs4kXmaphfZZNr5SkbRxmkfsZUjUJzOV5YxyKBF73VI/8fC3GDUifzs0lVo2DruVszk5igrddg==", - "requires": { - "@codemirror/autocomplete": "^6.9.1", - "@codemirror/language": "^6.9.1", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.21.0", - "@lezer/common": "^1.1.2", - "lezer-feel": "^1.2.0" - } - }, - "latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "requires": { - "package-json": "^6.3.0" - } - }, - "layout-base": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", - "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==" - }, - "lazy-cache": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-2.0.2.tgz", - "integrity": "sha512-7vp2Acd2+Kz4XkzxGxaB1FWOi8KjWIWsgdfD5MCb86DWvlLqhRPM+d6Pro3iNEL5VT9mstz5hKAlcd+QR6H3aA==", - "requires": { - "set-getter": "^0.1.0" - } - }, - "leven": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==" - }, - "lezer-feel": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/lezer-feel/-/lezer-feel-1.2.8.tgz", - "integrity": "sha512-CO5JEpwNhH1p8mmRRcqMjJrYxO3vNx0nEsF9Ak4OPa1pNHEqvJ2rwYwM9LjZ7jh/Sl5FxbTJT/teF9a+zWmflg==", - "requires": { - "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.4.0" - } - }, - "lilconfig": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", - "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==" - }, - "lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" - }, - "lint-staged": { - "version": "14.0.1", - "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-14.0.1.tgz", - "integrity": "sha512-Mw0cL6HXnHN1ag0mN/Dg4g6sr8uf8sn98w2Oc1ECtFto9tvRF7nkXGJRbx8gPlHyoR0pLyBr2lQHbWwmUHe1Sw==", - "dev": true, - "requires": { - "chalk": "5.3.0", - "commander": "11.0.0", - "debug": "4.3.4", - "execa": "7.2.0", - "lilconfig": "2.1.0", - "listr2": "6.6.1", - "micromatch": "4.0.5", - "pidtree": "0.6.0", - "string-argv": "0.3.2", - "yaml": "2.3.1" - 
}, - "dependencies": { - "chalk": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", - "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", - "dev": true - }, - "commander": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-11.0.0.tgz", - "integrity": "sha512-9HMlXtt/BNoYr8ooyjjNRdIilOTkVJXB+GhxMTtOKwk0R4j4lS4NpjuqmRxroBfnfTSHQIHQB7wryHhXarNjmQ==", - "dev": true - }, - "execa": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-7.2.0.tgz", - "integrity": "sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==", - "dev": true, - "requires": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.1", - "human-signals": "^4.3.0", - "is-stream": "^3.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^5.1.0", - "onetime": "^6.0.0", - "signal-exit": "^3.0.7", - "strip-final-newline": "^3.0.0" - } - }, - "get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", - "dev": true - }, - "human-signals": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-4.3.1.tgz", - "integrity": "sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==", - "dev": true - }, - "is-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", - "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", - "dev": true - }, - "mimic-fn": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", - "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", - "dev": true - }, - "npm-run-path": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz", - "integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==", - "dev": true, - "requires": { - "path-key": "^4.0.0" - } - }, - "onetime": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", - "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", - "dev": true, - "requires": { - "mimic-fn": "^4.0.0" - } - }, - "path-key": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "dev": true - }, - "strip-final-newline": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", - "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", - "dev": true - }, - "yaml": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz", - "integrity": "sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==", - "dev": true - } - } - }, - "liquid-json": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/liquid-json/-/liquid-json-0.3.1.tgz", - "integrity": 
"sha512-wUayTU8MS827Dam6MxgD72Ui+KOSF+u/eIqpatOtjnvgJ0+mnDq33uC2M7J0tPK+upe/DpUAuK4JUU89iBoNKQ==" - }, - "list-item": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/list-item/-/list-item-1.1.1.tgz", - "integrity": "sha512-S3D0WZ4J6hyM8o5SNKWaMYB1ALSacPZ2nHGEuCjmHZ+dc03gFeNZoNDcqfcnO4vDhTZmNrqrpYZCdXsRh22bzw==", - "requires": { - "expand-range": "^1.8.1", - "extend-shallow": "^2.0.1", - "is-number": "^2.1.0", - "repeat-string": "^1.5.2" - }, - "dependencies": { - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-number": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", - "integrity": "sha512-QUzH43Gfb9+5yckcrSA0VBDwEtDUchrk4F6tfJZQuNzDJbEDB9cZNzSfXGQ1jqmdDY/kl41lUOWM9syA8z8jlg==", - "requires": { - "kind-of": "^3.0.2" - } - }, - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "listenercount": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/listenercount/-/listenercount-1.0.1.tgz", - "integrity": "sha512-3mk/Zag0+IJxeDrxSgaDPy4zZ3w05PRZeJNnlWhzFz5OkX49J4krc+A8X2d2M69vGMBEX0uyl8M+W+8gH+kBqQ==" - }, - "listr2": { - "version": "6.6.1", - "resolved": "https://registry.npmjs.org/listr2/-/listr2-6.6.1.tgz", - "integrity": "sha512-+rAXGHh0fkEWdXBmX+L6mmfmXmXvDGEKzkjxO+8mP3+nI/r/CWznVBvsibXdxda9Zz0OW2e2ikphN3OwCT/jSg==", - "dev": true, - "requires": { - "cli-truncate": "^3.1.0", - "colorette": "^2.0.20", - "eventemitter3": "^5.0.1", - "log-update": "^5.0.1", - "rfdc": "^1.3.0", - "wrap-ansi": "^8.1.0" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true - }, - "ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "eventemitter3": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz", - "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", - "dev": true - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": 
"sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - }, - "wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, - "requires": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - } - } - } - }, - "livereload-js": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/livereload-js/-/livereload-js-2.4.0.tgz", - "integrity": "sha512-XPQH8Z2GDP/Hwz2PCDrh2mth4yFejwA1OZ/81Ti3LgKyhDcEjsSsqFWZojHG0va/duGd+WyosY7eXLDoOyqcPw==" - }, - "load-json-file": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", - "integrity": "sha512-cy7ZdNRXdablkXYNI049pthVeXFurRyb9+hA/dZzerZ0pGTx42z+y+ssxBaVV2l70t1muq5IdKhn4UtcoGUY9A==", - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "pinkie-promise": "^2.0.0", - "strip-bom": "^2.0.0" - }, - "dependencies": { - "parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==", - "requires": { - "error-ex": "^1.2.0" - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==" - }, - "strip-bom": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", - "integrity": "sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==", - "requires": { - "is-utf8": "^0.2.0" - } - } - } - }, - "load-script": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", - "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ=" - }, - "loader-runner": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.2.0.tgz", - "integrity": "sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw==" - }, - "loader-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz", - "integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==", - "requires": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - } - }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "requires": { - "p-locate": "^4.1.0" - } - }, - "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "lodash-es": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", - "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" - }, - "lodash._reinterpolate": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", - "integrity": "sha512-xYHt68QRoYGjeeM/XOE1uJtvXQAgvszfBhjV4yvsQH0u2i9I6cI6c6/eG4Hh3UAOVn0y/xAXwmTzEay49Q//HA==" - }, - "lodash.assignin": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.assignin/-/lodash.assignin-4.2.0.tgz", - "integrity": "sha512-yX/rx6d/UTVh7sSVWVSIMjfnz95evAgDFdb1ZozC35I9mSFCkmzptOzevxjgbQUsc78NR44LVHWjsoMQXy9FDg==" - }, - "lodash.bind": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/lodash.bind/-/lodash.bind-4.2.1.tgz", - "integrity": "sha512-lxdsn7xxlCymgLYo1gGvVrfHmkjDiyqVv62FAeF2i5ta72BipE1SLxw8hPEPLhD4/247Ijw07UQH7Hq/chT5LA==" - }, - "lodash.chunk": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz", - "integrity": "sha512-ZzydJKfUHJwHa+hF5X66zLFCBrWn5GeF28OHEr4WVWtNDXlQ/IjWKPBiikqKo2ne0+v6JgCgJ0GzJp8k8bHC7w==" - }, - "lodash.clonedeep": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", - "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" - }, - "lodash.curry": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.curry/-/lodash.curry-4.1.1.tgz", - "integrity": "sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==" - }, - "lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" - }, - "lodash.defaults": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz", - "integrity": "sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==" - }, - "lodash.escape": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/lodash.escape/-/lodash.escape-4.0.1.tgz", - "integrity": "sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw==" - }, - "lodash.filter": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.filter/-/lodash.filter-4.6.0.tgz", - "integrity": "sha512-pXYUy7PR8BCLwX5mgJ/aNtyOvuJTdZAo9EQFUvMIYugqmJxnrYaANvTbgndOzHSCSR0wnlBBfRXJL5SbWxo3FQ==" - }, - "lodash.flatten": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz", - "integrity": "sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g==" - }, - "lodash.flattendeep": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", - "integrity": "sha512-uHaJFihxmJcEX3kT4I23ABqKKalJ/zDrDg0lsFtc1h+3uw49SIJ5beyhx5ExVRti3AvKoOJngIj7xz3oylPdWQ==" - }, - "lodash.flow": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/lodash.flow/-/lodash.flow-3.5.0.tgz", - "integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==" - }, - "lodash.foreach": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.foreach/-/lodash.foreach-4.5.0.tgz", - "integrity": "sha512-aEXTF4d+m05rVOAUG3z4vZZ4xVexLKZGF0lIxuHZ1Hplpk/3B6Z1+/ICICYRLm7c41Z2xiejbkCkJoTlypoXhQ==" - }, - "lodash.isequal": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", - 
"integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==" - }, - "lodash.map": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.map/-/lodash.map-4.6.0.tgz", - "integrity": "sha512-worNHGKLDetmcEYDvh2stPCrrQRkP20E4l0iIS7F8EvzMqBBi7ltvFN5m1HvTf1P7Jk1txKhvFcmYsCr8O2F1Q==" - }, - "lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" - }, - "lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" - }, - "lodash.padstart": { - "version": "4.6.1", - "resolved": "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz", - "integrity": "sha512-sW73O6S8+Tg66eY56DBk85aQzzUJDtpoXFBgELMd5P/SotAguo+1kYO6RuYgXxA4HJH3LFTFPASX6ET6bjfriw==" - }, - "lodash.pick": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.pick/-/lodash.pick-4.4.0.tgz", - "integrity": "sha512-hXt6Ul/5yWjfklSGvLQl8vM//l3FtyHZeuelpzK6mm99pNvN9yTDruNZPEJZD1oWrqo+izBmB7oUfWgcCX7s4Q==" - }, - "lodash.reduce": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.reduce/-/lodash.reduce-4.6.0.tgz", - "integrity": "sha512-6raRe2vxCYBhpBu+B+TtNGUzah+hQjVdu3E17wfusjyrXBka2nBS8OH/gjVZ5PvHOhWmIZTYri09Z6n/QfnNMw==" - }, - "lodash.reject": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.reject/-/lodash.reject-4.6.0.tgz", - "integrity": "sha512-qkTuvgEzYdyhiJBx42YPzPo71R1aEr0z79kAv7Ixg8wPFEjgRgJdUsGMG3Hf3OYSF/kHI79XhNlt+5Ar6OzwxQ==" - }, - "lodash.some": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.some/-/lodash.some-4.6.0.tgz", - "integrity": "sha512-j7MJE+TuT51q9ggt4fSgVqro163BEFjAt3u97IqU+JA2DkWl80nFTrowzLpZ/BnpN7rrl0JA/593NAdd8p/scQ==" - }, - "lodash.sortby": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", - "integrity": "sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==" - }, - "lodash.template": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz", - "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==", - "requires": { - "lodash._reinterpolate": "^3.0.0", - "lodash.templatesettings": "^4.0.0" - } - }, - "lodash.templatesettings": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz", - "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==", - "requires": { - "lodash._reinterpolate": "^3.0.0" - } - }, - "lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" - }, - "log-update": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/log-update/-/log-update-5.0.1.tgz", - "integrity": "sha512-5UtUDQ/6edw4ofyljDNcOVJQ4c7OjDro4h3y8e1GQL5iYElYclVHJ3zeWchylvMaKnDbDilC8irOVyexnA/Slw==", - "dev": true, - "requires": { - "ansi-escapes": "^5.0.0", - "cli-cursor": "^4.0.0", - "slice-ansi": "^5.0.0", - "strip-ansi": "^7.0.1", - "wrap-ansi": "^8.0.1" 
- }, - "dependencies": { - "ansi-escapes": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-5.0.0.tgz", - "integrity": "sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==", - "dev": true, - "requires": { - "type-fest": "^1.0.2" - } - }, - "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true - }, - "ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - }, - "type-fest": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", - "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", - "dev": true - }, - "wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, - "requires": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - } - } - } - }, - "logalot": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/logalot/-/logalot-2.1.0.tgz", - "integrity": "sha512-Ah4CgdSRfeCJagxQhcVNMi9BfGYyEKLa6d7OA6xSbld/Hg3Cf2QiOa1mDpmG7Ve8LOH6DN3mdttzjQAvWTyVkw==", - "requires": { - "figures": "^1.3.5", - "squeak": "^1.0.0" - } - }, - "longest": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", - "integrity": "sha512-k+yt5n3l48JU4k8ftnKG6V7u32wyH2NfKzeMto9F/QRE0amxy/LayxwlvjjkZEIzqR+19IrtFO8p5kB9QaYUFg==" - }, - "loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "requires": { - "js-tokens": "^3.0.0 || ^4.0.0" - } - }, - "loud-rejection": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", - "integrity": "sha512-RPNliZOFkqFumDhvYqOaNY4Uz9oJM2K9tC6JWsJJsNdhuONW4LQHRBpb0qf4pJApVffI5N39SwzWZJuEhfd7eQ==", - "requires": { - "currently-unhandled": "^0.4.1", - "signal-exit": "^3.0.0" - } - }, - "lower-case": { - "version": "2.0.2", - "resolved": 
"https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", - "requires": { - "tslib": "^2.0.3" - } - }, - "lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" - }, - "lpad-align": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/lpad-align/-/lpad-align-1.1.2.tgz", - "integrity": "sha512-MMIcFmmR9zlGZtBcFOows6c2COMekHCIFJz3ew/rRpKZ1wR4mXDPzvcVqLarux8M33X4TPSq2Jdw8WJj0q0KbQ==", - "requires": { - "get-stdin": "^4.0.1", - "indent-string": "^2.1.0", - "longest": "^1.0.0", - "meow": "^3.3.0" - }, - "dependencies": { - "indent-string": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", - "integrity": "sha512-aqwDFWSgSgfRaEwao5lg5KEcVd/2a+D1rvoG7NdilmYz0NwRk6StWpWdz/Hpk34MKPpx7s8XxUqimfcQK6gGlg==", - "requires": { - "repeating": "^2.0.0" - } - } - } - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "requires": { - "yallist": "^4.0.0" - } - }, - "luxon": { - "version": "3.4.4", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.4.4.tgz", - "integrity": "sha512-zobTr7akeGHnv7eBOXcRgMeCP6+uyYsczwmeRCauvpvaAltgNyTbLH/+VaEAPUeWBT+1GuNmz4wC/6jtQzbbVA==" - }, - "make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "requires": { - "semver": "^6.0.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "makeerror": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", - "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", - "dev": true, - "requires": { - "tmpl": "1.0.5" - } - }, - "map-cache": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", - "integrity": "sha512-8y/eV9QQZCiyn1SprXSrCmqJN0yNRATe+PO8ztwqrvrbdRLA3eYJF0yaR0YayLWkMbsQSKWS9N2gPcGEc4UsZg==" - }, - "map-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", - "integrity": "sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==" - }, - "map-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", - "integrity": "sha512-4y7uGv8bd2WdM9vpQsiQNo41Ln1NvhvDRuVt0k2JZQ+ezN2uaQes7lZeZ+QQUHOLQAtDaBJ+7wCbi+ab/KFs+w==", - "requires": { - "object-visit": "^1.0.0" - } - }, - "markdown-escapes": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz", - "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==" - }, - "markdown-link": { - "version": "0.1.1", - "resolved": 
"https://registry.npmjs.org/markdown-link/-/markdown-link-0.1.1.tgz", - "integrity": "sha512-TurLymbyLyo+kAUUAV9ggR9EPcDjP/ctlv9QAFiqUH7c+t6FlsbivPo9OKTU8xdOx9oNd2drW/Fi5RRElQbUqA==" - }, - "markdown-toc": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/markdown-toc/-/markdown-toc-1.2.0.tgz", - "integrity": "sha512-eOsq7EGd3asV0oBfmyqngeEIhrbkc7XVP63OwcJBIhH2EpG2PzFcbZdhy1jutXSlRBBVMNXHvMtSr5LAxSUvUg==", - "requires": { - "concat-stream": "^1.5.2", - "diacritics-map": "^0.1.0", - "gray-matter": "^2.1.0", - "lazy-cache": "^2.0.2", - "list-item": "^1.1.1", - "markdown-link": "^0.1.1", - "minimist": "^1.2.0", - "mixin-deep": "^1.1.3", - "object.pick": "^1.2.0", - "remarkable": "^1.7.1", - "repeat-string": "^1.6.1", - "strip-color": "^0.1.0" - }, - "dependencies": { - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "autolinker": { - "version": "0.28.1", - "resolved": "https://registry.npmjs.org/autolinker/-/autolinker-0.28.1.tgz", - "integrity": "sha512-zQAFO1Dlsn69eXaO6+7YZc+v84aquQKbwpzCE3L0stj56ERn9hutFxPopViLjo9G+rWwjozRhgS5KJ25Xy19cQ==", - "requires": { - "gulp-header": "^1.7.1" - } - }, - "gray-matter": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-2.1.1.tgz", - "integrity": "sha512-vbmvP1Fe/fxuT2QuLVcqb2BfK7upGhhbLIt9/owWEvPYrZZEkelLcq2HqzxosV+PQ67dUFLaAeNpH7C4hhICAA==", - "requires": { - "ansi-red": "^0.1.1", - "coffee-script": "^1.12.4", - "extend-shallow": "^2.0.1", - "js-yaml": "^3.8.1", - "toml": "^2.3.2" - } - }, - "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "remarkable": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/remarkable/-/remarkable-1.7.4.tgz", - "integrity": "sha512-e6NKUXgX95whv7IgddywbeN/ItCkWbISmc2DiqHJb0wTrqZIexqdco5b8Z3XZoo/48IdNVKM9ZCvTPJ4F5uvhg==", - "requires": { - "argparse": "^1.0.10", - "autolinker": "~0.28.0" - } - } - } - }, - "matches-selector": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/matches-selector/-/matches-selector-1.2.0.tgz", - "integrity": "sha512-c4vLwYWyl+Ji+U43eU/G5FwxWd4ZH0ePUsFs5y0uwD9HUEFBXUQ1zUUan+78IpRD+y4pUfG0nAzNM292K7ItvA==" - }, - "math-random": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.4.tgz", - "integrity": "sha512-rUxjysqif/BZQH2yhd5Aaq7vXMSx9NdEsQcyA07uEzIvxgI7zIr33gGsh+RU0/XjmQpCW7RsVof1vlkvQVCK5A==" - }, - "md5.js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "mdast-squeeze-paragraphs": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz", - "integrity": "sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==", - "requires": { - "unist-util-remove": "^2.0.0" - } - }, - "mdast-util-definitions": { - "version": "4.0.0", - 
"resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz", - "integrity": "sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==", - "requires": { - "unist-util-visit": "^2.0.0" - }, - "dependencies": { - "unist-util-visit": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - } - }, - "unist-util-visit-parents": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - } - } - } - }, - "mdast-util-from-markdown": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz", - "integrity": "sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==", - "requires": { - "@types/mdast": "^3.0.0", - "@types/unist": "^2.0.0", - "decode-named-character-reference": "^1.0.0", - "mdast-util-to-string": "^3.1.0", - "micromark": "^3.0.0", - "micromark-util-decode-numeric-character-reference": "^1.0.0", - "micromark-util-decode-string": "^1.0.0", - "micromark-util-normalize-identifier": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0", - "unist-util-stringify-position": "^3.0.0", - "uvu": "^0.5.0" - }, - "dependencies": { - "mdast-util-to-string": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz", - "integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==", - "requires": { - "@types/mdast": "^3.0.0" - } - }, - "unist-util-stringify-position": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", - "requires": { - "@types/unist": "^2.0.0" - } - } - } - }, - "mdast-util-to-hast": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz", - "integrity": "sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==", - "requires": { - "@types/mdast": "^3.0.0", - "@types/unist": "^2.0.0", - "mdast-util-definitions": "^4.0.0", - "mdurl": "^1.0.0", - "unist-builder": "^2.0.0", - "unist-util-generated": "^1.0.0", - "unist-util-position": "^3.0.0", - "unist-util-visit": "^2.0.0" - }, - "dependencies": { - "unist-util-visit": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - } - }, - "unist-util-visit-parents": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": 
"sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - } - } - } - }, - "mdast-util-to-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", - "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==" - }, - "mdn-data": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" - }, - "mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" - }, - "media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==" - }, - "memfs": { - "version": "3.4.7", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.4.7.tgz", - "integrity": "sha512-ygaiUSNalBX85388uskeCyhSAoOSgzBbtVCr9jA2RROssFL9Q19/ZXFqS+2Th2sr1ewNIWgFdLzLC3Yl1Zv+lw==", - "requires": { - "fs-monkey": "^1.0.3" - } - }, - "memoize-one": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-5.1.1.tgz", - "integrity": "sha512-HKeeBpWvqiVJD57ZUAsJNm71eHTykffzcLZVYWiVfQeI1rJtuEaS7hQiEpWfVVk18donPwJEcFKIkCmPJNOhHA==" - }, - "meow": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/meow/-/meow-3.7.0.tgz", - "integrity": "sha512-TNdwZs0skRlpPpCUK25StC4VH+tP5GgeY1HQOOGP+lQ2xtdkN2VtT/5tiX9k3IWpkBPV9b3LsAWXn4GGi/PrSA==", - "requires": { - "camelcase-keys": "^2.0.0", - "decamelize": "^1.1.2", - "loud-rejection": "^1.0.0", - "map-obj": "^1.0.1", - "minimist": "^1.1.3", - "normalize-package-data": "^2.3.4", - "object-assign": "^4.0.1", - "read-pkg-up": "^1.0.1", - "redent": "^1.0.0", - "trim-newlines": "^1.0.0" - } - }, - "merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" - }, - "merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" - }, - "merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==" - }, - "mermaid": { - "version": "9.4.3", - "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-9.4.3.tgz", - "integrity": "sha512-TLkQEtqhRSuEHSE34lh5bCa94KATCyluAXmFnNI2PRZwOpXFeqiJWwZl+d2CcemE1RS6QbbueSSq9QIg8Uxcyw==", - "requires": { - "@braintree/sanitize-url": "^6.0.0", - "cytoscape": "^3.23.0", - "cytoscape-cose-bilkent": "^4.1.0", - "cytoscape-fcose": "^2.1.0", - "d3": "^7.4.0", - "dagre-d3-es": "7.0.9", - "dayjs": "^1.11.7", - "dompurify": "2.4.3", - "elkjs": "^0.8.2", - "khroma": "^2.0.0", - "lodash-es": "^4.17.21", - "non-layered-tidy-tree-layout": "^2.0.2", - "stylis": "^4.1.2", - "ts-dedent": "^2.2.0", - "uuid": "^9.0.0", - "web-worker": 
"^1.2.0" - }, - "dependencies": { - "dompurify": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-2.4.3.tgz", - "integrity": "sha512-q6QaLcakcRjebxjg8/+NP+h0rPfatOgOzc46Fst9VAA3jF2ApfKBNKMzdP4DYTqtUMXSCd5pRS/8Po/OmoCHZQ==" - }, - "uuid": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", - "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==" - } - } - }, - "methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==" - }, - "microevent.ts": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/microevent.ts/-/microevent.ts-0.1.1.tgz", - "integrity": "sha512-jo1OfR4TaEwd5HOrt5+tAZ9mqT4jmpNAusXtyfNzqVm9uiSYFZlKM1wYL4oU7azZW/PxQW53wM0S6OR1JHNa2g==" - }, - "micromark": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.2.0.tgz", - "integrity": "sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==", - "requires": { - "@types/debug": "^4.0.0", - "debug": "^4.0.0", - "decode-named-character-reference": "^1.0.0", - "micromark-core-commonmark": "^1.0.1", - "micromark-factory-space": "^1.0.0", - "micromark-util-character": "^1.0.0", - "micromark-util-chunked": "^1.0.0", - "micromark-util-combine-extensions": "^1.0.0", - "micromark-util-decode-numeric-character-reference": "^1.0.0", - "micromark-util-encode": "^1.0.0", - "micromark-util-normalize-identifier": "^1.0.0", - "micromark-util-resolve-all": "^1.0.0", - "micromark-util-sanitize-uri": "^1.0.0", - "micromark-util-subtokenize": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.1", - "uvu": "^0.5.0" - } - }, - "micromark-core-commonmark": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz", - "integrity": "sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==", - "requires": { - "decode-named-character-reference": "^1.0.0", - "micromark-factory-destination": "^1.0.0", - "micromark-factory-label": "^1.0.0", - "micromark-factory-space": "^1.0.0", - "micromark-factory-title": "^1.0.0", - "micromark-factory-whitespace": "^1.0.0", - "micromark-util-character": "^1.0.0", - "micromark-util-chunked": "^1.0.0", - "micromark-util-classify-character": "^1.0.0", - "micromark-util-html-tag-name": "^1.0.0", - "micromark-util-normalize-identifier": "^1.0.0", - "micromark-util-resolve-all": "^1.0.0", - "micromark-util-subtokenize": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.1", - "uvu": "^0.5.0" - } - }, - "micromark-factory-destination": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz", - "integrity": "sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==", - "requires": { - "micromark-util-character": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "micromark-factory-label": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz", - "integrity": "sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==", - 
"requires": { - "micromark-util-character": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0", - "uvu": "^0.5.0" - } - }, - "micromark-factory-space": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", - "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", - "requires": { - "micromark-util-character": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "micromark-factory-title": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz", - "integrity": "sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==", - "requires": { - "micromark-factory-space": "^1.0.0", - "micromark-util-character": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "micromark-factory-whitespace": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz", - "integrity": "sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==", - "requires": { - "micromark-factory-space": "^1.0.0", - "micromark-util-character": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "micromark-util-character": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", - "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", - "requires": { - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "micromark-util-chunked": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz", - "integrity": "sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==", - "requires": { - "micromark-util-symbol": "^1.0.0" - } - }, - "micromark-util-classify-character": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz", - "integrity": "sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==", - "requires": { - "micromark-util-character": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "micromark-util-combine-extensions": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz", - "integrity": "sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==", - "requires": { - "micromark-util-chunked": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "micromark-util-decode-numeric-character-reference": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz", - "integrity": "sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==", - "requires": { - "micromark-util-symbol": "^1.0.0" - } - }, - "micromark-util-decode-string": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz", - 
"integrity": "sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==", - "requires": { - "decode-named-character-reference": "^1.0.0", - "micromark-util-character": "^1.0.0", - "micromark-util-decode-numeric-character-reference": "^1.0.0", - "micromark-util-symbol": "^1.0.0" - } - }, - "micromark-util-encode": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz", - "integrity": "sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==" - }, - "micromark-util-html-tag-name": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz", - "integrity": "sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==" - }, - "micromark-util-normalize-identifier": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz", - "integrity": "sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==", - "requires": { - "micromark-util-symbol": "^1.0.0" - } - }, - "micromark-util-resolve-all": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz", - "integrity": "sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==", - "requires": { - "micromark-util-types": "^1.0.0" - } - }, - "micromark-util-sanitize-uri": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz", - "integrity": "sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==", - "requires": { - "micromark-util-character": "^1.0.0", - "micromark-util-encode": "^1.0.0", - "micromark-util-symbol": "^1.0.0" - } - }, - "micromark-util-subtokenize": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz", - "integrity": "sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==", - "requires": { - "micromark-util-chunked": "^1.0.0", - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0", - "uvu": "^0.5.0" - } - }, - "micromark-util-symbol": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", - "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==" - }, - "micromark-util-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", - "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==" - }, - "micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", - "requires": { - "braces": "^3.0.2", - "picomatch": "^2.3.1" - } - }, - "miller-rabin": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", - "requires": { - "bn.js": 
"^4.0.0", - "brorand": "^1.0.1" - }, - "dependencies": { - "bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - } - } - }, - "mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==" - }, - "mime-db": { - "version": "1.51.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.51.0.tgz", - "integrity": "sha512-5y8A56jg7XVQx2mbv1lu49NR4dokRnhZYTtL+KGfaa27uq4pSTXkwQkFJl4pkRMyNFz/EtYDSkiiEHx3F7UN6g==" - }, - "mime-format": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mime-format/-/mime-format-2.0.1.tgz", - "integrity": "sha512-XxU3ngPbEnrYnNbIX+lYSaYg0M01v6p2ntd2YaFksTu0vayaw5OJvbdRyWs07EYRlLED5qadUZ+xo+XhOvFhwg==", - "requires": { - "charset": "^1.0.0" - } - }, - "mime-types": { - "version": "2.1.34", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.34.tgz", - "integrity": "sha512-6cP692WwGIs9XXdOO4++N+7qjqv0rqxxVvJ3VHPh/Sc9mVZcQP+ZGhkKiTvWMQRr2tbHkJP/Yn7Y0npb3ZBs4A==", - "requires": { - "mime-db": "1.51.0" - } - }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" - }, - "mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==" - }, - "min-dash": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/min-dash/-/min-dash-4.2.1.tgz", - "integrity": "sha512-to+unsToePnm7cUeR9TrMzFlETHd/UXmU+ELTRfWZj5XGT41KF6X3L233o3E/GdEs3sk2Tbw/lOLD1avmWkg8A==" - }, - "min-dom": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/min-dom/-/min-dom-4.1.0.tgz", - "integrity": "sha512-1lj1EyoSwY/UmTeT/hhPiZTsq+vK9D+8FAJ/53iK5jT1otkG9rJTixSKdjmTieEvdfES+sKbbTptzaQJhnacjA==", - "requires": { - "component-event": "^0.2.1", - "domify": "^1.4.1", - "min-dash": "^4.0.0" - } - }, - "mini-create-react-context": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/mini-create-react-context/-/mini-create-react-context-0.4.1.tgz", - "integrity": "sha512-YWCYEmd5CQeHGSAKrYvXgmzzkrvssZcuuQDDeqkT+PziKGMgE+0MCCtcKbROzocGBG1meBLl2FotlRwf4gAzbQ==", - "requires": { - "@babel/runtime": "^7.12.1", - "tiny-warning": "^1.0.3" - } - }, - "mini-css-extract-plugin": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.6.1.tgz", - "integrity": "sha512-wd+SD57/K6DiV7jIR34P+s3uckTRuQvx0tKPcvjFlrEylk6P4mQ2KSWk1hblj1Kxaqok7LogKOieygXqBczNlg==", - "requires": { - "schema-utils": "^4.0.0" - }, - "dependencies": { - "ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": 
"sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "requires": { - "fast-deep-equal": "^3.1.3" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, - "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "requires": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - } - } - } - }, - "minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" - }, - "minimalistic-crypto-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==" - }, - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" - }, - "minipass": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==" - }, - "mitt": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.1.tgz", - "integrity": "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==" - }, - "mixin-deep": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", - "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", - "requires": { - "for-in": "^1.0.2", - "is-extendable": "^1.0.1" - }, - "dependencies": { - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "requires": { - "is-plain-object": "^2.0.4" - } - } - } - }, - "mixpanel-browser": { - "version": "2.47.0", - "resolved": "https://registry.npmjs.org/mixpanel-browser/-/mixpanel-browser-2.47.0.tgz", - "integrity": "sha512-Ldrva0fRBEIFWmEibBQO1PulfpJVF3pf28Guk09lDirDaSQqqU/xs9zQLwN2rL5VwVtsP1aD3JaCgaa98EjojQ==" - }, - "mkdirp": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", - "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", - "requires": { - "minimist": "^1.2.6" - } - }, - "moo": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/moo/-/moo-0.5.2.tgz", - 
"integrity": "sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==" - }, - "mri": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", - "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==" - }, - "mrmime": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.0.tgz", - "integrity": "sha512-a70zx7zFfVO7XpnQ2IX1Myh9yY4UYvfld/dikWRnsXxbyvMcfz+u6UfgNAtH+k2QqtJuzVpv6eLTx1G2+WKZbQ==" - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "multicast-dns": { - "version": "7.2.5", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", - "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", - "requires": { - "dns-packet": "^5.2.2", - "thunky": "^1.0.2" - } - }, - "mustache": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", - "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==" - }, - "mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "requires": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, - "nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==" - }, - "nanomatch": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", - "requires": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "fragment-cache": "^0.2.1", - "is-windows": "^1.0.2", - "kind-of": "^6.0.2", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "dependencies": { - "extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", - "requires": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - } - }, - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "requires": { - "is-plain-object": "^2.0.4" - } - } - } - }, - "natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true - }, - "nearley": { - "version": "2.20.1", - "resolved": "https://registry.npmjs.org/nearley/-/nearley-2.20.1.tgz", - "integrity": "sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==", - "requires": { - 
"commander": "^2.19.0", - "moo": "^0.5.0", - "railroad-diagrams": "^1.0.0", - "randexp": "0.4.6" - }, - "dependencies": { - "commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - } - } - }, - "negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==" - }, - "neo-async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" - }, - "nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "no-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", - "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", - "requires": { - "lower-case": "^2.0.2", - "tslib": "^2.0.3" - } - }, - "node-emoji": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", - "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==", - "requires": { - "lodash": "^4.17.21" - } - }, - "node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "requires": { - "whatwg-url": "^5.0.0" - } - }, - "node-fetch-h2": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/node-fetch-h2/-/node-fetch-h2-2.3.0.tgz", - "integrity": "sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==", - "requires": { - "http2-client": "^1.2.5" - } - }, - "node-forge": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", - "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==" - }, - "node-int64": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", - "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", - "dev": true - }, - "node-polyfill-webpack-plugin": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/node-polyfill-webpack-plugin/-/node-polyfill-webpack-plugin-2.0.1.tgz", - "integrity": "sha512-ZUMiCnZkP1LF0Th2caY6J/eKKoA0TefpoVa68m/LQU1I/mE8rGt4fNYGgNuCcK+aG8P8P43nbeJ2RqJMOL/Y1A==", - "requires": { - "assert": "^2.0.0", - "browserify-zlib": "^0.2.0", - "buffer": "^6.0.3", - "console-browserify": "^1.2.0", - "constants-browserify": "^1.0.0", - "crypto-browserify": "^3.12.0", - "domain-browser": "^4.22.0", - "events": "^3.3.0", - "filter-obj": "^2.0.2", - "https-browserify": "^1.0.0", - "os-browserify": "^0.3.0", - "path-browserify": "^1.0.1", - "process": "^0.11.10", - "punycode": "^2.1.1", - "querystring-es3": "^0.2.1", - "readable-stream": "^4.0.0", - "stream-browserify": "^3.0.0", - "stream-http": "^3.2.0", - "string_decoder": "^1.3.0", 
- "timers-browserify": "^2.0.12", - "tty-browserify": "^0.0.1", - "type-fest": "^2.14.0", - "url": "^0.11.0", - "util": "^0.12.4", - "vm-browserify": "^1.1.2" - }, - "dependencies": { - "buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, - "readable-stream": { - "version": "4.5.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.5.2.tgz", - "integrity": "sha512-yjavECdqeZ3GLXNgRXgeQEdz9fvDDkNKyHnbHRFtOr7/LcfgBcmct7t/ET+HaCTqfh06OzoAxrkN/IfjJBVe+g==", - "requires": { - "abort-controller": "^3.0.0", - "buffer": "^6.0.3", - "events": "^3.3.0", - "process": "^0.11.10", - "string_decoder": "^1.3.0" - } - }, - "type-fest": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==" - } - } - }, - "node-readfiles": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/node-readfiles/-/node-readfiles-0.2.0.tgz", - "integrity": "sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA==", - "requires": { - "es6-promise": "^3.2.1" - } - }, - "node-releases": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", - "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==" - }, - "non-layered-tidy-tree-layout": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz", - "integrity": "sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw==" - }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } - } - }, - "normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==" - }, - "normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==" - }, - "normalize-url": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", - "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==" - }, - "npm-conf": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/npm-conf/-/npm-conf-1.1.3.tgz", - "integrity": 
"sha512-Yic4bZHJOt9RCFbRP3GgpqhScOY4HH3V2P8yBj6CeYq118Qr+BLXqT2JvpJ00mryLESpgOxf5XlFv4ZjXxLScw==", - "requires": { - "config-chain": "^1.1.11", - "pify": "^3.0.0" - }, - "dependencies": { - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==" - } - } - }, - "npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "requires": { - "path-key": "^3.0.0" - } - }, - "nprogress": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", - "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" - }, - "nth-check": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz", - "integrity": "sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==", - "requires": { - "boolbase": "^1.0.0" - } - }, - "num2fraction": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", - "integrity": "sha512-Y1wZESM7VUThYY+4W+X4ySH2maqcA+p7UR+w8VWNWVAd6lwuXXWz/w/Cz43J/dI2I+PS6wD5N+bJUF+gjWvIqg==" - }, - "oas-kit-common": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/oas-kit-common/-/oas-kit-common-1.0.8.tgz", - "integrity": "sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ==", - "requires": { - "fast-safe-stringify": "^2.0.7" - } - }, - "oas-linter": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/oas-linter/-/oas-linter-3.2.2.tgz", - "integrity": "sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ==", - "requires": { - "@exodus/schemasafe": "^1.0.0-rc.2", - "should": "^13.2.1", - "yaml": "^1.10.0" - } - }, - "oas-resolver": { - "version": "2.5.6", - "resolved": "https://registry.npmjs.org/oas-resolver/-/oas-resolver-2.5.6.tgz", - "integrity": "sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ==", - "requires": { - "node-fetch-h2": "^2.3.0", - "oas-kit-common": "^1.0.8", - "reftools": "^1.1.9", - "yaml": "^1.10.0", - "yargs": "^17.0.1" - } - }, - "oas-resolver-browser": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/oas-resolver-browser/-/oas-resolver-browser-2.5.2.tgz", - "integrity": "sha512-L3ugWyBHOpKLT+lb+pFXCOpk3byh6usis5T9u9mfu92jH5bR6YK8MA2bebUTIjY7I4415PzDeZcmcc+i7X05MA==", - "requires": { - "node-fetch-h2": "^2.3.0", - "oas-kit-common": "^1.0.8", - "path-browserify": "^1.0.1", - "reftools": "^1.1.6", - "yaml": "^1.10.0", - "yargs": "^15.3.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" - }, - "cliui": { - "version": "6.0.0", - "resolved": 
"https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", - "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^6.2.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "wrap-ansi": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - } - }, - "y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" - }, - "yargs": { - "version": "15.4.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", - "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", - "requires": { - "cliui": "^6.0.0", - "decamelize": "^1.2.0", - "find-up": "^4.1.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^4.2.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^18.1.2" - } - }, - "yargs-parser": { - "version": "18.1.3", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", - "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - } - } - }, - "oas-schema-walker": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/oas-schema-walker/-/oas-schema-walker-1.1.5.tgz", - "integrity": "sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ==" - }, - "oas-validator": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/oas-validator/-/oas-validator-5.0.8.tgz", - "integrity": "sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw==", - "requires": { - "call-me-maybe": "^1.0.1", - "oas-kit-common": "^1.0.8", - "oas-linter": "^3.2.2", - "oas-resolver": "^2.5.6", - "oas-schema-walker": "^1.1.5", - "reftools": "^1.1.9", - "should": "^13.2.1", - "yaml": "^1.10.0" - } - }, - "oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" - }, - "object-copy": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", - "integrity": 
"sha512-79LYn6VAb63zgtmAteVOWo9Vdj71ZVBy3Pbse+VqxDpEP83XuujMrGqHIwAXJ5I/aM0zU7dIyIAhifVTPrNItQ==", - "requires": { - "copy-descriptor": "^0.1.0", - "define-property": "^0.2.5", - "kind-of": "^3.0.3" - }, - "dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", - "requires": { - "kind-of": "^3.0.2" - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", - "requires": { - "kind-of": "^3.0.2" - } - }, - "is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "requires": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - }, - "dependencies": { - "kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" - } - } - }, - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==" - }, - "object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==" - }, - "object-visit": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", - "integrity": "sha512-GBaMwwAVK9qbQN3Scdo0OyvgPW7l3lnaVMj84uTOZlswkX0KpF6fyDBJhtTthf7pymztoN36/KEr1DyhF96zEA==", - "requires": { - "isobject": "^3.0.0" - } - }, - "object.assign": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", - "integrity": 
"sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "has-symbols": "^1.0.3", - "object-keys": "^1.1.1" - } - }, - "object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "object.fromentries": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "object.getownpropertydescriptors": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.5.tgz", - "integrity": "sha512-yDNzckpM6ntyQiGTik1fKV1DcVDRS+w8bvpWNCBanvH5LfRX9O8WTHqQzG4RZwRAM4I0oU7TV11Lj5v0g20ibw==", - "requires": { - "array.prototype.reduce": "^1.0.5", - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "object.pick": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", - "integrity": "sha512-tqa/UMy/CCoYmj+H5qc07qvSL9dqcs/WZENZ1JbtWBlATP+iVOe778gE6MSijnyCnORzDuX6hU+LA4SZ09YjFQ==", - "requires": { - "isobject": "^3.0.1" - } - }, - "object.values": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" - }, - "on-finished": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", - "requires": { - "ee-first": "1.1.1" - } - }, - "on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==" - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "requires": { - "wrappy": "1" - } - }, - "onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "requires": { - "mimic-fn": "^2.1.0" - } - }, - "open": { - "version": "8.4.0", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.0.tgz", - "integrity": "sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==", - "requires": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - } - }, - 
"opener": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", - "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==" - }, - "optipng-bin": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/optipng-bin/-/optipng-bin-5.1.0.tgz", - "integrity": "sha512-9baoqZTNNmXQjq/PQTWEXbVV3AMO2sI/GaaqZJZ8SExfAzjijeAP7FEeT+TtyumSw7gr0PZtSUYB/Ke7iHQVKA==", - "requires": { - "bin-build": "^3.0.0", - "bin-wrapper": "^4.0.0", - "logalot": "^2.0.0" - } - }, - "os-browserify": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", - "integrity": "sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==" - }, - "os-filter-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/os-filter-obj/-/os-filter-obj-2.0.0.tgz", - "integrity": "sha512-uksVLsqG3pVdzzPvmAHpBK0wKxYItuzZr7SziusRPoz67tGV8rL1szZ6IdeUrbqLjGDwApBtN29eEE3IqGHOjg==", - "requires": { - "arch": "^2.1.0" - } - }, - "p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==" - }, - "p-event": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-event/-/p-event-1.3.0.tgz", - "integrity": "sha512-hV1zbA7gwqPVFcapfeATaNjQ3J0NuzorHPyG8GPL9g/Y/TplWVBVoCKCXL6Ej2zscrCEv195QNWJXuBH6XZuzA==", - "requires": { - "p-timeout": "^1.1.1" - } - }, - "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==" - }, - "p-is-promise": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz", - "integrity": "sha512-zL7VE4JVS2IFSkR2GQKDSPEVxkoH43/p7oEnwpdCndKYJO0HVeRB7fA8TJwuLOTBREtK0ea8eHaxdwcpob5dmg==" - }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "requires": { - "p-limit": "^2.2.0" - } - }, - "p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "requires": { - "aggregate-error": "^3.0.0" - } - }, - "p-map-series": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-map-series/-/p-map-series-1.0.0.tgz", - "integrity": "sha512-4k9LlvY6Bo/1FcIdV33wqZQES0Py+iKISU9Uc8p8AjWoZPnFKMpVIVD3s0EYn4jzLh1I+WeUZkJ0Yoa4Qfw3Kg==", - "requires": { - "p-reduce": "^1.0.0" - } - }, - "p-pipe": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/p-pipe/-/p-pipe-1.2.0.tgz", - "integrity": "sha512-IA8SqjIGA8l9qOksXJvsvkeQ+VGb0TAzNCzvKvz9wt5wWLqfWbV6fXy43gpR2L4Te8sOq3S+Ql9biAaMKPdbtw==" - }, - "p-reduce": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-1.0.0.tgz", - "integrity": 
"sha512-3Tx1T3oM1xO/Y8Gj0sWyE78EIJZ+t+aEmXUdvQgvGmSMri7aPTHoovbXEreWKkL5j21Er60XAWLTzKbAKYOujQ==" - }, - "p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "requires": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - } - }, - "p-timeout": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-1.2.1.tgz", - "integrity": "sha512-gb0ryzr+K2qFqFv6qi3khoeqMZF/+ajxQipEF6NteZVnvz9tzdsfAVj3lYtn1gAXvH5lfLwfxEII799gt/mRIA==", - "requires": { - "p-finally": "^1.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - }, - "package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "requires": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "pako": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" - }, - "param-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", - "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", - "requires": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "requires": { - "callsites": "^3.0.0" - } - }, - "parse-asn1": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz", - "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==", - "requires": { - "asn1.js": "^5.2.0", - "browserify-aes": "^1.0.0", - "evp_bytestokey": "^1.0.0", - "pbkdf2": "^3.0.3", - "safe-buffer": "^5.1.1" - } - }, - "parse-entities": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", - "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", - "requires": { - "character-entities": "^1.0.0", - "character-entities-legacy": "^1.0.0", - "character-reference-invalid": "^1.0.0", - "is-alphanumerical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-hexadecimal": "^1.0.0" - } - }, - "parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "requires": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - 
"lines-and-columns": "^1.1.6" - } - }, - "parse-numeric-range": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", - "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" - }, - "parse5": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", - "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", - "requires": { - "entities": "^4.4.0" - }, - "dependencies": { - "entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" - } - } - }, - "parse5-htmlparser2-tree-adapter": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", - "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", - "requires": { - "domhandler": "^5.0.2", - "parse5": "^7.0.0" - }, - "dependencies": { - "domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "requires": { - "domelementtype": "^2.3.0" - } - } - } - }, - "parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" - }, - "pascal-case": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", - "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", - "requires": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "pascalcase": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", - "integrity": "sha512-XHXfu/yOQRy9vYOtUDVMN60OEJjW013GoObG1o+xwQTpB9eYJX/BjXMsdW13ZDPruFhYYn0AG22w0xgQMwl3Nw==" - }, - "path": { - "version": "0.12.7", - "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", - "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", - "requires": { - "process": "^0.11.1", - "util": "^0.10.3" - }, - "dependencies": { - "inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" - }, - "util": { - "version": "0.10.4", - "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", - "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", - "requires": { - "inherits": "2.0.3" - } - } - } - }, - "path-browserify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", - "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==" - }, - "path-dirname": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", - "integrity": 
"sha512-ALzNPpyNq9AqXMBjeymIjFDAkAFH06mHJH/cSBHAgU0s4vfpBn6b2nf8tiRLvagKD8RbTpq2FKTBg7cl9l3c7Q==" - }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" - }, - "path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=" - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==" - }, - "path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" - }, - "path-scurry": { - "version": "1.10.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz", - "integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==", - "requires": { - "lru-cache": "^9.1.1 || ^10.0.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "dependencies": { - "lru-cache": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.1.0.tgz", - "integrity": "sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag==" - } - } - }, - "path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", - "requires": { - "isarray": "0.0.1" - } - }, - "path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==" - }, - "pbkdf2": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz", - "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==", - "requires": { - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4", - "ripemd160": "^2.0.1", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==" - }, - "performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==" - }, - "picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" - }, - "picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": 
"sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==" - }, - "pidtree": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/pidtree/-/pidtree-0.6.0.tgz", - "integrity": "sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==", - "dev": true - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==" - }, - "pinkie": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", - "integrity": "sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==" - }, - "pinkie-promise": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", - "integrity": "sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==", - "requires": { - "pinkie": "^2.0.0" - } - }, - "pirates": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz", - "integrity": "sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==" - }, - "pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "requires": { - "find-up": "^4.0.0" - } - }, - "pkg-up": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", - "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", - "requires": { - "find-up": "^3.0.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==" - } - } - }, - "playwright": { - "version": "1.32.2", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.32.2.tgz", - "integrity": "sha512-jHVnXJke0PXpuPszKtk9y1zZSlzO5+2a+aockT/AND0oeXx46FiJEFrafthurglLygVZA+1gEbtUM1C7qtTV+Q==", - "dev": true, - "requires": { - "playwright-core": "1.32.2" - } - }, - "playwright-core": { - "version": "1.32.2", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.32.2.tgz", - "integrity": "sha512-zD7aonO+07kOTthsrCR3YCVnDcqSHIJpdFUtZEMOb6//1Rc7/6mZDRdw+nlzcQiQltOOsiqI3rrSyn/SlyjnJQ==", - "dev": true - }, - "pluralize": { - 
"version": "8.0.0", - "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", - "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==" - }, - "portfinder": { - "version": "1.0.32", - "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.32.tgz", - "integrity": "sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==", - "requires": { - "async": "^2.6.4", - "debug": "^3.2.7", - "mkdirp": "^0.5.6" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "requires": { - "ms": "^2.1.1" - } - } - } - }, - "posix-character-classes": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", - "integrity": "sha512-xTgYBc3fuo7Yt7JbiuFxSYGToMoz8fLoE6TC9Wx1P/u+LfeThMOAqmuyECnlBaaJb+u1m9hHiXUEtwW4OzfUJg==" - }, - "postcss": { - "version": "8.4.16", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.16.tgz", - "integrity": "sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==", - "requires": { - "nanoid": "^3.3.4", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" - } - }, - "postcss-calc": { - "version": "8.2.4", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", - "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", - "requires": { - "postcss-selector-parser": "^6.0.9", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-colormin": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", - "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", - "requires": { - "browserslist": "^4.21.4", - "caniuse-api": "^3.0.0", - "colord": "^2.9.1", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-convert-values": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", - "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", - "requires": { - "browserslist": "^4.21.4", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-discard-comments": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", - "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", - "requires": {} - }, - "postcss-discard-duplicates": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", - "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", - "requires": {} - }, - "postcss-discard-empty": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", - "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", - "requires": {} - }, - "postcss-discard-overridden": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", - 
"integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", - "requires": {} - }, - "postcss-discard-unused": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz", - "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==", - "requires": { - "postcss-selector-parser": "^6.0.5" - } - }, - "postcss-loader": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.0.1.tgz", - "integrity": "sha512-VRviFEyYlLjctSM93gAZtcJJ/iSkPZ79zWbN/1fSH+NisBByEiVLqpdVDrPLVSi8DX0oJo12kL/GppTBdKVXiQ==", - "requires": { - "cosmiconfig": "^7.0.0", - "klona": "^2.0.5", - "semver": "^7.3.7" - } - }, - "postcss-merge-idents": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", - "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==", - "requires": { - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-merge-longhand": { - "version": "5.1.7", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", - "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", - "requires": { - "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.1" - } - }, - "postcss-merge-rules": { - "version": "5.1.4", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", - "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", - "requires": { - "browserslist": "^4.21.4", - "caniuse-api": "^3.0.0", - "cssnano-utils": "^3.1.0", - "postcss-selector-parser": "^6.0.5" - } - }, - "postcss-minify-font-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", - "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-minify-gradients": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", - "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", - "requires": { - "colord": "^2.9.1", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-minify-params": { - "version": "5.1.4", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", - "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", - "requires": { - "browserslist": "^4.21.4", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-minify-selectors": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", - "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", - "requires": { - "postcss-selector-parser": "^6.0.5" - } - }, - "postcss-modules-extract-imports": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", - 
"integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", - "requires": {} - }, - "postcss-modules-local-by-default": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz", - "integrity": "sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ==", - "requires": { - "icss-utils": "^5.0.0", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.1.0" - } - }, - "postcss-modules-scope": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz", - "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==", - "requires": { - "postcss-selector-parser": "^6.0.4" - } - }, - "postcss-modules-values": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", - "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", - "requires": { - "icss-utils": "^5.0.0" - } - }, - "postcss-normalize-charset": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", - "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", - "requires": {} - }, - "postcss-normalize-display-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", - "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-normalize-positions": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", - "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-normalize-repeat-style": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", - "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-normalize-string": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", - "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-normalize-timing-functions": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", - "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-normalize-unicode": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", - "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", - "requires": { - 
"browserslist": "^4.21.4", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-normalize-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", - "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", - "requires": { - "normalize-url": "^6.0.1", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-normalize-whitespace": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", - "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-ordered-values": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", - "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", - "requires": { - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-reduce-idents": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz", - "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-reduce-initial": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", - "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", - "requires": { - "browserslist": "^4.21.4", - "caniuse-api": "^3.0.0" - } - }, - "postcss-reduce-transforms": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", - "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-selector-parser": { - "version": "6.0.10", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", - "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", - "requires": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - } - }, - "postcss-sort-media-queries": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", - "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", - "requires": { - "sort-css-media-queries": "2.1.0" - } - }, - "postcss-svgo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", - "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", - "requires": { - "postcss-value-parser": "^4.2.0", - "svgo": "^2.7.0" - } - }, - "postcss-unique-selectors": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", - "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", - "requires": { - "postcss-selector-parser": "^6.0.5" - } - }, - "postcss-value-parser": { - "version": "4.2.0", - "resolved": 
"https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" - }, - "postcss-zindex": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz", - "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", - "requires": {} - }, - "postman-url-encoder": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/postman-url-encoder/-/postman-url-encoder-3.0.5.tgz", - "integrity": "sha512-jOrdVvzUXBC7C+9gkIkpDJ3HIxOHTIqjpQ4C1EMt1ZGeMvSEpbFCKq23DEfgsj46vMnDgyQf+1ZLp2Wm+bKSsA==", - "requires": { - "punycode": "^2.1.1" - } - }, - "preact": { - "version": "10.19.6", - "resolved": "https://registry.npmjs.org/preact/-/preact-10.19.6.tgz", - "integrity": "sha512-gympg+T2Z1fG1unB8NH29yHJwnEaCH37Z32diPDku316OTnRPeMbiRV9kTrfZpocXjdfnWuFUl/Mj4BHaf6gnw==" - }, - "prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=" - }, - "prettier": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.0.tgz", - "integrity": "sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==", - "dev": true - }, - "pretty-bytes": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", - "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==" - }, - "pretty-error": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", - "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", - "requires": { - "lodash": "^4.17.20", - "renderkid": "^3.0.0" - } - }, - "pretty-format": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.6.3.tgz", - "integrity": "sha512-ZsBgjVhFAj5KeK+nHfF1305/By3lechHQSMWCTl8iHSbfOm2TN5nHEtFc/+W7fAyUeCs2n5iow72gld4gW0xDw==", - "dev": true, - "requires": { - "@jest/schemas": "^29.6.3", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", - "dev": true - }, - "react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", - "dev": true - } - } - }, - "pretty-time": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", - "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" - }, - "prism-react-renderer": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-1.3.5.tgz", - "integrity": "sha512-IJ+MSwBWKG+SM3b2SUfdrhC+gu01QkV2KmRQgREThBfSQRoufqRfxfHUxpG1WcaFjP+kojcFyO9Qqtpgt3qLCg==", - "requires": {} - }, - "prismjs": { - "version": "1.29.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", - "integrity": 
"sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==" - }, - "process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==" - }, - "process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "promise": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", - "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", - "requires": { - "asap": "~2.0.3" - } - }, - "prompts": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", - "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", - "requires": { - "kleur": "^3.0.3", - "sisteransi": "^1.0.5" - } - }, - "prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "requires": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - } - }, - "prop-types-exact": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/prop-types-exact/-/prop-types-exact-1.2.0.tgz", - "integrity": "sha512-K+Tk3Kd9V0odiXFP9fwDHUYRyvK3Nun3GVyPapSIs5OBkITAm15W0CPFD/YKTkMUAbc0b9CUwRQp2ybiBIq+eA==", - "requires": { - "has": "^1.0.3", - "object.assign": "^4.1.0", - "reflect.ownkeys": "^0.2.0" - } - }, - "property-information": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", - "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", - "requires": { - "xtend": "^4.0.0" - } - }, - "proto-list": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" - }, - "proxy-addr": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", - "requires": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "dependencies": { - "ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" - } - } - }, - "pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==" - }, - "psl": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==" - }, - "public-encrypt": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", - "integrity": 
"sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", - "requires": { - "bn.js": "^4.1.0", - "browserify-rsa": "^4.0.0", - "create-hash": "^1.1.0", - "parse-asn1": "^5.0.0", - "randombytes": "^2.0.1", - "safe-buffer": "^5.1.2" - }, - "dependencies": { - "bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - } - } - }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" - }, - "pupa": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", - "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", - "requires": { - "escape-goat": "^2.0.0" - } - }, - "pure-color": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/pure-color/-/pure-color-1.3.0.tgz", - "integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==" - }, - "pure-rand": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.2.tgz", - "integrity": "sha512-6Yg0ekpKICSjPswYOuC5sku/TSWaRYlA0qsXqJgM/d/4pLPHPuTxK7Nbf7jFKzAeedUhR8C7K9Uv63FBsSo8xQ==", - "dev": true - }, - "pushfeedback": { - "version": "0.1.39", - "resolved": "https://registry.npmjs.org/pushfeedback/-/pushfeedback-0.1.39.tgz", - "integrity": "sha512-/sZR2Sqdi1MgxVxVFup3c8GMW1vKVDykCNYIzJH4ic+whTG4a1VFR/uhsx6iHDP3CNxVO/mcxH+pMRSv9voypQ==", - "requires": { - "@stencil/core": "^2.13.0", - "html2canvas": "^1.4.1" - } - }, - "pushfeedback-react": { - "version": "0.1.30", - "resolved": "https://registry.npmjs.org/pushfeedback-react/-/pushfeedback-react-0.1.30.tgz", - "integrity": "sha512-rHLfkmHSL8NLHdZ9xPlAoes4cor8LGzsCwYBvlajlIFggsajdO+F5GdUAi+cXvXJaBM+Us4EmZlD9naBljHMZA==", - "requires": { - "pushfeedback": "^0.1.39" - } - }, - "q": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", - "integrity": "sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw==" - }, - "qs": { - "version": "6.10.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz", - "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==", - "requires": { - "side-channel": "^1.0.4" - } - }, - "query-string": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", - "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", - "requires": { - "decode-uri-component": "^0.2.0", - "object-assign": "^4.1.0", - "strict-uri-encode": "^1.0.0" - } - }, - "querystring-es3": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", - "integrity": "sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==" - }, - "queue": { - "version": "6.0.2", - 
"resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", - "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", - "requires": { - "inherits": "~2.0.3" - } - }, - "queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" - }, - "raf": { - "version": "3.4.1", - "resolved": "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz", - "integrity": "sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA==", - "requires": { - "performance-now": "^2.1.0" - } - }, - "railroad-diagrams": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz", - "integrity": "sha512-cz93DjNeLY0idrCNOH6PviZGRN9GJhsdm9hpn1YCS879fj4W+x5IFJhhkRZcwVgMmFF7R82UA/7Oh+R8lLZg6A==" - }, - "randexp": { - "version": "0.4.6", - "resolved": "https://registry.npmjs.org/randexp/-/randexp-0.4.6.tgz", - "integrity": "sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ==", - "requires": { - "discontinuous-range": "1.0.0", - "ret": "~0.1.10" - } - }, - "randomatic": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.1.1.tgz", - "integrity": "sha512-TuDE5KxZ0J461RVjrJZCJc+J+zCkTb1MbH9AQUq68sMhOMcy9jLcb3BrZKgp9q9Ncltdg4QVqWrH02W2EFFVYw==", - "requires": { - "is-number": "^4.0.0", - "kind-of": "^6.0.0", - "math-random": "^1.0.1" - }, - "dependencies": { - "is-number": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", - "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==" - } - } - }, - "randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "requires": { - "safe-buffer": "^5.1.0" - } - }, - "randomfill": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", - "requires": { - "randombytes": "^2.0.5", - "safe-buffer": "^5.1.0" - } - }, - "range-parser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", - "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=" - }, - "raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", - "requires": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "dependencies": { - "bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" - } - } - }, - "rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - 
"strip-json-comments": "~2.0.1" - } - }, - "react": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", - "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, - "react-base16-styling": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/react-base16-styling/-/react-base16-styling-0.6.0.tgz", - "integrity": "sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==", - "requires": { - "base16": "^1.0.0", - "lodash.curry": "^4.0.1", - "lodash.flow": "^3.3.0", - "pure-color": "^1.2.0" - } - }, - "react-dev-utils": { - "version": "12.0.1", - "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", - "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", - "requires": { - "@babel/code-frame": "^7.16.0", - "address": "^1.1.2", - "browserslist": "^4.18.1", - "chalk": "^4.1.2", - "cross-spawn": "^7.0.3", - "detect-port-alt": "^1.1.6", - "escape-string-regexp": "^4.0.0", - "filesize": "^8.0.6", - "find-up": "^5.0.0", - "fork-ts-checker-webpack-plugin": "^6.5.0", - "global-modules": "^2.0.0", - "globby": "^11.0.4", - "gzip-size": "^6.0.0", - "immer": "^9.0.7", - "is-root": "^2.1.0", - "loader-utils": "^3.2.0", - "open": "^8.4.0", - "pkg-up": "^3.1.0", - "prompts": "^2.4.2", - "react-error-overlay": "^6.0.11", - "recursive-readdir": "^2.2.2", - "shell-quote": "^1.7.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==" - }, - "find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "requires": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "loader-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.0.tgz", - "integrity": "sha512-HVl9ZqccQihZ7JM85dco1MvO9G+ONvxoGa9rkhzFsneGLKSUg1gJf9bWzhRhcvm2qChhWpebQhP44qxjKIUCaQ==" - }, - "locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "requires": { - "p-locate": "^5.0.0" - } - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "requires": { - "p-limit": "^3.0.2" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "react-dom": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", - "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "scheduler": "^0.20.2" - } - }, - "react-error-overlay": { - "version": "6.0.11", - "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", - "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" - }, - "react-fast-compare": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.0.tgz", - "integrity": "sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==" - }, - "react-helmet-async": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", - "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", - "requires": { - "@babel/runtime": "^7.12.5", - "invariant": "^2.2.4", - "prop-types": "^15.7.2", - "react-fast-compare": "^3.2.0", - "shallowequal": "^1.1.0" - } - }, - "react-hook-form": { - "version": "7.49.3", - "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.49.3.tgz", - "integrity": "sha512-foD6r3juidAT1cOZzpmD/gOKt7fRsDhXXZ0y28+Al1CHgX+AY1qIN9VSIIItXRq1dN68QrRwl1ORFlwjBaAqeQ==", - "requires": {} - }, - "react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" - }, - "react-json-view": { - "version": "1.21.3", - "resolved": "https://registry.npmjs.org/react-json-view/-/react-json-view-1.21.3.tgz", - "integrity": "sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw==", - "requires": { - "flux": "^4.0.1", - 
"react-base16-styling": "^0.6.0", - "react-lifecycles-compat": "^3.0.4", - "react-textarea-autosize": "^8.3.2" - } - }, - "react-lifecycles-compat": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz", - "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==" - }, - "react-live": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/react-live/-/react-live-4.0.1.tgz", - "integrity": "sha512-ndRYxgJYdcfVibnM0zublvEdwArbIwplhLxpOf3dsRtVh8BId0nOnblticIwhl24D5RcmIHf8siCErtgGN4zLw==", - "requires": { - "prism-react-renderer": "^1.3.1", - "sucrase": "^3.31.0", - "use-editable": "^2.3.3" - } - }, - "react-loadable": { - "version": "npm:@docusaurus/react-loadable@5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", - "requires": { - "@types/react": "*", - "prop-types": "^15.6.2" - } - }, - "react-loadable-ssr-addon-v5-slorber": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", - "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", - "requires": { - "@babel/runtime": "^7.10.3" - } - }, - "react-magic-dropzone": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/react-magic-dropzone/-/react-magic-dropzone-1.0.1.tgz", - "integrity": "sha512-0BIROPARmXHpk4AS3eWBOsewxoM5ndk2psYP/JmbCq8tz3uR2LIV1XiroZ9PKrmDRMctpW+TvsBCtWasuS8vFA==" - }, - "react-markdown": { - "version": "8.0.7", - "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-8.0.7.tgz", - "integrity": "sha512-bvWbzG4MtOU62XqBx3Xx+zB2raaFFsq4mYiAzfjXJMEz2sixgeAfraA3tvzULF02ZdOMUOKTBFFaZJDDrq+BJQ==", - "requires": { - "@types/hast": "^2.0.0", - "@types/prop-types": "^15.0.0", - "@types/unist": "^2.0.0", - "comma-separated-tokens": "^2.0.0", - "hast-util-whitespace": "^2.0.0", - "prop-types": "^15.0.0", - "property-information": "^6.0.0", - "react-is": "^18.0.0", - "remark-parse": "^10.0.0", - "remark-rehype": "^10.0.0", - "space-separated-tokens": "^2.0.0", - "style-to-object": "^0.4.0", - "unified": "^10.0.0", - "unist-util-visit": "^4.0.0", - "vfile": "^5.0.0" - }, - "dependencies": { - "bail": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==" - }, - "comma-separated-tokens": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", - "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==" - }, - "is-plain-obj": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==" - }, - "property-information": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.4.0.tgz", - "integrity": "sha512-9t5qARVofg2xQqKtytzt+lZ4d1Qvj8t5B8fEwXK6qOfgRLgH/b13QlgEyDh033NOS31nXeFbYv7CLUDG1CeifQ==" - }, - "react-is": { - "version": "18.2.0", - "resolved": 
"https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" - }, - "remark-parse": { - "version": "10.0.2", - "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.2.tgz", - "integrity": "sha512-3ydxgHa/ZQzG8LvC7jTXccARYDcRld3VfcgIIFs7bI6vbRSxJJmzgLEIIoYKyrfhaY+ujuWaf/PJiMZXoiCXgw==", - "requires": { - "@types/mdast": "^3.0.0", - "mdast-util-from-markdown": "^1.0.0", - "unified": "^10.0.0" - } - }, - "space-separated-tokens": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", - "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==" - }, - "style-to-object": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", - "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", - "requires": { - "inline-style-parser": "0.1.1" - } - }, - "trough": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", - "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==" - }, - "unified": { - "version": "10.1.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", - "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", - "requires": { - "@types/unist": "^2.0.0", - "bail": "^2.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^4.0.0", - "trough": "^2.0.0", - "vfile": "^5.0.0" - } - }, - "unist-util-is": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", - "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-stringify-position": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-visit": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", - "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^5.0.0", - "unist-util-visit-parents": "^5.1.1" - } - }, - "unist-util-visit-parents": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", - "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^5.0.0" - } - }, - "vfile": { - "version": "5.3.7", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", - "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", - "requires": { - "@types/unist": "^2.0.0", - "is-buffer": "^2.0.0", - "unist-util-stringify-position": "^3.0.0", - "vfile-message": "^3.0.0" - } - }, - "vfile-message": { - "version": "3.1.4", - "resolved": 
"https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", - "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-stringify-position": "^3.0.0" - } - } - } - }, - "react-modal": { - "version": "3.16.1", - "resolved": "https://registry.npmjs.org/react-modal/-/react-modal-3.16.1.tgz", - "integrity": "sha512-VStHgI3BVcGo7OXczvnJN7yT2TWHJPDXZWyI/a0ssFNhGZWsPmB8cF0z33ewDXq4VfYMO1vXgiv/g8Nj9NDyWg==", - "requires": { - "exenv": "^1.2.0", - "prop-types": "^15.7.2", - "react-lifecycles-compat": "^3.0.0", - "warning": "^4.0.3" - } - }, - "react-player": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/react-player/-/react-player-2.11.0.tgz", - "integrity": "sha512-fIrwpuXOBXdEg1FiyV9isKevZOaaIsAAtZy5fcjkQK9Nhmk1I2NXzY/hkPos8V0zb/ZX416LFy8gv7l/1k3a5w==", - "requires": { - "deepmerge": "^4.0.0", - "load-script": "^1.0.0", - "memoize-one": "^5.1.1", - "prop-types": "^15.7.2", - "react-fast-compare": "^3.0.1" - } - }, - "react-redux": { - "version": "7.2.9", - "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-7.2.9.tgz", - "integrity": "sha512-Gx4L3uM182jEEayZfRbI/G11ZpYdNAnBs70lFVMNdHJI76XYtR+7m0MN+eAs7UHBPhWXcnFPaS+9owSCJQHNpQ==", - "requires": { - "@babel/runtime": "^7.15.4", - "@types/react-redux": "^7.1.20", - "hoist-non-react-statics": "^3.3.2", - "loose-envify": "^1.4.0", - "prop-types": "^15.7.2", - "react-is": "^17.0.2" - }, - "dependencies": { - "react-is": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", - "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==" - } - } - }, - "react-router": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.3.tgz", - "integrity": "sha512-mzQGUvS3bM84TnbtMYR8ZjKnuPJ71IjSzR+DE6UkUqvN4czWIqEs17yLL8xkAycv4ev0AiN+IGrWu88vJs/p2w==", - "requires": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "hoist-non-react-statics": "^3.1.0", - "loose-envify": "^1.3.1", - "mini-create-react-context": "^0.4.0", - "path-to-regexp": "^1.7.0", - "prop-types": "^15.6.2", - "react-is": "^16.6.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - } - }, - "react-router-config": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", - "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", - "requires": { - "@babel/runtime": "^7.1.2" - } - }, - "react-router-dom": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.3.tgz", - "integrity": "sha512-Ov0tGPMBgqmbu5CDmN++tv2HQ9HlWDuWIIqn4b88gjlAN5IHI+4ZUZRcpz9Hl0azFIwihbLDYw1OiHGRo7ZIng==", - "requires": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "loose-envify": "^1.3.1", - "prop-types": "^15.6.2", - "react-router": "5.3.3", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - } - }, - "react-textarea-autosize": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", - "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", - "requires": { - "@babel/runtime": "^7.20.13", - "use-composed-ref": "^1.3.0", - "use-latest": "^1.2.1" - } - }, - "read-pkg": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", - "integrity": "sha512-7BGwRHqt4s/uVbuyoeejRn4YmFnYZiFl4AuaeXHlgZf3sONF0SOGlxs2Pw8g6hCKupo08RafIO5YXFNOKTfwsQ==", - "requires": { - "load-json-file": "^1.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^1.0.0" - }, - "dependencies": { - "path-type": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", - "integrity": "sha512-S4eENJz1pkiQn9Znv33Q+deTOKmbl+jj1Fl+qiP/vYezj+S8x+J3Uo0ISrx/QoEvIlOaDWJhPaRd1flJ9HXZqg==", - "requires": { - "graceful-fs": "^4.1.2", - "pify": "^2.0.0", - "pinkie-promise": "^2.0.0" - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==" - } - } - }, - "read-pkg-up": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz", - "integrity": "sha512-WD9MTlNtI55IwYUS27iHh9tK3YoIVhxis8yKhLpTqWtml739uXc9NWTpxoHkfZf3+DkCCsXox94/VWZniuZm6A==", - "requires": { - "find-up": "^1.0.0", - "read-pkg": "^1.0.0" - }, - "dependencies": { - "find-up": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz", - "integrity": "sha512-jvElSjyuo4EMQGoTwo1uJU5pQMwTW5lS1x05zzfJuTIyLR3zwO27LYrxNg+dlvKpGOuGy/MzBdXh80g0ve5+HA==", - "requires": { - "path-exists": "^2.0.0", - "pinkie-promise": "^2.0.0" - } - }, - "path-exists": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz", - "integrity": "sha512-yTltuKuhtNeFJKa1PiRzfLAU5182q1y4Eb4XCJ3PBqyzEDkAZRzBrKKBct682ls9reBVHf9udYLN5Nd+K1B9BQ==", - "requires": { - "pinkie-promise": "^2.0.0" - } - } - } - }, - "readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - }, - "readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "requires": { - "picomatch": "^2.2.1" - } - }, - "reading-time": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", - "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" - }, - "rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", - "requires": { - "resolve": "^1.1.6" - } - }, - "recursive-readdir": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz", - "integrity": "sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==", - "requires": { - "minimatch": "3.0.4" - } - }, - "redent": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz", - "integrity": "sha512-qtW5hKzGQZqKoh6JNSD+4lfitfPKGz42e6QwiRmPM5mmKtR0N41AbJRYu0xJi7nhOJ4WDgRkKvAk6tw4WIwR4g==", - "requires": { - "indent-string": "^2.1.0", - "strip-indent": "^1.0.1" - }, - "dependencies": { - 
"indent-string": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", - "integrity": "sha512-aqwDFWSgSgfRaEwao5lg5KEcVd/2a+D1rvoG7NdilmYz0NwRk6StWpWdz/Hpk34MKPpx7s8XxUqimfcQK6gGlg==", - "requires": { - "repeating": "^2.0.0" - } - } - } - }, - "redux": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/redux/-/redux-4.2.1.tgz", - "integrity": "sha512-LAUYz4lc+Do8/g7aeRa8JkyDErK6ekstQaqWQrNRW//MY1TvCEpMtpTWvlQ+FPbWCx+Xixu/6SHt5N0HR+SB4w==", - "requires": { - "@babel/runtime": "^7.9.2" - } - }, - "redux-thunk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/redux-thunk/-/redux-thunk-2.4.2.tgz", - "integrity": "sha512-+P3TjtnP0k/FEjcBL5FZpoovtvrTNT/UXd4/sluaSyrURlSlhLSzEdfsTBW7WsKB6yPvgd7q/iZPICFjW4o57Q==", - "requires": {} - }, - "reflect.ownkeys": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/reflect.ownkeys/-/reflect.ownkeys-0.2.0.tgz", - "integrity": "sha512-qOLsBKHCpSOFKK1NUOCGC5VyeufB6lEsFe92AL2bhIJsacZS1qdoOZSbPk3MYKuT2cFlRDnulKXuuElIrMjGUg==" - }, - "reftools": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/reftools/-/reftools-1.1.9.tgz", - "integrity": "sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w==" - }, - "regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" - }, - "regenerate-unicode-properties": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", - "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", - "requires": { - "regenerate": "^1.4.2" - } - }, - "regenerator-runtime": { - "version": "0.13.9", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", - "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" - }, - "regenerator-transform": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", - "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", - "requires": { - "@babel/runtime": "^7.8.4" - } - }, - "regex-not": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", - "requires": { - "extend-shallow": "^3.0.2", - "safe-regex": "^1.1.0" - }, - "dependencies": { - "extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", - "requires": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - } - }, - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "requires": { - "is-plain-object": "^2.0.4" - } - } - } - }, - "regexp.prototype.flags": { - "version": "1.4.3", - "resolved": 
"https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz", - "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "functions-have-names": "^1.2.2" - } - }, - "regexpu-core": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.2.2.tgz", - "integrity": "sha512-T0+1Zp2wjF/juXMrMxHxidqGYn8U4R+zleSJhX9tQ1PUsS8a9UtYfbsF9LdiVgNX3kiX8RNaKM42nfSgvFJjmw==", - "requires": { - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsgen": "^0.7.1", - "regjsparser": "^0.9.1", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.1.0" - } - }, - "registry-auth-token": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz", - "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==", - "requires": { - "rc": "^1.2.8" - } - }, - "registry-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "requires": { - "rc": "^1.2.8" - } - }, - "regjsgen": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.7.1.tgz", - "integrity": "sha512-RAt+8H2ZEzHeYWxZ3H2z6tF18zyyOnlcdaafLrm21Bguj7uZy6ULibiAFdXEtKQY4Sy7wDTwDiOazasMLc4KPA==" - }, - "regjsparser": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", - "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", - "requires": { - "jsesc": "~0.5.0" - }, - "dependencies": { - "jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==" - } - } - }, - "rehype-raw": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-6.1.1.tgz", - "integrity": "sha512-d6AKtisSRtDRX4aSPsJGTfnzrX2ZkHQLE5kiUuGOeEoLpbEulFF4hj0mLPbsa+7vmguDKOVVEQdHKDSwoaIDsQ==", - "requires": { - "@types/hast": "^2.0.0", - "hast-util-raw": "^7.2.0", - "unified": "^10.0.0" - }, - "dependencies": { - "@types/parse5": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-6.0.3.tgz", - "integrity": "sha512-SuT16Q1K51EAVPz1K29DJ/sXjhSQ0zjvsypYJ6tlwVsRV9jwW5Adq2ch8Dq8kDBCkYnELS7N7VNCSB5nC56t/g==" - }, - "bail": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==" - }, - "comma-separated-tokens": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", - "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==" - }, - "hast-util-from-parse5": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-7.1.2.tgz", - "integrity": "sha512-Nz7FfPBuljzsN3tCQ4kCBKqdNhQE2l0Tn+X1ubgKBPRoiDIu1mL08Cfw4k7q71+Duyaw7DXDN+VTAp4Vh3oCOw==", - "requires": { - "@types/hast": "^2.0.0", - "@types/unist": 
"^2.0.0", - "hastscript": "^7.0.0", - "property-information": "^6.0.0", - "vfile": "^5.0.0", - "vfile-location": "^4.0.0", - "web-namespaces": "^2.0.0" - } - }, - "hast-util-parse-selector": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-3.1.1.tgz", - "integrity": "sha512-jdlwBjEexy1oGz0aJ2f4GKMaVKkA9jwjr4MjAAI22E5fM/TXVZHuS5OpONtdeIkRKqAaryQ2E9xNQxijoThSZA==", - "requires": { - "@types/hast": "^2.0.0" - } - }, - "hast-util-raw": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-7.2.3.tgz", - "integrity": "sha512-RujVQfVsOrxzPOPSzZFiwofMArbQke6DJjnFfceiEbFh7S05CbPt0cYN+A5YeD3pso0JQk6O1aHBnx9+Pm2uqg==", - "requires": { - "@types/hast": "^2.0.0", - "@types/parse5": "^6.0.0", - "hast-util-from-parse5": "^7.0.0", - "hast-util-to-parse5": "^7.0.0", - "html-void-elements": "^2.0.0", - "parse5": "^6.0.0", - "unist-util-position": "^4.0.0", - "unist-util-visit": "^4.0.0", - "vfile": "^5.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" - } - }, - "hast-util-to-parse5": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-7.1.0.tgz", - "integrity": "sha512-YNRgAJkH2Jky5ySkIqFXTQiaqcAtJyVE+D5lkN6CdtOqrnkLfGYYrEcKuHOJZlp+MwjSwuD3fZuawI+sic/RBw==", - "requires": { - "@types/hast": "^2.0.0", - "comma-separated-tokens": "^2.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" - } - }, - "hastscript": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-7.2.0.tgz", - "integrity": "sha512-TtYPq24IldU8iKoJQqvZOuhi5CyCQRAbvDOX0x1eW6rsHSxa/1i2CCiptNTotGHJ3VoHRGmqiv6/D3q113ikkw==", - "requires": { - "@types/hast": "^2.0.0", - "comma-separated-tokens": "^2.0.0", - "hast-util-parse-selector": "^3.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0" - } - }, - "html-void-elements": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-2.0.1.tgz", - "integrity": "sha512-0quDb7s97CfemeJAnW9wC0hw78MtW7NU3hqtCD75g2vFlDLt36llsYD7uB7SUzojLMP24N5IatXf7ylGXiGG9A==" - }, - "is-plain-obj": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==" - }, - "parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" - }, - "property-information": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.4.0.tgz", - "integrity": "sha512-9t5qARVofg2xQqKtytzt+lZ4d1Qvj8t5B8fEwXK6qOfgRLgH/b13QlgEyDh033NOS31nXeFbYv7CLUDG1CeifQ==" - }, - "space-separated-tokens": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", - "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==" - }, - "trough": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", - "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==" - }, - "unified": { - "version": "10.1.2", - "resolved": 
"https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", - "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", - "requires": { - "@types/unist": "^2.0.0", - "bail": "^2.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^4.0.0", - "trough": "^2.0.0", - "vfile": "^5.0.0" - } - }, - "unist-util-is": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", - "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-position": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz", - "integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-stringify-position": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-visit": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", - "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^5.0.0", - "unist-util-visit-parents": "^5.1.1" - } - }, - "unist-util-visit-parents": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", - "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^5.0.0" - } - }, - "vfile": { - "version": "5.3.7", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", - "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", - "requires": { - "@types/unist": "^2.0.0", - "is-buffer": "^2.0.0", - "unist-util-stringify-position": "^3.0.0", - "vfile-message": "^3.0.0" - } - }, - "vfile-location": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-4.1.0.tgz", - "integrity": "sha512-YF23YMyASIIJXpktBa4vIGLJ5Gs88UB/XePgqPmTa7cDA+JeO3yclbpheQYCHjVHBn/yePzrXuygIL+xbvRYHw==", - "requires": { - "@types/unist": "^2.0.0", - "vfile": "^5.0.0" - } - }, - "vfile-message": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", - "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-stringify-position": "^3.0.0" - } - }, - "web-namespaces": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", - "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==" - }, - "zwitch": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", - "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==" - } - } - }, - "relateurl": { 
- "version": "0.2.7", - "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", - "integrity": "sha1-VNvzd+UUQKypCkzSdGANP/LYiKk=" - }, - "remark-emoji": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz", - "integrity": "sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==", - "requires": { - "emoticon": "^3.2.0", - "node-emoji": "^1.10.0", - "unist-util-visit": "^2.0.3" - }, - "dependencies": { - "unist-util-visit": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - } - }, - "unist-util-visit-parents": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - } - } - } - }, - "remark-footnotes": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/remark-footnotes/-/remark-footnotes-2.0.0.tgz", - "integrity": "sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ==" - }, - "remark-mdx": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-1.6.22.tgz", - "integrity": "sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==", - "requires": { - "@babel/core": "7.12.9", - "@babel/helper-plugin-utils": "7.10.4", - "@babel/plugin-proposal-object-rest-spread": "7.12.1", - "@babel/plugin-syntax-jsx": "7.12.1", - "@mdx-js/util": "1.6.22", - "is-alphabetical": "1.0.4", - "remark-parse": "8.0.3", - "unified": "9.2.0" - }, - "dependencies": { - "@babel/core": { - "version": "7.12.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", - "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", - "requires": { - "@babel/code-frame": "^7.10.4", - "@babel/generator": "^7.12.5", - "@babel/helper-module-transforms": "^7.12.1", - "@babel/helpers": "^7.12.5", - "@babel/parser": "^7.12.7", - "@babel/template": "^7.12.7", - "@babel/traverse": "^7.12.9", - "@babel/types": "^7.12.7", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.1", - "json5": "^2.1.2", - "lodash": "^4.17.19", - "resolve": "^1.3.2", - "semver": "^5.4.1", - "source-map": "^0.5.0" - } - }, - "@babel/helper-plugin-utils": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", - "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" - }, - "@babel/plugin-proposal-object-rest-spread": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz", - "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.0", - "@babel/plugin-transform-parameters": "^7.12.1" - } - }, - 
"@babel/plugin-syntax-jsx": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", - "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, - "unified": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", - "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", - "requires": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - } - } - } - }, - "remark-parse": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz", - "integrity": "sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==", - "requires": { - "ccount": "^1.0.0", - "collapse-white-space": "^1.0.2", - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-whitespace-character": "^1.0.0", - "is-word-character": "^1.0.0", - "markdown-escapes": "^1.0.0", - "parse-entities": "^2.0.0", - "repeat-string": "^1.5.4", - "state-toggle": "^1.0.0", - "trim": "0.0.1", - "trim-trailing-lines": "^1.0.0", - "unherit": "^1.0.4", - "unist-util-remove-position": "^2.0.0", - "vfile-location": "^3.0.0", - "xtend": "^4.0.1" - } - }, - "remark-rehype": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-10.1.0.tgz", - "integrity": "sha512-EFmR5zppdBp0WQeDVZ/b66CWJipB2q2VLNFMabzDSGR66Z2fQii83G5gTBbgGEnEEA0QRussvrFHxk1HWGJskw==", - "requires": { - "@types/hast": "^2.0.0", - "@types/mdast": "^3.0.0", - "mdast-util-to-hast": "^12.1.0", - "unified": "^10.0.0" - }, - "dependencies": { - "bail": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==" - }, - "is-plain-obj": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==" - }, - "mdast-util-definitions": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-5.1.2.tgz", - "integrity": "sha512-8SVPMuHqlPME/z3gqVwWY4zVXn8lqKv/pAhC57FuJ40ImXyBpmO5ukh98zB2v7Blql2FiHjHv9LVztSIqjY+MA==", - "requires": { - "@types/mdast": "^3.0.0", - "@types/unist": "^2.0.0", - "unist-util-visit": "^4.0.0" - } - }, - "mdast-util-to-hast": { - "version": "12.3.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-12.3.0.tgz", - "integrity": "sha512-pits93r8PhnIoU4Vy9bjW39M2jJ6/tdHyja9rrot9uujkN7UTU9SDnE6WNJz/IGyQk3XHX6yNNtrBH6cQzm8Hw==", - "requires": { - "@types/hast": "^2.0.0", - "@types/mdast": "^3.0.0", - "mdast-util-definitions": "^5.0.0", - "micromark-util-sanitize-uri": "^1.1.0", - "trim-lines": "^3.0.0", - "unist-util-generated": "^2.0.0", - "unist-util-position": "^4.0.0", - "unist-util-visit": "^4.0.0" - } - }, - "trough": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", - "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==" - }, - "unified": { - "version": "10.1.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", - "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", - "requires": { - "@types/unist": "^2.0.0", - "bail": "^2.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^4.0.0", - "trough": "^2.0.0", - "vfile": "^5.0.0" - } - }, - "unist-util-generated": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz", - "integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==" - }, - "unist-util-is": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", - "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-position": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz", - "integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-stringify-position": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", - "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", - "requires": { - "@types/unist": "^2.0.0" - } - }, - "unist-util-visit": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", - "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^5.0.0", - "unist-util-visit-parents": "^5.1.1" - } - }, - "unist-util-visit-parents": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", - "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^5.0.0" - } - }, - "vfile": { - "version": "5.3.7", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", - "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", - "requires": { - "@types/unist": "^2.0.0", - "is-buffer": "^2.0.0", - "unist-util-stringify-position": "^3.0.0", - "vfile-message": "^3.0.0" - } - }, - "vfile-message": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", - "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-stringify-position": "^3.0.0" - } - } - } - }, - "remark-squeeze-paragraphs": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz", - "integrity": "sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==", - "requires": { - 
"mdast-squeeze-paragraphs": "^4.0.0" - } - }, - "remarkable": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/remarkable/-/remarkable-2.0.1.tgz", - "integrity": "sha512-YJyMcOH5lrR+kZdmB0aJJ4+93bEojRZ1HGDn9Eagu6ibg7aVZhc3OWbbShRid+Q5eAfsEqWxpe+g5W5nYNfNiA==", - "requires": { - "argparse": "^1.0.10", - "autolinker": "^3.11.0" - }, - "dependencies": { - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - } - } - }, - "renderkid": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", - "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", - "requires": { - "css-select": "^4.1.3", - "dom-converter": "^0.2.0", - "htmlparser2": "^6.1.0", - "lodash": "^4.17.21", - "strip-ansi": "^6.0.1" - } - }, - "repeat-element": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz", - "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==" - }, - "repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==" - }, - "repeating": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", - "integrity": "sha512-ZqtSMuVybkISo2OWvqvm7iHSWngvdaW3IpsT9/uP8v4gMi591LY6h35wdOfvQdWCKFWZWm2Y1Opp4kV7vQKT6A==", - "requires": { - "is-finite": "^1.0.0" - } - }, - "replace-ext": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.1.tgz", - "integrity": "sha512-yD5BHCe7quCgBph4rMQ+0KkIRKwWCrHDOX1p1Gp6HwjPM5kVoCdKGNhN7ydqqsX6lJEnQDKZ/tFMiEdQ1dvPEw==" - }, - "replace-in-file": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/replace-in-file/-/replace-in-file-7.1.0.tgz", - "integrity": "sha512-1uZmJ78WtqNYCSuPC9IWbweXkGxPOtk2rKuar8diTw7naVIQZiE3Tm8ACx2PCMXDtVH6N+XxwaRY2qZ2xHPqXw==", - "dev": true, - "requires": { - "chalk": "^4.1.2", - "glob": "^8.1.0", - "yargs": "^17.7.2" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "requires": { - "brace-expansion": "^2.0.1" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "requires": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "dependencies": { - "qs": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", - "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==" - }, - "uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" - } - } - }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==" - }, - "require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==" - }, - "require-like": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", - "integrity": 
"sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==" - }, - "require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" - }, - "requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" - }, - "reselect": { - "version": "4.1.8", - "resolved": "https://registry.npmjs.org/reselect/-/reselect-4.1.8.tgz", - "integrity": "sha512-ab9EmR80F/zQTMNeneUr4cv+jSwPJgIlvEmVwLerwrWVbpLlBuls9XHzIeTFy4cegU2NHBp3va0LKOzU5qFEYQ==" - }, - "resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", - "requires": { - "is-core-module": "^2.9.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - } - }, - "resolve-cwd": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", - "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", - "dev": true, - "requires": { - "resolve-from": "^5.0.0" - }, - "dependencies": { - "resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", - "dev": true - } - } - }, - "resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==" - }, - "resolve-pathname": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", - "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" - }, - "resolve-url": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", - "integrity": "sha512-ZuF55hVUQaaczgOIwqWzkEcEidmlD/xl44x1UZnhOXcYuFN2S6+rcxpG+C1N3So0wvNI3DmJICUFfu2SxhBmvg==" - }, - "resolve.exports": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", - "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", - "dev": true - }, - "responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "requires": { - "lowercase-keys": "^1.0.0" - } - }, - "restore-cursor": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz", - "integrity": "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==", - "dev": true, - "requires": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - } - }, - "ret": { - "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": 
"sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==" - }, - "retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==" - }, - "reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==" - }, - "rfdc": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.3.0.tgz", - "integrity": "sha512-V2hovdzFbOi77/WajaSMXk2OLm+xNIeQdMMuB7icj7bk6zi2F8GGAxigcnDFpJHbNyNcgyJDiP+8nOrY5cZGrA==", - "dev": true - }, - "rgb-regex": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", - "integrity": "sha512-gDK5mkALDFER2YLqH6imYvK6g02gpNGM4ILDZ472EwWfXZnC2ZEpoB2ECXTyOVUKuk/bPJZMzwQPBYICzP+D3w==" - }, - "rgba-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz", - "integrity": "sha512-zgn5OjNQXLUTdq8m17KdaicF6w89TZs8ZU8y0AYENIU6wG8GG6LLm0yLSiPY8DmaYmHdgRW8rnApjoT0fQRfMg==" - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "requires": { - "glob": "^7.1.3" - } - }, - "ripemd160": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1" - } - }, - "robust-predicates": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", - "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==" - }, - "rst-selector-parser": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz", - "integrity": "sha512-nDG1rZeP6oFTLN6yNDV/uiAvs1+FS/KlrEwh7+y7dpuApDBy6bI2HTBcc0/V8lv9OTqfyD34eF7au2pm8aBbhA==", - "requires": { - "lodash.flattendeep": "^4.4.0", - "nearley": "^2.7.10" - } - }, - "rtl-detect": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz", - "integrity": "sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ==" - }, - "rtlcss": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-3.5.0.tgz", - "integrity": "sha512-wzgMaMFHQTnyi9YOwsx9LjOxYXJPzS8sYnFaKm6R5ysvTkwzHiB0vxnbHwchHQT65PTdBjDG21/kQBWI7q9O7A==", - "requires": { - "find-up": "^5.0.0", - "picocolors": "^1.0.0", - "postcss": "^8.3.11", - "strip-json-comments": "^3.1.1" - }, - "dependencies": { - "find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "requires": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - } - }, - "locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": 
"sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "requires": { - "p-locate": "^5.0.0" - } - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "requires": { - "p-limit": "^3.0.2" - } - }, - "strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==" - } - } - }, - "run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "requires": { - "queue-microtask": "^1.2.2" - } - }, - "rw": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", - "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==" - }, - "rxjs": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.6.0.tgz", - "integrity": "sha512-DDa7d8TFNUalGC9VqXvQ1euWNN7sc63TrUCuM9J998+ViviahMIjKSOU7rfcgFOF+FCD71BhDRv4hrFz+ImDLQ==", - "requires": { - "tslib": "^2.1.0" - } - }, - "sade": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz", - "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", - "requires": { - "mri": "^1.1.0" - } - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "safe-json-parse": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/safe-json-parse/-/safe-json-parse-1.0.1.tgz", - "integrity": "sha512-o0JmTu17WGUaUOHa1l0FPGXKBfijbxK6qoHzlkihsDXxzBHvJcA7zgviKR92Xs841rX9pK16unfphLq0/KqX7A==" - }, - "safe-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha512-aJXcif4xnaNUzvUuC5gcb46oTS7zvg4jpMTnuqtrEPlR3vFr4pxtdTwaF1Qs3Enjn9HK+ZlwQui+a7z0SywIzg==", - "requires": { - "ret": "~0.1.10" - } - }, - "safe-regex-test": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz", - "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==", - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "is-regex": "^1.1.4" - } - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "sass": { - "version": "1.70.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.70.0.tgz", - "integrity": 
"sha512-uUxNQ3zAHeAx5nRFskBnrWzDUJrrvpCPD5FNAoRvTi0WwremlheES3tg+56PaVtCs5QDRX5CBLxxKMDJMEa1WQ==", - "requires": { - "chokidar": ">=3.0.0 <4.0.0", - "immutable": "^4.0.0", - "source-map-js": ">=0.6.2 <2.0.0" - } - }, - "sass-loader": { - "version": "13.3.3", - "resolved": "https://registry.npmjs.org/sass-loader/-/sass-loader-13.3.3.tgz", - "integrity": "sha512-mt5YN2F1MOZr3d/wBRcZxeFgwgkH44wVc2zohO2YF6JiOMkiXe4BYRZpSu2sO1g71mo/j16txzUhsKZlqjVGzA==", - "requires": { - "neo-async": "^2.6.2" - } - }, - "sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" - }, - "scheduler": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz", - "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, - "schema-utils": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", - "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", - "requires": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - } - }, - "search-insights": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.8.0.tgz", - "integrity": "sha512-VzI4PMktJbydkbrF3/n40vFfRxdwg+o3CkQt0F3mHRSXVuv0PsVxQvB6mQQq/e9MCXAemcmp/GP9CNHpayFoCw==", - "peer": true - }, - "section-matter": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", - "requires": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - } - }, - "seek-bzip": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/seek-bzip/-/seek-bzip-1.0.6.tgz", - "integrity": "sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==", - "requires": { - "commander": "^2.8.1" - }, - "dependencies": { - "commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - } - } - }, - "select-hose": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" - }, - "selfsigned": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.0.1.tgz", - "integrity": "sha512-LmME957M1zOsUhG+67rAjKfiWFox3SBxE/yymatMZsAx+oMrJ0YQ8AToOnyCm7xbeg2ep37IHLxdu0o2MavQOQ==", - "requires": { - "node-forge": "^1" - } - }, - "semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "requires": { - "lru-cache": "^6.0.0" - } - }, - "semver-diff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", - "requires": { - "semver": 
"^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - } - } - }, - "semver-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-2.0.0.tgz", - "integrity": "sha512-mUdIBBvdn0PLOeP3TEkMH7HHeUP3GjsXCwKarjv/kGmUFOYg1VqEemKhoQpWMu6X2I8kHeuVdGibLGkVK+/5Qw==" - }, - "semver-truncate": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/semver-truncate/-/semver-truncate-1.1.2.tgz", - "integrity": "sha512-V1fGg9i4CL3qesB6U0L6XAm4xOJiHmt4QAacazumuasc03BvtFGIMCduv01JWQ69Nv+JST9TqhSCiJoxoY031w==", - "requires": { - "semver": "^5.3.0" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } - } - }, - "send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", - "requires": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "2.4.1", - "range-parser": "~1.2.1", - "statuses": "2.0.1" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - }, - "dependencies": { - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - } - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" - } - } - }, - "serialize-javascript": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", - "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", - "requires": { - "randombytes": "^2.1.0" - } - }, - "serve-handler": { - "version": "6.1.3", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.3.tgz", - "integrity": "sha512-FosMqFBNrLyeiIDvP1zgO6YoTzFYHxLDEIavhlmQ+knB2Z7l1t+kGLHkZIDN7UVWqQAmKI3D20A6F6jo3nDd4w==", - "requires": { - "bytes": "3.0.0", - "content-disposition": "0.5.2", - "fast-url-parser": "1.1.3", - "mime-types": "2.1.18", - "minimatch": "3.0.4", - "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1", - "range-parser": "1.2.0" - }, - "dependencies": { - "mime-db": { - "version": "1.33.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": 
"sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==" - }, - "mime-types": { - "version": "2.1.18", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", - "requires": { - "mime-db": "~1.33.0" - } - }, - "path-to-regexp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", - "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" - } - } - }, - "serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", - "requires": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==" - }, - "http-errors": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", - "requires": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - } - }, - "inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" - }, - "statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==" - } - } - }, - "serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", - "requires": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.18.0" - } - }, - "set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" - }, - "set-getter": { - "version": "0.1.1", - "resolved": 
"https://registry.npmjs.org/set-getter/-/set-getter-0.1.1.tgz", - "integrity": "sha512-9sVWOy+gthr+0G9DzqqLaYNA7+5OKkSmcqjL9cBpDEaZrr3ShQlyX2cZ/O/ozE41oxn/Tt0LGEM/w4Rub3A3gw==", - "requires": { - "to-object-path": "^0.3.0" - } - }, - "set-value": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", - "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", - "requires": { - "extend-shallow": "^2.0.1", - "is-extendable": "^0.1.1", - "is-plain-object": "^2.0.3", - "split-string": "^3.0.1" - } - }, - "setimmediate": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", - "integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==" - }, - "setprototypeof": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" - }, - "sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "shallow-clone": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", - "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", - "requires": { - "kind-of": "^6.0.2" - } - }, - "shallowequal": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", - "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==" - }, - "shell-quote": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.3.tgz", - "integrity": "sha512-Vpfqwm4EnqGdlsBFNmHhxhElJYrdfcxPThu+ryKS5J8L/fhAwLazFZtq+S+TWZ9ANj2piSQLGj6NQg+lKPmxrw==" - }, - "shelljs": { - "version": "0.8.5", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", - "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", - "requires": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - } - }, - "should": { - "version": "13.2.3", - "resolved": "https://registry.npmjs.org/should/-/should-13.2.3.tgz", - "integrity": "sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ==", - "requires": { - "should-equal": "^2.0.0", - "should-format": "^3.0.3", - "should-type": "^1.4.0", - "should-type-adaptors": "^1.0.1", - "should-util": "^1.0.0" - } - }, - "should-equal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/should-equal/-/should-equal-2.0.0.tgz", - "integrity": 
"sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==", - "requires": { - "should-type": "^1.4.0" - } - }, - "should-format": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/should-format/-/should-format-3.0.3.tgz", - "integrity": "sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q==", - "requires": { - "should-type": "^1.3.0", - "should-type-adaptors": "^1.0.1" - } - }, - "should-type": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/should-type/-/should-type-1.4.0.tgz", - "integrity": "sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ==" - }, - "should-type-adaptors": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/should-type-adaptors/-/should-type-adaptors-1.1.0.tgz", - "integrity": "sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA==", - "requires": { - "should-type": "^1.3.0", - "should-util": "^1.0.0" - } - }, - "should-util": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/should-util/-/should-util-1.0.1.tgz", - "integrity": "sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g==" - }, - "showdown": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/showdown/-/showdown-2.1.0.tgz", - "integrity": "sha512-/6NVYu4U819R2pUIk79n67SYgJHWCce0a5xTP979WbNp0FL9MN1I1QK662IDU1b6JzKTvmhgI7T7JYIxBi3kMQ==", - "requires": { - "commander": "^9.0.0" - }, - "dependencies": { - "commander": { - "version": "9.5.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", - "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==" - } - } - }, - "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - } - }, - "signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" - }, - "simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", - "requires": { - "is-arrayish": "^0.3.1" - } - }, - "sirv": { - "version": "1.0.19", - "resolved": "https://registry.npmjs.org/sirv/-/sirv-1.0.19.tgz", - "integrity": "sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ==", - "requires": { - "@polka/url": "^1.0.0-next.20", - "mrmime": "^1.0.0", - "totalist": "^1.0.0" - } - }, - "sisteransi": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", - "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" - }, - "sitemap": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", - "integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", - "requires": { - "@types/node": "^17.0.5", - "@types/sax": 
"^1.2.1", - "arg": "^5.0.0", - "sax": "^1.2.4" - } - }, - "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==" - }, - "slice-ansi": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz", - "integrity": "sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==", - "dev": true, - "requires": { - "ansi-styles": "^6.0.0", - "is-fullwidth-code-point": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", - "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", - "dev": true - } - } - }, - "slugify": { - "version": "1.6.6", - "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.6.tgz", - "integrity": "sha512-h+z7HKHYXj6wJU+AnS/+IH8Uh9fdcX1Lrhg1/VMdf9PwoBQXFcXiAdsy2tSK0P6gKwJLXp02r90ahUCqHk9rrw==" - }, - "snapdragon": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", - "requires": { - "base": "^0.11.1", - "debug": "^2.2.0", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "map-cache": "^0.2.2", - "source-map": "^0.5.6", - "source-map-resolve": "^0.5.0", - "use": "^3.1.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", - 
"requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "requires": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - } - }, - "kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - } - } - }, - "snapdragon-node": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", - "requires": { - "define-property": "^1.0.0", - "isobject": "^3.0.0", - "snapdragon-util": "^3.0.1" - }, - "dependencies": { - "define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==", - "requires": { - "is-descriptor": "^1.0.0" - } - } - } - }, - "snapdragon-util": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", - "requires": { - "kind-of": "^3.2.0" - }, - "dependencies": { - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "sockjs": { - "version": "0.3.24", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", - "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", - "requires": { - "faye-websocket": "^0.11.3", - "uuid": "^8.3.2", - "websocket-driver": "^0.7.4" - } - }, - "sort-css-media-queries": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz", - "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==" - }, - "sort-keys": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-1.1.2.tgz", - "integrity": "sha512-vzn8aSqKgytVik0iwdBEi+zevbTYZogewTUM6dtpmGwEcdzbub/TX4bCzRhebDCRC3QzXgJsLRKB2V/Oof7HXg==", - "requires": { - "is-plain-obj": "^1.0.0" - }, - "dependencies": { - 
"is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==" - } - } - }, - "sort-keys-length": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/sort-keys-length/-/sort-keys-length-1.0.1.tgz", - "integrity": "sha512-GRbEOUqCxemTAk/b32F2xa8wDTs+Z1QHOkbhJDQTvv/6G3ZkbJ+frYWsTcc7cBB3Fu4wy4XlLCuNtJuMn7Gsvw==", - "requires": { - "sort-keys": "^1.0.0" - } - }, - "source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==" - }, - "source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==" - }, - "source-map-resolve": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", - "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", - "requires": { - "atob": "^2.1.2", - "decode-uri-component": "^0.2.0", - "resolve-url": "^0.2.1", - "source-map-url": "^0.4.0", - "urix": "^0.1.0" - } - }, - "source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "requires": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - }, - "dependencies": { - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - } - } - }, - "source-map-url": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz", - "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==" - }, - "space-separated-tokens": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", - "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==" - }, - "spdx-correct": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", - "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==" - }, - "spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.13", - "resolved": 
"https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.13.tgz", - "integrity": "sha512-XkD+zwiqXHikFZm4AX/7JSCXA98U5Db4AFd5XUg/+9UNtnH75+Z9KxtpYiJZx36mUDVOwH83pl7yvCer6ewM3w==" - }, - "spdy": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", - "requires": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - } - }, - "spdy-transport": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", - "requires": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "split-string": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", - "requires": { - "extend-shallow": "^3.0.0" - }, - "dependencies": { - "extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", - "requires": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - } - }, - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "requires": { - "is-plain-object": "^2.0.4" - } - } - } - }, - "sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" - }, - "squeak": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/squeak/-/squeak-1.3.0.tgz", - "integrity": "sha512-YQL1ulInM+ev8nXX7vfXsCsDh6IqXlrremc1hzi77776BtpWgYJUMto3UM05GSAaGzJgWekszjoKDrVNB5XG+A==", - "requires": { - "chalk": "^1.0.0", - "console-stream": "^0.1.1", - "lpad-align": "^1.0.1" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==" - }, - "ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha512-kmCevFghRiWM7HB5zTPULl4r9bVFSWjz62MhqizDGUrq2NWuNMQyuv4tHHoKJHs69M/MF64lEcHdYIocrdWQYA==" - }, - "chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==", - "requires": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": 
"sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg==", - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g==" - } - } - }, - "sshpk": { - "version": "1.17.0", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz", - "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==", - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, - "stable": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" - }, - "stack-utils": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", - "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", - "dev": true, - "requires": { - "escape-string-regexp": "^2.0.0" - }, - "dependencies": { - "escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "dev": true - } - } - }, - "state-toggle": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz", - "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==" - }, - "static-extend": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", - "integrity": "sha512-72E9+uLc27Mt718pMHt9VMNiAL4LMsmDbBva8mxWUCkT07fSzEGMYUCk0XWY6lp0j6RBAG4cJ3mWuZv2OE3s0g==", - "requires": { - "define-property": "^0.2.5", - "object-copy": "^0.1.0" - }, - "dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==", - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "is-data-descriptor": { - "version": "0.1.4", - "resolved": 
"https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "requires": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - } - }, - "kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" - } - } - }, - "statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==" - }, - "std-env": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.0.1.tgz", - "integrity": "sha512-mC1Ps9l77/97qeOZc+HrOL7TIaOboHqMZ24dGVQrlxFcpPpfCHpH+qfUT7Dz+6mlG8+JPA1KfBQo19iC/+Ngcw==" - }, - "stream-browserify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-3.0.0.tgz", - "integrity": "sha512-H73RAHsVBapbim0tU2JwwOiXUj+fikfiaoYAKHF3VJfA0pe2BCzkhAHBlLG6REzE+2WNZcxOXjK7lkso+9euLA==", - "requires": { - "inherits": "~2.0.4", - "readable-stream": "^3.5.0" - } - }, - "stream-http": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-3.2.0.tgz", - "integrity": "sha512-Oq1bLqisTyK3TSCXpPbT4sdeYNdmyZJv1LxpEm2vu1ZhK89kSE5YXwZc3cWk0MagGaKriBh9mCFbVGtO+vY29A==", - "requires": { - "builtin-status-codes": "^3.0.0", - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "xtend": "^4.0.2" - } - }, - "strict-uri-encode": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha512-R3f198pcvnB+5IpnBlRkphuE9n46WyVl8I39W/ZUTZLz4nqSP/oLYUrcnJrw462Ds8he4YKMov2efsTIw1BDGQ==" - }, - "string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "requires": { - "safe-buffer": "~5.2.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - } - } - }, - "string-argv": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz", - "integrity": "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==", - "dev": true - }, - "string-length": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", - "integrity": 
"sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", - "dev": true, - "requires": { - "char-regex": "^1.0.2", - "strip-ansi": "^6.0.0" - } - }, - "string-template": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/string-template/-/string-template-0.2.1.tgz", - "integrity": "sha512-Yptehjogou2xm4UJbxJ4CxgZx12HBfeystp0y3x7s4Dj32ltVVG1Gg8YhKjHZkHicuKpZX/ffilA8505VbUbpw==" - }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "string-width-cjs": { - "version": "npm:string-width@4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "string.prototype.trim": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz", - "integrity": "sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "string.prototype.trimend": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "stringify-object": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", - "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", - "requires": { - "get-own-enumerable-property-symbols": "^3.0.0", - "is-obj": "^1.0.1", - "is-regexp": "^1.0.0" - } - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "strip-ansi-cjs": { - "version": "npm:strip-ansi@6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "strip-bom": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", - "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", - "dev": true - }, - "strip-bom-string": { - 
"version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==" - }, - "strip-color": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/strip-color/-/strip-color-0.1.0.tgz", - "integrity": "sha512-p9LsUieSjWNNAxVCXLeilaDlmuUOrDS5/dF9znM1nZc7EGX5+zEFC0bEevsNIaldjlks+2jns5Siz6F9iK6jwA==" - }, - "strip-dirs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/strip-dirs/-/strip-dirs-2.1.0.tgz", - "integrity": "sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g==", - "requires": { - "is-natural-number": "^4.0.1" - } - }, - "strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha512-7FCwGGmx8mD5xQd3RPUvnSpUXHM3BWuzjtpD4TXsfcZ9EL4azvVVUscFYwD9nx8Kh+uCBC00XBtAykoMHwTh8Q==" - }, - "strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==" - }, - "strip-indent": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-1.0.1.tgz", - "integrity": "sha512-I5iQq6aFMM62fBEAIB/hXzwJD6EEZ0xEGCX2t7oXqaKPIRgt4WruAQ285BISgdkP+HLGWyeGmNJcpIwFeRYRUA==", - "requires": { - "get-stdin": "^4.0.1" - } - }, - "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" - }, - "strip-outer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-outer/-/strip-outer-1.0.1.tgz", - "integrity": "sha512-k55yxKHwaXnpYGsOzg4Vl8+tDrWylxDEpknGjhTiZB8dFRU5rTo9CAzeycivxV3s+zlTKwrs6WxMxR95n26kwg==", - "requires": { - "escape-string-regexp": "^1.0.2" - } - }, - "strnum": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/strnum/-/strnum-1.0.5.tgz", - "integrity": "sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA==" - }, - "style-mod": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz", - "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==" - }, - "style-to-object": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz", - "integrity": "sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==", - "requires": { - "inline-style-parser": "0.1.1" - } - }, - "stylehacks": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", - "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", - "requires": { - "browserslist": "^4.21.4", - "postcss-selector-parser": "^6.0.4" - } - }, - "stylis": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.1.tgz", - "integrity": "sha512-EQepAV+wMsIaGVGX1RECzgrcqRRU/0sYOHkeLsZ3fzHaHXZy4DaOOX0vOlGQdlsjkh3mFHAIlVimpwAs4dslyQ==" - }, - "sucrase": { - "version": "3.35.0", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", - "integrity": 
"sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", - "requires": { - "@jridgewell/gen-mapping": "^0.3.2", - "commander": "^4.0.0", - "glob": "^10.3.10", - "lines-and-columns": "^1.1.6", - "mz": "^2.7.0", - "pirates": "^4.0.1", - "ts-interface-checker": "^0.1.9" - }, - "dependencies": { - "brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "requires": { - "balanced-match": "^1.0.0" - } - }, - "commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==" - }, - "glob": { - "version": "10.3.10", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", - "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", - "requires": { - "foreground-child": "^3.1.0", - "jackspeak": "^2.3.5", - "minimatch": "^9.0.1", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", - "path-scurry": "^1.10.1" - } - }, - "minimatch": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", - "requires": { - "brace-expansion": "^2.0.1" - } - } - } - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - }, - "supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==" - }, - "svg-parser": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", - "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" - }, - "svgo": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", - "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", - "requires": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^4.1.3", - "css-tree": "^1.1.3", - "csso": "^4.2.0", - "picocolors": "^1.0.0", - "stable": "^0.1.8" - }, - "dependencies": { - "commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==" - } - } - }, - "swagger2openapi": { - "version": "7.0.8", - "resolved": "https://registry.npmjs.org/swagger2openapi/-/swagger2openapi-7.0.8.tgz", - "integrity": "sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==", - "requires": { - "call-me-maybe": "^1.0.1", - "node-fetch": "^2.6.1", - "node-fetch-h2": "^2.3.0", - "node-readfiles": "^0.2.0", - "oas-kit-common": "^1.0.8", - "oas-resolver": "^2.5.6", - "oas-schema-walker": "^1.1.5", - "oas-validator": "^5.0.8", - 
"reftools": "^1.1.9", - "yaml": "^1.10.0", - "yargs": "^17.0.1" - } - }, - "swc-loader": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/swc-loader/-/swc-loader-0.2.3.tgz", - "integrity": "sha512-D1p6XXURfSPleZZA/Lipb3A8pZ17fP4NObZvFCDjK/OKljroqDpPmsBdTraWhVBqUNpcWBQY1imWdoPScRlQ7A==", - "dev": true, - "requires": {} - }, - "tabbable": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", - "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==" - }, - "tapable": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", - "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==" - }, - "tar-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", - "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", - "requires": { - "bl": "^1.0.0", - "buffer-alloc": "^1.2.0", - "end-of-stream": "^1.0.0", - "fs-constants": "^1.0.0", - "readable-stream": "^2.3.0", - "to-buffer": "^1.1.1", - "xtend": "^4.0.0" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "tcp-port-used": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/tcp-port-used/-/tcp-port-used-1.0.2.tgz", - "integrity": "sha512-l7ar8lLUD3XS1V2lfoJlCBaeoaWo/2xfYt81hM7VlvR4RrMVFqfmzfhLVk40hAb368uitje5gPtBRL1m/DGvLA==", - "requires": { - "debug": "4.3.1", - "is2": "^2.0.6" - }, - "dependencies": { - "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "requires": { - "ms": "2.1.2" - } - } - } - }, - "temp-dir": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-1.0.0.tgz", - "integrity": "sha512-xZFXEGbG7SNC3itwBzI3RYjq/cEhBkx2hJuKGIUOcEULmkQExXiHat2z/qkISYsuR+IKumhEfKKbV5qXmhICFQ==" - }, - "tempfile": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/tempfile/-/tempfile-2.0.0.tgz", - "integrity": "sha512-ZOn6nJUgvgC09+doCEF3oB+r3ag7kUvlsXEGX069QRD60p+P3uP7XG9N2/at+EyIRGSN//ZY3LyEotA1YpmjuA==", - "requires": { - "temp-dir": "^1.0.0", - "uuid": "^3.0.1" - }, - "dependencies": { - "uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": 
"sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" - } - } - }, - "terser": { - "version": "5.10.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.10.0.tgz", - "integrity": "sha512-AMmF99DMfEDiRJfxfY5jj5wNH/bYO09cniSqhfoyxc8sFoYIgkJy86G04UoZU5VjlpnplVu0K6Tx6E9b5+DlHA==", - "requires": { - "commander": "^2.20.0", - "source-map": "~0.7.2", - "source-map-support": "~0.5.20" - }, - "dependencies": { - "commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, - "source-map": { - "version": "0.7.3", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", - "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==" - } - } - }, - "terser-webpack-plugin": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.3.tgz", - "integrity": "sha512-Fx60G5HNYknNTNQnzQ1VePRuu89ZVYWfjRAeT5rITuCY/1b08s49e5kSQwHDirKZWuoKOBRFS98EUUoZ9kLEwQ==", - "requires": { - "@jridgewell/trace-mapping": "^0.3.7", - "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.0", - "terser": "^5.7.2" - } - }, - "test-exclude": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", - "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", - "dev": true, - "requires": { - "@istanbuljs/schema": "^0.1.2", - "glob": "^7.1.4", - "minimatch": "^3.0.4" - } - }, - "text-segmentation": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/text-segmentation/-/text-segmentation-1.0.3.tgz", - "integrity": "sha512-iOiPUo/BGnZ6+54OsWxZidGCsdU8YbE4PSpdPinp7DeMtUJNJBoJ/ouUSTJjHkh1KntHaltHl/gDs2FC4i5+Nw==", - "requires": { - "utrie": "^1.0.2" - } - }, - "text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" - }, - "thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "requires": { - "any-promise": "^1.0.0" - } - }, - "thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "requires": { - "thenify": ">= 3.1.0 < 4" - } - }, - "through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" - }, - "through2": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", - "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", - "requires": { - "readable-stream": "~2.3.6", - "xtend": "~4.0.1" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": 
"sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "thunky": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" - }, - "ticky": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ticky/-/ticky-1.0.1.tgz", - "integrity": "sha512-RX35iq/D+lrsqhcPWIazM9ELkjOe30MSeoBHQHSsRwd1YuhJO5ui1K1/R0r7N3mFvbLBs33idw+eR6j+w6i/DA==" - }, - "timed-out": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz", - "integrity": "sha512-G7r3AhovYtr5YKOWQkta8RKAPb+J9IsO4uVmzjl8AZwfhs8UcUwTiD6gcJYSgOtzyjvQKrKYn41syHbUWMkafA==" - }, - "timers-browserify": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", - "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", - "requires": { - "setimmediate": "^1.0.4" - } - }, - "timsort": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha512-qsdtZH+vMoCARQtyod4imc2nIJwg9Cc7lPRrw9CzF8ZKR0khdr8+2nX80PBhET3tcyTtJDxAffGh2rXH4tyU8A==" - }, - "tiny-invariant": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.2.0.tgz", - "integrity": "sha512-1Uhn/aqw5C6RI4KejVeTg6mIS7IqxnLJ8Mv2tV5rTc0qWobay7pDUz6Wi392Cnc8ak1H0F2cjoRzb2/AW4+Fvg==" - }, - "tiny-lr": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/tiny-lr/-/tiny-lr-1.1.1.tgz", - "integrity": "sha512-44yhA3tsaRoMOjQQ+5v5mVdqef+kH6Qze9jTpqtVufgYjYt08zyZAwNwwVBj3i1rJMnR52IxOW0LK0vBzgAkuA==", - "requires": { - "body": "^5.1.0", - "debug": "^3.1.0", - "faye-websocket": "~0.10.0", - "livereload-js": "^2.3.0", - "object-assign": "^4.1.0", - "qs": "^6.4.0" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "requires": { - "ms": "^2.1.1" - } - }, - "faye-websocket": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.10.0.tgz", - "integrity": "sha512-Xhj93RXbMSq8urNCUq4p9l0P6hnySJ/7YNRhYNug0bLOuii7pKO7xQFb5mx9xZXWCar88pLPb805PvUkwrLZpQ==", - "requires": { - "websocket-driver": ">=0.5.1" - } - } - } - }, - "tiny-warning": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", - "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" - 
}, - "tmpl": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", - "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", - "dev": true - }, - "to-buffer": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", - "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" - }, - "to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==" - }, - "to-object-path": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", - "integrity": "sha512-9mWHdnGRuh3onocaHzukyvCZhzvr6tiflAy/JRFXcJX0TjgfWA9pk9t8CMbzmBE4Jfw58pXbkngtBtqYxzNEyg==", - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==" - }, - "to-regex": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", - "requires": { - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "regex-not": "^1.0.2", - "safe-regex": "^1.1.0" - }, - "dependencies": { - "extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", - "requires": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - } - }, - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "requires": { - "is-plain-object": "^2.0.4" - } - } - } - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "requires": { - "is-number": "^7.0.0" - } - }, - "toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==" - }, - "toml": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/toml/-/toml-2.3.6.tgz", - "integrity": 
"sha512-gVweAectJU3ebq//Ferr2JUY4WKSDe5N+z0FvjDncLGyHmIDoxgY/2Ie4qfEIDm4IS7OA6Rmdm7pdEEdMcV/xQ==" - }, - "totalist": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/totalist/-/totalist-1.1.0.tgz", - "integrity": "sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g==" - }, - "tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "requires": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - } - }, - "tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" - }, - "traverse": { - "version": "0.3.9", - "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.3.9.tgz", - "integrity": "sha512-iawgk0hLP3SxGKDfnDJf8wTz4p2qImnyihM5Hh/sGvQ3K37dPi/w8sRhdNIxYA1TwFwc5mDhIJq+O0RsvXBKdQ==" - }, - "tree-node-cli": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/tree-node-cli/-/tree-node-cli-1.6.0.tgz", - "integrity": "sha512-M8um5Lbl76rWU5aC8oOeEhruiCM29lFCKnwpxrwMjpRicHXJx+bb9Cak11G3zYLrMb6Glsrhnn90rHIzDJrjvg==", - "requires": { - "commander": "^5.0.0", - "fast-folder-size": "1.6.1", - "pretty-bytes": "^5.6.0" - } - }, - "trim": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz", - "integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==" - }, - "trim-lines": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", - "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==" - }, - "trim-newlines": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", - "integrity": "sha512-Nm4cF79FhSTzrLKGDMi3I4utBtFv8qKy4sq1enftf2gMdpqI8oVQTAfySkTz5r49giVzDj88SVZXP4CeYQwjaw==" - }, - "trim-repeated": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/trim-repeated/-/trim-repeated-1.0.0.tgz", - "integrity": "sha512-pkonvlKk8/ZuR0D5tLW8ljt5I8kmxp2XKymhepUeOdCEfKpZaktSArkLHZt76OB1ZvO9bssUsDty4SWhLvZpLg==", - "requires": { - "escape-string-regexp": "^1.0.2" - } - }, - "trim-trailing-lines": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz", - "integrity": "sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==" - }, - "trough": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", - "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==" - }, - "truncate-html": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/truncate-html/-/truncate-html-1.0.4.tgz", - "integrity": "sha512-FpDAlPzpJ3jlZiNEahRs584FS3jOSQafgj4cC9DmAYPct6uMZDLY625+eErRd43G35vGDrNq3i7b4aYUQ/Bxqw==", - "requires": { - "@types/cheerio": "^0.22.8", - "cheerio": "0.22.0" - }, - "dependencies": { - "cheerio": { - "version": "0.22.0", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-0.22.0.tgz", - "integrity": "sha512-8/MzidM6G/TgRelkzDG13y3Y9LxBjCb+8yOEZ9+wwq5gVF2w2pV0wmHvjfT0RvuxGyR7UEuK36r+yYMbT4uKgA==", - "requires": { - "css-select": "~1.2.0", - "dom-serializer": "~0.1.0", 
- "entities": "~1.1.1", - "htmlparser2": "^3.9.1", - "lodash.assignin": "^4.0.9", - "lodash.bind": "^4.1.4", - "lodash.defaults": "^4.0.1", - "lodash.filter": "^4.4.0", - "lodash.flatten": "^4.2.0", - "lodash.foreach": "^4.3.0", - "lodash.map": "^4.4.0", - "lodash.merge": "^4.4.0", - "lodash.pick": "^4.2.1", - "lodash.reduce": "^4.4.0", - "lodash.reject": "^4.4.0", - "lodash.some": "^4.4.0" - } - }, - "css-select": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-1.2.0.tgz", - "integrity": "sha512-dUQOBoqdR7QwV90WysXPLXG5LO7nhYBgiWVfxF80DKPF8zx1t/pUd2FYy73emg3zrjtM6dzmYgbHKfV2rxiHQA==", - "requires": { - "boolbase": "~1.0.0", - "css-what": "2.1", - "domutils": "1.5.1", - "nth-check": "~1.0.1" - } - }, - "css-what": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz", - "integrity": "sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg==" - }, - "dom-serializer": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.1.1.tgz", - "integrity": "sha512-l0IU0pPzLWSHBcieZbpOKgkIn3ts3vAh7ZuFyXNwJxJXk/c4Gwj9xaTJwIDVQCXawWD0qb3IzMGH5rglQaO0XA==", - "requires": { - "domelementtype": "^1.3.0", - "entities": "^1.1.1" - } - }, - "domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "domhandler": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "requires": { - "domelementtype": "1" - } - }, - "domutils": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", - "integrity": "sha512-gSu5Oi/I+3wDENBsOWBiRK1eoGxcywYSqg3rR960/+EfY0CF4EX1VPkgHOZ3WiS/Jg2DtliF6BhWcHlfpYUcGw==", - "requires": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - }, - "htmlparser2": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz", - "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==", - "requires": { - "domelementtype": "^1.3.1", - "domhandler": "^2.3.0", - "domutils": "^1.5.1", - "entities": "^1.1.1", - "inherits": "^2.0.1", - "readable-stream": "^3.1.1" - } - }, - "nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "requires": { - "boolbase": "~1.0.0" - } - } - } - }, - "ts-dedent": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", - "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==" - }, - "ts-interface-checker": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", - "integrity": 
"sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==" - }, - "tslib": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", - "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" - }, - "tty-browserify": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.1.tgz", - "integrity": "sha512-C3TaO7K81YvjCgQH9Q1S3R3P3BtN3RIM8n+OvX4il1K1zgE8ZhI0op7kClgkxtutIE8hQrcrHBXvIheqKUUCxw==" - }, - "tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" - }, - "type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", - "dev": true - }, - "type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==" - }, - "type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "requires": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - } - }, - "typed-array-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", - "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==", - "requires": { - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "is-typed-array": "^1.1.9" - } - }, - "typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==" - }, - "typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "requires": { - "is-typedarray": "^1.0.0" - } - }, - "typescript": { - "version": "4.7.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.7.4.tgz", - "integrity": "sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==", - "peer": true - }, - "ua-parser-js": { - "version": "1.0.35", - "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.35.tgz", - "integrity": "sha512-fKnGuqmTBnIE+/KXSzCn4db8RTigUzw1AN0DmdU6hJovUTbYJKyqj+8Mt1c4VfRDnOVJnENmfYkIPZ946UrSAA==" - }, - "unbox-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", - "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==", - "requires": 
{ - "call-bind": "^1.0.2", - "has-bigints": "^1.0.2", - "has-symbols": "^1.0.3", - "which-boxed-primitive": "^1.0.2" - } - }, - "unbzip2-stream": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz", - "integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==", - "requires": { - "buffer": "^5.2.1", - "through": "^2.3.8" - } - }, - "unherit": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz", - "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==", - "requires": { - "inherits": "^2.0.0", - "xtend": "^4.0.0" - } - }, - "unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==" - }, - "unicode-match-property-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", - "requires": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - } - }, - "unicode-match-property-value-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", - "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==" - }, - "unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", - "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==" - }, - "unified": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", - "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", - "requires": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - } - }, - "union-value": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", - "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", - "requires": { - "arr-union": "^3.1.0", - "get-value": "^2.0.6", - "is-extendable": "^0.1.1", - "set-value": "^2.0.1" - } - }, - "uniq": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha512-Gw+zz50YNKPDKXs+9d+aKAjVwpjNwqzvNpLigIruT4HA9lMZNdMqs9x07kKHB/L9WRzqp4+DlTU5s4wG2esdoA==" - }, - "uniqs": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz", - "integrity": "sha512-mZdDpf3vBV5Efh29kMw5tXoup/buMgxLzOt/XKFKcVmi+15ManNQWr6HfZ2aiZTYlYixbdNJ0KFmIZIv52tHSQ==" - }, - "unique-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": 
"sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", - "requires": { - "crypto-random-string": "^2.0.0" - } - }, - "unist-builder": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz", - "integrity": "sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==" - }, - "unist-util-generated": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.6.tgz", - "integrity": "sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==" - }, - "unist-util-is": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", - "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==" - }, - "unist-util-position": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz", - "integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==" - }, - "unist-util-remove": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-2.1.0.tgz", - "integrity": "sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==", - "requires": { - "unist-util-is": "^4.0.0" - } - }, - "unist-util-remove-position": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz", - "integrity": "sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==", - "requires": { - "unist-util-visit": "^2.0.0" - }, - "dependencies": { - "unist-util-visit": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", - "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0", - "unist-util-visit-parents": "^3.0.0" - } - }, - "unist-util-visit-parents": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", - "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-is": "^4.0.0" - } - } - } - }, - "unist-util-stringify-position": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", - "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", - "requires": { - "@types/unist": "^2.0.2" - } - }, - "unist-util-visit": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", - "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", - "requires": { - "@types/unist": "^3.0.0", - "unist-util-is": "^6.0.0", - "unist-util-visit-parents": "^6.0.0" - }, - "dependencies": { - "@types/unist": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.0.tgz", - "integrity": "sha512-MFETx3tbTjE7Uk6vvnWINA/1iJ7LuMdO4fcq8UfF0pRbj01aGLduVvQcRyswuACJdpnHgg8E3rQLhaRdNEJS0w==" - }, - "unist-util-is": { 
- "version": "6.0.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", - "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", - "requires": { - "@types/unist": "^3.0.0" - } - } - } - }, - "unist-util-visit-parents": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", - "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", - "requires": { - "@types/unist": "^3.0.0", - "unist-util-is": "^6.0.0" - }, - "dependencies": { - "@types/unist": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.0.tgz", - "integrity": "sha512-MFETx3tbTjE7Uk6vvnWINA/1iJ7LuMdO4fcq8UfF0pRbj01aGLduVvQcRyswuACJdpnHgg8E3rQLhaRdNEJS0w==" - }, - "unist-util-is": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", - "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", - "requires": { - "@types/unist": "^3.0.0" - } - } - } - }, - "universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" - }, - "unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==" - }, - "unquote": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", - "integrity": "sha512-vRCqFv6UhXpWxZPyGDh/F3ZpNv8/qo7w6iufLpQg9aKnQ71qM4B5KiI7Mia9COcjEhrO9LueHpMYjYzsWH3OIg==" - }, - "unset-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", - "integrity": "sha512-PcA2tsuGSF9cnySLHTLSh2qrQiJ70mn+r+Glzxv2TWZblxsxCC52BDlZoPCsz7STd9pN7EZetkWZBAvk4cgZdQ==", - "requires": { - "has-value": "^0.3.1", - "isobject": "^3.0.0" - }, - "dependencies": { - "has-value": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", - "integrity": "sha512-gpG936j8/MzaeID5Yif+577c17TxaDmhuyVgSwtnL/q8UUTySg8Mecb+8Cf1otgLoD7DDH75axp86ER7LFsf3Q==", - "requires": { - "get-value": "^2.0.3", - "has-values": "^0.1.4", - "isobject": "^2.0.0" - }, - "dependencies": { - "isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha512-+OUdGJlgjOBZDfxnDjYYG6zp487z0JGNQq3cYQYg5f5hKR+syHMsaztzGeml/4kGG55CSpKSpWTY+jYGgsHLgA==", - "requires": { - "isarray": "1.0.0" - } - } - } - }, - "has-values": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", - "integrity": "sha512-J8S0cEdWuQbqD9//tlZxiMuMNmxB8PlEwvYwuxsTmR1G5RXUePEX/SJn7aD0GMLieuZYSwNH0cQuJGwnYunXRQ==" - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - } - } - }, - "unzipper": { - "version": "0.10.11", - "resolved": "https://registry.npmjs.org/unzipper/-/unzipper-0.10.11.tgz", - "integrity": "sha512-+BrAq2oFqWod5IESRjL3S8baohbevGcVA+teAIOYWM3pDVdseogqbzhhvvmiyQrUNKFUnDMtELW3X8ykbyDCJw==", - "requires": { - 
"big-integer": "^1.6.17", - "binary": "~0.3.0", - "bluebird": "~3.4.1", - "buffer-indexof-polyfill": "~1.0.0", - "duplexer2": "~0.1.4", - "fstream": "^1.0.12", - "graceful-fs": "^4.2.2", - "listenercount": "~1.0.1", - "readable-stream": "~2.3.6", - "setimmediate": "~1.0.4" - }, - "dependencies": { - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "update-browserslist-db": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", - "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", - "requires": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" - } - }, - "update-notifier": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz", - "integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==", - "requires": { - "boxen": "^5.0.0", - "chalk": "^4.1.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.4.0", - "is-npm": "^5.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.1.0", - "pupa": "^2.1.1", - "semver": "^7.3.4", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "requires": { - "punycode": "^2.1.0" - } - }, - "urix": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", - "integrity": "sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==" - }, - "url": { - "version": "0.11.3", - "resolved": "https://registry.npmjs.org/url/-/url-0.11.3.tgz", - "integrity": "sha512-6hxOLGfZASQK/cijlZnZJTq8OXAkt/3YGfQX45vvMYXpZoo8NdWZcY73K108Jf759lS1Bv/8wXnHDTSz17dSRw==", - "requires": { - "punycode": "^1.4.1", - "qs": "^6.11.2" - }, - "dependencies": { - "punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" - }, - "qs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.2.tgz", - "integrity": "sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==", - "requires": { - "side-channel": "^1.0.4" - } - } - } - }, - "url-loader": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", - "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", - "requires": { - "loader-utils": "^2.0.0", - "mime-types": "^2.1.27", - "schema-utils": "^3.0.0" - } - }, - "url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "requires": { - "prepend-http": "^2.0.0" - } - }, - "url-to-options": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/url-to-options/-/url-to-options-1.0.1.tgz", - "integrity": "sha512-0kQLIzG4fdk/G5NONku64rSH/x32NOA39LVQqlK8Le6lvTF6GGRJpqaQFGgU+CLwySIqBSMdwYM0sYcW9f6P4A==" - }, - "use": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==" - }, - "use-composed-ref": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", - "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", - "requires": {} - }, - "use-editable": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/use-editable/-/use-editable-2.3.3.tgz", - "integrity": "sha512-7wVD2JbfAFJ3DK0vITvXBdpd9JAz5BcKAAolsnLBuBn6UDDwBGuCIAGvR3yA2BNKm578vAMVHFCWaOcA+BhhiA==", - "requires": {} - }, - "use-isomorphic-layout-effect": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", - "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", - "requires": {} - 
}, - "use-latest": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", - "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", - "requires": { - "use-isomorphic-layout-effect": "^1.1.1" - } - }, - "use-sync-external-store": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", - "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==", - "requires": {} - }, - "util": { - "version": "0.12.5", - "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", - "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", - "requires": { - "inherits": "^2.0.3", - "is-arguments": "^1.0.4", - "is-generator-function": "^1.0.7", - "is-typed-array": "^1.1.3", - "which-typed-array": "^1.1.2" - } - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "util.promisify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz", - "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==", - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.2", - "has-symbols": "^1.0.1", - "object.getownpropertydescriptors": "^2.1.0" - } - }, - "utila": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", - "integrity": "sha1-ihagXURWV6Oupe7MWxKk+lN5dyw=" - }, - "utility-types": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", - "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==" - }, - "utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==" - }, - "utrie": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/utrie/-/utrie-1.0.2.tgz", - "integrity": "sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw==", - "requires": { - "base64-arraybuffer": "^1.0.2" - } - }, - "uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" - }, - "uvu": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz", - "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==", - "requires": { - "dequal": "^2.0.0", - "diff": "^5.0.0", - "kleur": "^4.0.3", - "sade": "^1.7.3" - }, - "dependencies": { - "kleur": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", - "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==" - } - } - }, - "v8-to-istanbul": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.0.tgz", - "integrity": "sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==", - "dev": true, - "requires": { 
- "@jridgewell/trace-mapping": "^0.3.12", - "@types/istanbul-lib-coverage": "^2.0.1", - "convert-source-map": "^1.6.0" - } - }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "validate.io-array": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/validate.io-array/-/validate.io-array-1.0.6.tgz", - "integrity": "sha512-DeOy7CnPEziggrOO5CZhVKJw6S3Yi7e9e65R1Nl/RTN1vTQKnzjfvks0/8kQ40FP/dsjRAOd4hxmJ7uLa6vxkg==" - }, - "validate.io-function": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/validate.io-function/-/validate.io-function-1.0.2.tgz", - "integrity": "sha512-LlFybRJEriSuBnUhQyG5bwglhh50EpTL2ul23MPIuR1odjO7XaMLFV8vHGwp7AZciFxtYOeiSCT5st+XSPONiQ==" - }, - "validate.io-integer": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/validate.io-integer/-/validate.io-integer-1.0.5.tgz", - "integrity": "sha512-22izsYSLojN/P6bppBqhgUDjCkr5RY2jd+N2a3DCAUey8ydvrZ/OkGvFPR7qfOpwR2LC5p4Ngzxz36g5Vgr/hQ==", - "requires": { - "validate.io-number": "^1.0.3" - } - }, - "validate.io-integer-array": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/validate.io-integer-array/-/validate.io-integer-array-1.0.0.tgz", - "integrity": "sha512-mTrMk/1ytQHtCY0oNO3dztafHYyGU88KL+jRxWuzfOmQb+4qqnWmI+gykvGp8usKZOM0H7keJHEbRaFiYA0VrA==", - "requires": { - "validate.io-array": "^1.0.3", - "validate.io-integer": "^1.0.4" - } - }, - "validate.io-number": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/validate.io-number/-/validate.io-number-1.0.3.tgz", - "integrity": "sha512-kRAyotcbNaSYoDnXvb4MHg/0a1egJdLwS6oJ38TJY7aw9n93Fl/3blIXdyYvPOp55CNxywooG/3BcrwNrBpcSg==" - }, - "value-equal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", - "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" - }, - "vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==" - }, - "vendors": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz", - "integrity": "sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==" - }, - "verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", - "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - }, - "dependencies": { - "core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==" - } - } - }, - "vfile": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", - "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", - "requires": { - "@types/unist": "^2.0.0", - "is-buffer": "^2.0.0", - 
"unist-util-stringify-position": "^2.0.0", - "vfile-message": "^2.0.0" - } - }, - "vfile-location": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz", - "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==" - }, - "vfile-message": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", - "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", - "requires": { - "@types/unist": "^2.0.0", - "unist-util-stringify-position": "^2.0.0" - } - }, - "vm-browserify": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", - "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==" - }, - "w3c-keyname": { - "version": "2.2.8", - "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", - "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==" - }, - "wait-on": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-6.0.1.tgz", - "integrity": "sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==", - "requires": { - "axios": "^0.25.0", - "joi": "^17.6.0", - "lodash": "^4.17.21", - "minimist": "^1.2.5", - "rxjs": "^7.5.4" - } - }, - "walker": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", - "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", - "dev": true, - "requires": { - "makeerror": "1.0.12" - } - }, - "warning": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", - "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", - "requires": { - "loose-envify": "^1.0.0" - } - }, - "watchpack": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", - "requires": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - } - }, - "wbuf": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", - "requires": { - "minimalistic-assert": "^1.0.0" - } - }, - "web-namespaces": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz", - "integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==" - }, - "web-worker": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.3.0.tgz", - "integrity": "sha512-BSR9wyRsy/KOValMgd5kMyr3JzpdeoR9KVId8u5GVlTTAtNChlsE4yTxeY7zMdNSyOmoKBv8NH2qeRY9Tg+IaA==" - }, - "webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" - }, - "webpack": { - "version": "5.74.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.74.0.tgz", - "integrity": 
"sha512-A2InDwnhhGN4LYctJj6M1JEaGL7Luj6LOmyBHjcI8529cm5p6VXiTIW2sn6ffvEAKmveLzvu4jrihwXtPojlAA==", - "requires": { - "@types/eslint-scope": "^3.7.3", - "@types/estree": "^0.0.51", - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/wasm-edit": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "acorn": "^8.7.1", - "acorn-import-assertions": "^1.7.6", - "browserslist": "^4.14.5", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.10.0", - "es-module-lexer": "^0.9.0", - "eslint-scope": "5.1.1", - "events": "^3.2.0", - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.9", - "json-parse-even-better-errors": "^2.3.1", - "loader-runner": "^4.2.0", - "mime-types": "^2.1.27", - "neo-async": "^2.6.2", - "schema-utils": "^3.1.0", - "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.1.3", - "watchpack": "^2.4.0", - "webpack-sources": "^3.2.3" - } - }, - "webpack-bundle-analyzer": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.5.0.tgz", - "integrity": "sha512-GUMZlM3SKwS8Z+CKeIFx7CVoHn3dXFcUAjT/dcZQQmfSZGvitPfMob2ipjai7ovFFqPvTqkEZ/leL4O0YOdAYQ==", - "requires": { - "acorn": "^8.0.4", - "acorn-walk": "^8.0.0", - "chalk": "^4.1.0", - "commander": "^7.2.0", - "gzip-size": "^6.0.0", - "lodash": "^4.17.20", - "opener": "^1.5.2", - "sirv": "^1.0.7", - "ws": "^7.3.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "webpack-dev-middleware": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", - "requires": { - "colorette": 
"^2.0.10", - "memfs": "^3.4.3", - "mime-types": "^2.1.31", - "range-parser": "^1.2.1", - "schema-utils": "^4.0.0" - }, - "dependencies": { - "ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "requires": { - "fast-deep-equal": "^3.1.3" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, - "range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" - }, - "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "requires": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - } - } - } - }, - "webpack-dev-server": { - "version": "4.9.3", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.9.3.tgz", - "integrity": "sha512-3qp/eoboZG5/6QgiZ3llN8TUzkSpYg1Ko9khWX1h40MIEUNS2mDoIa8aXsPfskER+GbTvs/IJZ1QTBBhhuetSw==", - "requires": { - "@types/bonjour": "^3.5.9", - "@types/connect-history-api-fallback": "^1.3.5", - "@types/express": "^4.17.13", - "@types/serve-index": "^1.9.1", - "@types/serve-static": "^1.13.10", - "@types/sockjs": "^0.3.33", - "@types/ws": "^8.5.1", - "ansi-html-community": "^0.0.8", - "bonjour-service": "^1.0.11", - "chokidar": "^3.5.3", - "colorette": "^2.0.10", - "compression": "^1.7.4", - "connect-history-api-fallback": "^2.0.0", - "default-gateway": "^6.0.3", - "express": "^4.17.3", - "graceful-fs": "^4.2.6", - "html-entities": "^2.3.2", - "http-proxy-middleware": "^2.0.3", - "ipaddr.js": "^2.0.1", - "open": "^8.0.9", - "p-retry": "^4.5.0", - "rimraf": "^3.0.2", - "schema-utils": "^4.0.0", - "selfsigned": "^2.0.1", - "serve-index": "^1.9.1", - "sockjs": "^0.3.24", - "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.1", - "ws": "^8.4.2" - }, - "dependencies": { - "ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "requires": { - "fast-deep-equal": "^3.1.3" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, - "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "requires": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - } - }, - "ws": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.8.1.tgz", - "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==", - "requires": {} - } - } - }, - "webpack-merge": { - "version": "5.8.0", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz", - "integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==", - "requires": { - "clone-deep": "^4.0.1", - "wildcard": "^2.0.0" - } - }, - "webpack-sources": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", - "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==" - }, - "webpackbar": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", - "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", - "requires": { - "chalk": "^4.1.0", - "consola": "^2.15.3", - "pretty-time": "^1.1.0", - "std-env": "^3.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "websocket-driver": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - "integrity": 
"sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", - "requires": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - } - }, - "websocket-extensions": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==" - }, - "whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "requires": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "requires": { - "isexe": "^2.0.0" - } - }, - "which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "requires": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "which-module": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", - "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==" - }, - "which-typed-array": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", - "requires": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0", - "is-typed-array": "^1.1.10" - } - }, - "widest-line": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", - "requires": { - "string-width": "^4.0.0" - } - }, - "wildcard": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz", - "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==" - }, - "wordwrap": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", - "integrity": "sha512-xSBsCeh+g+dinoBv3GAOWM4LcVVO68wLXRanibtBSdUvkGWQRGeE9P7IwU9EmDDi4jA6L44lz15CGMwdw9N5+Q==" - }, - "worker-rpc": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/worker-rpc/-/worker-rpc-0.1.1.tgz", - "integrity": "sha512-P1WjMrUB3qgJNI9jfmpZ/htmBEjFh//6l/5y8SD9hg1Ef5zTTVVoRjTrTEzPrNBQvmhMxkoTsjOXN10GWU7aCg==", - "requires": { - "microevent.ts": "~0.1.1" - } - }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": 
{ - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - } - } - }, - "wrap-ansi-cjs": { - "version": "npm:wrap-ansi@7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - } - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "requires": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "ws": { - "version": "7.5.6", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.6.tgz", - "integrity": "sha512-6GLgCqo2cy2A2rjCNFlxQS6ZljG/coZfZXclldI8FB/1G3CCI36Zd8xy2HrFVACi8tfk5XrgLQEk+P0Tnz9UcA==", - "requires": {} - }, - "xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==" - }, - "xml-formatter": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/xml-formatter/-/xml-formatter-2.6.1.tgz", - "integrity": "sha512-dOiGwoqm8y22QdTNI7A+N03tyVfBlQ0/oehAzxIZtwnFAHGeSlrfjF73YQvzSsa/Kt6+YZasKsrdu6OIpuBggw==", - "requires": { - "xml-parser-xo": "^3.2.0" - } - }, - "xml-js": { - "version": "1.6.11", - "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", - "integrity": 
"sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", - "requires": { - "sax": "^1.2.4" - } - }, - "xml-parser-xo": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/xml-parser-xo/-/xml-parser-xo-3.2.0.tgz", - "integrity": "sha512-8LRU6cq+d7mVsoDaMhnkkt3CTtAs4153p49fRo+HIB3I1FD1o5CeXRjRH29sQevIfVJIcPjKSsPU/+Ujhq09Rg==" - }, - "xmlbuilder": { - "version": "13.0.2", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-13.0.2.tgz", - "integrity": "sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ==" - }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==" - }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==" - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" - }, - "yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==" - }, - "yaml-ast-parser": { - "version": "0.0.43", - "resolved": "https://registry.npmjs.org/yaml-ast-parser/-/yaml-ast-parser-0.0.43.tgz", - "integrity": "sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==" - }, - "yamljs": { - "version": "0.2.10", - "resolved": "https://registry.npmjs.org/yamljs/-/yamljs-0.2.10.tgz", - "integrity": "sha512-sbkbOosewjeRmJ23Hjee1RgTxn+xa7mt4sew3tfD0SdH0LTcswnZC9dhSNq4PIz15roQMzb84DjECyQo5DWIww==", - "requires": { - "argparse": "^1.0.7", - "glob": "^7.0.5" - }, - "dependencies": { - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - } - } - }, - "yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "requires": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - } - }, - "yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==" - }, - "yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", - "requires": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - }, - "yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": 
"sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==" - }, - "zwitch": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", - "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==" - } } } diff --git a/package.json b/package.json index a042cca39ba..0bc82be266a 100644 --- a/package.json +++ b/package.json @@ -32,22 +32,22 @@ "api:generate:camunda": "npm run api:generate camunda" }, "dependencies": { - "@auth0/auth0-react": "^2.2.1", - "@bpmn-io/form-js": "^1.7.3", + "@auth0/auth0-react": "^2.2.4", + "@bpmn-io/form-js": "^1.12.0", "@docusaurus/core": "^2.4.1", "@docusaurus/preset-classic": "^2.4.1", "@docusaurus/theme-mermaid": "^2.4.1", "@mdx-js/react": "^1.6.22", "@saucelabs/theme-github-codeblock": "^0.2.3", - "clsx": "^1.2.1", + "clsx": "^2.1.1", "docusaurus": "^1.14.7", "docusaurus-plugin-openapi-docs": "^2.0.4", "docusaurus-theme-openapi-docs": "^2.0.4", - "mixpanel-browser": "^2.47.0", + "mixpanel-browser": "^2.56.0", "pushfeedback-react": "^0.1.30", "react": "^17.0.2", "react-dom": "^17.0.2", - "react-player": "^2.11.0", + "react-player": "^2.16.0", "unist-util-visit": "^5.0.0" }, "browserslist": { @@ -63,15 +63,15 @@ ] }, "devDependencies": { - "@playwright/test": "^1.32.2", - "@swc/core": "^1.3.49", - "@types/jest": "^29.5.4", + "@playwright/test": "^1.49.0", + "@swc/core": "^1.9.3", + "@types/jest": "^29.5.14", "husky": "^8.0.3", - "jest": "^29.6.4", + "jest": "^29.7.0", "lint-staged": "^14.0.1", - "playwright": "^1.32.2", - "prettier": "3.0.0", - "replace-in-file": "^7.1.0", + "playwright": "^1.49.0", + "prettier": "3.3.3", + "replace-in-file": "^7.2.0", "swc-loader": "^0.2.3" }, "lint-staged": { diff --git a/sidebars.js b/sidebars.js index d04c423c7fd..8ad565a53df 100644 --- a/sidebars.js +++ b/sidebars.js @@ -14,7 +14,7 @@ module.exports = { "guides/getting-started-java-spring", "guides/model-your-first-process", { - "By use case": [ + "Orchestration use cases": [ "guides/orchestrate-human-tasks", "guides/orchestrate-apis", "guides/orchestrate-microservices", @@ -33,7 +33,6 @@ module.exports = { "guides/setting-up-development-project", "guides/setup-client-connection-credentials", "guides/configuring-out-of-the-box-connectors", - "guides/message-correlation", "guides/use-connectors-in-hybrid-mode", "guides/host-custom-connectors", ], @@ -109,10 +108,10 @@ module.exports = { "components/console/manage-clusters/create-cluster", "components/console/manage-clusters/manage-cluster", "components/console/manage-clusters/manage-api-clients", + "components/console/manage-clusters/manage-secrets", "components/console/manage-clusters/manage-alerts", "components/console/manage-clusters/manage-ip-allowlists", "components/console/manage-clusters/create-backups", - "components/console/manage-clusters/manage-secrets", "components/console/manage-clusters/settings", ], }, @@ -367,6 +366,7 @@ module.exports = { "components/connectors/out-of-the-box-connectors/googledrive", "components/connectors/out-of-the-box-connectors/google-maps-platform", "components/connectors/out-of-the-box-connectors/google-sheets", + "components/connectors/out-of-the-box-connectors/google-gemini", ], }, "components/connectors/protocol/graphql", @@ -405,6 +405,7 @@ module.exports = { items: [ "components/connectors/custom-built-connectors/connector-templates", "components/connectors/manage-connector-templates", + 
"components/connectors/custom-built-connectors/connector-template-generator", { type: "category", label: "Connector SDK", @@ -734,6 +735,38 @@ module.exports = { }, ], }, + { + type: "category", + label: "Early access", + link: { + type: "doc", + id: "components/early-access/overview", + }, + items: [ + { + type: "category", + label: "Experimental features", + items: [ + "components/early-access/experimental/rpa/rpa-integration", + "components/early-access/experimental/rpa/rpa-framework-library", + ], + }, + { + type: "category", + label: "Alpha features", + link: { + type: "doc", + id: "components/early-access/alpha/alpha-features", + }, + items: [ + "components/early-access/alpha/sap/sap-integration", + "components/early-access/alpha/sap/odata-connector", + "components/early-access/alpha/sap/rfc-connector", + "components/early-access/alpha/sap/btp-integration", + ], + }, + ], + }, ], "APIs & Tools": [ "apis-tools/working-with-apis-tools", @@ -834,72 +867,69 @@ module.exports = { { Deprecated: [ require("./docs/apis-tools/tasklist-api/sidebar-schema"), - require("./docs/apis-tools/zeebe-api-rest/sidebar-schema"), ], }, ], }, { - Clients: [ + "Clients & SDKs": [ { - "Java client": [ - "apis-tools/java-client/index", - "apis-tools/java-client/job-worker", - "apis-tools/java-client/logging", - "apis-tools/java-client/zeebe-process-test", + SDKs: [ + "apis-tools/node-js-sdk", { - Examples: [ - "apis-tools/java-client-examples/index", - "apis-tools/java-client-examples/process-deploy", - "apis-tools/java-client-examples/process-instance-create", - "apis-tools/java-client-examples/process-instance-create-nonblocking", - "apis-tools/java-client-examples/process-instance-create-with-result", - "apis-tools/java-client-examples/decision-evaluate", - "apis-tools/java-client-examples/job-worker-open", - "apis-tools/java-client-examples/data-pojo", - "apis-tools/java-client-examples/cluster-topology-request", + "Spring Zeebe": [ + "apis-tools/spring-zeebe-sdk/getting-started", + "apis-tools/spring-zeebe-sdk/configuration", ], }, ], }, { - "Community clients": [ - "apis-tools/community-clients/index", + Clients: [ { - "Zeebe clients": [ - "apis-tools/community-clients/c-sharp", - "apis-tools/community-clients/micronaut", - "apis-tools/community-clients/python", - "apis-tools/community-clients/ruby", - "apis-tools/community-clients/rust", - "apis-tools/community-clients/quarkus", + "Java client": [ + "apis-tools/java-client/index", + "apis-tools/java-client/job-worker", + "apis-tools/java-client/logging", + "apis-tools/java-client/zeebe-process-test", { - "CLI client": [ - "apis-tools/community-clients/cli-client/index", - "apis-tools/community-clients/cli-client/cli-get-started", + Examples: [ + "apis-tools/java-client-examples/index", + "apis-tools/java-client-examples/process-deploy", + "apis-tools/java-client-examples/process-instance-create", + "apis-tools/java-client-examples/process-instance-create-nonblocking", + "apis-tools/java-client-examples/process-instance-create-with-result", + "apis-tools/java-client-examples/decision-evaluate", + "apis-tools/java-client-examples/job-worker-open", + "apis-tools/java-client-examples/data-pojo", + "apis-tools/java-client-examples/cluster-topology-request", ], - "Go client": [ - "apis-tools/community-clients/go-client/index", - "apis-tools/community-clients/go-client/go-get-started", - "apis-tools/community-clients/go-client/job-worker", + }, + ], + }, + { + "Community clients": [ + "apis-tools/community-clients/index", + { + "Zeebe clients": [ + { + "CLI 
client": [ + "apis-tools/community-clients/cli-client/index", + "apis-tools/community-clients/cli-client/cli-get-started", + ], + "Go client": [ + "apis-tools/community-clients/go-client/index", + "apis-tools/community-clients/go-client/go-get-started", + "apis-tools/community-clients/go-client/job-worker", + ], + }, ], }, + "apis-tools/build-your-own-client", ], }, ], }, - "apis-tools/build-your-own-client", - ], - }, - { - SDKs: [ - "apis-tools/node-js-sdk", - { - "Spring Zeebe": [ - "apis-tools/spring-zeebe-sdk/getting-started", - "apis-tools/spring-zeebe-sdk/configuration", - ], - }, ], }, require("./docs/apis-tools/frontend-development/sidebar-schema"), @@ -915,11 +945,29 @@ module.exports = { }, ], }, + { + "Migration manuals": [ + "apis-tools/migration-manuals/migrate-to-zeebe-user-tasks", + "apis-tools/migration-manuals/migrate-to-camunda-api", + ], + }, ], Reference: [ "reference/overview", - "reference/announcements", + { + type: "category", + label: "Announcements", + link: { + type: "doc", + id: "reference/announcements", + }, + items: [ + "reference/announcements/announcements-870", + "reference/announcements/announcements-860", + "reference/announcements/announcements-850", + ], + }, { type: "category", label: "Release notes", @@ -927,14 +975,18 @@ module.exports = { type: "doc", id: "reference/release-notes/release-notes", }, - items: ["reference/release-notes/860", "reference/release-notes/850"], + items: [ + "reference/release-notes/870", + "reference/release-notes/860", + "reference/release-notes/850", + ], }, + "reference/contact", "reference/supported-environments", "reference/dependencies", "reference/camunda-help-center", "reference/auto-updates", "reference/status", - "reference/alpha-features", "reference/licenses", "reference/notices", "reference/release-policy", @@ -1025,6 +1077,7 @@ module.exports = { id: "self-managed/operational-guides/update-guide/introduction", }, items: [ + "self-managed/operational-guides/update-guide/860-to-870", "self-managed/operational-guides/update-guide/850-to-860", "self-managed/operational-guides/update-guide/840-to-850", "self-managed/operational-guides/update-guide/830-to-840", @@ -1104,7 +1157,12 @@ module.exports = { Console: [ "self-managed/console-deployment/overview", "self-managed/console-deployment/installation", - "self-managed/console-deployment/configuration", + { + Configuration: [ + "self-managed/console-deployment/configuration/configuration", + "self-managed/console-deployment/configuration/ssl", + ], + }, "self-managed/console-deployment/telemetry", ], Zeebe: [ @@ -1163,6 +1221,7 @@ module.exports = { }, items: [ "self-managed/zeebe-deployment/exporters/install-zeebe-exporters", + "self-managed/zeebe-deployment/exporters/camunda-exporter", "self-managed/zeebe-deployment/exporters/elasticsearch-exporter", "self-managed/zeebe-deployment/exporters/opensearch-exporter", ], @@ -1565,6 +1624,7 @@ module.exports = { "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection", "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection", "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-missing-data", + "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration", ], }, ], diff --git a/spec/regression/versionMapping.spec.ts b/spec/regression/versionMapping.spec.ts index 38bf5bf181b..06dc99e0871 100644 --- a/spec/regression/versionMapping.spec.ts +++ b/spec/regression/versionMapping.spec.ts @@ -1,9 +1,9 @@ import { test, expect } from 
"@playwright/test"; test("main docs cross-link to optimize docs", async ({ page }) => { - await page.goto("/docs/components/"); + await page.goto("/docs/reference/release-policy/"); - await expect(page).toHaveTitle(/Overview Components \| Camunda 8 Docs/); + await expect(page).toHaveTitle(/Release policy \| Camunda 8 Docs/); // This is a link known to cross over to $optimize$. await page @@ -16,15 +16,16 @@ test("main docs cross-link to optimize docs", async ({ page }) => { }); test("optimize docs cross-link to main docs", async ({ page }) => { - await page.goto( - "/optimize/apis-tools/optimize-api/optimize-api-authentication/" - ); + await page.goto("/optimize/components/what-is-optimize/"); - await expect(page).toHaveTitle(/Authentication \| Camunda 8 Docs/); + await expect(page).toHaveTitle(/What is Optimize\? \| Camunda 8 Docs/); // This is a link known to cross over to $docs$. - await page.getByRole("link", { name: "building your own client" }).click(); + await page + .getByRole("article") + .getByRole("link", { name: "Modeler" }) + .click(); // The `$docs$` should be transformed to `docs` in the target URL. - await expect(page.url()).toContain("/docs/apis-tools/build-your-own-client/"); + await expect(page.url()).toContain("/docs/components/modeler/about-modeler/"); }); diff --git a/src/css/custom.css b/src/css/custom.css index b21a2dd31be..32c6f23d825 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -20,6 +20,8 @@ See: https://docusaurus.io/docs/styling-layout#styling-your-site-with-infima --ifm-breadcrumb-item-background-active: none; --ifm-breadcrumb-color-active: var(--ifm-font-color-base); --ifm-menu-color: var(--ifm-color-emphasis-800); + --ifm-heading-font-family: IBM Plex Sans, -apple-system, blinkmacsystemfont, + Segoe UI, roboto, oxygen-sans, ubuntu, cantarell, Helvetica Neue, sans-serif; } .docusaurus-highlight-code-line { @@ -185,6 +187,9 @@ svg.implemented:hover [stroke="#333"] { margin-left: 0; margin-bottom: 1.5rem; } +h2 .badge:nth-of-type(1) { + margin-bottom: 0rem; +} h3 .badge:nth-of-type(1) { margin-bottom: 0rem; } @@ -405,6 +410,14 @@ span.callout + p::after { border: none; } +/* Use for inline images within text, such as BPMN symbols */ +.theme-doc-markdown img.inline-image { + border: none; + max-height: 1.8rem; + margin: 0; + padding: 0; +} + /* Unsupported versions in the versions selector */ .dropdown-separator { margin: 0.3rem 0; @@ -520,7 +533,6 @@ a.table-of-contents__link.toc-highlight > span.badge { } h1 { padding-top: 10px; - font-family: "IBM Plex Sans", "sans-serif"; } h1.openapi__heading { margin-bottom: calc( @@ -531,7 +543,6 @@ h1.openapi__heading { h2 { font-size: 28px; font-weight: 600; - font-family: "IBM Plex Sans", sans-serif; border-bottom: 1px solid #dedede; padding-top: 20px; margin-bottom: 20px; @@ -580,6 +591,20 @@ h2#request { margin-bottom: 20px; padding-bottom: 0px; } +h2 .badge--long, +h2 .badge--beginner, +h2 .badge--cloud { + margin-bottom: 0; + font-size: 12px; + margin-left: 5px; + vertical-align: middle; +} +/* Third level page heading */ +h2 .badge--long:nth-of-type(1), +h2 .badge--beginner:nth-of-type(1), +h2 .badge--cloud:nth-of-type(1) { + margin-left: 5px; +} /* Second level page heading */ h3 { margin-bottom: 0.8rem; @@ -603,6 +628,13 @@ h3 .badge--long { margin-left: 5px; vertical-align: middle; } +h3 .badge--medium { + margin-bottom: 0; + font-size: 12px; + margin-left: 5px; + vertical-align: middle; +} + /* Third level page heading */ h3 .badge--long:nth-of-type(1) { margin-left: 5px; @@ -665,3 +697,19 @@ ul 
ol { .menu__link--active:not(.menu__link--sublist) { font-weight: 600; } + +/* double column div layout */ +.double-column-container { + display: flex; +} +.double-column-left { + flex: 1; +} +.double-column-right { + flex: 3; +} +@media (max-width: 768px) { + .double-column-container { + flex-direction: column; + } +} diff --git a/src/mdx/expandVersionedUrl.spec.js b/src/mdx/expandVersionedUrl.spec.js index 29d1480fa8a..1e43601b0c4 100644 --- a/src/mdx/expandVersionedUrl.spec.js +++ b/src/mdx/expandVersionedUrl.spec.js @@ -1,4 +1,5 @@ const expandVersionedUrl = require("./expandVersionedUrl"); +const { versionMappings } = require("../versions"); describe("expandVersionedUrl", () => { describe("unexpandable URLs", () => { @@ -13,6 +14,8 @@ describe("expandVersionedUrl", () => { ); }); + const [currentVersionMapping, olderVersionMapping] = versionMappings; + describe("when source is from optimize docs", () => { const targetUrl = "$docs$/some/thing"; @@ -23,12 +26,12 @@ describe("expandVersionedUrl", () => { ], [ - "/Users/monkeypants/camunda-docs/optimize_versioned_docs/version-3.10.0/what-is-optimize.md", + `/Users/monkeypants/camunda-docs/optimize_versioned_docs/version-${currentVersionMapping.optimizeVersion}/what-is-optimize.md`, "/docs/some/thing", ], [ - "/Users/monkeypants/camunda-docs/optimize_versioned_docs/version-3.7.0/what-is-optimize.md", - "/docs/1.3/some/thing", + `/Users/monkeypants/camunda-docs/optimize_versioned_docs/version-${olderVersionMapping.optimizeVersion}/what-is-optimize.md`, + `/docs/${olderVersionMapping.docsVersion}/some/thing`, ], ])("when in %s it expands to %s", (sourcePath, expandedUrl) => { expect(expandVersionedUrl(targetUrl, sourcePath)).toEqual(expandedUrl); @@ -44,12 +47,12 @@ describe("expandVersionedUrl", () => { "/optimize/next/some/thing", ], [ - "/Users/monkeypants/camunda-docs/versioned_docs/version-8.2/what-is-optimize.md", + `/Users/monkeypants/camunda-docs/versioned_docs/version-${currentVersionMapping.docsVersion}/what-is-optimize.md`, "/optimize/some/thing", ], [ - "/Users/monkeypants/camunda-docs/versioned_docs/version-1.3/what-is-optimize.md", - "/optimize/3.7.0/some/thing", + `/Users/monkeypants/camunda-docs/versioned_docs/version-${olderVersionMapping.docsVersion}/what-is-optimize.md`, + `/optimize/${olderVersionMapping.optimizeVersion}/some/thing`, ], ])("when in %s it expands to %s", (sourcePath, expandedUrl) => { expect(expandVersionedUrl(targetUrl, sourcePath)).toEqual(expandedUrl); diff --git a/src/pages/contact.md b/src/pages/contact.md deleted file mode 100644 index b772bde285c..00000000000 --- a/src/pages/contact.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Contact -description: Contact Camunda, submit feedback, find support using the Camunda community forum, note bug reports and feature requests, and review security notices. -keywords: - [ - support, - contact-us, - get-support, - help, - need-help, - bug, - bug-report, - feature-request, - issue, - enterprise-support, - ] ---- - -# Contact - -There are a few different channels you can reach us based on your needs: - -- We encourage everyone to participate in our **community** via the [Camunda community forum](https://forum.camunda.io/), where you can exchange ideas with other Camunda users, as well as Camunda employees. For all other Camunda community programs and resources, visit our [Camunda Developer Hub](https://camunda.com/developers). - -- We welcome your **bug** reports and **feature requests** through our community channels mentioned above. 
- -- For **security-related issues**, review our [security notices](../docs/reference/notices) for the most up-to-date information on known issues and steps to report a vulnerability so we can solve the problem as quickly as possible. Do not use GitHub for security-related issues. - -- **Feedback and support** can be submitted or requested via JIRA by following our [Enterprise support process](https://docs.camunda.org/enterprise/support/). All users can also find feedback and support options in the Help Center or [Camunda community forum](https://forum.camunda.io/). - -- For sales inquiries, information about Camunda 8 performance and benchmarking, or anything not listed above, use our [Contact Us](https://camunda.com/contact/) form. - -## Locating Camunda 8 credentials - -Need assistance locating your Camunda 8 credentials? You can obtain these credentials from Camunda by submitting a **Help Request**. To do this, take the following steps: - -1. Log in to [Jira](https://jira.camunda.com/secure/Dashboard.jspa). -2. Click **Create** in the navigation bar at the top of the page. This launches a **Create Issue** pop-up. -3. In the **Issue Type** field, select **Help Request**. -4. In the **Help Request Type** field, click the option that reads **I need the credentials for downloading Camunda**. -5. In the **Summary** and **Description** fields, **I need the credentials for downloading Camunda** will populate by default. - ![completed help request example](./img/create-issue-request.png) -6. (Optional) Add more details, such as the priority level or authorized support contacts. -7. Click **Create** at the bottom of the pop-up **Create Issue** box. - -After completing these steps, your request is generated. Find additional details on submitting a self-service help request [here](https://docs.camunda.org/enterprise/support/#self-service-help-request). diff --git a/src/versions.js b/src/versions.js index 7119b8998f3..9c3601788be 100644 --- a/src/versions.js +++ b/src/versions.js @@ -31,14 +31,6 @@ const versionMappings = [ docsVersion: "8.3", optimizeVersion: "3.11.0", }, - { - docsVersion: "8.2", - optimizeVersion: "3.10.0", - }, - { - docsVersion: "1.3", - optimizeVersion: "3.7.0", - }, ]; /** @type {Array} */ @@ -46,8 +38,10 @@ const unsupportedVersions = [ // 👋 When archiving a version, move it from the above array into here, and edit it! // `label` appears in the top navbar version selector. // `urlSuffix` gets appended to the target `unsupported.docs.camunda.io/` URL. + { label: "8.2 / 3.10.0", urlSuffix: "8.2" }, { label: "8.1 / 3.9.0", urlSuffix: "8.1" }, { label: "8.0 / 3.8.0", urlSuffix: "8.0" }, + { label: "1.3 / 3.7.0", urlSuffix: "1.3" }, { label: "1.2", urlSuffix: "1.2" }, { label: "1.1", urlSuffix: "1.1" }, { label: "1.0", urlSuffix: "1.0" }, diff --git a/static/.htaccess b/static/.htaccess index f689c8b3652..d0fdc799642 100644 --- a/static/.htaccess +++ b/static/.htaccess @@ -1,3 +1,34 @@ +# Welcome to the redirect party! +# ____ ____ ____ __ ____ ____ ___ ____ ____ __ ____ ____ _ _ _ +# ( _ \( __)( \( )( _ \( __)/ __)(_ _) ( _ \ / _\ ( _ \(_ _)( \/ )/ \ +# ) / ) _) ) D ( )( ) / ) _)( (__ )( ) __// \ ) / )( ) / \_/ +# (__\_)(____)(____/(__)(__\_)(____)\___) (__) (__) \_/\_/(__\_) (__) (__/ (_) +# +# Please read this guide before adding new redirects. +# +################################################################################## +# +# Redirects in this file are sorted in this order: +# 1. Universal rules that pertain to all content. 
+# These redirects have no anticipated removal. +# 2. Broad version (or unversioned) redirects. +# These redirects have no anticipated removal. +# 3. Specific content moves, in newest-to-oldest order of the version in which they are introduced. +# These redirects will be removed when the version that introduced them is no longer supported. +# +# If you're adding a redirect for a specific page or section, +# you likely want to do that in #3. +# That means you want to find the banner comment identifying that version, +# and add your redirect below that comment. +# +# For help writing rules, see https://httpd.apache.org/docs/current/mod/mod_rewrite.html#rewriterule. +# +################################################################################## + +################################################################################## +# 1. Universal rules +################################################################################## + # Redirect to https RewriteEngine on RewriteCond %{SERVER_PORT} !^443$ @@ -6,16 +37,96 @@ RewriteRule (.*) https://%{SERVER_NAME}/$1 [R=301,L] # Disable directory listing and multi views Options -Indexes -MultiViews +# Add yaml mime type +AddType text/vnd.yaml yaml +# Add bpmn mime type +AddType text/xml bpmn +# Add graphql schema mime type +AddType text/plain graphqls +# Add diff mime type +AddType text/plain diff + +# workaround for 404 with trailing slashes https://github.com/camunda-cloud/camunda-cloud-documentation/issues/403 +RewriteRule ^(.*\.(yaml|bpmn|xml|png|jpeg|jpg|yml|svg|graphqls|diff))/$ /$1 [R=301,L] + +################################################################################## +# 2. Broad version (or unversioned) redirects +################################################################################## + # For linking from our apps - redirect URLs with the current version to URLs with no version RewriteRule ^docs/8.6/(.*)$ /docs/$1 [R=302,L] RewriteRule ^optimize/3.14.0/(.*)$ /optimize/$1 [R=302,L] -# 8.7 content moves vvvv -# ---------------------- -# None here yet, but please place them here when you have them. +## Archived versions. Note that the URL is adjusted to staging in the publish-stage workflow. +RewriteRule ^docs/0.25/(.*)$ https://unsupported.docs.camunda.io/0.25/docs/$1 [R=301,L] +RewriteRule ^docs/0.26/(.*)$ https://unsupported.docs.camunda.io/0.26/docs/$1 [R=301,L] +RewriteRule ^docs/1.0/(.*)$ https://unsupported.docs.camunda.io/1.0/docs/$1 [R=301,L] +RewriteRule ^docs/1.1/(.*)$ https://unsupported.docs.camunda.io/1.1/docs/$1 [R=301,L] +RewriteRule ^docs/1.2/(.*)$ https://unsupported.docs.camunda.io/1.2/docs/$1 [R=301,L] +RewriteRule ^docs/1.3/(.*)$ https://unsupported.docs.camunda.io/1.3/docs/$1 [R=301,L] +RewriteRule ^optimize/3.7.0/(.*)$ https://unsupported.docs.camunda.io/1.3/optimize/$1 [R=301,L] +RewriteRule ^docs/8.0/(.*)$ https://unsupported.docs.camunda.io/8.0/docs/$1 [R=301,L] +RewriteRule ^optimize/3.8.0/(.*)$ https://unsupported.docs.camunda.io/8.0/optimize/$1 [R=301,L] +RewriteRule ^docs/8.1/(.*)$ https://unsupported.docs.camunda.io/8.1/docs/$1 [R=301,L] +RewriteRule ^optimize/3.9.0/(.*)$ https://unsupported.docs.camunda.io/8.1/optimize/$1 [R=301,L] +RewriteRule ^docs/8.2/(.*)$ https://unsupported.docs.camunda.io/8.2/docs/$1 [R=301,L] +RewriteRule ^optimize/3.10.0/(.*)$ https://unsupported.docs.camunda.io/8.2/optimize/$1 [R=301,L] + +# Timeless classics.
+# rules required after update to Docusaurus 2.0.0-beta.15 see https://github.com/camunda-cloud/camunda-cloud-documentation/pull/531 +## index pages are not served as /index/ anymore but /index.html RewriteRule ^(.*)/index/$ /$1 [R=301,L] +## tags do not exist anymore, redirect to main page RewriteRule ^(.*)/tags/$ / [R=301,L] +## duplicate sections at the end were removed RewriteRule ^(.*)/(.+)/\2/?$ /$1/$2 [R=301,L] +# disabled blog, redirect to index page. RewriteRule ^blog/ / [R=301,L] + +# Product links that I don't think we can ever remove +RewriteRule ^docs/guides/migrating-from-Camunda-Platform/?(.*)$ /docs/guides/migrating-from-camunda-7/$1 [R=301,L] +RewriteRule ^docs/reference/bpmn-processes/?(.*)$ /docs/components/modeler/bpmn/$1 [R=301,L] + +################################################################################## +# 3. Specific content moves +################################################################################## + +#--------------------------------------------------------------------------------- +# 8.7: content moves introduced prior to the release of version 8.7. +#--------------------------------------------------------------------------------- + +# Remove Zeebe REST API +RewriteRule ^docs/next/apis-tools/zeebe-api-rest/specifications/?$ /docs/next/apis-tools/camunda-api-rest/specifications/$1 [R=301,L] +RewriteRule ^docs/next/apis-tools/zeebe-api-rest/zeebe-api-rest-overview/?$ /docs/next/apis-tools/camunda-api-rest/camunda-api-rest-overview/$1 [R=301,L] +RewriteRule ^docs/next/apis-tools/zeebe-api-rest/zeebe-api-rest-authentication/?$ /docs/next/apis-tools/camunda-api-rest/camunda-api-rest-authentication/$1 [R=301,L] +RewriteRule ^docs/next/apis-tools/zeebe-api-rest/zeebe-api-tutorial/?$ /docs/next/apis-tools/camunda-api-rest/camunda-api-rest-overview/$1 [R=301,L] + +# Move migrating to Zeebe user tasks +RewriteRule ^docs/next/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks/?$ /docs/next/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks/$1 [R=301,L] +RewriteRule ^docs/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks/?$ /docs/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks/$1 [R=301,L] +RewriteRule ^docs/8.6/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks/?$ /docs/8.6/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks/$1 [R=301,L] -# 8.6 content moves vvvv -# ---------------------- +# Remove community clients +RewriteRule ^docs/apis-tools/community-clients/c-sharp/?$ /docs/apis-tools/community-clients/$1 [R=301,L] +RewriteRule ^docs/apis-tools/community-clients/micronaut/?$ /docs/apis-tools/community-clients/$1 [R=301,L] +RewriteRule ^docs/apis-tools/community-clients/python/?$ /docs/apis-tools/community-clients/$1 [R=301,L] +RewriteRule ^docs/apis-tools/community-clients/quarkus/?$ /docs/apis-tools/community-clients/$1 [R=301,L] +RewriteRule ^docs/apis-tools/community-clients/ruby/?$ /docs/apis-tools/community-clients/$1 [R=301,L] +RewriteRule ^docs/apis-tools/community-clients/rust/?$ /docs/apis-tools/community-clients/$1 [R=301,L] + +# Move contact page +RewriteRule ^contact/?$ /docs/reference/contact/ [R=301,L] + +# Remove Connector update guides entirely +RewriteRule ^docs/components/connectors/custom-built-connectors/update-guide/(.*)$ /docs/components/connectors/custom-built-connectors/connector-sdk/ [R=301,L] + +# Move alpha features to components +RewriteRule ^docs/next/reference/alpha-features/?$ /docs/next/components/early-access/alpha/alpha-features/$1 [R=301,L] + +# Remove outdated
message correlation guide +RewriteRule ^docs/next/guides/message-correlation/?$ /docs/guides/$1 [R=301,L] +RewriteRule ^docs/guides/message-correlation/?$ /docs/guides/$1 [R=301,L] +RewriteRule ^docs/8.5/guides/message-correlation/?$ /docs/guides/$1 [R=301,L] # Remove the Getting Started partials as full pages RewriteRule ^docs/guides/react-components/sm-prerequisites/?$ /docs/guides/$1 [R=301,L] @@ -24,6 +135,12 @@ RewriteRule ^docs/guides/react-components/install-plain-java/?$ /docs/guides/$1 RewriteRule ^docs/guides/react-components/install-docker-compose/?$ /docs/guides/$1 [R=301,L] RewriteRule ^docs/guides/react-components/install-c8run/?$ /docs/guides/$1 [R=301,L] +# TODO at time of release: account for API spec moves in https://github.com/camunda/camunda-docs/pull/4534 (there are many) + +#--------------------------------------------------------------------------------- +# 8.6: content moves introduced prior to the release of version 8.6. +#--------------------------------------------------------------------------------- + # Move Go and CLI client to community section RewriteRule ^docs/apis-tools/cli-client/(.*)$ /docs/next/apis-tools/community-clients/cli-client/$1 [R=301,L] RewriteRule ^docs/apis-tools/go-client/(.*)$ /docs/next/apis-tools/community-clients/go-client/$1 [R=301,L] @@ -49,7 +166,6 @@ RewriteRule ^docs/self-managed/platform-architecture/overview/?$ /docs/self-mana RewriteRule ^docs/8.5/self-managed/platform-architecture/overview/?$ /docs/8.5/self-managed/about-self-managed/$1 [R=301,L] RewriteRule ^docs/8.4/self-managed/platform-architecture/overview/?$ /docs/8.4/self-managed/about-self-managed/$1 [R=301,L] RewriteRule ^docs/8.3/self-managed/platform-architecture/overview/?$ /docs/8.3/self-managed/about-self-managed/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/platform-architecture/overview/?$ /docs/8.2/self-managed/about-self-managed/$1 [R=301,L] # remove tasklist API + Spring Zeebe tutorial RewriteRule ^docs/apis-tools/tasklist-api-rest/tasklist-api-rest-tutorial/?$ /docs/apis-tools/tasklist-api-rest/tasklist-api-rest-overview/$1 [R=301,L] @@ -67,10 +183,6 @@ RewriteRule ^docs/8.3/self-managed/operational-guides/update-guide/800-to-810/?$ RewriteRule ^docs/8.3/self-managed/operational-guides/update-guide/130-to-800/?$ /docs/8.3/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.3/self-managed/operational-guides/update-guide/810-to-820/?$ /docs/8.3/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/operational-guides/update-guide/800-to-810/?$ /docs/8.2/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/operational-guides/update-guide/130-to-800/?$ /docs/8.2/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/operational-guides/update-guide/810-to-820/?$ /docs/8.2/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] - # redirect new-context-pad to context-pad RewriteRule ^docs/components/modeler/web-modeler/new-context-pad/?$ /docs/components/modeler/web-modeler/context-pad/$1 [R=301,L] @@ -78,7 +190,6 @@ RewriteRule ^docs/components/modeler/web-modeler/new-context-pad/?$ /docs/compon RewriteRule ^docs/guides/migrating-from-cawemo/?$ https://docs.camunda.org/enterprise/announcement/#cawemo-saas-announcements [R=301,L] RewriteRule ^docs/8.4/guides/migrating-from-cawemo/?$ 
https://docs.camunda.org/enterprise/announcement/#cawemo-saas-announcements [R=301,L] RewriteRule ^docs/8.3/guides/migrating-from-cawemo/?$ https://docs.camunda.org/enterprise/announcement/#cawemo-saas-announcements [R=301,L] -RewriteRule ^docs/8.2/guides/migrating-from-cawemo/?$ https://docs.camunda.org/enterprise/announcement/#cawemo-saas-announcements [R=301,L] # Move Help Center RewriteRule ^docs/guides/camunda-help-center/(.*)$ /docs/reference/camunda-help-center/$1 [R=301,L] @@ -92,13 +203,11 @@ RewriteRule ^docs/components/tasklist/userguide/updating-tasklist-cloud/?$ /docs RewriteRule ^docs/8.5/components/tasklist/userguide/updating-tasklist-cloud/?$ /docs/8.5/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.4/components/tasklist/userguide/updating-tasklist-cloud/?$ /docs/8.4/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.3/components/tasklist/userguide/updating-tasklist-cloud/?$ /docs/8.3/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] -RewriteRule ^docs/8.2/components/tasklist/userguide/updating-tasklist-cloud/?$ /docs/8.2/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/self-managed/tasklist-deployment/updating-tasklist/?$ /docs/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.5/self-managed/tasklist-deployment/updating-tasklist/?$ /docs/8.5/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.4/self-managed/tasklist-deployment/updating-tasklist/?$ /docs/8.4/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.3/self-managed/tasklist-deployment/updating-tasklist/?$ /docs/8.3/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/tasklist-deployment/updating-tasklist/?$ /docs/8.2/guides/update-guide/introduction/$1 [R=301,L] # Remove community Node Zeebe page RewriteRule ^docs/apis-tools/community-clients/javascript/?$ /docs/apis-tools/node-js-sdk/$1 [R=301,L] @@ -109,13 +218,10 @@ RewriteRule ^docs/components/modeler/web-modeler/new-web-modeler/?$ /docs/compon RewriteRule ^docs/8.5/components/modeler/web-modeler/new-web-modeler/?$ /docs/8.5/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] RewriteRule ^docs/8.4/components/modeler/web-modeler/new-web-modeler/?$ /docs/8.4/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] RewriteRule ^docs/8.3/components/modeler/web-modeler/new-web-modeler/?$ /docs/8.3/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] -RewriteRule ^docs/8.2/components/modeler/web-modeler/new-web-modeler/?$ /docs/8.2/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] RewriteRule ^docs/components/modeler/web-modeler/launch-cloud-modeler/?$ /docs/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] RewriteRule ^docs/8.5/components/modeler/web-modeler/launch-cloud-modeler/?$ /docs/8.5/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] RewriteRule ^docs/8.4/components/modeler/web-modeler/launch-cloud-modeler/?$ /docs/8.4/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] RewriteRule ^docs/8.3/components/modeler/web-modeler/launch-cloud-modeler/?$ /docs/8.3/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] -RewriteRule ^docs/8.2/components/modeler/web-modeler/launch-cloud-modeler/?$ /docs/8.2/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] 
-RewriteRule ^docs/1.3/components/modeler/web-modeler/launch-cloud-modeler/?$ /docs/1.3/components/modeler/web-modeler/launch-web-modeler/$1 [R=301,L] ## optimize splitting of upgrade guides RewriteRule ^optimize/self-managed/optimize-deployment/migration-update/2.1-to-2.2/?$ /optimize/self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2/ [R=301,L] @@ -147,8 +253,10 @@ RewriteRule ^optimize/self-managed/optimize-deployment/migration-update/instruct # Updated web-modeler troubleshooting RewriteRule ^docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login/?$ /docs/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-missing-data/ [R=301,L] -# 8.5 content moves vvvv -# ---------------------- +#--------------------------------------------------------------------------------- +# 8.5: content moves introduced prior to the release of version 8.5. +#--------------------------------------------------------------------------------- + RewriteRule ^docs/apis-tools/frontend-development/introduction-to-task-applications/?$ /docs/apis-tools/frontend-development/task-applications/introduction-to-task-applications$1 [R=301,L] RewriteRule ^docs/components/operate/userguide/updating-operate/?$ /docs/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] @@ -189,12 +297,10 @@ RewriteRule ^api/tasklist/docs/(.*)$ /docs/apis-tools/tasklist-api-rest/specific RewriteRule ^docs/components/operate/userguide/updating-operate/?$ /docs/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.4/components/operate/userguide/updating-operate/?$ /docs/8.4/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.3/components/operate/userguide/updating-operate/?$ /docs/8.3/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] -RewriteRule ^docs/8.2/components/operate/userguide/updating-operate/?$ /docs/8.2/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/self-managed/operate-deployment/updating-operate/?$ /docs/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.4/self-managed/operate-deployment/updating-operate/?$ /docs/8.4/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] RewriteRule ^docs/8.3/self-managed/operate-deployment/updating-operate/?$ /docs/8.3/self-managed/operational-guides/update-guide/introduction/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/operate-deployment/updating-operate/?$ /docs/8.2/guides/update-guide/introduction/$1 [R=301,L] # Consolidate SM troubleshooting pages RewriteRule ^docs/self-managed/platform-deployment/troubleshooting/?$ /docs/self-managed/operational-guides/troubleshooting/$1 [R=301,L] @@ -203,13 +309,11 @@ RewriteRule ^docs/self-managed/platform-deployment/troubleshooting/?$ /docs/self RewriteRule ^docs/components/console/manage-clusters/manage-ip-whitelists/?$ /docs/components/console/manage-clusters/manage-ip-allowlists/$1 [R=301,L] RewriteRule ^docs/8.4/components/console/manage-clusters/manage-ip-whitelists/?$ /docs/8.4/components/console/manage-clusters/manage-ip-allowlists/$1 [R=301,L] RewriteRule ^docs/8.3/components/console/manage-clusters/manage-ip-whitelists/?$ /docs/8.3/components/console/manage-clusters/manage-ip-allowlists/$1 [R=301,L] -RewriteRule ^docs/8.2/components/console/manage-clusters/manage-ip-whitelists/?$ /docs/8.2/components/console/manage-clusters/manage-ip-allowlists/$1 [R=301,L] # Move install-zeebe-exporters 
RewriteRule ^docs/self-managed/platform-deployment/helm-kubernetes/guides/install-zeebe-exporters/?$ /docs/self-managed/zeebe-deployment/exporters/install-zeebe-exporters/$1 [R=301,L] RewriteRule ^docs/8.4/self-managed/platform-deployment/helm-kubernetes/guides/install-zeebe-exporters/?$ /docs/8.4/self-managed/zeebe-deployment/exporters/install-zeebe-exporters/$1 [R=301,L] RewriteRule ^docs/8.3/self-managed/platform-deployment/helm-kubernetes/guides/install-zeebe-exporters/?$ /docs/8.3/self-managed/zeebe-deployment/exporters/install-zeebe-exporters/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/platform-deployment/helm-kubernetes/guides/install-zeebe-exporters/?$ /docs/8.2/self-managed/zeebe-deployment/exporters/install-zeebe-exporters/$1 [R=301,L] # Rename Console API RewriteRule ^docs/apis-tools/console-api/?$ /docs/apis-tools/administration-api/$1 [R=301,L] @@ -217,7 +321,6 @@ RewriteRule ^docs/8.4/apis-tools/console-api/?$ /docs/8.4/apis-tools/administrat RewriteRule ^docs/apis-tools/console-api/console-api-reference/?$ /docs/apis-tools/administration-api/administration-api-reference/$1 [R=301,L] RewriteRule ^docs/8.4/apis-tools/console-api/console-api-reference/?$ /docs/8.4/apis-tools/administration-api/administration-api-reference/$1 [R=301,L] RewriteRule ^docs/8.3/apis-tools/console-api-reference/?$ /docs/8.3/apis-tools/administration-api-reference/$1 [R=301,L] -RewriteRule ^docs/8.2/apis-tools/console-api-reference/?$ /docs/8.2/apis-tools/administration-api-reference/$1 [R=301,L] RewriteRule ^docs/apis-tools/console-api/authentication/?$ /docs/apis-tools/administration-api/authentication/$1 [R=301,L] RewriteRule ^docs/8.4/apis-tools/console-api/authentication/?$ /docs/8.4/apis-tools/administration-api/authentication/$1 [R=301,L] @@ -230,15 +333,16 @@ RewriteRule ^docs/8.4/components/modeler/forms/form-element-library/forms-elemen RewriteRule ^docs/components/operate/userguide/operate-feedback-and-questions/?$ /contact/$1 [R=301,L] RewriteRule ^docs/8.4/components/operate/userguide/operate-feedback-and-questions/?$ /contact/$1 [R=301,L] RewriteRule ^docs/8.3/components/operate/userguide/operate-feedback-and-questions/?$ /contact/$1 [R=301,L] -RewriteRule ^docs/8.2/components/operate/userguide/operate-feedback-and-questions/?$ /contact/$1 [R=301,L] # Redirect to Connectors overview upon deprecation of Power Automate Connector RewriteRule ^docs/components/connectors/out-of-the-box-connectors/power-automate/?$ /docs/components/connectors/out-of-the-box-connectors/available-connectors-overview/ [R=301,L] RewriteRule ^docs/8.4/components/connectors/out-of-the-box-connectors/power-automate/?$ /docs/8.4/components/connectors/out-of-the-box-connectors/available-connectors-overview/ [R=301,L] RewriteRule ^docs/8.3/components/connectors/out-of-the-box-connectors/power-automate/?$ /docs/8.3/components/connectors/out-of-the-box-connectors/available-connectors-overview/ [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/power-automate/?$ /docs/8.2/components/connectors/out-of-the-box-connectors/available-connectors-overview/ [R=301,L] -# 8.4 content moves +#--------------------------------------------------------------------------------- +# 8.4: content moves introduced prior to the release of version 8.4. 
+#--------------------------------------------------------------------------------- + RewriteRule ^docs/apis-tools/web-modeler-api/?$ /docs/apis-tools/web-modeler-api/overview/ [R=301,L] RewriteRule ^optimize/apis-tools/optimize-api/optimize-api-authorization/(.*)$ /optimize/apis-tools/optimize-api/optimize-api-authentication/$1 [R=301,L] RewriteRule ^docs/apis-tools/grpc/?$ /docs/apis-tools/zeebe-api/overview/ [R=301,L] @@ -250,16 +354,11 @@ RewriteRule ^docs/components/operate/userguide/updating-operate-cloud/?$ /docs/s # Early access to alpha features RewriteRule ^docs/reference/early-access/?$ /docs/reference/alpha-features/ [R=301,L] RewriteRule ^docs/8.3/reference/early-access/?$ /docs/8.3/reference/alpha-features/ [R=301,L] -RewriteRule ^docs/8.2/reference/early-access/?$ /docs/8.2/reference/alpha-features/ [R=301,L] -# 8.2 content moves: These were removed from latest version docs, so point them back to previous version for now. -# Remove them in the not-distant future. -RewriteRule ^docs/guides/update-guide/026-to-100/?$ /docs/8.2/guides/update-guide/026-to-100/ [R=301,L] -RewriteRule ^docs/guides/update-guide/100-to-110/?$ /docs/8.2/guides/update-guide/100-to-110/ [R=301,L] -RewriteRule ^docs/guides/update-guide/110-to-120/?$ /docs/8.2/guides/update-guide/110-to-120/ [R=301,L] -RewriteRule ^docs/guides/update-guide/120-to-130/?$ /docs/8.2/guides/update-guide/120-to-130/ [R=301,L] +#--------------------------------------------------------------------------------- +# 8.3: content moves introduced prior to the release of version 8.3. +#--------------------------------------------------------------------------------- -# 8.3 content moves RewriteRule ^docs/guides/update-guide/connectors/(.*)$ /docs/components/connectors/custom-built-connectors/update-guide/$1 [R=301,L] RewriteRule ^docs/guides/update-guide/(.*)$ /docs/self-managed/operational-guides/update-guide/$1 [R=301,L] RewriteRule ^docs/self-managed/backup-restore/(.*)$ /docs/self-managed/operational-guides/backup-restore/$1 [R=301,L] @@ -267,28 +366,13 @@ RewriteRule ^docs/self-managed/troubleshooting/log-levels/(.*)$ /docs/self-manag # Condense Connectors RewriteRule ^docs/components/connectors/out-of-the-box-connectors/amazon-sns-inbound/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-sns/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sns-inbound/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sns/$1 [R=301,L] - RewriteRule ^docs/components/connectors/out-of-the-box-connectors/amazon-sqs-inbound/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-sqs/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sqs-inbound/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sqs/$1 [R=301,L] - RewriteRule ^docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge-webhook/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge-webhook/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge/$1 [R=301,L] - RewriteRule ^docs/components/connectors/out-of-the-box-connectors/github-webhook/(.*)$ /docs/components/connectors/out-of-the-box-connectors/github/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/github-webhook/(.*)$ 
/docs/8.2/components/connectors/out-of-the-box-connectors/github/$1 [R=301,L] - RewriteRule ^docs/components/connectors/out-of-the-box-connectors/kafka-inbound/(.*)$ /docs/components/connectors/out-of-the-box-connectors/kafka/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/kafka-inbound/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/kafka/$1 [R=301,L] - RewriteRule ^docs/components/connectors/out-of-the-box-connectors/rabbitmq-inbound/(.*)$ /docs/components/connectors/out-of-the-box-connectors/rabbitmq/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/rabbitmq-inbound/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/rabbitmq/$1 [R=301,L] - RewriteRule ^docs/components/connectors/out-of-the-box-connectors/slack-inbound/(.*)$ /docs/components/connectors/out-of-the-box-connectors/slack/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/slack-inbound/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/slack/$1 [R=301,L] - RewriteRule ^docs/components/connectors/out-of-the-box-connectors/twilio-webhook/(.*)$ /docs/components/connectors/out-of-the-box-connectors/twilio/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/twilio-webhook/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/twilio/$1 [R=301,L] # Remove public API page RewriteRule ^docs/apis-tools/public-api/(.*)$ /docs/apis-tools/working-with-apis-tools/$1 [R=301,L] @@ -318,71 +402,54 @@ RewriteRule ^docs/components/console/console-troubleshooting/feedback-and-suppor # Branding redirects RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/aws-dynamodb/(.*)$ /docs/next/components/connectors/out-of-the-box-connectors/amazon-dynamodb/$1 [R=301,L] RewriteRule ^docs/components/connectors/out-of-the-box-connectors/aws-dynamodb/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-dynamodb/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/aws-dynamodb/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-dynamodb/$1 [R=301,L] RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/aws-eventbridge/(.*)$ /docs/next/components/connectors/out-of-the-box-connectors/amazon-eventbridge/$1 [R=301,L] RewriteRule ^docs/components/connectors/out-of-the-box-connectors/aws-eventbridge/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/aws-eventbridge/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge/$1 [R=301,L] RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/aws-eventbridge-webhook/(.*)$ /docs/next/components/connectors/out-of-the-box-connectors/amazon-eventbridge-webhook/$1 [R=301,L] RewriteRule ^docs/components/connectors/out-of-the-box-connectors/aws-eventbridge-webhook/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-eventbridge-webhook/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/aws-eventbridge-webhook/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge-webhook/$1 [R=301,L] RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/aws-sns/(.*)$ /docs/next/components/connectors/out-of-the-box-connectors/amazon-sns/$1 [R=301,L] RewriteRule 
^docs/components/connectors/out-of-the-box-connectors/aws-sns/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-sns/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/aws-sns/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sns/$1 [R=301,L] RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/aws-sns-inbound/(.*)$ /docs/next/components/connectors/out-of-the-box-connectors/amazon-sns-inbound/$1 [R=301,L] RewriteRule ^docs/components/connectors/out-of-the-box-connectors/aws-sns-inbound/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-sns-inbound/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/aws-sns-inbound/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sns-inbound/$1 [R=301,L] RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/aws-sqs/(.*)$ /docs/next/components/connectors/out-of-the-box-connectors/amazon-sqs/$1 [R=301,L] RewriteRule ^docs/components/connectors/out-of-the-box-connectors/aws-sqs/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-sqs/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/aws-sqs/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sqs/$1 [R=301,L] RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/aws-sqs-inbound/(.*)$ /docs/next/components/connectors/out-of-the-box-connectors/amazon-sqs-inbound/$1 [R=301,L] RewriteRule ^docs/components/connectors/out-of-the-box-connectors/aws-sqs-inbound/(.*)$ /docs/components/connectors/out-of-the-box-connectors/amazon-sqs-inbound/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/aws-sqs-inbound/(.*)$ /docs/8.2/components/connectors/out-of-the-box-connectors/amazon-sqs-inbound/$1 [R=301,L] # Remove orphaned Tasklist page RewriteRule ^docs/next/self-managed/tasklist-deployment/tasklist-api/(.*)$ /docs/next/apis-tools/tasklist-api/tasklist-api-overview/$1 [R=301,L] RewriteRule ^docs/self-managed/tasklist-deployment/tasklist-api/(.*)$ /docs/apis-tools/tasklist-api/tasklist-api-overview/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/tasklist-deployment/tasklist-api/(.*)$ /docs/8.2/apis-tools/tasklist-api/tasklist-api-overview/$1 [R=301,L] # Rename Modeler Camunda 8 RewriteRule ^docs/next/components/modeler/desktop-modeler/connect-to-camunda-platform-8/(.*)$ /docs/next/components/modeler/desktop-modeler/connect-to-camunda-8/$1 [R=301,L] RewriteRule ^docs/components/modeler/desktop-modeler/connect-to-camunda-platform-8/(.*)$ /docs/components/modeler/desktop-modeler/connect-to-camunda-8/$1 [R=301,L] -RewriteRule ^docs/8.2/components/modeler/desktop-modeler/connect-to-camunda-platform-8/(.*)$ /docs/8.2/components/modeler/desktop-modeler/connect-to-camunda-8/$1 [R=301,L] # Rename Concepts Camunda 8 RewriteRule ^docs/next/components/concepts/what-is-camunda-platform-8/(.*)$ /docs/next/components/concepts/what-is-camunda-8/$1 [R=301,L] RewriteRule ^docs/components/concepts/what-is-camunda-platform-8/(.*)$ /docs/components/concepts/what-is-camunda-8/$1 [R=301,L] -RewriteRule ^docs/8.2/components/concepts/what-is-camunda-platform-8/(.*)$ /docs/8.2/components/concepts/what-is-camunda-8/$1 [R=301,L] # Renaming Guides RewriteRule ^docs/guides/migrating-from-camunda-platform-7(.*)$ /docs/guides/migrating-from-camunda-7$1 [R=301,L] RewriteRule ^docs/next/guides/migrating-from-camunda-platform-7(.*)$ /docs/guides/migrating-from-camunda-7$1 
[R=301,L] -RewriteRule ^docs/8.2/guides/migrating-from-camunda-platform-7(.*)$ /docs/guides/migrating-from-camunda-7$1 [R=301,L] - -# Redirect pages - https://httpd.apache.org/docs/current/mod/mod_rewrite.html#rewriterule # rename Professional Plan to Starter Plan RewriteRule ^docs/next/components/console/manage-plan/upgrade-to-professional-plan/(.*)$ /docs/next/components/console/manage-plan/upgrade-to-starter-plan/$1 [R=301,L] RewriteRule ^docs/components/console/manage-plan/upgrade-to-professional-plan/(.*)$ /docs/components/console/manage-plan/upgrade-to-starter-plan/$1 [R=301,L] -RewriteRule ^docs/8.2/components/console/manage-plan/upgrade-to-professional-plan/(.*)$ /docs/8.2/components/console/manage-plan/upgrade-to-starter-plan/$1 [R=301,L] RewriteRule ^docs/next/components/console/manage-plan/cancel-professional-subscription/(.*)$ /docs/next/components/console/manage-plan/cancel-starter-subscription/$1 [R=301,L] RewriteRule ^docs/components/console/manage-plan/cancel-professional-subscription/(.*)$ /docs/components/console/manage-plan/cancel-starter-subscription/$1 [R=301,L] -RewriteRule ^docs/8.2/components/console/manage-plan/cancel-professional-subscription/(.*)$ /docs/8.2/components/console/manage-plan/cancel-starter-subscription/$1 [R=301,L] # rename Desktop Modeler reference to Camunda Cloud RewriteRule ^docs/next/components/modeler/desktop-modeler/connect-to-camunda-cloud/(.*)$ /docs/next/components/modeler/desktop-modeler/connect-to-camunda-platform-8/$1 [R=301,L] RewriteRule ^docs/components/modeler/desktop-modeler/connect-to-camunda-cloud/(.*)$ /docs/components/modeler/desktop-modeler/connect-to-camunda-platform-8/$1 [R=301,L] -RewriteRule ^docs/8.2/components/modeler/desktop-modeler/connect-to-camunda-cloud/(.*)$ /docs/8.2/components/modeler/desktop-modeler/connect-to-camunda-platform-8/$1 [R=301,L] # API Endpoints to API Orchestration RewriteRule ^docs/guides/orchestrate-api-endpoints(.*)$ /docs/guides/orchestrate-apis$1 [R=301,L] -RewriteRule ^docs/8.2/guides/orchestrate-api-endpoints(.*)$ /docs/8.2/guides/orchestrate-apis$1 [R=301,L] # Remove OSS section RewriteRule ^docs/components/zeebe/open-source(.*)$ /docs/components/zeebe/zeebe-overview/ [R=301,L] @@ -403,479 +470,4 @@ RewriteRule ^docs/components/modeler/web-modeler/save-and-deploy/?$ /docs/compon RewriteRule ^docs/next/components/modeler/web-modeler/start-instance/?$ /docs/next/components/modeler/web-modeler/run-or-publish-your-process/ [R=301,L] RewriteRule ^docs/next/components/modeler/web-modeler/save-and-deploy/?$ /docs/next/components/modeler/web-modeler/run-or-publish-your-process/#deploy-a-process [R=301,L,NE] -# Modeler start instance and deploy page renaming and merging 8.2 -RewriteRule ^docs/8.2/components/modeler/web-modeler/start-instance/?$ /docs/8.2/components/modeler/web-modeler/run-or-publish-your-process/ [R=301,L] -RewriteRule ^docs/8.2/components/modeler/web-modeler/save-and-deploy/?$ /docs/8.2/components/modeler/web-modeler/run-or-publish-your-process/#deploy-a-process [R=301,L,NE] - -# deduplication of Web Modeler API -RewriteRule ^docs/next/self-managed/modeler/web-modeler/api/(.*)$ /docs/next/apis-tools/web-modeler-api/index/$1 [R=301,L] -RewriteRule ^docs/self-managed/modeler/web-modeler/api/(.*)$ /docs/apis-tools/web-modeler-api/index/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/modeler/web-modeler/api/(.*)$ /docs/8.2/apis-tools/web-modeler-api/index/$1 [R=301,L] - -# connectors restructure /next/ -RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/rest/(.*)$ 
/docs/next/components/connectors/protocol/rest/$1 [R=301,L] -RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/graphql/(.*)$ /docs/next/components/connectors/protocol/graphql/$1 [R=301,L] -RewriteRule ^docs/next/components/connectors/out-of-the-box-connectors/http-webhook/(.*)$ /docs/next/components/connectors/protocol/http-webhook/$1 [R=301,L] -RewriteRule ^docs/next/components/modeler/web-modeler/advanced-modeling/manage-connector-templates/(.*)$ /docs/next/components/connectors/manage-connector-templates/$1 [R=301,L] - -# connectors restructure 8.2 -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/rest/(.*)$ /docs/8.2/components/connectors/protocol/rest/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/graphql/(.*)$ /docs/8.2/components/connectors/protocol/graphql/$1 [R=301,L] -RewriteRule ^docs/8.2/components/connectors/out-of-the-box-connectors/http-webhook/(.*)$ /docs/8.2/components/connectors/protocol/http-webhook/$1 [R=301,L] -RewriteRule ^docs/8.2/components/modeler/web-modeler/advanced-modeling/manage-connector-templates/(.*)$ /docs/8.2/components/connectors/manage-connector-templates/$1 [R=301,L] -RewriteRule ^docs/components/connectors/out-of-the-box-connectors/rest/(.*)$ /docs/components/connectors/protocol/rest/$1 [R=301,L] -RewriteRule ^docs/components/connectors/out-of-the-box-connectors/graphql/(.*)$ /docs/components/connectors/protocol/graphql/$1 [R=301,L] -RewriteRule ^docs/components/connectors/out-of-the-box-connectors/http-webhook/(.*)$ /docs/components/connectors/protocol/http-webhook/$1 [R=301,L] -RewriteRule ^docs/components/modeler/web-modeler/advanced-modeling/manage-connector-templates/(.*)$ /docs/components/connectors/manage-connector-templates/$1 [R=301,L] - -# identity restructure 8.2 -RewriteRule ^docs/self-managed/identity/user-guide/configure-logging/(.*)$ /docs/self-managed/identity/user-guide/configuration/configure-logging/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/configuration/configure-external-identity-provider-and-logging/(.*)$ /docs/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/configure-external-identity-provider-and-logging/(.*)$ /docs/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/configure-external-identity-provider/(.*)$ /docs/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/connect-to-an-existing-keycloak/(.*)$ /docs/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/managing-user-access/(.*)$ /docs/self-managed/identity/user-guide/authorizations/managing-user-access/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/generating-m2m-tokens/(.*)$ /docs/self-managed/identity/user-guide/authorizations/generating-m2m-tokens/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/making-identity-production-ready/(.*)$ /docs/self-managed/identity/user-guide/configuration/making-identity-production-ready/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/assigning-a-role-to-a-user/(.*)$ /docs/self-managed/identity/user-guide/roles/add-assign-role/$1 [R=301,L] -RewriteRule 
^docs/self-managed/identity/user-guide/assigning-a-permission-to-a-role/(.*)$ /docs/self-managed/identity/user-guide/roles/add-assign-permission/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/adding-a-role/(.*)$ /docs/self-managed/identity/user-guide/roles/add-assign-role/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/assigning-a-permission-to-an-application/?(.*)$ /docs/self-managed/identity/user-guide/additional-features/incorporate-applications/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/adding-a-permission/(.*)$ /docs/self-managed/identity/user-guide/roles/add-assign-permission/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/adding-an-api/(.*)$ /docs/self-managed/identity/user-guide/additional-features/adding-an-api/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/adding-an-application/(.*)$ /docs/self-managed/identity/user-guide/additional-features/incorporate-applications/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/groups/creating-a-group/(.*)$ /docs/self-managed/identity/user-guide/groups/create-group/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/groups/assigning-users-to-a-group/(.*)$ /docs/self-managed/identity/user-guide/groups/assign-users-roles-to-group/$1 [R=301,L] -RewriteRule ^docs/self-managed/identity/user-guide/groups/assigning-roles-to-a-group/(.*)$ /docs/self-managed/identity/user-guide/groups/assign-users-roles-to-group/$1 [R=301,L] - -RewriteRule ^docs/8.2/self-managed/identity/user-guide/connect-to-an-existing-keycloak/(.*)$ /docs/8.2/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/managing-user-access/(.*)$ /docs/8.2/self-managed/identity/user-guide/authorizations/managing-user-access/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/generating-m2m-tokens/(.*)$ /docs/8.2/self-managed/identity/user-guide/authorizations/generating-m2m-tokens/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/making-identity-production-ready/(.*)$ /docs/8.2/self-managed/identity/user-guide/configuration/making-identity-production-ready/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/configure-logging/(.*)$ /docs/8.2/self-managed/identity/user-guide/configuration/configure-logging/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider-and-logging/(.*)$ /docs/8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/configure-external-identity-provider-and-logging/(.*)$ /docs/8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/configure-external-identity-provider/(.*)$ /docs/8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/assigning-a-role-to-a-user/(.*)$ /docs/8.2/self-managed/identity/user-guide/roles/add-assign-role/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/assigning-a-permission-to-a-role/(.*)$ /docs/8.2/self-managed/identity/user-guide/roles/add-assign-permission/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/adding-a-role/(.*)$ 
/docs/8.2/self-managed/identity/user-guide/roles/add-assign-role/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/assigning-a-permission-to-an-application/?(.*)$ /docs/8.2/self-managed/identity/user-guide/additional-features/incorporate-applications/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/adding-a-permission/(.*)$ /docs/8.2/self-managed/identity/user-guide/roles/add-assign-permission/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/adding-an-api/(.*)$ /docs/8.2/self-managed/identity/user-guide/additional-features/adding-an-api/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/adding-an-application/(.*)$ /docs/8.2/self-managed/identity/user-guide/additional-features/incorporate-applications/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/groups/creating-a-group/(.*)$ /docs/8.2/self-managed/identity/user-guide/groups/create-group/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/groups/assigning-users-to-a-group/(.*)$ /docs/8.2/self-managed/identity/user-guide/groups/assign-users-roles-to-group/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/identity/user-guide/groups/assigning-roles-to-a-group/(.*)$ /docs/8.2/self-managed/identity/user-guide/groups/assign-users-roles-to-group/$1 [R=301,L] - -# consolidation of Zeebe Gateway configuration -RewriteRule ^docs/next/self-managed/zeebe-gateway-deployment/zeebe-gateway/(.*)$ /docs/next/self-managed/zeebe-deployment/zeebe-gateway/overview/$1 [R=301,L] -RewriteRule ^docs/self-managed/zeebe-gateway-deployment/zeebe-gateway/(.*)$ /docs/self-managed/zeebe-deployment/zeebe-gateway/overview/$1 [R=301,L] -RewriteRule ^docs/8.2/self-managed/zeebe-gateway-deployment/zeebe-gateway/(.*)$ /docs/8.2/self-managed/zeebe-deployment/zeebe-gateway/overview/$1 [R=301,L] - -# identity restructure -RewriteRule ^docs/next/self-managed/identity/user-guide/connect-to-an-existing-keycloak/(.*)$ /docs/next/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/managing-user-access/(.*)$ /docs/next/self-managed/identity/user-guide/authorizations/managing-user-access/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/generating-m2m-tokens/(.*)$ /docs/next/self-managed/identity/user-guide/authorizations/generating-m2m-tokens/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/making-identity-production-ready/(.*)$ /docs/next/self-managed/identity/user-guide/configuration/making-identity-production-ready/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/configure-logging/(.*)$ /docs/next/self-managed/identity/user-guide/configuration/configure-logging/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/configuration/configure-external-identity-provider-and-logging/(.*)$ /docs/next/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/configure-external-identity-provider-and-logging/(.*)$ /docs/next/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/configure-external-identity-provider/(.*)$ /docs/next/self-managed/identity/user-guide/configuration/configure-external-identity-provider/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/assigning-a-role-to-a-user/(.*)$ 
/docs/next/self-managed/identity/user-guide/roles/add-assign-role/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/assigning-a-permission-to-a-role/(.*)$ /docs/next/self-managed/identity/user-guide/roles/add-assign-permission/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/adding-a-role/(.*)$ /docs/next/self-managed/identity/user-guide/roles/add-assign-role/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/assigning-a-permission-to-an-application/?(.*)$ /docs/next/self-managed/identity/user-guide/additional-features/incorporate-applications/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/adding-a-permission/(.*)$ /docs/next/self-managed/identity/user-guide/roles/add-assign-permission/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/adding-an-api/(.*)$ /docs/next/self-managed/identity/user-guide/additional-features/adding-an-api/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/adding-an-application/(.*)$ /docs/next/self-managed/identity/user-guide/additional-features/incorporate-applications/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/groups/creating-a-group/(.*)$ /docs/next/self-managed/identity/user-guide/groups/create-group/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/groups/assigning-users-to-a-group/(.*)$ /docs/next/self-managed/identity/user-guide/groups/assign-users-roles-to-group/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/identity/user-guide/groups/assigning-roles-to-a-group/(.*)$ /docs/next/self-managed/identity/user-guide/groups/assign-users-roles-to-group/$1 [R=301,L] - -###### 8.2 release redirects ###### -RewriteRule ^docs/self-managed/web-modeler/installation/(.*)$ /docs/self-managed/modeler/web-modeler/installation/$1 [R=301,L] -RewriteRule ^docs/self-managed/web-modeler/configuration/(.*)$ /docs/self-managed/modeler/web-modeler/configuration/$1 [R=301,L] -RewriteRule ^docs/apis-clients/operate-api/$ /docs/apis-tools/operate-api/overview/ [R=301,L] -RewriteRule ^optimize/components/userguide/processes/ /optimize/components/userguide/process-dashboards [R=301,L] - -# apis/clients to apis/tools change -RewriteRule ^docs/apis-clients/working-with-apis-clients/?$ /docs/apis-tools/working-with-apis-tools/ [R=301,L] -RewriteRule ^docs/apis-clients(.*)$ /docs/apis-tools$1 [R=301,L] -RewriteRule ^optimize/apis-clients(.*)$ /optimize/apis-tools$1 [R=301,L] -RewriteRule ^docs/1.3/apis-clients(.*)$ /docs/1.3/apis-tools$1 [R=301,L] - -# Identity Authorizations -RewriteRule ^docs/next/self-managed/identity/user-guide/groups/creating-authorizations-for-a-group/(.*)$ /docs/next/self-managed/identity/user-guide/authorizations/managing-resource-authorizations/ [R=301,L] - -# Entra ID redirect to combined page -RewriteRule ^docs/next/self-managed/platform-deployment/helm-kubernetes/guides/connecting-to-entra-id/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/guides/connect-to-an-oidc-provider/$1 [R=301,L] - -# Consolidated redundant element-template pages -RewriteRule ^docs/components/modeler/desktop-modeler/element-templates/?$ /docs/components/modeler/desktop-modeler/element-templates/about-templates/ [R=301,L] - -###### Prior to 8.2 release -RewriteRule ^docs/next/self-managed/web-modeler/installation/(.*)$ /docs/next/self-managed/modeler/web-modeler/installation/$1 [R=301,L] -RewriteRule ^docs/next/apis-clients/operate-api/$ /docs/next/apis-clients/operate-api/overview/ [R=301,L] - -# For a 
short time, we had a change that renamed the page, but not the folder. This next rule covers that. -RewriteRule ^docs/next/apis-clients/working-with-apis-tools/?$ /docs/next/apis-tools/working-with-apis-tools/ [R=301,L] -# Added redirect rules for APIs/Clients to APIs/Tools -RewriteRule ^docs/next/apis-clients/working-with-apis-clients/?$ /docs/next/apis-tools/working-with-apis-tools/ [R=301,L] -RewriteRule ^docs/next/apis-clients(.*)$ /docs/next/apis-tools$1 [R=301,L] -RewriteRule ^optimize/next/apis-clients(.*)$ /optimize/next/apis-tools$1 [R=301,L] - -RewriteRule ^docs/components/zeebe/open-source/exporters/(.*)$ /docs/self-managed/concepts/exporters/$1 [R=301,L] -RewriteRule ^docs/components/zeebe/open-source/exporters/$ /docs/self-managed/concepts/exporters [R=301,L] -RewriteRule ^docs/next/components/zeebe/open-source/exporters/(.*)$ /docs/next/self-managed/concepts/exporters/$1 [R=301,L] -RewriteRule ^docs/next/components/zeebe/open-source/exporters/$ /docs/next/self-managed/concepts/exporters [R=301,L] - -# Redirects for Connectors move to top level -RewriteRule ^docs/next/components/integration-framework/introduction-to-connectors/(.*)$ /docs/next/components/connectors/introduction-to-connectors/$1 [R=301,L] -RewriteRule ^docs/next/components/integration-framework/introduction-to-connectors$ /docs/next/components/connectors/introduction-to-connectors [R=301,L] -RewriteRule ^docs/components/integration-framework/introduction-to-connectors/(.*)$ /docs/components/connectors/introduction-to-connectors/$1 [R=301,L] -RewriteRule ^docs/components/integration-framework/introduction-to-connectors$ /docs/components/connectors/introduction-to-connectors [R=301,L] -RewriteRule ^docs/components/integration-framework/(.*)$ /docs/components/$1 [R=301,L] -RewriteRule ^docs/components/integration-framework$ /docs/components/ [R=301,L] - -# Added for v8.1 -RewriteRule ^docs/self-managed/platform-deployment/helm-kubernetes/deployment/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/deploy/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/ingress-setup/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/local/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/openshift-helm/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/openshift/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/amazon-eks/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/kubernetes-helm/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/deployment/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/kubernetes/(.*)$ /docs/self-managed/platform-deployment/helm-kubernetes/overview/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/platform-8-deployment/(.*)$ /docs/self-managed/platform-deployment/overview/$1 [R=301,L] -RewriteRule ^docs/self-managed/platform-deployment/known-limitations/(.*)$ /docs/self-managed/platform-deployment/troubleshooting/$1 [R=301,L] - -# These redirects go from /next/ to /next/ and should be revisted when /next/ is removed from the sitemap 
-RewriteRule ^docs/next/self-managed/platform-deployment/helm-kubernetes/deployment/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/deploy/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/ingress-setup/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/local/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/openshift-helm/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/openshift/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/amazon-eks/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/kubernetes-helm/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/deployment/$1 [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/kubernetes/(.*)$ /docs/next/self-managed/platform-deployment/helm-kubernetes/overview/$1 [R=301,L] - -RewriteRule ^docs/guides/implementing-connectors/(.*)$ /docs/guides/configuring-out-of-the-box-connectors/$1 [R=301,L] -RewriteRule ^docs/guides/implementing-connectors$ /docs/guides/configuring-out-of-the-box-connectors/ [R=301,L] - -RewriteRule ^docs/1.3/self-managed/overview/(.*)$ /docs/1.3/self-managed/about-self-managed/$1 [R=301,L] -RewriteRule ^docs/1.3/self-managed/overview$ /docs/1.3/self-managed/about-self-managed/ [R=301,L] -RewriteRule ^docs/self-managed/overview/(.*)$ /docs/self-managed/about-self-managed/$1 [R=301,L] -RewriteRule ^docs/self-managed/overview$ /docs/self-managed/about-self-managed/ [R=301,L] - -RewriteRule ^docs/self-managed/zeebe-deployment/?$ /docs/self-managed/zeebe-deployment/zeebe-installation/ [R=301,L] -RewriteRule ^docs/self-managed/zeebe-deployment/operations/?$ /docs/self-managed/zeebe-deployment/operations/zeebe-in-production/ [R=301,L] - -RewriteRule ^docs/self-managed/tasklist-deployment/configuration/(.*)$ /docs/self-managed/tasklist-deployment/tasklist-configuration/$1 [R=301,L] -RewriteRule ^docs/self-managed/tasklist-deployment/authentication/(.*)$ /docs/self-managed/tasklist-deployment/tasklist-authentication/$1 [R=301,L] - -# More /next/ to /next/ -RewriteRule ^docs/next/self-managed/platform-deployment/platform-8-deployment/?$ /docs/next/self-managed/platform-deployment/overview/ [R=301,L] -RewriteRule ^docs/next/self-managed/platform-deployment/?$ /docs/next/self-managed/platform-deployment/overview/ [R=301,L] - -RewriteRule ^docs/self-managed/operate-deployment/configuration/(.*)$ /docs/self-managed/operate-deployment/operate-configuration/$1 [R=301,L] -RewriteRule ^docs/self-managed/operate-deployment/authentication/(.*)$ /docs/self-managed/operate-deployment/operate-authentication/$1 [R=301,L] - -RewriteRule ^docs/self-managed/identity/troubleshooting/?$ /docs/self-managed/identity/troubleshooting/troubleshoot-identity/ [R=301,L] -RewriteRule ^docs/self-managed/identity/getting-started/?$ /docs/self-managed/identity/getting-started/install-identity/ [R=301,L] - -RewriteRule ^docs/1.3/components/overview/(.*)$ /docs/1.3/components/components-overview/$1 [R=301,L] -RewriteRule ^docs/1.3/components/overview$ 
/docs/1.3/components/components-overview/ [R=301,L] -RewriteRule ^docs/components/overview/(.*)$ /docs/components/components-overview/$1 [R=301,L] -RewriteRule ^docs/components/overview$ /docs/components/components-overview/ [R=301,L] - -RewriteRule ^docs/components/zeebe/technical-concepts/?$ /docs/components/zeebe/technical-concepts/technical-concepts-overview/ [R=301,L] - -RewriteRule ^docs/components/tasklist/userguide/overview/(.*)$ /docs/components/tasklist/userguide/using-tasklist/$1 [R=301,L] -RewriteRule ^docs/components/tasklist/userguide/overview$ /docs/components/tasklist/userguide/using-tasklist/ [R=301,L] -RewriteRule ^docs/components/tasklist/introduction/(.*)$ /docs/components/tasklist/introduction-to-tasklist/$1 [R=301,L] -RewriteRule ^docs/components/tasklist/introduction$ /docs/components/tasklist/introduction-to-tasklist/ [R=301,L] - -RewriteRule ^docs/components/operate/?$ /docs/components/operate/operate-introduction/ [R=301,L] - -RewriteRule ^docs/components/modeler/web-modeler/connectors/available-connectors/?$ /docs/components/modeler/web-modeler/connectors/available-connectors/available-connectors-overview/ [R=301,L] -RewriteRule ^docs/components/modeler/web-modeler/connectors/?$ /docs/components/modeler/web-modeler/connectors/introduction-to-connectors/ [R=301,L] - -RewriteRule ^docs/components/console/introduction/(.*)$ /docs/components/console/introduction-to-console/$1 [R=301,L] -RewriteRule ^docs/components/console/introduction$ /docs/components/console/introduction-to-console/ [R=301,L] -RewriteRule ^docs/components/console/troubleshooting/(.*)$ /docs/components/console/console-troubleshooting/$1 [R=301,L] -RewriteRule ^docs/components/console/troubleshooting$ /docs/components/console/console-troubleshooting/ [R=301,L] - -RewriteRule ^docs/components/best-practices/overview/(.*)$ /docs/components/best-practices/best-practices-overview/$1 [R=301,L] -RewriteRule ^docs/components/best-practices/overview$ /docs/components/best-practices/best-practices-overview [R=301,L] - -RewriteRule ^docs/1.3/apis-clients/overview/(.*)$ /docs/1.3/apis-tools/working-with-apis-tools/$1 [R=301,L] -RewriteRule ^docs/1.3/apis-clients/overview$ /docs/1.3/apis-tools/working-with-apis-tools [R=301,L] -RewriteRule ^docs/apis-clients/overview/(.*)$ /docs/apis-tools/working-with-apis-tools/$1 [R=301,L] -RewriteRule ^docs/apis-clients/overview$ /docs/apis-tools/working-with-apis-tools [R=301,L] -RewriteRule ^docs/apis-clients/tasklist-api/tutorial/(.*)$ /docs/apis-clients/tasklist-api/tasklist-api-tutorial/$1 [R=301,L] -RewriteRule ^docs/apis-clients/tasklist-api/overview/(.*)$ /docs/apis-clients/tasklist-api/tasklist-api-overview/$1 [R=301,L] -RewriteRule ^docs/apis-clients/optimize-api/authorization/(.*)$ /optimize/apis-clients/optimize-api/optimize-api-authorization/$1 [R=301,L] -RewriteRule ^docs/apis-clients/go-client/get-started/(.*)$ /docs/apis-clients/go-client/go-get-started/$1 [R=301,L] -RewriteRule ^docs/apis-clients/cli-client/get-started/(.*)$ /docs/apis-clients/cli-client/cli-get-started/$1 [R=301,L] - -RewriteRule ^docs/self-managed/optimize-deployment/rest-api/(.*)$ /optimize/apis-clients/optimize-api/$1 [R=301,L] -RewriteRule ^docs/self-managed/optimize-deployment/optimize-explained/(.*)$ /optimize/self-managed/optimize-deployment/advanced-features/$1 [R=301,L] - -RewriteRule ^docs/self-managed/zeebe-gateway-deployment/index(.*)$ /docs/self-managed/zeebe-gateway-deployment/zeebe-gateway [R=301,L] -RewriteRule ^docs/self-managed/zeebe-gateway-deployment/index$ 
/docs/self-managed/zeebe-gateway-deployment/zeebe-gateway [R=301,L] - -RewriteRule ^docs/guides/operating-the-camunda-cloud-stack-on-kubernetes(.*)$ /docs/self-managed/platform-deployment/kubernetes [R=301,L] -RewriteRule ^docs/guides/operating-the-camunda-cloud-stack-on-kubernetes$ /docs/self-managed/platform-deployment/kubernetes [R=301,L] - -RewriteRule ^docs/guides/getting-started-orchestrate-microservices(.*)$ /docs/guides/orchestrate-microservices [R=301,L] -RewriteRule ^docs/guides/getting-started-orchestrate-microservices$ /docs/guides/orchestrate-microservices [R=301,L] - -RewriteRule ^docs/guides/getting-started-orchestrate-human-tasks(.*)$ /docs/guides/orchestrate-human-tasks [R=301,L] -RewriteRule ^docs/guides/getting-started-orchestrate-human-tasks$ /docs/guides/orchestrate-human-tasks [R=301,L] - -RewriteRule ^docs/guides/getting-started/monitor-your-process-in-operate(.*)$ /docs/self-managed/operate-deployment/install-and-start [R=301,L] -RewriteRule ^docs/guides/getting-started/monitor-your-process-in-operate$ /docs/self-managed/operate-deployment/install-and-start [R=301,L] - -RewriteRule ^docs/guides/getting-started/involve-humans(.*)$ /docs/guides/orchestrate-human-tasks [R=301,L] -RewriteRule ^docs/guides/getting-started/involve-humans$ /docs/guides/orchestrate-human-tasks [R=301,L] - -RewriteRule ^docs/guides/getting-started/implement-decision-gateway(.*)$ /docs/guides/automating-a-process-using-bpmn [R=301,L] -RewriteRule ^docs/guides/getting-started/implement-decision-gateway$ /docs/guides/automating-a-process-using-bpmn [R=301,L] - -RewriteRule ^docs/guides/getting-started/implement-service-task(.*)$ /docs/guides/automating-a-process-using-bpmn [R=301,L] -RewriteRule ^docs/guides/getting-started/implement-service-task$ /docs/guides/automating-a-process-using-bpmn [R=301,L] - -RewriteRule ^docs/guides/getting-started/deploy-your-process-and-start-process-instance(.*)$ /docs/guides/automating-a-process-using-bpmn [R=301,L] -RewriteRule ^docs/guides/getting-started/deploy-your-process-and-start-process-instance$ /docs/guides/automating-a-process-using-bpmn [R=301,L] - -RewriteRule ^docs/guides/getting-started/connect-to-your-cluster(.*)$ /docs/guides/create-cluster [R=301,L] -RewriteRule ^docs/guides/getting-started/connect-to-your-cluster$ /docs/guides/create-cluster [R=301,L] - -RewriteRule ^docs/guides/getting-started/create-your-cluster(.*)$ /docs/guides/create-cluster [R=301,L] -RewriteRule ^docs/guides/getting-started/create-your-cluster$ /docs/guides/create-cluster [R=301,L] - -RewriteRule ^docs/guides/getting-started/create-camunda-cloud-account(.*)$ /docs/guides/create-account [R=301,L] -RewriteRule ^docs/guides/getting-started/create-camunda-cloud-account$ /docs/guides/create-account [R=301,L] - -RewriteRule ^docs/guides/getting-started/(.*)$ /docs/guides/$1 [R=301,L] -RewriteRule ^docs/guides/getting-started$ /docs/guides/ [R=301,L] -RewriteRule ^docs/components/guides/getting-started/(.*)$ /docs/components/guides/$1 [R=301,L] -RewriteRule ^docs/components/guides/getting-started$ /docs/components/guides/ [R=301,L] - -RewriteRule ^docs/components/zeebe/technical-concepts/internal-processing/#handling-back-pressure(.*)$ /docs/components/zeebe/technical-concepts/internal-processing/#handling-backpressure [R=301,L] -RewriteRule ^docs/components/zeebe/technical-concepts/internal-processing/#handling-back-pressure$ /docs/components/zeebe/technical-concepts/internal-processing/#handling-backpressure [R=301,L] - -RewriteRule 
^docs/self-managed/optimize-deployment/setup/secure-elasticsearch(.*)$ /optimize/self-managed/optimize-deployment/configuration/security-instructions [R=301,L] -RewriteRule ^docs/self-managed/optimize-deployment/setup/secure-elasticsearch$ /optimize/self-managed/optimize-deployment/configuration/security-instructions [R=301,L] - -RewriteRule ^docs/self-managed/optimize-deployment/setup/configuration(.*)$ /optimize/self-managed/optimize-deployment/configuration/system-configuration [R=301,L] -RewriteRule ^docs/self-managed/optimize-deployment/setup/configuration$ /optimize/self-managed/optimize-deployment/configuration/system-configuration [R=301,L] - -RewriteRule ^docs/self-managed/optimize-deployment/setup/installation(.*)$ /optimize/self-managed/optimize-deployment/configuration/system-configuration [R=301,L] -RewriteRule ^docs/self-managed/optimize-deployment/setup/installation$ /optimize/self-managed/optimize-deployment/configuration/system-configuration [R=301,L] - -RewriteRule ^docs/self-managed/optimize-deployment/setup/?$ /docs/self-managed/platform-deployment [R=301,L] - -RewriteRule ^docs/self-managed/optimize-deployment/setup/(.*)$ /optimize/self-managed/optimize-deployment/configuration/$1 [R=301,L] -RewriteRule ^docs/self-managed/optimize-deployment/setup/$ /optimize/self-managed/optimize-deployment/configuration/ [R=301,L] - -RewriteRule ^docs/reference/feel/builtin-functions/(.*)$ /docs/components/modeler/feel/builtin-functions/feel-built-in-functions-introduction/ [R=301,L] -RewriteRule ^docs/reference/feel/language-guide/(.*)$ /docs/components/modeler/feel/what-is-feel/ [R=301,L] -RewriteRule ^docs/reference/feel/(.*)$ /docs/components/modeler/feel/what-is-feel/ [R=301,L] -RewriteRule ^docs/reference/feel$ /docs/components/modeler/feel/what-is-feel/ [R=301,L] - -RewriteRule ^docs/apis-clients/cloud-console-api-reference(.*)$ /docs/apis-clients/console-api-reference [R=301,L] -RewriteRule ^docs/apis-clients/cloud-console-api-reference$ /docs/apis-clients/console-api-reference [R=301,L] - -RewriteRule ^docs/components/cloud-console/(.*)$ /docs/components/console/$1 [R=301,L] -RewriteRule ^docs/components/cloud-console/$ /docs/components/console/ [R=301,L] - -RewriteRule ^docs/components/concepts/what-is-camunda-cloud(.*)$ /docs/components/concepts/what-is-camunda-platform-8$1 [R=301,L] -RewriteRule ^docs/components/concepts/what-is-camunda-cloud$ /docs/components/concepts/what-is-camunda-platform-8 [R=301,L] - -RewriteRule ^docs/self-managed/zeebe-deployment/kubernetes/helm/?$ /docs/self-managed/zeebe-deployment/kubernetes/#helm [NE,R=301,L] -RewriteRule ^docs/self-managed/zeebe-deployment/kubernetes/helm/prerequisites/?$ /docs/self-managed/zeebe-deployment/kubernetes/#prerequisites [NE,R=301,L] -RewriteRule ^docs/self-managed/zeebe-deployment/kubernetes/helm/accessing-operate/?$ /docs/self-managed/zeebe-deployment/kubernetes/helm/accessing-operate-tasklist/ [R=301,L] -RewriteRule ^docs/self-managed/zeebe-deployment/kubernetes/.*$ /docs/self-managed/platform-deployment/kubernetes/ [NE,R=301,L] -RewriteRule ^docs/self-managed/zeebe-deployment/docker/.*$ /docs/self-managed/platform-deployment/docker/ [NE,R=301,L] -RewriteRule ^docs/self-managed/zeebe-deployment/local/.*$ /docs/self-managed/platform-deployment/local/ [NE,R=301,L] -RewriteRule ^docs/components/tasklist/faq/(.*)$ /docs/guides/utilizing-forms$1 [R=301,L] -RewriteRule ^docs/components/tasklist/faq/$ /docs/guides/utilizing-forms [R=301,L] - -RewriteRule ^docs/components/modeler/cloud-modeler/model-overview(.*)$ 
/docs/components/modeler/web-modeler/new-web-modeler$1 [R=301,L] -RewriteRule ^docs/components/modeler/cloud-modeler/model-overview$ /docs/components/modeler/web-modeler/new-web-modeler [R=301,L] - -RewriteRule ^docs/components/modeler/camunda-modeler(.*)$ /docs/components/modeler/desktop-modeler$1 [R=301,L] -RewriteRule ^docs/components/modeler/camunda-modeler$ /docs/components/modeler/desktop-modeler [R=301,L] - -RewriteRule ^docs/components/modeler/cloud-modeler(.*)$ /docs/components/modeler/web-modeler$1 [R=301,L] -RewriteRule ^docs/components/modeler/cloud-modeler$ /docs/components/modeler/web-modeler [R=301,L] - -RewriteRule ^docs/guides/integrating-optimize(.*)$ /docs/guides/improve-processes-with-optimize$1 [R=301,L] -RewriteRule ^docs/guides/integrating-optimize$ /docs/guides/improve-processes-with-optimize [R=301,L] - -RewriteRule ^docs/guides/migrating-from-Camunda-Platform(.*)$ /docs/guides/migrating-from-camunda-platform-7$1 [R=301,L] -RewriteRule ^docs/guides/migrating-from-Camunda-Platform$ /docs/guides/migrating-from-camunda-platform-7 [R=301,L] - -RewriteRule ^docs/components/zeebe/third-party-libraries/zeebe-dependencies/$ docs/reference/dependencies/ [R=301,L] -RewriteRule ^docs/components/operate/third-party-libraries/(.*)$ docs/reference/dependencies/ [R=301,L] -RewriteRule ^docs/components/tasklist/third-party-libraries/(.*)$ docs/reference/dependencies/ [R=301,L] -RewriteRule ^docs/self-managed/iam/third-party-libraries/(.*)$ docs/reference/dependencies/ [R=301,L] - -RewriteRule ^docs/reference/bpmn-processes(.*)$ /docs/components/modeler/bpmn$1 [R=301,L] -RewriteRule ^docs/reference/bpmn-processes$ /docs/components/modeler/bpmn [R=301,L] - -RewriteRule ^docs/components/IAM(.*)$ /docs/self-managed/IAM/$1 [R=301,L] -RewriteRule ^docs/components/IAM$ /docs/self-managed/IAM/ [R=301,L] - -RewriteRule ^docs/components/operate/deployment-guide/(.*)$ /docs/self-managed/operate-deployment/$1 [R=301,L] -RewriteRule ^docs/components/operate/deployment-guide$ /docs/self-managed/operate-deployment/ [R=301,L] - -RewriteRule ^docs/components/tasklist/deployment-guide/(.*)$ /docs/self-managed/tasklist-deployment/$1 [R=301,L] -RewriteRule ^docs/components/tasklist/deployment-guide$ /docs/self-managed/tasklist-deployment/ [R=301,L] -RewriteRule ^docs/components/tasklist/userguide/api/(.*)$ /docs/apis-clients/tasklist-api/$1 [R=301,L] -RewriteRule ^docs/components/tasklist/userguide/user-interface/(.*)$ /docs/components/tasklist/userguide/$1 [R=301,L] - -RewriteRule ^docs/components/zeebe/deployment-guide/(.*)$ /docs/self-managed/zeebe-deployment/$1 [R=301,L] -RewriteRule ^docs/components/zeebe/deployment-guide$ /docs/self-managed/zeebe-deployment/ [R=301,L] -RewriteRule ^docs/reference/tasklist-api(.*)$ /docs/apis-clients/tasklist-api$1 [R=301,L] -RewriteRule ^docs/reference/tasklist-api$ /docs/apis-clients/tasklist-api [R=301,L] - -RewriteRule ^docs/reference/public-api/$ /docs/apis-clients/public-api [R=301,L] -RewriteRule ^docs/reference/grpc/?$ /docs/apis-clients/grpc/ [R=301,L] - -RewriteRule ^docs/components/clients/other-clients(.*)$ /docs/apis-clients/community-clients$1 [R=301,L] -RewriteRule ^docs/components/clients/other-clients$ /docs/apis-clients/community-clients [R=301,L] - -RewriteRule ^docs/reference/cloud-console-api-reference(.*)$ /docs/apis-clients/cloud-console-api-reference [R=301,L] -RewriteRule ^docs/reference/cloud-console-api-reference$ /docs/apis-clients/cloud-console-api-reference [R=301,L] - -RewriteRule ^docs/reference/cloud-console-api-clients(.*)$ 
/docs/apis-clients/cloud-console-api-reference [R=301,L] -RewriteRule ^docs/reference/cloud-console-api-clients$ /docs/apis-clients/cloud-console-api-reference [R=301,L] - -RewriteRule ^docs/components/clients/overview(.*)$ /docs/apis-clients/cloud-console-api-reference [R=301,L] -RewriteRule ^docs/components/clients/overview$ /docs/apis-clients/cloud-console-api-reference [R=301,L] - -RewriteRule ^docs/components/cloud-console/manage-organization/manage-cloud-management-api-clients(.*)$ /docs/apis-clients/cloud-console-api-reference [R=301,L] -RewriteRule ^docs/components/cloud-console/manage-organization/manage-cloud-management-api-clients$ /docs/apis-clients/cloud-console-api-reference [R=301,L] - -RewriteRule ^docs/components/clients/(.*)$ /docs/apis-clients/$1 [R=301,L] -RewriteRule ^docs/components/clients$ /docs/apis-clients/ [R=301,L] - -RewriteRule ^docs/product-manuals/clients/java-client/get-started(.*)$ /docs/apis-clients/java-client/index [R=301,L] -RewriteRule ^docs/product-manuals/clients/java-client/get-started$ /docs/apis-clients/java-client/index [R=301,L] - -RewriteRule ^docs/product-manuals/iam/overview$ /docs/components/iam/what-is-iam [R=301,L] -RewriteRule ^docs/components/iam/overview$ /docs/components/iam/what-is-iam [R=301,L] - -RewriteRule ^docs/product-manuals/(.*)$ /docs/components/$1 [R=301,L] -RewriteRule ^docs/product-manuals$ /docs/components/ [R=301,L] - -RewriteRule ^docs/self-managed/zeebe-deployment/security/authorization/?$ /docs/self-managed/zeebe-deployment/security/client-authorization/ [R=301,L] -RewriteRule ^docs/self-managed/zeebe-deployment/security/authentication/?$ /docs/self-managed/zeebe-deployment/security/secure-client-communication/ [R=301,L] - -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/available-connectors-overview(.*)$ /docs/components/connectors/out-of-the-box-connectors/available-connectors-overview$2 [R=301,L] -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/available-connectors-overview$ /docs/components/connectors/out-of-the-box-connectors/available-connectors-overview [R=301,L] - -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/graphql(.*)$ /docs/components/connectors/out-of-the-box-connectors/graphql$2 [R=301,L] -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/graphql$ /docs/components/connectors/out-of-the-box-connectors/graphql [R=301,L] - -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/rest(.*)$ /docs/components/connectors/out-of-the-box-connectors/rest$2 [R=301,L] -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/rest$ /docs/components/connectors/out-of-the-box-connectors/rest [R=301,L] - -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/sendgrid(.*)$ /docs/components/connectors/out-of-the-box-connectors/sendgrid$2 [R=301,L] -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/sendgrid$ /docs/components/connectors/out-of-the-box-connectors/sendgrid [R=301,L] - -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/slack(.*)$ /docs/components/connectors/out-of-the-box-connectors/slack$2 [R=301,L] -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/available-connectors/slack$ /docs/components/connectors/out-of-the-box-connectors/slack [R=301,L] - 
-RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/introduction-to-connectors(.*)$ /docs/components/connectors/introduction-to-connectors$2 [R=301,L] -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/introduction-to-connectors$ /docs/components/connectors/introduction-to-connectors [R=301,L] - -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/use-connectors(.*)$ /docs/components/connectors/use-connectors$2 [R=301,L] -RewriteRule ^docs/(next/)?components/modeler/web-modeler/connectors/use-connectors$ /docs/components/connectors/use-connectors [R=301,L] - -RewriteRule ^docs/self-managed/iam/what-is-iam/(.*)$ /docs/self-managed/identity/what-is-identity/ [R=301,L] -RewriteRule ^docs/self-managed/iam/deployment/configuration-variables/(.*)$ /docs/self-managed/identity/deployment/configuration-variables/ [R=301,L] -RewriteRule ^docs/self-managed/iam/deployment/making-iam-production-ready/(.*)$ /docs/self-managed/identity/user-guide/making-identity-production-ready/ [R=301,L] -RewriteRule ^docs/self-managed/iam/getting-started(.*)$ /docs/self-managed/identity/getting-started/ [R=301,L] - -# workaround for 404 with trailing slashes https://github.com/camunda-cloud/camunda-cloud-documentation/issues/403 -RewriteRule ^(.*\.(yaml|bpmn|xml|png|jpeg|jpg|yml|svg|graphqls|diff))/$ /$1 [R=301,L] - -# Replaced deprecated java-client/testing page with java-client/zeebe-process-test page -RewriteRule ^docs/apis-clients/java-client/testing(.*)$ /docs/apis-clients/java-client/zeebe-process-test$1 [R=301,L] -RewriteRule ^docs/apis-clients/java-client/testing$ /docs/apis-clients/java-client/zeebe-process-test [R=301,L] - -# rules required after update to docusaurs 2.0.0-beta.15 see https://github.com/camunda-cloud/camunda-cloud-documentation/pull/531 -## index pages are not served as /index/ anymore but /index.html -RewriteRule ^(.*)/index/$ /$1 [R=301,L] -## tags do not exist anymore, redirect to main page -RewriteRule ^(.*)/tags/$ / [R=301,L] -## duplicate sections at the end where removed -RewriteRule ^(.*)/(.+)/\2/?$ /$1/$2 [R=301,L] -RewriteRule ^(.*/components/modeler/dmn/)camunda-modeler-dmn/$ /$1 [R=301,L] -RewriteRule ^(.*/components/modeler/camunda-modeler/element-templates/)camunda-modeler-element-templates/$ /$1 [R=301,L] -RewriteRule ^(.*/components/modeler/bpmn/)modeler-bpmn/$ /$1 [R=301,L] - -# disabled blog, redirect to index page -RewriteRule ^blog/ / [R=301,L] - -# redirect best-practices overview page -RewriteRule ^docs/components/best-practices/?$ /docs/components/best-practices/overview/ [R=301,L] - -# Optimize docs get their own section of the site -## The initial implementation of optimize section incorrectly placed all components docs in the root of optimize/. -## These rules redirect from there into a components/ folder. 
-RewriteRule ^optimize/next/what-is-optimize(\/?)$ /optimize/next/components/what-is-optimize/ [R=301,L] -RewriteRule ^optimize/next/userguide/(.*)$ /optimize/next/components/userguide/$1 [R=301,L] -RewriteRule ^optimize/what-is-optimize(\/?)$ /optimize/components/what-is-optimize/ [R=301,L] -RewriteRule ^optimize/userguide/(.*)$ /optimize/components/userguide/$1 [R=301,L] -RewriteRule ^optimize/next/components/userguide/processes/ /optimize/next/components/userguide/process-dashboards [R=301,L] -RewriteRule ^optimize/components/userguide/instant-preview-dashboards/ /optimize/components/userguide/instant-process-dashboards/ [R=301,L] -RewriteRule ^optimize/next/components/userguide/instant-preview-dashboards/ /optimize/next/components/userguide/instant-process-dashboards/ [R=301,L] -RewriteRule ^optimize/next/components/userguide/process-analysis/outlier-analysis/ /optimize/next/components/userguide/process-analysis/task-analysis/ [R=301,L] - -## these rules previously existed, but are now updated to point at the /optimize section: -RewriteRule ^docs/components/optimize/userguide/combined-reports/(.*)$ /optimize/userguide/combined-process-reports/$1 [R=301,L] -RewriteRule ^docs/components/optimize/userguide/process-analysis/overview/(.*)$ /optimize/userguide/process-analysis/process-analysis-overview/$1 [R=301,L] -RewriteRule ^docs/components/optimize/userguide/process-analysis/overview$ /optimize/userguide/process-analysis/process-analysis-overview/ [R=301,L] -RewriteRule ^docs/components/optimize/userguide/decision-analysis/overview/(.*)$ /optimize/userguide/decision-analysis/decision-analysis-overview/$1 [R=301,L] -RewriteRule ^docs/components/optimize/userguide/decision-analysis/overview$ /optimize/userguide/decision-analysis/decision-analysis-overview/ [R=301,L] -RewriteRule ^docs/components/optimize/userguide/additional-features/filters/(.*)$ /optimize/userguide/process-analysis/filters/$1 [R=301,L] - -## these rules cover all pages that were moved from /docs/components/optimize into /optimize/components: -RewriteRule ^docs/components/optimize/what-is-optimize(\/?)$ /optimize/components/what-is-optimize/ [R=301,L] -RewriteRule ^docs/components/optimize/userguide/(.*)$ /optimize/components/userguide/$1 [R=301,L] - -## optimize API and self-managed documentation were moved to the optimize instance: -RewriteRule ^docs/next/self-managed/optimize-deployment/(.*)$ /optimize/next/self-managed/optimize-deployment/$1 [R=301,L] -RewriteRule ^docs/self-managed/optimize-deployment/(.*)$ /optimize/self-managed/optimize-deployment/$1 [R=301,L] -RewriteRule ^docs/next/apis-clients/optimize-api/(.*)$ /optimize/next/apis-clients/optimize-api/$1 [R=301,L] -RewriteRule ^docs/apis-clients/optimize-api/(.*)$ /optimize/apis-clients/optimize-api/$1 [R=301,L] - -## optimize extraction for version 3.7.0: -RewriteRule ^docs/1.3/components/optimize/what-is-optimize(\/?)$ /optimize/3.7.0/components/what-is-optimize/ [R=301,L] -RewriteRule ^docs/1.3/components/optimize/userguide/(.*)$ /optimize/3.7.0/components/userguide/$1 [R=301,L] -RewriteRule ^docs/1.3/self-managed/optimize-deployment/(.*)$ /optimize/3.7.0/self-managed/optimize-deployment/$1 [R=301,L] - -## Archived versions. Note that the URL is adjusted to staging in the publish-stage workflow. 
-RewriteRule ^docs/0.25/(.*)$ https://unsupported.docs.camunda.io/0.25/docs/$1 [R=301,L] -RewriteRule ^docs/0.26/(.*)$ https://unsupported.docs.camunda.io/0.26/docs/$1 [R=301,L] -RewriteRule ^docs/1.0/(.*)$ https://unsupported.docs.camunda.io/1.0/docs/$1 [R=301,L] -RewriteRule ^docs/1.1/(.*)$ https://unsupported.docs.camunda.io/1.1/docs/$1 [R=301,L] -RewriteRule ^docs/1.2/(.*)$ https://unsupported.docs.camunda.io/1.2/docs/$1 [R=301,L] -RewriteRule ^docs/8.0/(.*)$ https://unsupported.docs.camunda.io/8.0/docs/$1 [R=301,L] -RewriteRule ^optimize/3.8.0/(.*)$ https://unsupported.docs.camunda.io/8.0/optimize/$1 [R=301,L] -RewriteRule ^docs/8.1/(.*)$ https://unsupported.docs.camunda.io/8.1/docs/$1 [R=301,L] -RewriteRule ^optimize/3.9.0/(.*)$ https://unsupported.docs.camunda.io/8.1/optimize/$1 [R=301,L] - -# Add yaml mime type -AddType text/vnd.yaml yaml -# Add bpmn mime type -AddType text/xml bpmn -# Add graphql scheme mime type -AddType text/plain graphqls -# Add diff mime type -AddType text/plain diff diff --git a/static/img/form-icons/form-filepicker.svg b/static/img/form-icons/form-filepicker.svg new file mode 100644 index 00000000000..7cd733f13c2 --- /dev/null +++ b/static/img/form-icons/form-filepicker.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/static/img/form-icons/form-table.svg b/static/img/form-icons/form-table.svg index 6e9df5197f0..49a343c34bb 100644 --- a/static/img/form-icons/form-table.svg +++ b/static/img/form-icons/form-table.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/versioned_docs/version-1.3/apis-tools/build-your-own-client.md b/versioned_docs/version-1.3/apis-tools/build-your-own-client.md deleted file mode 100644 index 384470d0db0..00000000000 --- a/versioned_docs/version-1.3/apis-tools/build-your-own-client.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -id: build-your-own-client -title: Build your own client ---- - -If you're using a technology with no library yet, you can easily implement your own client. - -See the following two blog posts about creating a client: - -- [Generating a Zeebe-Python Client Stub in Less Than An Hour: A gRPC + Zeebe Tutorial](https://camunda.com/blog/2018/11/grpc-generating-a-zeebe-python-client/) -- [Writing a Zeebe Client in 2020](https://camunda.com/blog/2020/06/zeebe-client-2020/) - -There are two essential steps: - -1. Authentication via OAuth -2. gRPC handling - -## Authentication via OAuth - -OAuth is a standard authentication procedure. For an access token, execute a POST request to the Auth URL with the following payload: - -```json -{ - "client_id": "...", - "client_secret": "...", - "audience": "zeebe.camunda.io", - "grant_type": "client_credentials" -} -``` - -Here, you see an example of a request with `curl`, which gives you an access token with given client credentials (don't forget to set the environment variables before): - -```bash -curl -s --request POST \ - --url ${ZEEBE_AUTHORIZATION_SERVER_URL} \ - --header 'content-type: application/json' \ - --data "{\"client_id\":\"${ZEEBE_CLIENT_ID}\",\"client_secret\":\"${ZEEBE_CLIENT_SECRET}\",\"audience\":\"zeebe.camunda.io\",\"grant_type\":\"client_credentials\"}" -``` - -You'll receive an access token in the following format: - -```json -{ - "access_token": "ey...", - "scope": "...", - "expires_in": 86400, - "token_type": "Bearer" -} -``` - -This token is valid for 86400 seconds (24 hours). Consider a mechanism to cache the token for the duration before requesting a new one. 
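One possible caching approach (an illustrative sketch, not part of the original page: it assumes the same `ZEEBE_*` environment variables as the request above and that `jq` is installed) is to store the token response together with a computed expiry timestamp, and only request a new token once that timestamp has passed:

```bash
# Illustrative sketch: cache the OAuth response and reuse the token until shortly before it expires.
TOKEN_CACHE=/tmp/zeebe-token-cache.json

fetch_token() {
  curl -s --request POST \
    --url "${ZEEBE_AUTHORIZATION_SERVER_URL}" \
    --header 'content-type: application/json' \
    --data "{\"client_id\":\"${ZEEBE_CLIENT_ID}\",\"client_secret\":\"${ZEEBE_CLIENT_SECRET}\",\"audience\":\"zeebe.camunda.io\",\"grant_type\":\"client_credentials\"}" \
    | jq --arg now "$(date +%s)" '. + {expires_at: (($now | tonumber) + .expires_in - 60)}' \
    > "${TOKEN_CACHE}"
}

# Request a new token only if no cached one exists or the stored expiry has passed.
if [ ! -s "${TOKEN_CACHE}" ] || [ "$(date +%s)" -ge "$(jq -r '.expires_at' "${TOKEN_CACHE}")" ]; then
  fetch_token
fi

export ACCESS_TOKEN=$(jq -r '.access_token' "${TOKEN_CACHE}")
```

Most client libraries handle this for you; `zbctl`, for example, keeps its own OAuth credentials cache (see its `--clientCache` flag further below).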
- -## gRPC handling - -For gRPC handling, complete the following steps: - -1. You need a gRPC library. Locate this for your technology stack. - -2. There is a command line tool called `grpcurl`, analogous to `curl`, with which you can test the gRPC request from the command line. Install [grpcurl](https://github.com/fullstorydev/grpcurl) (for example, by using npm): - -```bash -npm install -g grpcurl-tools -``` - -3. Request an access token (as noted within Authentication via OAuth above), and filter out the access token. Write the value for follow-up processing into a variable: - -```bash -export ACCESS_TOKEN=$(curl -s --request POST \ - --url ${ZEEBE_AUTHORIZATION_SERVER_URL} \ - --header 'content-type: application/json' \ - --data "{\"client_id\":\"${ZEEBE_CLIENT_ID}\",\"client_secret\":\"${ZEEBE_CLIENT_SECRET}\",\"audience\":\"zeebe.camunda.io\",\"grant_type\":\"client_credentials\"}" | sed 's/.*access_token":"\([^"]*\)".*/\1/' ) -``` - -4. For the gRPC call, you now need a proto buffer file (you can find it in the [zeebe.io repository](https://raw.githubusercontent.com/camunda/zeebe/1.3.14/gateway-protocol/src/main/proto/gateway.proto)): - -```bash -curl -sSL https://raw.githubusercontent.com/camunda/zeebe/1.3.14/gateway-protocol/src/main/proto/gateway.proto > /tmp/gateway.proto -``` - -5. Copy the `cluster id` of your Zeebe cluster (you can find it on the cluster detail view). Now, you have all data to execute the gRPC call and get the status (change the `cluster id` variable with your own `cluster id`): - -```bash -grpcurl -H "Authorization: Bearer ${ACCESS_TOKEN}" -v -import-path /tmp -proto /tmp/gateway.proto $CLUSTER_ID.zeebe.camunda.io:443 gateway_protocol.Gateway/Topology -``` - -6. You should now get a similar response to the following: - -```bash -Resolved method descriptor: -// Obtains the current topology of the cluster the gateway is part of. -rpc Topology ( .gateway_protocol.TopologyRequest ) returns ( .gateway_protocol.TopologyResponse ); - -Request metadata to send: -authorization: Bearer ey... 
- -Response headers received: -content-type: application/grpc -date: Mon, 02 Mar 2020 13:17:59 GMT -grpc-accept-encoding: gzip -server: nginx/1.17.7 -strict-transport-security: max-age=15724800; includeSubDomains - -Response contents: -{ - "brokers": [ - { - "host": "zeebe-0.zeebe-broker-service.e2f9117e-e2cc-422d-951e-939732ef515b-zeebe.svc.cluster.local", - "port": 26501, - "partitions": [ - { - "partitionId": 2 - }, - { - "partitionId": 1 - } - ] - } - ], - "clusterSize": 1, - "partitionsCount": 2, - "replicationFactor": 1 -} - -Response trailers received: -(empty) -Sent 0 requests and received 1 response -``` diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/assets/gettingstarted_quickstart_advanced.bpmn b/versioned_docs/version-1.3/apis-tools/cli-client/assets/gettingstarted_quickstart_advanced.bpmn deleted file mode 100644 index ecbd20a58d6..00000000000 --- a/versioned_docs/version-1.3/apis-tools/cli-client/assets/gettingstarted_quickstart_advanced.bpmn +++ /dev/null @@ -1,80 +0,0 @@ [80 lines of BPMN 2.0 XML removed; the markup was not preserved in this extract, leaving only the sequence flow IDs Flow_15yg3k5, Flow_13k1knz, Flow_0qhnfdq, Flow_1vlnqoi and the gateway conditions =result="Pong" and =result!="Pong"] diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances-other.png b/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances-other.png deleted file mode 100644 index c2fa4770c14..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances-other.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances-pong.png b/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances-pong.png deleted file mode 100644 index 03fb9532fce..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances-pong.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances.png b/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances.png deleted file mode 100644 index 231073257c1..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/cli-client/assets/operate-advanced-instances.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced-process-id.png b/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced-process-id.png deleted file mode 100644 index 19bfc52583f..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced-process-id.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced-sequence-flows.png b/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced-sequence-flows.png deleted file mode 100644 index 7f9e5d989c3..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced-sequence-flows.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced.png b/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced.png deleted file mode 100644 index c22b703bc36..00000000000 Binary files
a/versioned_docs/version-1.3/apis-tools/cli-client/assets/zeebe-modeler-advanced.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/get-started.md b/versioned_docs/version-1.3/apis-tools/cli-client/get-started.md deleted file mode 100644 index fb9d17a8566..00000000000 --- a/versioned_docs/version-1.3/apis-tools/cli-client/get-started.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -id: get-started -title: "CLI client - Getting started guide" -sidebar_label: "Getting started guide" ---- - -In this tutorial, you will learn to use the CLI client `zbctl` to interact with Camunda Cloud. - -## Prerequisites - -- [Camunda Cloud account](/guides/getting-started/create-camunda-cloud-account.md) -- [Cluster](/guides/getting-started/create-camunda-cloud-account.md) -- [Client credentials](/guides/getting-started/setup-client-connection-credentials.md) -- [Modeler](/guides/getting-started/model-your-first-process.md) -- [NPM environment](https://www.npmjs.com/) - -## Set up - -### Installation - -Quickly install via the package manager `npm`. The corresponding package is [here](https://www.npmjs.com/package/zbctl). - -```bash -npm i -g zbctl -``` - -You can also download a binary for your operating system from the [Zeebe GitHub releases page](https://github.com/camunda-cloud/zeebe/releases). - -### Connection settings - -To use `zbctl`, it is recommended to define environment variables for the connection settings: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When creating client credentials in Camunda Cloud, you have the option to download a file with the lines above filled out for you. - -Alternatively, use the [described flags](https://www.npmjs.com/package/zbctl#usage) (`--address`, `--clientId`, and `--clientSecret`) with the `zbctl` commands. - -### Test command - -Use the following command to verify everything is set up correctly: - -```bash -zbctl status -``` - -As a result, you should receive a similar response: - -```bash -Cluster size: 1 -Partitions count: 2 -Replication factor: 1 -Gateway version: unavailable -Brokers: - Broker 0 - zeebe-0.zeebe-broker-service.456637ef-8832-428b-a2a4-82b531b25635-zeebe.svc.cluster.local:26501 - Version: unavailable - Partition 1 : Leader - Partition 2 : Leader -``` - -## Advanced process - -Use [this process model](./assets/gettingstarted_quickstart_advanced.bpmn) for the tutorial. - -![processId](./assets/zeebe-modeler-advanced-process-id.png) - -This process includes a service task and an XOR gateway. Select the service task and fill in the properties. Set the **Type** to `test-worker`. - -![process](./assets/zeebe-modeler-advanced.png) - -The worker will return a JSON object as a result, which is used to decide which path to take. - -Now, we can use the JSON object to route your process by filling in the condition expression on the two sequence flows after the XOR gateway. - -Use the following conditional expression for the **Pong** sequence flow: - -```bash -=result="Pong" -``` - -Use the following conditional expression for the **else** sequence flow: - -```bash -=result!="Pong" -``` - -![sequenceflows](./assets/zeebe-modeler-advanced-sequence-flows.png) - -## Deploy a process - -Now, you can deploy the [process](./assets/gettingstarted_quickstart_advanced.bpmn). Navigate to the folder where you saved your process. 
- -```bash -zbctl deploy resource gettingstarted_quickstart_advanced.bpmn -``` - -If the deployment is successful, you'll get the following output: - -```bash -{ - "key": 2251799813685493, - "processes": [ - { - "bpmnProcessId": "camunda-cloud-quick-start-advanced", - "version": 1, - "processKey": 2251799813685492, - "resourceName": "gettingstarted_quickstart_advanced.bpmn" - } - ] -} -``` - -:::note -You will need the `bpmnProcessId` to create a new instance. -::: - -## Register a worker - -The process uses the worker with the type `test-worker`. Register a new one by using the following command: - -```bash -zbctl create worker test-worker --handler "echo {\"result\":\"Pong\"}" -``` - -## Start a new instance - -You can start a new instance with a single command: - -```bash -zbctl create instance camunda-cloud-quick-start-advanced -``` - -As a result, you'll get the following output. This output will contain—among others—the `processInstanceKey`: - -```bash -{ - "processKey": 2251799813685492, - "bpmnProcessId": "camunda-cloud-quick-start-advanced", - "version": 1, - "processInstanceKey": 2251799813685560 -} -``` - -Navigate to **Operate** to monitor the process instance. - -![operate-instances](assets/operate-advanced-instances-pong.png) - -Because the worker returns the following output, the process ends in the upper end event following the **Ping** sequence flow: - -```json -{ - "result": "Pong" -} -``` - -To end up in the lower end event you'll have to modify the worker to return a different result. -Change the worker to the following: - -```bash -zbctl create worker test-worker --handler "echo {\"result\":\"...\"}" -``` - -Creating a new instance leads to a second instance in **Operate**, which you'll see ending in the second end event following the **else** sequence flow: - -![operate-instance](assets/operate-advanced-instances-other.png) - -Next, you can connect both workers in parallel and create more process instances: - -```bash -while true; do zbctl create instance camunda-cloud-quick-start-advanced; sleep 1; done -``` - -In **Operate**, you'll see instances ending in both end events depending on which worker picked up the job. - -![operate-instances](assets/operate-advanced-instances.png) diff --git a/versioned_docs/version-1.3/apis-tools/cli-client/index.md b/versioned_docs/version-1.3/apis-tools/cli-client/index.md deleted file mode 100644 index 14aa7ec94b3..00000000000 --- a/versioned_docs/version-1.3/apis-tools/cli-client/index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -id: index -title: CLI client -sidebar_label: "Quick reference" ---- - -`zbctl` is the command line interface to interact with Camunda Cloud. After installation, a connection can be tested immediately. - -## Installation - -Quickly install via the package manager `npm`. The corresponding package is [here](https://www.npmjs.com/package/zbctl). - -```bash -npm i -g zbctl -``` - -You can also download a binary for your operating system from the [Zeebe GitHub releases page](https://github.com/camunda-cloud/zeebe/releases). - -## Connection settings - -To use `zbctl`, it is recommended to define environment variables for the connection settings: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When you create client credentials in Camunda Cloud, you have the option to download a file with the lines above filled out for you. 
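If you downloaded that file, a quick way to pick up the settings (a minimal sketch; the file name below is a hypothetical placeholder for whatever your download is called) is to source it and run a status check:

```bash
# Load the exported ZEEBE_* variables from the downloaded credentials file (hypothetical name),
# then verify the connection with a status call.
source ./camunda-cloud-credentials.txt
zbctl status
```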
- -Alternatively, use the [described flags](https://www.npmjs.com/package/zbctl#usage) (`--address`, `--clientId`, and `--clientSecret`) with the `zbctl` commands. - -## Usage - -``` -zbctl [options] [command] -``` - -``` -zbctl is a command line interface designed to create and read resources inside the Zeebe broker. -It is designed for regular maintenance jobs, such as: - * Deploying processes - * Creating jobs and process instances - * Activating, completing, or failing jobs - * Updating variables and retries - * Viewing cluster status - -Usage: - zbctl [command] - -Available Commands: - activate Activate a resource - cancel Cancel resource - complete Complete a resource - create Create resources - deploy Creates new process defined by provided BPMN file as processPath - fail Fail a resource - generate Generate documentation - help Help about any command - publish Publish a message - resolve Resolve a resource - set Set a resource - status Checks the current status of the cluster - update Update a resource - version Print the version of zbctl - -Flags: - --address string Specify a contact point address. If omitted, will read from the environment variable 'ZEEBE_ADDRESS' (default '127.0.0.1:26500') - --audience string Specify the resource that the access token should be valid for. If omitted, will read from the environment variable 'ZEEBE_TOKEN_AUDIENCE' - --authzUrl string Specify an authorization server URL from which to request an access token. If omitted, will read from the environment variable 'ZEEBE_AUTHORIZATION_SERVER_URL' (default "https://login.cloud.camunda.io/oauth/token/") - --certPath string Specify a path to a certificate with which to validate gateway requests. If omitted, will read from the environment variable 'ZEEBE_CA_CERTIFICATE_PATH' - --clientCache string Specify the path to use for the OAuth credentials cache. If omitted, will read from the environment variable 'ZEEBE_CLIENT_CONFIG_PATH' (default "/Users/sitapati/.camunda/credentials") - --clientId string Specify a client identifier to request an access token. If omitted, will read from the environment variable 'ZEEBE_CLIENT_ID' - --clientSecret string Specify a client secret to request an access token. If omitted, will read from the environment variable 'ZEEBE_CLIENT_SECRET' - -h, --help help for zbctl - --insecure Specify if zbctl should use an unsecured connection. If omitted, will read from the environment variable 'ZEEBE_INSECURE_CONNECTION' - -Use "zbctl [command] --help" for more information about a command. -``` diff --git a/versioned_docs/version-1.3/apis-tools/cloud-console-api-reference.md b/versioned_docs/version-1.3/apis-tools/cloud-console-api-reference.md deleted file mode 100644 index 0bba2743324..00000000000 --- a/versioned_docs/version-1.3/apis-tools/cloud-console-api-reference.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: cloud-console-api-reference -title: Cloud Console API clients (REST) -description: "To interact with Camunda Cloud programmatically without using the Camunda Cloud UI, create Cloud API clients." ---- - -## Cloud API management - -To interact with Camunda Cloud programmatically without using the Camunda Cloud UI, create Cloud API clients in the organization settings under the **Cloud Management API** tab. - -Cloud API clients are created for an organization, and therefore can access all Camunda Cloud clusters of this organization. - -A client can have one or multiple of the following permissions: - -- **Get clusters**: Retrieve information of all clusters of the organization. 
-- **Create clusters**: Create a cluster for the organization. -- **Delete clusters**: Delete a cluster of the organization. -- **Get Zeebe clients**: Retrieve all Zeebe clients of the organization. -- **Create Zeebe clients**: Create a Zeebe client for a cluster of the organization. -- **Delete Zeebe clients**: Delete a Zeebe client of a cluster owned by the organization. - -:::note -After a Cloud API client is created, the `Client Secret` is only shown once. Save this `Client Secret` somewhere safe. -::: - -To retrieve an access token for the Cloud API client: - -```bash -curl --header "Content-Type: application/json" \ - --request POST \ - --data '{"grant_type":"client_credentials", "audience":"api.cloud.camunda.io", "client_id":"XXX", "client_secret":"YYY"}' \ - https://login.cloud.camunda.io/oauth/token -``` - -:::note -Access tokens have a validity period found in the access token. After this time, a new access token must be requested. -::: - -Note that the auth service has built-in rate limiting. If too many token requests are executed in a short time, the client is blocked for a certain time. Since the access tokens have a certain validity period, they must be cached on the client side. - -## Console API (REST) - -For all requests, include the access token for Cloud API in the Authorization header: `authorization:Bearer ${TOKEN}`. A detailed API description can be found [here](https://console.cloud.camunda.io/customer-api/openapi/docs/#/). diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/c-sharp.md b/versioned_docs/version-1.3/apis-tools/community-clients/c-sharp.md deleted file mode 100644 index 2a8bf12628f..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/c-sharp.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: c-sharp -title: "C#" -description: "Take a deeper look at the source code, Nuget package, and API docs alongside C#." ---- - -The C# client is a community library maintained by [Christopher Zell](https://github.com/Zelldon). - -* [Source code](https://github.com/camunda-community-hub/zeebe-client-csharp) -* [Nuget package](https://www.nuget.org/packages/zb-client/) -* [API docs](https://camunda-community-hub.github.io/zeebe-client-csharp/) diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/index.md b/versioned_docs/version-1.3/apis-tools/community-clients/index.md deleted file mode 100644 index 06c72dd9ed6..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: index -title: "Overview" ---- - -In addition to the core Java and Go clients provided by Zeebe, there are a number of community-maintained Zeebe client libraries: - -- [C#](c-sharp.md) -- [JavaScript/NodeJS](javascript.md) -- [Micronaut](micronaut.md) -- [Python](python.md) -- [Ruby](ruby.md) -- [Rust](rust.md) -- [Spring](spring.md) diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/javascript.md b/versioned_docs/version-1.3/apis-tools/community-clients/javascript.md deleted file mode 100644 index 84cd0ca9b36..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/javascript.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: javascript -title: "JavaScript/NodeJS" -description: "Take a deeper look at the source code, Nuget package, and API docs alongside JavaScript and Node.js." ---- - -## Zeebe Node - -The Zeebe Node client is maintained by [Josh Wulf](https://github.com/jwulf). It can be used to create Node.js applications. 
- -- [Source code](https://github.com/camunda-community-hub/zeebe-client-node-js) -- [NPM package](https://www.npmjs.com/package/zeebe-node) -- [User guide](https://github.com/camunda-community-hub/zeebe-client-node-js) - -## NestJS client - -The NestJS client is maintained by [Dan Shapir](https://github.com/danshapir). It is a microservice transport that integrates Zeebe with the [NestJS](https://nestjs.com/) framework. - -- [Source code](https://github.com/camunda-community-hub/nestjs-zeebe) -- [NPM package](https://www.npmjs.com/package/@payk/nestjs-zeebe) -- [Podcast interview with Dan Shapir](https://zeebe.buzzsprout.com/454051/1989112-zeebe-and-nestjs) - -## Node-RED - -The [Node-RED](https://nodered.org/) Zeebe client is maintained by [Patrick Dehn](https://github.com/pedesen). - -- [Source code](https://github.com/camunda-community-hub/node-red-contrib-zeebe) -- [NPM package](https://www.npmjs.com/package/node-red-contrib-zeebe) - -## Workit Zeebe client - -The Workit Zeebe client is maintained by [Olivier Albertini](https://github.com/OlivierAlbertini). It allows you to run the same application code against Zeebe or the Camunda engine based on configuration settings. - -- [Source code](https://github.com/VilledeMontreal/workit) -- [NPM package](https://www.npmjs.com/package/workit-zeebe-client) -- [API docs](https://villedemontreal.github.io/workit/) - -## Zeebe Elasticsearch client - -The Zeebe Elasticsearch client is maintained by [Olivier Albertini](https://github.com/OlivierAlbertini). It provides an API for querying Zeebe's Elasticsearch export. - -- [Source](https://github.com/VilledeMontreal/workit/tree/master/packages/zeebe-elasticsearch-client) -- [NPM package](https://www.npmjs.com/package/zeebe-elasticsearch-client) diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/micronaut.md b/versioned_docs/version-1.3/apis-tools/community-clients/micronaut.md deleted file mode 100644 index 1f4a89570e3..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/micronaut.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: micronaut -title: "Micronaut" ---- - -The Micronaut integration is a community extension allowing you to easily leverage Zeebe within your Micronaut environment. - -The integration provides a wrapper around the [Zeebe Java Client](/apis-tools/java-client/index.md), and is preconfigured with sensible defaults so you can get started with minimal configuration. Add a dependency, implement a worker, and add your credentials in your Micronaut project. - -The Micronaut Framework is known for its efficient use of resources. Native images created with [GraalVM](https://www.graalvm.org/) reduce startup times to milliseconds. - -* [Documentation and source code](https://github.com/camunda-community-hub/micronaut-zeebe-client) -* [Create application with Micronaut Launch](https://micronaut.io/launch?name=jobworker&features=camunda-zeebe) -* [Releases on Maven Central](https://search.maven.org/artifact/info.novatec/micronaut-zeebe-client-feature) diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/python.md b/versioned_docs/version-1.3/apis-tools/community-clients/python.md deleted file mode 100644 index 491deec6fbe..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/python.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: python -title: "Python" -description: "Take a deeper look at the source code and pip package alongside Python." 
---- - -The Python client is maintained by [Stéphane Ludwig](https://gitlab.com/stephane.ludwig). - -* [Source code](https://gitlab.com/stephane.ludwig/zeebe_python_grpc) -* [Pip package](https://pypi.org/project/zeebe-grpc/) diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/ruby.md b/versioned_docs/version-1.3/apis-tools/community-clients/ruby.md deleted file mode 100644 index ac4511f6621..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/ruby.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: ruby -title: "Ruby" -description: "Take a deeper look at the source code and Ruby gem alongside Ruby." ---- - -The Ruby client is maintained by [Christian Nicolai](https://github.com/cmur2). - -* [Source code](https://github.com/zeebe-io/zeebe-client-ruby) -* [Ruby gem](https://rubygems.org/gems/zeebe-client) diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/rust.md b/versioned_docs/version-1.3/apis-tools/community-clients/rust.md deleted file mode 100644 index 505aad43ff5..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/rust.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: rust -title: "Rust" -description: "Take a deeper look at the source code, Rust crate, and a podcast interview alongside Rust." ---- - -The Rust client, Zeebest, was previously maintained by [Mackenzie Clark](https://github.com/xmclark), and is currently seeking a new maintainer! - -* [Source code](https://github.com/camunda-community-hub/zeebest) -* [Rust crate](https://docs.rs/zeebest/0.20.0/zeebest/) -* [Podcast interview with Mackenzie Clark](https://zeebe.buzzsprout.com/454051/1478953-zeebe-and-rust-interview-with-mackenzie-clark) diff --git a/versioned_docs/version-1.3/apis-tools/community-clients/spring.md b/versioned_docs/version-1.3/apis-tools/community-clients/spring.md deleted file mode 100644 index 166dceba6d5..00000000000 --- a/versioned_docs/version-1.3/apis-tools/community-clients/spring.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: spring -title: "Spring" ---- - -The Spring integration is a community extension that allows you to easily leverage Zeebe within your Spring or Spring Boot environment. - -Essentially, Spring provides a wrapper around the [Zeebe Java Client](/apis-tools/java-client/index.md). 
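To give a feel for the programming model, a minimal worker sketch is shown below. It assumes the `@EnableZeebeClient` and `@ZeebeWorker` annotations and the `io.camunda.zeebe.spring.client` packages provided by the community extension; exact annotation names, packages, and configuration properties vary between spring-zeebe releases, so treat the snippet as an illustration rather than a reference.

```java
import io.camunda.zeebe.client.api.response.ActivatedJob;
import io.camunda.zeebe.client.api.worker.JobClient;
import io.camunda.zeebe.spring.client.EnableZeebeClient;
import io.camunda.zeebe.spring.client.annotation.ZeebeWorker;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Sketch only: annotation and package names follow the community
// spring-zeebe extension and may differ in your version.
@SpringBootApplication
@EnableZeebeClient
public class WorkerApplication {

  public static void main(String[] args) {
    SpringApplication.run(WorkerApplication.class, args);
  }

  // Registers a job worker for jobs of type "payment-service".
  @ZeebeWorker(type = "payment-service")
  public void handlePayment(final JobClient client, final ActivatedJob job) {
    // Business logic goes here; complete the job explicitly.
    client.newCompleteCommand(job.getKey()).send();
  }
}
```

Connection settings and credentials are typically picked up from the same `ZEEBE_*` environment variables or from Spring configuration, as described in the Java client documentation.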
- -* [Documentation and source code](https://github.com/camunda-community-hub/spring-zeebe/) -* [Releases on Maven Central](https://search.maven.org/artifact/io.camunda/spring-zeebe-starter/) diff --git a/versioned_docs/version-1.3/apis-tools/go-client/assets/java-get-started-monitor-1.gif b/versioned_docs/version-1.3/apis-tools/go-client/assets/java-get-started-monitor-1.gif deleted file mode 100644 index ea85f37d050..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/go-client/assets/java-get-started-monitor-1.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/go-client/assets/java-get-started-monitor-2.gif b/versioned_docs/version-1.3/apis-tools/go-client/assets/java-get-started-monitor-2.gif deleted file mode 100644 index 4168440cfe8..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/go-client/assets/java-get-started-monitor-2.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/go-client/assets/order-process-simple.png b/versioned_docs/version-1.3/apis-tools/go-client/assets/order-process-simple.png deleted file mode 100644 index e21a621bb1e..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/go-client/assets/order-process-simple.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/go-client/assets/order-process.png b/versioned_docs/version-1.3/apis-tools/go-client/assets/order-process.png deleted file mode 100644 index 25edc8f4f7f..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/go-client/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/go-client/get-started.md b/versioned_docs/version-1.3/apis-tools/go-client/get-started.md deleted file mode 100644 index d3910238226..00000000000 --- a/versioned_docs/version-1.3/apis-tools/go-client/get-started.md +++ /dev/null @@ -1,368 +0,0 @@ ---- -id: get-started -title: "Go client - Getting started guide" -sidebar_label: "Getting started guide" ---- - -In this tutorial, you will learn how to use the Go client in a Go application to interact with Camunda Cloud. - -You can find the complete source code on [GitHub](https://github.com/zeebe-io/zeebe-get-started-go-client). - -## Prerequisites - -- [Camunda Cloud account](/guides/getting-started/create-camunda-cloud-account.md) -- [Cluster](/guides/getting-started/create-camunda-cloud-account.md) -- [Client credentials](/guides/getting-started/setup-client-connection-credentials.md) -- [Modeler](/guides/getting-started/model-your-first-process.md) -- Go v1.13+ environment installed - -## Set up a project - -First, we need a new Go project. To do this, complete the following steps: - -1. Create a new project using your IDE, or create a new Go module with the following command: - -```bash -mkdir -p $GOPATH/src/github.com/zb-user/zb-example -cd $GOPATH/src/github.com/zb-user/zb-example -go mod init -``` - -2. To use the Zeebe Go client library, run the following: - -```bash -go get github.com/camunda/zeebe/clients/go@1.2.9 -``` - -This adds the following dependency to your `go.mod`, it should look similar to this: - -```go -module github.com/zb-user/zb-example - -go 1.17 - -require github.com/camunda-cloud/zeebe/clients/go v1.2.9 -``` - -3. 
Set the connection settings and client credentials as environment variables: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -:::note -When you create client credentials in Camunda Cloud, you have the option to download a file with the lines above filled out for you. -::: - -4. Create a `main.go` file inside the module and add the following lines to bootstrap the Zeebe client: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" - "github.com/camunda-cloud/zeebe/clients/go/pkg/pb" - "os" -) - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: os.Getenv("ZEEBE_ADDRESS"), - }) - - if err != nil { - panic(err) - } - - ctx := context.Background() - topology, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - for _, broker := range topology.Brokers { - fmt.Println("Broker", broker.Host, ":", broker.Port) - for _, partition := range broker.Partitions { - fmt.Println(" Partition", partition.PartitionId, ":", roleToString(partition.Role)) - } - } -} - -func roleToString(role pb.Partition_PartitionBrokerRole) string { - switch role { - case pb.Partition_LEADER: - return "Leader" - case pb.Partition_FOLLOWER: - return "Follower" - default: - return "Unknown" - } -} -``` - -5. Run the program. - -```bash -go run main.go -``` - -You should see a similar output: - -``` -Broker 0.0.0.0 : 26501 - Partition 1 : Leader -``` - -## Model a process - -Now, we need a simple process we can deploy. Later, we will extend the process with more functionality. For now, follow the steps below: - -1. Open the [modeler](/guides/getting-started/model-your-first-process.md) of your choice and create a new BPMN diagram. - -2. Add a start event named `Order Placed` and an end event named `Order Delivered` to the diagram. Then, connect the events. - -![model-process-step-1](assets/order-process-simple.png) - -3. Set the **id** (the BPMN process id), and mark the diagram as **executable**. - -4. Save the diagram as `src/main/resources/order-process.bpmn` under the project's folder. - -## Deploy a process - -Next, we want to deploy the modeled process to the broker. - -The broker stores the process under its BPMN process id and assigns a version. - -```go - // After the client is created - ctx := context.Background() - response, err := client.NewDeployProcessCommand().AddResourceFile("order-process.bpmn").Send(ctx) - if err != nil { - panic(err) - } - fmt.Println(response.String()) -``` - -Run the program and verify the process deployed successfully. - -You should see a similar output: - -``` -key:2251799813686743 processes: -``` - -## Create a process instance - -We are ready to create our first instance of the deployed process. - -A process instance is created by a specific version of the process, which can be set on creation. - -```go - // After the process is deployed. - variables := make(map[string]interface{}) - variables["orderId"] = "31243" - - request, err := client.NewCreateInstanceCommand().BPMNProcessId("order-process").LatestVersion().VariablesFromMap(variables) - if err != nil { - panic(err) - } - - ctx := context.Background() - - msg, err := request.Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(msg.String()) -``` - -Run the program and verify the process instance is created. 
You should see an output similar to below: - -``` -processKey:2251799813686742 bpmnProcessId:"order-process" version:3 processInstanceKey:2251799813686744 -``` - -## See the process in action - -Want to see how the process instance is executed? Follow the steps below: - -1. Go to the cluster in Camunda Cloud and select it. -1. Click on the link to [Operate](/components/operate/userguide/basic-operate-navigation.md). -1. Select the process **order process**. - -As you can see, a process instance has been started and finished. - -## Work on a task - -Now, we want to do some work within our process. Follow the steps below: - -1. Add a few service tasks to the BPMN diagram and set the required attributes. - -2. Extend your `main.go` file and activate a job. These are created when the process instance reaches a service task. - -3. Open the BPMN diagram in the modeler. Insert three service tasks between the start and the end event. - -- Name the first task `Collect Money`. -- Name the second task `Fetch Items`. -- Name the third task `Ship Parcel`. - -![model-process-step-2](assets/order-process.png) - -4. Set the **type** of each task, which identifies the nature of the work to be performed. - -- Set the **type** of the first task to `payment-service`. -- Set the **type** of the second task to `fetcher-service`. -- Set the **type** of the third task to `shipping-service`. - -5. Additionally, for the service task `Collect Money` set a [**task-header**](/docs/1.3/components/modeler/bpmn/service-tasks/#task-headers) with the key `method` and the value `VISA`. This header is used as a configuration parameter for the payment-service worker to hand over the payment method. - -The consolidated example looks as follows: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/entities" - "github.com/camunda-cloud/zeebe/clients/go/pkg/worker" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" - "log" - "os" -) - -const ZeebeAddr = "0.0.0.0:26500" - -var readyClose = make(chan struct{}) - -func main() { - gatewayAddr := os.Getenv("ZEEBE_ADDRESS") - plainText:= false - - if (gatewayAddr == "") { - gatewayAddr = ZeebeAddr - plainText = true - } - - zbClient, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: gatewayAddr, - UsePlaintextConnection: plainText, - }) - - if err != nil { - panic(err) - } - - // deploy process - ctx := context.Background() - response, err := zbClient.NewDeployProcessCommand().AddResourceFile("order-process-4.bpmn").Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) - - // create a new process instance - variables := make(map[string]interface{}) - variables["orderId"] = "31243" - - request, err := zbClient.NewCreateInstanceCommand().BPMNProcessId("order-process-4").LatestVersion().VariablesFromMap(variables) - if err != nil { - panic(err) - } - - result, err := request.Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(result.String()) - - jobWorker := zbClient.NewJobWorker().JobType("payment-service").Handler(handleJob).Open() - - <-readyClose - jobWorker.Close() - jobWorker.AwaitClose() -} - -func handleJob(client worker.JobClient, job entities.Job) { - jobKey := job.GetKey() - - headers, err := job.GetCustomHeadersAsMap() - if err != nil { - // failed to handle job as we require the custom job headers - failJob(client, job) - return - } - - variables, err := job.GetVariablesAsMap() - if err != nil { - // failed to handle job as we require the variables - failJob(client, job) - return 
- } - - variables["totalPrice"] = 46.50 - request, err := client.NewCompleteJobCommand().JobKey(jobKey).VariablesFromMap(variables) - if err != nil { - // failed to set the updated variables - failJob(client, job) - return - } - - log.Println("Complete job", jobKey, "of type", job.Type) - log.Println("Processing order:", variables["orderId"]) - log.Println("Collect money using payment method:", headers["method"]) - - ctx := context.Background() - _, err = request.Send(ctx) - if err != nil { - panic(err) - } - - log.Println("Successfully completed job") - close(readyClose) -} - -func failJob(client worker.JobClient, job entities.Job) { - log.Println("Failed to complete job", job.GetKey()) - - ctx := context.Background() - _, err := client.NewFailJobCommand().JobKey(job.GetKey()).Retries(job.Retries - 1).Send(ctx) - if err != nil { - panic(err) - } -} -``` - -In this example, we open a [job worker](/components/concepts/job-workers.md) for jobs of type `payment-service`. - -The job worker will repeatedly poll for new jobs of the type `payment-service` and activate them subsequently. Each activated job will then be passed to the job handler, which implements the business logic of the job worker. - -The handler will then complete the job with its result or fail the job if -it encounters a problem while processing the job. - -When observing the current state of the process in Operate, you can see the process instance moved from the first service task to the next one. - -When you run the example above, you should see a similar output to the following: - -``` -key:2251799813686751 processes: -processKey:2251799813686750 bpmnProcessId:"order-process" version:4 processInstanceKey:22517998136 -86752 -2019/06/06 20:59:50 Complete job 2251799813686760 of type payment-service -2019/06/06 20:59:50 Processing order: 31243 -2019/06/06 20:59:50 Collect money using payment method: VISA -2019/06/06 20:59:50 Successfully completed job -``` - -## What's next? - -- Learn more about the [concepts behind Zeebe](/components/concepts/what-is-camunda-cloud.md). -- Learn more about [BPMN processes](/components/modeler/bpmn/bpmn-primer.md). diff --git a/versioned_docs/version-1.3/apis-tools/go-client/index.md b/versioned_docs/version-1.3/apis-tools/go-client/index.md deleted file mode 100644 index cc3b9110af2..00000000000 --- a/versioned_docs/version-1.3/apis-tools/go-client/index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -id: index -title: "Go client" -sidebar_label: "Quick reference" -description: "Here, we'll show you how to instantiate the client." 
---- - -## Dependencies - -To use the Zeebe Go client library, add the following dependency to your `go.mod`: - -``` -module github.com/zb-user/zb-example - -go 1.17 - -require github.com/camunda-cloud/zeebe/clients/go v1.2.9 -``` - -## Bootstrapping - -In Go code, instantiate the client as follows: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" -) - -func main() { - credsProvider, err := zbc.NewOAuthCredentialsProvider(&zbc.OAuthProviderConfig{ - ClientID: "clientId", - ClientSecret: "clientSecret", - Audience: "zeebeAddress", - }) - if err != nil { - panic(err) - } - - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: "zeebeAddress", - CredentialsProvider: credsProvider, - }) - if err != nil { - panic(err) - } - - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` - -Let's go over this code snippet line by line: - -1. Create the credentials provider for the OAuth protocol. This is needed to authenticate your client. -2. Create the client by passing in the address of the cluster we want to connect to and the credentials provider from the step above. -3. Send a test request to verify the connection was established. - -The values for these settings can be taken from the connection information on the **Client Credentials** page. Note that `clientSecret` is only visible when you create the client credentials. - -Another (more compact) option is to pass in the connection settings via environment variables: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When you create client credentials in Camunda Cloud, you have the option to download a file with the lines above filled out for you. - -Given these environment variables, you can instantiate the client as follows: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" - "os" -) - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: os.Getenv("ZEEBE_ADDRESS"), - }) - if err != nil { - panic(err) - } - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` diff --git a/versioned_docs/version-1.3/apis-tools/grpc.md b/versioned_docs/version-1.3/apis-tools/grpc.md deleted file mode 100644 index b53c46dad24..00000000000 --- a/versioned_docs/version-1.3/apis-tools/grpc.md +++ /dev/null @@ -1,679 +0,0 @@ ---- -id: grpc -title: "Zeebe API (gRPC)" -description: "Zeebe clients use gRPC to communicate with the cluster." ---- - -[Zeebe](../components/zeebe/zeebe-overview.md) clients use [gRPC](https://grpc.io/) to communicate with the cluster. - -:::note -This specification still contains references to YAML workflows. This is a [deprecated feature](/reference/announcements.md) and will eventually be removed. -::: - -## Gateway service - -The Zeebe client gRPC API is exposed through a single gateway service. - -### `ActivateJobs` RPC - -Iterates through all known partitions round-robin, activates up to the requested -maximum, and streams them back to the client as they are activated. - -#### Input: `ActivateJobsRequest` - -```protobuf -message ActivateJobsRequest { - // the job type, as defined in the BPMN process (e.g. 
) - string type = 1; - // the name of the worker activating the jobs, mostly used for logging purposes - string worker = 2; - // a job returned after this call will not be activated by another call until the - // timeout (in ms) has been reached - int64 timeout = 3; - // the maximum jobs to activate by this request - int32 maxJobsToActivate = 4; - // a list of variables to fetch as the job variables; if empty, all visible variables at - // the time of activation for the scope of the job will be returned - repeated string fetchVariable = 5; - // The request will be completed when at least one job is activated or after the requestTimeout (in ms). - // if the requestTimeout = 0, a default timeout is used. - // if the requestTimeout < 0, long polling is disabled and the request is completed immediately, even when no job is activated. - int64 requestTimeout = 6; -} -``` - -#### Output: `ActivateJobsResponse` - -```protobuf -message ActivateJobsResponse { - // list of activated jobs - repeated ActivatedJob jobs = 1; -} - -message ActivatedJob { - // the key, a unique identifier for the job - int64 key = 1; - // the type of the job (should match what was requested) - string type = 2; - // the job's process instance key - int64 processInstanceKey = 3; - // the bpmn process ID of the job process definition - string bpmnProcessId = 4; - // the version of the job process definition - int32 processDefinitionVersion = 5; - // the key of the job process definition - int64 processKey = 6; - // the associated task element ID - string elementId = 7; - // the unique key identifying the associated task, unique within the scope of the - // process instance - int64 elementInstanceKey = 8; - // a set of custom headers defined during modelling; returned as a serialized - // JSON document - string customHeaders = 9; - // the name of the worker which activated this job - string worker = 10; - // the amount of retries left to this job (should always be positive) - int32 retries = 11; - // when the job can be activated again, sent as a UNIX epoch timestamp - int64 deadline = 12; - // JSON document, computed at activation time, consisting of all visible variables to - // the task scope - string variables = 13; -} -``` - -#### Errors - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- Type is blank (empty string, null) -- Worker is blank (empty string, null) -- Timeout less than 1 (ms) -- maxJobsToActivate is less than 1 - -### `CancelProcessInstance` RPC - -Cancels a running process instance. - -#### Input: `CancelProcessInstanceRequest` - -```protobuf -message CancelProcessInstanceRequest { - // the process instance key (as, for example, obtained from - // CreateProcessInstanceResponse) - int64 processInstanceKey = 1; -} -``` - -#### Output: `CancelProcessInstanceResponse` - -```protobuf -message CancelProcessInstanceResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No process instance exists with the given key. Note that since process instances are removed once they are finished, it could mean the instance did exist at some point. - -### `CompleteJob` RPC - -Completes a job with the given payload, which allows completing the associated service task. 
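In practice, this RPC is usually reached through one of the clients rather than invoked directly. As a rough sketch, the corresponding Java client call looks like the following, where `client` is an already-bootstrapped `ZeebeClient` and `jobKey` is the key of a previously activated job (both placeholders here):

```java
// Complete the activated job and hand a JSON variables document
// back to the job's scope in the process instance.
client
    .newCompleteCommand(jobKey)
    .variables("{\"totalPrice\": 46.50}")
    .send()
    .join();
```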
- -#### Input: `CompleteJobRequest` - -```protobuf -message CompleteJobRequest { - // the unique job identifier, as obtained from ActivateJobsResponse - int64 jobKey = 1; - // a JSON document representing the variables in the current task scope - string variables = 2; -} -``` - -#### Output: `CompleteJobResponse` - -```protobuf -message CompleteJobResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job exists with the given job key. Note that since jobs are removed once completed, it could be that this job did exist at some point. - -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The job was marked as failed. In that case, the related incident must be resolved before the job can be activated again and completed. - -### `CreateProcessInstance` RPC - -Creates and starts an instance of the specified process. The process definition to use -to create the instance can be specified either using its unique key (as returned by -DeployProcess), or using the BPMN process ID and a version. Pass -1 as the version to -use the latest deployed version. - -:::note -Only processes with none start events can be started through this command. -::: - -#### Input: `CreateProcessInstanceRequest` - -```protobuf -message CreateProcessInstanceRequest { - // the unique key identifying the process definition (e.g. returned from a process - // in the DeployProcessResponse message) - int64 processKey = 1; - // the BPMN process ID of the process definition - string bpmnProcessId = 2; - // the version of the process; set to -1 to use the latest version - int32 version = 3; - // JSON document that will instantiate the variables for the root variable scope of the - // process instance; it must be a JSON object, as variables will be mapped in a - // key-value fashion. e.g. { "a": 1, "b": 2 } will create two variables, named "a" and - // "b" respectively, with their associated values. [{ "a": 1, "b": 2 }] would not be a - // valid argument, as the root of the JSON document is an array and not an object. - string variables = 4; -} -``` - -#### Output: `CreateProcessInstanceResponse` - -```protobuf -message CreateProcessInstanceResponse { - // the key of the process definition which was used to create the process instance - int64 processKey = 1; - // the BPMN process ID of the process definition which was used to create the process - // instance - string bpmnProcessId = 2; - // the version of the process definition which was used to create the process instance - int32 version = 3; - // the unique identifier of the created process instance; to be used wherever a request - // needs a process instance key (e.g. CancelProcessInstanceRequest) - int64 processInstanceKey = 4; -} -``` - -### `CreateProcessInstanceWithResult` RPC - -Similar to `CreateProcessInstance` RPC, creates and starts an instance of the specified process. -Unlike `CreateProcessInstance` RPC, the response is returned when the process is completed. - -:::note -Only processes with none start events can be started through this command. -::: - -#### Input: `CreateProcessInstanceWithResultRequest` - -```protobuf -message CreateProcessInstanceRequest { - CreateProcessInstanceRequest request = 1; - // timeout (in ms). the request will be closed if the process is not completed before - // the requestTimeout. - // if requestTimeout = 0, uses the generic requestTimeout configured in the gateway. 
- int64 requestTimeout = 2; -} -``` - -#### Output: `CreateProcessInstanceWithResultResponse` - -```protobuf -message CreateProcessInstanceResponse { - // the key of the process definition which was used to create the process instance - int64 processKey = 1; - // the BPMN process ID of the process definition which was used to create the process - // instance - string bpmnProcessId = 2; - // the version of the process definition which was used to create the process instance - int32 version = 3; - // the unique identifier of the created process instance; to be used wherever a request - // needs a process instance key (e.g. CancelProcessInstanceRequest) - int64 processInstanceKey = 4; - // consisting of all visible variables to the root scope - string variables = 5; -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No process with the given key exists (if processKey was given). -- No process with the given process ID exists (if bpmnProcessId was given but version was -1). -- No process with the given process ID and version exists (if both bpmnProcessId and version were given). - -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The process definition does not contain a none start event; only processes with none - start event can be started manually. - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- The given variables argument is not a valid JSON document; it is expected to be a valid - JSON document where the root node is an object. - -### `DeployProcess` RPC - -Deploys one or more processes to Zeebe. Note that this is an atomic call, -i.e. either all processes are deployed, or none of them are. - -#### Input: `DeployProcessRequest` - -```protobuf -message DeployProcessRequest { - // List of process resources to deploy - repeated ProcessRequestObject processes = 1; -} - -message ProcessRequestObject { - enum ResourceType { - // FILE type means the gateway will try to detect the resource type - // using the file extension of the name field - FILE = 0; - BPMN = 1; // extension 'bpmn' - YAML = 2 [deprecated = true]; // extension 'yaml'; removed as of release 1.0 - } - - // the resource basename, e.g. myProcess.bpmn - string name = 1; - // the resource type; if set to BPMN or YAML then the file extension - // is ignored - // As of release 1.0, YAML support was removed and BPMN is the only supported resource type. - // The field was kept to not break clients. - ResourceType type = 2 [deprecated = true]; - // the process definition as a UTF8-encoded string - bytes definition = 3; -} -``` - -#### Output: `DeployProcessResponse` - -```protobuf -message DeployProcessResponse { - // the unique key identifying the deployment - int64 key = 1; - // a list of deployed processes - repeated ProcessMetadata processes = 2; -} - -message ProcessMetadata { - // the bpmn process ID, as parsed during deployment; together with the version forms a - // unique identifier for a specific process definition - string bpmnProcessId = 1; - // the assigned process version - int32 version = 2; - // the assigned key, which acts as a unique identifier for this process - int64 processKey = 3; - // the resource name (see: ProcessRequestObject.name) from which this process was - // parsed - string resourceName = 4; -} -``` - -#### Errors - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- No resources given. -- At least one resource is invalid. A resource is considered invalid if: - - It is not a BPMN or YAML file (currently detected through the file extension). 
- - The resource data is not deserializable (e.g. detected as BPMN, but it's broken XML). - - The process is invalid (e.g. an event-based gateway has an outgoing sequence flow to a task.) - -### `FailJob` RPC - -Marks the job as failed; if the retries argument is positive, the job is immediately -activatable again, and a worker could try again to process it. If it is zero or negative, -an incident is raised, tagged with the given errorMessage, and the job is not activatable until the incident is resolved. - -#### Input: `FailJobRequest` - -```protobuf -message FailJobRequest { - // the unique job identifier, as obtained when activating the job - int64 jobKey = 1; - // the amount of retries the job should have left - int32 retries = 2; - // an optional message describing why the job failed - // this is particularly useful if a job runs out of retries and an incident is raised, - // as it this message can help explain why an incident was raised - string errorMessage = 3; -} -``` - -#### Output: `FailJobResponse` - -```protobuf -message FailJobResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job was found with the given key. - -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The job was not activated. -- The job is already in a failed state, i.e. ran out of retries. - -### `PublishMessage` RPC - -Publishes a single message. Messages are published to specific partitions computed from their -correlation keys. - -#### Input: `PublishMessageRequest` - -```protobuf -message PublishMessageRequest { - // the name of the message - string name = 1; - // the correlation key of the message - string correlationKey = 2; - // how long the message should be buffered on the broker, in milliseconds - int64 timeToLive = 3; - // the unique ID of the message; can be omitted. only useful to ensure only one message - // with the given ID will ever be published (during its lifetime) - string messageId = 4; - // the message variables as a JSON document; to be valid, the root of the document must be an - // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid. - string variables = 5; -} -``` - -#### Output: `PublishMessageResponse` - -```protobuf -message PublishMessageResponse { - // the unique ID of the message that was published - int64 key = 1; -} -``` - -#### Errors - -##### GRPC_STATUS_ALREADY_EXISTS - -Returned if: - -- A message with the same ID was previously published (and is still alive). - -### `ResolveIncident` RPC - -Resolves a given incident. This simply marks the incident as resolved; most likely a call to -UpdateJobRetries or SetVariables will be necessary to actually resolve the -problem, followed by this call. - -#### Input: `ResolveIncidentRequest` - -```protobuf -message ResolveIncidentRequest { - // the unique ID of the incident to resolve - int64 incidentKey = 1; -} -``` - -#### Output: `ResolveIncidentResponse` - -```protobuf -message ResolveIncidentResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No incident with the given key exists. - -### `SetVariables` RPC - -Updates all the variables of a particular scope (e.g. process instance, flow element instance) from the given JSON document. 
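As a sketch of how this is typically used from the Java client, assuming an existing `ZeebeClient` named `client` and an element instance key obtained from a job or a created process instance (method names follow the Java client's set-variables command and may differ slightly between client versions):

```java
// Update variables in the scope identified by elementInstanceKey.
// With local(false), the update is propagated to parent scopes as
// described for the `local` field below.
client
    .newSetVariablesCommand(elementInstanceKey)
    .variables("{\"totalPrice\": 46.50}")
    .local(false)
    .send()
    .join();
```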
- -#### Input: `SetVariablesRequest` - -```protobuf -message SetVariablesRequest { - // the unique identifier of a particular element; can be the process instance key (as - // obtained during instance creation), or a given element, such as a service task (see - // elementInstanceKey on the job message) - int64 elementInstanceKey = 1; - // a JSON serialized document describing variables as key value pairs; the root of the document - // must be an object - string variables = 2; - // if true, the variables will be merged strictly into the local scope (as indicated by - // elementInstanceKey); this means the variables is not propagated to upper scopes. - // for example, let's say we have two scopes, '1' and '2', with each having effective variables as: - // 1 => `{ "foo" : 2 }`, and 2 => `{ "bar" : 1 }`. if we send an update request with - // elementInstanceKey = 2, variables `{ "foo" : 5 }`, and local is true, then scope 1 will - // be unchanged, and scope 2 will now be `{ "bar" : 1, "foo" 5 }`. if local was false, however, - // then scope 1 would be `{ "foo": 5 }`, and scope 2 would be `{ "bar" : 1 }`. - bool local = 3; -} -``` - -#### Output: `SetVariablesResponse` - -```protobuf -message SetVariablesResponse { - // the unique key of the set variables command - int64 key = 1; -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No element with the given `elementInstanceKey` exists. - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- The given payload is not a valid JSON document; all payloads are expected to be - valid JSON documents where the root node is an object. - -### `ThrowError` RPC - -Throw an error to indicate that a business error has occurred while processing the job. The error is identified by an error code and is caught by an error catch event with the same error code. - -#### Input: `ThrowErrorRequest` - -```protobuf -message ThrowErrorRequest { - // the unique job identifier, as obtained when activating the job - int64 jobKey = 1; - // the error code that will be matched with an error catch event - string errorCode = 2; - // an optional error message that provides additional context - string errorMessage = 3; -} -``` - -#### Output: `ThrowErrorResponse` - -```protobuf -message ThrowErrorResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job was found with the given key. - -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The job is already in a failed state, i.e. ran out of retries. - -### `Topology` RPC - -Obtains the current topology of the cluster the gateway is part of. 
- -#### Input: `TopologyRequest` - -```protobuf -message TopologyRequest { -} -``` - -#### Output: `TopologyResponse` - -```protobuf -message TopologyResponse { - // list of brokers part of this cluster - repeated BrokerInfo brokers = 1; - // how many nodes are in the cluster - int32 clusterSize = 2; - // how many partitions are spread across the cluster - int32 partitionsCount = 3; - // configured replication factor for this cluster - int32 replicationFactor = 4; - // gateway version - string gatewayVersion = 5; -} - -message BrokerInfo { - // unique (within a cluster) node ID for the broker - int32 nodeId = 1; - // hostname of the broker - string host = 2; - // port for the broker - int32 port = 3; - // list of partitions managed or replicated on this broker - repeated Partition partitions = 4; - // broker version - string version = 5; -} - -message Partition { - // Describes the Raft role of the broker for a given partition - enum PartitionBrokerRole { - LEADER = 0; - FOLLOWER = 1; - } - - // Describes the current health of the partition - enum PartitionBrokerHealth { - HEALTHY = 0; - UNHEALTHY = 1; - } - - // the unique ID of this partition - int32 partitionId = 1; - // the role of the broker for this partition - PartitionBrokerRole role = 2; - // the health of this partition - PartitionBrokerHealth health = 3; -} -``` - -#### Errors - -No specific errors. - -### `UpdateJobRetries` RPC - -Updates the number of retries a job has left. This is mostly useful for jobs that have run out of -retries, should the underlying problem be solved. - -#### Input: `UpdateJobRetriesRequest` - -```protobuf -message UpdateJobRetriesRequest { - // the unique job identifier, as obtained through ActivateJobs - int64 jobKey = 1; - // the new amount of retries for the job; must be positive - int32 retries = 2; -} -``` - -#### Output: `UpdateJobRetriesResponse` - -```protobuf -message UpdateJobRetriesResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job exists with the given key. - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- Retries is not greater than 0. - -## Technical error handling - -In the documentation above, the documented errors are business logic errors. -These errors are a result of request processing logic, and not serialization, network, or -other more general errors. These errors are described in this section. - -The gRPC API for Zeebe is exposed through an API gateway, which acts as a proxy -for the cluster. Generally, this means the clients execute a remote call on the gateway, -which is then translated to special binary protocol the gateway uses to -communicate with nodes in the cluster. The nodes in the cluster are called brokers. - -Technical errors which occur between gateway and brokers (e.g. the gateway cannot deserialize the broker response, -the broker is unavailable, etc.) are reported to the client using the following error codes: - -- `GRPC_STATUS_RESOURCE_EXHAUSTED`: When a broker receives more requests than it can handle, it signals back-pressure and rejects requests with this error code. - - In this case, it is possible to retry the requests with an appropriate retry strategy. - - If you receive many such errors within a short time period, it indicates the broker is constantly under high load. - - It is recommended to reduce the rate of requests. - When back-pressure is active, the broker may reject any request except _CompleteJob_ RPC and _FailJob_ RPC. 
- - These requests are allowed during back-pressure and are always accepted by the broker even if it is receiving requests above its limits. -- `GRPC_STATUS_UNAVAILABLE`: If the gateway itself is in an invalid state (e.g. out of memory). -- `GRPC_STATUS_INTERNAL`: For any other internal errors that occurred between the gateway and the broker. - -This behavior applies to every request. In these cases, the client should retry -with an appropriate retry policy (e.g. a combination of exponential backoff or jitter wrapped -in a circuit breaker). - -As the gRPC server/client is based on generated code, keep in mind that -any call made to the server can also return errors as described by the spec -[here](https://grpc.io/docs/guides/error.html#error-status-codes). diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/cluster-topology-request.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/cluster-topology-request.md deleted file mode 100644 index c38a21a2e6b..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/cluster-topology-request.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: cluster-topology-request -title: "Request cluster topology" ---- - -This example shows which broker is leader and follower for which partition. This is particularly useful when you run a cluster with multiple Zeebe brokers. - -## Related resources - -- [Clustering basics](/components/zeebe/technical-concepts/clustering.md) - -## Prerequisites - -Run Zeebe broker with endpoint `localhost:26500` (default). - -## TopologyViewer.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/cluster/TopologyViewer.java) - -```java -final Topology topology = client.newTopologyRequest().send().join(); - -System.out.println("Topology:"); -topology - .getBrokers() - .forEach( - b -> { - System.out.println(" " + b.getAddress()); - b.getPartitions() - .forEach( - p -> - System.out.println( - " " + p.getPartitionId() + " - " + p.getRole())); - }); -``` diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/data-pojo.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/data-pojo.md deleted file mode 100644 index b73b7bece7c..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/data-pojo.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: data-pojo -title: "Handle variables as POJO" -description: "Let's analyze the prerequisites and code to handle variables as POJO." ---- - -## Related resources - -- [Data flow](../../components/modeler/bpmn/data-flow.md) - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -2. Run the [deploy a process example](process-deploy.md). - -## HandleVariablesAsPojo.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/data/HandleVariablesAsPojo.java) - -```java - ... 
- try (final ZeebeClient client = clientBuilder.build()) { - final Order order = new Order(); - order.setOrderId(31243); - - client - .newCreateInstanceCommand() - .bpmnProcessId("demoProcess") - .latestVersion() - .variables(order) - .send() - .join(); - - client.newWorker().jobType("foo").handler(new DemoJobHandler()).open(); - - // run until System.in receives exit command - waitUntilSystemInput("exit"); - } - } - - public static class Order { - private long orderId; - private double totalPrice; - - public long getOrderId() { - return orderId; - } - - public void setOrderId(final long orderId) { - this.orderId = orderId; - } - - public double getTotalPrice() { - return totalPrice; - } - - public void setTotalPrice(final double totalPrice) { - this.totalPrice = totalPrice; - } - } - - private static class DemoJobHandler implements JobHandler { - @Override - public void handle(final JobClient client, final ActivatedJob job) { - // read the variables of the job - final Order order = job.getVariablesAsType(Order.class); - System.out.println("new job with orderId: " + order.getOrderId()); - - // update the variables and complete the job - order.setTotalPrice(46.50); - - client.newCompleteCommand(job.getKey()).variables(order).send(); - } - } -``` diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/index.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/index.md deleted file mode 100644 index 56bbb62865c..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: index -title: "Java examples" -sidebar_label: "Overview" ---- - -Let's analyze a few examples utilizing Java to deploy a process, open a job worker, handle variables, and request cluster topology. - -These examples are accessible in the [Camunda Community Hub Repository](https://github.com/camunda-community-hub/camunda-8-examples). - -Instructions to access code locally: - -``` -git clone https://github.com/camunda-cloud/zeebe.git -git checkout develop -cd samples -``` - -Import the Maven project in the `samples` directory into your IDE to start hacking. - -## Process - -- [Deploy a process](process-deploy.md) -- [Create a process instance](process-instance-create.md) -- [Create non-blocking process instances](process-instance-create-nonblocking.md) -- [Create a process instance with results](process-instance-create-with-result.md) - -## Job - -- [Open a job worker](job-worker-open.md) - -## Data - -- [Handle variables as POJO](data-pojo.md) - -## Cluster - -- [Request cluster topology](cluster-topology-request.md) diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/job-worker-open.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/job-worker-open.md deleted file mode 100644 index 9471c7072a3..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/job-worker-open.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: job-worker-open -title: "Open a job worker" -description: "Let's analyze the prerequisites and code to open a job worker." ---- - -## Related resources - -- [Job worker basics](/components/concepts/job-workers.md) - -## Prerequisites - -- Run the Zeebe broker with endpoint `localhost:26500` (default). -- Run the [deploy a process example](process-deploy.md). -- Run the [create a process instance example](process-instance-create.md) a few times. 
- -## JobWorkerCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/job/JobWorkerCreator.java) - -```java - ... - final String jobType = "foo"; - - try (final ZeebeClient client = clientBuilder.build()) { - - System.out.println("Opening job worker."); - - try (final JobWorker workerRegistration = - client - .newWorker() - .jobType(jobType) - .handler(new ExampleJobHandler()) - .timeout(Duration.ofSeconds(10)) - .open()) { - System.out.println("Job worker opened and receiving jobs."); - - // run until System.in receives exit command - waitUntilSystemInput("exit"); - } - } - } - - private static class ExampleJobHandler implements JobHandler { - @Override - public void handle(final JobClient client, final ActivatedJob job) { - // here: business logic that is executed with every job - System.out.println(job); - client.newCompleteCommand(job.getKey()).send().join(); - } - } -``` diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-deploy.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/process-deploy.md deleted file mode 100644 index 3946e54aed5..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-deploy.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: process-deploy -title: "Deploy a process" -description: "Let's analyze the prerequisites and code to deploy a process using Java." ---- - -## Related resources - -- [Process basics](../../components/concepts/processes.md) -- [BPMN introduction](../../components/modeler/bpmn/bpmn-primer.md) - -## Prerequisites - -Run the Zeebe broker with endpoint `localhost:26500` (default). - -## ProcessDeployer.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/ProcessDeployer.java) - -```java -final DeploymentEvent deploymentEvent = - client.newDeployCommand() - .addResourceFromClasspath("demoProcess.bpmn") - .send() - .join(); -``` - -## demoProcess.bpmn - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoProcess.bpmn) - -Download the XML and save it in the Java classpath before running the example. Open the file with Desktop Modeler for a graphical representation. - - diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create-nonblocking.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create-nonblocking.md deleted file mode 100644 index 531191dc7b5..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create-nonblocking.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: process-instance-create-nonblocking -title: "Create non-blocking process instances" -description: "Let's analyze the prerequisites and code to create non-blocking process instances with Java." ---- - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -2. Run the [deploy a process example](process-deploy.md). 
- -## NonBlockingProcessInstanceCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/NonBlockingProcessInstanceCreator.java) - -```java -long instancesCreating = 0; - -while (instancesCreating < numberOfInstances) { - // this is non-blocking/async => returns a future - final ZeebeFuture future = - client.newCreateInstanceCommand().bpmnProcessId(bpmnProcessId).latestVersion().send(); - - // could put the future somewhere and eventually wait for its completion - - instancesCreating++; -} -``` diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create-with-result.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create-with-result.md deleted file mode 100644 index 1c9d0c22e4d..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create-with-result.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: process-instance-create-with-result -title: "Create a process instance with results" -description: "Let's analyze the prerequisites and code to create a process instance with real results." ---- - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -1. Run the [deploy a process example](process-deploy.md). Deploy [`demoProcessSingleTask.bpmn`](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoProcessSingleTask.bpmn) instead of `demoProcess.bpmn`. - -## ProcessInstanceWithResultCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/ProcessInstanceWithResultCreator.java) - -```java -final ProcessInstanceResult processInstanceResult = - client - .newCreateInstanceCommand() - .bpmnProcessId(bpmnProcessId) - .latestVersion() - .withResult() // to await the completion of process execution and return result - .send() - .join(); - -System.out.println( - "Process instance created with key: " - + processInstanceResult.getProcessInstanceKey() - + " and completed with results: " - + processInstanceResult.getVariables()); -``` diff --git a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create.md b/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create.md deleted file mode 100644 index b12e0c0d562..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client-examples/process-instance-create.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: process-instance-create -title: "Create a process instance" -description: "Let's dive deeper into Zeebe and Java to create a process instance." ---- - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -1. Run the [deploy a process example](process-deploy.md). 
- -## ProcessInstanceCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/ProcessInstanceCreator.java) - -```java -final ProcessInstanceEvent processInstanceEvent = - client - .newCreateInstanceCommand() - .bpmnProcessId(bpmnProcessId) - .latestVersion() - .send() - .join(); -``` diff --git a/versioned_docs/version-1.3/apis-tools/java-client/assets/order-process-simple.png b/versioned_docs/version-1.3/apis-tools/java-client/assets/order-process-simple.png deleted file mode 100644 index e21a621bb1e..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/java-client/assets/order-process-simple.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/java-client/assets/order-process.png b/versioned_docs/version-1.3/apis-tools/java-client/assets/order-process.png deleted file mode 100644 index 25edc8f4f7f..00000000000 Binary files a/versioned_docs/version-1.3/apis-tools/java-client/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/apis-tools/java-client/index.md b/versioned_docs/version-1.3/apis-tools/java-client/index.md deleted file mode 100644 index 167335faaa4..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client/index.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: index -title: "Java client" -sidebar_label: "Quick reference" -description: "Here, we'll look a quick overview of the Java client and next steps." ---- - -## Dependencies - -To use the Java client library, declare the following Maven dependency in your project: - -```xml - - io.camunda - zeebe-client-java - ${zeebe.version} - -``` - -If you are using Gradle, declare the following: - -```groovy -implementation 'io.camunda:zeebe-client-java:${zeebe.version}' -``` - -Use the latest released version from [Maven Central](https://search.maven.org/artifact/io.camunda/zeebe-client-java). - -## Spring integration - -If you build a Spring or Spring Boot application, you might want to use [Spring Zeebe](/apis-tools/community-clients/spring.md) instead of handling the lifecycle and configuration of the Java client yourself (as described in the following paragraphs). - -## Bootstrapping - -In Java code, instantiate the client as follows: - -```java - private static final String zeebeAPI = "[Zeebe API]"; - private static final String clientId = "[Client ID]"; - private static final String clientSecret = "[Client Secret]"; - private static final String oAuthAPI = "[OAuth API] "; - - public static void main(String[] args) { - OAuthCredentialsProvider credentialsProvider = - new OAuthCredentialsProviderBuilder() - .authorizationServerUrl(oAuthAPI) - .audience(zeebeAPI) - .clientId(clientId) - .clientSecret(clientSecret) - .build(); - - ZeebeClient client = - ZeebeClient.newClientBuilder() - .gatewayAddress(zeebeAPI) - .credentialsProvider(credentialsProvider) - .build(); - - client.newTopologyRequest().send().join(); - } -``` - -Let's go over this code snippet line by line: - -1. Declare a few variables to define the connection properties. These values can be taken from the connection information on the **Client Credentials** page. Note that `clientSecret` is only visible when you create the client credentials. -2. Create the credentials provider for the OAuth protocol. This is needed to authenticate your client. -3. Create the client by passing in the address of the cluster we want to connect to and the credentials provider from the step above. -4. 
Send a test request to verify the connection was established. - -See [io.camunda.zeebe.client.ZeebeClientBuilder](https://javadoc.io/doc/io.camunda/zeebe-client-java/latest/io/zeebe/client/ZeebeClientBuilder.html) for a description of all available configuration properties. - -Another (more compact) option is to pass in the connection settings via environment variables: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When you create client credentials in Camunda Cloud, you have the option to download a file with the lines above filled out for you. - -Given these environment variables, you can instantiate the client as follows: - -```java -ZeebeClient client = - ZeebeClient.newClientBuilder() - .gatewayAddress(System.getenv("ZEEBE_ADDRESS")) - .build(); -``` - -## Next steps - -- [Getting Started Guide](https://github.com/camunda-cloud/camunda-cloud-get-started): A comprehensive tutorial that covers Camunda Modeler, Operate, and the Java client. -- [Job worker](job-worker.md): An introduction to the Java client's job worker. -- [Logging](logging.md): An introduction to configuring logging for a Zeebe client. -- [Writing tests](testing.md): An introduction to writing tests that use an embedded version of the workflow engine. -- [Examples](/apis-tools/java-client-examples/index.md): A collection of specific examples for different use cases. diff --git a/versioned_docs/version-1.3/apis-tools/java-client/job-worker.md b/versioned_docs/version-1.3/apis-tools/java-client/job-worker.md deleted file mode 100644 index b5c61b9de04..00000000000 --- a/versioned_docs/version-1.3/apis-tools/java-client/job-worker.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -id: job-worker -title: "Job worker" -description: "Let's take a deeper look at job workers to handle jobs." ---- - -## Related resources - -- [Job worker basics](/components/concepts/job-workers.md) - -## The Java client's job worker - -The Java client provides a job worker that handles polling for available jobs. This allows you to focus on writing code to handle the activated jobs. - -On `open`, the job worker waits `pollInterval` milliseconds and then polls for `maxJobsActive` jobs. It then continues with the following schedule: - -1. If a poll did not activate any jobs, it waits for `pollInterval` milliseconds and then polls for more jobs. -2. If a poll activated jobs, the worker submits each job to the job handler. -3. Every time a job is handled, the worker checks whether the number of unhandled jobs have dropped below 30% of `maxJobsActive`. The first time that happens, it will poll for more jobs. -4. If a poll fails with an error response, a backoff strategy is applied. This strategy waits for the delay provided by the `backoffSupplier` and polls for more jobs. - -## Example usage - -- [Open a job worker](../java-client-examples/job-worker-open.md) - -## Backoff configuration - -When a poll fails with an error response, the job worker applies a backoff strategy. It waits for some time, after which it polls again for more jobs. This gives a Zeebe cluster some time to recover from a failure. In some cases, you may want to configure this backoff strategy to better fit your situation. - -The retry delay (i.e. 
the time the job worker waits after an error before the next poll for new jobs) is provided by the [`BackoffSupplier`](https://github.com/camunda/camunda/blob/1.3.14/clients/java/src/main/java/io/camunda/zeebe/client/api/worker/BackoffSupplier.java). You can replace it using the `.backoffSupplier()` method on the [`JobWorkerBuilder`](https://github.com/camunda/camunda/blob/main/zeebe/clients/java/src/main/java/io/camunda/zeebe/client/api/worker/JobWorkerBuilderStep1.java).
-
-By default, the job worker uses an exponential backoff implementation, which you can configure using `BackoffSupplier.newBackoffBuilder()`.
-
-The backoff strategy is especially useful for dealing with the `GRPC_STATUS_RESOURCE_EXHAUSTED` error response (see [gRPC Technical Error Handling](/apis-tools/grpc.md#technical-error-handling)).
-
-This error code indicates the Zeebe cluster is currently under too much load and has decided to reject this request.
-
-By backing off, the job worker helps Zeebe by reducing the load.
-
-:::note
-Zeebe's [backpressure mechanism](../../../self-managed/zeebe-deployment/operations/backpressure) can also be configured.
-:::
diff --git a/versioned_docs/version-1.3/apis-tools/java-client/logging.md b/versioned_docs/version-1.3/apis-tools/java-client/logging.md
deleted file mode 100644
index 75ef99948d5..00000000000
--- a/versioned_docs/version-1.3/apis-tools/java-client/logging.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-id: logging
-title: "Logging"
-description: "Here, we'll take a look at logging details in the case a job handler fails execution."
----
-
-The client uses SLF4J for logging useful notes, such as exception stack traces when a job handler fails execution. Using the SLF4J API, any SLF4J implementation can be plugged in. The following example uses Log4J 2:
-
-## Maven dependencies
-
-```xml
-<dependency>
-  <groupId>org.apache.logging.log4j</groupId>
-  <artifactId>log4j-slf4j-impl</artifactId>
-  <version>2.8.1</version>
-</dependency>
-
-<dependency>
-  <groupId>org.apache.logging.log4j</groupId>
-  <artifactId>log4j-core</artifactId>
-  <version>2.8.1</version>
-</dependency>
-```
-
-## Configuration
-
-First, add a file called `log4j2.xml` to the classpath of your application.
-
-Then, add the following content:
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<Configuration>
-  <Appenders>
-    <Console name="Console" target="SYSTEM_OUT">
-      <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n" />
-    </Console>
-  </Appenders>
-  <Loggers>
-    <Root level="info">
-      <AppenderRef ref="Console" />
-    </Root>
-  </Loggers>
-</Configuration>
-```
-
-This will log every log message to the console.
diff --git a/versioned_docs/version-1.3/apis-tools/java-client/testing.md b/versioned_docs/version-1.3/apis-tools/java-client/testing.md
deleted file mode 100644
index a7410c41661..00000000000
--- a/versioned_docs/version-1.3/apis-tools/java-client/testing.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-id: testing
-title: "Writing tests"
-description: "Use the zeebe-test module to write JUnit tests for your job worker and BPMN process."
----
-
-You can use the `zeebe-test` module to write JUnit tests for your job worker and BPMN process. This provides a JUnit rule to bootstrap the broker and some basic assertions.
-
-:::note
-`zeebe-test` is [deprecated for removal](/reference/announcements.md).
-:::
-
-## Usage in a Maven project
-
-Add `zeebe-test` as a Maven test dependency to your project:
-
-```xml
-<dependency>
-  <groupId>io.camunda</groupId>
-  <artifactId>zeebe-test</artifactId>
-  <scope>test</scope>
-</dependency>
-```
-
-## Bootstrap the broker
-
-Use the `ZeebeTestRule` in your test case to start an embedded broker. This contains a client which can be used to deploy a BPMN process or create an instance.
- -```java -import io.camunda.zeebe.client.ZeebeClient; -import io.camunda.zeebe.client.api.response.ProcessInstanceEvent; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; - -public class MyTest { - - @Rule public final ZeebeTestRule testRule = new ZeebeTestRule(); - - private ZeebeClient client; - - @Test - public void test() { - client = testRule.getClient(); - - client - .newDeployCommand() - .addResourceFromClasspath("process.bpmn") - .send() - .join(); - - final ProcessInstanceEvent processInstance = - client - .newCreateInstanceCommand() - .bpmnProcessId("process") - .latestVersion() - .send() - .join(); - } -} -``` - -## Verify the result - -The `ZeebeTestRule` also provides some basic assertions in AssertJ style. The entry point of the assertions is `ZeebeTestRule.assertThat(...)`. - -```java -final ProcessInstanceEvent processInstance = ... - -ZeebeTestRule.assertThat(processInstance) - .isEnded() - .hasPassed("start", "task", "end") - .hasVariable("result", 21.0); -``` diff --git a/versioned_docs/version-1.3/apis-tools/overview.md b/versioned_docs/version-1.3/apis-tools/overview.md deleted file mode 100644 index 0f431fe013a..00000000000 --- a/versioned_docs/version-1.3/apis-tools/overview.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -# This page exists in apis-tools, instead of apis-tools, because the location needs to be consistent across all versions. -id: working-with-apis-tools -title: "Working with APIs & Tools" -sidebar_label: "Overview" -description: "Programmatically work with Camunda Cloud through APIs & tools" ---- - -This section steps through a variety of offered APIs and clients for integration. - -## APIs and interacting with other components - -The clients mentioned below interact with Zeebe, the workflow engine integrated into Camunda Cloud. - -Other components in Camunda Cloud, such as [Tasklist API (GraphQL)](/apis-tools/tasklist-api/generated.md), provide language-agnostic APIs, but no clients to interact with them. GraphQL enables you to query, claim, and complete user tasks. - -### Additional APIs - -- [Cloud Console API clients (REST)](../apis-tools/cloud-console-api-reference.md) - Enables you to programmatically create and manage clusters, and interact with Camunda Cloud programmatically without using the Camunda Cloud UI. -- [Zeebe API](../apis-tools/grpc.md) - Zeebe clients use gRPC to communicate with the cluster. - -## Clients - -Clients allow applications to do the following: - -- Deploy processes. -- Start and cancel process instances. -- Activate jobs, work on those jobs, and subsequently complete or fail jobs. -- Publish messages. -- Update process instance variables and resolve incidents. - -Clients connect to Camunda Cloud via [gRPC](https://grpc.io), a high-performance, open-source, and universal RPC protocol. - -Camunda Cloud provides several official clients based on this API. Official clients have been developed and tested by Camunda. They also add convenience functions (e.g. thread handling for job workers) on top of the core API. - -Community clients supplement the official clients. These clients have not been tested by Camunda. 
- -### Official clients - -- [Java](../apis-tools/java-client/index.md) -- [Go](../apis-tools/go-client/get-started.md) -- [CLI](../apis-tools/cli-client/index.md) - -### Community clients - -- [C#](../apis-tools/community-clients/c-sharp.md) -- [JavaScript/NodeJS](../apis-tools/community-clients/javascript.md) -- [Python](../apis-tools/community-clients/python.md) -- [Ruby](../apis-tools/community-clients/ruby.md) -- [Rust](../apis-tools/community-clients/rust.md) -- [Spring](../apis-tools/community-clients/spring.md) - -Finally, it is possible to [build your own client](../apis-tools/build-your-own-client.md) if none of the other options are suitable. diff --git a/versioned_docs/version-1.3/apis-tools/public-api.md b/versioned_docs/version-1.3/apis-tools/public-api.md deleted file mode 100644 index 02cbe276338..00000000000 --- a/versioned_docs/version-1.3/apis-tools/public-api.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: public-api -title: "Public API" ---- - -Camunda Cloud provides a public API. This section covers the definition of the public API and backwards compatibility for version updates. - -## Backwards compatibility for public API - -Camunda Cloud versioning scheme follows the `MAJOR.MINOR.PATCH` pattern put forward by [semantic versioning](https://semver.org/). Camunda Cloud will -maintain public API backwards compatibility for `MINOR` version updates. - -Example: Update from version `1.0.x` to `1.1.y` will not break the public API. - -To learn more about our release cycle, refer to our [release policy](/reference/release-policy.md). - -## Definition of public API - -Currently, both Zeebe API and [Tasklist API](/apis-tools/tasklist-api/generated.md) are officially supported APIs: - -- [Zeebe Client Java API](/apis-tools/java-client/index.md) -- [Tasklist API](/apis-tools/tasklist-api/generated.md) - -All non-implementation Java packages (package name does not contain `impl`) of the following Maven modules. - -- `io.camunda:zeebe-client-java` - -## Other APIs and clients - -Currently, we cannot *guarantee* backwards compatibility with other APIs and clients, though we do work to offer backwards compatibility to the best of our ability. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/_category_.yml deleted file mode 100644 index ce04715e602..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Directives' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/deprecated.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/deprecated.mdx deleted file mode 100644 index f1101c21c20..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/deprecated.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: deprecated -title: deprecated ---- - -Marks an element of a GraphQL schema as no longer supported. - -```graphql -directive @deprecated( - reason: String = "No longer supported" -) -``` - -### Arguments - -#### `reason` ([`String`](../scalars/string.mdx)) - -Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/). 
diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/include.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/include.mdx deleted file mode 100644 index 06264a0d8ae..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/include.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: include -title: include ---- - -Directs the executor to include this field or fragment only when the `if` argument is true. - -```graphql -directive @include( - if: Boolean! -) -``` - -### Arguments - -#### `if` ([`Boolean`](../scalars/boolean.mdx)) - -Included when true. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/skip.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/skip.mdx deleted file mode 100644 index 8bad4470a96..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/skip.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: skip -title: skip ---- - -Directs the executor to skip this field or fragment when the `if` argument is true. - -```graphql -directive @skip( - if: Boolean! -) -``` - -### Arguments - -#### `if` ([`Boolean`](../scalars/boolean.mdx)) - -Skipped when true. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/specified-by.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/specified-by.mdx deleted file mode 100644 index 79c2d7e29c9..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/directives/specified-by.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: specified-by -title: specifiedBy ---- - -Exposes a URL that specifies the behaviour of this scalar. - -```graphql -directive @specifiedBy( - url: String! -) -``` - -### Arguments - -#### `url` ([`String`](../scalars/string.mdx)) - -The URL that specifies the behaviour of this scalar. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/enums/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/enums/_category_.yml deleted file mode 100644 index 7c0ff2dbae8..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/enums/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Enums' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/enums/task-state.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/enums/task-state.mdx deleted file mode 100644 index 48da39dfb0a..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/enums/task-state.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: task-state -title: TaskState ---- - -State of the task. - -```graphql -enum TaskState { - CREATED - COMPLETED - CANCELED -} -``` - -### Values - -#### `CREATED` - -#### `COMPLETED` - -#### `CANCELED` diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/generated.md b/versioned_docs/version-1.3/apis-tools/tasklist-api/generated.md deleted file mode 100644 index c107a6578ce..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/generated.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: schema -slug: /apis-tools/tasklist-api -title: Schema Documentation -sidebar_position: 1 ---- - -This documentation has been automatically generated from the GraphQL schema. - -Use the docs in the sidebar to find out how to use the schema: - -- **Allowed operations**: Queries and mutations. -- **Schema-defined types**: Scalars, objects, enums, interfaces, unions, and input objects. - -Generated on 12/16/2021, 5:19:28 PM. 
diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/_category_.yml deleted file mode 100644 index 81e82aa4a1e..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Inputs' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/task-query.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/task-query.mdx deleted file mode 100644 index 109a04a2732..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/task-query.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: task-query -title: TaskQuery ---- - -Task query - query to get one page of tasks. - -```graphql -type TaskQuery { - state: TaskState - assigned: Boolean - assignee: String - candidateGroup: String - pageSize: Int - taskDefinitionId: String - searchAfter: [String!] - searchAfterOrEqual: [String!] - searchBefore: [String!] - searchBeforeOrEqual: [String!] -} -``` - -### Fields - -#### `state` ([`TaskState`](../enums/task-state.mdx)) - -State of the tasks - -#### `assigned` ([`Boolean`](../scalars/boolean.mdx)) - -Are the tasks assigned? - -#### `assignee` ([`String`](../scalars/string.mdx)) - -Who is assigned to the tasks? - -#### `candidateGroup` ([`String`](../scalars/string.mdx)) - -given group is in candidate groups list - -#### `pageSize` ([`Int`](../scalars/int.mdx)) - -Size of tasks page (default: 50). - -#### `taskDefinitionId` ([`String`](../scalars/string.mdx)) - -Task definition ID - what's the BPMN flow node? - -#### `searchAfter` ([`String`](../scalars/string.mdx)) - -Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values plus same sort values. - -#### `searchAfterOrEqual` ([`String`](../scalars/string.mdx)) - -Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values. - -#### `searchBefore` ([`String`](../scalars/string.mdx)) - -Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values plus same sort values. - -#### `searchBeforeOrEqual` ([`String`](../scalars/string.mdx)) - -Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/variable-input.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/variable-input.mdx deleted file mode 100644 index 3b15fcb2814..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/inputs/variable-input.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: variable-input -title: VariableInput ---- - -Change or add a variable with name and value. - -```graphql -type VariableInput { - name: String! - value: String! 
-} -``` - -### Fields - -#### `name` ([`String`](../scalars/string.mdx)) - -#### `value` ([`String`](../scalars/string.mdx)) diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/interfaces/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/interfaces/_category_.yml deleted file mode 100644 index f02260045e7..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/interfaces/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Interfaces' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/_category_.yml deleted file mode 100644 index c3da91244f6..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Mutations' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/claim-task.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/claim-task.mdx deleted file mode 100644 index d0152e09be9..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/claim-task.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: claim-task -title: claimTask ---- - -Claim a task with taskId to currently logged in user. Returns the task. - -```graphql -claimTask( - taskId: String! - assignee: String -): Task! - -``` - -### Arguments - -#### `taskId` ([`String`](../scalars/string.mdx)) - -#### `assignee` ([`String`](../scalars/string.mdx)) - -### Type - -#### [`Task`](../objects/task.mdx) - -Describes the User task. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/complete-task.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/complete-task.mdx deleted file mode 100644 index 5965e92f932..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/complete-task.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: complete-task -title: completeTask ---- - -Complete a task with taskId and optional variables. Returns the task. - -```graphql -completeTask( - taskId: String! - variables: [VariableInput!]! -): Task! - -``` - -### Arguments - -#### `taskId` ([`String`](../scalars/string.mdx)) - -#### `variables` ([`VariableInput`](../inputs/variable-input.mdx)) - -### Type - -#### [`Task`](../objects/task.mdx) - -Describes the User task. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/delete-process-instance.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/delete-process-instance.mdx deleted file mode 100644 index f7b45e47efc..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/delete-process-instance.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: delete-process-instance -title: deleteProcessInstance ---- - -Delete process instance by given processInstanceId. Returns true if process instance could be deleted. - -```graphql -deleteProcessInstance( - processInstanceId: String! -): Boolean! - -``` - -### Arguments - -#### `processInstanceId` ([`String`](../scalars/string.mdx)) - -### Type - -#### [`Boolean`](../scalars/boolean.mdx) - -The `Boolean` scalar type represents `true` or `false`. 
diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/unclaim-task.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/unclaim-task.mdx deleted file mode 100644 index d7510f8fe61..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/mutations/unclaim-task.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: unclaim-task -title: unclaimTask ---- - -Unclaim a task with taskId. Returns the task. - -```graphql -unclaimTask( - taskId: String! -): Task! - -``` - -### Arguments - -#### `taskId` ([`String`](../scalars/string.mdx)) - -### Type - -#### [`Task`](../objects/task.mdx) - -Describes the User task. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/_category_.yml deleted file mode 100644 index 799b58c5354..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Objects' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/form.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/form.mdx deleted file mode 100644 index 22e37d3144a..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/form.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: form -title: Form ---- - -Describes task embedded form - -```graphql -type Form { - id: String! - processDefinitionId: String! - schema: String! -} -``` - -### Fields - -#### `id` ([`String`](../scalars/string.mdx)) - -The unique identifier of the embedded form within one process - -#### `processDefinitionId` ([`String`](../scalars/string.mdx)) - -Reference to process definition - -#### `schema` ([`String`](../scalars/string.mdx)) - -Form content diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/task.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/task.mdx deleted file mode 100644 index 699f2ee6632..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/task.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -id: task -title: Task ---- - -Describes the User task. - -```graphql -type Task { - id: ID! - name: String! - taskDefinitionId: String! - processName: String! - creationTime: String! - completionTime: String - assignee: String - variables: [Variable!] - taskState: TaskState! - sortValues: [String!] - isFirst: Boolean - formKey: String - processDefinitionId: String - candidateGroups: [String!] -} -``` - -### Fields - -#### `id` ([`ID`](../scalars/id.mdx)) - -The unique identifier of the task - -#### `name` ([`String`](../scalars/string.mdx)) - -Name of the task - -#### `taskDefinitionId` ([`String`](../scalars/string.mdx)) - -Task Definition ID (node BPMN id) of the process - -#### `processName` ([`String`](../scalars/string.mdx)) - -Name of the process - -#### `creationTime` ([`String`](../scalars/string.mdx)) - -When was the task created - -#### `completionTime` ([`String`](../scalars/string.mdx)) - -When was the task completed - -#### `assignee` ([`String`](../scalars/string.mdx)) - -Username/id of who is assigned to the task - -#### `variables` ([`Variable`](../objects/variable.mdx)) - -Variables associated to the task - -#### `taskState` ([`TaskState`](../enums/task-state.mdx)) - -State of the task - -#### `sortValues` ([`String`](../scalars/string.mdx)) - -Array of values to be copied into `TaskQuery` to request for next or previous page of tasks. 
- -#### `isFirst` ([`Boolean`](../scalars/boolean.mdx)) - -Flag to show that the task is first in current filter - -#### `formKey` ([`String`](../scalars/string.mdx)) - -Reference to the task form - -#### `processDefinitionId` ([`String`](../scalars/string.mdx)) - -Reference to process definition - -#### `candidateGroups` ([`String`](../scalars/string.mdx)) - -Candidate groups diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/user.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/user.mdx deleted file mode 100644 index 93dcd9a5096..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/user.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: user -title: User ---- - -Describes the user. - -```graphql -type User { - username: ID! - firstname: String - lastname: String - permissions: [String!] -} -``` - -### Fields - -#### `username` ([`ID`](../scalars/id.mdx)) - -#### `firstname` ([`String`](../scalars/string.mdx)) - -#### `lastname` ([`String`](../scalars/string.mdx)) - -#### `permissions` ([`String`](../scalars/string.mdx)) diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/variable.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/variable.mdx deleted file mode 100644 index 8b408cfd67c..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/objects/variable.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: variable -title: Variable ---- - -Variable used in task. - -```graphql -type Variable { - id: ID! - name: String! - value: String! - previewValue: String! - isValueTruncated: Boolean! -} -``` - -### Fields - -#### `id` ([`ID`](../scalars/id.mdx)) - -#### `name` ([`String`](../scalars/string.mdx)) - -#### `value` ([`String`](../scalars/string.mdx)) - -full variable value - -#### `previewValue` ([`String`](../scalars/string.mdx)) - -value preview (limited in size) - -#### `isValueTruncated` ([`Boolean`](../scalars/boolean.mdx)) - -shows, whether previewValue contains truncated value or full value diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/_category_.yml deleted file mode 100644 index 6b8ee7e51fc..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Queries' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/current-user.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/current-user.mdx deleted file mode 100644 index 0327ccbc911..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/current-user.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: current-user -title: currentUser ---- - -Get currently logged in user. - -```graphql -currentUser: User! - -``` - -### Type - -#### [`User`](../objects/user.mdx) - -Describes the user. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/form.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/form.mdx deleted file mode 100644 index 7be8d2d9418..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/form.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: form -title: form ---- - -Get task form by id and processDefinitionId - -```graphql -form( - id: String! - processDefinitionId: String! 
-): Form - -``` - -### Arguments - -#### `id` ([`String`](../scalars/string.mdx)) - -#### `processDefinitionId` ([`String`](../scalars/string.mdx)) - -### Type - -#### [`Form`](../objects/form.mdx) - -Describes task embedded form diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/task.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/task.mdx deleted file mode 100644 index c7819075237..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/task.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: task -title: task ---- - -Get one task by id. Returns task or error when task does not exist. - -```graphql -task( - id: String! -): Task! - -``` - -### Arguments - -#### `id` ([`String`](../scalars/string.mdx)) - -### Type - -#### [`Task`](../objects/task.mdx) - -Describes the User task. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/tasks.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/tasks.mdx deleted file mode 100644 index b29dfa57db8..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/tasks.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: tasks -title: tasks ---- - -Get list of tasks based on `TaskQuery`. - -```graphql -tasks( - query: TaskQuery! -): [Task!]! - -``` - -### Arguments - -#### `query` ([`TaskQuery`](../inputs/task-query.mdx)) - -### Type - -#### [`Task`](../objects/task.mdx) - -Describes the User task. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/variable.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/variable.mdx deleted file mode 100644 index c05ef0de9eb..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/variable.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: variable -title: variable ---- - -Get the variables by variable id - -```graphql -variable( - id: String! -): Variable! - -``` - -### Arguments - -#### `id` ([`String`](../scalars/string.mdx)) - -### Type - -#### [`Variable`](../objects/variable.mdx) - -Variable used in task. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/variables.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/variables.mdx deleted file mode 100644 index 0f9e382ac51..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/queries/variables.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: variables -title: variables ---- - -Get a collection of Variables by name - -```graphql -variables( - taskId: String! - variableNames: [String!]! -): [Variable!]! - -``` - -### Arguments - -#### `taskId` ([`String`](../scalars/string.mdx)) - -#### `variableNames` ([`String`](../scalars/string.mdx)) - -### Type - -#### [`Variable`](../objects/variable.mdx) - -Variable used in task. diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/_category_.yml deleted file mode 100644 index cdb833b6e4e..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Scalars' diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/boolean.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/boolean.mdx deleted file mode 100644 index cfd55e9b0f9..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/boolean.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: boolean -title: Boolean ---- - -The `Boolean` scalar type represents `true` or `false`. 
- -```graphql -scalar Boolean -``` diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/id.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/id.mdx deleted file mode 100644 index 7a6918eb044..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/id.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: id -title: ID ---- - -The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as `"4"`) or integer (such as `4`) input value will be accepted as an ID. - -```graphql -scalar ID -``` diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/int.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/int.mdx deleted file mode 100644 index 05c424ede47..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/int.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: int -title: Int ---- - -The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1. - -```graphql -scalar Int -``` diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/string.mdx b/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/string.mdx deleted file mode 100644 index b93b73c1561..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/scalars/string.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: string -title: String ---- - -The `String` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text. - -```graphql -scalar String -``` diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/sidebar-schema.js b/versioned_docs/version-1.3/apis-tools/tasklist-api/sidebar-schema.js deleted file mode 100644 index 63bcbd08b80..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/sidebar-schema.js +++ /dev/null @@ -1,8 +0,0 @@ -module.exports = { - "Tasklist API (GraphQL)": [ - { - type: "autogenerated", - dirName: "apis-tools/tasklist-api", - }, - ], -}; diff --git a/versioned_docs/version-1.3/apis-tools/tasklist-api/unions/_category_.yml b/versioned_docs/version-1.3/apis-tools/tasklist-api/unions/_category_.yml deleted file mode 100644 index 96c610bb390..00000000000 --- a/versioned_docs/version-1.3/apis-tools/tasklist-api/unions/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 'Unions' diff --git a/versioned_docs/version-1.3/components/best-practices.md b/versioned_docs/version-1.3/components/best-practices.md deleted file mode 100644 index 661c513143e..00000000000 --- a/versioned_docs/version-1.3/components/best-practices.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: best-practices -title: Best Practices -description: "Similar to our Camunda Best Practices, we offer Best Practices with a mix of conceptual and practical implementation information for working with Camunda Cloud." ---- - -Similar to our [Camunda Best Practices](https://camunda.com/best-practices/_/), we offer Best Practices with a mix of conceptual and practical implementation information for working with Camunda Cloud. Currently, these are available through blog posts and our [Best Practices hub](https://camunda.com/best-practices/_/). 
- -Find our Camunda Cloud and BPMN-focused material below: - -## Blog posts - -* [Drafting Your Camunda Cloud Architecture: Connecting The Workflow Engine With Your World](https://blog.bernd-ruecker.com/drafting-your-camunda-cloud-architecture-connecting-the-workflow-engine-with-your-world-3d94e8d404d6) -* [Service Integration Patterns With BPMN And Camunda Cloud](https://blog.bernd-ruecker.com/service-integration-patterns-with-bpmn-and-camunda-cloud-53b0f458e49) -* [Writing Good Workers For Camunda Cloud](https://blog.bernd-ruecker.com/writing-good-workers-for-camunda-cloud-61d322cad862) - -## Camunda's Best Practices hub - -* [Naming Technically Relevant IDs](https://camunda.com/best-practices/naming-technically-relevant-ids/) -* [Naming BPMN Elements](https://camunda.com/best-practices/naming-bpmn-elements/) -* [Creating Readable Process Models](https://camunda.com/best-practices/creating-readable-process-models/) - -This page will be updated as more Best Practices are made available. diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.png b/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.png deleted file mode 100644 index dad38aa5e85..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.pptx b/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.pptx deleted file mode 100644 index 2cae051b3b5..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.png b/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.png deleted file mode 100644 index e45a1f8f91f..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.pptx b/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.pptx deleted file mode 100644 index dddb0605187..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7.md b/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7.md deleted file mode 100644 index bd509851cc0..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack-c7.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Deciding about your Camunda Platform 7 stack -tags: - - Architecture - - Stack - - Database - - Application Server - - Spring Boot - - Maven -description: 
"Camunda Platform 7 is very flexible and can be hooked into the architecture of your choice, giving you a number of important decisions to make." ---- - -Camunda Platform 7 is very flexible and can be hooked into the architecture of your choice, giving you a number of important decisions to make. If you don't have special architecture requirements, we recommend following the proposed greenfield stack. You can also check the decision criteria presented below to make more customized choices. Choosing the stack will have big influence on your overall architecture. - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only! If you look for Camunda Cloud, please refer to [Deciding about your Camunda Cloud stack](../deciding-about-your-stack/). -::: - -## The Java greenfield stack - -The greenfield stack is pretty similar for various languages. This section described the currently a recommendation for Java developers. If you use different programming languages (like .NET or JavaScript), we recommend looking at Camunda Cloud, which supports polyglott environments better. The greenfield recommendation has recently changed. So if the recommendation below is surprising to you, you might want to check [this blog post](https://blog.bernd-ruecker.com/moving-from-embedded-to-remote-workflow-engines-8472992cc371). - -Use the following stack: - -1. Leverage the [Camunda Run](https://docs.camunda.org/manual/latest/installation/camunda-bpm-run/) distribution to run Camunda Platform 7 using the [Enterprise Edition](http://camunda.com/bpm/), preferrably [via Docker](https://docs.camunda.org/manual/latest/user-guide/camunda-bpm-run/#starting-camunda-platform-run-using-docker). - -1. Build your process solution project as a [Spring Boot](https://spring.io/projects/spring-boot) application, using the [Camunda 4 REST Client for Spring Boot](https://github.com/camunda-community-hub/camunda-engine-rest-client-java/). - -2. Use [Maven](https://maven.apache.org/) as a build tool. - -3. Use your favorite IDE, for example Visual Studio Code, IntelliJ or Eclipse. - -3. Use [Oracle JDK 15](https://www.oracle.com/technetwork/java/javase/downloads/index.html) as Java runtime. - -4. Model the processes with the [Camunda Modeler](https://camunda.org/download/modeler/). - -4. Add your process models and all Java code to the project. - -5. The default distribution leverages an H2 file-based Java database. We recommend using this for development. We *strongly discourage* multiple developers share the same database during development as this can lead to a multitude of problems. - -To run the process application *in production*, extend the stack: - -1. Use [PostgreSQL](http://www.postgresql.org/), or the database you already operate. - -2. [Secure your installation](https://docs.camunda.org/manual/latest/user-guide/security/). - -3. Run the process application by copying the `jar` file to the server and start it with `java -jar YourProcessApplication.jar`. This can also be done via Docker. - -See our [example application](https://github.com/berndruecker/camunda-platform-remote-spring-boot-example). 
-
-### Understanding the stack's architecture
-
-The basic architecture with this stack is shown in the following diagram:
-
-![greenfield stack architecture diagram](deciding-about-your-stack-c7-assets/greenfield-architecture.png)
-
-### Understanding our motivation for the stack
-
-While we went through long and detailed discussions to come to this recommendation, it *doesn't* mean that it is necessarily superior to alternative stacks. You can still feel confident if you go down another route (see below for alternative options). But for our Best Practices, we wanted to give *exactly
-one* greenfield recommendation for all our customers who have no special requirements on the stack.
-
-We decided on this stack for the following reasons:
-
-- All components are open-source and easily available.
-- Camunda Run is the preferred distribution, as it focuses on external tasks, the more modern paradigm also present in Camunda Cloud.
-- Spring Boot is currently the most adopted way of building Java applications.
-- Spring Boot applications are easy to customize as well as easy to roll out into test and production environments, either on-premises or in the cloud.
-- PostgreSQL has a great track-record for performance.
-
-There are several *advantages to using the greenfield stack*:
-
-- *Fewer decisions:* Depending on your experience with the Java cosmos, the decisions to choose a stack might not be easy to take. So if you don't have special requirements, follow a well-known path.
-- *Proven:* Many of our customers use this stack with great success.
-- *More documentation & Best Practices:* You don't have to write your own extensive documentation, just point to the Camunda docs.
-- *Easier support:* Asking for help gets much easier as you do not have to explain your setup in detail.
-
-### Considering Camunda Cloud instead
-
-Camunda Cloud is an alternative process automation offering that is quickly catching up on functionality. For new projects, consider using Camunda Cloud from the start. You can find [a quick comparison of concepts in the docs](/guides/migrating-from-Camunda-Platform.md#conceptual-differences). Note that architecturally, the recommended greenfield stack in this best practice is close to what you do using Camunda Cloud.
-
-### Getting started with the greenfield stack
-
-Check the **prerequisites**:
-
-* Install [Oracle JDK 15](https://www.oracle.com/technetwork/java/javase/downloads/index.html).
-* Install [Camunda Modeler](https://camunda.org/download/modeler/).
-* Install an IDE like [Eclipse](https://eclipse.org/downloads/). We recommend the latest "Eclipse IDE for Java Developers".
-  * Activate workspace file sync [refresh using native hooks or polling](http://stackoverflow.com/questions/4343735/avoiding-resource-is-out-of-sync-with-the-filesystem) to improve interaction of Eclipse and Camunda Modeler.
-  * [Add Camunda Assert to your Eclipse content assist favorites](https://github.com/camunda/camunda-bpm-platform/blob/master/test-utils/assert/README.md).
-
-* Check your network access to [Camunda Artifactory](https://artifacts.camunda.com/ui/) for downloading Maven Artifacts.
-* As an Enterprise Customer, check that you have your company credentials at hand to log in and get enterprise versions.
-
-Create your **development project**:
-
-1. Create a new Spring Boot project (e.g. using [Spring initializr](https://start.spring.io/)).
-2. 
Add the dependency for the [Camunda Engine OpenAPI REST Client](https://github.com/camunda-community-hub/camunda-engine-rest-client-java/) community extension: - -``` - - org.camunda.community - camunda-engine-rest-client-complete-springboot-starter - 7.16.0-alpha1 - -``` - -3. Model a process with Camunda Modeler and save it under `src/main/resources`. -4. Run the main Java application class via your IDE. -5. Play around with your process using the Camunda web apps (user `demo`, password `demo`): - - [Tasklist](http://localhost:8080/camunda/app/tasklist/) - - [Cockpit](http://localhost:8080/camunda/app/cockpit/) -6. Package your application with `mvn clean install`. -7. Bring the `jar` file to your test or production server and start it there. -8. You can set up or integrate it into an existing continuous delivery pipeline. - -## Customize your stack - -### Selecting the process engine mode - - -| | Camunda Run (Remote engine) | Embedded Engine | Container-Managed Engine | -| -- | -- | -- | -- | -| | Run the engine as an isolated BPM server only, communicating with it via Web Services. | Use the process engine as a simple library within your own application, typically started via Spring Boot. | Run the engine as a service preconfigured in your Java EE container. | -| Engine Bootstrap / Lifecycle Management | Out-of-the-box | Out-of-the-box for Spring Boot, otherwise do-it-yourself (see options below) | Out-of-the-box | -| Camunda Webapps work in all use-cases | ✔ | See limitations below | ✔ | -| Camunda REST API work in all use-cases | ✔ | See options below | ✔ | -| [Multiple Process Applications can share a central engine](https://docs.camunda.org/manual/latest/user-guide/process-applications/) | ✔ | Doable with a shared database, but requires custom development and has limitations | ✔ | -| [Multiple Engines can share resources (e.g. share the Job Executor)](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#the-job-executor-and-multiple-process-engines) | | | ✔ | -| One application WAR/EAR can include the process engine | | ✔ | | -| Supports untouched ("vanilla") containers | ✔ | ✔ | | -| Runs in every Java environment | ✔ | ✔ | [On Supported Containers](https://docs.camunda.org/manual/latest/introduction/supported-environments/#container-application-server-for-runtime-components-excluding-camunda-cycle) | -| Responsibility for Engine Installation and Configuration | Operations or Application Developer | Application Developer | Operations or Application Developer | -| Application point of view on process engine | Remote Server | Library | Library | -| Possible communication types with services | Remote | Java InVM, Remote | Java InVM, Remote | -| Programming language | Polyglot (Java, NodeJs, C#, ...) | Java | Java | -| Use when | **Default**, if there is no reason against it. Especially if your architecture or applications are not Java based. | You want a single deployment including the engine. | You use a supported application server and prefer to separate engine installation from application development. | -| | [Learn More](https://docs.camunda.org/manual/latest/introduction/architecture/#standalone-remote-process-engine-server) | [Learn More](https://docs.camunda.org/manual/latest/introduction/architecture/#embedded-process-engine) | [Learn More](https://docs.camunda.org/manual/latest/introduction/architecture/#shared-container-managed-process-engine) | - -In essence, the general recommendation is: - -* Use Camunda Run whenever possible. 
- -* Do not use a container-managed engine. The container managed engine allows to separate installation and configuration of the engine from the application development. This is an advantage if you really separate these roles within your organization. However, we experienced that this causes trouble more often than it does help. Developers most often are still responsible to install the engine, but might not be able to access the application server itself. That also explains the rise of Spring Boot (often alongside with Docker) and many projects successfully moved to that approach instead. Unless you have good reasons, we would not recommend starting new projects using a container-managed engine. - -* Use an embedded engine via Spring Boot if you need to provide one combined deployment artifact. - -### Understanding embedded engine specifics - -If you want to use an embedded engine (which is not the default recommendation; see above,) the following information will help you use it correctly. - -#### Using Spring Boot - -The Camunda Spring Boot Starter is a clean way of controlling the embedded engine easily, so you don't have to think about the specifics mentioned below in this section. This makes Spring Boot a good choice for Camunda projects. - -#### Bootstrapping the engine and managing its lifecycle - -When running the engine in embedded mode, you have to control the *lifecycle* of the engine yourself, basically *starting up* and *shutting down* the engine, and providing access to the API whenever a client needs it. You have several options to do that. - -| | Spring Boot | Spring Application Context | `processes.xml` | Programmatic | -| -- | -- | -- | --| --| -| | Configure, start, and stop the engine via Spring Boot Starter | Configure, start, and stop the engine via Spring Beans defined in your Application Context. | Configure, start, and stop the engine via Camunda’s processes.xml descriptor and a ProcessApplication class. | Configure, start, and stop the engine yourself programmatically by using Java code. | -| Use when | You target Spring Boot as runtime environment. | You already use Spring. | You do not want to introduce a Spring dependency just for Camunda. | You need full control over the engine or want to do advanced customizations. | -| Unlimited Configuration Options | ✔ | ✔ | | ✔ | -| Development Effort | Low | Medium | Low | High | | - -#### Providing a REST API - -When running an embedded engine, it might be harder to deploy the pre-built REST API. - -| | Use Spring Boot Starter for REST API | Embed Camunda’s REST API | Use Camunda’s Standalone Web App REST API | -| -- | -- | -- | -- | -| | The Spring Boot Starter allows to run the REST API as well as the Camunda web applications. | Provide Camunda’s REST API by embedding its JAX-RS code into your application. | Deploy Camunda’s "Standalone" Web Application (which runs its own engine) and use its REST API. | -| No Classloading Restrictions | ✔ | ✔ | | -| Development Effort | Low | High | Low | | - -#### Providing Camunda web applications (Tasklist, Cockpit) - -When running an embedded engine, you may want to use a Camunda web application like Tasklist and Cockpit, but have to decide how exactly to run these web applications in your environment. - -| | Use Spring Boot Starter for Camunda Web Applications | Camunda "Standalone" Web Application | Embedded Camunda Web Applications | -| -- | -- | -- | -- | -| | The Spring Boot Starter allows you to run the REST API as well as the Camunda web applications. 
| Deploy Camunda’s "Standalone" Web Application, which is a WAR running its own engine, and point it to your applications engine database. | Embed the Camunda Web Applications into your own application, which is not a particularly easy task to do. | -| Classloading Restrictions | None | For example, you can not submit a task in Tasklist when a following synchronously called service uses a class contained in your own application. However, you can solve this by adding additional [safe points](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/). | None | -| Development Effort | Low | Low | High (undocumented) | -| | [Spring Boot Starter](https://github.com/camunda/camunda-bpm-platform/tree/master/spring-boot-starter/) | [Download Standalone Web Application](http://camunda.org/download/) | [Implement e.g. via Maven WAR Overlays](https://maven.apache.org/plugins/maven-war-plugin/overlays.html) | - -### Choosing a database - -Camunda Platform 7 requires a *relational database* for persistence. Even if the persistence provider is in theory pluggable and can be exchanged by e.g. some *NoSQL* persistence this is neither recommended nor supported. Therefore, if you have use cases for this, discuss them with Camunda beforehand! - -| | PostgreSQL | Oracle | H2 | Other databases | -| -- | -- | -- | -- | -- | -| | PostgreSQL is an open-source, object-relational database system. | Oracle Database is a commercial object-relational database system. | H2 is a Java SQL database with in-memory mode and a small footprint. | | -| Best Performance Observations | ✔ | ✔ | | | -| In-Memory Mode | | | ✔ | | -| No installation required | | | ✔ | | -| Recommended for unit testing | | | ✔ | -| Recommended for production use | ✔ | ✔ | | ✔ ([if supported](https://docs.camunda.org/manual/latest/introduction/supported-environments/#databases)) | -| | [Learn More](http://www.postgresql.org/) | [Learn More](https://www.oracle.com/database) | [Learn More](http://www.h2database.com/) | [Supported Databases](https://docs.camunda.org/manual/latest/introduction/supported-environments/#databases) | - -Ideally, use the database your organization already operates and your team is experienced with! - -### Modeling for executable processes - -We distinguish two different roles modeling in BPM projects: - -* *Process Developers* develop an executable process implementation. Process developers implementing solutions with Camunda must use Camunda Modeler to model executable processes, edit technical attributes, and manage and version (e.g. in Git or SVN) the resulting (XML) files as part of the development project. - -* *Process Analysts* capture the operational know how about a process. For this part of the work, it is possible to use a different tool than Camunda Modeler. - -| | Camunda Modeler | Third-Party Modeler (BPMN Standard Compliant) | Third-Party Modeler (Non-Compliant to Standard) | -| -- | -- | -- | -- | -| Roundtrip in between process analysts and developers possible | ✔ | ✔ (Carefully check level of BPMN compliance - the [Model Interchange Working Group](http://bpmn-miwg.github.io/bpmn-miwg-tools/) can serve as a first starting point | | -| Use for Process Analysts | ✔ | ✔ | | -| Use for Process Developers | ✔ | | | -| Use when | You do not have a BPMN standard compliant modeling tool already rolled out. | You already rolled out a BPMN tool with a standard compliancy sufficient for roundtrip. | Try to avoid | -| | [Download](https://camunda.org/download/modeler/) | [e.g. 
Cawemo](http://cawemo.com/) | |
\ No newline at end of file
diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack.md b/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack.md
deleted file mode 100644
index b6afa0df422..00000000000
--- a/versioned_docs/version-1.3/components/best-practices/architecture/deciding-about-your-stack.md
+++ /dev/null
@@ -1,94 +0,0 @@
----
-title: Deciding about your stack
-tags:
-  - Architecture
-  - Stack
-  - Database
-  - Application Server
-  - Spring Boot
-  - Maven
----
-
-If you don't have special architecture requirements, we recommend using SaaS and following the proposed greenfield stack.
-
-:::caution Camunda Cloud
-This best practice targets Camunda Cloud only! If you are looking for Camunda Platform 7, please refer to [Deciding about your Camunda Platform 7 stack](../deciding-about-your-stack-c7/).
-:::
-
-## The greenfield stack
-
-We like to give one greenfield stack recommendation, which is the stack you can simply use if there is no reason against it. And while we went through long and detailed discussions to come to this recommendation, it *doesn't* mean that it is necessarily superior to alternative stacks. You can still feel confident if you go down another route (see below for alternative options).
-
-The stack looks pretty similar in various programming languages. Please use the programming language your team is most familiar with. If in doubt, use Java or JavaScript.
-
-### The Java greenfield stack
-
-![greenfield stack architecture diagram](deciding-about-your-stack-assets/greenfield-architecture.png)
-
-Use the following stack:
-
-1. Use [Camunda Cloud SaaS](https://accounts.cloud.camunda.io/signup) and create a cluster there.
-
-2. Build your process solution project as a [Spring Boot](https://spring.io/projects/spring-boot) application, using the [Spring Zeebe](../../../apis-tools/community-clients/spring.md) integration.
-
-3. Use [Maven](https://maven.apache.org/) as a build tool.
-
-4. Use your favorite IDE, for example Visual Studio Code, IntelliJ, or Eclipse.
-
-5. Use [Open JDK 17](https://jdk.java.net/17/) as the Java runtime.
-
-6. Model the processes with the [Camunda Modeler](https://camunda.org/download/modeler/).
-
-7. Add your process models and all Java code to the project.
-
-To run the process application *in production*:
-
-1. Run the process application by copying the `jar` file to the server and starting it with `java -jar YourProcessApplication.jar`. This is most often done via Docker.
-
-See our [example application](https://github.com/camunda-community-hub/camunda-cloud-examples/tree/main/twitter-review-java-springboot).
-
-
-We decided on this stack for the following reasons:
-
-- All components are open-source and easily available.
-- SaaS is the easiest way to consume capabilities like a workflow engine.
-- Spring Boot is currently the most adopted way of building Java applications.
-- Spring Boot applications are easy to customize as well as easy to roll out into test and production environments, either on-premises or in the cloud.
-
-You might want to follow the [get started guide for microservices orchestration](/guides/getting-started-orchestrate-microservices.md) or follow the instructions in [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) to get going.
-
-
-## Polyglot stacks
-
-You can also develop process solutions in any other programming language, just as described for Java above.
Simply use the [existing language clients / SDKs](/docs/apis-tools/working-with-apis-tools/) for doing this. - - - -## Customize your stack - -### Running Camunda Cloud self-managed - -You can also run Camunda Cloud self-managed on your own Kubernetes cluster. Details can be found in the [docs](/docs/self-managed/about-self-managed). - -While there [exists a Docker Compose configuration](/self-managed/zeebe-deployment/docker/install.md) to run Camunda Cload locally, this is not meant to be used for production, but rather to quickly startup compenents on a developer machine to be able to play around. - - -### Modeling for executable processes - -We distinguish two different roles modeling in BPM projects: - -* *Process Developers* develop an executable process implementation. Process developers implementing solutions with Camunda must use Camunda Modeler to model executable processes, edit technical attributes, and manage and version (e.g. in Git or SVN) the resulting (XML) files as part of the development project. - -* *Process Analysts* capture the operational know how about a process. For this part of the work, it is possible to use a different tool than Camunda Modeler. - -| | Camunda Modeler | Third-Party Modeler (BPMN Standard Compliant) | Third-Party Modeler (Non-Compliant to Standard) | -| -- | -- | -- | -- | -| Roundtrip in between process analysts and developers possible | ✔ | ✔ (Carefully check level of BPMN compliance - the [Model Interchange Working Group](http://bpmn-miwg.github.io/bpmn-miwg-tools/) can serve as a first starting point | | -| Use for Process Analysts | ✔ | ✔ | | -| Use for Process Developers | ✔ | | | -| Use when | You do not have a BPMN standard compliant modeling tool already rolled out. | You already rolled out a BPMN tool with a standard compliancy sufficient for roundtrip. | Try to avoid | -| | [Download](https://camunda.org/download/modeler/) | [e.g. 
Cawemo](http://cawemo.com/) | | \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/assign.png b/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/assign.png deleted file mode 100644 index f71baf739ae..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/assign.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/follow-up-filter.png b/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/follow-up-filter.png deleted file mode 100644 index 083e81a929f..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/follow-up-filter.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/process-variables.png b/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/process-variables.png deleted file mode 100644 index c5acec7d821..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/process-variables.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/processinstanceinfo.png b/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/processinstanceinfo.png deleted file mode 100644 index 7cf9c019fce..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/processinstanceinfo.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/task-lifecycle.png b/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/task-lifecycle.png deleted file mode 100644 index b97f021ce62..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7-assets/task-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7.md b/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7.md deleted file mode 100644 index ccad9b30068..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/architecture/extending-human-task-management-c7.md +++ /dev/null @@ -1,276 +0,0 @@ ---- -title: Extending human task management in Camunda Platform 7 -tags: - - Human Task - - Delegation - - Escalation - - E-Mail Notification - - 4-Eyes-Principle - - Overdue Task - ---- - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only and is an appendum to [understanding human task management](../understanding-human-tasks-management/). 
-:::
-
-## The Camunda Platform 7 task lifecycle
-
-Do not show the *lifecycle* of user *tasks* in the process model; it is generic and common to all processes, and can therefore be controlled by using the [Camunda BPM task lifecycle](https://docs.camunda.org/manual/latest/webapps/tasklist/task-lifecycle/) features.
-
-![Task lifecycle](extending-human-task-management-c7-assets/task-lifecycle.png)
-
-* Create: New tasks are normally created as part of *process execution*, but can be created by a *user action*, too (as standalone tasks). `taskService.newTask()`
-* Set Candidate: Typically, candidates are initially set to *groups* of people as part of *process execution*, but can be requested by API, too. `taskService.addCandidateGroup(taskId, groupId)`
-* Claim: Individual members of a candidate group *assign themselves* to tasks when working on them. `taskService.claim(taskId, userId)`
-* Unclaim: Individual assignees *unassign themselves* and move a task back to the candidates. `taskService.claim(taskId, null)`
-* Assign: Directly assign a specific individual, either as part of *process execution* or because it is explicitly requested via the API. `taskService.setAssignee(taskId, userId)`
-* Reassign: Individual assignees may want to *hand over* a task to somebody else. `taskService.setAssignee(taskId, userId)`
-* Delegate: Individual assignees may want to delegate (part of) the work: ask somebody else to *resolve (part of) the work* in order to pass the task back subsequently. `taskService.delegateTask(String taskId, String userId)`
-* Resolve: After having resolved the requested work, individual assignees will want to *pass a delegated task back to the owner*: the original assignee. `taskService.resolveTask(String taskId)`
-* Complete: This is how you *close the work on a task* and ask the process execution to move on. `taskService.complete(String taskId)`
-
-## Typical use cases
-
-### Handing over tasks directly to other people
-
-You can always hand over a task assigned to you simply by *changing the assignee*. This means that the new assignee is now responsible and supposed to carry out the task all by themselves.
-
-```java
-taskService.setAssignee(taskId, "kermit");
-```
-
-This can also be achieved via the Camunda tasklist:
-
-![Task assignment](extending-human-task-management-c7-assets/assign.png)
-
-### Delegating tasks to other people
-
-Delegate a task assigned to you by using Camunda's `delegateTask`. This means that somebody else is supposed to resolve (some of) the work and then pass the task back to you by resolving it. The original assignee is remembered as the "owner" of the task. A typical example is decision support: some other employees collect information in order to prepare a decision, but the original assignee has to take that decision.
-
-Even though the engine does not prevent a delegated task from being completed directly, we recommend not allowing this if you use delegation. The task should always be resolved and then later completed by the owner. That's why there is no transition from "DELEGATED" to "COMPLETED" in the lifecycle shown.
-
-```java
-taskService.delegateTask(taskId, "gonzo");
-// and later
-taskService.resolveTask(taskId);
-```
-
-### Notifying people about their tasks
-
-You might want to notify people about new tasks (e.g. via email). Do this by implementing a Camunda TaskListener, as shown in [this example](https://github.com/camunda/camunda-bpm-examples/tree/master/usertask/task-assignment-email); a minimal sketch follows below.
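-As a rough sketch, such a notification listener could look as follows. The class name is made up, and the actual mail delivery is only indicated by a comment, since the mail infrastructure depends on your environment; register the listener on the user task (or globally via a parse listener, as described next) for the `create` event.
-
-```java
-import org.camunda.bpm.engine.delegate.DelegateTask;
-import org.camunda.bpm.engine.delegate.TaskListener;
-
-// Hypothetical listener: notifies the assignee (or candidate group) when a task is created.
-public class TaskNotificationListener implements TaskListener {
-
-  @Override
-  public void notify(DelegateTask delegateTask) {
-    String assignee = delegateTask.getAssignee();
-    String taskName = delegateTask.getName();
-
-    // Replace this with your real notification mechanism (e.g. JavaMail, an SMTP gateway, or a chat tool).
-    System.out.println("New task '" + taskName + "' is waiting for '" + assignee + "'");
-  }
-}
-```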
- -When you want to have this functionality for every user task you can use a ParseListener which adds it *everywhere*, so you don't have to adjust the BPMN model. See [BPMN Parse Listener](https://github.com/camunda/camunda-bpm-examples/tree/master/process-engine-plugin/bpmn-parse-listener) to see how this can be done. - -### Following up on tasks after some time - -Follow up on tasks after some definable time by using Camunda's [Follow Up Date](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/user-task/#follow-up-date) field and use it in connection with filtering tasks. - -You can set a calculated follow-up date by using a JUEL expression in your BPMN file - -```xml - -``` - -You can set a follow-up date, that may be requested by the user, using the Java API - -```java -task.setFollowUpDate(myDate); -``` - -Now you can use a task *filter* with criteria checking the follow-up date and if it is due. This can be leveraged via API or in the Camunda Tasklist. - -![Follow up filter](extending-human-task-management-c7-assets/follow-up-filter.png) - -### Enforcing deadlines for tasks - -There are different ways of enforcing deadlines for Human Tasks. Typical actions for overdue tasks are: - -- Sending reminder mails -- Changing the assignee/group -- Creating a standalone task for a manager - -| | Explicit modeling in BPMN | Filtering due tasks | Querying due tasks and take action | Timeout task event | -| - | - | - | - | - | -| | Showing an escalation process path in your BPMN model (see example below) | Setting the Due Date field to easily filter for overdue tasks | Setting the Due Date field, querying it on a regular basis and take arbitrary actions | Implement a timeout event listener and configure it in the process model | -| | Explicit| Implicit | Implicit | Implicit | -| Bulk actions possible (e.g. one mail with a list of all due tasks) | | | yes || -| No custom component required | yes | yes | Querying has to be done by external trigger or BPMN process | yes | -| Use when | The escalation is business relevant and has to be visible in the process model | Overdue tasks can be easily monitored via tasklist application, actions are taken manually | Sophisticated, automated actions should take place | A timely escalation mechanism is desired | -| Don’t use when…​ | Each and every User Task has a due date and explicit modeling would clutter your process model | You need an action to be executed automatically | You do not want to run your own scheduling infrastructure| The escalation should be visible in the process model | - -#### Modeling an escalation - -The following example shows how to explicitly model an escalation: - -
    - -1 - -The model shows an explicit escalation process path: if the tweet does not get reviewed within an hour, the boss needs to be reminded about the laws of the internet age. - -#### Filtering by due date - -This example shows how you can calculate and set the [Due Date](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/user-task/#due-date) field: - -```xml - -``` - -You can easily query for overdue tasks via API, e.g. all overdue tasks: - -```java -processEngine.getTaskService().createTaskQuery().dueBefore(new Date()).list(); -``` - -#### Model timeout task event - -This example shows how to model a timeout event on a user task: - -```xml - - - - - R/PT1H - - - - -``` - -Every hour, the process engine will invoke the `sendEmailReminderListener` bean to send out an email. The bean can access all task and process attributes. - -## Enhancing task lists with business data - -Allow users to filter their tasks by relevant business data. Display this data right inside the task list and not just when selecting a task form. To achieve this with acceptable performance, select the implementation approach appropriate for your needs. - -### Selecting an implementation approach - -To enhance your tasklist with business data, select the implementation approach appropriate for your needs. - -| | Camunda Process Variables | Camunda Native Query API | Custom MyBatis Mapping | Custom Process or Task "InfoEntity" | -| - | - | ---- | --- | ------------ | -| | Use simple process or task variables to store business data with tasks, often as an additional copy. | Use a native query to enhance query performance when retrieving tasks filtered by business data. | Use a custom database mapping to speed up retrieval of task data combined with business data. | Use a custom database entity to store business data optimized for search and display. | -| Filter with Business Data as Simple Process Variables | yes | yes | yes| yes | -| Filter with Business Data in Domain Database | | yes | yes| yes | -| Display Business Data from Domain Database | (only via "copy as process variable") | (only via "copy as process variable") | yes| yes | -| Development Effort | out-of-the-box| low | high | high | -| No Dependency on Camunda Internals | yes | (take care not to use hard coded table names) | (take care not to use hard coded table names) | yes | -| Required Know-How | | SQL | SQL, Apache MyBatis, Advanced Camunda | Depends (e.g. JPA or JDBC) | -| Scaling / Performance | Limited (~ 5-10 criteria) | Medium (dep. on use case) | Medium (dep. on use case) | High (customized) | -| Out-of-the-box usage with Camunda Tasklist | yes | | | | - -### Using Camunda process/task variables - -Using plain and simple process or task variables to store business data has the big *advantage* that -you can use the out-of-the-box mechanisms. Plain and simple means to only use primary data types (e.g. String, Long, ...). 
Especially when using *Camunda Tasklist* you can easily use process/task variables to - -- *Show* custom business data right inside the list, or -- Use such variables for defining re-usable *filters* which narrow down your Tasklist items to the ones matching: - -![Process variables](extending-human-task-management-c7-assets/process-variables.png) - -#### Including derived or calculated values - -In case you need *dynamically calculated values* or specific *fields derived from complex datatypes/objects*, you can achieve this by - -- using task variables as a kind of *caching* mechanism, -- being filled by "calculating" the values using *expression language* -- e.g. by means of an *I/O Mapping* of a User Task: - - -```xml - - - - ${invoice.calculateSum()} - ${invoice.creditorId} - - - -``` - -1 - -The total sum of the payment is calculated by calling a method on an invoice object and cached for search and display purposes. - -3 - -The creditorId is copied into an own variable, so it can be used in filters or shown in the tasklist. - -The *disadvantage* of using process or task variables is that this mechanism does *not* scale very well, as the process variables are stored in the generic Camunda database schema. This requires one row in the variable table for each variable, and all of them must be joined with the process instance table. The real limit is determined by the amount of data and the database used - but typically you cannot use more than 10 variables. - -#### Using a special search variable - -If you need variables only to search for tasks (but not to display attributes in the tasklist) you can use a simple workaround: Introduce *one single process variable optimized for tasklist queries*. Extract the attributes you need to filter your tasklist with and combine them to a single search string prepared to work with a SQL 'LIKE' query: - -| Variable | Type | Value | -| ------------- | -------- | ------- | -| customerId | (Long) | 4711 | -| customerName | (String) | camunda | -| customerPlace | (String) | Berlin | -| searchString | (String) | customerId=4711#customerName=camunda#customerPlace=Berlin | - -When defining your Camunda tasklist filter, use the searchString variable and search in it by means of a 'LIKE' query. - -### Using the Camunda native query API - -When you need to filter your tasks by business data stored in your own tables, leverage the possibility to create *native queries* via the Camunda *Java API*. Native Queries are - -- expressed in *SQL* which is not limited to the Camunda Tables. However -- the result is still *mapped to the Camunda Task entity*, so you do not have to dive into Apache MyBatis (the persistence framework used within Camunda). - -This means you *cannot* load data from your domain objects by native queries, you simply can express arbitrary WHERE clauses. 
Example:
-
-```java
-List<Task> tasks = taskService.createNativeTaskQuery()
-  .sql("SELECT * FROM #{taskTable} T"
-    + " LEFT OUTER JOIN (select * from #{variablesTable} where NAME_ = 'customerId') VAR_CUSTOMER"
-    + " ON VAR_CUSTOMER.EXECUTION_ID_ = T.EXECUTION_ID_"
-    + " LEFT OUTER JOIN CUSTOMER " // <1>
-    + " ON CUSTOMER.ID_ = VAR_CUSTOMER.LONG_"
-    + " WHERE CUSTOMER.COMPANY = #{companyName}")
-  .parameter("companyName", "camunda")
-  .parameter("taskTable", managementService.getTableName(Task.class)) // <2>
-  .parameter("variablesTable", managementService.getTableName(VariableInstance.class))
-  .list();
-```
-
-1. Using native queries allows you to directly join Camunda tables with custom business data tables (held in the same database) while still retrieving `Task.class`-typed result sets.
-
-2. Make sure that you do not use hard-coded table names to be less dependent on Camunda internals. However, please note that the example still uses internal details, e.g. column names. Your queries or table/column name mappings would need to be adapted in case these internal details change.
-
-
-### Implementing a custom MyBatis mapping
-
-In case you want to not just filter tasklists for business data, but also load custom data from domain objects in one query, you can implement your own *MyBatis* mapping and call it via *custom code*.
-
-Even though this is a very powerful mechanism, we normally do not recommend it, as you need to understand quite a bit about MyBatis. It will be hard to completely avoid dependencies on the Camunda database schema. The database schema is considered internal, hence this also might impose additional maintenance effort in your project for new Camunda versions.
-
-### Implementing a custom *process/task info entity*
-
-For maximal flexibility (and the best performance possibilities), create a custom ProcessInstanceEntity and/or TaskEntity designed to filter tasklists and display business data.
-
-Prefer a ProcessInstanceEntity over a TaskEntity as long as the business data you need is quite similar between the different user tasks of a process definition. This way you avoid unnecessary database operations. If this is not the case, you need to go for the TaskEntity as shown in the following example.
-
-![Process Instance Info](extending-human-task-management-c7-assets/processinstanceinfo.png)
-
-In this entity, combine the Camunda `task.id` with all your business attributes as separate columns. This allows you to query for and display tasks without or with a minimum of SQL JOINs. Consider using your entity as the single source for displaying tasklists to your users - hence circumventing the Camunda TaskService query API for that purpose completely.
-
-Using this approach requires you to synchronize your entity with the Camunda state.
-
-If you target a *TaskInfoEntity*:
-
-- Create it via a *TaskListener*
-- Delete it via a *TaskListener* (a sketch of such a listener follows below)
-
-If you target a *ProcessInstanceInfoEntity*:
-
-- Create a new instance via an *ExecutionListener* on the process instance start event. The process instance id might not yet be known at this time. So either you create your own id and set it as a process variable (to SQL "join" on this later), or you can add a safe point before the listener triggers to make sure the process instance was committed to the database.
-
-- Decide when you have to update information in the entity; this depends on various factors (like amount of data, frequency of changes, way of changing data, ...).
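-As a minimal sketch of such a synchronization for a *TaskInfoEntity*, the following listener reacts to task lifecycle events. `TaskInfoEntity` and `TaskInfoRepository` are made-up stand-ins for your own persistence code (e.g. JPA or JDBC), and the copied business attributes are assumptions; the listener would be registered for the `create`, `complete`, and `delete` events, for example via a BPMN parse listener.
-
-```java
-import org.camunda.bpm.engine.delegate.DelegateTask;
-import org.camunda.bpm.engine.delegate.TaskListener;
-
-// Stand-ins for your own persistence layer - these types do not exist in Camunda.
-interface TaskInfoRepository {
-  void insert(TaskInfoEntity entity);
-  void deleteByTaskId(String taskId);
-}
-
-class TaskInfoEntity {
-  String taskId;
-  Long customerId;
-  String customerName;
-
-  TaskInfoEntity(String taskId, Long customerId, String customerName) {
-    this.taskId = taskId;
-    this.customerId = customerId;
-    this.customerName = customerName;
-  }
-}
-
-// Hypothetical listener keeping the custom entity in sync with the engine's task state.
-public class TaskInfoSyncListener implements TaskListener {
-
-  private final TaskInfoRepository repository;
-
-  public TaskInfoSyncListener(TaskInfoRepository repository) {
-    this.repository = repository;
-  }
-
-  @Override
-  public void notify(DelegateTask task) {
-    if (EVENTNAME_CREATE.equals(task.getEventName())) {
-      // Denormalize the business attributes you want to filter and display in the tasklist
-      repository.insert(new TaskInfoEntity(
-          task.getId(),
-          (Long) task.getVariable("customerId"),
-          (String) task.getVariable("customerName")));
-    } else if (EVENTNAME_COMPLETE.equals(task.getEventName())
-        || EVENTNAME_DELETE.equals(task.getEventName())) {
-      repository.deleteByTaskId(task.getId());
-    }
-  }
-}
-```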
diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment-assets/Sample Calculation for Sizing Your C8 Environment Best Practice.xlsx b/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment-assets/Sample Calculation for Sizing Your C8 Environment Best Practice.xlsx deleted file mode 100644 index 3bf8c90aafc..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment-assets/Sample Calculation for Sizing Your C8 Environment Best Practice.xlsx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment-c7.md b/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment-c7.md deleted file mode 100644 index 0b8d1fcf51c..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment-c7.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -id: sizing-your-environment-c7 -title: Sizing Your Camunda Platform 7 Environment -tags: - - Database - - Performance - - Hardware - - Sizing ---- - -Size your environment for Camunda Platform 7 appropriately, including sufficient hardware and database space. - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only! If you are using Camunda Cloud, visit [Sizing your Camunda Cloud Environment](../sizing-your-environment/). -::: - -## Understanding the influencing factors - -You do not need big hardware to run Camunda. The hardware requirements are basically determined by two things: - -1. The container/application server you want to use (see [deciding about your Camunda Platform 7 stack](../deciding-about-your-stack-c7/). -2. Things you do in [Delegation Code](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/) like service tasks. For example, when calling SOAP WebServices or when doing complex calculations in Java, more CPU time is consumed within the delegation code (your code) than in Camunda. - -The only way to get reliable figures for your project and environment is to do load testing on a close-to-production environment. We recommend doing this if in doubt. Steering the REST API via load generator tools like JMeter is relatively easy. - -From the Camunda perspective, there are a number of aspects to look at: - -- **Average duration between process start**: This determines the overall load on the system. We typically try to calculate how many new process instances per second. If you have a new process instance every couple of seconds or minutes (or even hours), you don't have to think about sizing. If you have **more than 100 process instances per second**, choose hardware wisely. As an example, we could run a benchmark on a normal developer notebook (Intel i5 4 Cores @2.5 Ghz, 8 GB RAM, SSD HD) that started around 100 to 500 process instances per second (see [Benchmarking Performance of Camunda Process Engine](http://blog.camunda.org/2014/01/benchmarking-camunda-process-engine.html) for details). - -- **Average process instance cycle time**: With the average cycle time of a process instance, you can calculate how many active process instances you typically have in the runtime database at the same time. For example, when starting one process instance per hour with a typical duration of two weeks, you have 2 weeks \* 7 days \* 24 hours \* 1 process instance/hour = 336 active process instances at any time. 
While this does not create CPU load for the engine, it influences database behavior like query execution time, index size, or index write performance. - -- **Wait states**: In some cases, process instances run through in one go, without stopping at any [wait state](http://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states). In these cases, process instances are never written to the runtime database, which decreases load dramatically. - -- **Number of concurrent clients**: This determines how many queries are fired against the database in parallel. It can also influence the sizing of thread pools. - -- **Typical queries**: Database performance and load is not a big issue if you only load process instances or tasks by `id` or `business key`, as both have an index. In contrast, querying for process instances or tasks by a combination of different process variables (e.g. to correlate by business data), has a severe impact on database load and performance. Especially in high load scenarios, think about the most common queries you will have. - -- **History level**: The configured [history level](http://docs.camunda.org/manual/latest/user-guide/process-engine/history/#set-the-history-level) determines how much history data is written and how much database disk space is required. - -## Determining hardware requirements - -### Performance & scalability - -We normally do not hit limits in scalability of Camunda. Due to the small footprint, the engine can run with extreme efficiency. All state is persisted in the database, so you can always add new process engine instances (e.g. cluster nodes) to speed up execution. - -The natural limit for this kind of architecture is the database. More scalability can be achieved using [Camunda Cloud](https://camunda.com/products/cloud/). - -### High availability - -We recommend running two machines for high availability. They do not have to form a proper cluster in terms of an application server cluster, just set up two identical nodes pointing to the same database. - -### Virtualization - -You can run Camunda on virtualized systems. The license is not bound to CPU cores, making this very easy from a licensing perspective as well. - -### Hardware - -We do not give concrete configuration recommendations. We recommend "server classes": - -- **Small**: Whatever you typically run as a small server (e.g. 1-2 CPU, 1-8 GB RAM). -- **Medium**: Whatever you typically run as a medium server (e.g. 2-4 CPU, 4-16 GB RAM). -- **Large**: Whatever you typically run as a large server (e.g. 4-64 CPU, 16-128 GB RAM). - -:::note -In most projects, small servers are sufficient. -::: - -Consider a medium server if: - -- You start more than 100 process instances per second. -- You have CPU intense delegation code. -- Your code/deployment has additional requirements. - -### Disk space - -Depending on the container, you need around 500 MB—1 GB of disk space. We recommend at least 2 GB to store enough logs in case you experience any problems. - -## Determining database requirements - -### Chose a good database - -As mentioned in [deciding about your Camunda Platform 7 stack](../deciding-about-your-stack-c7/), we recommend Oracle or PostgreSQL. Together with DB2, we made the best performance observations there. - -Note that H2 is seldom used in production, and we do not have much experience with heavy load on this database ([H2 FAQ: Is it Reliable?](http://www.h2database.com/html/faq.html#reliable)). 
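-Once you have chosen a database, pointing the engine at it is a small configuration step. The following is a minimal sketch only (connection details are placeholders, and in a Spring Boot setup you would configure a datasource instead of building the engine programmatically):
-
-```java
-import org.camunda.bpm.engine.ProcessEngine;
-import org.camunda.bpm.engine.ProcessEngineConfiguration;
-
-public class EngineBootstrap {
-
-  public static void main(String[] args) {
-    // Placeholder connection details - adjust host, database name, and credentials to your environment
-    ProcessEngine processEngine = ProcessEngineConfiguration
-        .createStandaloneProcessEngineConfiguration()
-        .setJdbcDriver("org.postgresql.Driver")
-        .setJdbcUrl("jdbc:postgresql://localhost:5432/camunda")
-        .setJdbcUsername("camunda")
-        .setJdbcPassword("camunda")
-        .setDatabaseSchemaUpdate(ProcessEngineConfiguration.DB_SCHEMA_UPDATE_TRUE)
-        .buildProcessEngine();
-
-    System.out.println("Process engine '" + processEngine.getName() + "' started");
-  }
-}
-```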
- -### Required database size - -The amount of space required on the database depends on the following: - -- [History level](http://docs.camunda.org/manual/latest/user-guide/process-engine/history/#set-the-history-level): Turning off history saves huge amounts of table space, as you only have to keep current runtime data in the database. Normally, you keep it to `FULL` to leverage audit logging capabilities of the process engine. -- [Process Variables](https://docs.camunda.org/manual/latest/user-guide/process-engine/variables/): All process variables need to be written to the database (in a serialized form, e.g. JSON). With the history level `FULL`, an entry is inserted into history tables every time a variable is changed, remembering the old value. With big data objects stored and often changed, this requires a lot of space. - -When calculating database size, you should also clarify if and how often you will be cleaning up your historical data, likely using the [history cleanup feature](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup). - -The real space occupied within your database depends very much on your database product and configuration. There is no easy formula to calculate this space. Instead, this section gives an example: - -
*(Invoice example process model, annotated with the assumed statistical distributions)*
-
-1. 25% of the instances will be reviewed.
-2. 10% of the instances will be ended after review.
-
-To gain some numbers, we ran the [invoice example](https://github.com/camunda/camunda-bpm-platform/blob/master/examples/invoice/src/main/resources/) with the statistical distributions mentioned above in the following scenario:
-
-- History level `FULL`
-- Starting 40,000 process instances (PIs) and letting 33,000 PIs complete (deleted from runtime). The remaining 7,000 PIs are still active.
-- Using an Oracle 12c Enterprise Edition (12.1.0.1.0, 64bit Production) installation on Linux.
-
-This gave us the following results:
-
-| - | Number of PIs | Disk space | Calculated disk space per PI | Remarks |
-| -- | -- | -- | -- | -- |
-| Runtime | 6,989 | 28.375 MB | 4.157 KB | Around half of the space is used for indices. |
-| History | 39,953 | 766.375 MB | 19.642 KB | Space requirements massively influenced by history level. |
-| Sum | - | 794.75 MB | - | - |
-
-As a rule of thumb, capture the following figures and use the example above to make an informed "guess":
-
-- Number of process instances per day
-- Average number of executed tasks per process instance
-- Sum of size of variables per process instance
-- Average number of updates per variable
-
-### Example calculation
-
-This is an example calculation from a real-life scenario.
-
-Given:
-
-- Estimated PI / month: 300,000
-- Concurrent users: 450
-
-Assumptions for calculation:
-
-- Load is equally distributed on 20 working days (more realistic than 30 days, you can even add more buffer).
-- Load is equally distributed on 8 working hours (more realistic than 24 hours, you can even add more buffer).
-- The process consists of mostly user tasks and almost no service tasks.
-- On average, a process instance takes around two days to complete.
-
-Calculation:
-
-- 15,000 new PI / day
-- 1,875 new PI / hour
-- 31 new PI / minute
-- ~1 new PI every 2 seconds
-
-In this case, a "small server" is sufficient.
diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment.md b/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment.md
deleted file mode 100644
index 14e37653e59..00000000000
--- a/versioned_docs/version-1.3/components/best-practices/architecture/sizing-your-environment.md
+++ /dev/null
@@ -1,230 +0,0 @@
----
-id: sizing-your-environment
-title: Sizing your environment
-tags:
-  - Database
-  - Performance
-  - Hardware
-  - Sizing
----
-
-In order to define and size your environment for Camunda Cloud appropriately, you need to understand the factors that influence hardware requirements. Then you can apply this knowledge to select the appropriate Camunda Cloud SaaS hardware package or size your self-managed Kubernetes cluster.
-
-:::caution Camunda Cloud only
-This best practice targets Camunda Cloud only! If you are looking at Camunda Platform 7, please visit [Sizing your Camunda Platform 7 environment](../sizing-your-environment-c7/).
-:::
-
-## Understanding influencing factors
-
-Let's understand the important numbers.
-
-### Throughput
-
-Throughput defines how many process instances can be executed in a certain timeframe.
-
-It is typically easy to estimate the number of **process instances per day** you need to execute. If you only know the number of process instances per year, we recommend dividing this number by 250 (the average number of working days in a year).
- -But the hardware sizing depends more on the **number of BPMN tasks** in a process model. For example, you will have a much higher throughput for processes with one service task than for processes with 30 service tasks. - -If you already know your future process model, you can use this to count the number of tasks for your process. For example, the following onboarding process contains five service tasks in a typical execution. - -
*(Onboarding process model with five service tasks)*
-
-If you don't yet know the number of service tasks, we recommend assuming 10 service tasks as a rule of thumb.
-
-The number of tasks per process allows you to calculate the required number of **tasks per day (tasks/day)**, which can also be converted into **tasks per second (tasks/s)** (divide by 24 hours \* 60 minutes \* 60 seconds).
-
-
-**Example:**
-
-| Indicator | Number | Calculation method | Comment |
-| :- |-: | :-: | :- |
-| Onboarding instances per year | 5,000,000 | | Business input |
-| Process instances per business day | 20,000 | / 250 | average number of working days in a year |
-| Tasks per day | 4,000 | / 5 | Tasks in the process model as counted above |
-| Tasks per second | 0.05 | / (24\*60\*60) | Seconds per day |
-
-In most cases, we define throughput per day, as this time frame is easier to understand. But in high-performance use cases, you might need to define the throughput per second.
-
-
-### Peak loads
-
-In most scenarios, your load will be volatile and not constant. For example, your company might start 90% of its monthly process instances on the same day of the month. The **ability to handle those peaks is the more crucial requirement and should drive your decision**, instead of looking at the average load.
-
-In the above example, that one day with the peak load defines your overall throughput requirements.
-
-Sometimes, looking at peaks might also mean that you are not looking at all 24 hours of a day, but only at 8 business hours, or perhaps the busiest 2 hours of a day, depending on your typical workload.
-
-
-### Latency and cycle time
-
-In some use cases, the cycle time of a process (or sometimes even the cycle time of single tasks) matters. For example, you want to provide a REST endpoint that starts a process instance to calculate a score for a customer. This process needs to execute four service tasks, but the REST request should return a response synchronously, no later than 250 milliseconds after the request.
-
-While the cycle time of service tasks depends very much on what you do in these tasks, the overhead of the workflow engine itself can be measured. In an experiment with Camunda Cloud 1.2.4, running all worker code in the same GCP zone as Camunda Cloud, we measured around 10 ms processing time per process node and approximately 50 ms latency to process service tasks in remote workers. Hence, executing 4 service tasks results in around 240 ms of workflow engine overhead.
-
-The closer you push throughput to the limits, the more latency you will get. This is basically because the different requests compete for hardware resources, especially disk write operations. As a consequence, whenever cycle time and latency matter to you, you should plan for a hardware buffer so that you do not utilize your cluster too much. This makes sure your latency does not go up because of resource contention. A good rule of thumb is to multiply your average load by 20. This means you can not only accommodate unexpected peak loads, but also have more free resources on average, keeping latency down.
- -| Indicator | Number | Calculation method | Comment | -| :- |-: | :-: | :- | -| Onboarding instances per year | 5,000,000 | | Business input, but irrelevant | -| Expected process instances on peak day | 150,000 | | Business input | -| Tasks per second within business hours on peak day | 5.20 | / (8\*60\*60) | Only looking at seconds of the 8 business hours of a day | -| Tasks per second including buffer | 104.16 | \* 20 | Adding some buffer is recommended in critical high-performance or low-latency use cases | - - -### Disk space - -The workflow engine itself will store data along every process instance, especially to keep the current state persistent. This is unavoidable. In case there are human tasks, data is also sent to Tasklist and kept there, until tasks are completed. - -Furthermore, data is also sent Operate and Optimize, which store data in Elasticsearch. These tools keep historical audit data for some time. The total amount of disk space can be reduced by using **data retention settings**. We typically delete data in Operate after 30 to 90 days, but keep it in Optimize for a longer period of time to allow more analysis. A good rule of thumb is something between 6 and 18 months. - -The data you attach to a process instance (process variables) will influence disk space requirements. For example, it makes a big difference if you only add one or two strings (requiring ~ 1kb of space) to your process instances, or a full JSON document containing 1MB. - -Assuming a [typical payload of 15 process variables (simple strings, numbers or booleans)](https://github.com/camunda/camunda/blob/1.3.14/benchmarks/project/src/main/resources/bpmn/typical_payload.json) we measured the following approximations for disk space requirements using Camunda Cloud SaaS 1.2.4. Please note, that these are not exact numbers, but they might give you an idea what to expect: - -* Zeebe: 75 kb / PI -* Operate: 57 kb / PI -* Optimize: 21 kb / PI -* Tasklist: 21 kb / PI -* Sum: 174 kb / PI - -Using your throughput and retention settings, you can now calculate the required disk space for your scenario. Example: - -| Indicator | Calculation method | Value | Comments | -| :------------------------- | :-----------: | ------------: | :-------------------------------------------------------------------------------------------------- | -| Process instances per day | | 20,000 | | -| **Runtime** | | | | -| Typical process cycle time | \* 5 days | 100,000 | How long is a process instance typically active? Determines the number of active process instances | -| Disk space for Zeebe | \* 75 kib | 7.15 GiB | (Converted into GB by / 1024 / 1024) | -| Disk space for Tasklist | \* 21 kib | 0.67 GiB | | -| **Operate** | | | | -| PI in retention time | \* 30 day | 600,000 | | -| Disk space | \* 57 kib | 32.62 GiB | | -| **Optimize** | | | | -| PI in retention time | \* 6 months | 3,600,000 | | -| Disk space | \* 21 kib | 72.10 GiB | | -| **Sum** | | **113.87 GiB** | | - - -## Understanding sizing and scalability behavior - -Spinning up a Camunda Cloud Cluster means you run multiple components that all need resources in the background, like the Zeebe broker, Elasticsearch (as the database for Operate, Tasklist, and Optimize), Operate, Tasklist, and Optimize. All those components need to be equiped with resources. - -All components are clustered to provide high-availability, fault-tolerance and resiliency. - -Zeebe scales horizontally by adding more cluster nodes (pods). 
This is limited by the so-called partition size of a Zeebe cluster, as the work within one partition cannot be parallelized by design. Hence, you need to define enough partitions to utilize your cluster or to have some buffer if your load increases later on. The number of partitions cannot be changed after the cluster was initially provisioned (at least not yet), so elastic scalability of cluster nodes is (not yet) possible. - -Camunda Cloud runs on Kubernetes. Every component is operated as a so-called pod, that gets resources assigned. These resources can be vertically scaled (=get more or less hardware resources assigned dynamically) within certain limits. Note that vertically scaling not always results in more throughput, as the various components have dependencies on each other. This is a complex topic and requires running experiments with benchmarks. In general, we recommend to start with the minimalistic hardware package as described below. If you have further requirements, you use this as a starting point to increase resources. - -Note that Camunda licensing does not depend on the provisioned hardware resources, making it easy to size according to your needs. - - -## Sizing your runtime environment - -First, calculate your requirements using the information provided above, taking the example calculations from above: - -* Throughput: 20,000 process instances / day -* Disk space: 114 GB - -Now you can select a hardware package that can cover these requirements. In this example this fits well into a cluster of size S. - -### Camunda Cloud SaaS - -Camunda Cloud defines three fixed hardware packages you can select from. The table below gives you an indication what requirements you can fullfill with these. If your requirements are above the mentioned numbers, please contact us to discuss a customized sizing. - -| **\*** | S | M | L | -| :------------------------------------------- | ------------------------------: | ------------------------------: | -------------------------------: | -| Max Throughput **Tasks/day** | 5.9 M | 23 M | 43 M | -| Max Throughput **Tasks/second** | 65 | 270 | 500 | -| Max Throughput **Process Instances/day** | 0.5 M | 2.3 M | 15 M | -| Max Total Number of Process Instances | 5.4 M | 5.4 M | | -| Approx resources provisioned **\*\*** | 15 vCPU, 20 GB mem, 640 GB disk | 28 vCPU, 50 GB mem, 640 GB disk | 56 vCPU, 85 GB mem, 1320 GB disk | - -**\*** The numbers in the table where measured using Camunda Cloud 1.2.4 and [the official benchmark project](https://github.com/camunda/camunda/tree/1.3.14/benchmarks). It uses a [ten task process](https://github.com/camunda/camunda/blob/1.3.14/benchmarks/project/src/main/resources/bpmn/ten_tasks.bpmn). To calculate day-based metrics, a equal distribution over 24 hours is assumed. - - -**\*\*** These are the resource limits configured in the Kubernetes cluster and are always subject to change. - - -### Camunda Cloud self-managed - -Provisioning Camunda Cloud onto your self-managed Kubernetes cluster might depend on various factors. For example, most customes already have own teams providing Elasticsearch for them as a service. - -However, the following example shows the current configuration of a cluster of size S in Camunda Cloud SaaS, which can serve as a starting point for your own sizing. As you can see in the table above, such a cluster can serve 500,000 process instances / day and store up to 5.4 million process instances (in-flight and history). 
- -| | | request | limit | -| ------------------ | ------------------- | ------- | ----- | -| **Zeebe** | | | | -| \# brokers | 3 | -| \# partitions | 3 | -| replication factor | 3 | -| | vCPU \[cores\] | 0.8 | 0.96 | -| | Mem \[GB\] | 0.25 | 1.92 | -| | Disk \[GB\] | 32 | 192 | -| #gateway | 2 | -| | vCPU \[cores\] | 0.4 | 0.4 | -| | Mem \[GB\] limit | 0.45 | 0.45 | -| **Operate** | | | | -| #importer | 1 | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] limit | 0.2 | 1 | -| #webapp | 2 | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] limit | 0.2 | 1 | -| **Tasklist** | | | | -| #importer | 1 | -| | vCPU \[cores\] | 0.4 | 1 | -| | Mem \[GB\] limit | 1 | 2 | -| #webapp | 2 | -| | vCPU \[cores\] | 0.4 | 1 | -| | Mem \[GB\] limit | 1 | 2 | -| **Optimize** | | | | -| #importer | 1 | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] limit | 0.4 | 1 | -| #webapp | 2 | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] limit | 0.4 | 1 | -| **Elastic** | | | | -| #statefulset | 1 | -| | vCPU \[cores\] | 1 | 2 | -| | Mem \[GB\] limit | 1 | 2 | -| | Disk \[GB\] request | 64 | 64 | -| **Other** (Worker, Analytics, ...) | -| # | 1 | -| | vCPU \[cores\] | 0.4 | 0.4 | -| | Mem \[GB\] limit | 0.45 | 0.45 | -| **Total resources** | -| | vCPU \[cores\] | 0.4 | 15.08 | -| | Mem \[GB\] | 0.45 | 21.11 | -| | Disk \[GB\] | 0 | 640 | - -## Planning non-production environments - -All clusters can be used for development, testing, integration, Q&A, and production. In Camunda Cloud SaaS, production and test environments are organized via separate organizations within Camunda Cloud to ease the management of clusters, while also minimizing the risk to accidentally accessing a production cluster. - -Note that functional unit tests that are written in Java and use [zeebe-proces-test](https://github.com/camunda-cloud/zeebe-process-test/), will use an in-memory broker in unit tests, so no development cluster is needed for this use case. - -For typical integration or functional test environments, you can normally just deploy a small cluster, like the one shown above, even if your production environment is sized bigger. This is typically sufficient, as functional tests typically run much smaller workloads. - -Load or performance tests ideally run on the same sizing configuration as your production instance to yield reliable results. - -A typical customer set-up consists of: - -* 1 Production cluster -* 1 Integration or pre-prod cluster (equal in size to your anticipated production cluster if you want to run load tests or benchmarks) -* 1 Test cluster -* Multiple developer clusters - -Ideally, every active developer runs its own cluster, so that the workflow engine does not need to be shared amongst developers. Otherwise clusters are not isolated, which can lead to errors if for example developer A deploys a new version of the same process as developer B. Typically, developer clusters can be deleted when they are no longer used, as no data needs to be kept, so you might not need one cluster per developer that works with Camunda Cloud at some point in time. And using in-memory unit tests further reduces the contention on developer clusters. - -However, some customers do share a Camunda Cloud cluster amongst various developers for economic reasons. This can work well if everybody is aware of the problems that can arise. - -## Running experiments and benchmarks - -If you are in doubt about which package to choose, you can do a load test with a representative workload with the target hardware package. 
This will help you decide if the specific package can serve your needs. - -This is recommended if you exceed the above numbers of three million process instances per day. - -You can look at the [Zeebe benchmark project](https://github.com/camunda/camunda/blob/1.3.14/benchmarks/setup/README.md#benchmarking-camunda-cloud-saas). While this project will not run out-of-the-box (e.g. you need need to build starter and worker code yourself and use self-created docker images), you can use it as a starting point for own endavours. diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/claim.png b/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/claim.png deleted file mode 100644 index d90d4f1b9b4..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/claim.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.png b/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.png deleted file mode 100644 index 0518dc31949..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.pptx b/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.pptx deleted file mode 100644 index a2edcbe1ad3..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/tasklist-mockup.png b/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/tasklist-mockup.png deleted file mode 100644 index ed42b483318..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management-assets/tasklist-mockup.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management.md b/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management.md deleted file mode 100644 index 02840f6f73a..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/architecture/understanding-human-tasks-management.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Understanding human task management -tags: - - Human Task - - Delegation - - Escalation - - E-Mail Notification - - 4-Eyes-Principle - - Overdue Task ---- - -## Using task assignment features - -The lifecycle of human tasks (like assigning, delegating, and completing tasks) is mostly a generic issue. There is no need to model common aspects into all your processes, if often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. 
- -![Task assignment](understanding-human-tasks-management-assets/human-tasks.png) - -So every task can be assigned to either a group of people, or a specific individual. An individual can 'claim' a task, indicating that they are picking the task from the pool (to avoid multiple people working on the same task). - -As a general rule, you should assign human tasks in your business process to *groups of people* instead of specific individuals. - -```xml - - - - -``` - -Then, require individual members of that group to explicitly *claim tasks* before working on them. This way, you avoid different people working on the same task at the same time. See [`claimTask`](../../../apis-tools/tasklist-api/mutations/claim-task.mdx). - -```graphql -claimTask( - taskId: String! - assignee: String -): Task! -``` - -You can also directly claim tasks in Camunda Tasklist with the click of a button. - -![Claim](understanding-human-tasks-management-assets/claim.png) - -While assigning users to groups is advised, it's not the only option. You could always assign a task to a *single person* who is supposed to complete the task (e.g. the individual 'customer' of your process or a coworker having specific knowledge for the case). You will need to have access to the specific person relevant for your process instance, e.g. via a process variable: - -```xml - - - - -``` - -## Deciding about your task list frontend - -If you have human tasks in your process, you must make up your mind on how exactly you want to let your users work on their tasks and interact with the workflow engine. You have basically three options: - -- [Camunda Tasklist](/docs/components/tasklist/introduction-to-tasklist/): The Tasklist application shipped with Camunda. This works out-of-the-box and has a low development effort. However, it is limited in terms of customizability and how much you can influence the user experience. - -- Custom task list application: You can develop a custom task list and adapt this to your needs without compromises. Human tasks are shown inside your custom application, following your style guide and usability concept. You will use the [Camunda Tasklist API](../../../apis-tools/tasklist-api/generated.md) in the background. This is very flexible, but requires additional development work. - -- Third party tasklist: If our organization already has a task list application rolled out to the field, you might want to use this for tasks created by Camunda. You will need to develop some synchronization mechanism. The upside of this approach is that your end users might not even notice that you introduce a new workflow engine. - -### Considerations for developing custom task lists - -When building a custom tasklist/application, you must plan for the following aspects. You will need to - -- *Query* for user tasks and *generate lists* of those tasks. -- *Filter the list* along specific attributes like current assignee, candidate groups, etc. -- *Select* and *display* the right forms for starting processes and completing tasks. -- Use *custom/business value* data in order to *filter* with those values and *display* them correlated with the task list and within forms. -- *Authorize* users to access those lists, filters, and forms. - -### Considerations for using third party task lists - -When integrating a third party tasklist, you must plan for the following aspects. You will need to take care of: - -- *Creating* tasks in the third party tasklist based on the user tasks created by Camunda. 
-- *Completing* tasks in Camunda and moving process execution forward based on user action in the third party tasklist. -- *Cancelling* tasks, triggered by Camunda or triggered by the user in the third-party tasklist. -- Transferring *business data* to be edited in the third-party tasklist back and forth. - -Your third party tasklist application also needs to allow for some programmatic control of the lifecycle of its tasks. The third-party application *must have* the ability: - -- To programmatically *create* a new task. -- To *hook in code* which programmatically informs other systems that the user is about to change a task's state. -- To *manage custom attributes* connected to a task and programmatically access them. - -Additionally, it *should have* the ability: - -- To programmatically *delete* a task which was cancelled in Camunda. Without this possibility, such tasks remain in the user's tasklist and would need to be removed manually. Depending on the way you integrate the task completion mechanism, when the user tries to complete such tasks, they would either immediately see an error, or the action would simply no longer matter and just serve as a removal from the list. - -Transfer just the minimal amount of business data between Camunda and your third-party tasklist application. - -For creating tasks, transfer just the taskId and important business data references/ids to your domain objects. As much as possible should be retrieved later, and just when needed (e.g. when displaying task forms to the user), by requesting data from the process engine or by requesting data directly from other systems. - -For completing tasks, transfer just the business data which originated from Camunda and was changed by the user. This means, in case you just maintain references, nothing needs to be transferred back. All other business data changed by the user should be transferred directly to the affected systems. - -### Task lists may not look like task lists - -There are situations where you might want to show a user interface that does not look like a task list, even if it is fed by tasks. The following *example* shows such a situation in the document *input management* process of a company. Every document is handled by a separate process instance, but users typically look at complete mailings consisting of several such documents. In a customer scenario, there were people in charge of assessing the scanned mailing and distributing the individual documents to the responsible departments. It was important to do that in one step, as sometimes documents referred to each other. - -So you have several user tasks which are heavily *interdependent* from a business point of view and should therefore be completed *in one step* by the same person. - -The solution to this was a custom user interface that basically queries for human tasks, but shows them grouped by mailings: - -![custom tasklist mockup](understanding-human-tasks-management-assets/tasklist-mockup.png) - -1 - -The custom tasklist shows each mailing as one "distribution task", even though it consists of several human tasks fetched from the workflow instance. - -2 - -The custom user interface allows you to work on all four human tasks at once. By dragging and dropping a document within the tree, the user can choose to which department the document is delivered. - -3 - -In case the user detects a scanning problem, they can request a new scan of the mailing. But as soon -as all documents are quality assured, the button **Distribute Mailing** gets enabled.
By clicking on it, the system completes all four human tasks - one for each document - which moves forward the four process instances associated with the documents. diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/architecture.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/architecture.png deleted file mode 100644 index f4da19ea4ba..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/architecture.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/clients.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/clients.png deleted file mode 100644 index 1b875c1a48d..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/clients.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png deleted file mode 100644 index dc569924f16..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector.png deleted file mode 100644 index d49448f0a22..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/img-src.pptx b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/img-src.pptx deleted file mode 100644 index a59afc7dc1f..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/img-src.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png deleted file mode 100644 index 0e32b8c3668..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png 
b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png deleted file mode 100644 index 840637ae010..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-example.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-example.png deleted file mode 100644 index 9544de0d32a..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/messaging-example.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/messaging-example.png deleted file mode 100644 index eb73103d3e6..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/messaging-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-connector.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-connector.png deleted file mode 100644 index e95c838454c..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-connector.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-example.png b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-example.png deleted file mode 100644 index bb8f715bf09..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md b/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md deleted file mode 100644 index e5036c1908b..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: "Connecting the workflow engine with your world" -description: "Write some custom glue code in the programming language of your choice and using existing client libraries." ---- - - -One of your first tasks to build a process solution is to sketch the basic architecture of your solution. To do so, you need to answer the question of how to connect the workflow engine (Zeebe) with your application or with remote systems. - -This document predominantly outlines writing some custom glue code in the programming language of your choice and using existing client libraries. 
In some cases, you might also want to leverage existing connectors as a starting point. - -The workflow engine is a remote system for your applications, just like a database. Your application connects with Zeebe via remote protocols, [gRPC](https://grpc.io/) to be precise, which is typically hidden from you, like when using a database driver based on ODBC or JDBC. - -With Camunda Cloud and the Zeebe workflow engine, there are two basic options: - -1. Write some **programming code** that typically leverages the client library for the programming language of your choice. -2. Use some **existing connector** which just needs a configuration. - -The trade-offs will be discussed later; let’s look at the two options first. - -## Programming glue code - -To write code that connects to Zeebe, you typically embed [the Zeebe client library](/docs/apis-tools/working-with-apis-tools/) into your application. An application can of course also be a service or microservice. - -If you have multiple applications that connect to Zeebe, all of them will require the client library. If you want to use a programming language where no such client library exists, you can [generate a gRPC client yourself](https://camunda.com/blog/2018/11/grpc-generating-a-zeebe-python-client/). - -![Clients to Zeebe](connecting-the-workflow-engine-with-your-world-assets/clients.png) - -Your application can basically do two things with the client: - -1. **Actively call Zeebe**, for example, to start process instances, correlate messages, or deploy process definitions. -2. **Subscribe to tasks** created in the workflow engine in the context of BPMN service tasks. - -### Calling Zeebe - -Using the Zeebe client’s API, you can communicate with the workflow engine. The two most important API calls are to start new process instances and to correlate messages to a process instance. - -**Start process instances using the** [**Java Client**](../../../apis-tools/java-client/index.md)**:** - -```java -processInstance = zeebeClient.newCreateInstanceCommand() - .bpmnProcessId("someProcess").latestVersion() - .variables( someProcessVariablesAsMap ) - .send() - .exceptionally( throwable -> { throw new RuntimeException("Could not create new instance", throwable); }); -``` - -**Start process instances using the** [**NodeJS Client**](../../../apis-tools/community-clients/javascript.md)**:** - -```js -const processInstance = await zbc.createWorkflowInstance({ - bpmnProcessId: 'someProcess', - version: 5, - variables: { - testData: 'something', - } -}) -``` - -**Correlate messages to process instances using the Java Client**: - -```java -zeebeClient.newPublishMessageCommand() // - .messageName("messageA") - .messageId(uniqueMessageIdForDeduplication) - .correlationKey(message.getCorrelationid()) - .variables(singletonMap("paymentInfo", "YeahWeCouldAddSomething")) - .send() - .exceptionally( throwable -> { throw new RuntimeException("Could not publish message " + message, throwable); }); -``` - -**Correlate messages to process instances using the NodeJS Client**: - -```js -zbc.publishMessage({ - name: 'messageA', - messageId: messageId, - correlationKey: correlationId, - variables: { - valueToAddToWorkflowVariables: 'here', - status: 'PROCESSED' - }, - timeToLive: Duration.seconds.of(10) -}) -``` - -This allows you to connect Zeebe with any external system by writing some custom glue code. We will look at common technology examples to illustrate this in a minute. 
- -### Subscribing to tasks using a job worker - -To implement service tasks of a process model, you can write code that subscribes to the workflow engine. In essence, you will write some glue code that is called whenever a service task is reached (which internally creates a job, hence the name). - -**Glue code in Java:** - -```java -class ExampleJobHandler implements JobHandler { - public void handle(final JobClient client, final ActivatedJob job) { - // here: business logic that is executed with every job - client.newCompleteCommand(job.getKey()).send() - .exceptionally( throwable -> { throw new RuntimeException("Could not complete job " + job, throwable); }); - } -} -``` - -**Glue code in NodeJS:** - -```js -function handler(job, complete, worker) { - // here: business logic that is executed with every job - complete.success() -} -``` - -Now, this handler needs to be connected to Zeebe, which is generally done via subscriptions that internally use long polling to retrieve jobs. - -**Open subscription via the Zeebe Java client:** - -```java -try (final JobWorker workerRegistration = - zeebeClient - .newWorker() - .jobType("serviceA") - .handler(new ExampleJobHandler()) - .timeout(Duration.ofSeconds(10)) - .open()) { - waitUntilSystemInput("exit"); -} -``` - -**Open subscription via the Zeebe NodeJS client:** - -```js -zbc.createWorker({ - taskType: 'serviceA', - taskHandler: handler, -}) -``` - -You can also use integrations in certain programming frameworks, like [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) in the Java world, which starts the job worker and implements the subscription automatically in the background for your glue code. - -**A subscription for your glue code is opened automatically by the Spring integration:** - -```java -@ZeebeWorker(type = "serviceA", autoComplete = true) -public void handleJobFoo(final JobClient client, final ActivatedJob job) { - // here: business logic that is executed with every job - // you do not need to call "complete" on the job, as autoComplete is turned on above -} -``` - -There is also documentation on [how to write a good job worker](../writing-good-workers/). - -## Technology examples - -Most projects want to connect to specific technologies. Currently, most people ask for REST, messaging, or Kafka. - -### REST - -You could build a piece of code that provides a REST endpoint in the language of your choice and then starts a process instance. - -The [Ticket Booking Example](https://github.com/berndruecker/ticket-booking-camunda-cloud) contains an example using Java and Spring Boot for the [REST endpoint](https://github.com/berndruecker/ticket-booking-camunda-cloud/blob/master/booking-service-java/src/main/java/io/berndruecker/ticketbooking/rest/TicketBookingRestController.java#L35). - -Similarly, you can leverage the [Spring Boot extension](https://github.com/zeebe-io/spring-zeebe/) to start up job workers that will [execute outgoing REST calls](https://github.com/berndruecker/ticket-booking-camunda-cloud/blob/master/booking-service-java/src/main/java/io/berndruecker/ticketbooking/adapter/GenerateTicketAdapter.java#L29). - -![REST example](connecting-the-workflow-engine-with-your-world-assets/rest-example.png) - -You can find [NodeJS sample code for the REST endpoint](https://github.com/berndruecker/flowing-retail/blob/master/zeebe/nodejs/nestjs-zeebe/checkout/src/app.controller.ts) in the [Flowing Retail example](https://github.com/berndruecker/flowing-retail).
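To make this more concrete, a minimal sketch of such an inbound REST endpoint in Java with Spring Boot could look as follows. It is not taken from the linked samples: the `/tickets` path, the `ticketBooking` process ID, and the injected `ZeebeClient` bean are assumptions for illustration only.

```java
import java.util.Map;

import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.ProcessInstanceEvent;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class BookingRestController {

  private final ZeebeClient zeebeClient; // assumed to be configured elsewhere, e.g. via Spring Zeebe

  public BookingRestController(ZeebeClient zeebeClient) {
    this.zeebeClient = zeebeClient;
  }

  // Incoming REST call -> start a new instance of a hypothetical "ticketBooking" process
  @PostMapping("/tickets")
  public ResponseEntity<String> bookTicket(@RequestBody Map<String, Object> variables) {
    ProcessInstanceEvent instance =
        zeebeClient
            .newCreateInstanceCommand()
            .bpmnProcessId("ticketBooking")
            .latestVersion()
            .variables(variables)
            .send()
            .join(); // wait for the broker to acknowledge the command

    return ResponseEntity.status(HttpStatus.CREATED)
        .body(String.valueOf(instance.getProcessInstanceKey()));
  }
}
```

The same pattern works with any other web framework; the only Zeebe-specific part is the `newCreateInstanceCommand` call already shown above.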
- -### Messaging - -You can do the same for messages, which often means [AMQP](https://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol) nowadays. - -The [Ticket Booking Example](https://github.com/berndruecker/ticket-booking-camunda-cloud) contains an example for RabbitMQ, Java, and Spring Boot. It provides a message listener to correlate incoming messages with waiting process instances, and [glue code to send outgoing messages onto the message broker](https://github.com/berndruecker/ticket-booking-camunda-cloud/blob/master/booking-service-java/src/main/java/io/berndruecker/ticketbooking/adapter/RetrievePaymentAdapter.java). - -![Messaging example](connecting-the-workflow-engine-with-your-world-assets/messaging-example.png) - -[Service integration patterns](../service-integration-patterns/) goes into detail on whether to use a send and a receive task here, or simply one service task (spoiler alert: send and receive tasks are used here because the payment service might be long-running; think about expired credit cards that need to be updated or wire transfers that need to happen). - -The same concept applies to other programming languages. For example, you could use the [NodeJS client for RabbitMQ](https://www.rabbitmq.com/tutorials/tutorial-one-javascript.html) and the [NodeJS client for Zeebe](https://github.com/camunda-community-hub/zeebe-client-node-js) to create the same type of glue code as shown above. - -### Apache Kafka - -You can do the same trick with Kafka topics. The [Flowing Retail example](https://github.com/berndruecker/flowing-retail) shows this using Java, Spring Boot, and Spring Cloud Streams. There is [code to subscribe to a Kafka topic and start new process instances for new records](https://github.com/berndruecker/flowing-retail/blob/master/kafka/java/order-zeebe/src/main/java/io/flowing/retail/kafka/order/messages/MessageListener.java#L39), and there is some glue code to create new records when a process instance executes a service task. Of course, you could also use other frameworks to achieve the same result. - -![Kafka Example](connecting-the-workflow-engine-with-your-world-assets/kafka-example.png)
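As a rough sketch (not taken from the linked example, which uses Spring Cloud Streams), glue code based on Spring for Apache Kafka could start a process instance per record along the following lines; the topic name `order-events`, the process ID `order`, and the plain-String payload are assumptions for illustration:

```java
import java.util.Map;

import io.camunda.zeebe.client.ZeebeClient;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class OrderRecordListener {

  private final ZeebeClient zeebeClient;

  public OrderRecordListener(ZeebeClient zeebeClient) {
    this.zeebeClient = zeebeClient;
  }

  // One new process instance per Kafka record; real code would deserialize the payload properly.
  @KafkaListener(topics = "order-events")
  public void handleOrderRecord(String recordValue) {
    zeebeClient
        .newCreateInstanceCommand()
        .bpmnProcessId("order") // hypothetical process ID
        .latestVersion()
        .variables(Map.of("payload", recordValue))
        .send()
        .join();
  }
}
```

The outgoing direction is the job worker pattern shown earlier: a worker subscribed to the service task's type produces a Kafka record and then completes the job.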
- -A connector can be uni or bidirectional and is typically one dedicated application that implements the connection that translates in one or both directions of communication. Such a connector might also be helpful in case integrations are not that simple anymore. - -![Connectors](connecting-the-workflow-engine-with-your-world-assets/connector.png) - -For example, the [HTTP connector](https://github.com/camunda-community-hub/zeebe-http-worker) is a one-way connector that contains a job worker that can process service tasks doing HTTP calls as visualized in the example in the following figure: - -![REST Connectors](connecting-the-workflow-engine-with-your-world-assets/rest-connector.png) - -Another example is the [Kafka Connector](https://github.com/camunda-community-hub/kafka-connect-zeebe), as illustrated below. - -![Kafka Connector](connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png) - -This is a bidirectional connector which contains a Kafka listener for forwarding Kafka records to Zeebe and also a job worker which creates Kafka records every time a service task is executed. This is illustrated by the following example: - -![Kafka Connector Details](connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png) - -### Out-of-the-box connectors - -Most connectors are currently community extensions, which basically means that they are not officially supported by Camunda, but by community members (who sometimes are Camunda employees). While this sounds like a restriction, it can also mean there is more flexibility to make progress. - -A list of community-maintained connectors can be found [here](https://awesome.zeebe.io/). - -### Using connectors in SaaS - -Currently, connectors are not operated as part of the Camunda Cloud SaaS offering, which means you need to operate them yourself in your environment, which might be a private or public cloud. - -![Connectors in SaaS](connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png) - -### Reusing your own integration logic by extracting connectors - -If you need to integrate with certain infrastructure regularly, for example your CRM system, you might also want to create your own CRM connector, run it centralized, and reuse it in various applications. - -In general, we recommend not to start such connectors too early. Don’t forget that such a connector gets hard to adjust once in production and reused across multiple applications. Also, it is often much harder to extract all configuration parameters correctly and fill them from within the process, than it would be to have bespoke glue code in the programming language of your choice. - -Therefore, you should only extract a full-blown connector if you understand exactly what you need. - -Don’t forget about the possibility to extract common glue code in a simple library that is then used at different places. - -:::note -Updating a library that is used in various other applications can be harder than updating one central connector. In this case, the best approach depends on your scenario. -::: - -Whenever you have such glue code running and really understand the implications of making it a connector, as well as the value it will bring, it can make a lot of sense. - -## Recommendation - -As a general rule of thumb, prefer custom glue code whenever you don’t have a good reason to go with an existing connector (like the reasons mentioned above). 
- -A good reason to use connectors is if you need to solve complex integrations where little customization is needed, such as the [Camunda RPA bridge](https://docs.camunda.org/manual/latest/user-guide/camunda-bpm-rpa-bridge/) to connect RPA bots (soon to be available for Camunda Cloud). - -Connectors are also a good fit for scenarios where you don't need custom glue code; for example, when orchestrating serverless functions on AWS with the [AWS Lambda Connector](https://github.com/camunda-community-hub/zeebe-lambda-worker). This connector can be operated once and used in different processes. - -Some use cases also allow you to create a **reusable generic adapter**; for example, to send status events to your business intelligence system. - -But there are also common downsides with connectors. First, the possibilities are limited to what the creator of the connector has foreseen. In reality, you might have slightly different requirements and soon hit a limitation of a connector. - -Second, you need to operate the connector in addition to your own application. The complexity associated with this depends on your environment. - -Third, testing your glue code gets harder, as you can't easily hook mocks into such a connector as you could in your own glue code. diff --git a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png b/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png deleted file mode 100644 index 94ffeda1942..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/image-src.pptx b/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/image-src.pptx deleted file mode 100644 index 606bbebdbc3..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/image-src.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png b/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png deleted file mode 100644 index f99c1eeb681..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/typical-call-chain.png b/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/typical-call-chain.png deleted file mode 100644 index 7ff9a94a315..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/typical-call-chain.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/worker-concept.png
b/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/worker-concept.png deleted file mode 100644 index 9a4503ec673..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions-assets/worker-concept.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions.md b/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions.md deleted file mode 100644 index 2471f4fbfc0..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/dealing-with-problems-and-exceptions.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -title: Dealing with problems and exceptions -tags: -- Transaction -- ACID Transaction -- Compensation -- Exception Handling -- BPMN Error Event -- Incident -- Save Point ---- - -## Understanding workers - -First, let's briefly examine how a worker operates. - -Whenever a process instance arrives at a service task, a new job is created and pushed to an internal persistent queue within Camunda Cloud. A client application can subscribe to these jobs with the workflow engine by the task type name (which is comparable to a queue name). - -If there is no worker subscribed when a job is created, the job is simply put in a queue. If multiple workers are subscribed, they are competing consumers, and jobs are distributed among them. - -![Worker concept](dealing-with-problems-and-exceptions-assets/worker-concept.png) - -Whenever the worker has finished whatever it needs to do (like invoking the REST endpoint), it sends another call to the workflow engine, which [can be one of these three](/docs/components/concepts/job-workers/#completing-or-failing-jobs): - -- [`CompleteJob`](../../../apis-tools/grpc.md#completejob-rpc): The service task went well, the process instance can move on. -- [`FailJob `](../../../apis-tools/grpc.md#failjob-rpc): The service task failed, and the workflow engine should handle this failure. There are two possibilities: - - `remaining retries > 0`: The job is retried. - - `remaining retries <= 0`: An incident is raised and the job is not retried until the incident is resolved. -- [`ThrowError`](../../../apis-tools/grpc.md#throwerror-rpc): A BPMN error is reported, which typically is handled on the BPMN level. - -As the glue code in the worker is external to the workflow engine, there is **no technical transaction spanning both components**. Technical transactions refer to ACID (atomic, consistent, isolated, durable) properties, mostly known from relational databases. - -If, for example, your application leverages those capabilities, your business logic is either successfully committed as a whole, or rolled back completely in case of any error. However, those ACID transactions cannot be applied to distributed systems (the talk [lost in transaction](https://berndruecker.io/lost-in-transaction/) elaborates on this). In other words, things can get out of sync if either the job handler or the workflow engine fails. - -A typical example scenario is the following, where a worker calls a REST endpoint to invoke business logic: - -![Typical call chain](dealing-with-problems-and-exceptions-assets/typical-call-chain.png) - -Technical ACID transaction will only be applied in the business application. The job worker mostly needs to handle exceptions on a technical level, e.g. 
to control retry behavior, or pass it on to the process level, where you might need to implement business transactions. - -## Handling exceptions on a technical level - -### Leveraging retries - -Using the [`FailJob`](../../../apis-tools/grpc.md#failjob-rpc) API is handy for leveraging the built-in retry mechanism of Zeebe. The initial number of retries is set in the BPMN process model: - -```xml - - - - - -``` - -This number is typically decremented with every attempt to execute the service task. Note that you need to do that in your worker code. Example in Java: - -```java - @ZeebeWorker(type = "retrieveMoney") - public void retrieveMoney(final JobClient client, final ActivatedJob job) { - try { - // your code - } catch (Exception ex) { - client.newFailCommand(job.getKey()) - .retries(job.getRetries()-1) // <1>: Decrement retries - .errorMessage("Could not retrieve money due to: " + ex.getMessage()) // <2> - .send() - .exceptionally(t -> {throw new RuntimeException("Could not fail job: " + t.getMessage(), t);}); - } - } -``` - -1 - -Decrement the retries by one. - -2 - -Provide a meaningful error message, as this will be displayed to a human operator once an incident is created in Operate. - -Example in Node.js: - -```js -zbc.createWorker('retrieveMoney', job => { - try { - // ... - } catch (e) { - job.fail('Could not retrieve money due to: ' + e.message, (job.retries - 1)) - } -}); -``` - -### Using incidents - -Whenever a job fails with a retry count of `0`, an incident is raised. An incident requires human intervention, typically using Operate. See [incidents in the Operate docs](/docs/components/operate/userguide/resolve-incidents-update-variables/). - -### Writing idempotent workers - -Zeebe uses the **at-least-once strategy** for job handlers, which is a typical choice in distributed systems. This means that the process instance only advances in the happy case (the job was completed, the workflow engine received the complete job request and committed it). A typical failure case occurs when the worker that polled the job crashes and cannot complete the job anymore. [In this case, the workflow engine gives the job to another worker after a configured timeout](/docs/components/concepts/job-workers#timeouts). This ensures that the job handler is executed at least once. - -But this can mean that the handler is executed more than once! You need to consider this in your handler code, as the handler might be called more than once. The [technical term describing this is idempotency](https://en.wikipedia.org/wiki/Idempotence). - -For example, typical strategies are described in [3 common pitfalls in microservice integration — and how to avoid them](https://medium.com/3-common-pitfalls-in-microservice-integration-and-how-to-avoid-them-3f27a442cd07). One possibility is to ask the service provider if it has already seen the same request. A more common approach is to implement the service provider in a way that allows for duplicate calls. There are two ways of mastering this: - -- **Natural idempotency**. Some methods can be executed as often as you want because they just flip some state. Example: `confirmCustomer()`. -- **Business idempotency**. Sometimes you have business identifiers that allow you to detect duplicate calls (e.g. by keeping a database of records that you can check). Example: `createCustomer(email)`. - -If these approaches do not work, you will need to add a **custom idempotency handling** by using unique IDs or hashes.
For example, you can generate a unique identifier and add it to the call. This way, a duplicate call can be easily spotted if you store that ID on the service provider side. If you leverage a workflow engine, you can probably let it do the heavy lifting. Example: `charge(transactionId, amount)`. - -Whatever strategy you use, make sure that you’ve considered idempotency consciously. - -## Handling errors on the process level - -You often encounter deviations from the "happy path" (the default scenario with a positive outcome) which should be modeled in the process model. - -### Using BPMN error events - -A common way to resolve these deviations is using a BPMN error event, which allows a process model to react to errors within a task. For example:
    - -1 - -We decide that we want to deal with an exception in the process: in case the invoice cannot be sent automatically... - -2 - -...we assign a task to a human user, who is now in charge of taking care of delivering the invoice. - -Learn more about the usage of [error events](/docs/components/modeler/bpmn/error-events/) in the user guide. - -### Throwing and handling BPMN errors - -In BPMN process definitions, we can explicitly model an end event as an error. - -
- -1 - -In case the item is not available, we finish the process with an **error end event**. - -:::note -You can mimic a BPMN error in your glue code by using the [`ThrowError`](../../../apis-tools/grpc.md#throwerror-rpc) API. The consequences for the process are the same as if it were an explicit error end event. So, in case your 'purchase' activity is not a subprocess, but a service task, it could throw a BPMN error informing the process that the good is unavailable. -::: - -Example in Java: - -```java -jobClient.newThrowErrorCommand(job) - .errorCode("GOOD_UNAVAILABLE") - .errorMessage("Good is unavailable") - .send() - .exceptionally(t -> {throw new RuntimeException("Could not throw BPMN error: " + t.getMessage(), t);}); -``` - -### Thinking about unhandled BPMN exceptions - -It is crucial to understand that according to the BPMN spec, a BPMN error is either handled via the process or **terminates the process instance**. It does not lead to an incident being raised. Therefore, you can and normally should always handle the BPMN error. You can, of course, also handle it in a parent process scope like in the example below:
    - -1 - -The boundary error event deals with the case that the item is unavailable. - -### Distinguishing between exceptions and results - -As an alternative to throwing a Java exception, you can also write a problematic result into a process variable and model an XOR-Gateway later in the process flow to take a different path if that problem occurs. - -From a business perspective, the underlying problem then looks less like an error and more like a result of an activity, so as a rule of thumb we deal with *expected results* of activities by means of gateways, but model exceptional errors, which *hinder us in reaching the expected result* as boundary error events. - -
    - -1 - -The task is to "check the customer's credit-worthiness", so we can reason that we *expect as a result* to know whether the customer is credit-worthy or not. - -2 - -We can therefore model an *exclusive gateway* working on that result and decide via the subsequent process flow what to do with a customer who is not credit-worthy. Here, we just consider the order to be declined. - -3 - -However, it could be that we *cannot reach a result*, because while we are trying to obtain knowledge about the customer's creditworthiness, we discover that the ID we have is not associated with any known real person. We can't obtain the expected result and therefore model a *boundary error event*. In the example, the consequence is just the same and we consider the order to be declined. - -### Business vs. technical errors - -Note that you have two different ways of dealing with problems at your disposal now: - -- **Retrying**. You don't want to model the retrying, as you would have to add it to each and every service task. This will bloat the visual model and confuse business personnel. Instead, either retry or fall back to incidents as described above. This is hidden in the visual. -- Branch out **separate paths**, as described with the error event. - -In this context, we found the terms **business error** and **technical error** can be confusing, as they emphasize the source of the error too much. This can lead to long discussions about whether a certain problem is technical or not, and if you are allowed to see technical errors in a business process model. - -It's much more important to look at how you react to certain errors. Even a technical problem can qualify for a business reaction. In the above example, upon technical problems with the invoice service you can decide to manually send the invoice (business reaction) or to retry until the invoice service becomes available again (technical reaction). - -Or, for example, you could decide to continue a process in the event that a scoring service is not available, and simply give every customer a good rating instead of blocking progress. The error is clearly technical, but the reaction is a business decision. - -In general, we recommend talking about business reactions, which are modeled in your process, and technical reactions, which are handled generically using retries or incidents. - -## Embracing business transactions and eventual consistency - -### Technical vs business transactions - -Applications using databases can often leverage ACID (atomic, consistent, isolated, durable) capabilities of that database. This means that some business logic is either successfully committed as a whole, or rolled back completely in case of any error. It is normally referred to as "transactions". - -Those ACID transactions cannot be applied to distributed systems (the talk [lost in transaction](https://berndruecker.io/lost-in-transaction/) elaborates on this), so if you call out to multiple services from a process, you end up with separate ACID transactions at play. The following illustrations are taken from the O'Reilly book [Practical Process Automation](https://processautomationbook.com/): - -![Multiple ACID transactions](dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png) - -In the above example, the CRM system and the billing system have their local ACID transactions. The workflow engine itself also runs transactional. 
However, there cannot be a joined technical transaction. This requires a new way of dealing with consistency on the business level, which is referred to as **business transaction**: - -![Business vs technical transaction](dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png) - -A **business transaction** marks a section in a process for which 'all or nothing' semantics (similar to a technical transaction) should apply, but from a business perspective. You might encounter inconsistent states in between (for example, a new customer being present in the CRM system, but not yet in the billing system). - -### Eventual consistency - -It is important to be aware that these temporary inconsistencies are possible. You also have to understand the failure scenarios they can cause. In the above example, you could have created a marketing campaign at a moment when a customer was already in the CRM system, but not yet in billing, so they got included in that list. Then, even if their order gets rejected and they never end up as an active customer, they might still receive an upgrade advertisement. - -You need to understand the effects of this happening. Furthermore, you have to think about a strategy to resolve inconsistencies. The term **eventual consistency** suggests that you need to take measures to get back to a consistent state eventually. In the onboarding example, this could mean you need to deactivate the customer in the CRM system if adding them to the billing system fails. This leads to the consistent state that the customer is not visible in any system anymore. - -### Business strategies to handle inconsistency - -There are three basic strategies if a consistency problem occurs: - -- Ignore it. While it sounds strange to consider ignoring a consistency issue, it actually can be a valid strategy. It’s a question of how much business impact the inconsistency may have. -- Apologize. This is an extension of the strategy to ignore. You don’t try to prevent inconsistencies, but you do make sure that you apologize when their effects come to light. -- Resolve it. Tackle the problem head-on and actively resolve the inconsistency. This could be done by different means, such as the reconciliation jobs mentioned earlier, but this practice focuses on how BPMN can help by looking into the Saga pattern. - -Selecting the right strategy is a clear business decision, as none of them are right or wrong, but simply more or less well suited to the situation at hand. You should always think about the cost/value ratio. - -### The Saga pattern and BPMN compensation - -The Saga pattern describes long-running transactions in distributed systems. The main idea is simple: when you can’t roll back tasks, you undo them. (The name Saga refers back to a paper written in the 1980s about long-lived transactions in databases.) - -Camunda supports this through BPMN compensation events, which can link tasks with their undo tasks. - -:::caution Camunda Platform 7 Only -Compensation is [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md) and only available in Camunda Platform 7. -::: - -
    - -1 - -Assume the customer was already added to the CRM system... - -2 - -...when an error occurred... - -3 - -...the process triggers the compensation to happen. This will roll back the business transaction. - -4 - -All compensating activities of successfully completed tasks will be executed, in this case also this one. - -5 - -As a result, the customer will be deactivated, as the API of the CRM system might not allow to simply delete it. diff --git a/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.svg b/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.svg deleted file mode 100644 index 481b8154ac3..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.svg +++ /dev/null @@ -1 +0,0 @@ -TweettweetId :Longcontent :Stringauthor :Employeereviewer :Employee...Komplexe Klasse \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.uml.xml b/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.uml.xml deleted file mode 100644 index 3ea1c8af45e..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.uml.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - -ComplexClass -150,137,30,30 -false - -#000000 - -{"items":[{"static":false,"visibility":"","name":"tweetId","derived":false,"property":"","multiplicity":"","type":"Long","readonly":false,"defaultvalue":""},{"static":false,"visibility":"","name":"content","derived":false,"property":"","multiplicity":"","type":"String","readonly":false,"defaultvalue":""},{"static":false,"visibility":"","name":"author","derived":false,"property":"","multiplicity":"","type":"Employee","readonly":false,"defaultvalue":""},{"static":false,"visibility":"","name":"reviewer","derived":false,"property":"","multiplicity":"","type":"Employee","readonly":false,"defaultvalue":""},{"visibility":"","name":"...","property":"","multiplicity":"","type":"","defaultvalue":""}],"totalCount":5} -#ffffff - -Tweet - - # - - - - -Diagram -1485,1050,0,0 -horizontal - - - - - diff --git a/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes.md b/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes.md deleted file mode 100644 index 4fbc8cbd346..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/handling-data-in-processes.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: "Handling data in processes" -tags: - - BPMN Data Object - - Variable - - Serialization -description: "Understand data handling in Camunda, store relevant data and references, study use cases for storing payload, use constants and data accessors, and more." ---- - -When using Camunda, you have access to a dynamic map of process variables, which lets you associate data to every single process instance (and local scopes in case of user tasks or parallel flows). Ensure you use these mechanisms in a lightweight and meaningful manner, storing just the relevant data in the process instance. 
- -Depending on your programming language, consider accessing your process variables in a type safe way, centralizing (simple and complex) type conversion and using constants for process variable names. - -## Understanding data handling in Camunda - -When reading and interpreting a business process diagram, you quickly realize there is always data necessary for tasks, but also to drive the process through gateways to the correct next steps. - -Examine the following tweet approval process example: - -
    - -1 - -The process instance starts with a freshly written `tweet` we need to remember. - -2 - -We need to present this `tweet` so that the user can decide whether to `approve` it. - -3 - -The gateway needs to have access to this information: was the tweet `approved`? - -4 - -To publish the tweet, the service task again needs the `tweet` itself! - -Therefore, the tweet approval process needs two variables: - -| Variable name | Variable type | Sample value | -| -- | -- | -- | -| `tweet` | String | "@Camunda rocks" | -| `approved` | Boolean | true | - -In Camunda Cloud, [values are stored as JSON](/docs/components/concepts/variables/#variable-values). - -:::caution Camunda Platform 7 handles variables slightly differently -This best practice describes variable handling within Camunda Cloud. Process variables are handled slightly differently with Camunda Platform 7. Consult the [Camunda Platform 7 documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/variables/) for details. In essence, variable values are not handled as JSON and thus there are [different values](https://docs.camunda.org/manual/latest/user-guide/process-engine/variables/#supported-variable-values) supported. -::: - -You can dynamically create such variables by assigning an object of choice to a (string typed) variable name; for example, by passing a `Map` when [completing](../../../apis-tools/tasklist-api/mutations/complete-task.mdx) the "Review tweet" task via the API: - -``` -// TODO: Double check! -completeTask( - taskId: "547811" - variables: [ - { - name: "approved" - value: true - } - ] -) -``` - -In Camunda, you do *not* declare process variables in the process model. This allows for a lot of flexibility. See recommendations below on how to overcome possible disadvantages of this approach. - -Consult the [docs about variables](/docs/components/concepts/variables/#variable-values) to learn more. - -Camunda does not treat BPMN **data objects** () as process variables. We recommend using them occasionally *for documentation*, but you need to [avoid excessive usage of data objects](../../modeling/creating-readable-process-models#avoiding-excessive-usage-of-data-objects). - -## Storing just the relevant data - -Do not excessively use process variables. As a rule of thumb, store *as few variables as possible* within Camunda. - -### Storing references only - -If you have leading systems already storing the business relevant data... - -![Hold references only](handling-data-in-processes-assets/hold-references-only.svg) - -...then we suggest you store references only (e.g. ID's) to the objects stored there. So instead of holding the `tweet` and the `approved` variable, the process variables would now, for example, look more like the following: - -| Variable name | Variable type | Value | -| -- | -- | -- | -| `tweetId` | Long | 8213 | - -### Use cases for storing payload - -Store *payload* (actual business data) as process variables, if you.... - -* ...have data only of interest within the process itself (e.g. for gateway decisions). - -In case of the tweet approval process, even if you are using a tweet domain object, it might still be meaningful to hold the approved value explicitly as a process variable, because it serves the purpose to guide the gateway decision in the process. It might not be true if you want to keep track in the tweet domain objects regarding the approval. 
- -| Variable name | Variable type | Value | -| -- | -- | -- | -| `tweetId` | Long | 8213 | -| `approved` | Boolean | true | - -* ...communicate in a *message oriented* style. For example, retrieving data from one system and handing it over to another system via a process. - -When receiving external messages, consider storing just those parts of the payload relevant for you, and not the whole response. This not only serves the goal of having a lean process variables map, it also makes you more independent of changes in the service's message interface. - -* ...want to use the process engine as kind of *cache*. For example, you cannot query relevant customer data in every step for performance reasons. - -* ...need to *postpone data changes* in the leading system to a later step in the process. For example, you only want to insert the Tweet in the Tweet Management Application if it is approved. - -* ...want to track the *historical development* of the data going through your process. - -* ...don't have a leading system for this data. - -## Using constants and data accessors - -Avoid the copy/paste of string representations of your process variable names across your code base. Collect the variable names for a process definition in *constants*. For example, in Java: - -```java -public interface TwitterDemoProcessConstants { - String VAR_NAME_TWEET = "tweet"; - String VAR_NAME_APPROVED = "approved"; -} -``` - -This way, you have much more security against typos and can easily make use of refactoring mechanisms offered by your IDE. - -However, if you also want to solve necessary type conversions (casting) or probably even complex serialization logic, we recommend that you use a **Data Accessor** class. It comes in two flavors: - -* A **Process Data Accessor**: Knows the names and types of all process variables of a certain process definition. It serves as the central point to declare variables for that process. -* A **Process Variable Accessor**: Encapsulates the access to exactly one variable. This is useful if you reuse certain variables in different processes. - -Consider, for example, the BPMN "Publish on Twitter" task in the Tweet Approval Process: - -
    - -1 - -We use a **TweetPublicationDelegate** to implement the "Publish on Twitter" task: - -```java -public class PublishTweetJobHandler implements JobHandler { - public void handle(JobClient client, ActivatedJob job) throws Exception { - String tweet = job.getVariablesAsType(TwitterDemoProcessVariables.class).getTweet(); - // ... -``` - -As you can see, the `tweet` variable is accessed in a type safe way. - -This reusable **Process Data Accessor** class could, for example, be a simple object. The Java client API can automatically deserialize the process variables as JSON into this object, while all process variables that are not found in that class are ignored. - -```java -public class TwitterDemoProcessVariables { - - private String tweet; - private boolean approved; - - public String getTweet() { - return tweet; - } - - public void setTweet(String tweet) { - this.tweet = tweet; - } -} -``` - -The getters and setters could further take care of additional serialization and deserialization logic for complex objects. - -Your specific implementation approach might differ depending on the programming language and framework you are using. - -## Complex data as entities - -There are some use cases when it is clever to *introduce entities alongside the process* to store complex data in a relational database. You can see this logically as *typed process context* where you create custom tables for your custom process deployment. Then, you can even use **Data** **Accessor** classes to access these entities in a convenient way. - -You will only store a reference to the entity's primary key (typically an artificial UUID) as real process variable within Camunda. - -Some people refer to this as **externalized process context**. - -There are a couple of advantages of this approach: - -* You can do very *rich queries* on structured process variables via normal SQL. -* You can apply custom *data migration strategies* when deploying new versions of your process or services, which require data changes. -* Data can be designed and modeled properly, even graphically by, for example, leveraging UML. - -It requires additional complexity by adding the need for a relational database and code to handle this. diff --git a/versioned_docs/version-1.3/components/best-practices/development/invoking-services-from-the-process-assets/external-task-pattern.png b/versioned_docs/version-1.3/components/best-practices/development/invoking-services-from-the-process-assets/external-task-pattern.png deleted file mode 100644 index 88cb03a5f0a..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/invoking-services-from-the-process-assets/external-task-pattern.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/invoking-services-from-the-process-c7.md b/versioned_docs/version-1.3/components/best-practices/development/invoking-services-from-the-process-c7.md deleted file mode 100644 index 2fc2d10a04b..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/invoking-services-from-the-process-c7.md +++ /dev/null @@ -1,732 +0,0 @@ ---- -title: "Invoking services from a Camunda 7 process" -tags: - - Service - - Java Delegate - - Expression Language - - External Task - - REST - - SOAP - - JMS - - Camel - - ESB - - SQL - - SAP ---- - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only! 
If you are using Camunda Cloud, visit [connecting the workflow engine with your world](../connecting-the-workflow-engine-with-your-world/).
-:::
-
-Access business logic implemented for the Java VM and remote services by means of small pieces of glue code. This glue code maps process input/output to your business logic using best-of-breed libraries of your own choosing.
-
-In most cases, you should use a pull approach, where external worker threads query Camunda for **external tasks**. Sometimes, you might also attach **JavaDelegates** to your model, and in case you need to define totally self-contained BPMN process definitions, you may want to leverage scripts or expressions for small pieces of logic.
-
-## Understanding the possibilities
-
-### Push and pull
-
-There are two patterns available to glue your code to a process model:
-
-* **Push:** The process engine actively issues a **service call** (or executes a **script**) via the mechanisms described below. The workflow engine pushes the work.
-* **Pull:** External worker threads query the process engine API for **external tasks**, and they pull the work. Then, they do the actual work and notify the process engine of the work's completion.
-
-### External tasks
-
-An **external task** is a task that waits to be completed by some external service worker without explicitly calling that service. It's configured by declaring a **topic** (which characterizes the type of the service). The Camunda API must be polled to retrieve open external tasks for a certain service's topic and must be informed about the completion of a task:
-
-![External task pattern](invoking-services-from-the-process-assets/external-task-pattern.png)
-
-The interaction with the external task API can be done in two different ways:
-
-* Use [Camunda's external task client libraries](https://docs.camunda.org/manual/latest/user-guide/ext-client/) for [Java](https://github.com/camunda/camunda-external-task-client-java) or [Node.js](https://github.com/camunda/camunda-external-task-client-js). These libraries make it very easy to implement your external task worker.
-
-* Create your own client for Camunda's REST API based on the [Camunda OpenAPI specification](https://docs.camunda.org/manual/latest/reference/rest/openapi/), possibly via code generation. This approach allows you to generate code for every programming language and also covers the full REST API, not only external tasks.
-
-Using external tasks comes with the following advantages:
-
-* **Temporal decoupling**: The pattern can replace a message queue between the service task (the "consumer") and the service implementation (the "provider"). It can eliminate the need for operating a dedicated message bus while keeping the decoupling that messaging would provide.
-
-* **Polyglot architectures**: The pattern can be used to integrate .NET-based services, for example, when it might not be that easy to write Java delegates to call them. Service implementations are possible in any language that can interact with a REST API.
-
-* **Better scaling**: The pattern allows you to start and stop workers as you like, and run as many of them as you need. By doing so, you can scale each service task (or to be precise, each "topic") individually.
-
-* **Connect cloud and on-premises**: The pattern supports you in running Camunda somewhere in the cloud (as our customers often do), because you can still have services on-premises, as they can now query their work via REST over SSL, which is also quite firewall-friendly.
-
-* **Avoid timeouts**: The pattern allows you to asynchronously call long-running services, which eventually block for hours (and would therefore cause transaction and connection timeouts when being called synchronously).
-
-* **Run services on specialized hardware**: Each worker can run in the environment that is best suited for the specific task of that worker; for example, CPU-optimized cloud instances for complex image processing and memory-optimized instances for other tasks.
-
-Learn more about external tasks in the [user guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) as well as the [reference](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/service-task/#external-tasks) and explore the video processing example shown above in greater detail by reading the [blog post](https://blog.camunda.org/post/2015/11/external-tasks/) about it.
-
-:::note
-Camunda Cloud focuses on the external task pattern; there are no Java Delegates available, as explained in [this blog post](https://blog.bernd-ruecker.com/how-to-write-glue-code-without-java-delegates-in-camunda-cloud-9ec0495d2ba5).
-:::
-
-### Java Delegates
-
-A Java Delegate is a simple Java class that implements the Camunda `JavaDelegate` interface. It allows you to use **dependency injection** as long as it is constructed as a Spring or CDI bean and connected to your BPMN `serviceTask` via the `camunda:delegateExpression` attribute:
-
-```xml
-<serviceTask id="publishTweet" name="Publish on Twitter"
-    camunda:delegateExpression="#{tweetPublicationDelegate}" />
-```
-
-Leverage dependency injection to get access to your *business service* beans from the delegate. Consider a delegate to be a semantic part of the process definition in a wider sense: it takes care of the nuts and bolts needed to wire the business logic to your process. Typically, it does the following:
-
-1. Data input mapping
-2. Calling a method on the business service
-3. Data output mapping
-
-:::note
-Avoid programming business logic into Java Delegates. Separate this logic by calling one of your own classes as a business service, as shown below.
-:::
-
-```java
-@Named
-public class TweetPublicationDelegate implements JavaDelegate {
-
-  @Inject
-  private TweetPublicationService tweetPublicationService;
-
-  public void execute(DelegateExecution execution) throws Exception {
-    String tweet = new TwitterDemoProcessVariables(execution).getTweet(); // <1>
-    // ...
-    try {
-      tweetPublicationService.tweet(tweet); // <2>
-    } catch (DuplicateTweetException e) {
-      throw new BpmnError("duplicateMessage"); // <3>
-    }
-  }
-  //...
-```
-
-1
-
-Retrieving the value of this process variable belongs to what we call the **input mapping** of the delegate code, and is therefore considered to be part of the wider process definition.
-
-2
-
-This method executes process engine-independent **business logic**. It is therefore not part of the wider process definition anymore and is placed in a separate business service bean.
-
-3
-
-This exception is process engine-specific and therefore typically not produced by your business service method. It's part of the **output mapping**, in which we need to translate the business exception into the exception needed to drive the process - again code that is part of the "wider" process definition and to be implemented in the Java Delegate.
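-
-The business service called from the delegate can then stay a plain bean without any engine dependencies. As a minimal sketch - the class and exception names simply mirror the snippet above, and the actual call to Twitter is left out, as it depends on the client library you choose:
-
-```java
-import javax.inject.Named;
-
-/**
- * Plain business service: it knows nothing about Camunda, so it can be
- * unit tested and reused outside of the process.
- */
-@Named
-public class TweetPublicationService {
-
-  public void tweet(String tweet) {
-    // 1. call the Twitter API of your choice here
-    // 2. translate its duplicate-tweet error into your own business exception:
-    //    throw new DuplicateTweetException();
-  }
-}
-
-/** Business exception caught by the delegate and mapped to a BpmnError there. */
-class DuplicateTweetException extends RuntimeException {
-}
-```
-
-This separation keeps the delegate focused on input and output mapping, while the business service stays testable on its own.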
-
-In case you want to create Java Delegates that are **reusable** for other process definitions, leverage [field injection](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/#field-injection) to pass configuration from the BPMN process definition to your Java Delegate.
-
-One advantage of using Java Delegates is that, if you develop in Java, this is a very simple way to write code and connect it with your process model, especially in embedded engine scenarios.
-
-## Selecting the implementation approach
-
-### General recommendation
-
-In general, we *recommend using external tasks* to apply a general architecture and mindset that allows you to [leverage Camunda Cloud more easily](/guides/migrating-from-Camunda-Platform.md#prepare-for-smooth-migrations). This typically outweighs the following downsides of external tasks:
-
-* A slightly increased complexity for Java projects, because they have to handle separate Java clients.
-* A slightly increased overhead compared to Java Delegates, as all communication with the engine is remote, even if it runs in the same Java VM.
-
-Only if the increased latency does not work for your use case - for example, because you need to execute a 30-task process synchronously to generate a REST response within a handful of milliseconds - should you consider Java Delegates (or consider switching to Camunda Cloud).
-
-### Detailed comparison
-
-| | Java Delegate | Expression | Connector | External Task | Script Task |
-| - | - | - | - | - | - |
-| **What it does** | Call a named bean (via `camunda:delegateExpression`) or a Java class (via `camunda:class`) implementing the `JavaDelegate` interface. | Evaluate an expression using JUEL. | Use a configurable connector (REST or SOAP services provided out-of-the-box). | Pull a service task into an external worker thread and inform the process engine of completion. | Execute a script inside the engine. |
-| **Use with BPMN elements** | Service task, send task, message intermediate throw event | Service task, send task, message intermediate throw event | Service task, send task, message intermediate throw event | Service task, send task, message intermediate throw event | Script task |
-| **Communication direction** | Push work item by issuing a service call. | Push work item by issuing a service call. | Push work item by issuing a service call. | Pull task from a worker thread. | Push work item by executing a script. |
-| **Technology** | Use your preferred framework, e.g. a JAX-WS client to call SOAP web services. | Use your preferred framework, e.g. a JAX-WS client to call SOAP web services. | Use the REST/SOAP connector and a message template. | Use the Camunda external task client or the REST API to query for work. | Use a JSR-223 compliant scripting engine. |
-| **Implement via** | Java (in the same JVM) | Expression language (can reference Java code) | BPMN configuration | BPMN configuration and external pull logic | E.g. Groovy, JavaScript, JRuby, or Jython |
-| **Code completion and refactoring** | ✔ | Maybe | - | ✔ | Depends on language / IDE |
-| **Compiler checks** | ✔ | - | - | ✔ | Depends on language / IDE |
-| **Dependency injection** | ✔ for named beans (when using Spring, CDI, ...); not available with `camunda:class` | ✔ (when using Spring, CDI, ...) | - | - | - |
-| **Forces on testing** | Register mocks instead of original beans (named bean), or mock the business logic inside the JavaDelegate (Java class). | Register mocks instead of original beans. | Difficult because of the lack of dependency injection. | Easy, as the service is not actively called. | Consider external script resources. |
-| **Configure via** | BPMN attribute `camunda:delegateExpression` or `camunda:class` on the `serviceTask` | BPMN attribute `camunda:expression` on the `serviceTask` | BPMN extension element `camunda:connector` on the `serviceTask` | BPMN attributes `camunda:type="external"` and `camunda:topic` on the `serviceTask` | BPMN `script` element or `camunda:resource` attribute on the `scriptTask` |
-| **Fault tolerance and retrying** | Handled by Camunda retry strategies and incident management. | Handled by Camunda retry strategies and incident management. | Handled by Camunda retry strategies and incident management. | Lock tasks for a defined time; use Camunda's retry and incident management. | Handled by Camunda retry strategies and incident management. |
-| **Scaling (multiple worker threads)** | Via a load balancer in front of the service | Via a load balancer in front of the service | Via a load balancer in front of the service | Multiple worker threads can be started. | Via job executor configuration |
-| **Throttling (e.g. one request at a time)** | Not possible out-of-the-box; requires your own throttling logic. | Not possible out-of-the-box; requires your own throttling logic. | Not possible out-of-the-box; requires your own throttling logic. | Start or stop exactly as many worker threads as you need. | Not possible out-of-the-box. |
-| **Reusable tasks** | Use field injection. | Use method parameters. | Build your own connector. | Reuse external task topics and configure the service via variables. | - |
-| **Use when** | External tasks do not work for your use case. | Defining small pieces of logic directly in BPMN. | Defining a self-contained BPMN process without Java code. | Always, if there is no reason against it. | Defining BPMN processes without Java code. |
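-
-To make the recommended external task option concrete, a minimal worker could look roughly like this. This is a sketch based on Camunda's external task client for Java; the REST endpoint, topic name, and variable name are illustrative:
-
-```java
-import org.camunda.bpm.client.ExternalTaskClient;
-
-public class TweetPublicationWorker {
-
-  public static void main(String[] args) {
-    ExternalTaskClient client = ExternalTaskClient.create()
-        .baseUrl("http://localhost:8080/engine-rest") // illustrative REST endpoint of the engine
-        .asyncResponseTimeout(10000)                  // use long polling
-        .build();
-
-    client.subscribe("publish-tweet")                 // illustrative topic name
-        .lockDuration(20000)
-        .handler((externalTask, externalTaskService) -> {
-          String tweet = externalTask.getVariable("tweet");
-          // call your business service here, then report completion (or a failure / BPMN error)
-          externalTaskService.complete(externalTask);
-        })
-        .open();
-  }
-}
-```
-
-Scaling and throttling then simply translate into starting more or fewer instances of such a worker.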
    - -## Dealing with problems and exceptions - -When invoking services, you can experience faults and exceptions. See our separate best practices about: - -* [Understanding Camunda 7 transaction handling](../understanding-transaction-handling-c7/) -* [Dealing with problems and exceptions](../dealing-with-problems-and-exceptions/). - -## Example technology solutions - -### Calling SOAP web services - -When you need to call a SOAP web service, you will typically be given access to a machine-readable, WSDL-based description of the service. You can then use [JAX-WS](http://docs.oracle.com/javaee/6/tutorial/doc/bnayl.html) and (for example) Apache CXF's [JAX-WS client generation](http://cxf.apache.org/docs/maven-cxf-codegen-plugin-wsdl-to-java.html) to generate a Java Web Service Client by making use of a Maven plugin. That client can be called from within your JavaDelegate. - -Find a full example that uses JAX-WS client generation in the [Camunda examples repository](https://github.com/camunda/camunda-bpm-examples/tree/master/servicetask/soap-cxf-service). - -We typically prefer the client code generation over using the [Camunda SOAP Connector](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/), because of the better IDE support to do the data mapping by using code completion. You also can leverage standard testing approaches and changes in the WSDL will re-trigger code-generation and your compiler will check for any problems that arise from a changed interface. However, if you need a self-contained BPMN XML without any additional Java code, the connector could be the way to go. See [SOAP connector example](https://github.com/camunda/camunda-bpm-examples/tree/master/servicetask/soap-service). - -### Calling REST web services - -If you need to call a REST web service, you will typically be given access to a human-readable documentation of the service. You can use standard Java REST client libraries like [RestEasy](http://resteasy.jboss.org) or [JAX-RS](http://docs.oracle.com/javaee/6/tutorial/doc/giepu.html) to write a Java REST service client that can be called from within a JavaDelegate. - -We typically prefer writing Java clients over the [Camunda REST Connector](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/), because of the better IDE support to do the data mapping by using code completion. This way, you also can leverage standard testing approaches. However, if you need a self-contained BPMN XML without any additional Java code, the connector could be the way to go. See [REST connector example](https://github.com/camunda/camunda-bpm-examples/tree/master/servicetask/rest-service). 
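-
-For the hand-written client approach, a rough sketch could look like this, using the JDK's built-in `HttpClient` (available from Java 11); the URL and variable names are illustrative:
-
-```java
-import java.net.URI;
-import java.net.http.HttpClient;
-import java.net.http.HttpRequest;
-import java.net.http.HttpResponse;
-
-import javax.inject.Named;
-
-import org.camunda.bpm.engine.delegate.DelegateExecution;
-import org.camunda.bpm.engine.delegate.JavaDelegate;
-
-@Named
-public class GetCustomerDelegate implements JavaDelegate {
-
-  private final HttpClient httpClient = HttpClient.newHttpClient();
-
-  @Override
-  public void execute(DelegateExecution execution) throws Exception {
-    // input mapping: read data already known to the process instance
-    String customerId = (String) execution.getVariable("customerId");
-
-    HttpRequest request = HttpRequest.newBuilder()
-        .uri(URI.create("https://example.com/api/customers/" + customerId)) // illustrative URL
-        .GET()
-        .build();
-
-    HttpResponse<String> response =
-        httpClient.send(request, HttpResponse.BodyHandlers.ofString());
-
-    // output mapping: keep only what the process really needs as a variable
-    execution.setVariable("customerJson", response.body());
-  }
-}
-```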
-
-### Sending JMS messages
-
-When you need to send a JMS message, use a plain Java client and invoke it from a service task in your process; for example, by using a Camunda Java Delegate:
-
-```java
-@Named("jmsSender")
-public class SendJmsMessageDelegate implements JavaDelegate {
-
-  @Resource(mappedName = "java:/queue/order")
-  private Queue queue;
-
-  @Resource(mappedName = "java:/JmsXA")
-  private QueueConnectionFactory connectionFactory;
-
-  public void execute(DelegateExecution execution) throws Exception {
-    String correlationId = UUID.randomUUID().toString(); // <1>
-    execution.setVariable("jmsCorrelationId", correlationId);
-
-    Connection connection = connectionFactory.createConnection(); // <2>
-    Session session = connection.createSession(true, Session.AUTO_ACKNOWLEDGE);
-    MessageProducer producer = session.createProducer(queue);
-
-    TextMessage message = session.createTextMessage( // <3>
-        "someOwnContent, e.g. Tweet Object Data, plus " + correlationId); // <4>
-    producer.send(message);
-
-    producer.close();
-    session.close();
-    connection.close();
-  }
-
-}
-```
-
-1
-
-Consider what information you can use to correlate an asynchronous response back to your process instance. We typically prefer a generated, artificial UUID for communication, which the waiting process will also need to remember.
-
-2
-
-You will need to open and close JMS connections, sessions, and producers. Note that this example just serves to get you started. In real life, you will need to decide which connections you need to open, and of course, properly close.
-
-3
-
-You will need to create and send your specific message.
-
-4
-
-Add relevant business data to your message together with correlation information.
-
-:::danger
-This example just serves to get you started. In real life, consider whether you need to encapsulate the JMS client in a separate class and just wire it from the Java Delegate. Also decide which connections you need to open, and at which points you need to close them properly.
-:::
-
-On GitHub, you can find a more complete example for [asynchronous messaging with JMS](https://github.com/camunda/camunda-consulting/tree/master/snippets/asynchronous-messaging-jms).
-
-### Using SQL to access the database
-
-Use plain JDBC if you have simple requirements. Invoke your SQL statement from a service task in your process; for example, by using a Camunda Java Delegate:
-
-```java
-@Named("simpleSqlDelegate")
-public class SimpleSqlDelegate implements JavaDelegate {
-
-  @Resource(name="customerDB")
-  private javax.sql.DataSource customerDB;
-
-  public void execute(DelegateExecution execution) throws Exception {
-    PreparedStatement statement = null;
-    Connection connection = null;
-
-    try {
-      connection = customerDB.getConnection();
-      String query = "SELECT name " +                                       // <1>
-                     "FROM customer " +
-                     "WHERE id = ?";
-      statement = connection.prepareStatement(query);
-      statement.setString(1, execution.getProcessBusinessKey());            // <2>
-      ResultSet resultSet = statement.executeQuery();
-      if (resultSet.next()) {
-        execution.setVariable("customerName", resultSet.getString("name")); // <3>
-      }
-    } finally {
-      if (statement != null) statement.close();
-      if (connection != null) connection.close();
-    }
-  }
-
-}
-```
-
-1
-
-You will need to define your SQL statement. Using a prepared statement lets you bind parameters safely and reuse the statement object many times.
-
-2
-
-You will typically need to feed parameters into your SQL query that are already known during execution of the process instance...
- -3 - -...and deliver back a potential result that maybe needed later in the process. - -:::danger -This example just serves to get you started. For real life, consider whether you need to encapsulate the JDBC code in a separate class and just wire it from the Java Delegate. Also decide which connections you need to open and close properly at which point. -::: - -Note that the Camunda process engine will have opened a database transaction for its own persistence purposes when calling the Java Delegate shown above. You will need to make a conscious decision if you want to join that transaction (and setup your TX management accordingly). - -Instead of invoking SQL directly, consider using [JPA](http://www.oracle.com/technetwork/java/javaee/tech/persistence-jsp-140049.html) if you have more complex requirements. Its object/relational mapping techniques will allow you to bind database tables to Java objects and abstract from specific database vendors and their specific SQL dialects. - -### Calling SAP systems - -To call a **SAP** system, you have the following options: - -* Use REST or SOAP client calls, connecting Camunda to **SAP Netweaver Gateway** or **SAP Enterprise Services**. - -* Use **SAP's Java Connectors (JCo)**. Consider using some frameworks to make this easier, like the open-source frameworks of [Hibersap](https://github.com/hibersap). - -### Executing a Groovy script - -A script task... - - - - ...is defined by specifying the script and the `scriptFormat`. - -```xml - - - -``` - -For more extensive code (which should also be tested separately), consider using scripts external to your BPMN file and reference them with a `camunda:resource` attribute on the `scriptTask`. - -Learn more about the many ways scripts can be used with Camunda from our [user guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/scripting/). diff --git a/versioned_docs/version-1.3/components/best-practices/development/routing-events-to-processes.md b/versioned_docs/version-1.3/components/best-practices/development/routing-events-to-processes.md deleted file mode 100644 index 323341769b3..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/routing-events-to-processes.md +++ /dev/null @@ -1,535 +0,0 @@ ---- -title: Routing events to processes -tags: -- Event Handling -- Process Instantiation -- Message Handling -- Correlation -- SOAP -- JMS -- REST -- Camel -- ESB -- API -- BPMN Message Event -- BPMN Signal Event -- BPMN Timer Event ---- - -To start a new process instance or to route a message to an already running instance, you have to choose the appropriate technology option to do so, like using the existing API or using customized possibilities including SOAP, AMQP, or Kafka. Leverage the possibilities of the universe of your runtime (like Java or Node.js) and the frameworks of your choice to support the technologies or protocols you need. - -## Choosing the right BPMN event - -### Start events - -Several BPMN start events can be used to start a new process instance. - -| | None Event | Message Event | Timer Event | Signal Event | Conditional Event | -| - | - | - | - | - | - | -| | ![none start](/img/bpmn-elements/none-start.svg) | ![message start](/img/bpmn-elements/message-start.svg) | ![timer start](/img/bpmn-elements/timer-start.svg) | ![signal start](/img/bpmn-elements/signal-start.svg) | ![conditional start](/img/bpmn-elements/conditional-start.svg) | -| Use when | You have only **one start event** or a start event which is clearly standard. 
| You have to differentiate **several start events**. | You want to automatically start process instances **time controlled**. | You need to start **several process instances** at once. Rarely used. | When a specific **condition** is met, a process instance is created. | -| Supported for Execution | ✔ | ✔ | ✔ | Not yet supported in Camunda Cloud | Determine occurrence of condition externally yourself and use the message event. | -| | [Learn more](/docs/components/modeler/bpmn/none-events/) | [Learn more](/docs/components/modeler/bpmn/message-events/) | [Learn more](/docs/components/modeler/bpmn/timer-events/) | | | - - -
    - -1 - -This none start event indicates the typical starting point. Note that only *one* such start event can exist in one process definition. - -2 - -This message start event is defined to react to a specific message type... - -3 - -...hence you can have *multiple* message start events in a process definition. In this example, both message start events seems to be exceptional cases - for equivalent cases we recommend to just use message instead of none start events. - -### Intermediate events - -Several BPMN intermediate events (and the receive task) can be used to make a process instance *wait* for and *react* to certain triggers. - -| | Message Event | Receive Task | Timer Event | Signal Event| Conditional Event | -| - | - | - | - | - | - | -| | ![message intermediate](/img/bpmn-elements/message-intermediate.svg) | ![task receive](/img/bpmn-elements/task-receive.svg) | ![timer intermediate](/img/bpmn-elements/timer-intermediate.svg) | ![signal intermediate](/img/bpmn-elements/signal-intermediate.svg) | ![conditional intermediate](/img/bpmn-elements/conditional-intermediate.svg) | -| Use when | You route an incoming **message** to a specific and unique process instance. | As alternative to message events (to leverage BPMN boundary events, e.g. for timeouts). | You want to make your process instance wait for a certain (point in) **time**. | You route an incoming **signal** to all process instances waiting for it. | When a specific **condition** is met, the waiting process instance moves on. | -| Supported for Execution | ✔ | ✔ | ✔ | Not yet supported in Camunda Cloud | Not yet supported in Camunda Cloud | -| | [Learn more](/docs/components/modeler/bpmn/message-events/) | [Learn more](/docs/components/modeler/bpmn/receive-tasks/) | [Learn more](/docs/components/modeler/bpmn/timer-events/) | | - - -Consider this example: - -
    - -1 - -This intermediate message event causes the process instance to wait unconditionally for a *specific* event... - -2 - -...whereas the intermediate message event attached to the boundary of an activity waits for an *optional* event, potentially arriving while we are occupied with the activity. - -## Reacting to process-internal events - -Events relevant for the process execution can occur from within the workflow engine itself. - -Consider the following loan application process - or at least the initial part with which the applicant's income is confirmed either via the employer or via the last income tax statement. - -
-
-1
-
-In case the employer does not confirm the income within three business days, a **timer event** triggers and a human clerk now tries to contact the employer and investigate the situation.
-
-2
-
-This could end with a successful income confirmation. However, it could also end with new findings regarding the applicant's employment status. We learn that the applicant is actually unemployed.
-
-3
-
-In this case, a **conditional event** watching this data (e.g. a process variable changed by the human task) triggers and causes the process to reconsider the consequences of the new findings.
-
-:::caution Camunda Cloud does not yet support conditional events
-Camunda Cloud does not yet [support the conditional event](/docs/components/modeler/bpmn/bpmn-coverage/).
-:::
-
-A conditional event's condition expression is evaluated at its "scope" creation time, too, and not just when variable data changes. For our example of a boundary conditional event, that means that the activity it is attached to could in principle be left immediately via the boundary event. However, our process example evaluates the data via the exclusive gateway - therefore such a scenario is semantically impossible.
-
-## Routing events from the outside to the workflow engine
-
-Most events actually occur somewhere external to the workflow engine and need to be routed to it. The core workflow engine is by design not concerned with the technical part of receiving external messages, but you can receive messages and route them to the workflow engine in the following ways:
-
-* Using the API: Receive the message using your platform-specific means, such as connecting to an AMQP queue or processing a REST request, and then route it to the process.
-* Using connectors: Configure a connector to receive messages such as Kafka records and route them to the process. Note that this possibility works for Camunda Cloud only.
-
-### Using API
-
-:::caution Camunda Cloud
-The following code examples target Camunda Cloud.
-:::
-
-#### Starting process instance by BPMN process id
-
-If you have only one starting point (none start event) in your process definition, you reference the process definition by the ID in the BPMN XML file. This is the most common case and requires using the [`CreateProcessInstance`](../../../apis-tools/grpc.md#createprocessinstance-rpc) API.
-
-Example in Java:
-
-```java
-processInstance = zeebeClient.newCreateInstanceCommand()
-    .bpmnProcessId("invoice").latestVersion()
-    .send()
-    .exceptionally( throwable -> { throw new RuntimeException("Could not create new process instance", throwable); });
-```
-
-Example in Node.js:
-
-```js
-zbc.createWorkflowInstance({
-  bpmnProcessId: 'invoice'
-})
-```
-
-This starts a new process instance in the latest version of the process definition. You can also start a specific version of a process definition:
-
-```java
-processInstance = zeebeClient.newCreateInstanceCommand()
-    .bpmnProcessId("invoice").version(5)
-    //...
-```
-
-or
-
-```js
-zbc.createWorkflowInstance({
-  bpmnProcessId: 'invoice',
-  version: 6
-})
-```
-
-You can also use [`CreateProcessInstanceWithResult`](../../../apis-tools/grpc.md#createprocessinstancewithresult-rpc) instead, if you want to block the execution until the process instance has completed.
-
-#### Starting process instance by message
-
-As soon as you have multiple possible starting points, you have to use named messages to start process instances.
The API method is [`PublishMessage`](../../../apis-tools/grpc.md#publishmessage-rpc):
-
-```java
-client.newPublishMessageCommand()
-    .messageName("message_invoiceReceived") // <1>
-    .correlationKey(invoiceId) // <2>
-    .variables( // <3>
-        //...
-    ).send()
-    .exceptionally( throwable -> { throw new RuntimeException("Could not publish message", throwable); });
-```
-
-1
-
-Message name as defined in the BPMN.
-
-2
-
-A correlation key has to be provided, even if a start event does not require correlation.
-
-3
-
-*Payload* delivered with the message.
-
-On one hand, now you do not have to know the key of the BPMN process. On the other hand, you cannot influence the version of the process definition used when starting a process instance by message.
-
-The message name for start events should be unique for the whole workflow engine - otherwise you might experience side effects you did not intend (like starting other processes too).
-
-### Camunda Platform 7
-
-:::caution Camunda Platform 7.x
-The code snippets in this section target Camunda Platform 7.x. Camunda Cloud is shown above.
-:::
-
-#### Starting process instances by key
-
-If you have only one starting point, you reference the process definition by the ID in the BPMN XML file. This is the most common case.
-
-```java
-  processEngine.getRuntimeService().startProcessInstanceByKey("invoice"); // <1>
-```
-
-1
-
-Process *ID* defined in the BPMN. The API calls this ID the "key" of the process.
-
-See the [Process Engine API](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-api/) for more details.
-
-#### Starting process instances by message
-
-As soon as you have multiple possible starting points, you have to use named messages to start process instances.
-
-```java
-processEngine.getRuntimeService()
-  .createMessageCorrelation("message_invoiceReceived") // <1>
-  .setVariable("invoiceId", "123456") // <2>
-  .correlate();
-```
-
-1
-
-Message *name* defined in the BPMN
-
-2
-
-*Payload* delivered with the message
-
-On one hand, now you do not have to know the key of the BPMN process. On the other hand, you cannot influence the version of the process definition used when starting a process instance by message.
-
-The message name for start events has to be *unique* for the whole workflow engine - otherwise the engine will not know which process to start.
-
-#### Starting specific versions of process instances by ID
-
-See [versioning process definitions](../../operations/versioning-process-definitions/) for details on versioning of process definitions.
-
-By default, the workflow engine always starts the newest version of a process definition. You can start a specific version of a process definition by referencing the *ID* (primary key) of that definition in the engine's database.
-
-```java
-ProcessDefinition processDefinition = processEngine().getRepositoryService()
-  .createProcessDefinitionQuery()
-  .processDefinitionKey("invoice")
-  .processDefinitionVersion(17)
-  .singleResult();
-processEngine().getRuntimeService()
-  .startProcessInstanceById(processDefinition.getId());
-```
-
-"By ID" does *NOT* relate to the ID in the BPMN XML file (which is known as the "key" in the process engine). Instead, ID relates to the *primary key* in the Camunda database. You have no influence on this ID - it is created at deployment time.
-
-#### Correlating messages to running process instances
-
-In case you want to route an event to a process instance already started, you will need to *correlate* the message to the specific process instance waiting for it by matching some properties of the incoming message to some properties of your process instance:
-
-```java
-runtimeService
-    .createMessageCorrelation("myMessage") // <1>
-    .processInstanceBusinessKey(myMessage.getOrderId().toString()) // <2>
-    .processInstanceVariableEquals("customerId", myMessage.getCustomerId()) // <3>
-    .correlate();
-```
-
-1
-
-A process instance matches if it is waiting for a message *named* myMessage...
-
-2
-
-...if it carries the orderId of the message as its *business key*...
-
-3
-
-...and if a *process variable* "customerId" also matches the expectations.
-
-As a best practice, correlate incoming messages based on *one* unique artificial attribute (e.g. `correlationIdMyMessage`) created specifically for this communication:
-
-```java
-runtimeService
-    .createMessageCorrelation("myMessage")
-    .processInstanceVariableEquals("correlationIdMyMessage", myMessage.getCustomCorrelationId())
-    .correlate();
-```
-
-Alternatively, you also have the option to select the process instance targeted by a message based on a query involving complex criteria, and then, as a second step, explicitly correlate the message to the selected process instance.
-
-The [API docs](https://docs.camunda.org/manual/latest/reference/bpmn20/events/message-events/#explicitly-triggering-a-message) show more details about the possibilities to trigger message events.
-
-#### Routing signals to process instances
-
-In the case of a [BPMN signal](https://docs.camunda.org/manual/latest/reference/bpmn20/events/signal-events/), a correlation to a specific process instance is neither necessary nor possible, as the mechanism is meant to inform *all* process instances "subscribing" to a specific signal event:
-
-```java
-runtimeService
-  .createSignalEvent("mySignal") // <1>
-  .setVariables(variables) // pass variables (optional)
-  .send();
-```
-
-1
-
-A process instance matches if it is waiting for or started by a signal *named* `mySignal`.
-
-#### Starting process instances at arbitrary nodes
-
-There are use cases when you want to start a process instance at some point
-other than the modeled start event:
-
-* **Testing**: It's always best to test a process in chunks, so you don't always need to start at the beginning.
-
-* **Migration**: When migrating to Camunda, you might have existing process
-instances you want to migrate to new Camunda process instances **in a defined state**.
-
-In these cases, you can start a process instance in arbitrary activities using the API.
    - -1 - -This example starts the Twitter process directly before the "Publish on Twitter" service task, meaning the service task will be executed: - -```java -processEngine.getRuntimeService().createProcessInstanceByKey("twitter") - .startBeforeActivity("service_task_publish_on_twitter") - .setVariable("content", "Know how to circumvent the review!") - .execute(); -``` - -See [User Guide: Starting a Process Instance at Any Set of Activities](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-concepts/#start-a-process-instance-at-any-set-of-activities). - -## Technology examples for messages sent by external systems - -In this section, we give examples for *technical messages*, which are received from -other systems, typically by leveraging technologies like e.g. SOAP, REST, JMS or -other. - -
    - -1 - -You will need a mechanism receiving that message and routing it to the workflow engine. That could be a direct API call to Camunda. It could also be a AMQP or Kafka consumer or a SOAP endpoint using the Camunda API internally. It could even be a hotfolder polled by some framework like Apache Camel. - -### Camunda Cloud - -API examples for REST, AMQP, and Kafka are shown in [connecting the workflow engine with your world](../connecting-the-workflow-engine-with-your-world/). - -### Camunda Platform 7 - -:::caution Camunda Platform 7 only -This part of the best practice targets Camunda Platform 7 only! -::: - -#### SOAP - -To start a process instance via a SOAP web service, write some Java code, e.g. by leveraging the @WebService annotation. - -```java -@WebService(name = "InvoiceService") <1> -public class InvoiceService { - - @Inject - private RuntimeService runtimeService; <2> - - public void startInvoice(String invoiceId) { <3> - Map variables = new HashMap(); - variables.put("invoiceId", invoiceId); - runtimeService.startProcessInstanceByKey("invoiceId", variables); - } - -} -``` - -1 - -The @WebService annotation is sufficient to provide the SOAP web service. - -2 - -You can inject the process engine or the process engine services when using -a proper dependency injection container like Spring or CDI. - -3 - -Decide if you prefer to use a business interface (like shown here) or a generic one like `startProcessInstance`. - -#### Messages - -To start a process instance by AMQP messages, write some Java code, e.g. using Spring to connect to RabbitMQ: - -```java -@RabbitListener(queues="invoice") -public void messageReceived(String invoiceId) { - Map variables = new HashMap(); - variables.put("invoiceId", invoiceId); - runtimeService.startProcessInstanceByKey("invoice", variables); -} -``` - -Or to start a process instance by a JMS message, you could use a message-driven bean in a Java EE container: - -```java -@MessageDriven(name = "InvoiceMDB", activationConfig = { - @ActivationConfigProperty(propertyName = "destinationType", - propertyValue = "javax.jms.Queue"), - @ActivationConfigProperty(propertyName = "destination", - propertyValue = "queue/invoice") - } -) -public class InvoiceMDB implements MessageListener { - - @Inject - private RuntimeService runtimeService; - - @Override - public void onMessage(Message message) { - try { - String invoiceId = ((TextMessage) message).getText(); - Map variables = new HashMap(); - variables.put("invoiceId", invoiceId); - runtimeService.startProcessInstanceByKey("invoice", variables); - } catch (Exception ex) { - throw new RuntimeException("Could not process JMS message", ex); - } - } -} -``` - -#### REST - -The provided REST API can be directly used to communicate with the workflow engine remotely. - -``` -POST /process-definition/key/invoice/start - -Request body: -{ - "variables": { - "invoiceId" : {"value" : "123456", "type": "String"} - } -} -``` - -More information can be found in the [Camunda Platform 7 REST API Reference](https://docs.camunda.org/manual/latest/reference/rest/process-definition/post-start-process-instance/). - -#### Apache Camel (e.g. files in a drop folder) - -Use [Apache Camel](http://camel.apache.org/) if you want to use one of the existing [Camel Components](http://camel.apache.org/components.html) (a huge list). Consider leveraging the -[Camunda Platform 7 Camel Community Extension](https://github.com/camunda/camunda-bpm-camel). 
You can find an example of this in action on JBoss/Wildfly in [this showcase (unsupported)](https://github.com/camunda/camunda-consulting/blob/master/showcases/camel-use-cases/). - -Starting a process instance can be done by a Camel route, e.g. when a file was placed into a drop folder: - -```java -from("file://c:/tmp") // some drop folder - .routeId("file") - .convertBodyTo(String.class) // convert content of file into String - .to("log:org.camunda.demo.camel?level=INFO&showAll=true&multiline=true") // optional logging - .to("camunda-bpm:start?processDefinitionKey=invoice"); // and start new process instance -``` - -In this case, the message transported within the Camel route is handed over to the process instance as a variable named `camelBody` by default, see [documentation](https://github.com/camunda/camunda-bpm-camel#camunda-bpmstart-start-a-process-instance). - -#### Messages sent via an Enterprise Service Bus (ESB) - -If you have an ESB in your architecture, you may want to start process instances from your ESB. The best approach to do this depends on the concrete product you use. There are two basic possibilities how you do this: - -- **Java**: You call the engine inside the VM via the Java API, like it is done in - the Camel community extension mentioned above. -- **Remote**: You call the remote API (e.g. Camunda REST) to communicate with the - engine. You might also build your own endpoint (e.g. JMS or SOAP) as described - above. - -## Using the Camunda BPMN framework - -If you use the **Camunda BPMN Framework** as described in the book ["Real Life BPMN"](https://www.amazon.de/dp/B07XC6R17R/) you will typically have message start events (even if you only have a single start event) to connect the surrounding human flows to the technical flow via messages: - -
-
-1
-
-This is a message start event, which allows you to show the collaboration between the human and the technical flows. However, it is only the starting point of the technical pool and could be a none start event in terms of execution.
-
-If there is *exactly one message start event* for the whole process definition, it can also be treated as if it were a none start event when starting a process instance.
-
-## Sending messages to other processes
-
-If messages are exchanged between different processes deployed in the workflow engine, you have to implement the communication yourself by writing some code that starts a new process instance.
-
-1
-
-Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN id in Java:
-
-```java
-@ZeebeWorker(type="routeInput", autoComplete=true)
-public void routeInput(@ZeebeVariable String invoiceId) {
-  Map<String, Object> variables = new HashMap<>();
-  variables.put("invoiceId", invoiceId);
-  zeebeClient.newCreateInstanceCommand()
-      .bpmnProcessId("invoice").latestVersion()
-      .variables(variables)
-      .send()
-      .exceptionally( throwable -> { throw new RuntimeException("Could not create new process instance", throwable); });
-}
-```
-
-2
-
-Use some simple code on the sending side to correlate the message to a running process instance, for example in Java:
-
-```java
-@ZeebeWorker(type="notifyOrder", autoComplete=true)
-public void notifyOrder(@ZeebeVariable String orderId, @ZeebeVariable String paymentInformation) {
-  Map<String, Object> variables = new HashMap<>();
-  variables.put("paymentInformation", paymentInformation);
-
-  zeebeClient.newPublishMessageCommand()
-      .messageName("MsgPaymentReceived")
-      .correlationKey(orderId)
-      .variables(variables)
-      .send()
-      .exceptionally( throwable -> { throw new RuntimeException("Could not publish message", throwable); });
-}
-```
-
-## Handling messages sent by a user
-
-Sometimes explicit "user tasks" are not an appropriate choice to involve a human user in a process: the user does not want to see a task in Tasklist, but rather wants to actively trigger some action right at the time when it becomes necessary from a business perspective. The difference is which event gives the *active trigger*.
    - -1 - -We did not model a user task in this process, as the user will not immediately be triggered. The user cannot do anything at the moment when the process enters this event. Instead, we made it wait for a "message" which is later triggered by a human user. - -2 - -The accountant actually receives the "external trigger" by actively looking at new payments in the bank account. - -3 - -Every new payment now has to be correlated to the right waiting process instance manually. In this situation it is often the better choice not to model a user task, but let the process wait for a "message" generated from a user. - -These scenarios are not directly supported by Camunda Tasklist. A custom search screen built for the accountant might allow you to see and find orders waiting for a payment. By interacting with such a screen, the accountant communicates with those process instances all at once. When hitting a 'Paid' button, a piece of custom code using the API must now correlate the user's message to the affected process instance(s). diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/boundary-event.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/boundary-event.png deleted file mode 100644 index e4c385eb6ba..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/boundary-event.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/events-vs-tasks.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/events-vs-tasks.png deleted file mode 100644 index 44b704a336b..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/events-vs-tasks.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png deleted file mode 100644 index 743f1351b7d..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/img-src.pptx b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/img-src.pptx deleted file mode 100644 index dbdc671b2ca..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/img-src.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/receive-task.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/receive-task.png deleted file mode 100644 index 102c8cf5d5f..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/receive-task.png and /dev/null differ diff --git 
a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-boundary-message-events.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-boundary-message-events.png deleted file mode 100644 index 8e738a1921c..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-boundary-message-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-event-based-gateway.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-event-based-gateway.png deleted file mode 100644 index c34a0ef05e8..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-event-based-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-event-subprocess.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-event-subprocess.png deleted file mode 100644 index 602976e1dc7..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-event-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-gateway.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-gateway.png deleted file mode 100644 index be43f884ca0..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/response-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/send-and-receive-task.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/send-and-receive-task.png deleted file mode 100644 index b566551a3aa..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/send-and-receive-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/send-task.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/send-task.png deleted file mode 100644 index c403e9978f9..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/send-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/service-task.png b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/service-task.png deleted file mode 100644 index 3b3340e9da3..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/service-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/synchronous-ack.png 
b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/synchronous-ack.png deleted file mode 100644 index ed6227643cc..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns-assets/synchronous-ack.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns.md b/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns.md deleted file mode 100644 index f0e1861c9e1..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/service-integration-patterns.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: "Service integration patterns with BPMN" -description: "Understand communication patterns, integrate services with BPMN tasks and events, and take a closer look at hiding technical complexity behind call activities." ---- - -When integrating systems and services, you can choose between various modeling possibilities in BPMN. This practice will give you an overview and advice on how to decide between alternatives. - -You will see that service tasks in general are a good choice, but there are also situations where you might want to switch to send and receive tasks or events. - -## Understanding communication patterns - -Let's briefly examine the three typical communication patterns to integrate systems: - -* **Request/response using synchronous communication styles**: You use a synchronous protocol, like HTTP, and block for the result. -* **Request/response using asynchronous communication styles**: You use asynchronous communication, for example, by sending messages via a message broker, but wait for a response message right after. Technically, these are two independent asynchronous messages, but the sender blocks until the response is received, hence logically making it a request/response. -* **Asynchronous messages or events:** If a peer service needs a long time to process a request, the response is much later than the request, say hours instead of milliseconds. In this case, the response is typically handled as a separate message. Additionally, some of your services might also wait for messages or events that are not connected to a concrete request, especially in event-driven architectures. - -The following table gives a summary of the three options: - -| | Synchronous request/response | Asynchronous request/response | Asynchronous messages or events | -| - | :- | :- | :- | -| **Business level** | Synchronous | Synchronous | Asynchronous | -| **Technical communication style** | Synchronous | Asynchronous | Asynchronous | -| **Example** | HTTP | AMQP, JMS | AMQP, Apache Kafka | - -You can dive more into communication styles in the webinar [Communication Between Loosely Coupled Microservices](https://page.camunda.com/wb-communication-between-microservices) ([slides](https://www.slideshare.net/BerndRuecker/webinar-communication-between-loosely-coupled-microservices), [recording](https://page.camunda.com/wb-communication-between-microservices) and [FAQ](https://medium.com/communication-between-loosely-coupled-microservices-webinar-faq-a02708b3c8b5)). - -## Integrating services with BPMN tasks - -Let’s look at using BPMN tasks to handle these communication patterns before diving into BPMN events later. 
- -### Service task - -The [service task](/docs/components/modeler/bpmn/service-tasks) is the typical element to implement synchronous request/response calls, such as REST, gRPC or SOAP. You should **always use service tasks for synchronous request/response**. - -![Service task](service-integration-patterns-assets/service-task.png) - -### Send task - -Technically, **send tasks behave exactly like service tasks**. However, the alternative symbol makes the meaning of sending a message easier to understand for some stakeholders. - -You **should use send tasks for sending asynchronous messages**, like AMQP messages or Kafka records. - -![Send task](service-integration-patterns-assets/send-task.png) - -There is some gray area whenever you call a synchronous service that then sends an asynchronous message. A good example is email. Assume your process does a synchronous request/response call to a service that then sends an email to inform the customer. The call itself is synchronous because it gives you a confirmation (acknowledgement, or ACK for short) that the email has been sent. Now is the "inform customer" task in your process a service, or a send task? - -![Asynchronous ACK](service-integration-patterns-assets/synchronous-ack.png) - -This question is not easy to answer and **depends on what your stakeholders understand more intuitively**. The more technical people are, the more you might tend towards a service task, as this is technically correct. The more you move towards the business side, the more you might tend to use a send task, as business people will consider sending an email an asynchronous message. - -In general, we tend to **let the business win** as it is vital that business stakeholders understand business processes. - -However, if you follow a microservice (or service-oriented architecture) mindset, you might argue that you don’t need to know exactly how customers are informed within the process. Hiding the information if the notification is synchronous or asynchronous is good to keep your process model independent of such choices, making it more robust whenever the implementation of the notification service changes. This is a very valid concern too, and might motivate for a service task. - -:::note -In case you can’t easily reach a conclusion, save discussion time and just use a service task. -::: - -You could also argue to use send tasks to invoke synchronous request/response calls when you are not interested in the response. However, this is typically confusing, and we do not recommend this. - -### Receive task - -A [receive task](/docs/components/modeler/bpmn/receive-tasks/) waits for an asynchronous message. Receive tasks **should be used for incoming asynchronous messages or events**, like AMQP messages or Kafka records. - -![Receive task](service-integration-patterns-assets/receive-task.png) - -Receive tasks can be used to receive the response in asynchronous request/response scenarios, which is discussed next. - -### Service task vs. send/receive task combo - -For asynchronous request/response calls, you can use a send task for the request, and a following receive task to wait for the response: - -![Send and receive task](service-integration-patterns-assets/send-and-receive-task.png) - -You can also use a service task, which is sometimes unknown even to advanced users. A service task can technically wait for a response that happens at any time, a process instance will wait in the service task, as it would in the receive task. 
- -![Service task](service-integration-patterns-assets/service-task.png) - -Deciding between these options is not completely straightforward. You can find a table listing the decision criteria below. - -As a general rule-of-thumb, we recommend using **the service task as the default option for synchronous *and* asynchronous request/response** calls. The beauty of service tasks is that you remove visual clutter from the diagram, which makes it easier to read for most stakeholders. - -This is ideal if the business problem requires a logically synchronous service invocation. It allows you to ignore the technical details about the protocol on the process model level. - -The typical counter-argument is that asynchronous technical protocols might lead to different failure scenarios that you have to care about. For example, when using a separate receive task, readers of the diagram almost immediately start to think about what happens if the response will not be received. But this also has the drawback that now business people might start discussing technical concerns, which is not necessarily good. - -Furthermore, this is a questionable argument, as synchronous REST service calls could also time out. This is exactly the same situation, just hidden deeper in network abstraction layers, as every form of remote communication uses asynchronous messaging somewhere down in the network stack. On a technical level, you should always think about these failure scenarios. The talk [3 common pitfalls in microservice integration and how to avoid them](https://berndruecker.io/3-pitfalls-in-microservice-integration/) goes into more detail on this. - -On a business level, you should be aware of the business implications of technical failures, but not discuss or model all the nuts and bolts around it. - -However, there are also technical implications of this design choice that need to be considered. - -**Technical implications of using service tasks** - -You can keep a service task open and just complete it later when the response arrives, but **to complete the service task, you need the _job instance key_** from Zeebe. This is an internal ID from the workflow engine. You can either: - -* Pass it around to the third party service which sends it back as part of the response message. -* Build some kind of lookup table, where you map your own correlation information to the right job key. - -:::note -Later versions of Zeebe might provide query possibilities for this job key based on user controlled data, which might open up more possibilities. -::: - -Using workflow engine internal IDs can lead to problems. For example, you might cancel and restart a process instance because of operational failures, which can lead to a new ID. Outstanding responses cannot be correlated anymore in such instances. - -Or, you might run multiple workflow engines, which can lead to internal IDs only being unique within one workflow engine. All of this might not happen, but the nature of an internal ID is that it is internal and you have no control over it — which bears some risk. - -In practice, however, using the internal job instance key is not a big problem if you get responses in very short time frames (milliseconds). Whenever you have more long-running interactions, you should consider using send and receive tasks, or build your own lookup table that can also address the problems mentioned above. - -This is also balanced by the fact that service tasks are simply very handy.
The concept is by far the easiest way to implement asynchronous request/response communication. The job instance key is generated for you and unique for every message interchange. You don’t have to think about race conditions or idempotency constraints yourself. [Timeout handling and retry logic](/docs/components/concepts/job-workers#timeouts) is built into the service task implementation of Zeebe. There is also [a clear API to let the workflow engine know of technical or business errors](/docs/components/concepts/job-workers#completing-or-failing-jobs). - -**Technical implications of using send and receive tasks** - -Using send and receive tasks means using [the message concept built into Zeebe](/docs/components/concepts/messages). This is a powerful concept to solve a lot of problems around cardinalities of subscriptions, correlation of the message to the right process instances, and verification of uniqueness of the message (idempotency). - -When using messages, you need to provide the correlation ID yourself. This means that the correlation ID is fully under your control, but it also means that you need to generate it yourself and make sure it is unique. You will most likely end up with generated UUIDs. - -You can leverage [message buffering](/docs/components/concepts/messages#message-buffering) capabilities, which means that the process does not yet need to be ready to receive the message. You could, for example, do other things in between, but this also means that you will not get an exception right away if a message cannot be correlated, as it is simply buffered. This leaves you in charge of dealing with messages that can never be delivered. - -Retries are not built-in, so you need to model a loop to retry the initial service call if no response is received. Additionally (at least in the current Zeebe version), there is no possibility to trigger error events for a receive task, which means you need to model error messages as response payload or separate message types — both are discussed later in this post. - -A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls can improve your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](https://forum.camunda.io/) to discuss such a scenario in more depth. - -**Summary and recommendations** - -The following table summarizes the possibilities and recommendations. - -| Case | Synchronous request/response | Synchronous request/response | Asynchronous request/response | Asynchronous request/response | -| :- | :- | :- | :- | :- | -| BPMN element| Service task | Send task | Service task | Send + receive task | -| | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send task](/img/bpmn-elements/task-send.svg) | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send and receive task](/img/bpmn-elements/send-and-receive-task.png) | -| Technical implications | | Behaves like a service task | A unique correlation ID (the job instance key) is generated for you. You don’t have to think about race conditions or idempotency. Timeout handling and retry logic are built-in. API to flag business or technical errors.
| Correlation ID needs to be generated yourself, but is fully under control. Message buffering is possible but also necessary. Timeouts and retries need to be modeled. BPMN errors cannot be used. | -| Assessment | Very intuitive. | Might be more intuitive for fire and forget semantics, but can also lead to discussions. | Removes visual noise which helps stakeholders to concentrate on core business logic, but requires use of internal job instance keys. | More visual clutter, but also more powerful options around correlation and modeling patterns. | -| Recommendation | Default option, use unless it is confusing for business stakeholders (e.g. because of fire and forget semantics of a task). | Use for fire and forget semantics, unless it leads to unnecessary discussions, in this case use service task instead. | Use when response is within milliseconds and you can pass the Zeebe-internal job instance key around. | Use when the response will take time (> some seconds), or you need a correlation id you can control. | - - -## Integrating services with BPMN events - -Instead of using send or receive **tasks**, you can also use send or receive **events** in BPMN. - -![Events vs tasks](service-integration-patterns-assets/events-vs-tasks.png) - -Let's first explore when you want to do that, and afterwards look into some more advanced patterns that become possible with events. - -### Tasks vs. events - -The **execution semantics of send and receive events is identical with send and receive tasks**, so you can express the very same thing with tasks or events. - -However, there is one small difference that might be relevant: **only tasks can have boundary events**, which allows to easily model when you want to cancel waiting for a message: - -![Boundary events](service-integration-patterns-assets/boundary-event.png) - -Despite this, the whole visual representation is of course different. In general, tasks are easier understood by most stakeholders, as they are used very often in BPMN models. - -However, in certain contexts, such as event-driven architectures, events might be better suited as the concept of events is very common. Especially, if you apply domain-driven design (DDD) and discuss domain events all day long, it might be intuitive that events are clearly visible in your BPMN models. - -Another situation better suited for events is if you send events to your internal reporting system besides doing “the real” business logic. Our experience shows that the smaller event symbols are often unconsciously treated as less important by readers of the model, leading to models that are easier to understand. - -| | Send task | Receive task | Send event | Receive event | -| :- | :- | :- | :- | :- | -| Recommendation | Prefer tasks over events | Prefer tasks over events | Use only if you consistently use events over tasks and have a good reason for doing so (e.g. event-driven architecture) | Use only if you consistently use events over tasks and have a good reason for doing so (e.g. event-driven architecture) | - -:::note -The choice about events vs. commands also [needs to be reflected in the naming of the element](../../modeling/naming-bpmn-elements), as a task emphasizes the action (e.g. "wait for response") and the event reflects what happened (e.g. "response received"). -::: - -### Handling different response messages - -Very often the response payload of the message will be examined to determine how to move on in the process. 
- -![Gateway handling response](service-integration-patterns-assets/response-gateway.png) - -In this case, you receive exactly one type of message for the response. As an alternative, you could also use different message types, to which the process can react differently. For example, you might wait for the validation message, but also accept a cancellation or rejection message instead: - -![Boundary message event to capture different response messages](service-integration-patterns-assets/response-boundary-message-events.png) - -This modeling has the advantage that it is much easier to see the expected flow of the process (also called the happy path), with exceptions deviating from it. On the other hand, this pattern mixes receive tasks and events in one model, which can confuse readers. Keep in mind that it only works for a limited number of non-happy messages. - -To avoid the task/event mixture you could use a so-called event-based gateway instead, this gateway waits for one of a list of possible message types to be received: - -![Event based gateway to capture different response messages](service-integration-patterns-assets/response-event-based-gateway.png) - -We typically try to avoid the event-based gateway, as it is hard to understand for non-BPMN professionals. At the same time, it shares the downside of the first pattern with the decision gateway after the receive task: the happy path cannot be easily spotted. - -As a fourth possibility, you can add event subprocesses, which get activated whenever some event is received while the process is still active in some other area. In the above example, you could model the happy path and model all deviations as event subprocesses. - -![Event sub process to capture different response messages](service-integration-patterns-assets/response-event-subprocess.png) - -This pattern is pretty handy, but also needs some explanation to people new to BPMN. It has one downside you need to know: once your process instance moves to the sub process, you can’t easily go back to the normal flow. To some extent this problem can be solved by advanced modeling patterns like shown in the [allow for order cancellation anytime](../../modeling/building-flexibility-into-bpmn-models/#allow-for-order-cancellation-any-time) example. - -At the same time, the event subprocess has a superpower worth mentioning: you can now wait for cancellation messages in whole chunks of your process — it could arrive anytime. 
- -| | Receive task with boundary events | Payload and XOR-gateway | Event-based gateway | Event sub process | -| - | - | - | - | - | -| | ![Boundary Events](service-integration-patterns-assets/response-boundary-message-events.png) | ![XOR Gateway](service-integration-patterns-assets/response-gateway.png) | ![Event-based Gateway](service-integration-patterns-assets/response-event-based-gateway.png) | ![Event Subprocess](service-integration-patterns-assets/response-event-subprocess.png) -| Understandability | Easy | Very easy | Hard | Medium | -| Assessment | Limitation on how many message types are possible | Happy path not easily visible | | Might need some explanation for readers of the model | -| Recommendation | Use when it is important to see message types in the visual, limit to two boundary message events | Use when there are more response types or if the response type can be treated as a result | Try to avoid | Use if you need bigger scopes where you can react to events - -### Message type on the wire != BPMN message type - -There is one important detail worth mentioning in the context of message response patterns: The message type used in BPMN models does not have to be exactly the message type you get on the wire. When you correlate technical messages, e.g. from AMQP, you typically write a piece of glue code that receives the message and calls the workflow engine API. This is described in [connecting the workflow engine with your world](../connecting-the-workflow-engine-with-your-world/), including a code example. In this glue code you can do various transformations, for example: - -* Messages on different message queues could lead to the same BPMN message type, probably having some additional parameter in the payload indicating the origin. -* Some message header or payload attributes could be used to select between different BPMN message types being used. - -It is probably not best practice to be as inconsistent as possible between technical message types and BPMN message types. Still, the flexibility of a custom mapping might be beneficial in some cases. - -## Hiding technical complexity behind call activities - -Whenever technical details of one service integration become complicated, you can think of creating a separate process model for the technicalities of the call and use a [call activity](/docs/components/modeler/bpmn/call-activities/) in the main process. - -An example is given in chapter 7 of [Practical Process Automation](https://processautomationbook.com/): - -![Hiding technical details behind call activity](service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png) - -In the customer scenario, a document storage service was long-running, but could not do a real callback or response message for technical reasons (in short, firewall limitations). As a result, the document storage service needed to be regularly polled for the response. In the customer scenario, this was done by a "document storage adapter" process that leveraged workflow engine features to implement the polling every minute, and especially the persistent waiting in between. In the main business process, this technical adapter process was simply invoked via a call activity, meaning no technicalities bloated that diagram. 
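To illustrate the "message type on the wire != BPMN message type" point above, the glue code often boils down to a few lines: receive the technical message, derive a BPMN message name and correlation key from it, and publish it to the workflow engine. The following sketch assumes the Zeebe Java client, a hypothetical `eventType` attribute, and an `orderId` used as correlation key; the names and the mapping rule are illustrative only:

```java
import java.util.Map;

import io.camunda.zeebe.client.ZeebeClient;

public class MessageCorrelationGlueCode {

  private final ZeebeClient zeebeClient;

  public MessageCorrelationGlueCode(ZeebeClient zeebeClient) {
    this.zeebeClient = zeebeClient;
  }

  // Called by your messaging infrastructure (e.g. a Kafka or AMQP listener).
  public void onTechnicalMessage(String eventType, String orderId, Map<String, Object> payload) {
    // Map the technical message type to a BPMN message name (assumed mapping).
    String bpmnMessageName = "payment-rejected".equals(eventType)
        ? "Message_PaymentRejected"
        : "Message_PaymentReceived";

    // Publish the message to Zeebe; the correlation key must match the
    // correlation key expression defined in the BPMN model.
    zeebeClient.newPublishMessageCommand()
        .messageName(bpmnMessageName)
        .correlationKey(orderId)
        .variables(payload)
        .send()
        .join();
  }
}
```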
diff --git a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/coverage.png b/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/coverage.png deleted file mode 100644 index ff6fa6582c5..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/coverage.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/img-src.pptx b/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/img-src.pptx deleted file mode 100644 index 3a2c617cd60..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/img-src.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/process-test-scope-example.png b/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/process-test-scope-example.png deleted file mode 100644 index 1c02046c8d8..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/process-test-scope-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/scopes.png b/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/scopes.png deleted file mode 100644 index b4c9210d5b9..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions-assets/scopes.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions.md b/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions.md deleted file mode 100644 index 537099d8a11..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/testing-process-definitions.md +++ /dev/null @@ -1,573 +0,0 @@ ---- -title: "Testing process definitions" -tags: - - Test / Unit Test - - Test / Integration Test - - Mock - - Exception - - Java Delegate - - JUnit ---- - -Test your executable BPMN processes as they are software. If possible, do automated unit tests with a fast in-memory workflow engine. Before releasing, verify with integration tests close to your real-life environment, which might include human-driven, exploratory integration tests. - -This best practice uses the following process example: - -
    - -1 - -New tweets need to be reviewed before publication. - -2 - -The tweeting employee is notified about rejected tweets. - -3 - -Approved tweets get published. - -4 - -Duplicate tweets are rejected by Twitter and dealt with by the original author (e.g. rephrased) just to be reviewed again. - -## Testing scopes - -There are basically three typical test scopes used when building process solutions: - -1. **Unit tests**: Testing glue code or programming code you developed for your process solution. How to unit test your software itself is not discussed here, as this is a common practice for software development. - -2. **Process tests**: Testing the expected behavior of the process model, including glue code and specifically the data flowing through the process model. Those tests should run frequently, so they should behave like unit tests (quick turnaround, no need for external resources, etc.) - -3. **Integration tests**: Testing the system in a close-to-real-life-environment to make sure it is really working. This is typically done before releasing a new version of your system. Those tests include *human-driven*, *exploratory* tests. - -![Scopes](testing-process-definitions-assets/scopes.png) - -## Writing process tests in Java - -:::caution Camunda Cloud only -This section targets Camunda Cloud. Refer to the specific Camunda 7 section below if you are looking for Camunda Platform 7.x. -::: - -This section describes how to write process tests as unit tests in Java. Later in this best practice, you will find some information on writing tests in other languages, like Node.Js or C#. - -When using Java, most customers use Spring Boot. While this is a common setup for customers, it is not the only one. Find some more examples of Java process tests in the README.md of the [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test) project. - -### Technical setup using Spring - -:::caution JUnit 5 -You need to use JUnit 5. Ensure you use JUnit 5 in every test class: the `@Test` annotation you import needs to be `org.junit.jupiter.api.Test`. -::: - -1. Use [*JUnit 5*](http://junit.org) as unit test framework. -2. Use [spring-zeebe](https://github.com/camunda-community-hub/spring-zeebe). -3. Use `@ZeebeSpringTest` to ramp up an in-memory process engine. -4. Use annotations from [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test/) to check whether your expectations about the state of the process are met. -5. Use mocking of your choice, e.g. [Mockito](http://mockito.org) to mock service methods and verify that services are called as expected. - -A test can now look like the following example. 
The complete source code is available on [GitHub](https://github.com/camunda-community-hub/camunda-cloud-examples/blob/main/twitter-review-java-springboot/src/test/java/org/camunda/community/examples/twitter/TestTwitterProcess.java): - -```java -@SpringBootTest -@ZeebeSpringTest -public class TestTwitterProcess { - - @Autowired - private ZeebeClient zeebe; - - @MockBean - private TwitterService twitterService; - - - @Test - public void testTweetApproved() throws Exception { - // Prepare data input - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - // start a process instance - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - // And then retrieve the UserTask and complete it with 'approved = true' - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - // Now the process should run to the end - waitForProcessInstanceCompleted(processInstance); - - // Let's assert that it passed certain BPMN elements (more to show off features here) - assertThat(processInstance) - .hasPassedElement("end_event_tweet_published") - .hasNotPassedElement("end_event_tweet_rejected") - .isCompleted(); - - // And verify it caused the right side effects by calling the business methods - Mockito.verify(twitterService).tweet("Hello world"); - Mockito.verifyNoMoreInteractions(twitterService); - } -} -``` - -### Test scope and mocking - -In such a test case, you want to test the executable BPMN process definition, plus all the glue code which logically belongs to the process definition in a wider sense. Typical examples of glue code you want to include in a process test are: - -* Worker code, typically connected to a service task -* Expressions (FEEL) used in your process model for gateway decisions or input/output mappings -* Other glue code, for example, a REST API that does data mapping and delegates to the workflow engine - -In the example above, this is the worker code and the REST API: - -![Process test scope example](testing-process-definitions-assets/process-test-scope-example.png) - -Workflow engine-independent business code should *not* be included in the tests. In the Twitter example, the `TwitterService` will be mocked, and the `TwitterWorker` will still read process variables and call this mock. This way, you can test the process model, the glue code, and the data flow in your process test. - -The following code examples highlight the important aspects around mocking. - -The `PublishTweetWorker` is executed as part of the test. It does input data mapping **(1)** and also translates a specific business exception into a BPMN error **(2)**: - -```java -@Autowired -private TwitterService twitterService; - -@ZeebeWorker( type = "publish-tweet", autoComplete = true) -public void handleTweet(@ZeebeVariablesAsType TwitterProcessVariables variables) throws Exception { - try { - twitterService.tweet( - variables.getTweet() // 1 - ); - } catch (DuplicateTweetException ex) { // 2 - throw new ZeebeBpmnError("duplicateMessage", "Could not post tweet, it is a duplicate."); - } -} -``` - -The `TwitterService` is considered a business service (it could, for example, wrap the twitter4j API) and shall *not* be executed during the test.
This is why this interface is mocked: - - -```java -@MockBean -private TwitterService twitterService; - -@Test -public void testTweetApproved() throws Exception { - // ... - // Using Mockito you can make sure a business method was called with the expected parameter - Mockito.verify(twitterService).tweet("Hello world"); -} - -@Test -public void testDuplicate() throws Exception { - // Using Mockito you can define what should happen if a method is called, in this case an exception is thrown to simulate a business error - Mockito.doThrow(new DuplicateTweetException("DUPLICATE")).when(twitterService).tweet(anyString()); - //... -``` - -### Drive the process and assert the state - -For tests, you drive the process from waitstate to waitstate and assert that you see the expected process and variable states. For example, you might implement a test to test the scenario that a tweet gets approved: - -```java -@Test -public void testTweetApproved() throws Exception { - // Prepare data input - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - // start a process instance <1> - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - // And then retrieve the UserTask and complete it with 'approved = true' <2> - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - // Now the process should run to the end - waitForProcessInstanceCompleted(processInstance); - - // Let's assert that it passed certain BPMN elements (more to show off features here) <3> - assertThat(processInstance) - .hasPassedElement("end_event_tweet_published") - .hasNotPassedElement("end_event_tweet_rejected") - .isCompleted(); - - // And verify it caused the right side effects by calling the business methods <4> - Mockito.verify(twitterService).tweet("Hello world"); - Mockito.verifyNoMoreInteractions(twitterService); -} -``` - -1. Create a new process instance. You may want to use some glue code to start your process (e.g. the REST API facade), or also create helper methods within your test class. - -2. Drive the process to its next waitstate, e.g. by completing a waiting user task. You may extract boilerplate code into helper methods as shown below. - -3. Assert that your process is in the expected state. - -4. Verify with your mocking library that your business service methods were called as expected. - -This is the helper method used to verify the workflow engine arrived in a specific user task, and complete that task, passing on some variables.
As you can see, [a user task behaves like a service task with the type `io.camunda.zeebe:userTask`](/docs/components/modeler/bpmn/user-tasks/): - -```java -public void waitForUserTaskAndComplete(String userTaskId, Map variables) { - // Let the workflow engine do whatever it needs to do - inMemoryEngine.waitForIdleState(); - - // Now get all user tasks - List jobs = zeebe.newActivateJobsCommand().jobType(USER_TASK_JOB_TYPE).maxJobsToActivate(1).send().join().getJobs(); - - // Should be only one - assertTrue(jobs.size()>0, "Job for user task '" + userTaskId + "' does not exist"); - ActivatedJob userTaskJob = jobs.get(0); - // Make sure it is the right one - if (userTaskId!=null) { - assertEquals(userTaskId, userTaskJob.getElementId()); - } - - // And complete it passing the variables - if (variables!=null) { - zeebe.newCompleteCommand(userTaskJob.getKey()).variables(variables).send().join(); - } else { - zeebe.newCompleteCommand(userTaskJob.getKey()).send().join(); - } -} -``` - -Be careful not to "overspecify" your test method by asserting too much. Your process definition will likely evolve in the future and such changes should break as little test code as possible, but just as much as necessary! - -As a rule of thumb *always* assert that the expected *external effects* of your process really took place (e.g. that business services were called as expected). Additionally, carefully choose which aspects of *internal process state* are important enough so that you want your test method to warn about any related change later on. - -### Testing your process in chunks - -Divide and conquer by *testing your process in chunks*. Consider the important chunks and paths the Tweet Approval Process consists of: - -
    - -1 - -The *happy path*: The tweet just gets published. - -2 - -The tweet gets rejected. - -3 - -A duplicated tweet gets rejected by Twitter. - -#### Testing the happy path - -The happy path is kind of the default scenario with a positive outcome, so no exceptions or errors or deviations are experienced. - -Fully test the happy path in one (big) test method. This makes sure you have one consistent data flow in your process. Additionally, it is easy to read and to understand, making it a great starting point for new developers to understand your process and process test case. - -You were already exposed to the happy path in our example, which is the scenario that the tweet gets approved: - -```java -@Test -public void testTweetApproved() throws Exception { - // Prepare data input - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - // start a process instance <1> - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - // And then retrieve the UserTask and complete it with 'approved = true' <2> - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - // Now the process should run to the end - waitForProcessInstanceCompleted(processInstance); - - // Let's assert that it passed certain BPMN elements (more to show off features here) <3> - assertThat(processInstance) - .hasPassedElement("end_event_tweet_published") - .hasNotPassedElement("end_event_tweet_rejected") - .isCompleted(); - - // And verify it caused the right side effects by calling the business methods <4> - Mockito.verify(twitterService).tweet("Hello world"); - Mockito.verifyNoMoreInteractions(twitterService); -} -``` - -#### Testing detours - -Test *forks/detours* from the happy path as well as *errors/exceptional* paths as chunks in separate test methods. This allows you to unit test in meaningful units.
- -The tests for the exceptional paths are basically very similar to the happy path in our example: - -```java -@Test -public void testRejectionPath() throws Exception { - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", false)); - - waitForProcessInstanceCompleted(processInstance); - waitForProcessInstanceHasPassedElement(processInstance, "end_event_tweet_rejected"); - Mockito.verify(twitterService, never()).tweet(anyString()); -} -``` - -and: - -```java -@Test -public void testDuplicateTweet() throws Exception { - // throw exception simulating a duplicate message - Mockito.doThrow(new DuplicateTweetException("DUPLICATE")).when(twitterService).tweet(anyString()); - - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setAuthor("bernd") - .setBoss("Zeebot"); - - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - waitForProcessInstanceHasPassedElement(processInstance, "boundary_event_tweet_duplicated"); - // TODO: Add human task to test case - waitForUserTaskAndComplete("user_task_handle_duplicate", new HashMap<>()); -} -``` - - - -## Integration tests - -Test the process in a close-to-real-life environment. This verifies that it really works before releasing a new version of your process definition, which includes *human-driven*, *exploratory* tests. - -Clearly *define your goals* for integration tests! Goals could be: - -* End user & acceptance tests -* Complete end-to-end tests -* Performance & load tests, etc. - -Carefully consider *automating* tests on scope 3. You need to look at the overall effort spent on writing test automation code and maintaining it when compared with executing human-driven tests for your software project's lifespan. The best choice depends very much on the frequency of regression test runs. - -Most effort is typically invested in setting up proper test data in surrounding systems. - -Configure your tests to be dedicated integration tests, and separate them from unit or process tests. - -You can use typical industry-standard tools for integration testing together with Camunda. - -## Technical setup and example using Camunda Platform 7 - -:::caution Camunda Platform 7 only -This section targets Camunda Platform 7.x only. Refer to the previous sections if you are using Camunda Cloud. -::: - -Camunda Platform 7 also has support for writing tests in Java. This section gives you an example; the basic ideas of test scopes and testing in chunks are also valid with Camunda Platform 7. - -The technical setup for Camunda Platform 7: - -1. Use [*JUnit*](http://junit.org) as unit test framework. -2. Use Camunda's [JUnit Rule](https://docs.camunda.org/javadoc/camunda-bpm-platform/7.16/org/camunda/bpm/engine/test/ProcessEngineRule.html) to ramp up an in-memory process engine where the [JobExecutor](https://docs.camunda.org/javadoc/camunda-bpm-platform/7.16/org/camunda/bpm/engine/test/Deployment.html) is turned off. -3.
Use Camunda's [@Deployment](https://docs.camunda.org/javadoc/camunda-bpm-platform/7.16/org/camunda/bpm/engine/test/Deployment.html) annotation to deploy and un-deploy one or more process definitions under test for a single test method. -4. Use [camunda-bpm-assert](http://github.com/camunda/camunda-bpm-assert) to easily check whether your expectations about the state of the process are met. -5. Use mocking of your choice, e.g. [Mockito](http://mockito.org) plus [PowerMock](https://github.com/jayway/powermock/) to mock service methods and verify that services are called as expected. -6. Use Camunda's [MockExpressionManager](https://docs.camunda.org/javadoc/camunda-bpm-platform/7.16/org/camunda/bpm/engine/test/mock/MockExpressionManager.html) to resolve bean names used in your process definition without the need to ramp up the dependency injection framework (like CDI or Spring). -7. Use an [In-Memory H2 database](http://www.h2database.com/html/features.html#in_memory_databases) as default database to test processes on developer machines. If required, you can run the same tests on *multiple databases*, e.g. Oracle, DB2, or MS-SQL on a CI-Server. To achieve that, you can make use of (e.g. Maven) profiles and Java properties files for database configuration. - -Let's use the same example as above. - -A typical test case will look like this: - -```java -// ... -import static org.camunda.bpm.engine.test.assertions.ProcessEngineTests.*; // <4> -import static org.mockito.Mockito.*; // <5> - -@RunWith(PowerMockRunner.class) // <1> <5> -public class TwitterTest { - - @Rule - public ProcessEngineRule processEngineRule = new ProcessEngineRule(); // <2> - - @Mock // Mockito mock instantiated by PowerMockRunner <5> - private TweetPublicationService tweetPublicationService; - - @Before - public void setup() { - // ... - Mocks.register("tweetPublicationDelegate", tweetPublicationDelegate); // <6> - } - - @Test // <1> - @Deployment(resources = "twitter/TwitterDemoProcess.bpmn") // <3> - public void testTweetApproved() { - // ... - } -// ... -} -``` - -The service task **Publish on Twitter** delegates to Java code: - -```xml - - -``` - -And this *Java delegate* itself calls a business method: - -```java -@Named -public class TweetPublicationDelegate implements JavaDelegate { - - @Inject - private TweetPublicationService tweetPublicationService; - - public void execute(DelegateExecution execution) throws Exception { - String tweet = new TwitterDemoProcessVariables(execution).getTweet(); // 1 - // ... - try { - tweetPublicationService.tweet(tweet); // 2 - } catch (DuplicateTweetException e) { - throw new BpmnError("duplicateMessage"); // 3 - } - } -// ... 
-``` - -The TweetPublicationService is mocked: - -```java -@Mock // 1 -private TweetPublicationService tweetPublicationService; - -@Before -public void setup() { - // set up java delegate to use the mocked tweet service - TweetPublicationDelegate tweetPublicationDelegate = new TweetPublicationDelegate(); // 2 - tweetPublicationDelegate.setTweetService(tweetPublicationService); - // register a bean name with mock expression manager - Mocks.register("tweetPublicationDelegate", tweetPublicationDelegate); // 3 -} - -@After -public void teardown() { - Mocks.reset(); // 3 -} -``` - -Now you can test the happy path to a published tweet: - -```java -@Test -@Deployment(resources = "twitter/TwitterDemoProcess.bpmn") -public void testTweetApproved() { - // given - ProcessInstance processInstance = runtimeService().startProcessInstanceByKey( - "TwitterDemoProcess", - withVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET, TWEET)); // 1 - assertThat(processInstance).isStarted(); - // when - complete(task(), withVariables(TwitterDemoProcessConstants.VAR_NAME_APPROVED, true)); //2 - // then - assertThat(processInstance) // 3 - .hasPassed("end_event_tweet_published") - .hasNotPassed("end_event_tweet_rejected") - .isEnded(); - verify(tweetPublicationService).tweet(TWEET); // 4 - verifyNoMoreInteractions(tweetPublicationService); -} -``` - -As a next step, you might want to test the path where a tweet gets rejected. You don't have to start at the start event, but can start anywhere in your process: - -```java -@Test -@Deployment(resources = "twitter/TwitterDemoProcess.bpmn") -public void testTweetRejected() { - - // create a process instance directly at the point at which a tweet was rejected - ProcessInstance processInstance = runtimeService() - .createProcessInstanceByKey("TwitterDemoProcess") - .startBeforeActivity("service_task_publish_on_twitter") - .setVariables(variables) - .execute(); - assertThat(processInstance) - .isStarted() - .hasPassed("service_task_publish_on_twitter") - .hasVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET); - - // when - complete(task(), withVariables(TwitterDemoProcessConstants.VAR_NAME_APPROVED, false)); // 2 - - // then - assertThat(processInstance) - .hasPassed("end_event_tweet_rejected") - .hasNotPassed("end_event_tweet_published") - .isEnded(); - verifyZeroInteractions(tweetPublicationService); -} -``` - -You could also implement another `testTweetDuplicated()` to verify the logic in case a tweet turns out to be a duplicate and is rejected by Twitter. For this case, we attached an error event to the service task **Publish on Twitter**. In the BPMN XML we see an error event defined with an errorCode `duplicateMessage`. - -```xml - - - - -``` - -Above, we already saw the Java delegate code throwing the BPMN error exception with that code `duplicateMessage`. 
Here is the method testing for the case a tweet is duplicated: - -```java -@Test -@Deployment(resources = "twitter/TwitterDemoProcess.bpmn") -public void testTweetDuplicated() { - // given - doThrow(new DuplicateTweetException()) // 1 - .when(tweetPublicationService).tweet(anyString()); - // when - ProcessInstance processInstance = rejectedTweet(withVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET, TWEET)); // 2 - // then - assertThat(processInstance) // 3 - .hasPassed("boundary_event_tweet_duplicated") - .hasNotPassed("end_event_tweet_rejected").hasNotPassed("end_event_tweet_published") - .isWaitingAt("user_task_handle_duplicate"); - verify(tweetPublicationService).tweet(TWEET); // 4 - verifyNoMoreInteractions(tweetPublicationService); - // when - complete(task()); // 5 - // then - assertThat(processInstance) // 6 - .isWaitingAt("user_task_review_tweet") - .hasVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET) - .task().isAssignedTo("demo"); -} -``` diff --git a/versioned_docs/version-1.3/components/best-practices/development/understanding-transaction-handling-c7-assets/rollback.png b/versioned_docs/version-1.3/components/best-practices/development/understanding-transaction-handling-c7-assets/rollback.png deleted file mode 100644 index 5c9d44d0cca..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/development/understanding-transaction-handling-c7-assets/rollback.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/development/understanding-transaction-handling-c7.md b/versioned_docs/version-1.3/components/best-practices/development/understanding-transaction-handling-c7.md deleted file mode 100644 index e8db5498fb1..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/development/understanding-transaction-handling-c7.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Understanding Camunda 7 transaction handling -tags: -- Transaction -- ACID Transaction -- Incident -- Save Point ---- - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only! Zeebe, the workflow engine used in Camunda Cloud, as a very different transactional behevaior, please visit [dealing with problems and exceptions](../dealing-with-problems-and-exceptions/). -::: - -Try to carefully study and fully understand the concepts of wait states (save points) acting as *transaction boundaries* for technical (ACID) transactions. In case of technical failures, they are by default rolled back and need to be retried either by the user or the background job executor. - -## Understanding technical (ACID) transactions in Camunda Platform 7 - - -Every time we use the Camunda Platform 7 API to ask the workflow engine to do something (like e.g. starting a process, completing a task, signaling an execution), the engine will advance in the process until it reaches *wait states* on each active path of execution, which can be: - -
    - -1 - -*User tasks* and *receive tasks* - -2 - -All *intermediate catching events* - -3 - -The *event based gateway*, which offers the possibility of reacting to one of multiple intermediate catching events - -4 - -Several further task types (*service*, *send*, *business rule* tasks) - -5 - -[External Tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) are wait states, too. In this case, the *throwing message events* might be implemented as external task. - -At a wait state, any further process execution must wait for some trigger. Wait states will therefore always be persisted to the database. The design of the workflow engine is, that within a *single database transaction*, the process engine will cover the distance from one persisted wait states to the next. However, you have fine grained control over these transaction boundaries by introducing additional *save points* using the [`async before` and `async after` attributes](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#configure-asynchronous-continuations). A background job executor will then make sure that the process *continues asynchronously*. - -Learn more about [transactions in processes](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/) in general and [asynchronous continuations](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#why-asynchronous-continuations) in the user guide. - - - -:::note Technical vs. business transactions -Sometimes when we refer to "transactions" in processes, we refer to a very different concept, which must be clearly distinguished from technical database transactions. A *business transaction* marks a section in a process for which 'all or nothing' semantics apply, but from a pure business perspective. This is described in [dealing with problems and exceptions](../dealing-with-problems-and-exceptions/). -::: - - -## Controlling transaction boundaries - -### Using additional save points - -You have fine grained control over transaction boundaries by introducing *save points* additionally to [wait states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states), that are always a save point. Use the `asyncBefore='true'` and `asyncAfter='true'` attributes in your process definition BPMN XML. The process state will then be persisted at these points and a background job executor will make sure that it is continued asynchronously. - -
    - -1 - -A user task is an *obligatory wait state* for the process engine. After the creation of the user task, the process state will be persisted and committed to the database. The engine will wait for user interaction. - -2 - -This service task is executed *"synchronously"* (by default), in other words within the same thread and the same database transaction with which a user attempts to complete the "Write tweet" user task. When we assume that this service fails in cases in which the language used is deemed to be too explicit, the database transaction rolls back and the user task will therefore remain uncompleted. The user must re-attempt, e.g. by correcting the tweet. - -3 - -This service task is executed *"asynchronously"*. By setting the `asyncBefore='true'` attribute we introduce an additional save point at which the process state will be persisted and committed to the database. A separate job executor thread will continue the process asynchronously by using a separate database transaction. In case this transaction fails the service task will be retried and eventually marked as failed - in order to be dealt with by a human operator. - -Pay special attention to the consequence of these save points with regards to retrying. A retry for a job may be required if there are *any failures* during the transaction which follows the save point represented by the job. Depending on your subsequent transaction boundaries this may very well be much more than just the service task which you configured to be `asyncBefore='true'`! The process instance will always roll back to its last known save point, as discussed later. - - - -### Marking every service task as asynchronous - -A typical *rule of thumb*, especially when doing a lot of service orchestration, is to *mark every service task* being *asynchronous*. - -
    - -The downside is that the jobs slightly increase the overall resource consumption. But this is often worth it, as it has a couple of advantages for operations: - -* The process stops at the service task causing the specific error. -* You can configure a meaningful retry strategy for every service task. -* You can leverage the suspension features for service tasks. - -While it is not directly configurable to change Camunda Platform 7's *default* "async" behavior for all service tasks at once, you can achieve that by implementing a custom [ProcessEnginePlugin](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-plugins/) introducing a [BpmnParseListener](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/impl/bpmn/parser/BpmnParseListener.html) which adds async flags on-the-fly (eventually combined with custom [BPMN extension attributes](https://docs.camunda.org/manual/latest/user-guide/model-api/bpmn-model-api/extension-elements/) to control this behavior). You can find a [code example](https://github.com/camunda/camunda-bpm-examples/tree/master/process-engine-plugin/bpmn-parse-listener) for a similar scenario on GitHub. - - -### Knowing typical do's and don'ts for save points - -Aside from a general strategy to mark service tasks as save points, you will often want to *configure typical save points*. - -**Do** configure a savepoint **after** - -* *User tasks*: This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. - -* Service Tasks (or other steps) causing *Non-idempotent Side Effects* Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. - -* Service tasks (or other steps) executing *expensive computations* Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. - -* Receive tasks (or other steps) catching *external events*, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event: This savepoint makes sure that an external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This also applies to External Tasks. - -**Do** configure a savepoint **before** - -* *Start events* None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows you to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance.
- -* Service tasks (or other steps) invoking *remote systems* Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will see the process instance waiting in the corresponding service task in cockpit. - -* *Parallel joins* Parallel Join Inclusive Join Multi-instance Task: Parallel joins synchronize separate process paths, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retried later on. Therefore such a savepoint makes sure that the path synchronization will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. - -The Camunda JobExecutor works (by default) with [exclusive jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs), meaning that just one exclusive job per process instance may be executed at once. Hence, job executor threads will by default not cause optimistic locking exceptions at parallel joins "just by themselves", but other threads using the Camunda API might cause them - either for themselves or also for the job executor. - - - -**Don't** configure save points **before** - -* User tasks and other *wait states* User Task Receive Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as *external tasks* Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead, as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) by themselves finish the transaction and wait for external intervention anyway. - -* *All forking* and *exclusively joining gateways* Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. - -### Adding save points automatically to every model - -If you agree on certain save points to be important in all your process definitions, you can *add required BPMN XML attributes automatically* by a [Process Engine Plugin](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-plugins/) during deployment. Then you don't have to add this configuration to each and every process definition yourself. - -As a weaker alternative the plugin could check for existence of correct configuration and *log warnings or errors if save points are missing*. - -Take a look at [this example](https://github.com/camunda/camunda-consulting/tree/master/snippets/engine-plugin-add-save-points) for details. - - -## Thinking about operations during modeling - -Make sure you also understand how to [operate Camunda Platform 7](../../operations/operating-camunda-c7) - in particular by understanding *retry behaviour* and *incident management* for service tasks.
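The process engine plugin approach mentioned under "Adding save points automatically to every model" can be sketched roughly as follows. This is a simplified illustration under assumed class and package choices, not a drop-in implementation; see the linked example for a complete version:

```java
import java.util.ArrayList;

import org.camunda.bpm.engine.impl.bpmn.parser.AbstractBpmnParseListener;
import org.camunda.bpm.engine.impl.cfg.AbstractProcessEnginePlugin;
import org.camunda.bpm.engine.impl.cfg.ProcessEngineConfigurationImpl;
import org.camunda.bpm.engine.impl.pvm.process.ActivityImpl;
import org.camunda.bpm.engine.impl.pvm.process.ScopeImpl;
import org.camunda.bpm.engine.impl.util.xml.Element;

public class AsyncServiceTaskPlugin extends AbstractProcessEnginePlugin {

  @Override
  public void preInit(ProcessEngineConfigurationImpl configuration) {
    if (configuration.getCustomPreBPMNParseListeners() == null) {
      configuration.setCustomPreBPMNParseListeners(new ArrayList<>());
    }
    configuration.getCustomPreBPMNParseListeners().add(new AbstractBpmnParseListener() {
      @Override
      public void parseServiceTask(Element serviceTaskElement, ScopeImpl scope, ActivityImpl activity) {
        // Add a save point before every service task during deployment,
        // instead of setting asyncBefore manually in every model.
        activity.setAsyncBefore(true);
      }
    });
  }
}
```

The same listener could instead only log a warning when a service task is missing the flag, which is the weaker alternative described above.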
- - -## Rolling back a transaction on unhandled errors - -It is important to understand that every *non-handled, propagated exception* happening during process execution rolls back the current technical transaction. Therefore the process instance will find its last known *wait state* (or save point). The following image visualizes that default behavior. - -![Rollback](understanding-transaction-handling-c7-assets/rollback.png) - -1 - -When we ask the Camunda engine to complete a task ... - -2 - -... it tries to advance the process within the borders of a technical transaction until it reaches wait states (or save points) again. - -3 - -However, in cases where a non-handled exception occurs on the way, this transaction is rolled back and we find the user task we tried to complete to be still uncompleted. - -From the perspective of a user trying to complete the task, it appears *impossible* to complete the task, because a subsequent service throws an exception. This can be unfortunate, and so you very well may want to introduce additional save points, e.g. here before the send task. - -```xml - -``` - -But hindering the user to complete the user task can also be just what you want. Consider e.g. the possibility to *validate task form input* via a subsequent service: - -
    - -1 - -A user needs to provide data with a *user task* form. When trying to complete the form ... - -2 - -... the subsequent synchronously executed *service task* finds a validation problem and throws an exception which rolls back the transaction and leaves the user task uncompleted. - -Learn more about [rollback on exceptions](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#rollback-on-exception) and the reasoning for this design in the User Guide. - -## Handling exceptions via the process - -As an alternative to rolling back transactions, you can also handle those exceptions within the process; see [dealing with problems and exceptions](../dealing-with-problems-and-exceptions/) for details. - -Just be aware of the following technical constraint: in case your transaction manager marks the current transaction *for rollback* (as possible in Java transaction managers), handling the exception by the process is not possible as the workflow engine cannot commit its work in this transaction. - -
- -## Organizing glue code and workers in process solutions - -Assume the following order fulfillment process, that needs to invoke three synchronous REST calls to the responsible systems (payment, inventory, and shipping) via custom glue code: - -![order fulfillment example](writing-good-workers-assets/order-fulfillment-process.png) - -Should you create three different applications with a worker for one task type each, or would it be better to process all task types within one application? - -As a rule of thumb, we recommend implementing **all glue code in one application**, which then is the so-called **process solution** (as described in [Practical Process Automation](https://processautomationbook.com/)). This process solution might also include the BPMN process model itself, deployed during startup. Thus, you create a self-contained application that is easy to version, test, integrate, and deploy. - -![Process solution](writing-good-workers-assets/process-solution.png) -Figure taken from [Practical Process Automation](https://processautomationbook.com/) - -Thinking of Java, the three REST invocations might live in three classes within the same package (showing only two for brevity): - -```java -public class RetrieveMoneyWorker { - @ZeebeWorker(type = "retrieveMoney") - public void retrieveMoney(final JobClient client, final ActivatedJob job) { - // ... code - } -} -``` - -```java -public class FetchGoodsWorker { - @ZeebeWorker(type = "fetchGoods") - public void fetchGoods(final JobClient client, final ActivatedJob job) { - // ... code - } -} -``` - -You can also pull the glue code for all task types into one class. Technically, it does not make any difference and some people find that structure in their code easier. If in doubt, the default is to create one class per task type. - -There are exceptions when you might not want to have all glue code within one application: - -1. You need to specifically control the load for one task type, like *scaling it out* or *throttling it*. For example, if one service task is doing PDF generation, which is compute-intensive, you might need to scale it much more than all other glue code. On the other hand, it could also mean limiting the number of parallel generation jobs due to licensing limitations of your third-party PDF generation library. -2. You want to write glue code in different programming languages, for example, because writing specific logic in a specific language is much easier (like using Python for certain AI calculations or Java for certain mainframe integrations). - -In this case, you would spread your workers into different applications. Most often, you might still have a main process solution that will also still deploy the process model. Only specific workers are carved out. - -## Thinking about transactions, exceptions and idempotency of workers - -Make sure to visit [Dealing With Problems and Exceptions](../dealing-with-problems-and-exceptions/) to gain a better understanding how workers deal with transactions and exceptions to the happy path. - -## Data minimization in workers - -If performance or efficiency matters in your scenario, there are two rules about data in your workers you should be aware of: - -1. Minimize what data you read for your job. In your job client, you can define which process variables you will need in your worker, and only these will be read and transferred, saving resources on the broker as well as network bandwidth. -2. Minimize what data you write on job completion. 
You should explicitly not transmit the input variables of a job upon completion, which might happen easily if you simply reuse the map of variables you received as input for submitting the result. - -Not transmitting all variables saves resources and bandwidth, but serves another purpose as well: upon job completion, these variables are written to the process and might overwrite existing variables. If you have parallel paths in your process (e.g. [parallel gateway](/docs/components/modeler/bpmn//parallel-gateways/), [multiple instance](/docs/components/modeler/bpmn/multi-instance/)) this can lead to race conditions that you need to think about. The less data you write, the smaller the problem. - -## Scaling workers - -If you need to process a lot of jobs, you need to think about optimizing your workers. - -Workers can control the number of jobs retrieved at once (see the configuration sketch below). In a busy system it makes sense to not only request one job, but probably 20 or even up to 50 jobs in one remote request to the workflow engine, and then start working on them locally. In a less utilized system, long polling is used to avoid delays when a job comes in. Long polling means the client’s request to fetch jobs is blocked until a job is received (or some timeout hits). Therefore, the client does not constantly need to ask. - -You will have jobs in your local application that need to be processed. The worst case in terms of scalability is that you process the jobs sequentially one after the other. While this sounds bad, it is still a valid approach for many use cases, as most projects do not need any parallel processing in the worker code as they simply do not care whether a job is executed a second earlier or later. Think of a business process that is executed only some hundred times per day and includes mostly human tasks — a sequential worker is totally sufficient. In this case, you can skip this section. - -However, you might need to do better and process jobs in parallel and utilize the full power of your worker’s CPUs. In such a case, you should read on and understand the difference between writing blocking and non-blocking code. - -### Blocking / synchronous code and thread pools - -With blocking code a thread needs to wait (is blocked) until something finishes before it can move on. In the above example, making a REST call requires the client to wait for IO — the response. The thread does not compute anything during this time period; however, it also cannot do anything else. - -Assume your worker needs to invoke 20 REST requests, each taking around 100ms; this will take 2s in total to process. Your throughput can’t go beyond 10 jobs per second with one thread. - -A common approach to scaling throughput beyond this limit is to leverage a thread pool. This works as blocked threads are not actively consuming CPU cores, so you can run more threads than CPU cores — since they are only waiting for I/O most of the time. In the above example with 100ms latency of REST calls, having a thread pool of 10 threads increases throughput to 100 jobs/second. - -The downside of using thread pools is that you need to have a good understanding of your code, thread pools in general, and the concrete libraries being used. Typically, we do not recommend configuring thread pools yourself. If you need to scale beyond the linear execution of jobs, leverage reactive programming.
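-
-Independent of blocking versus reactive handlers, the job batching and variable fetching discussed above are configured on the worker itself. A minimal sketch with the Zeebe Java client; the job type, variable names, and numbers are purely illustrative:
-
-```java
-client.newWorker()
-    .jobType("retrieveMoney")
-    .handler((jobClient, job) -> {
-        // ... glue code; write back only the variables you really need
-        jobClient.newCompleteCommand(job.getKey())
-            .variables(Map.of("paymentConfirmationId", "42"))
-            .send().join();
-    })
-    .fetchVariables("orderId", "amount") // read only the variables this worker needs
-    .maxJobsActive(32)                   // number of jobs activated for this worker at once
-    .timeout(Duration.ofSeconds(10))
-    .open();
-```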
- -### Non-blocking / reactive code - -Reactive programming uses a different approach to achieve parallel work: extract the waiting part from your code. - -With a reactive HTTP client you will write code to issue the REST request, but then not block for the response. Instead, you define a callback as to what happens if the request returns. Most of you know this from JavaScript programming. Thus, the runtime can optimize the utilization of threads itself, without you the developer even knowing. - -### Recommendation - -In general, using reactive programming is favorable in most situations where parallel processing is important. However, we sometimes see a lack of understanding and adoption in developer communities, which might hinder adoption in your environment. - -## Client library examples - -Let’s go through a few code examples using Java, NodeJS, and C#, using the corresponding client libraries. All [code is available on GitHub](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution) and a [walk through recording is available on YouTube](https://youtu.be/ZHKz9l5yG3Q). - -### Java - -Using the [Java Client](https://github.com/camunda-cloud/camunda-cloud-get-started/tree/master/java) you can write worker code like this: - -```java -client.newWorker().jobType("retrieveMoney") - .handler((jobClient, job) -> { - //... - }).open(); -``` - -The [Spring integration](https://github.com/zeebe-io/spring-zeebe/) provides a more elegant way of writing this, but also [uses a normal worker from the Java client](https://github.com/zeebe-io/spring-zeebe/blob/master/client/spring-zeebe/src/main/java/io/camunda/zeebe/spring/client/config/processor/ZeebeWorkerPostProcessor.java#L56) underneath. In this case, your code might look like this: - -```java -@ZeebeWorker(type = "retrieveMoney") -public void retrieveMoney(final JobClient client, final ActivatedJob job) { - //... -} -``` - -In the background, a worker starts a polling component and [a thread pool](https://github.com/camunda-cloud/zeebe/blob/d24b31493b8e22ad3405ee183adfd5a546b7742e/clients/java/src/main/java/io/camunda/zeebe/client/impl/ZeebeClientImpl.java#L179-L183) to [handle the polled jobs](https://github.com/camunda/camunda/blob/1.3.14/clients/java/src/main/java/io/camunda/zeebe/client/impl/worker/JobPoller.java#L109-L111). The [**default thread pool size is one**](https://github.com/camunda-cloud/zeebe/blob/760074f59bc1bcfb483fab4645501430f362a475/clients/java/src/main/java/io/camunda/zeebe/client/impl/ZeebeClientBuilderImpl.java#L49). If you need more, you can enable a thread pool: - -```java -ZeebeClient client = ZeebeClient.newClientBuilder() - .numJobWorkerExecutionThreads(5) - .build(); -``` - -Or, in Spring Zeebe: - -```properties -zeebe.client.worker.threads=5 -``` - -Now, you can **leverage blocking code** for your REST call, for example, the `RestTemplate` inside Spring: - -```java -@ZeebeWorker(type = "rest") -public void blockingRestCall(final JobClient client, final ActivatedJob job) { - LOGGER.info("Invoke REST call..."); - String response = restTemplate.getForObject( // <-- blocking call - PAYMENT_URL, String.class); - LOGGER.info("...finished. Complete Job..."); - client.newCompleteCommand(job.getKey()).send() - .join(); // <-- this blocks to wait for the response - LOGGER.info(counter.inc()); -} -``` - -Doing so **limits** the degree of parallelism to the number of threads you have configured. 
You can [observe in the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/java-blocking-thread-1.log) that jobs are executed sequentially when running with one thread ([the code is available on GitHub)](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/java-worker/src/main/java/io/berndruecker/experiments/cloudclient/java/RestInvocationWorker.java): - -``` -10:57:00.258 [pool-4-thread-1] Invoke REST call… -10:57:00.258 [ault-executor-0] Activated 32 jobs for worker default and job type rest -10:57:00.398 [pool-4-thread-1] …finished. Complete Job… -10:57:00.446 [pool-4-thread-1] …completed (1). Current throughput (jobs/s ): 1 -10:57:00.446 [pool-4-thread-1] Invoke REST call… -10:57:00.562 [pool-4-thread-1] …finished. Complete Job… -10:57:00.648 [pool-4-thread-1] …completed (2). Current throughput (jobs/s ): 2 -10:57:00.648 [pool-4-thread-1] Invoke REST call… -10:57:00.764 [pool-4-thread-1] …finished. Complete Job…10:57:00.805 [pool-4-thread-1] …completed (3). Current throughput (jobs/s ): 3 -``` - -If you experience a large number of jobs, and these jobs are waiting for IO the whole time — as REST calls do — you should think about using **reactive programming**. For the REST call, this means for example the Spring WebClient: - -```java -@ZeebeWorker(type = "rest") -public void nonBlockingRestCall(final JobClient client, final ActivatedJob job) { - LOGGER.info("Invoke REST call..."); - Flux paymentResponseFlux = WebClient.create() - .get().uri(PAYMENT_URL).retrieve() - .bodyToFlux(String.class); - - // non-blocking, so we register the callbacks (for happy and exceptional case) - paymentResponseFlux.subscribe( - response -> { - LOGGER.info("...finished. Complete Job..."); - client.newCompleteCommand(job.getKey()).send() - // non-blocking, so we register the callbacks (for happy and exceptional case) - .thenApply(jobResponse -> { LOGGER.info(counter.inc()); return jobResponse;}) - .exceptionally(t -> {throw new RuntimeException("Could not complete job: " + t.getMessage(), t);}); - }, - exception -> { - LOGGER.info("...REST invocation problem: " + exception.getMessage()); - client.newFailCommand(job.getKey()) - .retries(1) - .errorMessage("Could not invoke REST API: " + exception.getMessage()).send() - .exceptionally(t -> {throw new RuntimeException("Could not fail job: " + t.getMessage(), t);}); - } - ); -} -``` - -This code uses the reactive approach to use the Zeebe API: - -``` -client.newCompleteCommand(job.getKey()).send() - .thenApply(jobResponse -> { - counter.inc(); - return jobResponse; - }) - .exceptionally(t -> { - throw new RuntimeException("Could not complete job: " + t.getMessage(), t); - }); -``` - -With this reactive glue code, you don’t need to worry about thread pools in the workers anymore, as this is handled under the hood from the frameworks or the Java runtime. [You can see in the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/java-nonblocking.log) that many jobs are now executed in parallel — and even by the same thread in a loop within milliseconds. - -``` -10:54:07.105 [pool-4-thread-1] Invoke REST call… -[…] 30–40 times! -10:54:07.421 [pool-4-thread-1] Invoke REST call… -10:54:07.451 [ctor-http-nio-3] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-7] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-2] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-5] …finished. 
Complete Job… -10:54:07.451 [ctor-http-nio-1] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-6] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-4] …finished. Complete Job… -[…] -10:54:08.090 [pool-4-thread-1] Invoke REST call… -10:54:08.091 [pool-4-thread-1] Invoke REST call… -[…] -10:54:08.167 [ault-executor-2] …completed (56). Current throughput (jobs/s ): 56, Max: 56 -10:54:08.167 [ault-executor-1] …completed (54). Current throughput (jobs/s ): 54, Max: 54 -10:54:08.167 [ault-executor-0] …completed (55). Current throughput (jobs/s ): 55, Max: 55 -``` - -These observations yield the following recommendations: - -| | Blocking Code | Reactive Code | -| - | - | - | -| Parallelism | Some parallelism is possible via a thread pool, which is used by the client library. The default thread pool size is one, which needs to be adjusted in the config in order to scale. | A processing loop combined with an internal thread pool, both are details of the framework and runtime platform. | -| **Use when** | You don't have requirements to process jobs in parallel | You need to scale and have IO-intensive glue code (e.g. remote service calls like REST) | -| | Your developers are not familiar with reactive programming | This should be the **default** if your developers are familiar with reactive programming. | - -### Node.JS client - -Using the [Node.JS client](https://github.com/camunda-cloud/camunda-cloud-get-started/tree/master/nodejs), your worker code will look like this, assuming that you use Axios to do REST calls (but of course any other library is fine as well): - -```js -zbc.createWorker({ - taskType: 'rest', - taskHandler: (job, _, worker) => { - console.log("Invoke REST call..."); - axios.get(PAYMENT_URL) - .then(response => { - console.log("...finished. Complete Job...") - job.complete().then( result => { - incCounter() - }) - }) - .catch(error => { - job.fail("Could not invoke REST API: " + error.message) - }); - } -}) -``` - -This is **reactive code**. A really interesting observation is that reactive programming is so deep in the JavaScript language that it is impossible to write blocking code; even code that looks blocking is still [executed in a non-blocking fashion](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/nodejs-blocking.log). - -Node.JS code scales pretty well and there is no specific thread pool defined or necessary. The Camunda Cloud Node.JS client library also [uses reactive programming internally](https://github.com/camunda-community-hub/zeebe-client-node-js/blob/master/src/zb/ZBWorker.ts#L28). - -This makes the recommendation very straightforward: - -| | Reactive code | -| - | - | -| Parallelism | Event loop provided by Node.JS | -| **Use when** | Always | - - -### C# - -Using the [C# client](https://github.com/camunda-cloud/camunda-cloud-get-started/tree/master/csharp), you can write worker code like this: - -```csharp -zeebeClient.NewWorker() - .JobType("payment") - .Handler(JobHandler) - .HandlerThreads(3) - .Name("MyPaymentWorker") - .Open() -``` - -You can see that you can set a number of handler threads. Interestingly, this is a naming legacy. The C# client uses the [Dataflow Task Parallel Library (TPL)](https://docs.microsoft.com/en-us/dotnet/standard/parallel-programming/dataflow-task-parallel-library) to implement parallelism, so the thread count configures the degree of parallelism allowed to TPL in reality.
Internally, this is implemented as a mixture of event loop and threading, which is an implementation detail of TPL. This is a great foundation to scale the worker. - -You need to provide a handler. For this handler, you have to make sure to write non-blocking code; the following example shows this for a REST call using the [HttpClient](https://docs.microsoft.com/en-us/dotnet/api/system.net.http.httpclient?view=net-5.0) library: - -```csharp -private static async void NonBlockingJobHandler(IJobClient jobClient, IJob activatedJob) -{ - Log.LogInformation("Invoke REST call..."); - var response = await httpClient.GetAsync("/"); - Log.LogInformation("...finished. Complete Job..."); - var result = await jobClient.NewCompleteJobCommand(activatedJob).Send(); - counter.inc(); -} -``` - -The code is executed in parallel, [as you can see in the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/dotnet-nonblocking.log). Interestingly, the following code runs even faster for me, but [that’s a topic for another discussion](https://stackoverflow.com/questions/21403023/performance-of-task-continuewith-in-non-async-method-vs-using-async-await): - -```csharp -private static void NonBlockingJobHandler(IJobClient jobClient, IJob activatedJob) -{ - Log.LogInformation("Invoke REST call..."); - var response = httpClient.GetAsync("/").ContinueWith( response => { - Log.LogInformation("...finished. Complete Job..."); - jobClient.NewCompleteJobCommand(activatedJob).Send().ContinueWith( result => { - if (result.Exception==null) { - counter.inc(); - } else { - Log.LogInformation("...could not do REST call because of: " + result.Exception); - } - }); - }); -} -``` - -In contrast to Node.JS, you can also write **blocking code** in C# if you want to (or more probably: it happens by accident): - -```csharp -private static async void BlockingJobHandler(IJobClient jobClient, IJob activatedJob) -{ - Log.LogInformation("Invoke REST call..."); - var response = httpClient.GetAsync("/").Result; - Log.LogInformation("...finished. Complete Job..."); - await jobClient.NewCompleteJobCommand(activatedJob).Send(); - counter.inc(); -} -``` - -The degree of parallelism is down to one again, [according to the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/dotnet-blocking-thread-1.log). So C# is comparable to Java, except that the typically used C# libraries are reactive by default, whereas Java still has many commonly used blocking libraries. The recommendations for C#: - -| | Blocking code | Reactive code | -| - | - | - | -| Parallelism | Some parallelism is possible via a thread pool, which is used by the client library. | A processing loop combined with an internal thread pool, both are details of the framework and runtime platform. | -| **Use when** | **Rarely**, and only if you don't have requirements to process jobs in parallel or might even want to reduce the level of parallelism. | This should be the **default** | -| | Your developers are not familiar with reactive programming | You need to scale and have IO-intensive glue code (e.g.
remote service calls like REST) | diff --git a/versioned_docs/version-1.3/components/best-practices/management/doing-a-proper-poc.md b/versioned_docs/version-1.3/components/best-practices/management/doing-a-proper-poc.md deleted file mode 100644 index 1ca41117fbf..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/management/doing-a-proper-poc.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Doing a proper POC -tags: - - POC ---- - -When evaluating your process automation approach, a **proof of concept (POC)** is often a good step to check if the process automation methodology, the standards of BPMN and DMN, as well as the Camunda technology suit your needs. It is vital for a POC to make up your mind about your goals, to select a suitable process, and to prepare it and carry it out properly. - -## Understanding POC - -With a POC, you create a prototype application within no more than *three to five days*. The result of a POC is intended to be thrown away after having served its purpose: to try and show that your project will "fly" - including all aspects relevant for your specific situation. Such aspects might be: - -- Does Camunda *fit into your own architecture*? -- Does the *development approach* fit into your own organization's approaches? -- How can you *model* a specific business domain problem? -- Which kind of *know how* is needed for the business and development teams? -- Which *effort* will typically be needed for these kinds of projects? -- What are the impacts of process applications for *operations*? - -Often, it does make sense to implement such a POC together with Camunda, our partners, or specialized consultants to get quick results and focused feedback with respect to your specific challenges. However, you should always at least *co-develop* the POC yourself to really understand what is going on. A team size of two to four people has proven to be quite optimal. - -## Defining and focusing on specific goals - -Before planning and carrying out a POC, you should consciously clarify the specific goals you want the POC to achieve. Typical goals might be: - -- To *verify* the approach or the tool works under specific circumstances. -- To *show* a case that *convinces* internal stakeholders that the approach makes sense. -- To work through a complete *example* and get specific *questions* sorted out. -- To *learn* about Camunda and *understand* how it works. - -:::note -When selecting your goal, keep in mind the needs of all relevant stakeholders. -::: - -Do not just "collect" goals here, but try to make up your mind as to what really matters. Often, it is better to make a clear choice. For example, whether to show off a nice user interface at the end of the week or to have time to clarify all questions and to understand Camunda in depth, maybe even only using unit tests. - -## Defining a scope relevant to your business - -Select a *useful* and *suitable* process, case, or decision given your goals. - -Typically, it should... - -- Be *relevant* to your *core business* stakeholders. -- Make your organization's *return on investment* on BPM more transparent. -- Be *feasible* within the POC time box. - -Avoid political mine fields when selecting the process for your POC. - -## Planning the POC - -### Involving the right people - -It does make sense to implement a POC together with the software vendor and/or specialized consultants to get *quick results* and *focused feedback* with respect to your specific challenges. 
However, you should always at least *co-develop* the POC to really understand what is going on. - -When planning for your team, consider that successful process modeling requires not just knowledge about the business and the targeted technical solution, but experience with BPMN modeling and methodology as well as analytical and moderation skills. We therefore typically bring together *business people* with *IT staff* and internal *business analysts*, *train them properly* and let them continue to *learn on the job* by carrying out the POC together with an *experienced consultant*. A team size of up to a maximum of *four people* has proven to be quite optimal. - -In case you want to access *system interfaces* during your POC, also determine who will be a technically knowledgeable and available *contact person* for that system. To integrate into existing *user interfaces*, you might need help from colleagues within your organization. - -Define a *moderator* to avoid too many detours and keep your POC on track. - -### Planning the technical environment - -:::caution Camunda Cloud -This best practice targets Camunda Cloud. If you want to run a POC with Camunda Platform 7, visit [deciding about your Camunda Platform 7 stack](../../architecture/deciding-about-your-stack-c7/). -::: - -Make the necessary technological choices. Typically, POCs *run on Camunda Cloud SaaS* unless your goal is to validate that Camunda Cloud runs in your Kubernetes environment in a self-managed fashion. A simple test account is often sufficient, unless your goal is to do load or performance tests, for which you need bigger clusters. Reach out to us in such cases. - -To access *third party systems* during your POC, set up proper test systems for those and verify that they are usable. - -Prepare a location in a *version control system* where you can develop your POC. Having a shared repository with history does make sense also (or especially) in a 2-day POC! Collaboration is simplified if the Camunda consultant can also access that repository. It may be worth just creating a repository with weaker access limitations for the POC. - -If your organization cannot easily set up a repository for the POC, or access for externals is impossible, you can create a cloud repository. We typically recommend [GitHub](https://github.com/); a free account is sufficient. It gives you a Git repository and you can invite all necessary people for the POC. Afterwards, you can delete that repository. - -### Selecting the time frame - -As already mentioned above, we typically plan no more than *a focused week* for the POC workshop itself. Sometimes it also works well to split up the POC into two weeks of 2-3 days each, which allows everybody to reflect on the POC over the weekend. - -* Plan *1-3 days* for *modeling* the process with Camunda Modeler. -* Plan *2-3 days* for *implementing* the process solution. - -When selecting the exact time frame, consider all the people involved, as well as any technical preparation you need to do up front. You also might want to plan for further steps, like a few more things you implement yourself internally in a second follow up week. - -## Presenting the results - -Before presenting the results of your POC to a wider audience of stakeholders, select a *speaker* who is comfortable with presenting, prepare a set of focused *slides* illustrating your progress and the lessons learned, and *test* your solution and presentation at least once up front. 
- -The speaker might also be your Camunda Consultant - they are used to presenting to a wide audience! - -## Checklists - -### Technical - -* *Cloud Access*: Make sure you have an account for Camunda Cloud with an active subscription or trial account. - -* *Installations*: Make sure your *developer systems*, as well as any *target systems* for the POC test and production you wish to use are set up. In particular install: - - * Camunda *Modeler* (https://camunda.org/download/modeler/) - * Java, Maven, and your favorite IDE (e.g. Eclipse) - * Make sure *Maven* runs and builds and it can access all necessary dependencies. [Download and build this project](https://github.com/camunda-cloud/camunda-cloud-tutorials/tree/main/orchestrate-microservices/worker-java) can verify that your build runs. - -* *Developer Computers*: For maximum productivity, all participating developers should use the computer with which they work every day. Avoid using computers from a training room or shared laptops unless they allow a remote connection to the developer's personal computer. If the developer's computers are neither portable nor remotely accessible consider conducting the POC in the regular office space of the developers. If your company network is restricting access to Maven and Git repositories on the internet, consider using laptops that are not connected to the company network. Similarly, you should not force the external consultants to work on one of your computers. They will be twice as productive on their laptops and not lose time with software setup, configuration, and access restrictions. Obviously, you do not have to connect the consultant's laptop to your company network. Internet access and a shared code repository are enough to collaborate. - -* *Files* or *Version Control System*: Make sure we can easily exchange files and code during the POC, preferably via your own version control system (e.g. Git or SVN) or at least via shared folders, USB sticks, or email attachments. - -* *Interfaces*: Clarify which technical systems' interfaces you want to access during your POC, make any *documentation* for those available to the whole POC team, and make sure there is a technically knowledgeable *contact person* for the interface available to the team during the POC. Set up a *test system* and verify that it is usable. Verify with Camunda that everything is clear to the team, in particular from a technological perspective. - -### Organizational - -Inform all POC team members and other relevant stakeholders about the following: - -* *Goals* and the selected *scope* for the POC -* *Start* and *end times*, as well as any additional preparation/meet-up times -* *Names and roles* of all involved *people* - -* For onsite POCs: - * Exact *location/address* at which the POC is taking place as well as instructions about how to find together when arriving (for onsite POCs) - * *Projector*, white-board, and flip-chart availability - * *Internet* availability for team members and external consultants - -* For remote POCs: - * Exact meeting setup. For example, links to the meeting room, passwords, etc. In case you can't easily host meetings for external participants, your Camunda consultant can setup a Zoom or Microsoft Teams call. - * Ideally, some chat capability (e.g. a temporary Slack account) - -Ideally, prepare a few *organizational* and/or *project* info slides to get everybody up to speed on day one of the workshop. 
\ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-l.png b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-l.png deleted file mode 100644 index 2abd7ff0132..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-l.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-m.png b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-m.png deleted file mode 100644 index 28d4347856e..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-m.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-s.png b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-s.png deleted file mode 100644 index 9f29a0409bf..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-s.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xl.png b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xl.png deleted file mode 100644 index 4e2440c2e72..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xl.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xxl.png b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xxl.png deleted file mode 100644 index 45361b53ce5..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xxl.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.png b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.png deleted file mode 100644 index b0f326a6594..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.pptx b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.pptx deleted file mode 100644 index d837939af1f..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path.md b/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path.md deleted 
file mode 100644 index 6bcac201642..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/management/following-the-customer-success-path.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: "Following the Customer Success Path" -tags: - - Project Management -description: "Understand the customer success path, estimate effort, use the process model for estimation, and more." ---- - -Following certain steps when evaluating and introducing process automation helps to make it a success. Ensure you review the appropriate best practices at the right time. - -## Understanding the customer success path - -When introducing Camunda as a new process automation platform inside your company, the following process has shown to work best: - -
- -1 - -*Evaluation*: Take the philosophy of the evaluated products into consideration rather than working solely with feature matrices. Practical experience can be invaluable. You might be interested in our [Whitepaper: "Camunda compared to alternatives"](https://page.camunda.com/wp-camunda-compared-to-alternatives). - -2 - -*Process selection*: It is very important to select a suitable pilot process. Use a relevant process where you can show benefits of BPM including a Return on Investment (ROI) calculation. However, avoid too big or too "political" processes to minimize the risk of failure due to avoidable reasons. Note that you can use this process in the proof of concept (PoC) or select a different process for the first PoC, depending on the goals you have. - -3 - -*Proof of Concept* (PoC): Model the process to a high standard. It should be clear, understandable, and precise, as it will have high visibility. Include necessary technical proofs, like calling real services in your environment. Include human tasks in your process where appropriate. We suggest using Camunda Tasklist as a first step to save effort in developing your own tasklist, unless a tasklist is important for your overall proof. Include "eye candy" like reporting to make non-technical stakeholders happy. Concentrate on the important aspects to do the proof and prepare to throw away the code afterwards to start fresh for the pilot, as it is very valid for early POCs to be "hacky" in order to keep focus on the end goals. - -4 - -*Development*: Model the process with the same standard described for a PoC. It should be clear, understandable, and precise. Again, the reason for this is that it will be the most visible part of the project. Develop the process application in an iterative manner to learn fast. Do proper testing to achieve a high quality. - -5 - -*Operations*: Prepare for real operations, which includes setting up the real hardware as well as securing and monitoring the platform. - -6 - -*Pilot review* and *Pilot improvements*: Review the project after it has finished and gone live. Take some time to clean up, as the project normally serves as a "lighthouse" and "copy and paste" template for subsequent projects, so it is worth the effort. It's better to plan time for this phase than to try to make things perfect during early development, as you will have learned a lot once the pilot runs on the live system for a while. - -7 - -*Next processes*: Try to avoid doing too many projects in parallel in the beginning to allow new learning to influence your future work. If you have parallel pilots, organize knowledge sharing between the teams. Ideally, let the team of the first pilot directly implement a subsequent process. - -8 - -*Custom BPM Platform*: In bigger organizations, you typically try to set up your custom BPM platform, meaning a common infrastructure for all upcoming Camunda projects. Try to do as little of this as possible during the first pilot and start building the platform afterwards, taking all learnings into account. At the same time, do what is necessary for the pilot project itself or for other stakeholders to feel comfortable (e.g. Enterprise Architecture). - -9 - -*Process architecture*: BPM initiatives often start by drafting a process landscape and capturing all relevant processes of the company. Try to avoid this, and do as little as possible during your first pilot project. Maybe do a quick process survey to capture relevant processes (by name) to identify a good candidate for the pilot.
Especially do not model all processes in your company in depth before you experienced an "end-to-end" project, including automation of Camunda yourself. Then, you will have gained a deeper understanding of methodology and value around BPMN and DMN. - -## Estimating effort - -When starting your BPM project, it is often necessary to roughly estimate the expected effort. A process model can serve as a central artifact for estimation purposes. Avoid too fine-grained estimations as they typically are not worth the effort. - -However, on a management level one often must have some estimations to secure budgets, get projects started, allocate needed resources, and communicate expected time frames. The success factor is to do estimations *on a very rough level* and avoid spending too much time with details. More often than not, the details develop differently than expected anyway. - -We often see customers successfully estimate *T-Shirt size categories (S, M, L, XL and XXL)*. Such an approach is sufficient for us to make roughly informed decisions about priority and return on investment. - -![T-Shirts](following-the-customer-success-path-assets/t-shirts.png) - -Having said that, your organization may demand that you *map* such rough sizes to some measuring system already used; for example, *story points* or *person days*. To preserve the rough character, consider mapping the sizes by using a series of sharply increasing numbers: - -| S | M | L | XL | XXL | -| - | - | -- | -- | --- | -| 2 | 5 | 13 | 50 | 200 | - -Much more important than concrete numbers is an educated gut feeling. Therefore, try to understand the influencing factors determining most of the effort by implementing your lighthouse process. - -### Using the process model for estimation - -A process model can be seen as a central artifact for estimation purpose, as it indicates and visually maintains a lot of the influencing factors mentioned above. - -
    - -Here are the figures you could estimate: - -1. Setting up development environment: **S** -2. Modeling and understanding requirements: **L** -3. Implementing the process solution: - - Implement UI for PDF upload: **S** 1 - - Implement forms: **S** 2 3 4 5 - - Implement integrating the PDF archive: **S** 6 -4. Going live: **M** - -Using the process model, you can also foresee potential effort drivers, for example: - -- The legacy archive is really hard to integrate. -- The tasks need to be integrated into an existing legacy task list, which might not be straight forward to do. -- The metadata from the PDF shall be extracted, and a specialized form be shown to the user. diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/building-flexibility-into-bpmn-models.md b/versioned_docs/version-1.3/components/best-practices/modeling/building-flexibility-into-bpmn-models.md deleted file mode 100644 index fff8bc22e4a..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/modeling/building-flexibility-into-bpmn-models.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Building flexibility into BPMN models -tags: - - BPMN ---- - -BPMN modeling primarily targets structured processes, often with the goal to automate as many steps as possible, increase efficiency, and decrease process execution costs. But sometimes we need ways to build flexibility into such process models to deal with expected or unexpected operational problems or to allow for humans to intervene. - -## Understanding the required symbols - -To build flexibility into BPMN process models, one must understand BPMN symbols and modeling techniques. After introducing the main symbols, we can demonstrate more concrete examples. - -### Use events as triggers - -BPMN events allow us to react to all kinds of information. We can use them to trigger flexible activities. In particular, BPMN events **catching** **messages**, **conditions**, and **timeouts** are useful in that context. - -
    - -:::caution Camunda Platform 7 Only -Condition events are [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md). -::: - -### Boundary events to add activities on triggers - -BPMN allows us to attach events to the boundary of activities to trigger some follow-up action. By modeling such an event as either **interrupting** or **non-interrupting**, we can decide to do the activities either *instead of* the activity we attach the event to, or *in addition to* it. - -
    - -### Subprocesses with boundary events - -By attaching boundary events not just to individual activities, but also to subprocesses, we can flexibly define the area or scope for which we want to trigger some flexible activities. - -
    - -1 - -While we are occupied with carrying out some area of activities, in a scope of our process... - -2 - -...an event might occur, which causes us... - -3 - -...to carry out this activity in addition to continuing with ordinary work. - -### Event subprocesses - -Sometimes we need to build in flexible activities which are carried out at any point in time. In such cases, we can leverage BPMN's event-based subprocesses. - -
    - -### Escalation events - -:::caution Camunda Platform 7 Only -Escalation events are [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md). -::: - -Sometimes we need highly flexible means to cancel scopes or trigger additional activities from within a scope. The BPMN escalation events can be particularly useful to implement such requirements. - -
    - -1 - -As soon as we are finished with the first activity inside the scope... - -2 - -...we inform the surrounding scope about that and trigger an additional, essential activity... - -3 - -...but also continue with our second activity to complete the subprocess. - -4 - -We can then already continue with the follow-up work regardless of whether that additional activity is already finished. - -### Termination events - -:::caution Camunda Platform 7 Only -Termination events are [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md) and only available in Camunda Platform 7. -::: - -To build flexibility into process models, it is also useful to remember that the termination event just terminates the scope within which it is defined and therefore *not* always the whole process instance. With that technique, it becomes possible to cancel some activities inside a subprocess while completing it successfully and leaving it via the "normal" outgoing path. - -
    - -1 - -As soon as one of our two activities achieves the result, we can cancel the other one... - -2 - -...and successfully complete the subprocess and normally continue with our follow-up work. - -## Examples - -### Allow proactive order status communication - -Assume that for an order to be validated, the customer must determine the delivery date before we can confirm the order. If the order is not acceptable—due to consistency issues or customer related issues—it is declined. - -Some of our orders might be so important that we want to ensure we keep customers happy, even if not everything runs smoothly on our side. - -
    - -1 - -Order managers can request proactive customer communication on demand. Assume they can communicate the reasons via a form, whereas the communication as such is carried out by the call center. - -2 - -On a regular basis, we check based on some rules, whether the order is so important that we proactively communicate why the order is not yet confirmed. Again, the communication is carried out by the call center. - -### Allow for order cancellation any time - -The customer might be allowed to request a cancellation until the order is confirmed. This request would have to be reviewed to determine whether we must accept the cancellation. - -
    - -1 - -Whenever the customer requests a cancellation until the order is confirmed, we review that request and decide whether we have to accept the cancellation or not. - -2 - -If we accept the cancellation, we must terminate the entire process. To do so, we need to use one trick: throw an error event that will end the current event subprocess, but not yet the order process. - -3 - -This leads to another subprocess to be triggered, and this one is interrupting. Now, the process instance is really cancelled. - -### Allow for order details to change, but repeat order validation - -:::caution Camunda Platform 7 Only -Condition events are [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md) -::: - -If the customer changes the order details, the order must be validated again. - -
diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/choosing-the-dmn-hit-policy.md b/versioned_docs/version-1.3/components/best-practices/modeling/choosing-the-dmn-hit-policy.md deleted file mode 100644 index 970fce42bde..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/modeling/choosing-the-dmn-hit-policy.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Choosing the DMN Hit Policy -tags: -- DMN -description: "Hit policies describe different ways (standardized by DMN) to evaluate the rules contained in a decision table." ---- - -**Hit policies** describe different ways (standardized by DMN) to evaluate the rules contained in a decision table. Different hit policies do not only lead to different results, but typically also require different modes of thinking and reasoning about the meaning of the entire table. Therefore, it's crucial to not just know the different DMN hit policies, but also to understand the motivations for their existence and the most typical cases for using them. - -## Knowing the DMN hit policy basics - -A decision table consists of several **rules**, typically represented as rows. When reading such a row, we look at certain **input values** and deduce a certain result represented by **output values**. When using the simplest hit policy **"unique"** (**U**), such rules do **not overlap**: only a single rule must match.
- -1 - -We define an "input" value **season** here. For every single season ... - -2 - -... there is a **jacket** defined we want to use, the "output" of the rules here. - -3 - -The hit policy "**Unique**" (indicated by the character **U**) enforces that rules do **not overlap**: only a single rule must match. - -Now consider that we build a decision table with **overlapping rules**. In other words, that means more than one rule may match a given set of input values. We then need one of the **alternative hit policy** indicators to unambiguously understand the decision logic according to which such rules are interpreted. - -The hit policy **indicator** is a single character shown in the decision table's top left cell, right beneath the decision's name. The character is the initial letter of one of the defined seven hit policies `U`**nique**, `A`**ny**, `P`**riority**, `F`**irst**, `C`**ollect**, `O`**utput order** and `R`**ule order**. Furthermore, the hit policy 'Collect' may also be used with one of four aggregation operators, actually giving us four more hit policies `C+` (**Sum**), `C<` (**Minimum**), `C>` (**Maximum**) and `C#` (**Number**). - -Eight of those eleven hit policies evaluate a decision table to a **single result**. Three hit policies evaluate a decision table to **multiple results**. - -### Single result decision tables - -Such tables either return the output of only one rule or aggregate the output of many rules into one result. The hit policies to be considered are: - -* `U`**nique**: Rules do not overlap. Only a single rule can match. - -* `F`**irst**: Rules are evaluated from top to bottom. Rules may overlap, but only the first match counts. - -* `P`**riority**: Rule outputs are prioritized. Rules may overlap, but only the match with the highest output priority counts. - -:::note -Camunda does not yet support the hit policy **priority**. In essence, priorities are specified as an ordered list of output values in decreasing order of priority. Such priorities are therefore independent of rule sequence! Though not yet supported, you can mimic that behavior using hit policy "(**C**)ollect" and determining a priority yourself; for example, by means of an execution listener attached to the end of your business rule task. -::: - -* `A`**ny**: Multiple matching rules must not make a difference: all matching rules must lead to the same output. - -**Collect** and **aggregate**: The output of all matching rules is aggregated by means of an operator: - -* `C+`**Sum**: Add up all the matching rule's distinct outputs. -* `C<`**Minimum**: Take the smallest value of all the matching rule's outputs. -* `C>`**Maximum**: Take the largest value of all the matching rule's outputs. -* `C#`**Number**: Return the number of all the matching rule's distinct outputs. - -### Multiple result decision tables - -**Multiple result** tables may return the output of multiple rules. The hit policies for such tables are: - -* `C`**ollect**: All matching rules result in an arbitrarily ordered list of all the output entries. - -* `R`**ule order**: All matching rules result in a list of outputs ordered by the sequence of those rules in the decision table. - -* `O`**utput order**: All matching rules result in a list of outputs ordered by their (decreasing) output priority. - -:::note -Camunda does not yet support the hit policy **output order**. In essence, output orders are specified as an ordered list of output values in decreasing order of priority.
Such priorities are therefore independent of rule sequence! Though not yet supported, you can mimic that behavior using hit policy "(**C**)ollect" and determining an output order yourself; for example, by means of an execution listener attached to the end of your business rule task. -::: - -## Understanding DMN hit policy use cases - -Most situations can be addressed using different hit policies. In that case, the hit policy will have an effect on the readability and maintainability of the table. Often it is worth trying different varieties until you have a feel for what will work best. In practice, we often use the free [online simulator](https://consulting.camunda.com/dmn-simulator/) to experiment with various alternatives. - -### Unique: granting categories of customers a specified discount - -Hit policy "**Unique**" will typically make it easy to build a decision table, which ensures your rules are "complete" - in the sense that the rules not only do not overlap but also cover all possible input values - so that you do not "forget" anything.
- -1 - -The *input* area of each row specifies a certain **segment** of possible input values. - -2 - -This row, for example, expresses that *long-time silver customers receive a 9% discount*. - -Such a use case fits the hit policy "**Unique**". For such use cases, it is an advantage that this hit policy makes your decision logic invalid in case you violate its requirement that your table rules never "overlap": after all, you must not produce ambiguous results. - -### First: accepting a customer based on hard criteria - -Having said that, the hit policy "**First**" can sometimes make it easier for an organization to reason about decision logic dealing with some criteria that are "harder" (more "clearcut") than others. Furthermore, it can help to make a decision table layout more compact and therefore easier to interpret.
- -1 - -Assume that everybody in the organization knows that first rule: "Once on the blacklist, never again accepted." The layout and the hit policy of the decision table therefore support the organization's way of doing business: once we know that single fact about a customer, we don't need to think further. - -2 - -The following rules in rows 2-4 are expressed in an "Accept" manner and might change more often over time. The organization's way of thinking is literally "from top to bottom". Once we find an acceptance rule, we can deal with the customer. - -3 - -For execution in a decision engine, don't forget to add a rule not accepting any other customers as a last row. - -In scenarios dealing with **hard** **exclusion** and **inclusion** criteria, we often don't care that much if the rules overlap, but prefer to argue about very clearcut cases first and about more sophisticated ones later on. Furthermore, the organization's way of thinking and doing business might be better supported by a decision table using the hit policy **First**. - -Our experience so far tends to show that it can be trickier and more error-prone to reason about a **First** hit policy decision table than it might appear at first sight. Therefore, be especially careful and always test your logic in case you are dealing with sensitive business! - -### Collect: deciding which groups of people may review an order - -With hit policy **collect**, you do not care about the order or any interdependencies between your rules at all. Instead, you just "collect" independent rules and care about the question of which rules are applicable to your specific case. - -Consider, for example, the question of "who is allowed" to carry out some action, such as reviewing and deciding about incoming orders:
- -As a result of this decision table, we will either get `["Sales"]` or `["Management"]` or a list of both groups `["Sales", "Management"]`. - -We could use this information to route the order into the applicable group's task lists or control access rights of a configurable software solution, etc. Of course, you could at any time introduce more rules and eventually also differentiate between more groups without changing your software solution. - -### Sum: accepting a customer based on soft criteria - -Hit policy "collect" may be combined with operators such as **Sum (C+)**, leading to very different use cases. A very typical one is the requirement to evaluate a case based on manifold factors influencing the overall result. - -Assume, for example, that we want to deal with customers we know nothing about. They receive a score of 0. But in case we know something about them, we also weigh that knowledge in:
    - -1 - -The overall creditworthiness is derived by throwing in many factors. - -2 - -Here, for example, we give credit in case we had good experiences with the customer in the past. - -3 - -A very low current income does not matter as long as the customer is not a stranger to us! - -4 - -On the other hand, as soon as a customer has proof of a good income, they receive five points for "reasonable" income as well as 10 points extra for good income. - -Even if we had bad experience with a customer (which means they start from -15), we end up with an overall score of 0 in case the customer has a good income now, and start to accept the customer again. - -In scenarios dealing with **soft exclusion** and **inclusion** criteria, we need a mechanism to associate a weight to different scenarios. This is ideally supported by hit policy **Sum (C+)**. diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/creating-readable-process-models.md b/versioned_docs/version-1.3/components/best-practices/modeling/creating-readable-process-models.md deleted file mode 100644 index 9df6ddcd21e..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/modeling/creating-readable-process-models.md +++ /dev/null @@ -1,362 +0,0 @@ ---- -title: Creating readable process models -tags: - - BPMN ---- - -We create visual process models to better understand, discuss, and remember processes. Hence, it is crucial that models are easy to read and understand. The single most important thing is to use well-chosen labels. - -## Essential practices - -### Labeling BPMN elements - -Use [conventions for naming BPMN elements](naming-bpmn-elements.md); this will consistently inform the reader of the business semantics. The clarity and meaning of a process is often only as good as its labels. - -
    - -1 - -*Start event* labels inform the reader of how the process is *triggered*. - -2 - -An *activity* - labeled as "activity" - informs the reader of the piece of *work* to be *carried out*. - -3 - -*Gateway* labels clarify based on which condition(s) and along *which sequence flow* the process proceeds. - -4 - -Labeled *boundary events* clearly express in which cases a process execution might follow an *exceptional path*. - -5 - -Labeled *end events* characterize end *results* of the process from a business perspective. - -## Recommended practices - -### Modeling symmetrically - -Try to model symmetrically. Identify related splitting and joining gateways and form easily recognizable *visual*, eventually *nested*, *blocks* with those gateways. - -
    - -:::caution Camunda Platform 7 Only -Inclusive Gateways (OR) are [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md) -::: - -1 - -The inclusive gateway splits the process flow into two paths which are ... - -2 - -... joined again with an inclusive gateway. Inside that block ... - -3 - -another exclusive gateway splits the process flow into two more paths which are ... - -4 - -... joined again with an exclusive gateway. - -By explicitly showing *pairs of gateways* "opening" and "closing" parts of the process diagram, and by positioning such gateway pairs *as symmetrically as possible*, the readability of process model is improved. The reader can easily recognize logical parts of the diagram and quickly jump to those parts the reader is momentarily interested in. - -### Modeling from left to right - -Model process diagrams *from left to right*. By carefully positioning symbols from left to right, according to the typical point in time at which they occur, one can improve the readability of process models significantly: - -
    - -Modeling from left to right supports the reading direction (for western audience) and supports the human field of vision - which prefers wide screens. - -### Creating readable sequence flows - -Consciously decide whether *overlapping sequence flows* make your model more or less readable. On one hand, avoid overlapping sequence flows where the reader will not be able to follow the flow directions anymore. Use overlapping sequence flows where it is less confusing for the reader to see just one line representing several sequence flows leading to the same target. - -Avoid sequence flows *violating the reading direction*, meaning no outgoing flows on the left or incoming flows on the right of a symbol. - -
    - -1 - -The author could have made the five (!) sequence flows leading into the end event visible by separating them. However, by consciously choosing to partly overlap those flows, this model becomes less cluttered, therefore less confusing and easier to read. - -2 - -The author could have attached the sequence flow, leaving this task on its left. However, this would have decreased readability, because the flow connection violates the reading direction. The same applies to incoming flows on the right of a symbol. - -*Avoid flows crossing each other* and *flows crossing many pools or lanes*, wherever possible. Rearrange the order of lanes and paths to make your sequence flows more readable. Oftentimes, removing lanes can improve readability! Rearrange the order of pools in a collaboration diagram to avoid message flows crossing pools as much as possible. Often, you will find a "natural" order of pools reflecting the order of first involvement of parties in the end-to-end process. This order will often also lead to a minimum of crossing lines. - -*Avoid very long (multi page) sequence flows*, especially when flowing against the reading direction. The reader will lose any sense of what such lines actually mean. Instead, use link events to connect points which are not on the same page or screen anymore. - -
    - -:::caution Camunda Platform 7 Only -Link events are [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md) -::: - -1 - -You see a throwing link event here, which... - -2 - -...directly links to a catching link event just as if the sequence flow would have been connected. - -Avoid excessive use of link events. The example above serves to show the possible usage, but at the same time, it is too small to justify the usage of link events in a real-world scenario! - -### Modeling explicitly - -Make your models easier to understand by modeling *explicitly*, which most often means to either completely avoid certain more "implicit" BPMN constructs, or at least to use them cautiously. Always consider the central *goal of increased readability* and understandability of the model when deciding whether to model explicitly or implicitly. When in doubt, it's best to favor an explicit style. - -#### Using gateways instead of conditional flows - -Model splitting the process flow by always using *gateway symbols* like instead of conditional flows . - -
    - -:::caution Camunda Platform 7 Only -Inclusive Gateways (OR) and Conditional sequence flows are [not yet supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md) -::: - -1 - -For example, you could've left out this inclusive gateway by drawing two outgoing sequence flows directly out of the preceding task **Choose menu** and attaching conditions to those sequence flows (becoming conditional sequence flows ). However, experience shows that readers understand the flow semantics of gateways better, which is why we do not make use of this possibility. - - -#### Modeling start and end events - -Model the trigger and the end status of processes by always explicitly showing the *start* and *end event symbols*. - -
    - -:::caution -Process models without start and end event cannot be executed on the Camunda workflow engine -::: - -1 - -According to the BPMN standard, you could have left out the start event... - -2 - -...as long as you also leave out the end events of a process. However, you would have lost important information in your model, which is why we do not make use of this syntactical possibility. - -Be specific about the *state* you reached with your event from a *business perspective*. Quite typically, you will reach "success" and "failure" like events from a business perspective: - -
    - -1 - -'Invoice paid' better qualifies the "successful" business state than e.g. 'Invoice processed' would... - -2 - -...because in principle, you can call the failed state 'Invoice processed', too, but the reader of the diagram is much better informed by calling it 'Invoice rejected'. - -#### Separating splitting and joining gateways - -In general, avoid mixing up the split and join semantics of gateways by explicitly showing *two separate symbols*: - -
    - -1 - -You could have modeled this join implicitly by leaving out the explicitly joining XOR gateway and directly connecting two incoming sequence flows to... - -2 - -...the subsequent splitting XOR gateway. Of course, BPMN would allow this for other gateway types, too. However, experience shows that readers will often overlook the join semantics of such gateways serving two purposes at the same time. - -The fact that readers will often overlook the join semantics of gateways serving to join as well as split the process flow at the same time, combined with the preference for [modeling symmetrically](#modeling-symmetrically), leads us to prefer *splitting and joining gateways modeled with separate symbols*. - -However, there are cases in which the readability of models can be improved with *implicit modeling*. Consider the following example: - -
    - -1 - -The two incoming sequence flows to the task "Review tweet" could be merged with an XOR gateway, following explicit modeling. We argue that a merging XOR gateway directly behind the start event decreases the readability. A merging XOR gateway is a passive element and the reader expects the process to continue with an active element after the start event. - -#### Using XOR gateway markers - -Model the XOR gateway by explicitly showing the **X** symbol, even if some tools allow to draw a blank gateway. - -
    - -1 - -You could have shown the splitting gateway... - -2 - -...as well as the joining gateway without the **X** symbol indicating that it is an exclusive gateway. - -The **X** marker makes a clearer difference to the other gateway types (inclusive, parallel, event-based, complex) which leads us to prefer *explicit XOR gateway markers* in general. - -#### Splitting sequence flows with parallel gateways - -Always model splitting the process flow by explicitly showing the *gateway symbol*: - -
    - -1 - -You could have modeled this parallel split implicitly by leaving out the gateway and drawing two outgoing sequence flows out of the preceding task **Choose menu**. However, the reader needs deeper BPMN knowledge in order to understand this model. Additionally, for joining the parallel flows... - -2 - -...you will always need the explicit symbol. - -The fact that readers of models using parallelization will likely need to understand the semantics of a parallel join combined with the preference for modeling symmetrically leads us to prefer *explicit parallel gateways*, too. - - -#### Joining sequence flows with XOR gateways - -Model joining the process flow by explicitly showing the *XOR gateway symbol* so the reader does not have to know BPMN details to understand how two incoming or outgoing sequence flows in a task behave. Additionally, this often supports the [symmetry of the model](#modeling-symmetrically) by explicitly showing a "relationship" of the splitting and joining *gateways forming a visual "block"*. - -
    - -1 - -You could have modeled this join implicitly by leaving out the gateway and directly connecting the two incoming sequence flows to the subsequent task **Have lunch**. However, explicitly modeling the join better visualizes a block, the joining gateway semantically "belongs" to... - -2 - -...the earlier split. In case the reader is not interested in the details of dinner preparation but just in having dinner, it's easy to "jump" to the gateway, "closing" that logical part of the model. - -This is particularly helpful for models bigger than that example with many such (eventually nested) blocks. Consider the following model, showing two *nested blocks* of gateways: - -
    - -1 - -Now, you couldn't have modeled this join implicitly, because it's directly followed by an inclusive gateway with very different join semantics. *Consistency* of joining techniques is another reason why we prefer explicitly joining sequence flows in general. - -There are always exceptions to the rule! There are cases in which the readability of models can be *improved* with *implicit modeling*. So don't be dogmatic about explicit modeling; always aim for the most readable model. The following example shows a case of a model in which splitting and joining points do not form natural "blocks" anyway. In such cases, it can be preferable to make use of *implicit joining* to improve the overall readability! - - -### Avoiding lanes - -Consider *avoiding lanes* for most of your models all together. They tend to conflict with several of the best practices presented here, like [Modeling *Symmetrically*](#modeling-symmetrically), [Emphasizing the *Happy Path*](#emphasizing-the-happy-path) and [Creating Readable *Sequence Flows*](#creating-readable-sequence-flows). Apart from readability concerns, our experience also shows that lanes make it more difficult to change the resulting process models and therefore cause considerably *more effort in maintenance*. - -When modeling on an *operational level*, where showing the responsibility of roles matters most, we recommend to [use *collaboration diagrams*](#using-collaboration-diagrams) with several *separate pools* for the process participants instead of lanes. - -However, the usage of lanes might be meaningful for: - -* *Strategic* level models (see [BPMN Tutorial](https://camunda.com/bpmn/) and [Real-Life BPMN](https://www.amazon.com/Real-Life-BPMN-4th-introduction-DMN/dp/1086302095/) on details for modeling levels) - especially when they have a focus on *responsibilities and their borders*. - -* *Technical/executable* models with a focus on *human work-flow* and its ongoing "ping pong" between several participants. - -For these cases, also consider alternative methods to maintain and show roles: - -* As a *visible part* of the *task name*, e.g. in between squared brackets []: *"Review tweet [Boss]"*. - -:::caution Camunda Platform 7 Only -During execution you can remove this part of the task name if you like by using simple mechanisms like shown in the [Task Name Beautifier](https://github.com/camunda/camunda-consulting/tree/master/snippets/task-name-beautifier) so it does not clutter your tasklist. -::: - -* As a *text annotation* or a *custom artifact* - -:::note -Roles are part of your executable BPMN process model as *technical attributes* anyway - even if hidden in the BPMN diagram. For example, they can be used during execution for assignment at runtime. -::: - -## Helpful practices - -### Emphasizing the happy path - -You may want to emphasize the *"happy path"* leading to the delivery of a successful process result by placing the tasks, events, and gateways belonging to the happy path on a straight sequence flow in the center of your diagram - at least as often as possible. - -
    - -The *five* BPMN symbols belonging to the happy path are put on a straight sequence flow in the center of the diagram. - - -### Avoid modeling retry behavior - -A common idea is to model retry behavior into your process models. This *should be avoided* in general. The following process model shows a typical example of this anti-pattern: - -
    - -All operational use cases put into the model can be handled via Camunda tooling, e.g. by [retrying](/docs/components/concepts/job-workers/#completing-or-failing-jobs) or [Camunda Operate](/docs/components/operate/operate-introduction/). - - -### Using collaboration diagrams - -If you model on an operational level (see [BPMN Tutorial](https://camunda.com/bpmn/) and [Real-Life BPMN](https://www.amazon.com/Real-Life-BPMN-4th-introduction-DMN/dp/1086302095/) on details for modeling levels), use *collaboration diagrams* with several *separate pools* for the process participants [instead of lanes](#avoiding-lanes), as operational models using lanes make it very hard for the individual process participant to identify the details of their process involvement. - -Furthermore, model just *one coherent process per pool* (apart from event subprocesses, of course), even though BPMN in principle allows several processes per pool. This improves readability by constituting a clear visual border around every process and by providing a natural space for labeling that part of the end-to-end process in the pool's header. - -
    - -1 - -The Team Assistance is responsible for initial "Invoice Collection" as well as "Invoice Clarification" - if applicable. Those two processes are modeled by using two separate pools for the team assistance, just as... - -2 - -...the approver can see the "Invoice Approval" process in a separate pool and... - -3 - -...the managing director can see the "Invoice Payment" process in a separate pool while the collaboration diagram as a whole shows the business analyst that the overall end-to-end process works. - -Using *collaboration diagrams* with *separate pools* for the process participants allows to explicitly show interaction and communication between them by means of message flow and further improves readability by transparently showing the participants their own involvement in the end-to-end-process. As a consequence, they do not need to fully read and understand the end-to-end process in order to read, understand, and agree to their own involvement by looking at their own pools. - -### Showing interaction with systems - -Consciously decide how you want to model systems the process participants are interacting with. Use *data stores* to show systems which primarily serve as a means to store and retrieve data. Use - depending on your needs *collapsed* or *expanded* - *pools* for systems which are carrying out crucial activities in the process going way beyond storing and retrieving data. - -
    - -1 - -A *collapsed pool* is used to represent a system which supports the process and/or carries out process tasks on its own. The pool could be expanded later to model the internal system details, maybe even with the goal to execute a technical process flow directly with a BPMN capable process engine. - -2 - -A *data store* is used to represent a technical container meant to archive PDFs and store them for later retrieval. - -3 - -Another *data store* is used to represent a container which could be a physical storage place for paper invoices to be paid at the moment but could become a representation for business objects in a database with the object state "to be paid" in the future. - -When *choosing* between those *two options* for modeling systems (data stores, collapsed pools) keep in mind that only pools represent processes and therefore have the capability to be expanded and modeled in all their internal details later on. - -### Avoiding excessive usage of data objects - -Avoid excessive use of *data objects*, but use them cautiously to show the *most important data related aspects* of your process. - -Experience shows that many data objects and especially many data associations quickly clutter your process model and that visual noise reduces readability - especially for less experienced readers. - -You might find three practices helpful to find your own "right" amount of data visualization: - -
    - -1 - -Cautiously use data objects and associations to show the _most important data-related aspects_ of your process. We could have modeled that all the tasks in the "Payments Creation" process either read, update, or delete the "new payment"; however, we decided that we just want to point out that the process works on a new payment object. - -2 - -Use data stores for _coupling processes via data_. We could have modeled a lot of other tasks in the process that either read or update the "payments"; however, we decided to just point out the most important aspect for the process diagram, which is that the "Payments Creation" process of the delivery service is loosely coupled with the "Payments Processing" via commonly shared data. - -3 - -Here we decided that it's helpful to know that this message does not only inform that an adjustment possibility was checked, but that it also delivers all the necessary details of the adjustment. - -### Avoiding changes to symbol size and color - -Leave the *size of symbols as it is* by default. For example, different sizes of tasks or events suggest that the bigger symbol is more important than the smaller one - an often unwarranted assumption. Instead of writing long labels, use short and consistent labels in line with your [naming conventions](naming-bpmn-elements.md) and move all additional information into BPMN annotations associated with your specific BPMN element. - -Furthermore, avoid *excessive use of colors*. Experience shows that colors are visually very strong instruments and psychologically very suggestive, but will typically suggest different things to different readers. Additionally, a colorful model often looks less professional. - -However, there are valid exceptions. For example, you could mark the *happy path* through a process with a visually weak coloring: - -
    - -Another case for useful coloring might be to make a visual difference between *human* and *technical flows* within a bigger collaboration diagram by coloring the header bar on the left side of the pools. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/modeling-beyond-the-happy-path.md b/versioned_docs/version-1.3/components/best-practices/modeling/modeling-beyond-the-happy-path.md deleted file mode 100644 index 9f292f9921e..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/modeling/modeling-beyond-the-happy-path.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: Modeling beyond the happy path -tags: - - BPMN - - BPMN Error Event - - BPMN Message Event - - BPMN Timer Event - - Happy Path ---- - -First, model the happy path to the desired end result before collecting problems and exceptions, prioritizing them, and introducing them incrementally. Secondly, focus on one selected issue at a time, and choose the right techniques for modeling beyond the happy path. - -## The happy path and beyond - -The happy path is kind of the default scenario with a positive outcome, so no exceptions, errors, or deviations are experienced. Typically, you want to model the happy path first, and therefore you should define the desired *end result*, find a suitable *start event*, and collect the *activities* and external *dependencies* which _always_ need to be considered to reach the result. - -When we have that, the diagram shows the *happy path* of a business process (or of the selectively chosen part of the end-to-end business process): - -
    - -1 - -*End Event*: It's often the easiest first step to agree upon the desired ("happy") end *result* of a process. - -2 - -*Start Event*: As a second step, one might agree upon a *trigger* for the work leading to the end result. - -3 - -*Activities*: After that, you can brainstorm and collect activities which *always* need to be carried out to reach the result. - -4 - -*Intermediate Events*: Optionally, you can brainstorm and collect *milestones* (modeled as blank events) and important external *dependencies* (e.g. modeled as message events). - -### Modeling beyond the happy path by error scenarios - -As soon as you have this happy path, start modeling beyond the happy path. Focus on *one* particular, selected problem at a time. - -1. Try to *understand* the worries for *the business* in the light of the desired end result. - -1. Identify the *undesired end result* the process will reach in case the problem cannot be mitigated. This informs you about the *end event* you will eventually reach because of the problem. - -1. Identify the affected areas in the happy path. Can the problem occur at a *particular point*, *during* (one or several) *activities*, or basically *all the time*? This will inform you about the most promising modeling technique for the problem: whether either *gateways*, *boundary events*, or *event-based subprocesses* can serve you to fork off your "problem path". - -This best practice will guide you through practices that help you model beyond the happy path. - -## Forking off at a particular point - -With BPMN gateways, we can deal with problems arising at a *particular point* in our process. - -### Dealing with results - -By using data-based gateways, we *actively decide* "now and here" on the basis of our own *process data* which path our process must move along. For example, we can therefore use an XOR gateway to fork off a "problem path," dealing with a problematic result of *our own activities*: - -
    - -1 - -The *exclusive gateway* deals with the potentially problematic result of incomplete order data. Note that we deal here with the procedural consequences of work which already took place in the preceding task, where we actually checked the order for completeness. - -2 - -Again, the preceding task already dealt with the actual work of checking the customer's creditworthiness. The *result* of the task is a "yes" or "no" (true or false). We can deal with data by means of a data-based gateway, which immediately redirects to the path our process must move along. - -3 - -The *end event* characterizes the undesired end result "order declined," which we now reach because of having modeled two problems. In the example, both of them lead to one and the same business outcome. - -### Dealing with events - -By using event-based gateways, we *passively wait* for *future events* deciding about which path our process will have to move along. For example, we can therefore use it to fork off a "problem path" dealing with an undesired event *outside of our own control*: - -
    - -1 - -After having requested a delivery date (e.g. from wholesale), we use an *event-based gateway* to passively wait for what happens next. We can not know "now and here", because it's outside of our own control. - -2 - -The *intermediate message event* allows us to deal with the undesired event that the ordered good is not deliverable. - -### Dealing with missing results via timeouts - -By using event-based gateways, we can also deal with the situation that *nothing relevant* for our process *happens*. We do this by defining a time period, after which we decide that we do not want to wait any longer: - -
    - -1 - -The *intermediate timer event* allows us to deal with the situation that nothing relevant for our process happened for a defined time period. In case we do not get an answer from wholesale, we inform the customer that the order is not deliverable at the moment. - -## Forking off during (one or several) activities - -With BPMN boundary events, we can deal with problems arising *while we are actively occupied* to carry out work in our process. - -### Dealing with errors - -A typical case is that it turns out to be *impossible to achieve the result* of an activity while working on it. We can then choose to interrupt our work and fork off a "problem path" to deal with the issue: - -
    - -1 - -The *interrupting boundary error event* allows us to deal with the fact that the order is not readable. As this prevents us from properly judging the completeness of the order, we cannot reach one of the expected results of our activity ("complete" or "not complete"), but instead deal with the problem by interrupting the activity and assuming the order to be declined. - -When modeling for business process automation, "dealing with errors" might be a highly technical concern. As a rule of thumb, we just want to show the *"business related" problems* in a process model: those problems and errors which cause our business process to move along a different path, because different work must be carried out as a reaction. - -An example of a typical technical concern would be that we currently cannot reach a system, which is why, for example, we want to re-attempt it another time later on. We do not show such purely technical problems in a business process diagram, not even in an executable one: (1) It would clutter the diagram, and (2) There are more suitable ways to deal with technical issues potentially occurring almost anywhere. Read our Best Practice about [dealing-with-problems-and-exceptions](../../development/dealing-with-problems-and-exceptions) from a more technical point of view to learn more about the border between business-related concerns shown in a process diagram and purely technical concerns not shown in a process diagram. - -### Dealing with work on top of usual work - -Another typical use case for reacting to situations while we are actively occupied is that it sometimes turns out we need to do stuff *in addition to what we already do*: - -
    - -1 - -We encapsulate part of our process into a subprocess to enable us to express that while we are occupied with that part of the process, additional work might pop up. - -2 - -The *non-interrupting boundary timer event* allows us to speed up order preparation in case it takes longer than two days; for example, by informing a responsible manager. - -## Being able to react all the time - -A bit similar to boundary events, with BPMN event subprocesses we can deal with problems arising while we are actively occupied to carry out work. The main advantage when being compared with boundary events is that some issues can *occur almost anywhere* on our way through the happy path. - -### Dealing with issues occurring almost anywhere - -Some issues can occur almost anywhere on the way through our process. The event subprocess allows us to fork off a *problem path* modeled separately from our main process to deal with such issues: - -
    - -1 - -The *non-interrupting start message event* of the event subprocess allows us to express that wherever we currently are on our way through order confirmation, it can happen that the customer requests information about the status of that process. - -2 - -We should then provide the requested information without interfering with the order confirmation process itself. - -### Dealing with canceling the process - -Another typical use case for event-based subprocesses is a cancellation requested by the customer: - -
    - -1 - -The *interrupting start message event* of the event subprocess allows us to express that wherever we currently are on our way through order confirmation, it can happen that the customer requests cancellation. - -2 - -We should then interrupt the main process (which is already expressed by the nature of the start event) and inform an involved dealer. - -## Boundary events as alternative for event-based gateways - -### Using receive tasks with boundary events - -The examples above leverage the *event-based gateway*. BPMN also allows modeling *receive tasks* that wait for responses. This has the advantage that you can now leverage boundary events to deal with *missing results* or other *events occurring while you are waiting* for the response. This is an *alternative* to the event-based gateways shown in the above models. - -
    - -1 - -Instead of modeling an event for receiving a delivery date, we model a *task* here. - -2 - -The fact that we do not receive such an answer at all can now be modeled as an *interrupting boundary timer event*. We inform the customer about the status, but as the timer is interrupting, do not wait any longer for the delivery date. - -3 - -It might turn out that the ordered good is not deliverable. This can be modeled as *boundary message event*. Upon that message we cancel any further waiting but inform the customer about the status instead. - - -### Modeling a multi phase escalation path - -Boundary events are particularly useful when you consider that you might want to remind your dealer that the answer is overdue and give them another chance for transmitting the delivery date before you give up waiting. First, consider how this could be achieved by using event-based gateways: - -
    - -1 - -After having realized that the dealer's answer is late, we decide whether we want to remind the dealer and continue to wait - or not. We modeled here that we want to remind the dealer just once. - -2 - -However, note that while we are reminding the dealer, we are strictly speaking not in a state "ready-to-receive" the dealer's answer! According to BPMN execution semantics, the dealer's message might get lost until we are back at the event-based gateway. While you might want to choose to ignore that when modeling for communication purposes only, you will need to get it right for executable models. - -To get the BPMN execution semantics above fully right, we would now need to attach the two possible answers of the dealer ("Delivery data fixed", "Ordered good not available") as boundary events to the task "Remind dealer", too! Quite a modeling construct, just to properly wait for the dealer's response, right? Therefore, consider the following alternative to this modeling issue using boundary events only: - -
    - -1 - -Modeling a *non-interrupting boundary timer event* directly at a task which waits for the response has the advantage that we never leave the "ready-to-receive" state and therefore avoid troubles with the strict interpretation of BPMN execution semantics. - -The second alternative is *very compact* and avoids issues with *not being ready-to-receive*, but typically needs a *deeper understanding* of BPMN symbols and their consequences for the token flow. Therefore, we sometimes also prefer event-based gateways for showing human flows, and ignore sophisticated token flow issues as discussed here. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/modeling-with-situation-patterns.md b/versioned_docs/version-1.3/components/best-practices/modeling/modeling-with-situation-patterns.md deleted file mode 100644 index e8d85cad6ce..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/modeling/modeling-with-situation-patterns.md +++ /dev/null @@ -1,415 +0,0 @@ ---- -title: Modeling with situation patterns -tags: - - BPMN ---- - -When modeling, you will sometimes realize that some situations share common characteristics. To save work for yourself and spread such knowledge within your organization, collect and document such patterns as soon as you understand their nature and have found a satisfying solution for modeling them. For a start, we collected some typical patterns for you, which we see quite often in our modeling practice. You do not need to reinvent the wheel over and over again. - -## Escalating a situation step by step - -You need something and hope that it happens. Such a hope for a result may materialize, but it does not have to! After some time, you will typically become impatient and try to do something to make it happen. But if it then still does not happen, there comes a point at which you will have to decide that you must accept a failure. - -We sometimes also call that very common pattern a **multi-step escalation**. - -**Example:** "A month ago, I ordered a pair of shoes with that new online shop! After two weeks of waiting: nothing. I contacted them to determine what's up. The clerk promised me that the shoes will leave the warehouse today! But again, nothing, so after another week I just canceled that order. Since then I did not hear a word." - -In this scenario, the shop clearly did not implement the escalation of the delay properly. They should have applied one of the following patterns in the order delivery process: - -### Option 1: Using event-based gateways - -
    - -1 - -After ordering the goods, the process passively waits for the success case by means of an event-based gateway: the goods should be delivered. However, in case this does not happen within a reasonable time, we make a first step of escalation: remind the dealer. - -2 - -We still stay optimistic. Therefore, the process again passively waits for the success case by means of another event-based gateway: the goods should still be delivered. However, in case this does not happen again within a reasonable time, we make a second step of escalation: cancel the deal. - -**Evaluation:** - -* :thumbsup: This solution explicitly shows how the two steps of this escalation are performed. Timers are modeled separately, followed by their corresponding escalation activities. - -* :thumbsdown: The usage of separate event-based gateways leads to *duplication* (for example, of the receiving message events) and makes the model *larger*, even more so in case multiple steps of escalation need to be modeled. - -* :thumbsdown: During the time we need to remind the dealer, we are strictly speaking not in a position to receive the goods! According to the BPMN specification, a process can handle a message event only if it is ready to receive at exactly the moment it occurs. Fortunately, Camunda Cloud introduced [message buffering](/docs/components/concepts/messages/#message-buffering), allowing this model to be executed properly without losing messages. Using Camunda Platform 7, the message might get lost until we are at the second event-based gateway. - -:::note -You might want to use that pattern when modeling *simple two-phase escalations*. You should not execute it on Camunda Platform 7. -::: - -### Option 2: Using gateways forming a loop - -
    - -1 - -After having ordered the goods, the process passively waits for the success case by means of an event-based gateway: the goods should be delivered. However, in case this does not happen within a reasonable time... - -2 - -We choose by means of an exclusive gateway to make a *first step of escalation*: remind the dealer. We still stay optimistic. Therefore, the process returns to the event-based gateway and again passively waits for the success case: the goods should still be delivered. However, in case this does not happen again within a reasonable time, we choose a *second step of escalation*: cancel the deal. - -**Evaluation:** - -* :thumbsup: This model is a more *compact* and more *generic* modeling solution to the situation. When it comes to multiple steps of escalation, you will need such an approach to avoid huge diagrams. - -* :thumbsdown: The solution is *less explicit*. We could not choose to label the timer with explicit durations, as a single timer is used for both durations. The solution is *less readable* for a less experienced reading public. For a fast understanding of the two-step escalation, this method of modeling is less suitable. - -* :thumbsdown: During the time we need to remind the dealer, we are strictly speaking not in a position to receive the goods! According to the BPMN specification, a process can handle a message event only if it is ready to receive at exactly the moment it occurs. Fortunately, Camunda Cloud introduced [message buffering](/docs/components/concepts/messages/#message-buffering), allowing this model to be executed properly without losing messages. Using Camunda Platform 7, the message might get lost until we are at the second event-based gateway. - -:::note -You might want to use that pattern when modeling *escalations with multiple steps*. You should not execute it on Camunda Platform 7. -::: - -### Option 3: Using boundary events - -
    - -1 - -After having ordered the goods, the process passively waits for the success case by means of a receive task: the goods should be delivered. However, in case this does not happen within a reasonable time... - -2 - -a non-interrupting boundary timer event triggers a *first step of escalation*: remind the dealer. We still stay optimistic. Therefore, we did not interrupt the receive task, but continued to wait for the success case: the goods should still be delivered. - -3 - -However, in case this does not happen within a reasonable time, we trigger a *second step of escalation* by means of an interrupting boundary timer event: interrupt the waiting for delivery and cancel the deal. - -**Evaluation:** - -* :thumbsup: This model is even more *compact* and a very *generic* modeling solution to the situation. If it comes to multiple steps of escalation, the non-interrupting boundary timer event could even trigger multiple times. - -* :thumbsup: The model complies with BPMN execution semantics. Since we never leave the wait state, the process is always ready to receive incoming messages. - -* :thumbsdown: The solution is *less readable* and *less intuitive* for a less experienced reading public, because the way the interrupting and non-interrupting timers collaborate requires a profound understanding of boundary events and the consequences for token flow semantics. For communication purposes, this method of modeling is therefore typically less suitable. - -:::note -You might want to use that pattern when modeling *escalations with two steps* as well as *escalations with multiple steps* for *executable models.* -::: - -## Requiring a second set of eyes - -For a certain task - typically a critical one in terms of your business - you need the opinion, review, or approval of two different people. - -We sometimes also call that pattern the **four eyes principle**. - -**Example:** The manager of a small sized bank's lending department has a problem: "Over the last quarter, we lost €100,000 in unrecoverable medium-sized loans. Controlling now tells me that could probably have been easily avoided by more responsible decisions of our lending department staff! I want that every such decision is signed off by two people from now on." - -Modeling a process dealing with that requirement can be achieved easily, but the better solution also depends on whether you prefer overall speed over total effort. - -All of the following modeling patterns assume that the two or more tasks needed to ultimately approve the loan must not be completed by one and the same person. When executing such patterns, you must enforce that with the workflow engine. - -### Option 1: Using separate tasks - -
    - -1 - -A first approver looks at the loan and decides whether they approve. If they decide not to approve, we are done, but if the loan is approved... - -2 - -...a second approver looks at the loan. If they also decide to approve, the loan is ultimately approved. - -**Evaluation:** - -* :thumbsup: This solution *explicitly* shows how the two steps of this approval are performed. Tasks are modeled separately, followed by gateways visualizing the decision making process. - -* Note that the approvers work in a *strictly sequential* mode, which might be exactly what we need in case we want *minimization of effort* and, for example, display the reasonings of the first approver for the second one. However, we also might prefer *maximization of speed*. If this is the case, see solution [option 3 (multi-instance)](#option-3-using-a-multi-instance-task) further below. - -* :thumbsdown: The usage of separate tasks leads to *duplication* and makes the model *larger*, even more so in case multiple steps of approvals need to be modeled. - -You might want to use that pattern when modeling the need for a *second set* of eyes needed in *sequential* order, therefore *minimizing effort* needed by the participating approvers. - -While it is theoretically possible to model separate, explicit approval tasks in parallel, we do not recommend such patterns due to readability concerns. - -
    - -As a better alternative when looking for *maximization of speed*, see [option 3 (multi-instance)](#option-3-using-a-multi-instance-task) below. - -### Option 2: Using a loop - -
    - -1 - -A first approver looks at the loan and decides if they approve. If they decide not to approve, we are done, but... - -2 - -...if the loan is approved, we turn to a second approver to look at the loan. If they also decide to approve, the loan is ultimately approved. - -**Evaluation:** - -* :thumbsup: This model is a more *compact* modeling solution to the situation. If it comes to multiple sets of eyes needed, you will probably prefer such an approach to avoid huge diagrams. - -* Note that the approvers work in a *strictly sequential* mode, which might be exactly what we need if we want *minimization of effort* and, for example, display the reasonings of the first approver for the second one. However, we also might prefer *maximization of speed*. If this is the case, see [option 3 (multi-instance)](#option-3-using-a-multi-instance-task) below. - -* :thumbsdown: The solution is *less explicit*. We could not choose to label the tasks with explicit references to a first and a second step of approval, as a single task is used for both approvals. The solution is *less readable* for a less experienced reading public. For a fast understanding of the two steps needed for ultimate approval, this method of modeling is less suitable. - -You might want to use that pattern when modeling the need for *multiple sets* of eyes needed in *sequential* order, therefore *minimizing effort* needed by the participating approvers. - -### Option 3: Using a multi-instance task - -
    - -1 - -All the necessary approvers are immediately asked to look at the loan and decide by means of a multi-instance task. The tasks are completed with a positive approval. Once all positive approvals for all necessary approvers are made, the loan is ultimately approved. - -2 - -If the loan is not approved by one of the approvers, a boundary message event is triggered, interrupting the multi-instance task and therefore removing all the tasks of all approvers who did not yet decide. The loan is then not approved. - -**Evaluation:** - -* :thumbsup: This model is a very *compact* modeling solution to the situation. It can also easily deal with multiple sets of eyes needed. - -* Note that the approvers work in a *parallel* mode, which might be exactly what we need in case we want *maximization of speed* and want the approvers to do their work independent from each other and uninfluenced by each other. However, we also might prefer *minimization of effort*. If this is the case, see [option 1 (separate tasks)](#option-1-using-separate-tasks) or [option 2 (loop)](#option-2-using-a-loop) above. - -* :thumbsdown: The solution is much *less explicit* and *less readable* for a less experienced reading public, because the way the boundary event interacts with a multi-instance task requires a profound understanding of BPMN. For communication purposes, this method of modeling is therefore typically less suitable. - -You might want to use that pattern when modeling the need for *two* or *multiple sets* of eyes needed in *parallel* order, therefore *maximising speed* for the overall approval process. - -## Measuring key performance indicators (KPIs) - -You want to measure specific aspects of your process execution performance along some indicators. - -**Example:** A software developer involved in introducing Camunda gets curious about the business: "How many applications do we accept or decline per month, and how many do we need to review manually? How many are later accepted and declined? How much time do we spend for those manual work cases, and how long does the customer have to wait for an answer? I mean...do we focus on the meaningful cases...?" - -When modeling a process, we should actually always add some information about important key performance indicators (KPIs) implicitly. For example, specifically [naming start and end events](../naming-bpmn-elements/#naming-events) with the process state reached from a business perspective. Additionally, we might explicitly add additional business milestones or phases. - -While the following section concentrates on the aspects of modeling KPIs, you might want to learn more about using them for [reporting about processes](../../operations/reporting-about-processes/) from a more technical perspective. For example, when being faced with the task to actually retrieve and present Camunda's historical data collected on the way of execution. - -### Option 1: Showing milestones - -
    - -1 - -First, we assess the application risk based on a set of automatically evaluable rules. - -2 - -We can then determine whether the automated rules already came to a (positive or negative) conclusion or not. If the rules led to an unsure result, a human must assess the application risk. - -3 - -We use explicit intermediate events to make perfectly clear that we are interested in the applications which never see a human... - -4 - -...and be able to compare that to the applications which needed to be assessed manually, because the automatic assessment failed to determine a clear result. - -5 - -We also use end events, which are meaningful from a business perspective. We must know whether an application was either accepted... - -6 - -...or rejected. - -By means of that process model, we can now let Camunda count the applications which were accepted and declined. We know how many and which instances we needed to review manually, and can therefore also narrow down our *accepted/declined statistics* to those manual cases. - -Furthermore, we will be able to measure the *handling time* needed for the user task; for example, by measuring the time needed from claiming the task to completing it. The customer will need to wait a *cycle time* from start to end events, and these statistics, for example, could be limited to the manually assessed applications and will then also include any idle periods in the process. - -By comparing the economic *value* of manually assessed insurance policies to the *effort* (handling time) we invest into them, we will also be able to learn whether we focus our manual work on the meaningful cases and eventually improve upon the automatically evaluated assessment rules. - -### Option 2: Emphasizing process phases - -As an alternative or supplement to using events, you might also use subprocesses to emphasize certain phases in your process. - -
    - -1 - -By introducing a separate embedded subprocess, we emphasize the *phase* of manual application assessment, which is the critical one from an economic perspective. - -Note that this makes even more sense if multiple tasks are contained within one phase. - -## Evaluating decisions in processes - -You need to come to a decision relevant for your next process steps. Your actual decision depends on a number of different factors and rules. - -We sometimes also call that pattern **business rules** in BPMN. - -**Example:** The freshly hired business analyst is always as busy as a bee: "Let's see... Category A customers always get their credit card applications approved, whereas Category D gets rejected by default. For B and C it's more complicated. Right, in between 2500 and 5000 Euros, we want a B customer, below 2500 a C customer is OK, too. Mmh. Should be no problem with a couple of gateways!" - -### Showing decision logic in the diagram? - -
    - -When modeling business processes, we focus on the flow of work and just use gateways to show that following tasks or results fundamentally differ from each other. However, in the example above, the business analyst used gateways to model the logic underlying a decision, which clearly is considered to be an anti-pattern! - -It does not make sense to model the rules determining a decision inside the BPMN model. The rules decision tree will grow exponentially with every additional criterion. Furthermore, we typically will want to change such rules much more often than the process (in the sense of tasks needed to be carried out). - -### Using a single task for a decision - -
    - -1 - -Instead of modeling the rules determining a decision inside the BPMN model, we just show a single task representing the decision. Of course, when preparing for executing such a model in Camunda, we can wire such a task with a DMN decision table or some other programmed piece of decision logic. - -2 - -While it would be possible to hide the evaluation of decision logic behind the exclusive gateway, we recommend always showing an explicit node with which the data is retrieved, which then might be used by subsequent data-based gateways. - -## Distinguishing undesired results from fatal problems - -You model a certain step in a process and wonder about undesired outcomes and other problems hindering you from achieving the result of the step. - -**Example:** What today is a problem for the business might become part of the happy path in a less successful future: "Before we can issue a credit card, we must ensure that a customer is credit-worthy. Unfortunately, sometimes it might also turn out that we cannot even get any information about the customer. Then we typically also reject at the moment. Luckily, we do have enough business with safe customers anyway." - -### Option 1: Using gateways to check for undesired results - -
    - -1 - -Showing the check for the applicant's creditworthiness as a gateway also informs about the result of the preceding task: the applicant might be creditworthy - or not. Both outcomes are *valid results* of the task, even though one of the outcomes here might be *undesired* from a business perspective. - -### Option 2: Using boundary error events to check for fatal problems - -
    - -1 - -Not to know anything about the creditworthiness (because we cannot even retrieve information about the applicant) is not considered to be a valid result of the step, but a *fatal problem* hindering us from achieving any valid result. We therefore model it as a boundary error event. - -The fact that both problems (an unknown applicant number or an applicant who turns out not to be credit-worthy) lead us at the moment to the same reaction in the process (we reject the credit card application) does not change the fact that we need to model them differently. The decision in favor of a gateway or an error boundary event solely depends on the exact definition of the result of a process step. See the next section. - -### Understanding the definition of the result - -What we want to consider to be a valid result for a process step depends on assumptions and definitions. We might have chosen to model the process above with slightly different execution semantics, while achieving the same business semantics: - -
    - -1 - -The only valid result for the step "Ensure credit-worthiness" is knowing that the customer is in fact credit-worthy. Therefore, any other condition must be modeled with an error boundary event. - -To achieve clarity by means of process models, it is absolutely crucial for modelers to have a clear mental definition of the *result* a specific step produces, and as a consequence, to be able to distinguish *undesired results* from *fatal problems* hindering us from achieving any result for the step. - -While there is not necessarily a right way to decide what to consider as a valid result for your step, the business reader will typically have a mental preference to see certain business issues either more as undesired outcomes or more as fatal problems. However, for the executable pools, your discretion to decide about a step's result might also be limited when using, for example, service contracts which are already pre-defined. - -## Asking multiple recipients for a single reply - -You offer something to or request something from multiple communication partners, but you actually just need the first reply. - -We sometimes also call that pattern **first come, first serve**. - -**Example:** A well-known personal transportation startup works with a system of relatively independent drivers. "Of course, when the customer requests a tour, speed is everything. Therefore, we need to limit a tour to those of our drivers who are close by. Of course, there might be several drivers within a similar distance. We then just offer the tour to all of them!" - -### Using a multi-instance task - -
    - -1 - -After determining all drivers currently close enough to serve the customer, we push the information about the tour to all of those drivers. - -2 - -We then wait for the reply of a single driver. Once we have it, the process won't wait any longer, proceeds to the end event, and informs the customer about the approaching driver. - -According to the process model, it is possible that another driver accepts the tour as well. However, as the process in the tour offering system is not waiting for the message anymore, it will get lost. As our process proceeded to the end event after the first reply, all subsequent messages are intentionally ignored in this process design. - -## Processing a batch of objects - -You need to process many objects at once, which were already created before one by one, or which were updated one by one to reach a certain status. - -We sometimes also call that pattern simply the **1-to-n problem**. - -**Example:** A lawyer explains to a new client the way he intends to bill him: "Of course, if you need advice, you can call me whenever you want! We will agree about any work that needs to be done and my assistant will track those services which are subject to a charge. Once a month mostly you will receive a neatly-structured invoice providing you with all the details!" - -### Using data stores and multi instance activities - -
- -1 - -The client asks for advice whenever they need it. Note that we create one process instance per request for advice. - -2 - -The lawyer makes sure to record the billable hours needed for the client. - -3 - -As he does not directly inform anybody by doing this, but rather collects data, we show this with a data store representing the time sheet and a data association pointing in its direction - representing the write operation. - -4 - -The assistant starts their invoicing process on a monthly basis. In other words, we create one process instance per monthly billing cycle. - -5 - -As a first step, the assistant determines all the billable clients. These are the clients for which time sheet entries exist in the respective month. Note that we have *many* legal advice instances that have a relationship to *one* billing instance and that the connection is implicitly shown by the read operation on the current status of data in the time sheet. - -6 - -Now that the assistant knows the billable clients, they can iterate through them and invoice all of them. We use a sequential multi-instance subprocess to illustrate that we need to do this for every billable client. - -7 - -On the way, the assistant is also in charge of checking and correcting time sheet entries, illustrated with a parallel multi-instance task. Note that these time sheet entries (and hence task instances) relate here 1:1 to the instances of the lawyer's "legal consulting" process. In real life, the lawyer might have created several time sheet entries per legal advice process, but this does not change the logic of the assistant's process. - -8 - -Once the client is invoiced, the assistant starts a "payment processing" instance per invoice, the details of which are not shown in this diagram. We can imagine that the assistant needs to be prepared to follow up with reminders until the client eventually pays the bill. - -## Concurring dependent instances - -You need to process a request, but need to make sure that you don't process several similar requests at the same time. - -**Example:** A bank worries about the increasing costs for creditworthiness background checks: "Such a request costs real money, and we often have packages of related business being processed at the same time. So we should at least make sure that if one credit check for a customer is already running, we do not perform another credit check for the same customer at the same time." - -### Using message events - -
- -1 - -Once an instance passes this event and moves on to the subsequent actual determination of the creditworthiness... - -2 - -...other instances will determine that there already exists an active instance and wait to be informed by this instance. - -3 - -When the active instance has determined the creditworthiness, it will move on to inform the waiting instances... - -4 - -...which will receive a message with a creditworthiness payload and be finished themselves with the needed information. - -The model explicitly shows separate steps (*determine* and *inform* waiting instances), which you might want to implement more efficiently as one single step that handles both semantic steps at once by means of a small piece of programming code. - -### Using a timer event - -While using timer events can be a feasible approach if you want to avoid communication between instances, we do not recommend it. For example, one downside is that such solutions cause delays and overhead due to the periodic queries and the loop. - -
- -1 - -Once an instance passes this event and moves on to the subsequent actual determination of the creditworthiness... - -2 - -...all other instances will go into a wait state for some time, but check periodically whether the active instance is finished. - -3 - -When the active instance has determined the creditworthiness and finishes... - -4 - -...all other instances will also finish after some time. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/naming-bpmn-elements.md b/versioned_docs/version-1.3/components/best-practices/modeling/naming-bpmn-elements.md deleted file mode 100644 index 3684f606421..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/modeling/naming-bpmn-elements.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: "Naming BPMN elements" -description: "Best Practices for naming BPMN elements" -tags: - - BPMN - - Naming Convention ---- - -Name all elements in your BPMN diagrams by focusing on the business perspective. For activities, use a verb to describe what to do. For events, describe which (business) state the process or domain object is currently in. For (data-based) gateways, pose a question and describe the conditions under which the process moves on along the outgoing flows. - -## Essential practices - -### Naming activities - -Name a *task* using an object and a verb in the infinitive. By doing this, you consistently describe *what you do with an object*. - -
    - -Name a *subprocess* (or *call activity*) by using an object and a (by convention *nominalized*) verb. Similar to tasks, you should always describe *what you do with an object*. - -
    - -:::note -Avoid very broad and general verbs like "Handle invoice" or "Process order." Try to be more specific about what you do in your activity from a business perspective. -::: - -### Naming events - -Wherever possible, name an *event* using an object and a verb reflecting a state. Always try to describe *which state an object is in* when the process is about to leave the event. - -
    - -This naming approach does not always work perfectly. In those cases, precisely describe the business semantics when the process is about to leave the event. The following names are also valid: - -
    - -Be specific about the state you reached with your event from a business perspective. Often, you will reach "success" and "failure" like events from a business perspective: - -
    - -1 - -"Invoice paid" better qualifies the "successful" business state than "Invoice processed" would... - -2 - -...because in principle, you can call the failed state "Invoice processed", too, but the reader of the diagram is much better informed by calling it "Invoice rejected". - -:::note -Avoid very broad and general verbs like "Invoice processed" or "Order handled"! -::: - -### Naming gateways - -Label a data-based *exclusive gateway* with a question. Label the outgoing sequence flows with the conditions they are executed under. Formulate the conditions as answers to the question posed at the gateway. - -
    - -This naming approach does not always work for *inclusive gateways*, because the outgoing flows' conditions can be completely independent from each other. Still, use a question whenever possible. - -
    - -If this is not possible, leave out the question completely but describe the conditions under which the outgoing paths are executed. - -
    - -*Avoid naming event-based gateways*, but ensure you name their subsequent events. Also, avoid naming *parallel gateways* and all forms of *joining gateways*. You don't need to specify anything about those gateways, as the flow semantics are always the same. - -### Naming processes - -A *pool* should be given the same name as the process the pool contains using an object and a nominalized verb. Optionally, add the organizational role responsible for the process shown in the pool as a whole. - -
    - -If you have more than one lane in a pool, name each *lane* using the organizational role or technical system responsible for carrying out the activities shown in the lane. - -
    - -Name a *diagram* (file) with same name as the process shown in the diagram. In case of a collaboration diagram, use a name reflecting the end-to-end perspective shown in that diagram. - -## Recommended practices - -### Using sentence case - -Use [sentence case](https://en.wiktionary.org/wiki/sentence_case) when naming BPMN symbols. This is standard capitalization of an English sentence, with the first letter uppercase and subsequent letters lowercase, with exceptions such as proper nouns or acronyms. - -
    - -### Avoiding technical terms - -Avoid using purely *technical terms* when naming activities or other BPMN symbols, for example. These are not always clear to every reader. Completely avoid using names of coding artifacts like classes, methods, technical services, or purely technical systems. - -## Helpful practices - -### Avoiding abbreviations - -Avoid using *abbreviations* as they are not always clear to every reader. This is especially true for abbreviations which are specific to companies or departments. Try to avoid them completely. - -If you want to use an abbreviation in your model (to save space or sometimes even to improve understandability) make sure you explain the abbreviation in the model in brackets, by text annotations, or use an accessible glossary. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png b/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png deleted file mode 100644 index ee573aaec7e..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png b/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png deleted file mode 100644 index 67d65ae73e6..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids.md b/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids.md deleted file mode 100644 index 9d5db1fe6c1..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/modeling/naming-technically-relevant-ids.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Naming technically relevant IDs" -tags: - - BPMN - - Naming Convention ---- - -For executable flows, properly name all relevant technical element IDs in your BPMN diagrams. - -Focus on process, activity, message, and error IDs, but also consider events as well as gateways and their sequence flows that carry conditional expressions. Those elements can show up regularly (e.g. in your logs) and it makes things easier if you can interpret their meaning. - -## Using naming conventions for BPMN IDs - -Define developer-friendly and business-relevant IDs for the process itself, as well as all activities, messages, and errors. Also consider events, gateways, and the sequence flows that carry conditional expressions. Even though IDs are just identifiers, keep in mind that they will show up regularly on the technical level. Meaningful IDs will help a lot. - -Examine the IDs shown in the following example: - -
- -The following table provides you with a guideline that we would use in a context where developers are comfortable with *Java* and its typical *camelCase* naming style. You may adapt these suggestions to typical naming conventions used in your programming context. - -| # | Element | XML Attribute | Prefix or Suffix | Resulting ID | -| -- | -- | -- | -- | -- | -| **1** | Tweet Approval | process/@id | Process | TweetApprovalProcess | -| **2** | New tweet written | startEvent/@id | StartEvent\_ | StartEvent\_NewTweetWritten | -| | | message/@id | Message\_ | Message\_NewTweetWritten | -| | | message/@name | Msg\_ | Msg\_NewTweetWritten | -| **3** | Review tweet | userTask/@id | Task\_ | Task\_ReviewTweet | -| **4** | Tweet approved? | exclusiveGateway/@id | Gateway\_ | Gateway\_TweetApproved | -| **5** | No | sequenceFlow/@id | SequenceFlow\_ | SequenceFlow\_TweetApprovedNo | -| **6** | Tweet duplicated | boundaryEvent/@id | BoundaryEvent\_ | BoundaryEvent\_TweetDuplicated | -| | | error/@id | Error\_ | Error\_TweetDuplicated | -| | | error/@errorCode | Err\_ | Err\_TweetDuplicated | -| **7** | Tweet published | endEvent/@id | EndEvent\_ | EndEvent\_TweetPublished | - -### Editing IDs with Camunda Modeler - -We recommend using Camunda Modeler's properties panel to edit technical identifiers and change them according to your naming conventions, as shown here for the process ID: - -![Properties Panel](naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png) - -We especially do not recommend editing identifiers in the XML directly, as it might accidentally corrupt your BPMN file. You have to keep the identifiers in the section about the graphical layout (the so-called "DI" for diagram interchange) further down in sync with the execution semantics at the top of the XML. - -However, we include a (shortened) XML example of all those identifiers mentioned for illustration: - -```xml -<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL" -             xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" -             targetNamespace="http://camunda.org/examples"> -  <message id="Message_NewTweetWritten" name="Msg_NewTweetWritten" /> -  <error id="Error_TweetDuplicated" errorCode="Err_TweetDuplicated" /> -  <process id="TweetApprovalProcess" isExecutable="true"> -    <startEvent id="StartEvent_NewTweetWritten" name="New tweet written"> -      <messageEventDefinition messageRef="Message_NewTweetWritten" /> -    </startEvent> -    <userTask id="Task_ReviewTweet" name="Review tweet" /> -    <exclusiveGateway id="Gateway_TweetApproved" name="Tweet approved?" /> -    <sequenceFlow id="SequenceFlow_TweetApprovedNo" name="No" sourceRef="Gateway_TweetApproved" targetRef="EndEvent_TweetRejected" /> -    <boundaryEvent id="BoundaryEvent_TweetDuplicated" name="Tweet duplicated" attachedToRef="Task_ReviewTweet"> -      <errorEventDefinition errorRef="Error_TweetDuplicated" /> -    </boundaryEvent> -    <endEvent id="EndEvent_TweetPublished" name="Tweet published" /> -    <endEvent id="EndEvent_TweetRejected" name="Tweet rejected" /> -  </process> -  <bpmndi:BPMNDiagram id="BPMNDiagram_TweetApprovalProcess"> -    <!-- diagram interchange (DI) elements referencing the identifiers above, omitted here --> -  </bpmndi:BPMNDiagram> -</definitions> -``` - -8 - -Elements in the diagram interchange section (DI) reference identifiers from above; you have to adjust them accordingly! Camunda Modeler takes care of this automatically. - -Changing IDs can potentially break your tests or even process logic if done at a late stage of development. Therefore, consider using meaningful IDs right from the beginning and perform the renaming as part of the modeling. - -### Aligning the BPMN file name with the process ID - -It is a good practice to *align* the *file name* of your BPMN models with the *process ID* of the executable process that is inside the file. - -![BPMN file name](naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png) - -## Generating ID constants classes - -If you have lots of process, case, and decision definitions with lots of IDs, consider generating constant classes (e.g. via XSLT) directly from your BPMN or DMN XML files; a hand-written sketch of such a class is shown below. For example, this can be used for testing. - -## Using a Camunda Modeler plugin to generate meaningful IDs - -You can use [this modeler plugin community extension](https://github.com/camunda-community-hub/camunda-modeler-plugin-rename-technical-ids) to automatically convert your IDs to comply with our best practices. Of course, you could also use this as a basis to create your own modeler plugin to generate IDs that follow your custom naming conventions. Or, you could implement a similar plugin that checks whether all relevant IDs follow your naming conventions. 
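For illustration, a minimal hand-written version of such a constants class could look like the following sketch. It only assumes the example IDs from the table above; the class name and grouping are made up for this example.

```java
// Hypothetical, hand-written ID constants for the tweet approval example above.
// In a real project you would rather generate this class from the BPMN XML (e.g. via XSLT).
public final class TweetApprovalProcessConstants {

  public static final String PROCESS_DEFINITION_KEY = "TweetApprovalProcess";

  public static final String TASK_REVIEW_TWEET = "Task_ReviewTweet";
  public static final String GATEWAY_TWEET_APPROVED = "Gateway_TweetApproved";
  public static final String BOUNDARY_EVENT_TWEET_DUPLICATED = "BoundaryEvent_TweetDuplicated";
  public static final String END_EVENT_TWEET_PUBLISHED = "EndEvent_TweetPublished";

  public static final String MESSAGE_NEW_TWEET_WRITTEN = "Msg_NewTweetWritten";
  public static final String ERROR_CODE_TWEET_DUPLICATED = "Err_TweetDuplicated";

  private TweetApprovalProcessConstants() {
    // utility class, not meant to be instantiated
  }
}
```

Tests and delegate code can then reference these constants instead of repeating raw ID strings, so a renamed element surfaces as a compile error rather than a silently failing query.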
\ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/document-request-failed.png b/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/document-request-failed.png deleted file mode 100644 index d966c69132d..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/document-request-failed.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-detail.png b/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-detail.png deleted file mode 100644 index e2019f6ed79..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-detail.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed-with-detail.png b/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed-with-detail.png deleted file mode 100644 index 1b4074a8d4b..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed-with-detail.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed.png b/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed.png deleted file mode 100644 index 91358014a03..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7.md b/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7.md deleted file mode 100644 index 22292dffd64..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/operations/operating-camunda-c7.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: "Operating Camunda 7" -tags: - - Save Point - - Retry - - Incident - - Monitoring - - Alarming - - Backup ---- - -To successfully operate Camunda Platform 7.x, you need to take into account operation requirements when modeling business processes. Use your existing tools and infrastructure for technical monitoring and alarming. When appropriate, use Camunda Cockpit and consider extending it with plugins instead of writing your own tooling. - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only! The Camunda Cloud stacks differs and operating it is discussed in [Camunda Cloud Self-Managed](/docs/self-managed/about-self-managed/). -::: - -## Installing Camunda Platform 7.x - -For a quick start, especially during development, follow [our greenfield recommendation for Camunda 7](../../architecture/deciding-about-your-stack-c7). - -For *production* usage we recommend setting up the container of your choice yourself, as we do not make sure we always ship the latest stable patched container version in our distribution. Additionally, we cannot ship some containers for licensing reasons. 
Install Camunda into this container following the [installation guide](https://docs.camunda.org/manual/latest/installation/). Add required JDBC drivers for the database of your choice and configure data sources accordingly. Make sure to [secure Camunda](../securing-camunda-c7/) if required. - -We recommend to *script* the installation process, to allow for an *automated installation*. Typical steps include: - -1. Set up (or extract) the container and install Camunda into it. As an alternative, you might use the Camunda distribution and remove the example application. -2. Add JDBC drivers and configure the data source for Camunda. -3. Configure identity management (e.g. to use LDAP) or add required users and groups to the database-based identity management. -4. Set up Maven build for Camunda webapp in case you want to add your own plugins or customizations. -5. Install the Camunda license. - -To script the installation, you can retrieve all required artifacts also from our Maven repositories. This way, it is easy to switch to new Camunda versions. Integrate all pieces by leveraging a scripted configuration management and server automation tool such as [Docker](http://www.docker.com/), [Puppet](https://puppet.com/), [Chef](https://www.chef.io/), or [Ansible](http://www.ansible.com/). - -## Setting up monitoring and alarming - -Certain situations have to be recognized quickly in order to take appropriate action during the runtime of the system. Therefore, consider monitoring and alarming up front when planning for production operations. - -Distinguish between process execution-related monitoring and basic systems monitoring. Do systems monitoring via normal Java or Container Tools - nothing Camunda specific is needed in that area. - -### Recognizing and managing incidents - -In case a service call initiated by Camunda fails, a *retry* strategy will be used. By default, a service task is retried three times. Learn more about [retrying failed transactions](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#failed-jobs) with your custom retry strategy. - -In case the problem persists after those retries, an *incident* is created and Camunda will not recover without intervention from a human operator. Therefore, make sure somebody is notified whenever there are any (new) incidents. - -**You can build an *active* solution**, where Camunda actively notifies somebody when there is a new incident. For example, you could send an email or create a user task in Camunda. To achieve this, you can hook in your own [incident handler](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/impl/incident/IncidentHandler.html) as shown in [this example](https://github.com/camunda/camunda-consulting/tree/master/snippets/incidents-as-tasks). The upside is that sending emails like this is very easy, the downside is that you have to implement Camunda specific classes. - -However, if a crucial system goes down you might end up spamming people with thousands of process instances running into the same incident. - -This is why typically **a passive solution is preferred**, which queries for (new) incidents from the outside, leveraging the Camunda (Java or REST) API and taking the desired action. The most common way is to query the number of incidents by the tool of your choice using the REST API: `GET incident/count`. More information can be found in the [REST API](https://docs.camunda.org/manual/latest/reference/rest/incident/get-query-count/). 
We prefer the REST API over lower-level technologies (like JMX or PMI), as this typically works best in any environment. - -Now you can easily batch multiple incidents into one email or delegate alarming to existing tools like Nagios or Icinga. An additional advantage is that you probably already have proper alarming groups defined in such a tool. - -### Monitoring performance indicators - -Monitor the following typical performance indicators *over all process definitions* at once: - -- Number of *open executable jobs*: `GET /job/count?executable=true` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/job/get-query-count/)), as these are jobs that should be executed, but are not yet. -- Number of *open incidents*: `GET /incident/count` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/incident/get-query-count)), as somebody has to manually clear incidents and increasing numbers point to problems. -- Number of *running process instances*: `GET /process-instance/count` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/process-instance/get-query-count/)). Increasing numbers might be a trigger to check the reasons, even if it can be perfectly fine (e.g. increased business). - -:::note -If you want to monitor *process definition-specific* performance indicators, you can either iterate over the process definitions - e.g. by using `GET /process-definition/{id}/statistics` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/process-definition/get-activity-statistics/)), or leverage `GET /process-definition/statistics` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/process-definition/get-statistics/)), which groups overall performance indicators by process definitions. Beware that you may also need to take older versions of process definitions into account. -::: - -### Organizing dedicated teams for monitoring - -In general, the performance indicators mentioned above can and should be *monitored generically* and independent of specific process applications. However, you may want to set up *dedicated alarming* for different operating teams with more knowledge about specific process application characteristics. For example, one of those teams might already know what the typical number of open user tasks for a certain process definition is during normal runtime. There are two approaches to achieve this: - -**The recommended approach is to configure dedicated alarming directly in your monitoring tool** by creating separate monitoring jobs querying the performance indicators for specific process definitions. This approach does not need any operations-centric adjustments in Camunda and is easy to set up and handle. - -An alternative approach is to define team-specific bundles of process definitions in Camunda by leveraging the process definition "category" or even your own BPMN extension elements. However, this information cannot be directly used in the queries mentioned above. Hence, you have to implement additional logic to do so. We typically advise that you do not do so unless you have very good reasons to invest the effort. - -### Creating your own alarming mechanism - -In case you do not have a monitoring and alarming tool or cannot create new jobs there, build a simple alarming scheduler yourself. This could be a Java component that is called every couple of minutes, queries the current performance indicators via the Java API, and generates custom emails afterwards. 
- -```java -public void scheduledCheck() { -  // Query for incidents -  List<Incident> incidents = processEngine.getRuntimeService() -    .createIncidentQuery().list(); -  // Prepare mailing text -  String emailContent = "There are " + incidents.size() + " incidents:\n"; -  for (Incident incident : incidents) { -    emailContent += incident.getIncidentMessage() + "\n
    "; - } - emailContent += "Please have a look into Camunda Cockpit for details."; - // Send mailing, e.g. via SimpleMail - sendEmail(emailContent); -} -``` - -### Defining custom service level agreements - -Apart from generic monitoring, you might want to define *business oriented service level agreements (SLAs)* for very specific aspects of your processes, like for instance, overdue tasks, missed deadlines or similar. You can achieve that by: - -1. Adding custom extension attributes in your BPMN process definition, e.g. for specific tasks, message events, etc., which serve to define your specific business performance indicators. -2. Reading deployed process definitions and their *custom extension attributes*, e.g. by means of Camunda's [BPMN Model API](https://docs.camunda.org/manual/latest/user-guide/model-api/bpmn-model-api/) and *interpreting* their meaning for your *business performance indicators*, e.g. by calculating deadlines for tasks. -3. *Querying* for (e.g. task or other) instances within/without the borders of your service level agreement. - -This is normally implemented similar to the Java Scheduler we described above. - -## Intervening with human operator actions - -### Handling incidents - -Incidents are ultimately [failed jobs](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/failed-jobs/), for which no automatic recovery can take place anymore. Hence, a human operator has to deal with incidents. Check for incidents within Camunda Cockpit and take action there. You might, for example, want to: - -- [Edit process variables](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/process-instance-view/#edit-variables). -- [Modify the process instance ("move" the tokens)](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/process-instance-modification/). -- [Trigger additional retries](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/failed-jobs/#retry-a-failed-job). - -Camunda Enterprise Edition offers a [bulk retry](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/failed-jobs/#bulk-retry) feature allowing you to retry jobs which failed for a common reason (e.g. a remote system being down for a longer time) with a single human operator action. - -:::note -If you have a failing *call activity* in your process, you *retry "bottom-up"* (in the failing sub process instance), but you *cancel "top-down"* (the parent process instance to be canceled). Consider the following example incident visualized in Camunda Cockpit. -::: - -![Cockpit call activity](operating-camunda-c7-assets/insurance-application-failed-with-detail.png) - -You eventually see the incident first on the parent process call activity **Request documents**, but it is actually caused by the failing activity **Request documents** in the subprocess. For better comprehensibility, this is directly visualized in the picture above. In Cockpit, you can navigate to the call activity in the **called process instance** pane to the bottom of the screen. There you could now *retry* the failing step of the *subprocess* instance: - -![Cockpit failed task](operating-camunda-c7-assets/document-request-failed.png) - -1 - -By clicking on this button, you can *retry* the failing step of the *subprocess* instance. Note that a successful retry will also resolve the incident you see on the parent process instance. 
- -On the other hand, you might also want to *cancel* the failing *parent process* instance: - -![Cockpit cancel](operating-camunda-c7-assets/insurance-application-failed.png) - -1 - -By clicking on this button, you can *cancel* the failing *parent process* instance. The cancellation will also cancel the sub process instances running in the scope of the parent process instance. - -### Turning on/off all job execution - -Sometimes you might want to *prevent jobs being executed at all*. When starting up a cluster, for example, you might want to turn off the job executor and start it up later manually when everything is up and running. - -1. Configure the [jobExecutorActivate](https://docs.camunda.org/manual/latest/reference/deployment-descriptors/tags/process-engine/#configuration-properties) property to `false`. -2. Start the job executor manually by writing a piece of Java code and making it accessible, e.g. via a REST API: - -```java -@POST -public void startJobExecutor() { - ((ProcessEngineConfigurationImpl) processEngine - .getProcessEngineConfiguration()) - .getJobExecutor() - .start(); -} -``` - -A similar piece of code can be implemented to allow to stop the job executor. - -### Suspending specific service calls - -When you want to *avoid certain services to be called* because they are down or faulty, you can suspend the corresponding job definitions, either using [Cockpit](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/suspension/#job-definition-suspension) or using an API ([Java](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/ManagementService.html) or [REST](https://docs.camunda.org/manual/latest/reference/rest/job/put-activate-suspend-by-job-def-id/)). - -By using the API, you can even *automate suspension*, e.g. by monitoring and recognizing when a target system goes down. By using naming conventions and accordingly customized job definition queries, you can then find all job definitions for that target system (e.g. "SAP") and suspend them until the target system goes up again. - -### Suspending whole processes - -Sometimes, you may want an *emergency stop* for a specific process instance or all process instances of a specific process definition, because something behaves strange. Suspend it using [Cockpit](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/suspension/#process-definition-suspension) or using an API ([Java](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/RuntimeService.html) or [REST](https://docs.camunda.org/manual/latest/reference/rest/process-definition/put-activate-suspend-by-id/)) until you have clarified what's going on. - -## Create backups - -1. Camunda stores all state information in its *database*. Therefore, backup your database by means of your database vendors tools or your favorite tools. -2. The Camunda *container installation*, as well as the *process application deployments*, are fully static from the point of view of Camunda. Instead of backing up this data, we recommend doing a script-based, automated installation of containers, as well as process applications in order to recover easily in case anything goes wrong. - -## Updating Camunda - -For updating Camunda to a new version, follow the guide for [patch level updates](https://docs.camunda.org/manual/latest/update/patch-level/) or one of the dedicated [minor version update guides](https://docs.camunda.org/manual/latest/update/minor/) provided for each minor version release. 
- -A [rolling upgrade](https://docs.camunda.org/manual/latest/update/rolling-update) feature has been introduced in version 7.6. This allows users to update Camunda *without having to stop the system*. Outdated engine versions are able to continue to access an already updated database, allowing updates to clustered application servers one by one, without any downtime. - -### Preparation - -1. Before touching the servers, all unit tests should be executed with the desired Camunda version. -2. Check running processes in Cockpit - - Handle open incidents - - Cancel undesired process instances if any -3. Make a backup (see above) - -### Rollout - -- Shut down all application server(s) (unless performing a rolling update in which only one cluster node is taken down at a time after the database has been updated). - -- Update database using SQL scripts provided in the distro (all distros contain the same scripts) - - Ensure you also execute all patch level scripts - - Run all update scripts - - To check which version is in the database, check for missing tables, indexes, or columns from the update scripts - -```SQL -SELECT TABLE_NAME, INDEX_NAME FROM SYS.USER_INDEXES WHERE INDEX_NAME like 'ACT_IDX_%' ORDER BY TABLE_NAME, INDEX_NAME; -SELECT TABLE_NAME FROM SYS.USER_TABLES WHERE TABLE_NAME LIKE 'ACT_%' ORDER BY TABLE_NAME; -``` - -- Update applications and application server(s) or container(s) -- Start application server(s) or container(s) -- Check logfile for exceptions -- Check Cockpit for incidents -- Test application using UI or API -- Repeat in all stages diff --git a/versioned_docs/version-1.3/components/best-practices/operations/performance-tuning-camunda-c7.md b/versioned_docs/version-1.3/components/best-practices/operations/performance-tuning-camunda-c7.md deleted file mode 100644 index b97f7549e5a..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/operations/performance-tuning-camunda-c7.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -title: "Performance tuning Camunda 7" -tags: - - Performance ---- - -Understand influencing aspects on performance and apply tuning strategies appropriately, for example, by configuring the job executor or applying external tasks. When facing concrete challenges, look at scenarios like the proper handling of huge batches. - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only! The Camunda Cloud stacks differ in regards to performance and scalabilities and requires different strategies we currently work on providing as best practice. -::: - -## Performance basics - -Note that this document assumes some understanding of fundamentals of underlying technologies such as the following: - -* Database fundamentals -* Monitoring, observability, and benchmark tools -* JVM fundamentals - -### Setting up monitoring - -It's important to **set up proper monitoring** as described in our [Monitoring Best Practice](../operating-camunda-c7/). Writing the value of certain performance indicators over time can help to judge the urgency of certain bottlenecks or to warn you before an overload will happen. - -### Runtime database - -The database i/o for **writing** state changes of process instances to your **runtime tables** depend on your use case. The following are the fundamental factors: - -* The complexity of process models - measured by the **number of save points**. -* The **number of started process instances** - measured per time unit. 
-* The **data attached** to process instances (aka process variables) - measured in bytes. -* The average **duration** of process instances, as the longer they need to complete (and hence wait in a persistent state) the less database traffic their total number of save points cause per time unit, but the more data you have stored in the runtime database. - -The performance for **querying and reading** from the runtime tables is most influenced by the process variables/business data you use. For every process variable used in a query, a join is needed on SQL level, which influences performance. This can hit you, especially when doing message correlation or tasklist queries. You can tune performance **by using indices** as described below. - -Further database tuning may be required depending on the specific use case and performance requirements. In combination with other configurations mentioned above, specific performance goals can be reached for querying and reading from runtime by using **indices**. - -### History database - -Camunda uses a **relational database as history backend** by default. The i/o for **writing** process instance information to the **history** primarily depends on the [History Level](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#choose-a-history-level) chosen. This is one of the biggest tuning opportunities when it comes to database i/o and the simplest method to reduce load on the database is to reduce the history level. - -It is possible to hook-in a [Custom History Backend](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#provide-a-custom-history-backend), e.g. to leverage alternative data stores (like NoSQL, for example using the Elastic stack). A [complete example](https://github.com/camunda/camunda-bpm-examples/tree/master/process-engine-plugin/custom-history-level) is available. - -By default, the history database tables **(denoted by HI)** and the runtime database tables **(denoted by RU)** share the same schema. - -Separating your runtime database from the historical database is theoretically possible by implementing a custom history backend. This custom backend could then store the data in a different database instance. But note that many of Camunda Cockpit's capabilities depend on both data sets. Writing history to another database instance would cause Cockpit to function incorrectly. - -A valid strategy is to write the data to a custom backend (like NoSQL) for long time retrieval, but also to the normal Camunda tables for operations. Then, you can delete the history from the Camunda database after short intervals using [history cleanup](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup). - -A simpler and easier to manage strategy is to filter data using a Camunda custom history mechanism. For this, use history level **CUSTOM** and filter the data yourself. Just extend the built-in **HistoryEventHandler** and hook that into your process engine configuration: - -```Java -public class CamundaFilterHistoryEventHandler extends DbHistoryEventHandler { - - @Override - public void handleEvent(HistoryEvent historyEvent) { - if (historyEvent instanceof HistoricVariableUpdateEventEntity) { - if (...) 
{ - // ignore some variable update events - log.info("Ignore event for variable " + variableUpdateEvent.getVariableName() + "."); - return; - } - } - // handle all other events - super.handleEvent(historyEvent); - } -} -``` - -Typical use cases are: - -* Filtering high-volume but unnecessary events from the history in order to improve performance -* Filtering sensible data which should not be written to history (e.g. individual-related data) - -### Thread handling and the job executor - -Make sure you understand [save points and threading behavior](../../development/understanding-transaction-handling-c7/). - -Save points are the tool to change threading and scaling behavior of a process instance. The more you use it, the more work will be done by the [job executor](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/), which is the key component to look at when you want to improve your system's performance. - -The **default configuration** of the job executor is typically **not good** and **must be tuned**, there exist no general sensible defaults. Strategies are described below. - -[Job prioritization](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#job-prioritization) and the configured [retry strategy](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#failed-jobs) can **influence the execution order of jobs**. Which is also useful in case you hit exceptions (e.g. a network connection is down). The default strategy retries three times without a delay, which normally should be changed to something more meaningful. - -You have to set the retry strategy for every save point. Be aware that retries increase the load of the system because you're creating a new transaction, database connection, thread when a job is re-executed, and any additional processing required by your business logic. - -[Exclusive Jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs) are the default in Camunda, which means that *for one process instance* there is always **only one job executed** in parallel. This is a safety net to avoid optimistic lock exceptions, as multiple parallel paths might conflict by writing to the same database row. - -You can **change this configuration** to run jobs of one process instance in parallel if you make sure not to create optimistic lock exceptions by a **fitting process design**. Additionally, handle optimistic lock exceptions properly by doing **retries**. - -Keep in mind parallel processing of jobs, and having loads of optimistic lock exceptions causes overhead and might slow down your system. Using parallel processing features is not recommended for most use-cases as it adds complexity and should be carefully tested in cases where it is attempted. - -### Considering external tasks - -An important **alternative to job handling by the job executor** where Camunda does the thread handling, is [external tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) where an external worker or application does the work. - -This makes it easy to throttle execution using a thread pool or a cluster of apps. Potentially, only one process or thread is allowed in parallel in other cases it may be required to scale up workers or threads. - -Using external tasks allows for complex logic or expensive network calls to be executed with external systems are blocking within Camunda, threads aren't typically a problem anymore. 
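To make the pattern more tangible, a minimal external worker based on the Java external task client might look like the following sketch; the REST endpoint URL, the topic name, and the business logic are placeholder assumptions.

```java
import org.camunda.bpm.client.ExternalTaskClient;

public class InvoiceWorker {

  public static void main(String[] args) {
    // Connect to the engine's REST API; URL and topic are hypothetical examples.
    ExternalTaskClient client = ExternalTaskClient.create()
        .baseUrl("http://localhost:8080/engine-rest")
        .asyncResponseTimeout(20000) // long polling reduces the number of fetch requests
        .build();

    client.subscribe("invoice-processing")
        .lockDuration(60000) // while locked, no other worker will fetch this task
        .handler((externalTask, externalTaskService) -> {
          // Call the (potentially slow or fragile) external system here.
          // Threading, throttling, and scaling are fully under the worker's control.
          externalTaskService.complete(externalTask);
        })
        .open();
  }
}
```

Because you decide how many such workers and threads to run, throttling or scaling the actual work does not require any change on the engine side.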
- -When using [external tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/), Camunda does not actively call your business logic, but stops in a wait state and persists the state in the database. You are responsible for querying or polling these tasks using the Camunda API and executing your logic. **You fully control threading behavior** yourself and can influence scaling. - -It's important to understand the external task API and behavior as the cost of using external tasks is not free from Camunda transaction overhead as well as the overhead management of external workers. Understanding the life cycle of the external task is crucial to designing and managing external workers. - -### Void JVM tuning - -It is normally **not required** to tune the Java Virtual Machine (JVM). It's better to concentrate on the strategies described in this article. - -If you have hints that you have memory issues, GC problems, or thread locking, you should employ a JVM profiling tool as suggested in the tools section. - -### Common pitfalls with process variables - -Below are some common scenarios that could potentially cause issues. Be sure to read the section on [handling data in processes](../../development/handling-data-in-processes/) to understand the best options for dealing with potential data-flow and business data in Camunda. - -#### Performance degrades after storing large files as variables - -Problem: - -- BLOB selection leads to huge RAM allocation -- Operations become more costly (e.g VACUUM) -- Replication becomes slower - -Solution: - -- Store large files in a dedicated third-party CMS -- Only store file reference as variable in Camunda - -#### In production variables report to long - -Problem: - -- When storing variable values of type _string_ the char limit is 2000 for Oracle. - -Solution: - -- Reduce the length of the value. -- Store String as Object in Oracle. - -#### Optimistic locking exceptions occur when updating variables using external tasks API - -Problem: - -- Same variables are updated by multiple workers consequently the same row in the DB is updated. - -Solution: - -- Use the local API when updating variables. You must combine this with input/output mappings to have access to variables in subsequent activities. - -#### Use Camunda as a source of truth for tasks - -Problem: - -- Storing a large number of variables leads to very large ACT_RU_VARINST table and slow queries on several API's. - -Solution: - -- Store variables in external data-store or in separate tables in the Camunda schema. Learn more about [handling data in processes](../../development/handling-data-in-processes/). - -## Scaling basics - -In general, process engine performance is highly dependent on your usage scenarios. There isn't a one-size-fits-all answer, but as our most senior consultant keeps saying, "So far we found a solution for every high-performance scenario we encountered with customers." - -### Basic scaling and failover - -Basic scaling of Camunda is very simple: connect multliple engines to the same database to form a [cluster](https://docs.camunda.org/manual/latest/introduction/architecture/#clustering-model). All data is synchronized through the database, so clustering requires no special configuration for Camunda. You can implement auto-scaling with container orchestration systems like Kubernetes or OpenShift. 
- -Camunda requires [READ COMMITTED transaction isolation](https://docs.camunda.org/manual/latest/user-guide/process-engine/database/#isolation-level-configuration) and [synchronous replication](https://docs.camunda.org/manual/latest/introduction/supported-environments/#database-clustering-replication) to all active cluster nodes. - -### Understanding cluster and load balancing options - -Load balancing has two layers: - -* Load balancing on the inbound channel is out-of-scope for Camunda, instead use standard third-party software like an HTTP load balancer or messaging. - -* Job execution (also known as asynchronous processing or `_jobs_`) in Camunda can be used to do load balancing, using multiple threads and multiple cluster nodes. This is described in more detail in the following sections. - -## Running load tests - -When you are in doubt if a certain load requirement can be tackled by Camunda, you should run a load test. This normally involves the following phases: - -* Prepare an *environment* which is as close to production as possible, otherwise results might be biased. -* Prepare concrete *scenarios* you want to run, which includes e.g. BPMN workflows that are realistic for you. If you typically run synchronous service tasks do so in the scenarios. If you have big payloads use them. If you leverage multiple instance tasks make sure your scenario also contains them. -* Define *clear goals* for the load tests, e.g. you might need to run at least **1000 workflow instances/second**, or you might need to keep **latency below 50 ms for the 95th percentile**. -* Prepare *load generation*, which is not always easy as you have to stress your system in a way, that you cannot do by one simple client. -* Prepare *monitoring* to analyze the situation if you run into problems. Typical measures are (see below for a more complete list): - -Java memory consumption, especially garbage collection and potential memory leaks, often occur due to issues in surrounding components. - -These problems can be spotted by checking which objects occupy a lot of memory using a JVM observability tool. - -Monitor load on the database to avoid overloading the database. It's sometimes better to reduce the number of connections in your connection pool. - -Typical monitoring and profiling tools our customer use: - -* Basic tools available with the Java installation - * [VisualVM](https://docs.oracle.com/javase/8/docs/technotes/guides/visualvm/profiler.html) - * JConsole - * JVM Thread Dumps -* Commercial offerings - * App Dynamics - * Dynatrace - * YourKit - -Typical load generation tools our customer use: - -* JMeter -* Postman -* SOAP-UI - -## Resolving overload - -This section applies if the system is experiencing acute problems due to load or poor configuration. - -:::caution Camunda Cloud is built with scalability top of mind -Note that Camunda Cloud and its workflow engine Zeeebe were engineered for performance and scalability. If you hit problems you cannot easily resolve with Camunda Platform 7.x, it might be worth having a look at Camunda Cloud instead. -::: - -### Collecting information for root causing - -Initially, we need to have a strategy to deal with problems. Take a minute to think about what principles you will apply to solve acute and generic performance problems. Below are some questions to ask to analyze the root cause: - -* What makes you think there is a performance problem? -* Has this system ever performed well? -* What has changed recently? (Software? Hardware? Load?) 
-* Can the performance degradation be expressed in terms of latency or run time? -* Does the problem affect other people or applications (or is it just you)? -* What is the environment? - * What software and hardware is used? - * Versions? - * Configuration? - -When we suspect (or experience) problems, we typically have a deeper look at: - -* Detailed information about **jobs**, typically retrieved from the database via **SQL queries** (see also [unsupported sample queries](https://github.com/camunda-consulting/code/tree/master/snippets/db-queries-for-monitoring)): - * **# of executed jobs**: How many jobs are currently acquired/locked, which means they are executed at the moment? - * **Cluster distribution**: How are the executed jobs distributed over the cluster? Therefore, look at the lock owner, which is written to the database. - * **# of not yet executed jobs**: How many jobs are currently due, which means the due date is reached or no due date was set, but are not acquired? These are the jobs that should be executed but are not yet. This number should be normally close to zero. Capture the number over time, if it stays above a certain threshold, you have a bottleneck. In this situation, you might even suffer from job starvation, as Camunda does not enforce a FIFO principle for job execution. This situation needs to be resolved. A typical pattern is to experience this overload only on peak times of the day and resolve in quiet times. - -So far, we've never experienced running out of CPU capacity. If that happens, clustering is a very natural choice to solve the problem. But in most cases, applications built on Camunda will more often than not be waiting for i/o (database, remote service calls, etc.) To solve overload problems correctly, you have to analyze the root cause: - -* Basic system metrics for your Camunda application (container, application server or Java process) and database. Plot them over time! - * CPU utilization - * Memory utilization - * I/O - * Response times - -Often, we cannot get metrics from the database due to security restrictions. In this case, we try to measure response times from the database as an indicator of its health. This works very well with dedicated frameworks like App Dynamics. - -* Database information - * Slow query log - * Other utilization information, depending on the concrete database product. Best approach your DBA. - -Collecting this information normally gives a good indication which component is really busy and causes the bottleneck. - -### Using benchmarks and a systematic approach for tuning - -Having an idea about the bottleneck leads you to the proper tuning strategy. However, system behaviors are very complex and experience shows that you need multiple tries to improve the situation. This is normal and not a problem, but makes it important to follow a systematic approach to be able to resolve overload problems. A good background read is [this blog post on scaling Camunda in a cluster](https://blog.camunda.org/post/2015/09/scaling-camunda-bpm-in-cluster-job/). - -The basic strategy is simple: - -* Set up tests and conduct measurements, which give you a **baseline** you can compare against. -* **Change** something, but best only **one thing at a time**. -* Measure again and **compare against your benchmark** so you get an idea how much the change improved the situation. - -For resources like the job executor thread pool, start with small numbers and increase them. 
If you start too big, you always have to check in two dimensions: increasing and decreasing. - -:::note -**Guessing can lead to wrong conclusions**. Hence, we recommend setting up a load testing environment and generating load to get all resources busy. This allows optimizing your system corresponding to your specific load scenario. But we also know that this is hard, especially because you normally have to mock service calls but simulate realistic response times. -::: - -A good compromise often is: - -* Monitor the load on your production systems (as indicated above, e.g. using database queries). -* Change settings and inspect the impact over time. - -:::note -This is not a scientific but rather hands-on approach. Production load might vary very much, so plan enough time to allow regression towards the mean and keep an eye on other performance indicators like process instances started to judge the results realistically. -::: - -### Tuning the job executor - -There is no configuration of the job executor which is generally sensible. The configuration options and defaults are: - -```xml - - 3 <1> - 5 - - 10 <2> - - - 3 <3> - 300000 <4> - 5000 <5> - - -``` - -1 - -Number of threads that execute jobs. - -2 - -Number of jobs that can be queued in an in-memory queue, waiting for an execution thread to become available. - -3 - -Number of jobs acquired at once (in the database). - -4 - -Time the job will be locked for a specific job executor. - -5 - -Idle time for acquisition if no executable job was found. - -A meaningful configuration has to balance these values according to the given situation. In order to give hints, you need to understand some basics: - -* It does not make sense to have more **active threads** than the CPU cores can directly handle. Otherwise, you will just swap in and out threads and hinder efficient computation. -* Whenever a **thread blocks because of i/o**, e.g. the user waits for some database operation to finish, it is not active and the CPU will not be bothered with it. - -When you want to figure out **how many threads you can assign to the job executor** thread pool **(1)** you need to know how much threads are available in total and **how much threads are already in use** by other thread pools (web server and servlets, scheduling frameworks, EJB, JMS, etc.) The more components you run on your machine, the harder it gets to predict the free CPU capacity. This is also true for virtualized environments where resources are shared. - -You also have to think about the **nature of your processes**: Do you run **CPU intensive computations** by Camunda job executor threads, or do you **wait most of the time** for remote service calls? Typical processes spend their time waiting for i/o. In this case, you can safely increase the number of threads. Keep in mind that scaling up Camunda puts more load on downstream services and systems, so you might need to throttle it to avoid "denial of service attacks". - -When increasing the number of threads, make sure that you also **increase the internal queue size** **(2)**, otherwise it might run empty, and your threads don't get new jobs to execute. On the other hand, the queue should not be made too big. In a cluster **too big queue sizes** can lead to one node taking all jobs into his queue **leaving other cluster node idle**. If you queue up **more jobs than you can finish within the lock timeout** **(4)**, jobs are timed out and will be executed twice (with one running into an optimistic lock exception). 
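While iterating on these settings, it helps to track the job backlog mentioned earlier between test runs. A small sketch using the Java API (assuming access to the `processEngine`) could look like this:

```java
// Rough gauge for job executor tuning: how many jobs are due but not picked up yet,
// and how many have failed permanently? A steadily growing backlog means the current
// configuration (threads, queue size, acquisition) cannot keep up with the load.
long executableJobs = processEngine.getManagementService()
    .createJobQuery()
    .executable()
    .count();

long jobsWithoutRetries = processEngine.getManagementService()
    .createJobQuery()
    .noRetriesLeft()
    .count();

System.out.println("executable jobs (backlog): " + executableJobs
    + ", jobs without retries left (incidents): " + jobsWithoutRetries);
```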
- -A typical approach to tune performance is: - -* Start with the number of threads = CPU cores * 1.5 -* Increase queue size stepwise until there is no gain in throughput anymore because all threads are "busy" waiting for i/o. -* Now increase worker threads and afterward queue size and always check that this improves throughput. -* Whenever you reach a limit, you found your upper configuration limit, which is typically optimal for production. - -As already indicated, when you dive deep into job executor tuning because of high volume operations, it might be worth to take one step back and think about using [external tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) as an alternative. This often scales better, as a worker can, for example, collect a huge amount of tasks and just report completion back, how this is executed and scaled can be completely decided by you. - -### Tuning the database connection pool - -A resource that the process engine and the job executor heavily depend on are database connections. They are provided by a JDBC data source which has a pool of connections. - -First, you should find out which connection pool implementation is used based on your project's dependencies: - -* [Spring Boot's algorithm for selecting the data source implementation](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-sql.html#boot-features-connect-to-production-database) -* [Code example to detect data source implementation](https://www.mkyong.com/spring-boot/spring-boot-how-to-know-which-connection-pool-is-used/) - -Preferably, use [HikariCP](https://github.com/brettwooldridge/HikariCP) and configure its [settings](https://github.com/brettwooldridge/HikariCP#configuration-knobs-baby) using `spring.datasource.hikari.*` properties. HikariCP's default pool size is 10. Their website provides an [article about connection pool sizing](https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing). - -### Resolving database overload - -Having tuned the job execution the database might become a bottleneck when handling high-load scenarios. A very simple approach is then to **tune the database or assign more resources to it**. It is also possible to **tune some database queries** as described below. - -If both are not possible or sufficient, check if the database load can be reduced by **changes in your application**. Therefore, you need to analyze the root cause of the load. It is a good idea to partition your database in a way that you see load data for runtime, history, and specifically the table containing byte arrays. Two typical findings are: - -* A lot of data is written into **history**, for example, because you run through a lot of tasks and update a lot of variables. In this case, a good strategy is to reconfigure history to reduce the amount of data or use a custom history backend, as already described. - -* Big chunks of data are written to the byte array table, mostly because you save **too much data as process variable** like big XML or JSON structures. Camunda always needs to update one process variable as a whole, even if you only change some attributes or add lines to a list being part of the data structure. Additionally, the whole chunk is also written to history to keep a history of variable values. In this scenario, it is much more efficient to store the business data as a separate structured entity or into a better fitting storage (like a document database). 
Camunda then only stores a reference and is relieved of a lot of database load.

Camunda batches SQL statements of the current call and runs them at once at the end of the transaction. Depending on the nature of the process model and the work done in this transaction, this batch might become big.

### Tuning database queries

Use cases of Camunda customers differ very much, so we cannot fine-tune our database schema for all use cases out-of-the-box. We strive for an optimal balance between too few and too many indices. As you know your use case in detail, you can **improve database performance by adjusting indices** of Camunda tables. Typically, additional indices are added that lead to reduced runtimes and less database load for certain queries. However, this typically affects write performance and has to be balanced depending on the concrete situation at hand.

In order to find candidates for optimization, **check the slow query log** of your database or discuss with your DBA.

Examples:

* Creating an index on process instance end time (`create index PROC_DEF_ID_END_TIME ON ACT_HI_PROCINST (PROC_DEF_ID_, END_TIME_)`) in case you query for that very often.
* [Job acquisition](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#the-job-order-of-job-acquisition) contains hints on indices depending on the job executor configuration.

### Applying sharding

If none of the above strategies is sufficient, you need to reduce the load put on the Camunda engine as a whole. This can be done by a mechanism called **[sharding](https://en.wikipedia.org/wiki/Shard_(database_architecture))**.

To do this, you distribute the overall load across multiple logical engines (called shards), each of which can be a cluster of its own. Every shard runs its own database. A sharding algorithm and the corresponding distribution logic must be implemented. One example was described [by Zalando in this blog post](https://blog.camunda.org/post/2015/03/camunda-meets-cassandra-zalando/).

The Camunda platform supports multiple engine configurations pointing to different databases on a single application server. When you run Camunda in [container-managed aka infrastructure mode](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-bootstrapping/#shared-container-managed-process-engine), multiple engines work out-of-the-box with no additional code, supported by Camunda's [configuration](https://docs.camunda.org/manual/latest/user-guide/process-engine/multi-tenancy/#one-process-engine-per-tenant) and [APIs](https://docs.camunda.org/manual/latest/reference/rest/overview/#engine-usage).

The distribution to the different engines (shards) is usually domain-specific and must be implemented as part of your project. When using inversion-of-control (IoC) containers like Spring or CDI, one strategy is to centralize the engine selection in a request-scoped producer for the **ProcessEngine** object. With dependency injection, the rest of the code can then be written as if there is only one **ProcessEngine** instance.

## Some real-life stories

In this bonus section, we share some anecdotes which might inspire you when trying to resolve issues in your environment.

### Session context memory consumption

In one customer scenario, the REST API was used heavily with basic authentication enabled. The client did not reuse the REST connection and opened a new one for every request, including the authentication information.
On the server side, there was no special configuration given, which means that for every authentication a SessionContext was created with a certain timeout. These SessionContexts were never reused, and the default timeout was relatively high (30 minutes in Tomcat). As a result, all these SessionContexts plugged up the memory, which ultimately led to garbage collection cycles becoming so long that the whole system was basically just doing garbage collection most of the time.

This could be resolved by setting a very low `session-timeout`.

### Spring Boot data collector

One project had relatively little heap memory (500 MB) and used [Micrometer Metrics provided by Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-metrics.html) to collect metrics. With around 200 requests/second, the memory required for metrics data consumed around half of the heap and led to fatal full garbage collection cycles.

Removing the metrics collection was a quick fix to resolve the problem.

### Processing high numbers of parallel activities (aka batch processing)

One concrete scenario is worth looking at, as customers stumble upon it regularly: doing some kind of batch processing via BPMN, where you have a high number of parallel activities in one process instance.
    - -The important characteristics are - -* It is modeled using parallel [Multiple Instance](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/task-markers/#multiple-instance) (MI) -* You have high numbers of elements for the MI (> 1000) -* You are using wait states or save points within the parallel branch - -This scenario is supported by Camunda, but you can run into serious problems. - -:::caution Solved in Camunda Cloud -This problem is only a problem with Camunda Platform 7.x! Zeebe, the workflow engine used in Camunda Cloud, can run high number of parallel activities. -::: - -The basic problem is the [execution tree](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-concepts/#executions) getting really big in this scenario. In most situations, the engine has to load the whole tree in order to do anything, even if that happens only in one parallel path. This not only influences performance, but also adds load to the database. - -Turning off execution pre-fetching (available as internal process engine configuration property) is not recommended, as it may cause other trouble. Cockpit also suffers from huge data chunks, making it slow. - -If you add additional scopes, like the BPMN subprocess **(2)**, this leads to an additional execution being created. Every embedded subprocess doubles the size of the execution tree, so avoid subprocesses in this situation. - -The described problems only arise if you have wait state or save points in your process model, as only then the engine needs to persist the process instance to the database. If you run through the multiple instances in one transaction, the internal optimization removes almost all runtime database update statements, so almost nothing needs to be done (except for the history). - -There is one very specific scenario you need to avoid. When a parallel activity is finished and you want to collect the result in a list, you might use a process variable storing that list **(4)**. With running a lot of instances in parallel, they might finish at the same time and try to change that process variable simultaneously, leading to optimistic lock exceptions. - -This typically leads to retries. Even if this situation can heal itself, it increases the load on the database. Assume that you serialize that list as reasonable big XML (growing to several megabytes) in the process variables. That means Camunda sends this chunk of data to the database in every transaction, but might even lose the commit because of the optimistic lock. Now that situation fuels itself, as commit times increase by having big chunks of data, leading to more parallel activities finishing within that time frame, leading to more optimistic lock exceptions. - -In this situation, the best approach is not to collect any results, at least not in Camunda itself. You might still leverage a simple database table, where every instance can insert a new line for its result. This would remove the lock problems and is very simple to set up. - -In any case, the situation improves if you don't wait for the parallel processing to finish. This avoids a lot of the problem described here. You can also use workarounds like polling for all subprocesses to finish. Obviously, this is not only harder to understand from a business perspective, but also requires more effort to develop, so it should only be used if you run into serious performance trouble. - -
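To make the "collect results outside of Camunda" idea more concrete, here is a minimal sketch of a Camunda Platform 7.x delegate that inserts each branch's result as its own row instead of updating a shared process variable. The table, column, and variable names are purely illustrative assumptions:

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import javax.sql.DataSource;
import org.camunda.bpm.engine.delegate.DelegateExecution;
import org.camunda.bpm.engine.delegate.JavaDelegate;

public class StoreBranchResultDelegate implements JavaDelegate {

  private final DataSource dataSource;

  public StoreBranchResultDelegate(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  @Override
  public void execute(DelegateExecution execution) throws Exception {
    // Every parallel branch writes its own row, so there is no shared
    // process variable that all branches compete (and conflict) on.
    Object result = execution.getVariable("branchResult"); // hypothetical local variable
    try (Connection connection = dataSource.getConnection();
         PreparedStatement statement = connection.prepareStatement(
             "INSERT INTO BATCH_RESULT (PROCESS_INSTANCE_ID_, RESULT_) VALUES (?, ?)")) {
      statement.setString(1, execution.getProcessInstanceId());
      statement.setString(2, String.valueOf(result));
      statement.executeUpdate();
    }
  }
}
```

Such a delegate can be wired as a bean and referenced via a delegate expression, which keeps the engine's own tables lean while the results remain easy to query.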
    diff --git a/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes-assets/history-architecture.png b/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes-assets/history-architecture.png deleted file mode 100644 index 55388680652..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes-assets/history-architecture.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes-assets/slides.pptx b/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes-assets/slides.pptx deleted file mode 100644 index dbe2fed7d92..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes-assets/slides.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes.md b/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes.md deleted file mode 100644 index ef908654190..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/operations/reporting-about-processes.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Reporting about processes -tags: - - Reporting - - History - - DWH - - BI - - KPI - - SQL - - MIS (Management Information System) ---- - -The Camunda engine automatically collects audit information about historical process or decision instances. Leverage this data by generating and displaying business relevant reports. Add business relevant phases and milestones to your process models serving as a basis for key performance indicators (KPIs). - -## Modeling key performance indicators (KPIs) - -When modeling a process, you always add information about important key performance indicators implicitly; for example, by introducing **start and end events**. - -Additionally, you can explicitly add the following: - -- Meaningful additional business **milestones** by modeling **intermediate events**, for example. This might not have any execution semantics other than leaving a trace in the history of the workflow engine. The milestone is met as soon as the process has passed the event. Its status can therefore be **passed** or **not passed**. - -- Meaningful business **phases** by modeling things like (embedded) **subprocesses**. In contrast to a milestone, a phase's state can be **not entered**, currently **active**, or **passed**. - -Consider the following example - a "Tweet Approval Process" shows start and end events as well as **milestones**: - -
    " - -3 - -After one business day, the reviewer is reminded to speed up - and such reviews are internally *marked* by passing the end event 'Review done slowly'. - -4 - -**Approved tweets** will pass the additional **intermediate event**. The **cycle time** up until that point is automatically captured too. - -5 - -Furthermore, when tweets are successfully published, we are interested in the **ratio** of those tweets... - -6 - -...when compared to tweets that do not get published. Therefore, we model *two different end events* representing those two business end states of the process. - -:::note -Duplicate tweets will *not be published* even though they have been *approved* before. The more precisely we describe and *name* the business semantics of events, the better our KPI's will reflect the reality we want to measure! -::: - -When you do not (only) want to concentrate on milestones, but *phases* in your process, model the phases as subprocesses: - -
    - - -1 - -The phase *Review*—modeled with a subprocess—will be active, while the human reviewer will need to find time to complete the task... - -2 - -...whereas the phase *Publication* will be completed automatically - hence process instances "remaining" there for longer than a few seconds will probably indicate ongoing problems with the uptime and reachability of the used services. - -## History architecture - -It is useful to understand the architecture around history data in Camunda Cloud. - -:::caution Camunda Platform 7 -Note that the history architecture is very different in Camunda Platform 7.x, see [Camunda Platform 7 User Guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/). -::: - -![History architecture](reporting-about-processes-assets/history-architecture.png) - -Camunda saves historical data not just when a process instance finishes, but on the go, while a process instance is active. By doing this, Camunda separates runtime data from history data. A growing history will not influence the runtime behavior, and you should never need to access runtime data for reporting purposes. - -Historical data can be leveraged via three possible mechanisms: - -- **Camunda tools**: Leverage Camunda Operate or Camunda Optimize. This is a very simple approach that works out-of-the-box and should satisfy many requirements already. Camunda Operate focuses on operational use cases ("Where is my process? Why did this fail?") whereas Camunda Optimize provides business intelligence about your processes. Optimize allows you to build reports and dashboards including setting alerts for thresholds. - -- **Query API**: Using the public API (currently under development), this has the advantage that you can make use of the history data within your own applications. - -- Pushing **events**: Pushing Camunda events by using [exporters](/docs/components/zeebe/technical-concepts/architecture/#exporters). Note that you can only add own exporters in a Self-Managed setting, not in Camunda Cloud SaaS. Exporters have the advantage that you can push the data into any infrastructure you have, and possibly even filter or enrich the data in that step. - -## Connecting custom business intelligence systems (BI), data warehouses (DWH), or monitoring solutions - -You might move data from the Camunda History to a decoupled system like a Business Intelligence (BI) solution, a Data Warehouse (DWH), some Data Lake, or an own monitoring solution, for example based on Prometheus. - -Leveraging typical BI system's **ETL** (extract, transform, and load) features allows you to optimize data structure for your reporting purposes (to *speed up* report generation) or to combine generic process engine data with business entities (to allow for *more in-depth analysis*). - -To get the data into the BI system, leverage one of the mechanisms described above. Our recommendation generally is: - -- In SaaS, leverage the history API to regularly pull data, as custom exporters are not supported there. 
\ No newline at end of file diff --git a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/process-definition-authorization.png b/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/process-definition-authorization.png deleted file mode 100644 index f75705f57f6..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/process-definition-authorization.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/security-architecture.png b/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/security-architecture.png deleted file mode 100644 index fee73e7fa58..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/security-architecture.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/security-architecture.pptx b/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/security-architecture.pptx deleted file mode 100644 index e70a5cd481e..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/security-architecture.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/sso.jpg b/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/sso.jpg deleted file mode 100644 index 4ce3403d138..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-assets/sso.jpg and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-c7.md b/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-c7.md deleted file mode 100644 index 399490dda40..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/operations/securing-camunda-c7.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: "Securing Camunda 7" -tags: - - Security - - SSO - - Authentication - - Authorization ---- - -Disallow unauthorized access by securing the Camunda Platform 7.x before going live with your process applications. Understand Camunda user management essentials, enforce authorization for the REST API, define access rights for Camunda specific resources such as process definitions, and consider integrating with your Single-Sign-On (SSO). - -:::caution Camunda Platform 7 only -This best practice targets Camunda Platform 7.x only! For Camunda Cloud, visit [Zeebe Security](/docs/self-managed/zeebe-deployment/security/). -::: - -## Understanding user management essentials - -We suggest taking a look at the [security](https://docs.camunda.org/manual/latest/user-guide/security/) section of the documentation. - -### Understanding users, groups and tenants - -A **user** refers to a human individual, and a **group** is any custom defined "bundle" of users sharing some usage relevant attributes (like e.g. working on specific business functions). Set up **groups** corresponding to your workflow roles or create new logical roles for that purpose. 
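As a small illustration of how such text-string-based groups are managed when you use the default database identity service, the following sketch creates a group and adds a user to it (the ids are examples):

```java
import org.camunda.bpm.engine.IdentityService;
import org.camunda.bpm.engine.identity.Group;

public class GroupSetupExample {

  public void createAccountingGroup(IdentityService identityService) {
    // Create a group representing a workflow role...
    Group accounting = identityService.newGroup("accounting");
    accounting.setName("Accounting");
    identityService.saveGroup(accounting);

    // ...and add an existing user to it (a group membership).
    identityService.createMembership("fozzie", "accounting");
  }
}
```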
- -Both **groups** and **users** can be added to one or more **tenants** to ensure a certain degree of data isolation between different logical entities (for more information, see [multi-tenancy](https://docs.camunda.org/manual/latest/user-guide/security/)). - -The core of the Camunda engine treats **users**, **groups**, and **tenants** as simple **text strings**. Therefore, you can do things like assign a user task to a group of people by directly referencing the group in your BPMN file, for example: - -```xml - -``` - -Or, claim that user task for a specific user via the Java API by referencing the user with a text string-based user id: - -```java -taskService.claim(taskId, "fozzie"); -``` - -No further concepts exist like logical workflow roles or special mappings. - -Camunda ships with an [IdentityService](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/) which allows you to either manage real users and groups *directly within the Camunda database*, or access the users and group information managed in a directory service database which supports **LDAP** (Lightweight Directory Access Protocol), like Microsoft's "Active Directory" and many others. One can also provide a custom **IdentityService** implementation to satisfy each and every requirement apart from the default identity service options shipped with Camunda. This is particularly helpful if you plan to integrate with a third party identity management system. Using the **IdentityService** is not mandatory - it is *possible* to reference users and groups within Camunda that are not known by the engine's **IdentityService** at all. This could be useful for testing purposes or when integrating with third party identity management solutions. - -The Camunda LDAP Identity Service doesn’t support tenants. That means tenant-related access restrictions do not work by default when using the LDAP plugin. - -To illustrate, Camunda needs access to (text string based) *users and groups* in order to: - -* Allow *logging into* the web applications shipping with it (Camunda Tasklist, Cockpit, etc.) -* Allow Tasklist to, for example, present *open tasks* available for the groups of the logged in user -* Allow Cockpit to, for example, present just the process definitions related to the *tenant(s)* the logged in user is associated with. - -Keep in mind that your custom directory service is decoupled from Camunda. While it is possible to delete users and groups or change memberships in your directory service without harming Camunda's runtime, the text strings already known to Camunda won't change without manual intervention. - -### Understanding memberships - -Camunda's [IdentityService](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/) allows you to add users to groups; we call this a **group membership**. Accordingly, adding a user or group to a tenant creates a **tenant membership**. - -### Understanding authentication - -The procedure of *authentication* makes sure that the user is known to the Camunda engine. When directly using Camunda's Java API, this must be done *for each thread* by issuing, for example: - -```java -identityService.setAuthenticatedUserId("fozzie"); -``` - -If you use the Java API and do not set the authenticated user, Camunda will not check any authorizations. This is because the engine simply does not know who is logged in. When using the REST API, whether an authentication is set or not depends on the configuration as described below. 
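When calling the Java API directly, for example from a scheduled job or a custom endpoint, one common pattern is to set the authenticated user for the current thread and clear it again afterwards. A minimal sketch (the user id is an example):

```java
import org.camunda.bpm.engine.IdentityService;
import org.camunda.bpm.engine.TaskService;

public class AuthenticatedCallExample {

  public void claimTaskAsUser(IdentityService identityService, TaskService taskService, String taskId) {
    try {
      // Tell the engine who is "logged in" for the current thread so that
      // authorization checks (if enabled) apply to the following calls.
      identityService.setAuthenticatedUserId("fozzie");
      taskService.claim(taskId, "fozzie");
    } finally {
      // Always clear the thread-local authentication again.
      identityService.clearAuthentication();
    }
  }
}
```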
- -### Understanding authorizations - -Permissions and restrictions for specific **users** or **groups** to access **resources** within Camunda (e.g. process definitions, tenants, process instances) are called **authorizations**. Because they relate users and groups to Camunda-specific resources, they must always be managed in a Camunda-specific way and be contained in the Camunda database. - -Camunda comes with an [AuthorizationService](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/) API (Java or REST), allowing to manage such authorizations and also ships with a dedicated [admin application](https://docs.camunda.org/manual/latest/webapps/admin/authorization-management) to manage them through a web interface. For example, you might want to give the group "accounting" all rights to access a specific process definition called "invoice": - -![Authorization](securing-camunda-assets/process-definition-authorization.png) - -While some permissions are requirement specific and should be created during deployment, others are created automatically (e.g. for assigned tasks). - -## Securing Camunda with authentication and authorizations - -To better understand the consequences and needs when being faced with the task to secure a Camunda installation, it is good to understand the big picture. - -![Security Architecture](securing-camunda-assets/security-architecture.png) - -1 - -A *request* is either asking for a REST API endpoint or one of the web applications functionalities. - -2 - -The `ProcessEngineAuthenticationFilter` (for REST) or the `AuthenticationFilter` (for the web applications) check the user's authentication credentials via the *IdentityService*. The filters retrieve groups and tenant memberships and set the authenticated user for the current thread in the engine. - -3 - -The request is *allowed*. - -4 - -The request might also be *denied*, in case the authentication fails (e.g. because the username is unknown or the password does not match). For the web applications, a denied request is redirected to the login page. - -5 - -All applications use Camunda's Java API internally. - -6 - -Under the hood, the engine enforces authorizations by instrumenting SQL queries. That means you can never get any data from a query the current user is not authorized for. - -7 - -As a consequence, only allowed and *accessible data* will be presented to the user. - -### Securing the Camunda core engine - -You can enable or disable authorization checks for the engine itself. Authorizations will only be checked if you [enable authorization checks](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/#enable-authorization-checks) and *tell the engine who is logged in* with the *current thread*: - -```java -identityService.setAuthenticatedUserId("fozzie"); -``` - - If you directly use the API and do not tell the process engine who is logged in with the current thread, it will provide full access to all data. - -Authorization is enabled per default in the Camunda distributions, but if you configure and run your own engine (e.g. via Spring), it is disabled by default. - -For the authorization checks (to access specific resources), the engine does not question whether the authenticated user is known to the used IdentityService. As mentioned above, the engine treats users, groups and tenants as strings and grants access if those strings match with the defined authorization rules. 
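Such authorization rules can also be created programmatically, for example as part of your deployment logic. The following sketch grants the group "accounting" read access to the "invoice" process definition, mirroring the Admin screenshot above (the identifiers are examples):

```java
import org.camunda.bpm.engine.AuthorizationService;
import org.camunda.bpm.engine.authorization.Authorization;
import org.camunda.bpm.engine.authorization.Permissions;
import org.camunda.bpm.engine.authorization.Resources;

public class AuthorizationSetupExample {

  public void grantAccountingReadOnInvoice(AuthorizationService authorizationService) {
    Authorization authorization =
        authorizationService.createNewAuthorization(Authorization.AUTH_TYPE_GRANT);
    authorization.setGroupId("accounting");
    authorization.setResource(Resources.PROCESS_DEFINITION);
    authorization.setResourceId("invoice"); // the process definition key
    authorization.addPermission(Permissions.READ);
    authorizationService.saveAuthorization(authorization);
  }
}
```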
- -In case you *do not require authorizations*, make sure that [authorization checks are disabled](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/#enable-authorization-checks), since they do have a performance impact. You might not need authorizations if you build your own custom web application handling authentication and authorization itself that just uses Camunda in the background, for example. - -If you have authorization checks enabled, you might or might not want to perform these checks when you execute Java code as part of your workflow. One example could be loading the number of running process instances to be used for some decision. For this reason, you can [enable or disable authorization checks for custom user code](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/#enable-authorization-checks-for-user-code) separately. - -### Securing Camunda's REST API - -Internally, the REST API is just another client for the Java API which needs to inform the engine about the authenticated user. This only works if you turn on authentication for the REST API. Otherwise, no user is logged in and you have *unrestricted access*. - -Authentication and hence authorization checks are by default disabled for the REST API to allow for a quick getting started experience. - -For real life usage, enable at least **Basic Authentication** for the **REST API** by adjusting the `web.xml` as described in the [User Guide](https://docs.camunda.org/manual/latest/reference/rest/overview/authentication/). The REST API's default `ProcessEngineAuthenticationFilter` authenticates the user with HTTP Basic Auth. It makes use of the `IdentityService` to check the user's password and to load **group** and **tenant** memberships for that user. If that was successful, it sets the user as authenticated for the current thread via the Java API. - -If you require an authentication mechanism other than HTTP Basic Auth, you need to implement your own `AuthenticationFilter`. For more details, see the SSO section below. - -If you do not need the REST API in production, consider undeploying the REST API web application. - -### Securing Camunda's web applications - -The Camunda web applications (Tasklist, Cockpit, Admin) have by default a form based *authentication turned on*. There is no further need for changing any configuration when going into production, apart from the more general consideration to enable a custom identity service provider (see below). -However, ensure that you do not deploy artifacts like the *h2 console* and the *example applications* in your production environments. They are solely shipped for development purposes and a smooth experience when getting started with Camunda. - -Internally, Camunda Web Apps use an `AuthenticationFilter` very similar to the REST API `ProcessEngineAuthenticationFilter` described above; it just redirects an unknown user to the login page. - -### Configuring the identity service - -By default, Camunda will manage users and groups directly within the Camunda database. As an alternative to that, you can also enable read-only access to an LDAP-based user/group repository. The [LDAP identity service](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/#the-ldap-identity-service) is implemented as a Process Engine Plugin and can be added to the process engine configuration in order to replace the default database identity service. 
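Assuming you run the engine with the Camunda Spring Boot starter (which picks up `ProcessEnginePlugin` beans automatically), registering the LDAP plugin can look roughly like the following sketch; all connection parameters are placeholders for your own directory:

```java
import org.camunda.bpm.identity.impl.ldap.plugin.LdapIdentityProviderPlugin;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class LdapIdentityProviderConfiguration {

  @Bean
  public LdapIdentityProviderPlugin ldapIdentityProviderPlugin() {
    // Placeholder values - replace them with the settings of your directory service.
    LdapIdentityProviderPlugin plugin = new LdapIdentityProviderPlugin();
    plugin.setServerUrl("ldap://ldap.example.org:389");
    plugin.setManagerDn("uid=admin,ou=system");
    plugin.setManagerPassword("secret");
    plugin.setBaseDn("o=example,c=org");
    return plugin;
  }
}
```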
- -As an alternative to those two possibilities, [implement a custom IdentityProvider](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/) to satisfy each and every other requirement. You can provide *read-only* or even *writable* access to your user repository. - -Note that as the LDAP Identity Service doesn’t support tenants (multi-tenancy). For multi-tenancy configured via LDAP, you would therefore need a custom identity service allowing you to retrieve tenant IDs from your LDAP. - -The identity service ships with a [security feature that throttles log in attempts](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/#throttle-login-attempts). That means that a user will not be able to log in for a specific amount of time after an unsuccessful login attempt. Carefully review the default values for this feature's configuration and change them to your requirements if needed. - -### Securing custom code - -The process engine offers numerous extension points for customization of process behavior by using Java code, expression language, scripts, and templates. While these extension points allow for great flexibility in process implementation, they open up the possibility to perform malicious actions when in the wrong hands. It is therefore advisable to restrict access to APIs that allows custom code submission to trusted parties only. You can find more information on that topic in the [User Guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/securing-custom-code/). - -### Securing Camunda Run - -The Camunda Run distributions aim for easy configuration and thus provides a very easy way for common security problems, see this [blog post](https://camunda.com/blog/2021/05/what-you-should-know-about-using-camunda-platform-run-in-production/). - -### Securing your web application container - -Make sure to secure your web application container (e.g. Wildfly or Tomcat) by checking and securing default settings, e.g. by removing any default predefined users allowed to access your container's administration console. - -### Supporting single sign-on (SSO) - -The difficulty with **Single sign-on** (SSO) is that it always works a bit differently depending on your exact environment and SSO framework used. - -Therefore, Camunda only provides the hooks for plugging in the exact mechanism that fits your needs. The key part that you need to touch concerns the authentication filters of Camunda's web applications and the REST API (see illustration above). The idea is to exchange or modify the [AuthenticationFilter](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/webapp/impl/security/auth/AuthenticationFilter.html) defined inside the `WEB-INF/web.xml` file of Camunda's web applications (or the REST API) and hook in your custom SSO provider. That means that your filter implementation circumvents the redirect to Camunda's login page by telling Camunda directly who is currently logged in. - -From Camunda 7.9 on, it is much easier to implement SSO by making use of the [ContainerBasedAuthenticationFilter](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/webapp/impl/security/auth/ContainerBasedAuthenticationFilter.html). This means that you do not need to replace the existing **AuthenticationFilter** by a custom one anymore, but you only need to add the **ContainerBasedAuthenticationFilter** that ships with the product and implement a custom **AuthenticationProvider** if required. 
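A custom **AuthenticationProvider** can be quite small. The sketch below assumes that a reverse proxy or SSO gateway in front of Camunda has already authenticated the user and forwards the user id in a request header; the header name is an assumption, not a standard:

```java
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.camunda.bpm.engine.ProcessEngine;
import org.camunda.bpm.engine.rest.security.auth.AuthenticationProvider;
import org.camunda.bpm.engine.rest.security.auth.AuthenticationResult;

public class HeaderBasedAuthenticationProvider implements AuthenticationProvider {

  @Override
  public AuthenticationResult extractAuthenticatedUser(HttpServletRequest request, ProcessEngine engine) {
    // Assumption: the SSO infrastructure in front of Camunda sets this header.
    String userId = request.getHeader("X-Forwarded-User");
    if (userId == null || userId.isEmpty()) {
      return AuthenticationResult.unsuccessful();
    }
    return AuthenticationResult.successful(userId);
  }

  @Override
  public void augmentResponseByAuthenticationChallenge(HttpServletResponse response, ProcessEngine engine) {
    // No challenge needed - authentication happens outside of Camunda.
  }
}
```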
- -You can get started by looking at some examples showing how this can be achieved for different authentication frameworks: - -- [Very basic authentication filter](https://github.com/camunda-consulting/camunda-webapp-plugins/tree/master/camunda-webapp-plugin-sso-autologin) for the Camunda web apps that reads the user from a provided URL parameter. -- Many *application servers* support single sign-on out of the box (or through plugins) and can provide the user id to the application. Have a look at the [Single Sign-On Community Extension](https://github.com/camunda/camunda-sso-jboss/). -- It is quite easy to [integrate Camunda with Spring Security](https://github.com/camunda-consulting/code/tree/master/snippets/springboot-security-sso) so that the framework handles authentication and passes the authenticated user on to Camunda. diff --git a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/database-versions.png b/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/database-versions.png deleted file mode 100644 index 0501f099453..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/database-versions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/process-solution-example.png b/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/process-solution-example.png deleted file mode 100644 index 987603773f5..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/process-solution-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/process-solution-v2.png b/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/process-solution-v2.png deleted file mode 100644 index b28b3af8ba1..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/process-solution-v2.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/slides.pptx b/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/slides.pptx deleted file mode 100644 index 27f041dc94d..00000000000 Binary files a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions-assets/slides.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions.md b/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions.md deleted file mode 100644 index 3ddd7afbb76..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/operations/versioning-process-definitions.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: Versioning process definitions -tags: - - Versioning - - Version Migration - - Long Running Processes ---- - -For real-life applications, it's crucial to understand how Camunda deals with evolving process definitions by means of versioning. As a rule of thumb, we recommend to version just the process and decision models, but not other process solution artifacts (like e.g. 
code classes or scripts). Often you might not even want to run multiple model versions at the same time, then you have to think about migrate running process instances to new versions. When modeling very long-running processes (> 6 months), consider cutting them into reasonable pieces to ease managing your versioning requirements. - -## Understanding versioning - -By default, deploying a process or decision definition means that the workflow engine will check if the version has changed. If it has, it will register that deployment as a new version of the definition. By default, running instances will continue to run on the basis of the version they started with, new instances will be created based on the latest version of that definition. - -As a consequence, when looking directly at Camunda database tables you can see different versions in the process definition table and the running process instances which are linked to these versions: - -![Versions](versioning-process-definitions-assets/database-versions.png) - -## Selecting the best versioning approach - -### Running versions in parallel - -You can run several versions of a model in parallel. - -The big *advantage* of that default behavior is that you can deploy changed process definitions without caring about running process instances. The process engine is able to manage running instances based on different process definitions in parallel. - -The *disadvantage* is that one needs to deal with the operational complexity of different versions of the process running in parallel as well as the additional complexity in case those processes call subprocesses which have different versions of their own. - -Run versions *in parallel* for - -- *Development* or *test systems* for which you do not care about old instances -- *Phasing out* existing instances as the existing instances need to finish based on the model they where created with, which often has *legal reasons*. -- Situations in which *migration is not advisable*, because it is too complex and too much effort when weighed against its upsides. - -### Migrating process instances to a new version - -:::caution Camunda Cloud -Camunda Cloud does not yet support process instance migrations as described here. This feature is currently in development and will be available soon. -::: - -*Migrate* running instances to the newest definition when: - -- Deploying *patches or bug fixes* of a process model. -- *Avoiding operational complexity* due to different versions running in production is a priority. - -Migrating process instances can be achieved either programmatically or by using the operations tooling. *Programmatically*, you need to *create a migration plan* that describes how process instances are to be migrated from one process definition to another. - -```java -// Sample code from Camunda Platform 7.x, this feature is not yet available in Camunda Cloud: -MigrationPlan migrationPlan = processEngine.getRuntimeService() - .createMigrationPlan("exampleProcess:1", "exampleProcess:2") - .mapActivities("assessCreditWorthiness", "assessCreditWorthiness") - .mapActivities("validateAddress", "validatePostalAddress") - .mapActivities("archiveApplication", "archiveApplication") - .build(); -``` - -You can then apply such a plan to a set of process instances selected by you. - -Learn more about [process instance migration in Camunda 7](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-instance-migration/) in the user guide. 
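Once such a migration plan is built, you apply it to a selection of process instances. A minimal sketch continuing the Camunda Platform 7.x example above (the process definition id and the query are illustrative):

```java
// Sample code from Camunda Platform 7.x, continuing the migration plan above:
ProcessInstanceQuery query = processEngine.getRuntimeService()
    .createProcessInstanceQuery()
    .processDefinitionId("exampleProcess:1");

processEngine.getRuntimeService()
    .newMigration(migrationPlan)
    .processInstanceQuery(query)
    .executeAsync(); // or execute() for a synchronous migration
```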
You can also learn about [how to use Camunda Platform 7's cockpit](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/process-instance-migration/) there. An interesting option is, that you can export the migration plan you configured in Cockpit as JSON string. This migration plan can be applied later [via REST-API](https://docs.camunda.org/manual/latest/reference/rest/migration/), making it possible to *fully automate* migration even if you do not want to program a migration plan in Java. - -It's important to understand that process instance migration *maintains the full 'identity' of the migrated process instances* including their unique IDs and their full history audit trail. However, as the process definition also might change fundamentally in between versions, this can have effects on the history log of a process instance which might be unexpected from an end user's or operator's perspective. - -### Things to consider before migration - -When planning your migration, here are some factors to consider: - -- *Do I have a good reason to migrate?* Technically, you do not have to migrate process instances when using Camunda. Previous process definition instances will simply continue to run as intended (with some important caveats, see other things to consider below). Here are some examples of good reasons to migrate: - - Your supporting implementation resources have changed. - - Your latest process definition represents a substantial change in your business process. - - Your latest process definition fixes a bug. - - Your latest process definition enforces some time-sensitive legal obligations or rules. -- *How big of a difference is there between process definition versions?* Not only the definition itself, but the data required to be present at any given time in your instance. -- *Did supporting implementation resources change from the previous deployment?* If a service implementation changes in the new deployment and the reference to the implementation did not change from the previous deployment, then older process instances that are in flight will utilize the newer implementation by default upon deployment of the new resources. If that breaks older instances, then you must migrate. -- *Do I have a proper infrastructure to support “real data” testing of my migration plan?* This might be the most important aspect. An ideal way to test your process instance migration would be to have prod-like data in some kind of staging environment that represents not only the type and quality of existing production data, but also volume, scale, and size. You run your migration plan there so that you know what to expect when it comes time to migrate in production. You also need the ability to quickly reset this data via some kind of snapshot, so that you can test over and over again. You can expect many iterations of your migration plan before you move forward with a solid plan. - -For Camunda 7 users there is some more information available in [these migration examples](https://github.com/camunda-consulting/migration-examples/blob/master/README.md). - -## Avoid versioning of dependant artifacts - -When versioning process or decision definitions, you need to be aware that the process of course communicates with the outside world, e.g. by *calling services* or by *using forms* to collect data input from human users. All the additional artifacts needed for that might *depend* on the details of each other in a subtle way. 
- -Whenever possible, we recommend that you *avoid to version other artifacts* beyond the process and/or decision definitions, in other words, just version '.bpmn' and '.dmn' files by using the default mechanism of the process engine. Embed all other artifacts (like e.g. classes, templates, scripts) into your normal application (for example a Java or NodeJS application) and don't version them. - -Of course, this approach requires that you *manage the subtle differences* needed by running process instances of old versions. There are various options to do that. And even if some of those options discussed below might not sound 'ideal' from a theoretical point of view, they proved to be *good enough* for real life purposes and *much easier to understand* than complex approaches. As understandability by every team member is a very important argument, we recommend going for the approach that is as simple as possible. - -The following options us a Java example of a process solution, containing not only the process model, but also some Java code and an HTML form: - -![Sample Process Application](versioning-process-definitions-assets/process-solution-example.png) - -### Option 1: Keep the artifacts backwards compatible - -*Extend* the functionality of e.g. a method in `MyClass.java` in a way which can still deal with "old" process instances. - -```java -public class MyClass { - public void doSomething(Long customerId) { - if(customerId != null) { // <1> - // new code introduced - } - } -} -``` - -1 - -Assume you introduced a customerId in the new version of the process. Your code can still deal with old cases not aware of a customerId. - -### Option 2: Introduce a new artifact for different versions - -*Change* the artifact and add a new version of it to the application. Now you can reference this new artifact from your new version of the process definition, while the old version will continue to use the first version of it. - -For example: - -- Change the file name for the form from `task-form.html` to `task-form-v2.html` -- Change the `task type` of a service task from `doSomething` to `doSomethingV2` - -![Sample Process Application](versioning-process-definitions-assets/process-solution-v2.png) - -Sometimes it is preferable to manage different versions by means of folders/packages. Just make sure to have a clear and straightforward convention to keep track of the versions. - -## Dealing with long running processes - -In general, *do not be concerned with deploying long-running processes* which might run days, weeks or even months. After all, this is exactly what Camunda was built to properly deal with. - -Having said that, also review the possibilities the workflow engine provides with respect to *cutting process definitions* (e.g. via *message exchange* or via *call activities*) and *migrating running process instances*. But even though it's possible to migrate running process instances to a new version (see below), it's typically a bit of *effort*. Therefore, the information presented in the following sections is meant to enable your conscious decision at which points it might make sense for you to avoid the necessity for migration by cutting processes and which aspects of versioning behavior you can control by doing that. - -### Cutting very long running processes into pieces - -The longer the lifespans of process instances are, the bigger the *risks* that you might want to exchange important software components like e.g. the workflow engine itself. 
Typically, *very long-running, end-to-end processes* (running longer than *six months*) have periods without activity (e.g. waiting for a certain date in the future). Cut the process into several independent process definitions at these points. - -
1

After the mobile phone was shipped, we finish the first process instance and just keep a reminder for the renewal in 24 months.

2

We periodically check due renewals and start new process instances whenever necessary.

We typically don't model such processes in one diagram; it is shown here this way only to illustrate the message flow. Typically, we would rather use a separate diagram per executable process and either leave out the other process completely or show it as a collapsed pool.

Also try to avoid modeling the complete life-cycle of very long living objects, like a life insurance contract. Only capture the active phases as separate processes (e.g. "Policy Issuing", "Address Change", "Cancellation" or "Death").

Having said this, we want to emphasize that the engine is perfectly fine with handling lots of process instances for a long time. So if you want to have process instances waiting for months or years, you can still do so. Just make sure you think about all resulting implications.

### Using call activities to influence versioning behavior of pieces

:::caution Camunda Cloud
With Camunda Cloud you cannot yet influence the version of the started process instance via the call activity. This feature is on the roadmap. At the moment, [a new process instance of the latest process definition version is started](/docs/components/modeler/bpmn/call-activities/).
:::

When calling separately modeled subprocesses (i.e. *call activities*), the default behavior of the process engine is to call the *latest* deployed version of that subprocess. You can change this default 'binding' behavior to call a *specific* version or the version which was *deployed* together with the parent process.

Keeping in mind the pros and cons of versioning as discussed above, we can therefore *encapsulate parts of a process* for which we want to be able to change the runtime behavior more often into such call activities. This is an especially useful consideration for *long-running processes*.
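In Camunda Platform 7.x, this binding is controlled by an attribute on the call activity in the BPMN XML (assuming the `camunda` extension namespace is declared on the definitions element). A sketch of the two variants discussed next; the ids and called process keys are examples:

```xml
<!-- Always call the latest deployed version of the called process: -->
<callActivity id="CallShipping" name="Ship goods"
    calledElement="shippingProcess"
    camunda:calledElementBinding="latest" />

<!-- Call the version that was deployed together with the parent process: -->
<callActivity id="CallBilling" name="Bill order"
    calledElement="billingProcess"
    camunda:calledElementBinding="deployment" />
```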
1

We could decide that we always want to follow the *latest* shipping process changes, even if the rules for shipping changed while we are in the order acceptance phase. We could, for example, reason that this acceptance phase sometimes takes a long time, because the procurement of goods currently not shelved happens within that phase.

2

Contrary to that, we could decide that the order billing always happens according to the rules valid at the moment we received the order and instantiated the parent process (*deployment*). Here we could, for example, reason that it is critical that the billing follows the rules communicated to the customer together with the offer.

diff --git a/versioned_docs/version-1.3/components/best-practices/overview.md b/versioned_docs/version-1.3/components/best-practices/overview.md deleted file mode 100644 index a613c4f5372..00000000000 --- a/versioned_docs/version-1.3/components/best-practices/overview.md +++ /dev/null @@ -1,71 +0,0 @@

---
title: Overview
description: "The Camunda Best Practices are our condensed experience using BPMN and DMN on the Camunda toolstack; collected by consulting with our customers and community."
---

The Camunda best practices are our condensed experience of using BPMN and DMN on the Camunda toolstack, collected through consulting engagements with our customers, feedback from the community, and various other occasions. Best practices are a mix of conceptual and practical implementation information.

Best practices represent the current state of our practical project experience as far as it is generalizable. They are neither "final" (in the sense that we ourselves will hopefully continue to learn!) nor are they necessarily the best approach for your own situation.

Note that Camunda does not give the same guarantee for best practices as for the core product. In order to present as many experiences as possible, we cannot accept any responsibility for the accuracy or timeliness of the statements made. If examples of source code are shown, a total absence of errors in the provided source code cannot be guaranteed. Liability for any damage resulting from the application of the recommendations presented here is excluded.

:::caution Camunda Cloud
In general, best practices apply to Camunda Cloud, but there are also some specific Camunda Platform 7 practices in their own section below.
-::: - -## Project management best practices - -* [Following the Customer Success Path](../management/following-the-customer-success-path/) -* [Doing a proper POC](../management/doing-a-proper-poc/) - -## Architecture best practices - -* [Deciding about your stack](../architecture/deciding-about-your-stack/) -* [Sizing your environment](../architecture/sizing-your-environment/) -* [Understanding human task management](../architecture/understanding-human-tasks-management/) - -## Development best practices - -* [Connecting the workflow engine with your world](../development/connecting-the-workflow-engine-with-your-world) -* [Service integration patterns with BPMN](../development/service-integration-patterns) -* [Writing good workers](../development/writing-good-workers) -* [Dealing with problems and exceptions](../development/dealing-with-problems-and-exceptions) -* [Handling data in processes](../development/handling-data-in-processes) -* [Routing events to processes](../development/routing-events-to-processes) -* [Testing process definitions](../development/testing-process-definitions) - - -## Modeling best practices - -* [Creating readable process models](../modeling/creating-readable-process-models/) -* [Naming BPMN elements](../modeling/naming-bpmn-elements/) -* [Naming technically relevant IDs](../modeling/naming-technically-relevant-ids/) -* [Modeling beyond the happy path](../modeling/modeling-beyond-the-happy-path/) -* [Modeling with situation patterns](../modeling/modeling-with-situation-patterns/) -* [Building flexibility into BPMN models](../modeling/building-flexibility-into-bpmn-models/) -* [Choosing the DMN Hit Policy](../modeling/choosing-the-dmn-hit-policy/) - -## Operations best practices - -* [Versioning process definitions](../operations/versioning-process-definitions/) -* [Reporting about processes](../operations/reporting-about-processes/) - - -## Camunda 7 specific best practices - -:::caution Camunda Platform 7 -The best practices in this section apply to Camunda Platform 7 only -::: - -* Architecture - * [Deciding about your Camunda 7 stack](../architecture/deciding-about-your-stack-c7/) - * [Sizing your Camunda 7 environment](../architecture/sizing-your-environment-c7/) -* Development - * [Invoking services from a Camunda 7 process](../development/invoking-services-from-the-process-c7/) - * [Understanding Camunda 7 transaction handling](../development/understanding-transaction-handling-c7/) -* Operations - * [Operating Camunda 7](../operations/operating-camunda-c7/) - * [Performance tuning Camunda 7](../operations/performance-tuning-camunda-c7/) - * [Securing Camunda 7](../operations/securing-camunda-c7/) -* Other - * [Extending human task management in Camunda 7](../architecture/extending-human-task-management-c7/) diff --git a/versioned_docs/version-1.3/components/cloud-console/introduction.md b/versioned_docs/version-1.3/components/cloud-console/introduction.md deleted file mode 100644 index aa66ab62f03..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/introduction.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: introduction -title: Introduction to Cloud Console ---- - -Cloud Console is the management application for the included products. - -Using Cloud Console, you can do the folllowing: - -- [Create](./manage-clusters/create-cluster.md) and [delete](./manage-clusters/delete-cluster.md) clusters. 
-- [Manage API clients](./manage-clusters/manage-api-clients.md) to interact with [Zeebe](/components/zeebe/zeebe-overview.md) and [Tasklist](/components/tasklist/introduction.md). -- [Manage alerts](./manage-clusters/manage-alerts.md) to get notified when workflow errors occur. -- [Manage IP Whitelists](./manage-clusters/manage-ip-whitelists.md) to restrict access to clusters. -- [Manage](./manage-organization/organization-settings.md) your organization. -- [Cloud Console API clients (REST)](/apis-tools/cloud-console-api-reference.md) to manage clusters programmatically. - -If you don't have a Camunda Cloud account yet, visit our [Getting Started Guide](../../guides/getting-started/create-camunda-cloud-account.md). \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/create-cluster-include.md b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/create-cluster-include.md deleted file mode 100644 index 309efc68a36..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/create-cluster-include.md +++ /dev/null @@ -1,27 +0,0 @@ ---- ---- - -Let's take a closer look at creating clusters and viewing their details. - -![cluster-creating](./img/cluster-overview-empty.png) - -To create a cluster, follow the steps below: - -1. Click **Create New Cluster** in the top right corner of the screen. - -2. Set a name and choose the channel. Currently, there are two channels available: - -- **Stable**: Provides the latest feature and patch releases ready for most users at a minimal risk. The releases follow semantic versioning and can be updated to the next minor or patch release without data loss. -- **Alpha**: Provides preview releases in preparation for the next stable release. They provide a short-term stability point to test new features and give feedback before they are released to the stable channel. Try these to ensure the upcoming release works with your infrastructure. These releases cannot be updated to a newer release, and therefore are not meant to be used in production. - -![cluster-creating-modal](./img/cluster-creating-modal.png) - -3. After you've made your selection and created the cluster, view the new entry in the **Clusters** tab: - -![cluster-creating](./img/cluster-overview-new-cluster-creating.png) - -4. The cluster is now being set up. During this phase, its state is **Creating**. After one or two minutes, the cluster is ready for use and changes its state to **Healthy**: - -![cluster-healthy](./img/cluster-overview-new-cluster-healthy.png) - -5. After the cluster is created, click on the cluster name to visit the cluster detail page. diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/create-cluster.md b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/create-cluster.md deleted file mode 100644 index c288914ce2f..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/create-cluster.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: create-cluster -title: Create a cluster -description: "Let's take a closer look at creating clusters and viewing their details." ---- - -If you haven't created a cluster yet, the **Clusters** page will be empty. 
- -import CreateCluster from './create-cluster-include.md' - - diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/delete-cluster.md b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/delete-cluster.md deleted file mode 100644 index 51f4a199526..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/delete-cluster.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: delete-cluster -title: Delete your cluster -description: "Follow these step-by-step instructions to remove your cluster permanently." ---- - -:::note -This action cannot be undone. -::: - -A cluster can be deleted at any time. To delete your cluster, follow the steps below: - -1. Open the cluster details by clicking on the cluster name. -2. Select the three vertical dots next to the cluster name near the top of the page to open the cluster's menu. -3. Click **Delete**. - -![cluster-delete](./img/cluster-delete.png) diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/client-auth.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/client-auth.png deleted file mode 100644 index 5e1c3d85e30..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/client-auth.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-creating-modal.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-creating-modal.png deleted file mode 100644 index db516f395b0..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-creating-modal.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-delete.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-delete.png deleted file mode 100644 index ad7eb0e4403..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-delete.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-alerts-webhook.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-alerts-webhook.png deleted file mode 100644 index 77dbb5573a5..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-alerts-webhook.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-alerts.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-alerts.png deleted file mode 100644 index 097faed529c..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-alerts.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-create-alert.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-create-alert.png deleted file mode 100644 index 3ce4434c15e..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-create-alert.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-create-ip-whitelist.png 
b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-create-ip-whitelist.png deleted file mode 100644 index f9da07e0af8..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-create-ip-whitelist.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-ip-whitelists.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-ip-whitelists.png deleted file mode 100644 index 57c59bd322b..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-detail-ip-whitelists.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-empty.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-empty.png deleted file mode 100644 index 467a311869f..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-empty.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-new-cluster-creating.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-new-cluster-creating.png deleted file mode 100644 index 7b73e899d29..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-new-cluster-creating.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-new-cluster-healthy.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-new-cluster-healthy.png deleted file mode 100644 index 15a0f899050..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-overview-new-cluster-healthy.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-rename.png b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-rename.png deleted file mode 100644 index e62f714e525..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/img/cluster-rename.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-alerts.md b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-alerts.md deleted file mode 100644 index 214efe56f31..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-alerts.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: manage-alerts -title: Manage alerts -description: "Camunda Cloud can notify you when process instances stop with an error." ---- - -Camunda Cloud can notify you when process instances stop with an error. - -There are two forms of notification: - -- By email to the email address of your user account -- By webhook - -### Create an alert - -To create a new alert, take the following steps: - -1. Select the **Alert** tab. - -![cluster-details](./img/cluster-detail-alerts.png) - -2. Click **Create** to create a new alert. - -![create-alert](./img/cluster-detail-create-alert.png) - -3. Choose between **Email** and **Webhook**. - -4. If you select **Email**, click **Create**. No further information is needed. For **Webhook**, complete the additional steps below. 
- -5. To create a **webhook** alert, provide a valid webhook URL that accepts `POST` requests. - -![create-alert-webhook](./img/cluster-detail-alerts-webhook.png) - -6. You will have one email alert per cluster, but you can create multiple webhook alerts if needed. - -### Webhook alerts - -Webhook alerts contain a JSON body with following structure: - -```json -{ - "clusterName": "cluster-name", - "clusterId": "88d32bfc-4f8e-4dd3-9ae2-adfee281e223", - "operateBaseUrl": "https://console.cloud.camunda.io/org/2b3bc239-ad5b-4eef-80e0-6ef5139ed66a/cluster/88d32bfc-4f8e-4dd3-9ae2-adfee281e223/operate", - "clusterUrl": "https://console.cloud.camunda.io/org/2b3bc239-ad5b-4eef-80e0-6ef5139ed66a/cluster/88d32bfc-4f8e-4dd3-9ae2-adfee281e223", - "alerts": [ - { - "operateUrl": "https://console.cloud.camunda.io/org/2b3bc239-ad5b-4eef-80e0-6ef5139ed66a/cluster/88d32bfc-4f8e-4dd3-9ae2-adfee281e223/operate/#/instances/2251799829404548", - "processInstanceId": "1234567890123456", - "errorMessage": "something went wrong", - "errorType": "JOB_NO_RETRIES", - "flowNodeId": "node-id", - "jobKey": 1234567890123456, - "creationTime": "2021-07-22T08:00:00.000+0000", - "processName": "process-name", - "processVersion": 1 - } - ] -} -``` diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-api-clients.md b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-api-clients.md deleted file mode 100644 index 20a04aa0724..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-api-clients.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: manage-api-clients -title: Manage API clients -description: "Let's create a client and manage our API clients." ---- - -To interact with Zeebe in the cloud from the outside, every client application must authenticate itself. An **OAuth Flow** is therefore used for authentication: - -![auth-flow](./img/client-auth.png) - -The client configuration is shown at the bottom of the cluster detail view. Create a new client and all necessary information is displayed. - -For the `Client Id` and `Client Secret`, a client application can request an access token at the authentication URL (steps 1 and 2). The access token is necessary to interact with Zeebe in the cloud (step 3). - -:::note -Access tokens have a validity period that can be found in the access token. After this time, a new access token must be requested. -::: - -:::note -The auth service has a built-in rate limit. If too many token requests are executed in a short time, the client is blocked for a certain time. Since the access tokens have a certain validity period, they must be cached on the client side. - -The officially offered client libraries (as well as the Node.js client) have already integrated with the auth routine, handle obtaining and refreshing an access token, and make use of a local cache. -::: - -### Create a client - -To create a client, take the following steps: - -1. Navigate into the **API** tab. - -![cluster-details](../../../guides/getting-started/img/cluster-detail-clients.png) - -2. Click **Create New Client** to create a new client and name your client accordingly. - -![create-client](../../../guides/getting-started/img/cluster-details-create-client.png) - -3. Ensure you keep the generated client credentials in a safe place. The **client secret** will not be shown again. For your convenience, you can also download the client information to your computer. 
- -![created-client](../../../guides/getting-started/img/cluster-details-created-client.png) - -The downloaded file contains all necessary information to communicate with your Zeebe instance in the future: - -- `ZEEBE_ADDRESS`: Address where your cluster can be reached. -- `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET`: Credentials to request a new access token. -- `ZEEBE_AUTHORIZATION_SERVER_URL`: A new token can be requested at this address. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-ip-whitelists.md b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-ip-whitelists.md deleted file mode 100644 index 0d2cb33dc90..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/manage-ip-whitelists.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: manage-ip-whitelists -title: Manage IP Whitelists ---- - -If your organization works within Camunda's [Enterprise](https://camunda.com/enterprise/) plan, you can restrict access to clusters with an IP Whitelist. - -### Create an IP Whitelist - -To create an IP Whitelist, take the following steps: - -1. Select the **IP Whitelist** tab. - -![cluster-details](./img/cluster-detail-ip-whitelists.png) - -2. Click **Create** to create an IP Whitelist. - -![create-alert](./img/cluster-detail-create-ip-whitelist.png) - -3. Enter a list of IPs or CIDR blocks separated by commas. - -4. Enter an optional description for the Whitelist. diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/rename-cluster.md b/versioned_docs/version-1.3/components/cloud-console/manage-clusters/rename-cluster.md deleted file mode 100644 index 1350b7249d9..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-clusters/rename-cluster.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: rename-cluster -title: Rename your cluster -description: "Follow these simple instructions to rename your cluster." ---- - -A cluster can be renamed at any time. To rename your cluster, follow the steps below: - -1. Open the cluster details by clicking on the cluster name. -2. Select the three vertical dots next to the cluster name near the top of the page to open the cluster's menu. -3. Click **Rename**. 
- -![cluster-rename](./img/cluster-rename.png) diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/activity-view.png b/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/activity-view.png deleted file mode 100644 index 8766b50e2b1..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/activity-view.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/avatar-menue-multiple-organisations.png b/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/avatar-menue-multiple-organisations.png deleted file mode 100644 index 248cb111f74..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/avatar-menue-multiple-organisations.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/avatar-menue.png b/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/avatar-menue.png deleted file mode 100644 index a45e1a95460..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/avatar-menue.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/billing-overview.png b/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/billing-overview.png deleted file mode 100644 index b01c6e9753e..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/billing-overview.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/trial-usage-history.png b/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/trial-usage-history.png deleted file mode 100644 index cde06a84756..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-organization/img/trial-usage-history.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/manage-users.md b/versioned_docs/version-1.3/components/cloud-console/manage-organization/manage-users.md deleted file mode 100644 index 47e994136ba..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-organization/manage-users.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: manage-users -title: Manage users of your organization -description: "Let's take a closer look at the rights and responsibilities of users in your organization." ---- - -## General rights concept - -When a user signs up for Camunda Cloud, they receive a personal organization. Clusters the user creates in this organization are assigned to this organization. - -If several users need access to the same Zeebe cluster, all users can be assigned to the same organization. - -## Users - -Under this setting, members of the current organization can be managed. A user can have one of the following roles: - -- **Owner**: Owner of the organization (currently limited to one user and cannot be changed by the user.) -- **Admin**: Restricted rights for user management. -- **Member**: Can manage Zeebe clusters, client, and use [Operate](/components/operate/index.md). 
- -The following table illustrates the rights of each role: - -| | Owner | Admin | Member | -| ---------------------------- | ----- | ----- | ------ | -| Manage Zeebe clusters | X | X | X | -| Manage clients | X | X | X | -| Use Operate | X | X | X | -| Users: Manage members | X | X | | -| Billing: Manage reservations | X | X | | -| Billing: Request paid plan | X | X | | -| Users: Manage admins | X | | | - -Users are invited to a Camunda Cloud organization via their email address, which must be accepted by the user. The user remains in the `Pending` state until the invitation is accepted. - -People who do not yet have a Camunda Cloud account can also be invited to an organization. To access the organization, however, the invited individual must first [create a Camunda Cloud account](../../../guides/getting-started/create-camunda-cloud-account.md). \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/organization-settings.md b/versioned_docs/version-1.3/components/cloud-console/manage-organization/organization-settings.md deleted file mode 100644 index 9ba9a57220d..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-organization/organization-settings.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: organization-settings -title: Organization management -description: "Follow these instructions to manage your organization." ---- - -Organization management can be accessed via the menu in the navigation bar. - -![avatar-menue](./img/avatar-menue.png) - -### Overview - -The overview provides a summary of the organization, including: - -- Organization name -- Pricing plan -- Owner of the organization - -## Next steps - -- [Manage users of your organization](./manage-users.md) -- [View organization activity](./view-organization-activity.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/switch-organization.md b/versioned_docs/version-1.3/components/cloud-console/manage-organization/switch-organization.md deleted file mode 100644 index 339da663284..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-organization/switch-organization.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -id: switch-organization -title: Switch organization ---- - -If a user is assigned to more than one organization, the organization can be changed in the menu of the navigation bar. - -![avatar-menue-multiple-organisations](./img/avatar-menue-multiple-organisations.png) diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/update-billing-reservations.md b/versioned_docs/version-1.3/components/cloud-console/manage-organization/update-billing-reservations.md deleted file mode 100644 index f2f360980fc..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-organization/update-billing-reservations.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: update-billing-reservations -title: Update billing reservations -description: "Let's manage our hardware packages and edit reservations." ---- - -:::note -This setting is only visible in the **Professional Plan** and **Enterprise Plan** for owners and admins. -::: - -## Managing hardware packages - -Once signed up for the **Professional Plan** or **Enterprise Plan**, you have access to the **Billing** page. - -- The created process instances from the current period are displayed at the top of the page. -- Find a history of the metrics on a monthly basis at the bottom of the page. 
-- View how many hardware packages are included on the right side of the page. - - **Professional Plan**: Change the reservations for additional hardware packages. - -Reservations control how many clusters you can deploy. Increasing the number of reservations allows you to deploy more clusters, while decreasing the number of reservations allows you to deploy fewer clusters. - -You can access the **Billing** page by selecting **Organization Management** in the Camunda Cloud Console navigation bar. - -![billing-overview](./img/billing-overview.png) - -### Edit reservations (Professional Plan only) - -Use the **Edit** button to change the number of reserved clusters. The number of reserved clusters cannot exceed the maximum limit and cannot go below what is currently in use. diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/usage-history.md b/versioned_docs/version-1.3/components/cloud-console/manage-organization/usage-history.md deleted file mode 100644 index efc4c810a1a..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-organization/usage-history.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: usage-history -title: Usage history -description: "Let's observe our workflow usage." ---- - -:::note -This setting is only visible during the **Trial Phase** for owners and admins. -::: - -Two key metrics play a role in paid plans: the number of started process instances, and the number of claimed user tasks. The **Organization Management** for trial plans provides a usage view for these metrics across the organization. - -Both metrics are aggregated on a monthly basis and displayed in the **Usage History**. - -![Usage History](./img/trial-usage-history.png) diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-organization/view-organization-activity.md b/versioned_docs/version-1.3/components/cloud-console/manage-organization/view-organization-activity.md deleted file mode 100644 index 4b320724bcb..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-organization/view-organization-activity.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -id: view-organization-activity -title: View organization activity -description: "Let's analyze the capabilities of the Activity tab." ---- - -The **Activity** tab lists all activities within an organization. Here, you can see when a cluster was created or deleted. - -![activity-view](./img/activity-view.png) diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-plan/available-plans.md b/versioned_docs/version-1.3/components/cloud-console/manage-plan/available-plans.md deleted file mode 100644 index 804e39b8967..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-plan/available-plans.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: available-plans -title: Available plans -description: "Let's take a closer look at Camunda's current plan options." ---- - -Camunda Cloud 1.0 was released to the world in May 2021. To sign up for Camunda Cloud, visit the [sign up page](https://accounts.cloud.camunda.io/signup). - -There is a free trial and two plans available for Camunda Cloud: - -- Professional -- Enterprise - -For more information on Camunda Cloud plans, visit the Camunda Cloud [product page](https://camunda.com/products/cloud/). 
\ No newline at end of file diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-plan/img/early-access-paid-request.png b/versioned_docs/version-1.3/components/cloud-console/manage-plan/img/early-access-paid-request.png deleted file mode 100644 index dcc0130677a..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/manage-plan/img/early-access-paid-request.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/cloud-console/manage-plan/upgrade-to-professional-plan.md b/versioned_docs/version-1.3/components/cloud-console/manage-plan/upgrade-to-professional-plan.md deleted file mode 100644 index ec28d9e5c8d..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/manage-plan/upgrade-to-professional-plan.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: upgrade-to-professional-plan -title: Upgrade to a Professional Plan -description: "Want to upgrade to a Professional Plan? Follow these steps." ---- - -:::note -The terms under which the Professional Plan is available might change in the future. -::: - -To request access to the Professional Plan, use the **Update Subscription** button in the navigation bar. Here, you can either contact us about the Enterprise Plan or update your subscription to the Professional Plan. - -![paid-request](./img/early-access-paid-request.png) diff --git a/versioned_docs/version-1.3/components/cloud-console/troubleshooting/common-pitfalls.md b/versioned_docs/version-1.3/components/cloud-console/troubleshooting/common-pitfalls.md deleted file mode 100644 index 6b220a038a6..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/troubleshooting/common-pitfalls.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: common-pitfalls -title: Common pitfalls -description: "Let's take a closer look at common issues and resolutions." ---- - -If you have an issue, use the [feedback form](./feedback-and-support.md). - -See a few common pitfalls below: - -## The button to create new clusters is disabled - -- Your organization is on a trial plan and you have already created a cluster. In this case, you cannot create another cluster, because only one cluster is included in the trial plan. - -- Your billing reservations do not allow any more clusters. You must increase the [reservations](../manage-organization/update-billing-reservations.md) if you want to create more clusters. If you do not have the necessary rights, contact an admin or the owner of the organization. - -## I cannot connect to Zeebe - -- Check if your [API client](../manage-clusters/manage-api-clients.md) has the necessary rights. To interact with Zeebe, the **Scope** `Zeebe` must be set. -- Check if your credentials are configured correctly. There is a CLI tool that allows you to check the status: [`zbctl`](https://www.npmjs.com/package/zbctl). With the command `zbctl status`, you can read the topology. If this command works, the connection can be established. -- Check if your cluster is **Healthy**: A Zeebe cluster may be temporarily unavailable. To check if your cluster is healthy, navigate to the cluster list. Click on the cluster to view its details for a closer view of the status over all products (Zeebe, Operate, Tasklist, Optimize). 
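-
-In addition to `zbctl status`, connectivity can be verified from the Java client used elsewhere in this documentation. The following is a minimal sketch, assuming the connection values from the downloaded API client file (`ZEEBE_ADDRESS`, `ZEEBE_CLIENT_ID`, `ZEEBE_CLIENT_SECRET`, `ZEEBE_AUTHORIZATION_SERVER_URL`) are exported as environment variables; deriving the token audience from the cluster address is an assumption for this sketch, not an official recipe:
-
-```java
-import io.camunda.zeebe.client.ZeebeClient;
-import io.camunda.zeebe.client.api.response.Topology;
-import io.camunda.zeebe.client.impl.oauth.OAuthCredentialsProviderBuilder;
-
-public class ConnectionCheck {
-
-  public static void main(String[] args) {
-    // Connection values from the downloaded API client file, exported as environment variables.
-    final String address = System.getenv("ZEEBE_ADDRESS");
-    final String clientId = System.getenv("ZEEBE_CLIENT_ID");
-    final String clientSecret = System.getenv("ZEEBE_CLIENT_SECRET");
-    final String authServerUrl = System.getenv("ZEEBE_AUTHORIZATION_SERVER_URL");
-
-    try (ZeebeClient client =
-        ZeebeClient.newClientBuilder()
-            .gatewayAddress(address)
-            .credentialsProvider(
-                new OAuthCredentialsProviderBuilder()
-                    .authorizationServerUrl(authServerUrl)
-                    .audience(address.replaceFirst(":\\d+$", "")) // assumption: audience = cluster host without port
-                    .clientId(clientId)
-                    .clientSecret(clientSecret)
-                    .build())
-            .build()) {
-      // A successful topology request confirms that credentials and network access are working.
-      final Topology topology = client.newTopologyRequest().send().join();
-      topology.getBrokers().forEach(broker -> System.out.println("Broker: " + broker.getAddress()));
-    }
-  }
-}
-```
-
-If the topology request fails with an authorization error, re-check the client credentials and scope; if it times out, check the cluster state and any IP Whitelist first.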
\ No newline at end of file diff --git a/versioned_docs/version-1.3/components/cloud-console/troubleshooting/feedback-and-support.md b/versioned_docs/version-1.3/components/cloud-console/troubleshooting/feedback-and-support.md deleted file mode 100644 index 2ae6fa55bc5..00000000000 --- a/versioned_docs/version-1.3/components/cloud-console/troubleshooting/feedback-and-support.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: feedback-and-support -title: Feedback and support ---- - -If you have any problems, questions, or suggestions, contact us via the [Camunda Cloud Forum](https://forum.camunda.io/). - -**Feedback and support** can be submitted or requested via the corresponding entry in the navigation menu. If you have a **license agreement** with us, you will be redirected to the [support queue](https://jira.camunda.com/projects/SUPPORT/) at Camunda. In our Trial phase, contact can be made using the internal form: - -![feedback-dialog](./img/contact-feedback-and-support.png) diff --git a/versioned_docs/version-1.3/components/cloud-console/troubleshooting/img/contact-feedback-and-support.png b/versioned_docs/version-1.3/components/cloud-console/troubleshooting/img/contact-feedback-and-support.png deleted file mode 100644 index 3df3a91e0d1..00000000000 Binary files a/versioned_docs/version-1.3/components/cloud-console/troubleshooting/img/contact-feedback-and-support.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/create-process-with-result.png b/versioned_docs/version-1.3/components/concepts/assets/create-process-with-result.png deleted file mode 100644 index a0cacf75644..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/create-process-with-result.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/create-process.png b/versioned_docs/version-1.3/components/concepts/assets/create-process.png deleted file mode 100644 index 83ad10b517b..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/create-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/message-aggregator.png b/versioned_docs/version-1.3/components/concepts/assets/message-aggregator.png deleted file mode 100644 index fabe84a4054..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/message-aggregator.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/message-correlation.png b/versioned_docs/version-1.3/components/concepts/assets/message-correlation.png deleted file mode 100644 index 9bacea0712f..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/message-correlation.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/message-single-instance.png b/versioned_docs/version-1.3/components/concepts/assets/message-single-instance.png deleted file mode 100644 index 59e3ded0cab..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/message-single-instance.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/order-process.png b/versioned_docs/version-1.3/components/concepts/assets/order-process.png deleted file mode 100644 index 07e87b95d02..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/process-conditions.png 
b/versioned_docs/version-1.3/components/concepts/assets/process-conditions.png deleted file mode 100644 index 6b3483e9519..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/process-conditions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/process-data-flow.png b/versioned_docs/version-1.3/components/concepts/assets/process-data-flow.png deleted file mode 100644 index 29b0470dd9a..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/process-data-flow.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/process-events.png b/versioned_docs/version-1.3/components/concepts/assets/process-events.png deleted file mode 100644 index 499e5562651..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/process-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/process-parallel-gw.png b/versioned_docs/version-1.3/components/concepts/assets/process-parallel-gw.png deleted file mode 100644 index b9208f0ec82..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/process-parallel-gw.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/process-parallel-mi.png b/versioned_docs/version-1.3/components/concepts/assets/process-parallel-mi.png deleted file mode 100644 index 2ff63f00b68..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/process-parallel-mi.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/process-sequence.png b/versioned_docs/version-1.3/components/concepts/assets/process-sequence.png deleted file mode 100644 index 55cebecee05..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/process-sequence.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/processes-data-based-conditions.png b/versioned_docs/version-1.3/components/concepts/assets/processes-data-based-conditions.png deleted file mode 100644 index 63126a12e57..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/processes-data-based-conditions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/processes-parallel-gateway.png b/versioned_docs/version-1.3/components/concepts/assets/processes-parallel-gateway.png deleted file mode 100644 index e32ce06f1b9..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/processes-parallel-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/variable-mappings.png b/versioned_docs/version-1.3/components/concepts/assets/variable-mappings.png deleted file mode 100644 index 0e6d0a26c43..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/variable-mappings.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/variable-propagation.png b/versioned_docs/version-1.3/components/concepts/assets/variable-propagation.png deleted file mode 100644 index 7449feeb7c4..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/variable-propagation.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/variable-scopes.png b/versioned_docs/version-1.3/components/concepts/assets/variable-scopes.png deleted file mode 100644 index 
bf4d8bc4f1b..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/variable-scopes.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/assets/zeebe-job-workers-graphic.png b/versioned_docs/version-1.3/components/concepts/assets/zeebe-job-workers-graphic.png deleted file mode 100644 index 7c848b00137..00000000000 Binary files a/versioned_docs/version-1.3/components/concepts/assets/zeebe-job-workers-graphic.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/concepts/expressions.md b/versioned_docs/version-1.3/components/concepts/expressions.md deleted file mode 100644 index a1523570927..00000000000 --- a/versioned_docs/version-1.3/components/concepts/expressions.md +++ /dev/null @@ -1,352 +0,0 @@ ---- -id: expressions -title: "Expressions" -description: "Expressions can be used to access variables and calculate values dynamically." ---- - -Expressions can be used to access variables and calculate values dynamically. - -The following attributes of BPMN elements *require* an expression: - -- Sequence flow on an exclusive gateway: [condition](/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md#conditions) -- Message catch event/receive task: [correlation key](/components/modeler/bpmn/message-events/message-events.md#messages) -- Multi-instance activity: [input collection](/components/modeler/bpmn/multi-instance/multi-instance.md#defining-the-collection-to-iterate-over), [output element](/components/modeler/bpmn/multi-instance/multi-instance.md#collecting-the-output) -- Input/output variable mappings: [source](variables.md#inputoutput-variable-mappings) - -Additionally, the following attributes of BPMN elements can define an expression *optionally*, instead of a static value: - -- Timer catch event: [timer definition](/components/modeler/bpmn/timer-events/timer-events.md#timers) -- Message catch event/receive task: [message name](/components/modeler/bpmn/message-events/message-events.md#messages) -- Service task/business rule task/script task/send task: [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition), [job retries](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) -- User task: [assignee](/components/modeler/bpmn/user-tasks/user-tasks.md#assignments), [candidateGroups](/components/modeler/bpmn/user-tasks/user-tasks.md#assignments) -- Call activity: [process id](/components/modeler/bpmn/call-activities/call-activities.md#defining-the-called-process) - -## Expressions vs. static values - -Some attributes of BPMN elements—like the timer definition of a timer catch event—can be defined in one of two ways: - -- As an expression (e.g. `= remaingTime`) -- As a static value (e.g. `PT2H`) - -Expressions always start with an **equals sign** (**=**). For example, `= order.amount > 100`. The text following the equal sign is the actual expression. For example, `order.amount > 100` checks if the amount of the order is greater than 100. - -If the element does not start with the prefix, it is used as a static value. A static value is used either as a string (e.g. job type) or as a number (e.g. job retries). A string value must not be enclosed in quotes. - -:::note -An expression can also define a static value by using literals (e.g. `= "foo"`, `= 21`, `= true`, `= [1,2,3]`, `= {x: 22}`, etc.) -::: - -## The expression language - -An expression is written in **FEEL** (**Friendly Enough Expression Language**). 
FEEL is part of the OMG's **DMN** (**Decision Model and Notation**) specification. It is designed to have the following properties: - -- Free of side effects -- Simple data model with JSON-like object types: numbers, dates, strings, lists, and contexts -- Simple syntax designed for business professionals and developers -- Three-valued logic (true, false, null) - -Camunda Cloud integrates the [FEEL Scala](https://github.com/camunda/feel-scala) engine to evaluate FEEL expressions. The following sections cover common use cases in Zeebe. A complete list of supported expressions can be found in [FEEL expressions](/reference/feel/what-is-feel.md). - -### Access variables - -A variable can be accessed by its name: - -```feel -owner -// "Paul" - -totalPrice -// 21.2 - -items -// ["item-1", "item-2", "item-3"] -``` - -If a variable is a JSON document/object, it is handled as a FEEL context. A property of the context (e.g. nested variable property) can be accessed by a period (`.`) and the property name: - -```feel -order.id -// "order-123" - -order.customer.name -// "Paul" -``` - -### Boolean expressions - -Values can be compared using the following operators: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Operator                   | Description                       | Example                        |
-| -------------------------- | --------------------------------- | ------------------------------ |
-| `=` (only one equals sign) | equal to                          | `owner = "Paul"`               |
-| `!=`                       | not equal to                      | `owner != "Paul"`              |
-| `<`                        | less than                         | `totalPrice < 25`              |
-| `<=`                       | less than or equal to             | `totalPrice <= 25`             |
-| `>`                        | greater than                      | `totalPrice > 25`              |
-| `>=`                       | greater than or equal to          | `totalPrice >= 25`             |
-| `between [X] and [Y]`      | same as `(v >= [X] and v <= [Y])` | `totalPrice between 10 and 25` |
-
-Multiple boolean values can be combined as conjunction (`and`) or disjunction (`or`):
-
-```feel
-orderCount >= 5 and orderCount < 15
-
-orderCount > 15 or totalPrice > 50
-```
-
-### Null checks
-
-If a variable or nested property can be `null`, it can be compared to the `null` value. Comparing `null` to a value different from `null` results in `false`.
-
-```feel
-order = null
-// true - if "order" is null or doesn't exist
-
-order.id = null
-// true - if "order" is null, "order" doesn't exist,
-// "id" is null, or "order" has no property "id"
-```
-
-In addition to the comparison with `null`, the built-in function `is defined()` can be used to differentiate between a value that is `null` and a value that doesn't exist.
-
-```feel
-is defined(order)
-// true - if "order" has any value or is null
-
-is defined(order.id)
-// false - if "order" doesn't exist or it has no property "id"
-```
-
-### String expressions
-
-A string value must be enclosed in double quotes. Multiple string values can be concatenated using the `+` operator.
-
-```feel
-"foo" + "bar"
-// "foobar"
-```
-
-Any value can be transformed into a string value using the `string()` function.
-
-```feel
-"order-" + string(orderId)
-// "order-123"
-```
-
-More functions for string values are available as [built-in string functions](/reference/feel/builtin-functions/feel-built-in-functions-string.md) (e.g. contains, matches, etc.)
-
-### Temporal expressions
-
-The current date and date-time can be accessed using the built-in functions `today()` and `now()`. To store the current date or date-time in a variable, convert it to a string using the built-in function `string()`.
-
-```feel
-now()
-// date and time("2020-04-06T15:30:00@UTC")
-
-today()
-// date("2020-04-06")
-
-string(today())
-// "2020-04-06"
-```
-
-The following operators can be applied to temporal values:
-
-| Temporal Type | Examples                                                                                                                          | Operators                                                                                                                                                                                                             |
-| ------------- | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| date          | `date("2020-04-06")`                                                                                                              | date + duration, date - date, date - duration                                                                                                                                                                         |
-| time          | `time("15:30:00")`, `time("15:30:00+02:00")`, `time("15:30:00@Europe/Berlin")`                                                   | time + duration, time - time, time - duration                                                                                                                                                                         |
-| date-time     | `date and time("2020-04-06T15:30:00")`, `date and time("2020-04-06T15:30:00+02:00")`, `date and time("2020-04-06T15:30:00@UTC")` | date-time + duration, date-time - date-time, date-time - duration                                                                                                                                                     |
-| duration      | `duration("PT12H")`, `duration("P4Y")`                                                                                            | duration + duration, duration + date, duration + time, duration + date-time, duration - duration, date - duration, time - duration, date-time - duration, duration * number, duration / duration, duration / number |
-| cycle         | `cycle(3, duration("PT1H"))`,
    - -A temporal value can be compared in a boolean expression with another temporal value of the same type. - -The `cycle` type is different from the other temporal types because it is not supported in the FEEL type system. - -Instead, the `cycle` type is defined as a function that returns the definition of the cycle as a string in the ISO 8601 format of a recurring time interval. - -The function expects two arguments: the number of repetitions, and the recurring interval as duration. If the first argument is `null` or not passed in, the interval is unbounded (i.e. infinitely repeated). - -```feel -cycle(3, duration("PT1H")) -// "R3/PT1H" - -cycle(duration("P7D")) -// "R/P7D" -``` - -### List expressions - -An element of a list can be accessed by its index. The index starts at `1` with the first element (*not* at `0`). - -A negative index starts at the end by `-1`. If the index is out of the range of the list,`null` is returned instead: - -```feel -["a","b","c"][1] -// "a" - -["a","b","c"][2] -// "b" - -["a","b","c"][-1] -// "c" -``` - -A list value can be filtered using a boolean expression; the result is a list of elements that fulfill the condition. - -The current element in the condition is assigned to the variable `item`: - -```feel -[1,2,3,4][item > 2] -// [3,4] -``` - -The operators `every` and `some` can be used to test if all elements or at least one element of a list fulfill a given condition: - -```feel -every x in [1,2,3] satisfies x >= 2 -// false - -some x in [1,2,3] satisfies x > 2 -// true -``` - -### Invoke functions - -A function can be invoked by its name followed by the arguments. The arguments can be assigned to the function parameters either by their position or by defining the parameter names: - -```feel -floor(1.5) -// 1 - -count(["a","b","c"]) -// 3 - -append(["a","b"], "c") -// ["a","b","c"] - -contains(string: "foobar", match: "foo") -// true -``` - -FEEL defines several built-in functions: - -- [Conversion functions](/reference/feel/builtin-functions/feel-built-in-functions-conversion.md) -- [Boolean functions](/reference/feel/builtin-functions/feel-built-in-functions-boolean.md) -- [String functions](/reference/feel/builtin-functions/feel-built-in-functions-string.md) -- [Numeric functions](/reference/feel/builtin-functions/feel-built-in-functions-numeric.md) -- [List functions](/reference/feel/builtin-functions/feel-built-in-functions-list.md) -- [Context functions](/reference/feel/builtin-functions/feel-built-in-functions-context.md) -- [Temporal functions](/reference/feel/builtin-functions/feel-built-in-functions-temporal.md) - -## Next steps - -- [FEEL](/reference/feel/what-is-feel.md) -- [FEEL data types](/reference/feel/language-guide/feel-data-types.md) -- [FEEL expressions](/reference/feel/language-guide/feel-expression.md) -- [DMN specification](https://www.omg.org/spec/DMN/About-DMN/) diff --git a/versioned_docs/version-1.3/components/concepts/incidents.md b/versioned_docs/version-1.3/components/concepts/incidents.md deleted file mode 100644 index 14495ab7ca7..00000000000 --- a/versioned_docs/version-1.3/components/concepts/incidents.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -id: incidents -title: "Incidents" -description: "A process instance is stuck at a particular point, and requires user interaction to resolve the problem." ---- - -In Camunda Cloud, an incident represents a problem in process execution. This means a process instance is stuck at a particular point, and requires user interaction to resolve the problem. 
- -Incidents are created in different situations, including the following: - -- A job is failed and it has no retries left. -- An input or output variable mapping can't be applied. -- A condition can't be evaluated. - -:::note -Incidents are not created when an unexpected exception (e.g. `NullPointerException`, `OutOfMemoyError` etc.) occurs. -::: - -## Resolving - -To resolve an incident, complete the following steps: - -1. Identify and resolve the problem. -2. Mark the incident as resolved, triggering retry process execution. -3. If the problem still exists, a new incident is created. - -### Resolving a job-related incident - -If a job fails and has no retries remaining, an incident is created. There are many different reasons why the job may have failed. For example, the variables may not be in the expected format, or a service is not available (e.g. a database). - -If the variables are causing the incident, complete the following steps: - -1. Update the variables of the process instance. -2. Increase the remaining retries of the job. -3. Mark the incident as resolved. - -:::note -It's recommended you complete these operations in [Operate](/components/operate/index.md). -::: - -It is also possible to complete these steps via the client API. Using the Java client, this could look like the following: - -```java -client.newSetVariablesCommand(incident.getElementInstanceKey()) - .variables(NEW_PAYLOAD) - .send() - .join(); - -client.newUpdateRetriesCommand(incident.getJobKey()) - .retries(3) - .send() - .join(); - -client.newResolveIncidentCommand(incident.getKey()) - .send() - .join(); -``` - -When the incident is resolved, the job can be activated by a worker again. - -### Resolving a process instance-related incident - -If an incident is created during process execution and it's not related to a job, the incident is usually related to the variables of the process instance. For example, an input or output variable mapping can't be applied. - -To resolve the incident, update the variables and mark the incident as resolved. - -:::note -It's recommended you complete these operations in [Operate](/components/operate/index.md). -::: - -Using the Java client, this could look like the following: - -```java -client.newSetVariablesCommand(incident.getElementInstanceKey()) - .variables(NEW_VARIABLES) - .send() - .join(); - -client.newResolveIncidentCommand(incident.getKey()) - .send() - .join(); -``` - -When the incident is resolved, the process instance continues. - -- [Operate](/components/operate/index.md) -- [APIs and Clients](/apis-tools/overview.md) diff --git a/versioned_docs/version-1.3/components/concepts/job-workers.md b/versioned_docs/version-1.3/components/concepts/job-workers.md deleted file mode 100644 index 1cfe66f5bb4..00000000000 --- a/versioned_docs/version-1.3/components/concepts/job-workers.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -id: job-workers -title: "Job workers" -description: "A job worker is a service capable of performing a particular task in a process." ---- - -A job worker is a service capable of performing a particular task in a process. - -Each time such a task needs to be performed, this is represented by a job. - -A job has the following properties: - -- **Type**: Describes the work item and is defined in each task in the process. The type is referenced by workers to request the jobs they are able to perform. -- **Custom headers**: Additional static metadata that is defined in the process. Custom headers are used to configure reusable job workers (e.g. 
a `notify Slack` worker might read out the Slack channel from its header.) -- **Key**: Unique key to identify a job. The key is used to hand in the results of a job execution, or to report failures during job execution. -- **Variables**: The contextual/business data of the process instance required by the worker to do its work. - -## Requesting jobs - -Job workers request jobs of a certain type on a regular interval (i.e. polling). This interval and the number of jobs requested are configurable in the Zeebe client. - -If one or more jobs of the requested type are available, Zeebe (the workflow engine inside Camunda Cloud) will stream activated jobs to the worker. Upon receiving jobs, a worker performs them and sends back a `complete` or `fail` command for each job, depending on if the job could be completed successfully. - -For example, the following process might generate three different types of jobs: `process-payment`, `fetch-items`, and `ship-parcel`: - -![order-process-model](assets/order-process.png) - -Three different job workers, one for each job type, could request jobs from Zeebe: - -![zeebe-job-workers-requesting-jobs](assets/zeebe-job-workers-graphic.png) - -Many workers can request the same job type to scale up processing. In this scenario, Zeebe ensures each job is sent to only one of the workers. - -Such a job is considered activated until the job is completed, failed, or the job activation times out. - -On requesting jobs, the following properties can be set: - -- **Worker**: The identifier of the worker. Used for auditing purposes. -- **Timeout**: The time a job is assigned to the worker. If a job is not completed within this time, it can be reassigned by Zeebe to another worker. -- **MaxJobsToActivate**: The maximum number of jobs which should be activated by this request. -- **FetchVariables**: A list of required variables names. If the list is empty, all variables of the process instance are requested. - -### Long polling - -Ordinarily, a request for jobs can be completed immediately when no jobs are available. - -To find a job to work on, the worker must poll again for available jobs. This leads to workers repeatedly sending requests until a job is available. - -This is expensive in terms of resource usage, because both the worker and the server are performing a lot of unproductive work. Zeebe supports **long polling** for available jobs to better utilize resources. - -With **long polling**, a request will be kept open while no jobs are available. The request is completed when at least one job becomes available. - -### Job queueing - -Zeebe decouples creation of jobs from performing the work on them. It is always possible to create jobs at the highest possible rate, regardless if there is a job worker available to work on them. This is possible because Zeebe queues jobs until workers request them. - -This increases the resilience of the overall system. Camunda Cloud is highly available so job workers don't have to be highly available. Zeebe queues all jobs during any job worker outages, and progress will resume as soon as workers come back online. - -This also insulates job workers against sudden bursts in traffic. Because workers request jobs, they have full control over the rate at which they take on new jobs. - -## Completing or failing jobs - -After working on an activated job, a job worker informs Camunda Cloud that the job has either `completed` or `failed`. 
- -- When the job worker completes its work, it sends a `complete job` command along with any variables, which in turn is merged into the process instance. This is how the job worker exposes the results of its work. -- If the job worker can not successfully complete its work, it sends a `fail job` command. Fail job commands include the number of remaining retries, which is set by the job worker. - - If `remaining retries` is greater than zero, the job is retried and reassigned. - - If `remaining retries` is zero or negative, an incident is raised and the job is not retried until the incident is resolved. - -## Timeouts - -If the job is not completed or failed within the configured job activation timeout, Zeebe reassigns the job to another job worker. This does not affect the number of `remaining retries`. - -A timeout may lead to two different workers working on the same job, possibly at the same time. If this occurs, only one worker successfully completes the job. The other `complete job` command is rejected with a `NOT FOUND` error. - -The fact that jobs may be worked on more than once means that Zeebe is an "at least once" system with respect to job delivery and that worker code must be idempotent. In other words, workers __must__ deal with jobs in a way that allows the code to be executed more than once for the same job, all while preserving the expected application state. - -## Next steps - -- [Zeebe overview](/components/zeebe/zeebe-overview.md) diff --git a/versioned_docs/version-1.3/components/concepts/messages.md b/versioned_docs/version-1.3/components/concepts/messages.md deleted file mode 100644 index f97f7a57537..00000000000 --- a/versioned_docs/version-1.3/components/concepts/messages.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -id: messages -title: "Messages" ---- - -Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called **message correlation**. - -## Message subscriptions - -A message is not sent to a process instance directly. Instead, the message correlation is based on subscriptions that contain the `message name` and the `correlation key` (also known as the correlation value). - -![Message Correlation](assets/message-correlation.png) - -A subscription is opened when a process instance awaits a message; for example, when entering a message catch event. The message name is defined either statically in the process (e.g. `Money collected`) or dynamically as an expression. The correlation key is defined dynamically as an expression (e.g. `= orderId`). The expressions are evaluated on activating the message catch event. The results of the evaluations are used as message name and as correlation key of the subscription (e.g. `"order-123"`). - -When a message is published and the message name and correlation key match to a subscription, the message is correlated to the corresponding process instance. If no proper subscription is opened, the message is discarded. - -A subscription is closed when the corresponding element (e.g. the message catch event), or its scope is left. After a subscription is opened, it is not updated (for example, when the referenced process instance variable is changed.) - -
-<details>
-  <summary>Publish message via zbctl</summary>
-  <p>
-
-```
-zbctl publish message "Money collected" --correlationKey "order-123"
-```
-
-  </p>
-</details>
    - -## Message buffering - -Messages can be buffered for a given time. Buffering can be useful in a situation when it's not guaranteed the subscription is opened before the message is published. - -A message has a **time-to-live** (**TTL**) which specifies for how long it's buffered. Within this time, the message can be correlated to a process instance. - -When a subscription is opened, it polls the buffer for a proper message. If a proper message exists, it is correlated to the corresponding process instance. In case multiple messages match to the subscription, the first published message is correlated (like a FIFO queue). - -The buffering of a message is disabled when its TTL is set to zero. If no proper subscription is open, the message is discarded. - -
-<details>
-  <summary>Publish message with TTL via zbctl</summary>
-  <p>
-
-```
-zbctl publish message "Money collected" --correlationKey "order-123" --ttl 1h
-```
-
-  </p>
-</details>
    - -## Message cardinality - -A message is correlated only *once* to a process (based on the BPMN process id), across all versions of this process. If multiple subscriptions for the same process are opened (by multiple process instances or within one instance,) the message is correlated only to one of the subscriptions. - -When subscriptions are opened for different processes, the message is correlated to *all* of the subscriptions. - -A message is *not* correlated to a message start event subscription if an instance of the process is active and was created by a message with the same correlation key. If the message is buffered, it can be correlated after the active instance is ended. Otherwise, it is discarded. - -## Message uniqueness - -A message can have an optional message id — a unique id to ensure the message is published and processed only once (i.e. idempotency). The id can be any string; for example, a request id, a tracking number, or the offset/position in a message queue. - -A message is rejected and not correlated if a message with the same name, the same correlation key, and the same id is already buffered. After the message is discarded from the buffer, a message with the same name, correlation key, and id can be published again. - -The uniqueness check is disabled when no message id is set. - -
    - Publish message with id via zbctl -

-
-```
-zbctl publish message "Money collected" --correlationKey "order-123" --messageId "tracking-12345"
-```
-
-
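To illustrate the uniqueness check, the flags above can be combined with a TTL so the first copy stays buffered. This is a sketch, not part of the original example:

```
# the first publish succeeds and the message is buffered for one hour
zbctl publish message "Money collected" --correlationKey "order-123" --messageId "tracking-12345" --ttl 1h
# an identical publish (same name, correlation key, and message id) is rejected
# while the first copy is still buffered
zbctl publish message "Money collected" --correlationKey "order-123" --messageId "tracking-12345" --ttl 1h
```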

    -
    - -## Message correlation overview - -By combining the principles of message correlation, message uniqueness, and message buffering, very different behaviors can be achieved. Please note that a message name is mandatory, so it is omitted from the table. - -| Correlation key | Message Id | Time to live | Receiver type | Behavior | -| --------------- | ---------- | ------------ | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| set | not set | set to 0 | Start event | A new instance is started if no instance with the correlation key set at start is active, see [single instance](./#single-instance). | -| set | not set | set to 0 | Intermediate event | The message is correlated if a matching subscription is active. | -| set | not set | set > 0 | Start event | A new instance is started if no instance with the correlation key set at start is active during the lifetime of the message; new [equal messages](#message-uniqueness) are buffered. | -| set | not set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription is active; new [equal messages](#message-uniqueness) are buffered. | -| set | set | set to 0 | Start event | A new instance is started if no instance with the correlation key set at start is active and there is no [equal message](#message-uniqueness) in the buffer. | -| set | set | set to 0 | Intermediate event | The message is correlated if a matching subscription is active and there is no [equal message](#message-uniqueness) in the buffer. | -| set | set | set > 0 | Start event | A new instance is started if no instance with the correlation key set at start is active during the lifetime of the message and there is no [equal message](#message-uniqueness) in the buffer. | -| set | set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription is active and there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | not set | set to 0 | Start event | A new instance is started. | -| empty string | not set | set to 0 | Intermediate event | The message is correlated if a matching subscription to the empty string is active. | -| empty string | not set | set > 0 | Start event | A new instance is started. | -| empty string | not set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription to the empty string is active; new [equal messages](#message-uniqueness) are buffered. | -| empty string | set | set to 0 | Start event | A new instance is started if there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | set | set to 0 | Intermediate event | The message is correlated if a matching subscription to the empty string is active and there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | set | set > 0 | Start event | A new instance is started if there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription to the empty string is active and there is no [equal message](#message-uniqueness) in the buffer. 
| - -## Message patterns - -The following patterns describe solutions for common problems that can be solved using message correlation. - -### Message aggregator - -**Problem**: Aggregate/collect multiple messages, map-reduce, batching - -**Solution**: - -![Message Aggregator](assets/message-aggregator.png) - -The messages are published with a `TTL > 0` and a correlation key that groups the messages per entity. - -The first message creates a new process instance. The following messages are correlated to the same process instance if they have the same correlation key. - -When the instance ends and messages with the same correlation key are not correlated yet, a new process instance is created. - -### Single instance - -**Problem**: Create exactly one instance of a process - -**Solution**: - -![Message Single Instance](assets/message-single-instance.png) - -The message is published with a `TTL = 0` and a correlation key that identifies the entity. - -The first message creates a new process instance. The following messages are discarded and do not create a new instance if they have the same correlation key and the created process instance is still active. diff --git a/versioned_docs/version-1.3/components/concepts/process-instance-creation.md b/versioned_docs/version-1.3/components/concepts/process-instance-creation.md deleted file mode 100644 index 3fb4dd7825b..00000000000 --- a/versioned_docs/version-1.3/components/concepts/process-instance-creation.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -id: process-instance-creation -title: "Process instance creation" -description: "Depending on the process definition, an instance of it can be created in several ways." ---- - -Depending on the process definition, an instance of it can be created in several ways. - -At Camunda, this includes the following: - -- `CreateProcessInstance` commands -- Timer event handler -- Message event - -## Commands - -A process instance is created by sending a command specifying the BPMN process id, or the unique key of the process. - -There are two commands to create a process instance, outlined in the sections below. - -### Create and execute asynchronously - -A process that has a [none start event](/components/modeler/bpmn/none-events/none-events.md#none-start-events) is started explicitly using **[CreateProcessInstance](/apis-tools/grpc.md#createprocessinstance-rpc)**. - -This command creates a new process instance and immediately responds with the process instance id. The execution of the process occurs after the response is sent. - -![create-process](assets/create-process.png) - -
    - Code example -

    Create a process instance:
-
-```
-zbctl create instance "order-process"
-```
-
-Response:
-
-```
-{
-  "processKey": 2251799813685249,
-  "bpmnProcessId": "order-process",
-  "version": 1,
-  "processInstanceKey": 2251799813686019
-}
-
-```
-
-
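The new instance can also be seeded with variables at creation time. This is a sketch reusing the `--variables` flag that appears in the `CreateProcessInstanceWithResult` example further below; the payload is hypothetical:

```
# hypothetical payload; the variables become part of the new process instance
zbctl create instance "order-process" --variables '{"orderId": "1234"}'
```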

    -
    - -### Create and await results - -Typically, process creation and execution are decoupled. However, there are use cases that need to collect the results of a process when its execution is complete. - -**[CreateProcessInstanceWithResult](/apis-tools/grpc.md#createprocessinstancewithresult-rpc)** allows you to “synchronously” execute processes and receive the results via a set of variables. The response is sent when the process execution is complete. - -![create-process](assets/create-process-with-result.png) - -This command is typically useful for short-running processes and processes that collect information. - -If the process mutates system state, or further operations rely on the process outcome response to the client, consider designing your system for failure states and retries. - -:::note -When the client resends the command, it creates a new process instance. -::: - -
    - Code example -

    Create a process instance and await results:
-
-```
-zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}'
-```
-
-Response: (Note that the variables in the response depend on the process.)
-
-```
-{
-  "processKey": 2251799813685249,
-  "bpmnProcessId": "order-process",
-  "version": 1,
-  "processInstanceKey": 2251799813686045,
-  "variables": "{\"orderId\":\"1234\"}"
-}
-```
-
-
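Because the response is only returned once the process completes, long-running processes can exceed the client's default request deadline (see the failure scenarios below). As a sketch, assuming your zbctl version exposes a `--requestTimeout` flag, the deadline can be raised explicitly:

```
# sketch: allow up to two minutes before the call times out
zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}' --requestTimeout 120s
```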

    -
    - -Failure scenarios applicable to other commands are applicable to this command as well. Clients may not get a response in the following cases even if the process execution is completed successfully: - -- **Connection timeout**: If the gRPC deadlines are not configured for long request timeout, the connection may be closed before the process is completed. -- **Network connection loss**: This can occur at several steps in the communication chain. -- **Failover**: When the node processing this process crashes, another node continues the processing. The other node does not send the response because the request is registered on the first one. -- **Gateway failure**: If the gateway the client is connected to fails, nodes inside the cluster cannot send the response to the client. - -## Events - -Process instances are also created implicitly via various start events. Camunda Cloud supports message start events and timer start events. - -### Message event - -A process with a [message start event](/components/modeler/bpmn/message-events/message-events.md#message-start-events) can be started by publishing a message with the name that matches the message name of the start event. - -For each new message a new instance is created. - -### Timer event - -A process can also have one or more [timer start events](/components/modeler/bpmn/timer-events/timer-events.md#timer-start-events). An instance of the process is created when the associated timer is triggered. Timers can also trigger periodically. - -## Next steps - -- [About Modeler](/components/modeler/about.md) -- [Automating a process using BPMN](/guides/automating-a-process-using-bpmn.md) diff --git a/versioned_docs/version-1.3/components/concepts/processes.md b/versioned_docs/version-1.3/components/concepts/processes.md deleted file mode 100644 index 810687ab6f3..00000000000 --- a/versioned_docs/version-1.3/components/concepts/processes.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -id: processes -title: "Processes" -description: "Processes are flowchart-like blueprints that define the orchestration of tasks." ---- - -Processes are flowchart-like blueprints that define the orchestration of **tasks**. - -Every task represents a piece of business logic so the ordered execution produces a meaningful result. - -A **job worker** implements the business logic required to complete a task. A job worker must be able to communicate with Camunda Cloud, but otherwise, there are no restrictions on its implementation. You can choose to write a worker as a microservice, but also as part of a classical 3-tier application, as a \(lambda\) function, via command line tools, etc. - -Running a process requires three steps: - -1. Deploy a process to Camunda Cloud. -2. Implement and register job workers for tasks in the workflows. -3. Create new instances of said process. - -Let's not get ahead of ourselves; the very first step is to design the process. - -## BPMN 2.0 - -Zeebe uses [BPMN 2.0](http://www.bpmn.org/) to represent processes. BPMN is an industry standard widely supported by different vendors and implementations. Using BPMN ensures processes can be interchanged between Zeebe and other process systems. - -## BPMN modeler - -Zeebe provides a free and open-source BPMN modeling tool to create BPMN diagrams and configure their technical properties. The modeler is a desktop application based on the [bpmn.io](https://bpmn.io) open-source project. - -Desktop Modeler can be [downloaded from GitHub](https://camunda.com/download/modeler/). 
- -## Sequences - -The simplest kind of process is an ordered sequence of tasks. Whenever process execution reaches a task, Zeebe (the workflow engine inside Camunda Cloud) creates a job that can be requested and completed by a job worker. - -![process-sequence](assets/order-process.png) - -You can think of Zeebe's process orchestration as a state machine, taking the following steps: - -1. A process instance reaches a task, and Zeebe creates a job that can be requested by a worker. -2. Zeebe waits for the worker to request a job and complete the work. -3. Once the work is complete, the flow continues to the next step. -4. If the worker fails to complete the work, the process remains at the current step, and the job could be retried until it's successfully completed. - -## Data flow - -As Zeebe progresses from one task to the next in a process, it can move custom data in the form of variables. Variables are key-value pairs and part of the process instance. - -![data-flow](assets/process-data-flow.png) - -Any job worker can read the variables and modify them when completing a job so data can be shared between different tasks in a process. - -## Data-based conditions - -Some processes don't always execute the same tasks, and instead need to choose different tasks based on variables and conditions: - -![data-conditions](assets/processes-data-based-conditions.png) - -The diamond shape with the **X** in the middle is an element indicating the process can take one of several paths. - -## Events - -Events represent things that happen. A process can react to events (catching event) and can emit events (throwing event). - -![process](assets/process-events.png) - -There are different types of events, such as a message, timer, or error. - -## Parallel execution - -In many cases, it's also useful to perform multiple tasks in parallel. This can be achieved with a parallel gateway: - -![data-conditions](assets/processes-parallel-gateway.png) - -The diamond shape with the **+** marker means all outgoing paths are activated. The tasks on those paths can run in parallel. The order is only fulfilled after both tasks have completed. - -## Next steps - -- [About Modeler](/components/modeler/about.md) -- [Automating a process using BPMN](/guides/automating-a-process-using-bpmn.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/concepts/variables.md b/versioned_docs/version-1.3/components/concepts/variables.md deleted file mode 100644 index cdc74b21ed9..00000000000 --- a/versioned_docs/version-1.3/components/concepts/variables.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -id: variables -title: "Variables" -description: "Variables are part of a process instance and represent the data of the instance." ---- - -Variables are part of a process instance and represent the data of the instance. - -A variable has a name and a JSON value. The visibility of a variable is defined by its variable scope. - -## Variable names - -The name of a variable can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to use the `camelCase` or the `snake_case` format. The `kebab-case` format is not allowed because it contains the operator `-`. - -When accessing a variable in an expression, keep in mind the variable name is case-sensitive. - -Restrictions of a variable name: - -- It may not start with a **number** (e.g. `1stChoice` is not allowed; you can use `firstChoice`instead). -- It may not contain **whitespaces** (e.g. 
`order number` is not allowed; you can use `orderNumber` instead). -- It may not contain an **operator** (e.g. `+`, `-`, `*`, `/`, `=`, `>`, `?`, `.`). -- It may not be a **literal** (e.g. `null`, `true`, `false`) or a **keyword** (e.g. `function`, `if`, `then`, `else`, `for`, `between`, `instance`, `of`, `not`). - -## Variable values - -The value of a variable is stored as a JSON value. It can have one of the following types: - -- String (e.g. `"John Doe"`) -- Number (e.g. `123`, `0.23`) -- Boolean (e.g. `true` or `false`) -- Array (e.g. `["item1" , "item2", "item3"]`) -- Object (e.g. `{ "orderNumber": "A12BH98", "date": "2020-10-15", "amount": 185.34}`) -- Null (`null`) - -## Variable scopes - -Variable scopes define the _visibility_ of variables. The root scope is the process instance itself. Variables in this scope are visible everywhere in the process. - -When the process instance enters a subprocess or an activity, a new scope is created. Activities in this scope can see all variables of this and of higher scopes (i.e. parent scopes). However, activities outside of this scope can not see the variables which are defined in this scope. - -If a variable has the same name as a variable from a higher scope, it covers this variable. Activities in this scope see only the value of this variable and not the one from the higher scope. - -The scope of a variable is defined when the variable is created. By default, variables are created in the root scope. - -![variable-scopes](assets/variable-scopes.png) - -This process instance has the following variables: - -- `a` and `b` are defined on the root scope and can be seen by **Task A**, **Task B**, and **Task C**. -- `c` is defined in the sub process scope and can be seen by **Task A** and **Task B**. -- `b` is defined again on the activity scope of **Task A** and can be seen only by **Task A**. It covers the variable `b` from the root scope. - -### Variable propagation - -When variables are merged into a process instance (e.g. on job completion, on message correlation, etc.) each variable is propagated from the scope of the activity to its higher scopes. - -The propagation ends when a scope contains a variable with the same name. In this case, the variable value is updated. - -If no scope contains this variable, it's created as a new variable in the root scope. - -![variable-propagation](assets/variable-propagation.png) - -The job of **Task B** is completed with the variables `b`, `c`, and `d`. The variables `b` and `c` are already defined in higher scopes and are updated with the new values. Variable `d` doesn't exist before and is created in the root scope. - -### Local variables - -In some cases, variables should be set in a given scope, even if they don't exist in this scope before. - -To deactivate the variable propagation, the variables are set as **local variables**. This means the variables are created or updated in the given scope, regardless if they existed in this scope before. - -## Input/output variable mappings - -Input/output variable mappings can be used to create new variables or customize how variables are merged into the process instance. - -Variable mappings are defined in the process as extension elements under `ioMapping`. Every variable mapping has a `source` and a `target` expression. - -The `source` expression defines the **value** of the mapping. Usually, it [accesses a variable](expressions.md#access-variables) of the process instance that holds the value. 
If the variable or the nested property doesn't exist, an [incident](incidents.md) is created. - -The `target` expression defines **where** the value of the `source` expression is stored. It can reference a variable by its name or a nested property of a variable. If the variable or the nested property doesn't exist, it's created. - -Variable mappings are evaluated in the defined order. Therefore, a `source` expression can access the target variable of a previous mapping. - -![variable-mappings](assets/variable-mappings.png) - -**Input mappings** - -| Source | Target | -| --------------- | ----------- | -| `customer.name` | `sender` | -| `customer.iban` | `iban` | -| `totalPrice` | `price` | -| `orderId` | `reference` | - -**Output mapping** - -| Source | Target | -| -------- | --------------- | -| `status` | `paymentStatus` | - -### Input mappings - -Input mappings can be used to create new variables. They can be defined on service tasks and subprocesses. - -When an input mapping is applied, it creates a new **local variable** in the scope where the mapping is defined. - -Examples: - -| Process instance variables | Input mappings | New variables | -| -- | -- | -- | -| `orderId: "order-123"` | **source:** `=orderId`
    **target:** `reference` | `reference: "order-123"` | -| `customer:{"name": "John"}` | **source:** `=customer.name`
    **target:** `sender` | `sender: "John"` | -| `customer: "John"`
    `iban: "DE456"` | **source:** `=customer`
    **target:** `sender.name`
    **source:** `=iban`
    **target:** `sender.iban` | `sender: {"name": "John", "iban": "DE456"}` | - -### Output mappings - -Output mappings can be used to customize how job/message variables are merged into the process instance. They can be defined on service tasks, receive tasks, message catch events, and subprocesses. - -If **one or more** output mappings are defined, the job/message variables are set as **local variables** in the scope where the mapping is defined. Then, the output mappings are applied to the variables and create new variables in this scope. The new variables are merged into the parent scope. If there is no mapping for a job/message variable, the variable is not merged. - -If **no** output mappings are defined, all job/message variables are merged into the process instance. - -In the case of a subprocess, the behavior is different. There are no job/message variables to be merged. However, output mappings can be used to propagate **local variables** of the subprocess to higher scopes. By default, all **local variables** are removed when the scope is left. - -Examples: - -| Job/message variables | Output mappings | Process instance variables | -| -- | -- | -- | -| `status: "Ok"` | **source:** `=status`
    **target:** `paymentStatus` | `paymentStatus: "OK"` | -| `result: {"status": "Ok", "transactionId": "t-789"}` | **source:** `=result.status`
    **target:** `paymentStatus`
    **source:** `=result.transactionId`
    **target:** `transactionId` | `paymentStatus: "Ok"`
    `transactionId: "t-789"` | -| `status: "Ok"`
    `transactionId: "t-789"` | **source:** `=transactionId`
    **target:** `order.transactionId` | `order: {"transactionId": "t-789"}` | - -## Next steps - -- [Accesses a variable](expressions.md#access-variables) -- [Incidents](incidents.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/concepts/what-is-camunda-cloud.md b/versioned_docs/version-1.3/components/concepts/what-is-camunda-cloud.md deleted file mode 100644 index f144b6c6252..00000000000 --- a/versioned_docs/version-1.3/components/concepts/what-is-camunda-cloud.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -id: what-is-camunda-cloud -title: "What is Camunda Cloud?" -description: "Camunda Cloud is a software as a service (SaaS) offering for microservice orchestration." ---- - -[Camunda Cloud](https://camunda.io) is a **software as a service** (**SaaS**) offering for microservice orchestration. Camunda Cloud ensures that, once started, flows are always carried out fully, retrying steps in case of failures. Along the way, Camunda Cloud maintains a complete audit log so the progress of flows can be tracked. - -Camunda Cloud is a turn-key solution that accelerates your project implementation. It is particularly suited for integrating heterogeneous systems and orchestrating dispersed services implemented in different programming languages. - -Behind the scenes, resources inside Camunda Cloud are highly available and fault tolerant to give you peace of mind. Finally, Camunda Cloud offers clusters of different sizes, so you can pick the offering that best fits your business needs. - -## What problem does it solve? - -A company’s end-to-end processes almost always span more than one microservice. In an e-commerce company, for example, a “customer order” process might involve a payments microservice, an inventory microservice, a shipping microservice, and more: - -![order-process](assets/order-process.png) - -These end-to-end processes are mission critical, yet the processes themselves are rarely modeled and monitored. Often, the flow of events through different microservices is not as transparent as it should be. - -Camunda Cloud addresses these challenges and provides: - -1. **Visibility** into the state of a company’s end-to-end processes, including the number of in-flight processes, average process duration, current errors within a process, and more. -2. **Process orchestration** based on the current state of a process; Camunda Cloud publishes “jobs” that can be executed by one or more microservices, ensuring the progress of processes according to their definition. -3. **Monitoring for timeouts** or other process errors with the ability to configure error-handling steps such as stateful retries or escalation to teams that can resolve an issue manually. - -## What are its core quality attributes? - -Camunda Cloud is designed to operate on a very large scale. To achieve this, it provides: - -- **Horizontal scalability** and no dependence on an external database; Zeebe (the workflow engine inside Camunda Cloud) writes data directly to the filesystem on the same servers where it is deployed. Zeebe makes it simple to distribute processing across a cluster of machines to deliver high throughput. -- **High availability and fault tolerance** via a pre-configured replication mechanism, ensuring Camunda Cloud can recover from machine or software failure with no data loss and minimal downtime. This ensures the system as a whole remains available without requiring manual action. 
-- **Audit trail** as all process-relevant events are written to an append-only log, providing an audit trail and a history of the state of a process. -- **Reactive publish-subscribe interaction model** which enables microservices that connect to Camunda Cloud to maintain a high degree of control and autonomy, including control over processing rates. These properties make Camunda Cloud resilient, scalable, and reactive. -- **Visual processes modeled in ISO-standard BPMN 2.0** so technical and non-technical stakeholders can collaborate on process design in a widely-used modeling language. -- **Language-agnostic client model** makes it possible to build a client in nearly any programming language an organization uses to build microservices. -- **Operational ease-of-use** as a SaaS provider we take care of all operational details. - -## How does it compare to other solutions? - -Most existing workflow engines offer a vast amount of features. While having access to lots of features is generally a good thing, it can come at a cost of increased complexity and degraded performance. - -Camunda Cloud is entirely focused on providing a compact, robust, and scalable solution for process orchestration. Rather than supporting a broad spectrum of features, its goal is to excel within this scope. - -## Next steps - -* To request information about Camunda Cloud performance and benchmarking, see our [Contact](/contact/) page. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/about.md b/versioned_docs/version-1.3/components/modeler/about.md deleted file mode 100644 index a11437277ae..00000000000 --- a/versioned_docs/version-1.3/components/modeler/about.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: about-modeler -title: About Modeler -description: "Learn about Camunda Modeler, BPMN diagrams designed and configured, and both Web Modeler and Desktop Modeler." ---- - -Camunda Cloud only - -:::note - -Web Modeler is only available for Camunda Cloud SaaS. - -::: - -Any executable process needs a **BPMN diagram** designed and configured beforehand. - -A BPMN diagram is used to visually outline the structure and flow of a process. As a result, the process can be more easily understood by various stakeholders. - -In tandem, different events and implementation details (such as the conditions within a gateway or the specifications of a service task) must be configured in the model so the workflow engine understands what must executed once the process reaches a certain task. - -Camunda offers a few tools to design your diagrams and implement them: - -- [Web Modeler](./web-modeler/launch-web-modeler.md) -- [Desktop Modeler](./desktop-modeler/install-the-modeler.md) - -Web Modeler and Desktop Modeler differ mainly in their environment. Web Modeler is part of Cloud Console and offers a seamless integration into Camunda Cloud to model BPMN. Desktop Modeler is a desktop application that can be installed and used locally, all while integrating your local development environment. - -In this guide, we'll demonstrate modeling BPMN diagrams using both Web Modeler and Desktop Modeler. - -## Next steps - -- [Modeling BPMN](/guides/automating-a-process-using-bpmn.md) - Learn how to quickly model an automated process using BPMN. -- [Camunda Forms](/guides/utilizing-forms.md) - Allows you to easily design and configure forms. Once configured, they can be connected to a user task or start event to implement a task form in your application. 
-- [DMN](./dmn/dmn.md) - In DMN, decisions can be modeled and executed using the same language. Business analysts can model the rules that lead to a decision in easy to read tables, and those tables can be executed directly by a decision engine (like Camunda). \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/annotation.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/annotation.svg deleted file mode 100644 index 3f8e1c35326..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/annotation.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - Text - - Annotation - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg deleted file mode 100644 index 3a41d3c920c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Business Rule - Task - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg deleted file mode 100644 index a1eeace9113..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - Call Activity - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/cancel-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/cancel-boundary-event.svg deleted file mode 100644 index d9c1f6addab..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/cancel-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/cancel-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/cancel-end-event.svg deleted file mode 100644 index cfb1c54af20..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/cancel-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-boundary-event.svg deleted file mode 100644 index 70f87d1e3eb..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-end-event.svg deleted file mode 100644 index 9e6802eb944..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-event-subprocess.svg 
b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-event-subprocess.svg deleted file mode 100644 index 0e9046194fd..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-throw-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-throw-event.svg deleted file mode 100644 index ebdce12af97..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation.svg deleted file mode 100644 index fb4dd8731b7..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/compensation.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - - Compensation - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/complex-gateway.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/complex-gateway.svg deleted file mode 100644 index 4901c0a0721..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/complex-gateway.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Complex - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg deleted file mode 100644 index e35bc1b52b2..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event.svg deleted file mode 100644 index b18e46103e5..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-catch-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-catch-event.svg deleted file mode 100644 index b18e46103e5..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg deleted file mode 100644 index ecb9a0e4216..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file 
diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess.svg deleted file mode 100644 index e1bd95cc866..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-start-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-start-event.svg deleted file mode 100644 index e1bd95cc866..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/conditional-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/data-object.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/data-object.svg deleted file mode 100644 index 4b485499d7b..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/data-object.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Data - Object - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/data-store.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/data-store.svg deleted file mode 100644 index d451a7795f9..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/data-store.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Data - Store - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/embedded-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/embedded-subprocess.svg deleted file mode 100644 index 6d0a9695a1a..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/embedded-subprocess.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - Subprocess - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-boundary-event.svg deleted file mode 100644 index f7da4680d22..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-end-event.svg deleted file mode 100644 index 58b179e3e40..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-event-subprocess.svg deleted file mode 100644 index 28655af00f2..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/error-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg deleted file mode 100644 index c8f6b20ba96..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event.svg deleted file mode 100644 index b20177c5380..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-end-event.svg deleted file mode 100644 index 452576c5f1e..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg deleted file mode 100644 index 9342597d9c3..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess.svg deleted file mode 100644 index 25d9829720a..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-throw-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-throw-event.svg deleted file mode 100644 index 7677824daf5..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/escalation-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/event-based-gateway.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/event-based-gateway.svg deleted file mode 100644 index cb665488894..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/event-based-gateway.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Event - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/event-subprocess.svg deleted file mode 100644 index e3c1c82409a..00000000000 --- 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/event-subprocess.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - Event - Subprocess - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/exclusive-gateway.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/exclusive-gateway.svg deleted file mode 100644 index 79723eb4c97..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/exclusive-gateway.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - XOR - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/group.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/group.svg deleted file mode 100644 index 8ddeb24aa3c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/group.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - Group - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/inclusive-gateway.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/inclusive-gateway.svg deleted file mode 100644 index 7993f382649..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/inclusive-gateway.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - OR - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/lane.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/lane.svg deleted file mode 100644 index cfbfd447bf8..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/lane.svg +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - Pool - - - - - - - - Lane - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/link-catch-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/link-catch-event.svg deleted file mode 100644 index 97bbdd98734..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/link-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/link-throw-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/link-throw-event.svg deleted file mode 100644 index 81f8ac8dce2..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/link-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/loop.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/loop.svg deleted file mode 100644 index b4f07afbe5f..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/loop.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - - Loop - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/manual-task.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/manual-task.svg deleted file mode 100644 index c69c249c3f6..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/manual-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Manual Task - - - \ No newline at end of file diff --git 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event-non-interrupting.svg deleted file mode 100644 index f156f4f49b0..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event.svg deleted file mode 100644 index b401af0ca1b..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-catch-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-catch-event.svg deleted file mode 100644 index b401af0ca1b..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-end-event.svg deleted file mode 100644 index 0317ca99db8..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg deleted file mode 100644 index a6655b7d75d..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess.svg deleted file mode 100644 index 8aef44c32b9..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-start-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-start-event.svg deleted file mode 100644 index 8aef44c32b9..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-throw-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-throw-event.svg deleted file mode 100644 index ef9880a1ecc..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/message-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No 
newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-parallel.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-parallel.svg deleted file mode 100644 index 1c564f27f4f..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-parallel.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - Multi-Instance - Parallel - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-sequential.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-sequential.svg deleted file mode 100644 index 6099e144af3..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-sequential.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - Multi-Instance - Sequential - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg deleted file mode 100644 index 98c0ea186a2..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event.svg deleted file mode 100644 index dc96d662c5c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-catch-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-catch-event.svg deleted file mode 100644 index dc96d662c5c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-end-event.svg deleted file mode 100644 index 8b84b943fff..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg deleted file mode 100644 index c938312f93f..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess.svg deleted file mode 100644 index 419383a38e3..00000000000 --- 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg deleted file mode 100644 index 0a12743ba52..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event.svg deleted file mode 100644 index d2040edd54d..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-catch-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-catch-event.svg deleted file mode 100644 index d2040edd54d..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg deleted file mode 100644 index dd175021ad2..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess.svg deleted file mode 100644 index 665ea748c0a..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-start-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-start-event.svg deleted file mode 100644 index 665ea748c0a..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-start-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-start-event.svg deleted file mode 100644 index 419383a38e3..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-throw-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-throw-event.svg deleted file mode 100644 index fb649b893a1..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/multiple-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-end-event.svg deleted file mode 100644 index a8ddd388078..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-end-event.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-start-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-start-event.svg deleted file mode 100644 index 1fbe9b1ad70..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-start-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-throw-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-throw-event.svg deleted file mode 100644 index c2f61d5af3e..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/none-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/parallel-gateway.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/parallel-gateway.svg deleted file mode 100644 index 04e961ab3b6..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/parallel-gateway.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - AND - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/pool.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/pool.svg deleted file mode 100644 index a94f7d0c090..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/pool.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - Pool - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/receive-task-instantiated.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/receive-task-instantiated.svg deleted file mode 100644 index b91ecfde13c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/receive-task-instantiated.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Receive Task - (instantiated) - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/receive-task.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/receive-task.svg deleted file mode 100644 index 0e3f54f0ee5..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/receive-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Receive Task - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/script-task.svg 
b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/script-task.svg deleted file mode 100644 index c4111af129c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/script-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Script Task - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/send-task.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/send-task.svg deleted file mode 100644 index 6d27a380b50..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/send-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Send Task - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/service-task.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/service-task.svg deleted file mode 100644 index 58190c2ffc5..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/service-task.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Service Task - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg deleted file mode 100644 index ff5940246dd..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event.svg deleted file mode 100644 index 0eb546b465f..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-catch-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-catch-event.svg deleted file mode 100644 index 0eb546b465f..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-end-event.svg deleted file mode 100644 index 2e79ffa90f9..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg deleted file mode 100644 index 4640651c8d0..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess.svg 
b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess.svg deleted file mode 100644 index 7c5e9e7222d..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-start-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-start-event.svg deleted file mode 100644 index 7c5e9e7222d..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-throw-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-throw-event.svg deleted file mode 100644 index 1d353a28ea9..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/signal-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/termination-end-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/termination-end-event.svg deleted file mode 100644 index 3e2988fb8ed..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/termination-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg deleted file mode 100644 index deec2de4a62..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event.svg deleted file mode 100644 index fe487510406..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-catch-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-catch-event.svg deleted file mode 100644 index fe487510406..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-catch-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg deleted file mode 100644 index 09d63c08310..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess.svg deleted file mode 100644 index 38035befd81..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-start-event.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-start-event.svg deleted file mode 100644 index 38035befd81..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/timer-start-event.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/transactional-subprocess.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/transactional-subprocess.svg deleted file mode 100644 index 9546777117b..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/transactional-subprocess.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - Transaction - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/undefined-task.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/undefined-task.svg deleted file mode 100644 index eca69ecc221..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/undefined-task.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - Undefined Task - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/user-task.svg b/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/user-task.svg deleted file mode 100644 index 1950c3f12e1..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/bpmn-symbols/user-task.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - User Task - - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/data-flow-job-worker.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/data-flow-job-worker.png deleted file mode 100644 index 76c1dce170a..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/data-flow-job-worker.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/data-flow.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/data-flow.png deleted file mode 100644 index e526d24a089..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/data-flow.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/demo.gif b/versioned_docs/version-1.3/components/modeler/bpmn/assets/demo.gif deleted file mode 100644 index 652f86459e7..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/demo.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/merging-mapping.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/merging-mapping.png deleted file mode 100644 index 821240dea84..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/merging-mapping.png and /dev/null differ diff --git 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/modeler.gif b/versioned_docs/version-1.3/components/modeler/bpmn/assets/modeler.gif deleted file mode 100644 index f1999370e54..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/modeler.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/order-process.bpmn b/versioned_docs/version-1.3/components/modeler/bpmn/assets/order-process.bpmn deleted file mode 100644 index 795babfcdcf..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/order-process.bpmn +++ /dev/null @@ -1,80 +0,0 @@ - - - - - SequenceFlow_0j6tsnn - - - - SequenceFlow_0j6tsnn - SequenceFlow_0baemzs - - - - SequenceFlow_0cu1bs2 - SequenceFlow_19klrd3 - - - SequenceFlow_19klrd3 - - - - - SequenceFlow_0baemzs - SequenceFlow_0cu1bs2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/order-process.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/order-process.png deleted file mode 100644 index c9185cf96a8..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/parallel-gateway.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/parallel-gateway.png deleted file mode 100644 index c0601d42b89..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/parallel-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/process.bpmn b/versioned_docs/version-1.3/components/modeler/bpmn/assets/process.bpmn deleted file mode 100644 index 828a8c302ca..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/assets/process.bpmn +++ /dev/null @@ -1,89 +0,0 @@ - - - - - SequenceFlow_1bq1azi - - - - - - SequenceFlow_0ojoaqz - - - - - - - SequenceFlow_1bq1azi - SequenceFlow_09hqjpg - - - - - - SequenceFlow_09hqjpg - SequenceFlow_1ea1mpb - - - - - - SequenceFlow_1ea1mpb - SequenceFlow_0ojoaqz - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/process.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/process.png deleted file mode 100644 index 8576c92b106..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-1.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-1.png deleted file mode 100644 index d6cbd6cd71e..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-1.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-2.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-2.png deleted file mode 100644 index 921512861e5..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-2.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-3.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-3.png deleted file mode 100644 index 70160884df0..00000000000 Binary files 
a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-3.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-4.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-4.png deleted file mode 100644 index be3bb49e159..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/quickstart-4.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/sequenceflow.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/sequenceflow.png deleted file mode 100644 index 28e1e0f552d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/sequenceflow.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/sub-process.gif b/versioned_docs/version-1.3/components/modeler/bpmn/assets/sub-process.gif deleted file mode 100644 index 11d7467db21..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/sub-process.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/assets/variable-scopes.png b/versioned_docs/version-1.3/components/modeler/bpmn/assets/variable-scopes.png deleted file mode 100644 index 3caefe67e43..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/assets/variable-scopes.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/bpmn-coverage.md b/versioned_docs/version-1.3/components/modeler/bpmn/bpmn-coverage.md deleted file mode 100644 index e49bbf62fc7..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/bpmn-coverage.md +++ /dev/null @@ -1,693 +0,0 @@ ---- -id: bpmn-coverage -title: "BPMN coverage" -description: "List of BPMN symbols compatible with Camunda Cloud" ---- - -export const Highlight = ({children, color}) => ( - -{children} - -); - -The following BPMN elements in green are supported. Click on -an element to navigate to the documentation. 
- -## Participants - -import PoolSvg from './assets/bpmn-symbols/pool.svg'; -import LaneSvg from './assets/bpmn-symbols/lane.svg'; - - - -## Subprocesses - -import EmbeddedSubprocessSvg from './assets/bpmn-symbols/embedded-subprocess.svg'; -import CallActivitySvg from './assets/bpmn-symbols/call-activity.svg'; -import EventSubprocessSvg from './assets/bpmn-symbols/event-subprocess.svg' -import TransactionalSubprocessSvg from './assets/bpmn-symbols/transactional-subprocess.svg' - - - -## Tasks - -import ServiceTaskSvg from './assets/bpmn-symbols/service-task.svg' -import UserTaskSvg from './assets/bpmn-symbols/user-task.svg' -import ReceiveTaskSvg from './assets/bpmn-symbols/receive-task.svg' -import SendTaskSvg from './assets/bpmn-symbols/send-task.svg' -import BusinessRuleTaskSvg from './assets/bpmn-symbols/business-rule-task.svg' -import ScriptTaskSvg from './assets/bpmn-symbols/script-task.svg' -import ManualTaskSvg from './assets/bpmn-symbols/manual-task.svg' -import UndefinedTaskSvg from './assets/bpmn-symbols/undefined-task.svg' -import ReceiveTaskInstantiatedSvg from './assets/bpmn-symbols/receive-task-instantiated.svg' - - - -## Gateways - -import ExclusiveGatewaySvg from './assets/bpmn-symbols/exclusive-gateway.svg' -import InclusiveGatewaySvg from './assets/bpmn-symbols/inclusive-gateway.svg' -import ParallelGatewaySvg from './assets/bpmn-symbols/parallel-gateway.svg' -import EventBasedGatewaySvg from './assets/bpmn-symbols/event-based-gateway.svg' -import ComplexGatewaySvg from './assets/bpmn-symbols/complex-gateway.svg' - - - -## Markers - -import MultiInstanceParallelSvg from './assets/bpmn-symbols/multi-instance-parallel.svg' -import MultiInstanceSequentialSvg from './assets/bpmn-symbols/multi-instance-sequential.svg' -import LoopSvg from './assets/bpmn-symbols/loop.svg' -import CompensationSvg from './assets/bpmn-symbols/compensation.svg' - - - -## Data - -import DataObjectSvg from './assets/bpmn-symbols/data-object.svg' -import DataStoreSvg from './assets/bpmn-symbols/data-store.svg' - - - -## Artifacts - -import AnnotationSvg from './assets/bpmn-symbols/annotation.svg' -import GroupSvg from './assets/bpmn-symbols/group.svg' - - - -## Events - -import NoneStartEventSvg from './assets/bpmn-symbols/none-start-event.svg' -import NoneThrowEventSvg from './assets/bpmn-symbols/none-throw-event.svg' -import NoneEndEventSvg from './assets/bpmn-symbols/none-end-event.svg' - -import MessageStartEventSvg from './assets/bpmn-symbols/message-start-event.svg' -import MessageEventSubprocessSvg from './assets/bpmn-symbols/message-event-subprocess.svg' -import MessageEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg' -import MessageCatchEventSvg from './assets/bpmn-symbols/message-catch-event.svg' -import MessageBoundaryEventSvg from './assets/bpmn-symbols/message-boundary-event.svg' -import MessageBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/message-boundary-event-non-interrupting.svg' -import MessageThrowEventSvg from './assets/bpmn-symbols/message-throw-event.svg' -import MessageEndEventSvg from './assets/bpmn-symbols/message-end-event.svg' - -import TimerStartEventSvg from './assets/bpmn-symbols/timer-start-event.svg' -import TimerEventSubprocessSvg from './assets/bpmn-symbols/timer-event-subprocess.svg' -import TimerEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg' -import TimerCatchEventSvg from './assets/bpmn-symbols/timer-catch-event.svg' -import 
TimerBoundaryEventSvg from './assets/bpmn-symbols/timer-boundary-event.svg' -import TimerBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg' - -import ErrorEventSubprocessSvg from './assets/bpmn-symbols/error-event-subprocess.svg' -import ErrorBoundaryEventSvg from './assets/bpmn-symbols/error-boundary-event.svg' -import ErrorEndEventSvg from './assets/bpmn-symbols/error-end-event.svg' - -import SignalStartEventSvg from './assets/bpmn-symbols/signal-start-event.svg' -import SignalEventSubprocessSvg from './assets/bpmn-symbols/signal-event-subprocess.svg' -import SignalEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg' -import SignalCatchEventSvg from './assets/bpmn-symbols/signal-catch-event.svg' -import SignalBoundaryEventSvg from './assets/bpmn-symbols/signal-boundary-event.svg' -import SignalBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg' -import SignalThrowEventSvg from './assets/bpmn-symbols/signal-throw-event.svg' -import SignalEndEventSvg from './assets/bpmn-symbols/signal-end-event.svg' - -import ConditionalStartEventSvg from './assets/bpmn-symbols/conditional-start-event.svg' -import ConditionalEventSubprocessSvg from './assets/bpmn-symbols/conditional-event-subprocess.svg' -import ConditionalEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg' -import ConditionalCatchEventSvg from './assets/bpmn-symbols/conditional-catch-event.svg' -import ConditionalBoundaryEventSvg from './assets/bpmn-symbols/conditional-boundary-event.svg' -import ConditionalBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg' - -import EscalationEventSubprocessSvg from './assets/bpmn-symbols/escalation-event-subprocess.svg' -import EscalationEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg' -import EscalationBoundaryEventSvg from './assets/bpmn-symbols/escalation-boundary-event.svg' -import EscalationBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg' -import EscalationThrowEventSvg from './assets/bpmn-symbols/escalation-throw-event.svg' -import EscalationEndEventSvg from './assets/bpmn-symbols/escalation-end-event.svg' - -import CompensationEventSubprocessSvg from './assets/bpmn-symbols/compensation-event-subprocess.svg' -import CompensationBoundaryEventSvg from './assets/bpmn-symbols/compensation-boundary-event.svg' -import CompensationThrowEventSvg from './assets/bpmn-symbols/compensation-throw-event.svg' -import CompensationEndEventSvg from './assets/bpmn-symbols/compensation-end-event.svg' - -import CancelBoundaryEventSvg from './assets/bpmn-symbols/cancel-boundary-event.svg' -import CancelEndEventSvg from './assets/bpmn-symbols/cancel-end-event.svg' - -import TerminationEndEventSvg from './assets/bpmn-symbols/termination-end-event.svg' - -import LinkCatchEventSvg from './assets/bpmn-symbols/link-catch-event.svg' -import LinkThrowEventSvg from './assets/bpmn-symbols/link-throw-event.svg' - -import MultipleStartEventSvg from './assets/bpmn-symbols/multiple-start-event.svg' -import MultipleEventSubprocessSvg from './assets/bpmn-symbols/multiple-event-subprocess.svg' -import MultipleEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg' -import 
MultipleCatchEventSvg from './assets/bpmn-symbols/multiple-catch-event.svg' -import MultipleBoundaryEventSvg from './assets/bpmn-symbols/multiple-boundary-event.svg' -import MultipleBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg' -import MultipleThrowEventSvg from './assets/bpmn-symbols/multiple-throw-event.svg' -import MultipleEndEventSvg from './assets/bpmn-symbols/multiple-end-event.svg' - -import MultipleParallelStartEventSvg from './assets/bpmn-symbols/multiple-parallel-start-event.svg' -import MultipleParallelEventSubprocessSvg from './assets/bpmn-symbols/multiple-parallel-event-subprocess.svg' -import MultipleParallelEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg' -import MultipleParallelCatchEventSvg from './assets/bpmn-symbols/multiple-parallel-catch-event.svg' -import MultipleParallelBoundaryEventSvg from './assets/bpmn-symbols/multiple-parallel-boundary-event.svg' -import MultipleParallelBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg' - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- [Event coverage table (cells were rendered from the SVG symbols imported above): rows None, Message, Timer, Error, Signal, Conditional, Escalation, Compensation, Cancel, Termination, Link, Multiple, and Multiple Parallel; column groups Start (Normal, Event Subprocess, Event Subprocess non-interrupting), Intermediate (Catch, Boundary, Boundary non-interrupting, Throw), and End.]
    - diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/bpmn-primer.md b/versioned_docs/version-1.3/components/modeler/bpmn/bpmn-primer.md deleted file mode 100644 index 9e55e33455c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/bpmn-primer.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -id: bpmn-primer -title: "BPMN primer" ---- - -import ReactPlayer from 'react-player' - -Business Process Model And Notation 2.0 (BPMN) is an industry standard for process modeling and execution. A BPMN process is an XML document that has a visual representation. For example, here is a BPMN process: - -![process](assets/process.png) - -
    - The corresponding XML -

    - -```xml - - - - - SequenceFlow_1bq1azi - - - - - - SequenceFlow_0ojoaqz - - - - - - - SequenceFlow_1bq1azi - SequenceFlow_09hqjpg - - - - - - SequenceFlow_09hqjpg - SequenceFlow_1ea1mpb - - - - - - SequenceFlow_1ea1mpb - SequenceFlow_0ojoaqz - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -

    -
    - -This duality makes BPMN very powerful. The XML document contains all the necessary information to be interpreted by workflow engines and modeling tools like Zeebe. At the same time, the visual representation contains just enough information to be quickly understood by humans, even when they are non-technical people. The BPMN model is source code and documentation in one artifact. - -The following is an introduction to BPMN 2.0, its elements, and their execution semantics. It tries to briefly provide an intuitive understanding of BPMN's power, but does not cover the entire feature set. For more exhaustive BPMN resources, see the [reference links](#additional-resources) at the end of this section. - -## Modeling BPMN diagrams - -The best tool for modeling BPMN diagrams for Zeebe is [Modeler](../about.md). - -![overview](./assets/modeler.gif) - -- [Download page](https://camunda.com/download/modeler/) -- [Source code repository](https://github.com/camunda/camunda-modeler) - -## BPMN elements - -### Sequence flow: Controlling the flow of execution - -A core concept of BPMN is a **sequence flow** that defines the order in which steps in the process happen. In BPMN's visual representation, a sequence flow is an arrow connecting two elements. The direction of the arrow indicates their order of execution. - -![sequence flow](./assets/sequenceflow.png) - -You can think of process execution as tokens running through the process model. When a process is started, a token is created at the beginning of the model and advances with every completed step. When the token reaches the end of the process, it is consumed and the process instance ends. Zeebe's task is to drive the token and to make sure the job workers are invoked whenever necessary. - -
    - -
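To make the token description above concrete, here is a minimal sketch of starting a process instance with the Zeebe Java client. It is an illustration only: the gateway address, the process id `order-process`, and the variable are assumptions, not values taken from these docs.

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.ProcessInstanceEvent;
import java.util.Map;

public class StartOrderProcess {
  public static void main(String[] args) {
    // Assumption: a Zeebe gateway is reachable at localhost:26500 without TLS.
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build()) {

      // Creating an instance places a token at the process's start event.
      ProcessInstanceEvent instance = client.newCreateInstanceCommand()
          .bpmnProcessId("order-process") // illustrative BPMN process id
          .latestVersion()
          .variables(Map.of("orderId", "order-123"))
          .send()
          .join();

      System.out.println("Started instance " + instance.getProcessInstanceKey());
    }
  }
}
```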
    - -### Tasks: Units of work - -The basic elements of BPMN processes are tasks; these are atomic units of work composed to create a meaningful result. Whenever a token reaches a task, the token stops and Zeebe creates a job and notifies a registered worker to perform work. When that handler signals completion, the token continues on the outgoing sequence flow. - -
    - -
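As a hedged illustration of the worker hand-off described above (not part of the original page): a minimal job worker sketch using the Zeebe Java client, assuming a plaintext gateway on localhost:26500 and an illustrative job type `collect-money`.

```java
import io.camunda.zeebe.client.ZeebeClient;
import java.util.Map;

public class CollectMoneyWorker {
  public static void main(String[] args) throws InterruptedException {
    // Assumption: a Zeebe gateway is reachable at localhost:26500 without TLS.
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build()) {

      // Subscribe to jobs created for service tasks with the illustrative type "collect-money".
      client.newWorker()
          .jobType("collect-money")
          .handler((jobClient, job) ->
              // Completing the job lets the token continue on the outgoing sequence flow.
              jobClient.newCompleteCommand(job.getKey())
                  .variables(Map.of("paymentConfirmed", true))
                  .send()
                  .join())
          .open();

      Thread.sleep(60_000); // keep the worker alive long enough to pick up jobs
    }
  }
}
```

The same worker pattern applies to the other job-based task types covered later in these pages (for example, business rule tasks, which behave like service tasks).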
    - -Choosing the granularity of a task is up to the person modeling the process. For example, the activity of processing an order can be modeled as a single _Process Order_ task, or as three individual tasks _Collect Money_, _Fetch Items_, _Ship Parcel_. If you use Zeebe to orchestrate microservices, one task can represent one microservice invocation. - -See the [tasks](tasks.md) section on which types of tasks are currently supported and how to use them. - -### Gateways: Steering flow - -Gateways are elements that route tokens in more complex patterns than plain sequence flow. - -BPMN's **exclusive gateway** chooses one sequence flow out of many based on data: - -
    - -
    - -BPMN's **parallel gateway** generates new tokens by activating multiple sequence flows in parallel: - -
    - -
    - -See the [gateways](gateways.md) section on which types of gateways are currently supported and how to use them. - -### Events: Waiting for something to happen - -**Events** in BPMN represent things that _happen_. A process can react to events (_catching_ event) as well as emit events (_throwing_ event). For example: - -
    - -
    - -The circle with the envelope symbol is a catching message event. It makes the token continue as soon as a message is received. The XML representation of the process contains the criteria for which kind of message triggers continuation. - -Events can be added to the process in various ways. Not only can they be used to make a token wait at a certain point, but also for interrupting a token's progress. - -See the [events](events.md) section on which types of events are currently supported and how to use them. - -### Subprocesses: Grouping elements - -**Subprocesses** are element containers that allow defining common functionality. For example, we can attach an event to a sub process's border: - -![payload](./assets/sub-process.gif) - -When the event is triggered, the subprocess is interrupted, regardless which of its elements is currently active. - -See the [subprocesses](subprocesses.md) section on which types of subprocesses are currently supported and how to use them. - -## Additional resources - -- [BPMN specification](http://www.bpmn.org/) -- [BPMN tutorial](https://camunda.com/bpmn/) -- [Full BPMN reference](https://camunda.com/bpmn/reference/) -- [BPMN book](https://www.amazon.com/dp/1086302095/) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/bpmn.md b/versioned_docs/version-1.3/components/modeler/bpmn/bpmn.md deleted file mode 100644 index 2036060880c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/bpmn.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: modeler-bpmn -title: BPMN in Modeler -description: Let's start modeling by creating a BPMN diagram. ---- - -# Create a BPMN 2.0 diagram - -To start modeling, create a BPMN 2.0 diagram by selecting **Create New Diagram** in the **Diagrams** tab for your process engine in the top-level menu. - -:::note -If working in Camunda Platform, select **Create diagram > Create new BPMN diagram**. -::: - -:::note -BPMN diagrams must be created for the process engine they intend to be deployed on. You cannot run a BPMN diagram modeled for Camunda Platform in Camunda Cloud, or vice versa, at this time. -::: - -## Start modeling - -![Start Modeling](./assets/quickstart-2.png) - -Now you can start to create a BPMN 2.0 model. Add the desired elements from the palette on the left side of the page by dragging and dropping them onto the diagram canvas. - -Alternatively, you can add new elements by using the context menu that appears when you select an element in the diagram. Using the wrench icon in the context menu, you can change the type of an element in place. - -## Demo - -![Demo](./assets/demo.gif) - -The demo above shows how to create more BPMN 2.0 elements like lanes, task types, and event definitions. - -## BPMN 2.0 coverage - -Modeler [covers all BPMN 2.0 elements](/docs/components/modeler/bpmn/bpmn-coverage/) for modeling processes and collaborations. - -## BPMN 2.0 properties for execution - -![Save BPMN Diagram](./assets/quickstart-3.png) - -In the properties panel on the right side, view and edit attributes that apply to the selected element. - -![Save BPMN Diagram](./assets/quickstart-4.png) - -The panel can be hidden and displayed by clicking the tab on its left border. - -## Save a diagram - -To save your state of work, click **Save**. - -:::note -To save your state of work in Camunda Platform, click **File > Save File As...** in the top-level menu. Then, select a location on your file system to store the diagram in the BPMN 2.0 XML format. 
You can load that file again by clicking **File > Open File...**. -::: diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/business-rule-tasks/assets/business-rule-task.png b/versioned_docs/version-1.3/components/modeler/bpmn/business-rule-tasks/assets/business-rule-task.png deleted file mode 100644 index 3091a8948c1..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/business-rule-tasks/assets/business-rule-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md deleted file mode 100644 index 6814709b6ec..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -id: business-rule-tasks -title: "Business rule tasks" -description: "A business rule task is used to model the evaluation of a business rule." ---- - -A business rule task is used to model the evaluation of a business rule; for example, a decision -modeled in [Decision Model and Notation](https://www.omg.org/dmn/) (DMN). - -![task](assets/business-rule-task.png) - -Business rule tasks behave exactly like [service tasks](/components/modeler/bpmn/service-tasks/service-tasks.md). Both -task types are based on jobs and [job workers](/components/concepts/job-workers.md). - -The differences between these task types are the visual representation (i.e. the task marker) and -the semantics for the model. - -When a process instance enters a business rule task, it creates a corresponding job and waits for -its completion. A job worker should request jobs of this job type and process them. When the job is -completed, the process instance continues. - -:::note -Jobs for business rule tasks are not processed by Zeebe itself. To process them, you must provide a job worker. -::: - -## Defining a task - -A business rule task must define a [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) the same way as a service task does. This -specifies the type of job workers should subscribe to (e.g. DMN). - -Use [task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) to pass static parameters to the job -worker (e.g. the key of the decision to evaluate). - -Define [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -the [same way as a service task does](/components/modeler/bpmn/service-tasks/service-tasks.md#variable-mappings) -to transform the variables passed to the job worker, or to customize how the variables of the job merge. - -## Additional resources - -:::tip Community Extension - -Take a look at the [Zeebe DMN Worker](https://github.com/camunda-community-hub/zeebe-dmn-worker). -This is a community extension providing a job worker to evaluate DMN decisions. You can run it, or -use it as a blueprint for your own job worker. 
- -::: - -### XML representation - -A business rule task with a custom header: - -```xml - - - - - - - - -``` - -### References - -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/bpmn-modeler-call-activity.gif b/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/bpmn-modeler-call-activity.gif deleted file mode 100644 index 515cd46ccf7..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/bpmn-modeler-call-activity.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/call-activities-boundary-events.png b/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/call-activities-boundary-events.png deleted file mode 100644 index 578f51f62f6..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/call-activities-boundary-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/call-activities-example.png b/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/call-activities-example.png deleted file mode 100644 index f765d428bbb..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/assets/call-activities-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/call-activities.md b/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/call-activities.md deleted file mode 100644 index 2b69b369dab..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/call-activities/call-activities.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: call-activities -title: "Call activities" -description: "A call activity (or reusable subprocess) allows you to call and invoke another process as part of this process." ---- - -A call activity (or reusable subprocess) allows you to call and invoke another process as part of this process. It's similar to an [embedded subprocess](../embedded-subprocesses/embedded-subprocesses.md), but the process is externalized (i.e. stored as separated BPMN) and can be invoked by different processes. - -![call-activity](assets/call-activities-example.png) - -When a call activity is entered, a new process instance of the referenced process is created. The new process instance is activated at the **none start event**. The process can have start events of other types, but they are ignored. - -When the created process instance is completed, the call activity is left and the outgoing sequence flow is taken. - -## Defining the called process - -A call activity must define the BPMN process id of the called process as `processId`. - -The new instance of the defined process is created by its **latest version** at the point when the call activity is activated. - -Usually, the `processId` is defined as a static value (e.g. `shipping-process`), but it can also be defined as [expression](/components/concepts/expressions.md) (e.g. `= "shipping-" + tenantId`). The expression is evaluated on activating the call activity and must result in a `string`. - -## Boundary events - -![call-activity-boundary-event](assets/call-activities-boundary-events.png) - -Interrupting and non-interrupting boundary events can be attached to a call activity. 
- -When an interrupting boundary event is triggered, the call activity and the created process instance are terminated. The variables of the created process instance are not propagated to the call activity. - -When a non-interrupting boundary event is triggered, the created process instance is not affected. The activities at the outgoing path have no access to the variables of the created process instance since they are bound to the other process instance. - -## Variable mappings - -When the call activity is activated, all variables of the call activity scope are copied to the created process instance. - -Input mappings can be used to create new local variables in the scope of the call activity. These variables are also copied to the created process instance. - -If the attribute `propagateAllChildVariables` is set (default: `true`), all variables of the created process instance are propagated to the call activity. This behavior can be customized by defining output mappings at the call activity. The output mappings are applied on completing the call activity and only those variables that are defined in the output mappings are propagated. - -It's recommended to disable the attribute `propagateAllChildVariables` or define output mappings if the call activity is in a parallel flow (e.g. when it is marked as [parallel multi-instance](../multi-instance/multi-instance.md#variable-mappings)). Otherwise, variables can be accidentally overridden when they are changed in the parallel flow. - -## Additional resources - -### XML representation - -A call activity with static process id and propagation of all child variables turned on: - -```xml - - - - - -``` - -### References - -- [Expressions](/components/concepts/expressions.md) -- [Variable scopes](/components/concepts/variables.md#variable-scopes) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/data-flow.md b/versioned_docs/version-1.3/components/modeler/bpmn/data-flow.md deleted file mode 100644 index 1f46377997d..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/data-flow.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: data-flow -title: "Data flow" ---- - -Every BPMN process instance can have one or more variables. - -Variables are key-value-pairs and hold the contextual data of the process instance required by job workers to do their work, or to decide which sequence flows to take. They can be provided when a process instance is created, when a job is completed, and when a message is correlated. - -![data-flow](assets/data-flow.png) - -## Job workers - -By default, a job worker gets all variables of a process instance; it can limit the data by -providing a list of required variables as **fetchVariables**. - -The worker uses the variables to do its work. When the work is done, it completes the job. If the -result of the work is needed by follow-up tasks, the worker sets the variables while completing -the job. These variables [merge](/components/concepts/variables.md#variable-propagation) into the -process instance. - -![job-worker](assets/data-flow-job-worker.png) - -If the job worker expects the variables in a different format or under different names, the variables can be transformed by defining **input mappings** in the process. **Output mappings** can be used to transform the job variables before merging them into the process instance. - -## Variable scopes vs. 
token-based data - -A process can have concurrent paths; for example, when using a parallel gateway. When the execution reaches the parallel gateway, new tokens are created which execute the following paths concurrently. - -Since the variables are part of the process instance and not of the token, they can be read globally from any token. If a token adds a variable or modifies the value of a variable, the changes are also visible to concurrent tokens. - -![variable-scopes](assets/variable-scopes.png) - -The visibility of variables is defined by the **variable scopes** of the process. - -## Concurrency considerations - -When multiple active activities exist in a process instance (i.e. there is a form of concurrent -execution like usage of a parallel gateway, multiple outgoing sequence flows, or a parallel -multi-instance marker), you may need to take extra care in dealing with variables. When variables -are altered by one activity, it might also be accessed and altered by another at the same time. Race -conditions can occur in such processes. - -We recommend taking care when writing variables in a parallel flow. Make sure the variables are -written to the correct [variable scope](/components/concepts/variables.md#variable-scopes) using variable -mappings and make sure to complete jobs and publish messages only with the minimum required -variables. - -These type of problems can be avoided by: - -- Passing only updated variables -- Using output variable mappings to customize the variable propagation -- Using an embedded subprocess and input variable mappings to limit the visibility and propagation of variables - -## Additional resources - -- [Job handling](/components/concepts/job-workers.md) -- [Variables](/components/concepts/variables.md) -- [Input/output variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Variable scopes](/components/concepts/variables.md#variable-scopes) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.gif b/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.gif deleted file mode 100644 index 321a73efae5..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.png b/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.png deleted file mode 100644 index 369251e6636..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md b/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md deleted file mode 100644 index a7dcc34efaf..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: embedded-subprocesses -title: "Embedded subprocess" -description: "An embedded subprocess allows you to group elements of the process." ---- - -An embedded subprocess allows you to group elements of the process. - -![embedded-subprocess](assets/embedded-subprocess.png) - -An embedded subprocess must have exactly **one** none start event. 
Other start events are not allowed. - -When an embedded subprocess is entered, the start event is activated. The subprocess stays active as long as one containing element is active. When the last element is completed, the subprocess is completed and the outgoing sequence flow is taken. - -Embedded subprocesses are often used together with **boundary events**. One or more boundary events can be attached to a subprocess. When an interrupting boundary event is triggered, the entire subprocess (including all active elements) is terminated. - -## Variable mappings - -Input mappings can be used to create new local variables in the scope of the subprocess. These variables are only visible within the subprocess. - -By default, the local variables of the subprocess are not propagated (i.e. they are removed with the scope.) This behavior can be customized by defining output mappings at the subprocess. The output mappings are applied on completing the subprocess. - -## Additional resources - -### XML representation - -An embedded subprocess with a start event: - -```xml - - - ... more contained elements ... - -``` - -### References - -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/bpmn-modeler-error-events.gif b/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/bpmn-modeler-error-events.gif deleted file mode 100644 index cef0a5d91af..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/bpmn-modeler-error-events.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-catch-events.bpmn b/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-catch-events.bpmn deleted file mode 100644 index 34cc1983d7e..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-catch-events.bpmn +++ /dev/null @@ -1,129 +0,0 @@ - - - - - Flow_0q5iltk - - - Flow_0q5iltk - Flow_0gp4ks5 - - - - Flow_0qyyem4 - - - - Flow_0gp4ks5 - Flow_1o6eqqj - Flow_05dg4v3 - - - - Flow_05dg4v3 - Flow_0qyyem4 - - - - - Flow_13dpc2k - - - - Flow_13dpc2k - Flow_1kkycez - - - - Flow_1kkycez - - - - - Flow_1c3ds97 - - - - Flow_1c3ds97 - Flow_1o6eqqj - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-catch-events.png b/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-catch-events.png deleted file mode 100644 index adf87fb5cc0..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-catch-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-events.bpmn b/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-events.bpmn deleted file mode 100644 index dbbf3cba4bf..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-events.bpmn +++ /dev/null @@ -1,92 +0,0 @@ - - - - - Flow_0qlmji5 - - - - Flow_1r5d8dq - Flow_0e9bbrx - Flow_1a05it6 - - - - - Flow_0qlmji5 - Flow_1r5d8dq - - - Flow_1a05it6 - Flow_1jdmx2e - - - Flow_0viou3d - - - - Flow_0viou3d - Flow_0e9bbrx - - - - - Flow_1jdmx2e - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-events.png b/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-events.png deleted file mode 100644 index ebde63ce2e0..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-throw-events.png b/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-throw-events.png deleted file mode 100644 index d29bf7f811d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/assets/error-throw-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/error-events.md b/versioned_docs/version-1.3/components/modeler/bpmn/error-events/error-events.md deleted file mode 100644 index 61764501186..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/error-events/error-events.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -id: error-events -title: "Error events" -decription: "BPMN error events allow a process model to react to errors within a task." ---- - -In process automation, you often encounter deviations from the the default scenario. One way to resolve these deviations is using a BPMN error event, which allows a process model to react to errors within a task. - -For example, if an invalid credit card is used in the process below, the process takes a different path than usual and uses the default payment method to collect money. - -![process with error event](assets/error-events.png) - -## Defining the error - -In BPMN, **errors** define possible errors that can occur. **Error events** are elements in the process referring to defined errors. An error can be referenced by one or more error events. - -An error must define an `errorCode` (e.g. `InvalidCreditCard`). The `errorCode` is a `string` used to match a thrown error to the error catch events. - -## Throwing the error - -An error can be thrown within the process using an error **end event**. - -![process with error throw event](assets/error-throw-events.png) - -Alternatively, you can inform Zeebe that a business error occurred using a **client command**. This throw error client command can only be used while processing a job. - -In addition to throwing the error, this also disables the job and stops it from being activated or completed by other job workers. See the [gRPC command](/apis-tools/grpc.md#throwerror-rpc) for details. - -## Catching the error - -A thrown error can be caught by an error catch event, specifically using an error **boundary event** or an error **event subprocess**. - -![process with error catch event](assets/error-catch-events.png) - -Starting at the scope where the error was thrown, the error code is matched against the attached error boundary events and error event sub processes at that level. An error is caught by the first event in the scope hierarchy matching the error code. At each scope, the error is either caught, or propagated to the parent scope. - -If the process instance is created via call activity, the error can also be caught in the calling parent process instance. - -Error boundary events and error event subprocesses must be interrupting. 
This means the process instance will not continue along the regular path, but instead follow the path that leads out of the catching error event. - -If the error is thrown for a job, the associated task is terminated first. To continue the execution, the error boundary event or error event subprocess that caught the error is activated. - -## Unhandled errors - -When an error is thrown and not caught, an **incident** (i.e. `Unhandled error event`) is raised to indicate the failure. The incident is attached to the corresponding element where the error was thrown (i.e. the task of the processed job or the error end event). - -When you resolve the incident attached to a task, it ignores the error, re-enables the job, and allows it to be activated and completed by a job worker once again. - -The incident attached to an error end event cannot be resolved by a user because the failure is in the process itself. The process cannot be changed to catch the error for this process instance. - -## Business error vs. technical error - -In real life, you’ll also have to deal with technical problems that you don't want to treat using error events. - -Suppose the credit card service becomes temporarily unavailable. You don't want to model the retrying, as you would have to add it to each and every service task. This will bloat the visual model and confuse business personnel. Instead, either retry or fall back to incidents as described above. This is hidden in the visual. - -In this context, we found the terms **business error** and **technical error** can be confusing, as they emphasize the source of the error too much. This can lead to long discussions about whether a certain problem is technical or not, and if you are allowed to see technical errors in a business process model. - -It's much more important to look at how you *react* to certain errors. Even a technical problem can qualify for a business reaction. For example, you could decide to continue a process in the event that a scoring service is not available, and simply give every customer a good rating instead of blocking progress. The error is clearly technical, but the reaction is a business decision. - -In general, we recommend talking about business reactions, which are modeled in your process, and technical reactions, which are handled generically using retries or incidents. 
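As a sketch of the throw-error client command mentioned above (an illustration, not the page's own example): a job handler using the Zeebe Java client that either completes the job or raises the business error. The job variables, the error code `InvalidCreditCard`, and the validation logic are assumptions.

```java
import io.camunda.zeebe.client.api.response.ActivatedJob;
import io.camunda.zeebe.client.api.worker.JobClient;
import io.camunda.zeebe.client.api.worker.JobHandler;

public class CollectMoneyHandler implements JobHandler {
  @Override
  public void handle(JobClient jobClient, ActivatedJob job) {
    // Illustrative check; a real worker would call the payment provider here.
    Object cardNumber = job.getVariablesAsMap().get("cardNumber");

    if (cardNumber != null) {
      jobClient.newCompleteCommand(job.getKey()).send().join();
    } else {
      // Raise a business error: the job is disabled, and an error catch event
      // with a matching error code can take over.
      jobClient.newThrowErrorCommand(job.getKey())
          .errorCode("InvalidCreditCard")
          .errorMessage("No credit card number provided")
          .send()
          .join();
    }
  }
}
```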
- -## Additional resources - -### XML representation - -A boundary error event: - -```xml - - - - - - -``` - -### References - -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.gif b/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.gif deleted file mode 100644 index 351494ed46c..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.png b/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.png deleted file mode 100644 index 5d97d9b781a..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/event-based-gateways.md b/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/event-based-gateways.md deleted file mode 100644 index 4a6de70afae..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/event-based-gateways/event-based-gateways.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: event-based-gateways -title: "Event-based gateway" -description: "An event-based gateway allows you to make a decision based on events." ---- - -An event-based gateway allows you to make a decision based on events. - -![process](assets/event-based-gateway.png) - -An event-based gateway must have at least **two** outgoing sequence flows. Each sequence flow must be connected to an intermediate catch event of type **timer or message**. - -When an event-based gateway is entered, the process instance waits at the gateway until one of the events is triggered. When the first event is triggered, the outgoing sequence flow of this event is taken. No other events of the gateway can be triggered afterward.
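To complement the event-based gateway description above, here is a hedged sketch of publishing the message that such a gateway's message catch event could wait for. The message name, correlation key, and variables are illustrative; the actual correlation is defined in the BPMN model.

```java
import io.camunda.zeebe.client.ZeebeClient;
import java.time.Duration;
import java.util.Map;

public class PublishPaymentMessage {
  public static void main(String[] args) {
    // Assumption: a Zeebe gateway is reachable at localhost:26500 without TLS.
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build()) {

      // If a process instance waits at the gateway's message catch event with a
      // matching name and correlation key, this message triggers that branch;
      // otherwise the timer branch may fire first.
      client.newPublishMessageCommand()
          .messageName("payment-details-updated") // illustrative message name
          .correlationKey("order-123")            // illustrative correlation key
          .timeToLive(Duration.ofMinutes(10))
          .variables(Map.of("paymentMethod", "paypal"))
          .send()
          .join();
    }
  }
}
```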
- -## Additional resources - -### XML representation - -An event-based gateway with two outgoing sequence flows: - -```xml - - - - - - - - - - - - - PT1H - - -``` - -### References - -* [Timer events](../timer-events/timer-events.md) -* [Message events](../message-events/message-events.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/assets/event-subprocess.png b/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/assets/event-subprocess.png deleted file mode 100644 index 5942d9b7c18..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/assets/event-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/assets/zeebe-modeler-event-subprocess.gif b/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/assets/zeebe-modeler-event-subprocess.gif deleted file mode 100644 index 65778cfc275..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/assets/zeebe-modeler-event-subprocess.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/event-subprocesses.md b/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/event-subprocesses.md deleted file mode 100644 index 62c695e4cd8..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/event-subprocesses/event-subprocesses.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: event-subprocesses -title: "Event subprocess" -description: "An event subprocess is a subprocess triggered by an event." ---- - -An event subprocess is a subprocess triggered by an event. This can be added globally to the process, or locally inside an embedded subprocess. - -![event-subprocess](assets/event-subprocess.png) - -An event subprocess must have exactly **one** start event of one of the following types: - -- [Timer](../timer-events/timer-events.md) -- [Message](../message-events/message-events.md) -- [Error](../error-events/error-events.md) - -An event subprocess behaves like a boundary event, but is inside the scope instead of attached to the scope. Like a boundary event, the event subprocess can be interrupting or non-interrupting (indicated in BPMN by a solid or dashed border of the start event). The start event of the event subprocess can be triggered when its containing scope is activated. - -A non-interrupting event subprocess can be triggered multiple times. An interrupting event subprocess can be triggered only once. - -When an interrupting event subprocess is triggered, all active instances of its containing scope are terminated, including instances of other non-interrupting event subprocesses. - -If an event subprocess is triggered, its containing scope is not completed until the triggered instance is completed. - -## Variables - -Unlike a boundary event, an event subprocess is inside the scope. Therefore, it can access and modify all local variables of its containing scope. This is not possible with a boundary event because a boundary event is outside of the scope. - -Input mappings can be used to create new local variables in the scope of the event subprocess. These variables are only visible within the event subprocess. - -By default, the local variables of the event subprocess are not propagated (i.e. removed with the scope). This behavior can be customized by defining output mappings at the event subprocess. 
The output mappings are applied on completion of the event subprocess. - -## Additional resources - -### XML representation - -An event subprocess with an interrupting timer start event: - -```xml - - - - PT5M - - ... other elements - -``` - -### References - -- [Embedded subprocess](../embedded-subprocesses/embedded-subprocesses.md) -- [Variable scopes](/components/concepts/variables.md#variable-scopes) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/events.md b/versioned_docs/version-1.3/components/modeler/bpmn/events.md deleted file mode 100644 index ceb4726363c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/events.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: events -title: "Overview" -description: "This document outlines an overview of general events, intermediate events, and boundary events." ---- - -Currently supported events: - -* [None events](none-events/none-events.md) -* [Message events](message-events/message-events.md) -* [Timer events](timer-events/timer-events.md) -* [Error events](error-events/error-events.md) - -## Events in general - -Events in BPMN can be **thrown** (i.e. sent), or **caught** (i.e. received), respectively referred to as **throw** or **catch** events (e.g. `message throw event`, `timer catch event`). - -Additionally, a distinction is made between start, intermediate, and end events: - -* **Start events** (catch events, as they can only react to something) are used to denote the beginning of a process or subprocess. -* **End events** (throw events, as they indicate something has happened) are used to denote the end of a particular sequence flow. -* **Intermediate events** can be used to indicate that something has happened (i.e. intermediate throw events), or to wait and react to certain events (i.e. intermediate catch events). - -Intermediate catch events can be inserted into your process in two different contexts: normal flow, or attached to an activity, and are called boundary events. - -## Intermediate events - -In normal flow, an intermediate throw event executes its event (e.g. send a message) once the token has reached it, and once done the token continues to all outgoing sequence flows. - -An intermediate catch event, however, stops the token, and waits until the event it is waiting for occurs, at which execution resumes, and the token moves on. - -## Boundary events - -Boundary events provide a way to model what should happen if an event occurs while an activity is active. For example, if a process is waiting on a user task to happen which is taking too long, an intermediate timer catch event can be attached to the task, with an outgoing sequence flow to notification task, allowing the modeler to automate and sending a reminder email to the user. - -A boundary event must be an intermediate catch event, and can be either interrupting or non-interrupting. Interrupting means that once triggered, before taking any outgoing sequence flow, the activity the event is attached to is terminated. This allows modeling timeouts where we want to prune certain execution paths if something happens (e.g. the process takes too long). 
diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.gif b/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.gif deleted file mode 100644 index c7a82ad2d7b..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.png b/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.png deleted file mode 100644 index 8ed0aa57627..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md b/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md deleted file mode 100644 index a65bd46fc3b..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: exclusive-gateways -title: "Exclusive gateway" -description: "An exclusive gateway (or XOR-gateway) allows you to make a decision based on data." ---- - -An exclusive gateway (or XOR-gateway) allows you to make a decision based on data (i.e. on process instance variables). - -![process](assets/exclusive-gateway.png) - -If an exclusive gateway has multiple outgoing sequence flows, all sequence flows except one must have a `conditionExpression` to define when the flow is taken. The gateway can have one sequence flow without `conditionExpression`, which must be defined as the default flow. - -When an exclusive gateway is entered, the `conditionExpression` is evaluated. The process instance takes the first sequence flow where the condition is fulfilled. - -If no condition is fulfilled, it takes the **default flow** of the gateway. If the gateway has no default flow, an incident is created. - -An exclusive gateway can also be used to join multiple incoming flows together and improve the readability of the BPMN. A joining gateway has a pass-through semantic and doesn't merge the incoming concurrent flows like a parallel gateway. - -## Conditions - -A `conditionExpression` defines when a flow is taken. It is a [boolean expression](/components/concepts/expressions.md#boolean-expressions) that can access the process instance variables and compare them with literals or other variables. The condition is fulfilled when the expression returns `true`. - -Multiple boolean values or comparisons can be combined as disjunction (`or`) or conjunction (`and`). 
- -For example: - -```feel -= totalPrice > 100 - -= order.customer = "Paul" - -= orderCount > 15 or totalPrice > 50 - -= valid and orderCount > 0 -``` - -## Additional resources - -### XML representation - -An exclusive gateway with two outgoing sequence flows: - -```xml - - - - - = totalPrice > 100 - - - - -``` - -### References - -- [Expressions](/components/concepts/expressions.md) -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/gateways.md b/versioned_docs/version-1.3/components/modeler/bpmn/gateways.md deleted file mode 100644 index 5a801db3bae..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/gateways.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: gateways -title: "Overview" -description: "This document outlines an overview of currently supported gateways." ---- - -Currently supported elements: - -* [Exclusive gateways](exclusive-gateways/exclusive-gateways.md) -* [Parallel gateways](parallel-gateways/parallel-gateways.md) -* [Event-based gateways](event-based-gateways/event-based-gateways.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/manual-tasks/assets/manual-task.png b/versioned_docs/version-1.3/components/modeler/bpmn/manual-tasks/assets/manual-task.png deleted file mode 100644 index ce955bd1303..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/manual-tasks/assets/manual-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/manual-tasks/manual-tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/manual-tasks/manual-tasks.md deleted file mode 100644 index f5e855bc42c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/manual-tasks/manual-tasks.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: manual-tasks -title: "Manual tasks" -description: "A manual task defines a task that is external to the BPM engine." ---- - -A manual task defines a task that is external to the BPM engine. This is used to model work that is done -by somebody who the engine does not need to know of and there is no known system or UI interface. - -For the engine, a manual task is handled as a pass-through activity, automatically continuing the -process at the moment the process execution arrives. - -![task](assets/manual-task.png) - -Manual tasks have no real benefit for automating processes. Manual tasks instead provide insights into the tasks -that are performed outside of the process engine. - -## Additional resources - -### XML representation - -A manual task: -```xml - -``` \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/markers.md b/versioned_docs/version-1.3/components/modeler/bpmn/markers.md deleted file mode 100644 index eac09862bb9..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/markers.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -id: markers -title: "Overview" -description: "This document outlines an overview of supported markers." 
---- - -Currently supported markers: - -* [Multi-instance](multi-instance/multi-instance.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-catch-event-example.png b/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-catch-event-example.png deleted file mode 100644 index ab18e08b0df..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-catch-event-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-catch-event.png b/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-catch-event.png deleted file mode 100644 index fe3c90e99a9..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-catch-event.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-event.gif b/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-event.gif deleted file mode 100644 index f2929004cd2..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-event.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-events.png b/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-events.png deleted file mode 100644 index d8bf47512bf..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/assets/message-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/message-events.md b/versioned_docs/version-1.3/components/modeler/bpmn/message-events/message-events.md deleted file mode 100644 index 50540ebb1c3..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/message-events/message-events.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -id: message-events -title: "Message events" -description: "Message events are events which reference a message; they are used to wait until a proper message is received." ---- - -Message events are events which reference a message; they are used to wait until a proper message is received. - -![process](assets/message-events.png) - -## Message start events - -A process can have one or more message start events (besides other types of start events). Each of the message events must have a unique message name. - -When a process is deployed, it creates a message subscription for each message start event. Message subscriptions of the previous version of the process (based on the BPMN process id) are closed. - -When the message subscription is created, a message can be correlated to the start event if the message name matches. On correlating the message, a new process instance is created and the corresponding message start event is activated. - -### Message correlation - -When the message subscription is created, a message can be correlated to the start event if the message name matches. On correlating the message, a new process instance is created and the corresponding message start event is activated. - -Messages are **not** correlated if they were published before the process was deployed or if a new version of the process is deployed without a proper start event. - -The `correlationKey` of a published message can be used to control the process instance creation. 
- -- If an instance of this process is active (independently from its version) and it was triggered by a message with the same `correlationKey`, the message is **not** correlated and no new instance is created. If the message has a time-to-live (TTL) > 0, it is buffered. -- When the active process instance is completed or terminated and a message with the same `correlationKey` and a matching message name is buffered (that is, TTL > 0), this message is correlated and a new instance of the latest version of the process is created. - -If the `correlationKey` of a message is empty, it creates a new process instance and does not check if an instance is already active. - -:::note - -You do not specify a `correlationKey` for a message start event in the BPMN model when designing a process. - -- When an application sends a message that is caught by a message start event, the application can specify a `correlationKey` in the message. -- If a message caught by a start event contains a `correlationKey` value, the created process is tagged with that `correlationKey` value. -- Follow-up messages are then checked against this `correlationKey` value (that is, is there an active process instance that was started by a message with the same `correlationKey`?). - -::: - -## Intermediate message catch events - -When an intermediate message catch event is entered, a corresponding message subscription is created. The process instance stops at this point and waits until the message is correlated. When a message is correlated, the catch event is completed and the process instance continues. - -:::note -An alternative to intermediate message catch events is a [receive task](../receive-tasks/receive-tasks.md), which behaves the same but can be used together with boundary events. -::: - -## Message boundary events - -An activity can have one or more message boundary events. Each of the message events must have a unique message name. - -When the activity is entered, it creates a corresponding message subscription for each boundary message event. If a non-interrupting boundary event is triggered, the activity is not terminated and multiple messages can be correlated. - -## Message throw events - -A process can contain intermediate message throw events or message end events to model the -publication of a message to an external system; for example, to a Kafka topic. - -Currently, intermediate message throw events and message end events behave exactly -like [service tasks](../service-tasks/service-tasks.md) or [send tasks](../send-tasks/send-tasks.md) -, and have the same job-related properties (e.g. job type, custom headers, etc.) The message throw -events and the tasks are based on jobs -and [job workers](../../../../components/concepts/job-workers.md). The differences between the message -throw events and the tasks are the visual representation and the semantics for the model. Read more -about the [job properties](../../../../components/concepts/job-workers.md). - -When a process instance enters a message throw event, it creates a corresponding job and waits for -its completion. A job worker should request jobs of this job type and process them. When the job is -complete, the process instance continues or completes if it is a message end event. - -:::note -Message throw events are not processed by Zeebe itself (i.e. to correlate a message to a message -catch event). Instead, it creates jobs with the defined job type. To process them, provide a job -worker. 
-::: - -## Messages - -A message can be referenced by one or more message events. It must define the name of the message (e.g. `Money collected`) and the `correlationKey` expression (e.g. `= orderId`). If the message is only referenced by message start events, the `correlationKey` is not required. - -Usually, the name of the message is defined as a static value (e.g. `order canceled`), but it can also be defined as [expression](/components/concepts/expressions.md) (e.g. `= "order " + awaitingAction`). If the expression belongs to a message start event of the process, it is evaluated on deploying the process. Otherwise, it is evaluated on activating the message event. The evaluation must result in a `string`. - -The `correlationKey` is an expression that usually [accesses a variable](/components/concepts/expressions.md#access-variables) of the process instance that holds the correlation key of the message. The expression is evaluated on activating the message event and must result either in a `string` or in a `number`. - -To correlate a message to the message event, the message is published with the defined name (e.g. `Money collected`) and the **value** of the `correlationKey` expression. For example, if the process instance has a variable `orderId` with value `"order-123"`, the message must be published with the correlation key `"order-123"`. - -## Variable mappings - -By default, all message variables are merged into the process instance. This behavior can be customized by defining an output mapping at the message catch event. - -Visit the documentation regarding [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) for more information on this topic. - -## Additional resources - -### XML representation - -A message start event with message definition: - -```xml - - - - - -``` - -An intermediate message catch event with message definition: - -```xml - - - - - - - - - -``` - -A boundary message event: - -```xml - - - -``` - -### References - -- [Message correlation](/components/concepts/messages.md) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Incidents](/components/concepts/incidents.md) -- [Job handling](/components/concepts/job-workers.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/bpmn-modeler-multi-instance.gif b/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/bpmn-modeler-multi-instance.gif deleted file mode 100644 index 6dcc71d791f..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/bpmn-modeler-multi-instance.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-boundary-event.png b/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-boundary-event.png deleted file mode 100644 index 40e112ba8ca..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-boundary-event.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-example.png b/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-example.png deleted file mode 100644 index 4d0b1b1ed19..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-example.png and 
/dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-parallel.png b/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-parallel.png deleted file mode 100644 index b1bec0a99e4..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-parallel.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-sequential.png b/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-sequential.png deleted file mode 100644 index 9a40ce49737..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/assets/multi-instance-sequential.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/multi-instance.md b/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/multi-instance.md deleted file mode 100644 index 1fce59f4093..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/multi-instance/multi-instance.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -id: multi-instance -title: "Multi-Instance" -description: "A multi-instance activity is executed multiple times - once for each element of a given collection." ---- - -A multi-instance activity is executed multiple times - once for each element of a given collection (like a _foreach_ loop in a programming language). - -The following activities can be marked as multi-instance: - -- [Service tasks](../service-tasks/service-tasks.md) -- [Receive tasks](../receive-tasks/receive-tasks.md) -- [Embedded subprocesses](../embedded-subprocesses/embedded-subprocesses.md) -- [Call activities](../call-activities/call-activities.md) - -![multi-instance](assets/multi-instance-example.png) - -On the execution level, a multi-instance activity has two parts: a multi-instance body, and an inner activity. The multi-instance body is the container for all instances of the inner activity. - -When the activity is entered, the multi-instance body is activated and one instance for every element of the `inputCollection` is created (sequentially or in parallel). When all instances are completed, the body is completed and the activity is left. - -## Sequential vs. parallel - -A multi-instance activity is executed either sequentially or in parallel (default). In the BPMN, a sequential multi-instance activity is displayed with three horizontal lines at the bottom. A parallel multi-instance activity is represented by three vertical lines. - -In case of a **sequential** multi-instance activity, the instances are executed one at a time. When one instance is completed, a new instance is created for the next element in the `inputCollection`. - -![sequential multi-instance](assets/multi-instance-sequential.png) - -In case of a **parallel** multi-instance activity, all instances are created when the multi-instance body is activated. The instances are executed concurrently and independently from each other. - -![parallel multi-instance](assets/multi-instance-parallel.png) - -## Defining the collection to iterate over - -A multi-instance activity must have an `inputCollection` expression that defines the collection to iterate over (e.g. `= items`). Usually, it [accesses a variable](/components/concepts/expressions.md#access-variables) of the process instance that holds the collection. 
The expression is evaluated on activating the multi-instance body. It must result in an `array` of any type (e.g. `["item-1", "item-2"]`). - -To access the current element of the `inputCollection` value within the instance, the multi-instance activity can define the `inputElement` variable (e.g. `item`). The element is stored as a local variable of the instance under the given name. - -If the `inputCollection` value is **empty**, the multi-instance body is completed immediately and no instances are created. It behaves like the activity is skipped. - -## Collecting the output - -The output of a multi-instance activity (e.g. the result of a calculation) can be collected from the instances by defining the `outputCollection` and the `outputElement` expression. - -`outputCollection` defines the name of the variable under which the collected output is stored (e.g. `results`). It is created as a local variable of the multi-instance body and is updated when an instance is completed. When the multi-instance body is completed, the variable is propagated to its parent scope. - -`outputElement` is an expression that defines the output of the instance (e.g. `= result`). Usually, it [accesses a variable](/components/concepts/expressions.md#access-variables) of the instance that holds the output value. If the expression only accesses a variable or a nested property, it's created as a **local variable** of the instance. This variable should be updated with the output value; for example, by a job worker providing a variable with the name `result`. Since the variable is defined as a local variable, it is not propagated to its parent scope and is only visible within the instance. - -When the instance is completed, the `outputElement` expression is evaluated and the result is inserted into the `outputCollection` at the same index as the `inputElement` of the `inputCollection`. Therefore, the order of the `outputCollection` is determined and matches to the `inputCollection`, even for parallel multi-instance activities. If the `outputElement` variable is not updated, `null` is inserted instead. - -If the `inputCollection` value is empty, an empty array is propagated as `outputCollection`. - -## Boundary events - -![multi-instance with boundary event](assets/multi-instance-boundary-event.png) - -Interrupting and non-interrupting boundary events can be attached to a multi-instance activity. - -When an interrupting boundary event is triggered, the multi-instance body and all active instances are terminated. The `outputCollection` variable is not propagated to the parent scope (i.e. no partial output). - -When a non-interrupting boundary event is triggered, the instances are not affected. The activities at the outgoing path have no access to the local variables since they are bound to the multi-instance activity. - -## Special multi-instance variables - -Every instance has a local variable `loopCounter`. It holds the index in the `inputCollection` of this instance, starting with `1`. - -## Variable mappings - -Input and output variable mappings can be defined at the multi-instance activity; they are applied on each instance on activating and on completing. - -The input mappings can be used to create new local variables in the scope of an instance. These variables are only visible within the instance; it is a way to restrict the visibility of variables. By default, new variables (e.g. 
provided by a job worker) are created in the scope of the process instance and are visible to all instances of the multi-instance activity as well as outside of it. - -In case of a parallel multi-instance activity, this can lead to variables that are modified by multiple instances and result in race conditions. If a variable is defined as a local variable, it is not propagated to a parent or the process instance scope and can't be modified outside the instance. - -The input mappings can access the local variables of the instance (e.g. `inputElement`, `loopCounter`); for example, to extract parts of the `inputElement` variable and apply them to separate variables. - -The output mappings can be used to update the `outputElement` variable; for example, to extract a part of the job variables. - -**Example:** We have a call activity marked as a parallel multi-instance. When the called process instance completes, its variables are [merged](/components/concepts/variables.md#variable-propagation) into the call activity's process instance. Its result is collected in the output collection variable, but this has become a race condition where each completed child instance again overwrites this same variable. We end up with a corrupted output collection. An output mapping can be used to overcome this, because it restricts which variables are merged. In the case of: - -- Parallel multi-instance call activity -- Multi-instance output element: `=output` -- Variable in the child instance that holds the result: `x` - -The output mapping on the call activity should be: - -``` -source: =x -target: output -``` - -## Completion condition - -A `completionCondition` defines whether the multi-instance body can be completed immediately when the condition is satisfied. It is a [boolean expression](/components/concepts/expressions.md#boolean-expressions) that will be evaluated each time the instance of the multi-instance body completes. Any instances that are still active are terminated and the multi-instance body is completed when the expression evaluates to `true`. - -Multiple boolean values or comparisons can be combined as disjunction (`and`) or conjunction (`or`). 
- -For example: - -```feel -= result.isSuccessful - -= count(["a", "b", "c", "d"]) > 3 - -= orderCount >= 5 and orderCount < 15 - -= orderCount > 15 or totalPrice > 50 - -= list contains([6,7], today().weekday) -``` - - -## Additional resources - -### XML representation - -A sequential multi-instance service task: - -```xml - - - - - - - = result.isSuccessful - - - -``` - -### References - -- [Variable scopes](/components/concepts/variables.md#variable-scopes) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/end-event.gif b/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/end-event.gif deleted file mode 100644 index 5ca9e152d56..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/end-event.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/none-events.png b/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/none-events.png deleted file mode 100644 index 79506b3ec75..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/none-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/start-event.gif b/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/start-event.gif deleted file mode 100644 index 80630c62aa9..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/assets/start-event.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/none-events.md b/versioned_docs/version-1.3/components/modeler/bpmn/none-events/none-events.md deleted file mode 100644 index 453ece4478c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/none-events/none-events.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: none-events -title: "None events" -description: "None events are unspecified events, also called blank events." ---- - -None events are unspecified events, also called "blank" events. - -![process](assets/none-events.png) - -## None start events - -At most, a process can have **one** none start event (besides other types of start events). - -A none start event is where the process instance or a subprocess starts when the process or the subprocess is activated. - -## None end events - -A process or subprocess can have multiple none end events. When a none end event is entered, the current execution path ends. If the process instance or subprocess has no more active execution paths, it is completed. - -If an activity has no outgoing sequence flow, it behaves the same as it would be connected to a none end event. When the activity is completed, the current execution path ends. - -## Intermediate none events (throwing) - -Intermediate none events can be used to indicate some state achieved in the process. They are especially useful for monitoring to understand how the process is doing, for example, as milestones or key performance indicators (KPIs). - -The engine itself doesn't do anything in the event, it just passes through it. - -## Variable mappings - -Start and intermediate none events can have [variable output mappings](../../../../components/concepts/variables.md#output-mappings). End events do not support this. 
- -For start events, this is often used to initialize process variables. - -## Additional resources - -### XML representation - -A none start event: -```xml - -``` - -A none end event: -```xml - -``` - -An intermediate none event: -```xml - -``` \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/assets/parallel-gateway.gif b/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/assets/parallel-gateway.gif deleted file mode 100644 index a6d1f404bd5..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/assets/parallel-gateway.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/assets/parallel-gateways.png b/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/assets/parallel-gateways.png deleted file mode 100644 index 5dd17410f98..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/assets/parallel-gateways.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/parallel-gateways.md b/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/parallel-gateways.md deleted file mode 100644 index 4c9f5881a52..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/parallel-gateways/parallel-gateways.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: parallel-gateways -title: "Parallel gateway" -description: "A parallel gateway (or AND-gateway) allows you to split the flow into concurrent paths." ---- - -A parallel gateway (or AND-gateway) allows you to split the flow into concurrent paths. - -![process](assets/parallel-gateways.png) - -When a parallel gateway with multiple outgoing sequence flows is entered, all flows are taken. The paths are executed concurrently and independently. - -The concurrent paths can be joined using a parallel gateway with multiple incoming sequence flows. The process instance waits at the parallel gateway until each incoming sequence is taken. - -:::note -The outgoing paths of the parallel gateway are executed concurrently and not parallel in the sense of parallel threads. All records of a process instance are written to the same partition (single stream processor). 
-::: - -## Additional resources - -### XML representation - -A parallel gateway with two outgoing sequence flows: - -```xml - - - - - -``` diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/assets/receive-task.gif b/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/assets/receive-task.gif deleted file mode 100644 index f5e3ade7e3b..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/assets/receive-task.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/assets/receive-tasks.png b/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/assets/receive-tasks.png deleted file mode 100644 index ce6abaac16d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/assets/receive-tasks.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/receive-tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/receive-tasks.md deleted file mode 100644 index 92995d88592..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/receive-tasks/receive-tasks.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: receive-tasks -title: "Receive tasks" -description: "Receive tasks reference a message; these are used to wait until a proper message is received." ---- - -Receive tasks reference a message; these are used to wait until a proper message is received. - -![Receive Tasks](assets/receive-tasks.png) - -When a receive task is entered, a corresponding message subscription is created. The process instance stops at this point and waits until the message is correlated. - -A message can be published using one of the Zeebe clients. When the message is correlated, the receive task is completed and the process instance continues. - -:::note -An alternative to receive tasks is [a message intermediate catch event](../message-events/message-events.md), which behaves the same way but can be used together with event-based gateways. -::: - -## Messages - -A message can be referenced by one or more receive tasks; it must define the name of the message (e.g. `Money collected`) and the `correlationKey` expression (e.g. `= orderId`). - -Usually, the name of the message is defined as a static value (e.g. `order canceled`), but it can also be defined as [expression](/components/concepts/expressions.md) (e.g. `= "order " + awaitingAction`). The expression is evaluated on activating the receive task and must result in a `string`. - -The `correlationKey` is an expression that usually [accesses a variable](/components/concepts/expressions.md#access-variables) of the process instance that holds the correlation key of the message. The expression is evaluated on activating the receive task and must result either in a `string` or `number`. - -To correlate a message to the receive task, the message is published with the defined name (e.g. `Money collected`) and the value of the `correlationKey` expression. For example, if the process instance has a variable `orderId` with value `"order-123"`, the message is published with the correlation key `"order-123"`. - -## Variable mappings - -Output variable mappings are used to customize how variables are merged into the process instance. -These can contain multiple elements that specify which variables should be mapped. -The `Process Variable Name` of an output denotes the variable name outside the activity. 
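As a sketch of the publishing side (Zeebe Java client; the correlation key value and the `amount` variable are illustrative assumptions), the variables attached to the published message are exactly what these output mappings operate on:

```java
import java.util.Map;

import io.camunda.zeebe.client.ZeebeClient;

public class CompleteMoneyCollected {

  public static void main(String[] args) {
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500") // assumed local gateway
            .usePlaintext()
            .build()) {

      // The receive task correlates on the *value* of its correlationKey expression (= orderId),
      // so the key published here must equal that value for the waiting instance.
      client
          .newPublishMessageCommand()
          .messageName("Money collected")
          .correlationKey("order-123")
          .variables(Map.of("amount", 99.95)) // merged into the instance as the output mappings define
          .send()
          .join();
    }
  }
}
```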
- -Visit our documentation on [input and output variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) for more information on this topic. - -## Additional resources - -### XML representation - -A receive task with message definition: - -```xml - - - - - - - - -``` - -### References - -- [Message correlation](/components/concepts/messages.md) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/script-tasks/assets/script-task.png b/versioned_docs/version-1.3/components/modeler/bpmn/script-tasks/assets/script-task.png deleted file mode 100644 index 11c0d1845b7..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/script-tasks/assets/script-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/script-tasks/script-tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/script-tasks/script-tasks.md deleted file mode 100644 index 832e07a82e1..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/script-tasks/script-tasks.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: script-tasks -title: "Script tasks" -description: "A script task is used to model the evaluation of a script; for example, a script written in Groovy, -JavaScript, or Python." ---- - -A script task is used to model the evaluation of a script; for example, a script written in Groovy, -JavaScript, or Python. - -![task](assets/script-task.png) - -Script tasks behave exactly like [service tasks](/components/modeler/bpmn/service-tasks/service-tasks.md). Both task -types are based on jobs and [job workers](/components/concepts/job-workers.md). The -differences between these task types are the visual representation (i.e. the task marker) and the -semantics for the model. - -When a process instance enters a script task, it creates a corresponding job and waits for its -completion. A job worker should request jobs of this job type and process them. When the job is -complete, the process instance continues. - -:::note -Jobs for script tasks are not processed by Zeebe itself. To process them, provide a job worker. -::: - -## Defining a task - -A script task must define a [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) the same way as a service task does. It specifies -the type of job workers should subscribe to (e.g. `script`). - -Use [task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) to pass static parameters to the job -worker (e.g. the script to evaluate). The community extension [Zeebe Script Worker](https://github.com/camunda-community-hub/zeebe-script-worker) requires certain attributes to be set in the task headers. - -Define [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -the [same way as a service task does](/components/modeler/bpmn/service-tasks/service-tasks.md#variable-mappings) -to transform the variables passed to the job worker, or to customize how the variables of the job merge. - -## Additional resources - -:::tip Community Extension - -Review the [Zeebe Script Worker](https://github.com/camunda-community-hub/zeebe-script-worker). This is a -community extension that provides a job worker to evaluate scripts. You can run it, or use it as a -blueprint for your own job worker. 
- -::: - -### XML representation - -A script task with a custom header: - -```xml - - - - - - - - - -``` - -### References - -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/send-tasks/assets/send-task.png b/versioned_docs/version-1.3/components/modeler/bpmn/send-tasks/assets/send-task.png deleted file mode 100644 index ef65accebe3..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/send-tasks/assets/send-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/send-tasks/send-tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/send-tasks/send-tasks.md deleted file mode 100644 index 932060301fd..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/send-tasks/send-tasks.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: send-tasks -title: "Send tasks" -description: "A send task is used to model the publication of a message to an external system." ---- - -A send task is used to model the publication of a message to an external system; for example, to a -Kafka topic or a mail server. - -![task](assets/send-task.png) - -Send tasks behave exactly like [service tasks](/components/modeler/bpmn/service-tasks/service-tasks.md). Both task -types are based on jobs and [job workers](/components/concepts/job-workers.md). The -differences between these task types are the visual representation (i.e. the task marker) and the -semantics for the model. - -When a process instance enters a send task, it creates a corresponding job and waits for its -completion. A job worker should request jobs of this job type and process them. When the job is -complete, the process instance continues. - -:::note - -Jobs for send tasks are not processed by Zeebe itself. To process them, provide -a job worker. - -::: - -## Defining a task - -A send task must define a [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) the same -way as a service task does. It specifies the type of job that workers should subscribe to (e.g. `kafka` or `mail`). - -Use [task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) to pass static parameters to the job -worker (e.g. the name of the topic to publish the message to). - -Define [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -the [same way as a service task does](/components/modeler/bpmn/service-tasks/service-tasks.md#variable-mappings) -to transform the variables passed to the job worker, or to customize how the variables of the job merge. - -## Additional resources - -:::tip Community Extension - -Review the [Kafka Connect Zeebe](https://github.com/camunda-community-hub/kafka-connect-zeebe). This is a -community extension that provides a job worker to publish messages to a Kafka topic. You can run it, -or use it as a blueprint for your own job worker. 
- -::: - -### XML representation - -A script task with a custom header: - -```xml - - - - - - - - -``` - -### References - -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/service-task.gif b/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/service-task.gif deleted file mode 100644 index 7105e52c3e2..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/service-task.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/service-task.png b/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/service-task.png deleted file mode 100644 index 2d16cd3debd..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/service-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/task-headers.gif b/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/task-headers.gif deleted file mode 100644 index 4e0d0acd48d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/task-headers.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/variable-mappings.gif b/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/variable-mappings.gif deleted file mode 100644 index dbf641387c5..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/assets/variable-mappings.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/service-tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/service-tasks.md deleted file mode 100644 index 405d084d4b8..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/service-tasks/service-tasks.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: service-tasks -title: "Service tasks" -description: "A service task represents a work item in the process with a specific type." ---- - -A service task represents a work item in the process with a specific type. - -![process](../assets/order-process.png) - -When a service task is entered, a corresponding job is created. The process instance stops here and waits until the job is complete. - -A [job worker](/components/concepts/job-workers.md) can subscribe to the job type, process the jobs, and complete them using one of the Zeebe clients. When the job is complete, the service task is completed and the process instance continues. - -## Task definition - -A service task must have a `taskDefinition`; this specifies the type of job workers can subscribe to. - -Optionally, a `taskDefinition` can specify the number of times the job is retried when a worker signals failure (default = 3). - -Typically, the job type and the job retries are defined as static values (e.g. `order-items`) but they can also be defined as [expressions](/components/concepts/expressions.md) (e.g. `= "order-" + priorityGroup`). The expressions are evaluated on activating the service task and must result in a `string` for the job type and a `number` for the retries. - -## Task headers - -A service task can define an arbitrary number of `taskHeaders`; they are static metadata handed to workers along with the job. 
The headers can be used as configuration parameters for the worker. - -## Variable mappings - -By default, all job variables merge into the process instance. This behavior can be customized by defining an output mapping at the service task. - -Input mappings can be used to transform the variables into a format accepted by the job worker. - -For more information about this topic visit the documentation about [Input/output variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings). - -## Additional resources - -### XML representation - -A service task with a custom header: - -```xml - - - - - - - - -``` - -## Next steps - -Learn more about the concept of job types and how to set up a job worker via our [manual on job workers](/components/concepts/job-workers.md). - -### References - -- [Job handling](/components/concepts/job-workers.md) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/subprocesses.md b/versioned_docs/version-1.3/components/modeler/bpmn/subprocesses.md deleted file mode 100644 index ffc8b2f4d5d..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/subprocesses.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: subprocesses -title: "Overview" -description: "This document outlines an overview of supported elements." ---- - -Currently supported elements: - -* [Embedded subprocess](embedded-subprocesses/embedded-subprocesses.md) -* [Call activities](call-activities/call-activities.md) -* [Event subprocess](event-subprocesses/event-subprocesses.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/tasks.md deleted file mode 100644 index 7be172a8e48..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/tasks.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: tasks -title: "Overview" -description: "This document outlines an overview of supported elements." 
---- - -Currently supported elements: - -* [Service tasks](service-tasks/service-tasks.md) -* [User tasks](user-tasks/user-tasks.md) -* [Receive tasks](receive-tasks/receive-tasks.md) -* [Business rule tasks](business-rule-tasks/business-rule-tasks.md) -* [Script tasks](script-tasks/script-tasks.md) -* [Send tasks](send-tasks/send-tasks.md) -* [Manual tasks](manual-tasks/manual-tasks.md) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/interrupting-timer-event.gif b/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/interrupting-timer-event.gif deleted file mode 100644 index 8049d97df26..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/interrupting-timer-event.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/non-interrupting-timer-event.gif b/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/non-interrupting-timer-event.gif deleted file mode 100644 index 8ea2392492e..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/non-interrupting-timer-event.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/timer-events.png b/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/timer-events.png deleted file mode 100644 index dc997f9cec4..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/assets/timer-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/timer-events.md b/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/timer-events.md deleted file mode 100644 index 05b514e0960..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/timer-events/timer-events.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -id: timer-events -title: "Timer events" -description: "Timer events are events triggered by a defined timer." ---- - -Timer events are events triggered by a defined timer. - -![process](assets/timer-events.png) - -## Timer start events - -A process can have one or more timer start events (besides other types of start events). Each of the timer events must have either a time date or time cycle definition. - -When a process is deployed, it schedules a timer for each timer start event. Scheduled timers of the previous version of the process (based on the BPMN process id) are canceled. - -When a timer is triggered, a new process instance is created and the corresponding timer start event is activated. - -## Intermediate timer catch events - -An intermediate timer catch event must have a time duration definition that defines when it is triggered. - -When an intermediate timer catch event is entered, a corresponding timer is scheduled. The process instance stops at this point and waits until the timer is triggered. When the timer is triggered, the catch event is completed and the process instance continues. - -## Timer boundary events - -An interrupting timer boundary event must have a time duration definition. When the corresponding timer is triggered, the activity is terminated. Interrupting timer boundary events are often used to model timeouts; for example, canceling the processing after five minutes and doing something else. - -A non-interrupting timer boundary event must have either a time duration or time cycle definition. 
When the activity is entered, it schedules a corresponding timer. If the timer is triggered and defined as time cycle with repetitions greater than zero, it schedules the timer again until the defined number of repetitions is reached. - -Non-interrupting timer boundary events are often used to model notifications; for example, contacting support if the processing takes longer than an hour. - -## Timers - -Timers must be defined by providing either a date, a duration, or a cycle. - -A timer can be defined either as a static value (e.g. `P3D`) or as an [expression](/components/concepts/expressions.md). There are two common ways to use an expression: - -- [Access a variable](/components/concepts/expressions.md#access-variables) (e.g. `= remainingTime`). -- [Use temporal values](/components/concepts/expressions.md#temporal-expressions) (e.g. `= date and time(expirationDate) - date and time(creationDate)`). - -If the expression belongs to a timer start event of the process, it is evaluated on deploying the process. Otherwise, it is evaluated on activating the timer catch event. The evaluation must result in either a `string` that has the same ISO 8601 format as the static value, or an equivalent temporal value (i.e. a date-time, a duration, or a cycle). - -### Time date - -A specific point in time defined as ISO 8601 combined date and time representation. It must contain timezone information, either `Z` for UTC or a zone offset. Optionally, it can contain a zone id. - -- `2019-10-01T12:00:00Z` - UTC time -- `2019-10-02T08:09:40+02:00` - UTC plus two hours zone offset -- `2019-10-02T08:09:40+02:00[Europe/Berlin]` - UTC plus two hours zone offset at Berlin - -### Time duration - -A duration is defined as a ISO 8601 durations format, which defines the amount of intervening time in a time interval and are represented by the format `P(n)Y(n)M(n)DT(n)H(n)M(n)S`. Note that the `n` is replaced by the value for each of the date and time elements that follow the `n`. - -The capital letters _P_, _Y_, _M_, _W_, _D_, _T_, _H_, _M_, and _S_ are designators for each of the date and time elements and are not replaced, but can be omitted. - -- _P_ is the duration designator (for period) placed at the start of the duration representation. -- _Y_ is the year designator that follows the value for the number of years. -- _M_ is the month designator that follows the value for the number of months. -- _W_ is the week designator that follows the value for the number of weeks. -- _D_ is the day designator that follows the value for the number of days. -- _T_ is the time designator that precedes the time components of the representation. -- _H_ is the hour designator that follows the value for the number of hours. -- _M_ is the minute designator that follows the value for the number of minutes. -- _S_ is the second designator that follows the value for the number of seconds. - -Examples: -- `PT15S` - 15 seconds -- `PT1H30M` - 1 hour and 30 minutes -- `P14D` - 14 days -- `P14DT1H30M` - 14 days, 1 hour and 30 minutes -- `P3Y6M4DT12H30M5S` - 3 years, 6 months, 4 days, 12 hours, 30 minutes and 5 seconds - -If the duration is zero or negative, the timer fires immediately. - -### Time cycle - -A cycle defined as ISO 8601 repeating intervals format; it contains the duration and the number of repetitions. If the repetitions are not defined, the timer repeats infinitely until it is canceled. 
- -- `R5/PT10S`: Every 10 seconds, up to five times -- `R/P1D`: Every day, infinitely - -## Additional resources - -### XML representation - -A timer start event with time date: - -```xml - - - 2019-10-01T12:00:00Z - - -``` - -An intermediate timer catch event with time duration: - -```xml - - - PT10M - - -``` - -A non-interrupting boundary timer event with time cycle: - -```xml - - - R3/PT1H - - -``` - -### References - -- [Expressions](/components/concepts/expressions.md) -- [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/user-tasks/assets/user-task.png b/versioned_docs/version-1.3/components/modeler/bpmn/user-tasks/assets/user-task.png deleted file mode 100644 index 7dd964bb121..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/bpmn/user-tasks/assets/user-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/bpmn/user-tasks/user-tasks.md b/versioned_docs/version-1.3/components/modeler/bpmn/user-tasks/user-tasks.md deleted file mode 100644 index c29c18f62f8..00000000000 --- a/versioned_docs/version-1.3/components/modeler/bpmn/user-tasks/user-tasks.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: user-tasks -title: "User tasks" -description: "A user task is used to model work that needs to be done by a human actor." ---- - -A user task is used to model work that needs to be done by a human actor. When -the process execution arrives at such a user task, a new job similar to a -[service task](/components/modeler/bpmn/service-tasks/service-tasks.md) is created. The process instance -stops at this point and waits until the job is completed. - -![user-task](assets/user-task.png) - -Applications like [Tasklist](/components/tasklist/introduction.md) can be used by humans to complete these tasks. - -Alternatively, a job worker can subscribe to the job type -`io.camunda.zeebe:userTask` to complete the job manually. - -When the job is completed, the user task is completed and the process -instance continues. - -## User task forms - -User tasks support specifying a `formKey` attribute, using the -`zeebe:formDefinition` extension element. The form key can be used to specify -an identifier to associate a form to the user task. [Tasklist] supports -embedded [Camunda Forms](/guides/utilizing-forms.md), -these can be embedded into the BPMN process XML as a `zeebe:UserTaskForm` -extension element of the process element. - -## Assignments - -User tasks support specifying assignments, using the `zeebe:AssignmentDefinition` extension element. -This can be used to define which user the task can be assigned to. One or both of the following -attributes can be specified simultaneously: -- `assignee`: Specifies the user assigned to the task. [Tasklist] will claim the task for this user. -- `candidateGroups`: Specifies the groups of users that the task can be assigned to. - -Typically, the assignee and candidate groups are defined as static values (e.g. `some_username` and -`sales, operations`), but they can also be defined as -[expressions](/components/concepts/expressions.md) (e.g. `= book.author` and `= remove(reviewers, -book.author)`). The expressions are evaluated on activating the user task and must result in a -`string` for the assignee and a `list of strings` for the candidate groups. - -For [Tasklist](/components/tasklist/introduction.md) to claim the task for a known Tasklist user, -the value of the `assignee` must be the user's **unique identifier**. 
-The unique identifier depends on the authentication method used to login to Tasklist: -- Camunda Cloud (login with email, Google, GitHub): `email` -- Default Basic Auth (elasticsearch): `username` -- IAM: `username` - -:::note -For example, say you log into Tasklist using Camunda Cloud login with email using your email address `foo@bar.com`. Every time a user task activates with `assignee` set to value `foo@bar.com`, Tasklist automatically assigns it to you. You'll be able to find your new task under the task dropdown option `Claimed by me`. -::: - -## Variable mappings - -By default, all job variables are merged into the process instance. This -behavior can be customized by defining an output mapping at the user task. - -Input mappings can be used to transform the variables into a format accepted by the job worker. - -## Task headers - -A user task can define an arbitrary number of `taskHeaders`; they are static -metadata handed to workers along with the job. The headers can be used -as configuration parameters for the worker. - -## Additional resources - -### XML representation - -A user task with a user task form and an assignment definition: - -```xml - - - - - - - - - - - - - -``` - -### References - -- [Tasklist](/components/tasklist/introduction.md) -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/connect-to-camunda-cloud.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/connect-to-camunda-cloud.md deleted file mode 100644 index 4176c4a53c7..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/connect-to-camunda-cloud.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: connect-to-camunda-cloud -title: Connect to Camunda Cloud -description: "Camunda Modeler can communicate directly with Camunda Cloud." ---- - -Desktop Modeler can directly deploy diagrams and start process instances in Camunda Cloud. Follow the steps below to deploy a diagram: - -1. Click the deployment icon: - -![deployment icon](./img/deploy-icon.png) - -2. Click **Camunda Cloud SaaS**, or alternatively, select **Camunda Cloud Self-Managed** if you want to deploy to a [local installation](../../../../self-managed/zeebe-deployment/local/install/), for example: - -![deployment configuration](./img/deploy-diagram-camunda-cloud.png) - -3. Input the `Cluster URL` and the credentials (`Client ID`, `Client Secret`) of your [API client](../../cloud-console/manage-clusters/manage-api-clients.md): - -![deployment via camunda cloud](./img/deploy-diagram-camunda-cloud-remember.png) - -4. Select the **Remember** checkbox if you want to locally store the connection information. - -5. Click **Deploy** to perform the actual deployment. - -![deployment successful](./img/deploy-diagram-camunda-cloud-success.png) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/about-templates.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/about-templates.md deleted file mode 100644 index 4814db3ecd6..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/about-templates.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: about-templates -title: About element templates -description: "Element templates are a way to extend Camunda Modeler with domain-specific diagram elements, such as service and user tasks." 
---- - -:::note -The Camunda Modeler element templates API is not stable and might change in the future. -::: - -Element templates allow you to create pre-defined configurations for BPMN elements such as service and user tasks. Once applied via the properties panel they provide configured custom inputs to the user. - -![Custom fields in the Desktop Modeler](./img/overview.png) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/additional-resources.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/additional-resources.md deleted file mode 100644 index 06e861e7cdc..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/additional-resources.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -id: additional-resources -title: Additional resources ---- - -Use the [Camunda Cloud Connect plugin](https://docs.camunda.org/cawemo/latest/technical-guide/integrations/modeler/) to integrate the Camunda Modeler with [Cawemo](https://cawemo.com/) and retrieve templates from a Cawemo catalog project. - -Try playing around with custom elements and some [example templates](https://github.com/camunda/camunda-modeler/blob/master/resources/element-templates/samples.json). - -If you get stuck, ask for help [in our forums](https://forum.camunda.org/c/modeler). diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/configuring-templates.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/configuring-templates.md deleted file mode 100644 index aa0f3d8ee33..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/configuring-templates.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: configuring-templates -title: Configuring templates ---- - -Element templates are defined as [JSON files](../defining-templates). There are two ways to integrate them into the Camunda Modeler: - -1. **Retrieve from Cawemo:** use the [Camunda Cloud Connect plugin](https://docs.camunda.org/cawemo/latest/technical-guide/integrations/modeler/) to integrate the Camunda Modeler with [Cawemo](https://cawemo.com/). The Modeler can then retrieve templates from catalog projects setup in Cawemo. Note that the [Cloud Connect plugin](https://docs.camunda.org/cawemo/latest/technical-guide/integrations/modeler/) will keep Cawemo and your local Camunda Modeler installation in sync (e.g., deleting a template in Cawemo will delete it locally as well). Locally the synced templates will be stored in the `config.json` file in your [`{USER_DATA_DIRECTORY}`](../../search-paths#user-data-directory). You should not manually change the `config.json` unless you know what you're doing. -2. **Local filesystem:** Store element templates as `.json` file in the `resources/element-templates` folder, relative to the modelers executable _or_ relative to the modelers data directory ([see below](#example-setup)). Alternatively, they can be stored in a `.camunda/element-templates` directory that resides, relative to the currently opened diagram, anywhere in the diagrams path hierarchy. - -New templates will be recognized when reconnecting to Cawemo or on Camunda Modeler reload/restart. - - -#### Example Setup - - -Add a `.json` file to the `resources/element-templates` sub-folder of your local [`{APP_DATA_DIRECTORY}`](../../search-paths#app-data-directory) or [`{USER_DATA_DIRECTORY}`](../../search-paths#user-data-directory) directory. 
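For illustration, a template stored in the app data directory could sit next to the modeler executable like this (the folder and file names below are only an example, mirroring the layout shown in the search paths documentation):

```
└── camunda-modeler-3.5.0-win-x64
    ├── Camunda Modeler.exe
    └── resources
        └── element-templates
            └── my-element-templates.json
```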
You may have to create the `resources` and `element-templates` folders yourself. - -For local template discovery, create a `.camunda/element-templates` folder relative in the directory -or any parent directory of the diagrams you are editing. - -#### Development Workflow - -When creating custom element templates, the modeler will give you detailed validation error messages. - -Templates will be loaded on application load and reload. To reload the application with updated templates, open the developer tools `F12` and press `CtrlOrCmd+R`. This will clear all unsaved diagrams **!** diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/defining-templates.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/defining-templates.md deleted file mode 100644 index 1e9818fadd7..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/defining-templates.md +++ /dev/null @@ -1,612 +0,0 @@ ---- -id: defining-templates -title: Defining templates ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Templates are defined in template descriptor files as a JSON array: - -```json -[ - { - "$schema": "https://unpkg.com/@camunda/element-templates-json-schema@0.2.0/resources/schema.json", - "name": "Template 1", - "id": "sometemplate", - "description": "some description", - "version": 1, - "appliesTo": [ - "bpmn:ServiceTask" - ], - "properties": [ - ... - ] - }, - { - "name": "Template 2", - ... - } -] -``` - -As seen in the code snippet a template consist of a number of important components: - -* `$schema : String`: URI pointing towards the [JSON schema](https://json-schema.org/) which defines the structure of the element template `.json` file. Element template schemas are maintained in the [element templates JSON schema](https://github.com/camunda/element-templates-json-schema) repository. Following the [JSON schema](https://json-schema.org/) standard, you may use them for validation or to get assistance (e.g., auto-completion) when working with them in your favorite IDE. Note that the `$schema` attribute is **required** for Camunda Cloud element templates. - - Example (Camunda Platform) - - ```json - "$schema": "https://unpkg.com/@camunda/element-templates-json-schema@0.6.0/resources/schema.json" - ``` - - Example (Camunda Cloud) - - ```json - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema@0.1.0/resources/schema.json" - ``` - -* `name : String`: Name of the template that will appear in the Catalog. -* `id : String`: ID of the template. -* `description : String`: Optional description of the template. Will be shown in the element template selection modal and in the properties panel (after having applied an element template). -* `version : Integer`: Optional version of the template. If you add a version to a template it will be considered unique based on its ID and version. Two templates can have the same ID if their version is different. -* `appliesTo : Array`: List of BPMN types the template can be applied to. -* `properties : Array`: List of properties of the template. - -### JSON Schema Compatibility - -The application uses the `$schema` property to ensure compatibility for a given element template. 
The latest supported [Camunda element templates JSON Schema versions](https://github.com/camunda/element-templates-json-schema) are - -* `v0.7.0` (Camunda Platform) -* `v0.1.0` (Camunda Cloud) - -The Camunda Modeler will ignore element templates defining a higher `$schema` version and will log a warning message. - -For example, given the following `$schema` definition, the application takes `0.6.0` as the JSON Schema version of the element template. - -```json -"$schema": "https://unpkg.com/@camunda/element-templates-json-schema@0.6.0/resources/schema.json" -``` - -The JSON Schema versioning is backward-compatible, meaning that all versions including or below the current one are supported. In case no `$schema` is defined, the Camunda Modeler assumes the latest JSON Schema version for Camunda Platform element templates. - -Learn more about specifing a `$schema` [here](../defining-templates). - -### Supported BPMN Types - -Currently, element templates may be used on the following BPMN elements: - -* `bpmn:Activity` (including tasks, service tasks, and others) -* `bpmn:SequenceFlow` (for maintaining `condition`) -* `bpmn:Process` -* `bpmn:Event` - -### Defining Template Properties - -With each template, you define some user-editable fields as well as their mapping to BPMN 2.0 XML as well as Camunda extension elements. - -Let us consider the following example that defines a template for a mail sending task: - -```json -{ - "$schema": "https://unpkg.com/@camunda/element-templates-json-schema@0.7.0/resources/schema.json", - "name": "Mail Task", - "id": "com.camunda.example.MailTask", - "appliesTo": [ - "bpmn:ServiceTask" - ], - "properties": [ - { - "label": "Implementation Type", - "type": "String", - "value": "com.mycompany.MailTaskImpl", - "editable": false, - "binding": { - "type": "property", - "name": "camunda:class" - } - }, - { - "label": "Sender", - "type": "String", - "binding": { - "type": "camunda:inputParameter", - "name": "sender" - }, - "constraints": { - "notEmpty": true - } - }, - { - "label": "Receivers", - "type": "String", - "binding": { - "type": "camunda:inputParameter", - "name": "receivers" - }, - "constraints": { - "notEmpty": true - } - }, - { - "label": "Template", - "description": "By the way, you can use freemarker templates ${...} here", - "value": "Hello ${firstName}!", - "type": "Text", - "binding": { - "type": "camunda:inputParameter", - "name": "messageBody", - "scriptFormat": "freemarker" - }, - "constraints": { - "notEmpty": true - } - }, - { - "label": "Result Status", - "description": "The process variable to which to assign the send result to", - "type": "String", - "value": "mailSendResult", - "binding": { - "type": "camunda:outputParameter", - "source": "${ resultStatus }" - } - }, - { - "label": "Async before?", - "type": "Boolean", - "binding": { - "type": "property", - "name": "camunda:asyncBefore" - } - } - ] -} -``` - -The example defines five custom fields, each mapped to different technical properties: - -* _Implementation Type_ is mapped to the `camunda:class` property in BPMN 2.0 XML -* _Sender_, _Receivers_ and _Template_ properties are mapped to `input parameters` -* _Result Status_ is mapped back from the Java Delegate into a process variable via an `output parameter` - -All but the _Implementation Type_ are editable by the user through the properties panel as shown in the following screenshot: - -![Custom Fields](./img/custom-fields.png) - - -As seen in the example the important attributes in a property definition are: - -* `label`: A 
descriptive text shown with the property -* `type`: Defining the visual appearance in the properties panel (may be any of `String`, `Text`, `Boolean`, `Dropdown` or `Hidden`) -* `value`: An optional default value to be used if the property to be bound is not yet set -* `binding`: Specifying how the property is mapped to BPMN or Camunda extension elements and attributes (may be any of `property`, `camunda:property`, `camunda:inputParameter`, `camunda:outputParameter`, `camunda:in`, `camunda:out`, `camunda:executionListener`, `camunda:field`, `camunda:errorEventDefinition`) -* `constraints`: A list of editing constraints to apply to the template - - -#### Types - -The input types `String`, `Text`, `Boolean`, `Dropdown` and `Hidden` are available. As seen above `String` maps to a single-line input, `Text` maps to a multi-line input. - - -###### Boolean / Checkbox Type - -The `Boolean` type maps to a checkbox that can be toggled by the user. It renders as shown below: - -![Boolean / Checkbox control](./img/field-boolean.png) - -When checked, it maps to `true` in the respective field (see [bindings](#bindings)). Note that it does not map to `${true}` and can therefore not be used e.g., for mapping a boolean to a process variable. - -###### Dropdown Type - -The `Dropdown` type allows users to select from a number of pre-defined options that are stored in a custom properties `choices` attribute as `{ name, value }` pairs: - -```json -... - "properties": [ - ... - { - "label": "Task Priority", - "type": "Dropdown", - "value": "50", - "choices": [ - { "name": "low", "value": "20" }, - { "name": "medium", "value": "50" }, - { "name": "height", "value": "100" } - ] - } - ] -... -``` - -The resulting properties panel control looks like this: - -![properties panel drop down](./img/field-dropdown.png) - -###### Omitted Type - -By omitting the `type` configuration the default UI component will be rendered for the respective binding. - -For `camunda:inputParameter` and `camunda:outputParameter` bindings an Input / Output Parameter Mapping component will be rendered. The component will include a toggle to enable or disable the `Variable Assignment`. When untoggling, the respective `camunda:inputParameter` or `camunda:outputParameter` element will not be created in the BPMN XML. - -![default-rendering](./img/default-rendering.png) - -Note that the configuration options `editable` and `constraints` will have no effect for the `camunda:inputParameter` and `camunda:outputParameter` default component. - -For `camunda:errorEventDefinition` bindings, an Error component will be rendered. The component will include all properties of the referenced `bpmn:Error` element. - -![default-errors-rendering](./img/default-errors-rendering.png) - -Note that the configuration options `editable` and `constraints` will have no effect for the `camunda:errorEventDefinition` default component. - -For the `property`, `camunda:property`, `camunda:in`, `camunda:in:businessKey`, `camunda:out` and `camunda:field` bindings, an omitted `type` will lead to rendering the `String` component (single line input). - -For the `camunda:executionListener` binding, an omitted `type` will lead to the `Hidden` component (ie. no visible input for the user). -#### Bindings - -The following ways exist to map a custom field to the underlying BPMN 2.0 XML. The _"mapping result"_ in the following section will use `[userInput]` to indicate where the input provided by the user in the `Properties Panel` is set in the BPMN XML. 
By default, or if no user input was given, the value specified in `value` is displayed and used for `[userInput]`. `[]` brackets indicate where the binding parameters are mapped to in the XML.

Adherence to the following configuration options is enforced by design. If a template does not adhere to them, the Camunda Modeler logs a validation error and ignores the respective element template.

###### `property`

| **Binding `type`** | `property` |
|---|---|
| **Valid property `type`'s** | all property types are supported |
| **Binding parameters** | `name`: the name of the property |
| **Mapping result** | `<... [name]=[userInput] ... />` |

The `property` binding is supported both in Camunda Platform and Cloud.

###### `camunda:property`

| **Binding `type`** | `camunda:property` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `name`: the name of the extension element property |
| **Mapping result** | `<camunda:property name="[name]" value="[userInput]" />` |

###### `camunda:inputParameter`

| **Binding `type`** | `camunda:inputParameter` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Text`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `name`: the name of the input parameter<br/>`scriptFormat`: the format of the script (if a script is to be mapped) |
| **Mapping result** | If `scriptFormat` is not set:<br/>`<camunda:inputParameter name="[name]">[userInput]</camunda:inputParameter>`<br/><br/>If `scriptFormat` is set:<br/>`<camunda:inputParameter name="[name]"><camunda:script scriptFormat="[scriptFormat]">[userInput]</camunda:script></camunda:inputParameter>` |

###### `camunda:outputParameter`

| **Binding `type`** | `camunda:outputParameter` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `source`: the source value to be mapped to the `outputParameter`<br/>`scriptFormat`: the format of the script (if a script is to be mapped) |
| **Mapping result (example)** | If `scriptFormat` is not set:<br/>`<camunda:outputParameter name="[userInput]">[source]</camunda:outputParameter>`<br/><br/>If `scriptFormat` is set:<br/>`<camunda:outputParameter name="[userInput]"><camunda:script scriptFormat="[scriptFormat]">[source]</camunda:script></camunda:outputParameter>` |

###### `camunda:in`

| **Binding `type`** | `camunda:in` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `target`: the target value to be mapped to<br/>`expression`: `true` indicates that the userInput is an expression<br/>`variables`: either `all` or `local`, indicating the variable mapping |
| **Mapping result** | If `target` is set:<br/>`<camunda:in source="[userInput]" target="[target]" />`<br/><br/>If `target` is set and `expression` is set to `true`:<br/>`<camunda:in sourceExpression="[userInput]" target="[target]" />`<br/><br/>If `variables` is set to `local`:<br/>`<camunda:in local="true" variables="all" />` (notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`)<br/><br/>If `variables` is set to `local` and `target` is set:<br/>`<camunda:in local="true" source="[userInput]" target="[target]" />`<br/><br/>If `variables` is set to `local`, `target` is set, and `expression` is set to `true`:<br/>`<camunda:in local="true" sourceExpression="[userInput]" target="[target]" />`<br/><br/>If `variables` is set to `all`:<br/>`<camunda:in variables="all" />` (notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`) |

###### `camunda:in:businessKey`

| **Binding `type`** | `camunda:in:businessKey` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | |
| **Mapping result** | `<camunda:in businessKey="[userInput]" />` |

###### `camunda:out`

| **Binding `type`** | `camunda:out` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `source`: the source value to be mapped<br/>`sourceExpression`: a string containing the expression for the source attribute<br/>`variables`: either `all` or `local`, indicating the variable mapping |
| **Mapping result** | If `source` is set:<br/>`<camunda:out source="[source]" target="[userInput]" />`<br/><br/>If `sourceExpression` is set:<br/>`<camunda:out sourceExpression="[sourceExpression]" target="[userInput]" />`<br/><br/>If `variables` is set to `all`:<br/>`<camunda:out variables="all" />` (notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`)<br/><br/>If `variables` is set to `local` and `source` is set:<br/>`<camunda:out local="true" source="[source]" target="[userInput]" />`<br/><br/>If `variables` is set to `local` and `sourceExpression` is set:<br/>`<camunda:out local="true" sourceExpression="[sourceExpression]" target="[userInput]" />`<br/><br/>If `variables` is set to `local`:<br/>`<camunda:out local="true" variables="all" />` (notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`) |

###### `camunda:executionListener`

| **Binding `type`** | `camunda:executionListener` |
|---|---|
| **Valid property `type`'s** | `Hidden` |
| **Binding parameters** | `event`: value for the `event` attribute<br/>`scriptFormat`: value for the `scriptFormat` attribute |
| **Mapping result** | `<camunda:executionListener event="[event]"><camunda:script scriptFormat="[scriptFormat]">[value]</camunda:script></camunda:executionListener>`<br/>(Notice that `[value]` needs to be set, since only `Hidden` is allowed as a type and the user therefore cannot provide a `[userInput]`.) |

###### `camunda:field`

| **Binding `type`** | `camunda:field` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Text`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `name`: value for the `name` attribute<br/>`expression`: `true` indicates that an expression is passed |
| **Mapping result** | `<camunda:field name="[name]"><camunda:string>[userInput]</camunda:string></camunda:field>`<br/><br/>If `expression` is set to `true`:<br/>`<camunda:field name="[name]"><camunda:expression>[userInput]</camunda:expression></camunda:field>` |

###### `camunda:errorEventDefinition`

| **Binding `type`** | `camunda:errorEventDefinition` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `errorRef`: reference to a scoped `bpmn:Error` binding, generates the `errorRef` attribute as a unique id |
| **Mapping result** | `<camunda:errorEventDefinition errorRef="[errorRef]" expression="[userInput]" />`<br/><br/>For the referenced scoped `bpmn:Error` binding: `<bpmn:error id="[errorRef]" name="[name]" errorCode="[errorCode]" />` |

###### `zeebe:input`

| **Binding `type`** | `zeebe:input` |
|---|---|
| **Valid property `type`'s** | `String`<br/>`Text`<br/>`Hidden`<br/>`Dropdown` |
| **Binding parameters** | `name`: the name of the input parameter |
| **Mapping result** | `<zeebe:input source="[userInput]" target="[name]" />` |
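To make the `zeebe:input` binding concrete, the following is a minimal sketch of a Camunda Cloud element template; the template name, id, and parameter name are illustrative only and not part of any shipped catalog:

```json
[
  {
    "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema@0.1.0/resources/schema.json",
    "name": "REST Task (example)",
    "id": "com.example.RestTask",
    "appliesTo": ["bpmn:ServiceTask"],
    "properties": [
      {
        "label": "Endpoint URL",
        "type": "String",
        "binding": {
          "type": "zeebe:input",
          "name": "url"
        },
        "constraints": {
          "notEmpty": true
        }
      }
    ]
  }
]
```

Applying such a template and filling in **Endpoint URL** should result in a `url` input parameter on the service task, as described in the mapping table above.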
    - - -#### Optional Bindings - -As of Camunda Modeler `v5.0.0`, we support optional bindings that do not persist empty values in the underlying BPMN 2.0 XML. - -If a user removes the value in the configured control, it will also remove the mapped element. - -```json -[ - { - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "Task example", - "id": "some-template", - "appliesTo": [ - "bpmn:ServiceTask" - ], - "properties": [ - { - "label": "Request", - "type": "String", - "optional": true, - "binding": { - "type": "zeebe:input", - "name": "reuqest" - } - }, - { - "label": "Response", - "type": "Text", - "optional": true, - "binding": { - "type": "zeebe:output", - "source": "response" - } - } - ] - } -] -``` - - -__Supported Bindings__ - -Camunda Platform - -*Optional bindings are currently not supported for Camunda Platform element templates.* - -Camunda Cloud - -* `zeebe:input` -* `zeebe:output` - -#### Scoped Bindings - -Scoped bindings allow you to configure nested elements, such as [connectors](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/#use-connectors). - -```json -{ - "name": "ConnectorGetTask", - "id": "my.connector.http.get.Task", - "appliesTo": [ - "bpmn:Task" - ], - "properties": [], - "scopes": [ - { - "type": "camunda:Connector", - "properties": [ - { - "label": "ConnectorId", - "type": "String", - "value": "My Connector HTTP - GET", - "binding": { - "type": "property", - "name": "connectorId" - } - }, - ... - ] - } - ] -} -``` - -The example shows how a connector is configured as part of the task. -On task creation, the connector is created with it and the connector bindings are -exposed to the user in a separate custom fields section. - -![Scoped Custom Fields](./img/scope-custom-fields.png) - - -__Supported Scopes__ - - - - - -| Name | Target | Supported by | -| ------------- | ------------- | ------------- | -| `camunda:Connector` | [Connectors](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/) | Camunda Platform | -| `bpmn:Error` | Global BPMN Error Element | Camunda Platform | - - - - - -Currently none. - - - - - -#### Groups - -As of Camunda Modeler `v5.0.0,` it is possible to define `groups` and order custom fields together. - -```json -{ - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "Groups", - "id": "group-example", - "appliesTo": [ - "bpmn:ServiceTask" - ], - "groups": [ - { - "id": "definition", - "label": "Task definition" - }, - { - "id": "request", - "label": "Request payload" - }, - { - "id": "result", - "label": "Result mapping" - } - ], - "properties": [ - ... - ] -} -``` - -Custom fields may use the defined group ids. The order of the custom fields also determines the groups' order in the properties panel. - -```json -{ - ... - "properties": [ - { - "label": "Implementation Type", - "type": "String", - "group": "definition", - "binding": { - "type": "property", - "name": "camunda:class" - } - }, - ... - ], - ... -} -``` - -![Groups](./img/groups.png) - -#### Constraints - -Custom Fields may have a number of constraints associated with them: - -* `notEmpty`: Input must be non-empty -* `minLength`: Minimal length for the input -* `maxLength`: Maximal length for the input -* `pattern`: Regular expression to match the input against - - -##### Regular Expression - -Together with the `pattern` constraint, you may define your custom error messages: - -```json -... 
- "properties": [ - { - "label": "Web service URL", - "type": "String", - "binding": { ... }, - "constraints": { - "notEmpty": true, - "pattern": { - "value": "https://.*", - "message": "Must be https URL" - } - } - } - ] -``` - - - -#### Display All Entries - -Per default, the element template defines the visible entries of the properties panel. All other property controls are hidden. If you want to bring all the default entries back, it is possible to use the `entriesVisible` property. - -```json -[ - { - "name": "Template 1", - "id": "sometemplate", - "entriesVisible": true, - "appliesTo": [ - "bpmn:ServiceTask" - ], - "properties": [ - ... - ] - } -] -``` - -![Display default entries](./img/entries-visible.png) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/chooser.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/chooser.png deleted file mode 100644 index 4e1fff7bcd5..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/chooser.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/custom-fields.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/custom-fields.png deleted file mode 100644 index 2d5e79a49cd..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/custom-fields.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/default-errors-rendering.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/default-errors-rendering.png deleted file mode 100644 index 8d765d0cd46..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/default-errors-rendering.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/default-rendering.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/default-rendering.png deleted file mode 100644 index 8477738c311..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/default-rendering.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/entries-visible.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/entries-visible.png deleted file mode 100644 index facc48d20ea..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/entries-visible.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/field-boolean.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/field-boolean.png deleted file mode 100644 index 75ceec15006..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/field-boolean.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/field-dropdown.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/field-dropdown.png deleted file mode 100644 index 84efa35a364..00000000000 Binary files 
a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/field-dropdown.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/groups.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/groups.png deleted file mode 100644 index 615e86b9e60..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/groups.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/modal.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/modal.png deleted file mode 100644 index a22685eddb5..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/modal.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/overview.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/overview.png deleted file mode 100644 index 2a328002c71..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/overview.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/scope-custom-fields.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/scope-custom-fields.png deleted file mode 100644 index 694e52ffde4..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/scope-custom-fields.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/template-not-found.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/template-not-found.png deleted file mode 100644 index b5168ba060d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/template-not-found.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/unlink-remove.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/unlink-remove.png deleted file mode 100644 index d2a53d75e02..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/unlink-remove.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/update-template.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/update-template.png deleted file mode 100644 index 41681d824b7..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/img/update-template.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/using-templates.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/using-templates.md deleted file mode 100644 index 51f60c9f44e..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/element-templates/using-templates.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: using-templates -title: Using templates ---- - -### Applying Templates - -If a template matches a selected diagram element, the template 
catalog button will be shown in the properties panel. - -![Template Chooser](./img/chooser.png) - -Clicking the `Catalog` button will bring up a modal menu allowing to browse and search available templates for the selected element. - -![Modal Menu](./img/modal.png) - -Applying a template will store it via the `modelerTemplate` property and the optional `modelerTemplateVersion` property on the selected element: - -Camunda Platform - -```xml - -``` - -Camunda Cloud - -```xml - -``` - -It will also setup custom fields on the diagram element and make these available to the user for inspection and editing. Properties which were not configured in the element template using custom fields, will not be available for editing for the user. - -### Removing Templates - -To remove an applied template from an element, either the *Unlink* or *Remove* function can be used: - -* *Remove*: Remove the element template from the `modelerTemplate` property and also reset all properties of the respective element. -* *Unlink*: Remove the element template from the `modelerTemplate` property but keep the properties which were set. - -![Unlink or Remove](./img/unlink-remove.png) - -### Updating Templates - -If a template is applied and a new version of the template is found you can *update* the template. - -![Update Template](./img/update-template.png) - -Templates are updated according to the following rules: - -1. If property is set in new template it will override unless property was set by old template and changed afterwards -2. If property is not defined in new template it will unset -3. Sub-properties of complex properties (e.g. camunda:In, camunda:Out, camunda:ExecutionListener) will be handled according to these rules if they can be identified - -### Missing Templates - -If a template is applied to an element but the respective template cannot be found on the system, the editing of the element will be disabled. *Unlinking* or *removing* the template for the element or adding the element template config will enable the editing again. - -![Template not Found](./img/template-not-found.png) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/flags.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/flags.md deleted file mode 100644 index a27ae2c74bb..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/flags.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -id: flags -title: Flags -description: "Flags allow you to control the availability of certain features within the desktop modeler." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Flags allow you to control the availability of certain features within the desktop modeler. - -## Configuring Flags - -You may configure flags in a `flags.json` file or pass them via CLI. - -### Configure in `flags.json` - -Place a `flags.json` file inside the `resources` folder of your local [`{USER_DATA}`](../search-paths#user-data-directory) or [`{APP_DATA_DIRECTORY}`](../search-paths#app-data-directory) directory to persist them. - -### Configure via CLI - -Pass flags via the command line when starting the application. - - - - - -```plain -"Camunda Modeler.exe" --disable-plugins -``` - - - - - -```plain -camunda-modeler --disable-plugins -``` - - - - - -```plain -camunda-modeler --disable-plugins -``` - - - - -Flags passed as command line arguments take precedence over those configured via a configuration file. 
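As a small illustration of that precedence (the flag values below are arbitrary), assume the following `flags.json`:

```json
{
  "disable-plugins": false,
  "disable-adjust-origin": true
}
```

Starting the application with `camunda-modeler --disable-plugins` would still run it with plugins disabled, because the command line value overrides the file, while `disable-adjust-origin` keeps the value from `flags.json`.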
- - -## Available Flags - -| flag | default value | -| ------------- | ------------- | -| "disable-plugins" | false | -| "disable-adjust-origin" | false | -| "disable-cmmn" | true | -| "disable-dmn" | false | -| "disable-platform" | false | -| "disable-zeebe" | false | -| "disable-remote-interaction" | false | -| "single-instance" | false | -| "user-data-dir" | [Electron default](../search-paths) | -| "display-version" | `undefined` | - -## Examples - -### BPMN-only Mode - -To disable the DMN and Form editing capabilities of the App, configure your `flags.json` like this: - -```js -{ - "disable-cmmn": true, - "disable-dmn": true -} -``` - -As a result, the app will only allow users to model BPMN diagrams. - -![BPMN only mode](./img/bpmn-only.png) - -### Custom `version-info` label - -To display a custom version information in the status bar of the app, configure your `flags.json` like this: - -```js -{ - "display-version": "1.2.3" -} -``` - -![Custom version info](./img/display-version.png) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/img/bpmn-only.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/img/bpmn-only.png deleted file mode 100644 index b525087a02e..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/img/bpmn-only.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/img/display-version.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/img/display-version.png deleted file mode 100644 index 58c044e38bc..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/flags/img/display-version.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png deleted file mode 100644 index 1f9f8187fb9..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png deleted file mode 100644 index 89b8ee18659..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png deleted file mode 100644 index a206785367f..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram.png deleted file mode 100644 index 0f17049c7a3..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-diagram.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-icon.png 
b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-icon.png deleted file mode 100644 index 133ade3e94f..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deploy-icon.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deployment-successful.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deployment-successful.png deleted file mode 100644 index bfc621d5b6f..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/deployment-successful.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/element-configuration.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/element-configuration.png deleted file mode 100644 index 17f909519a9..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/element-configuration.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/elements.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/elements.png deleted file mode 100644 index 7a9a49bd87a..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/elements.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/empty.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/empty.png deleted file mode 100644 index 0295d521318..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/empty.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/new-diagram.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/new-diagram.png deleted file mode 100644 index a7da276dab7..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/new-diagram.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/properties-panel.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/properties-panel.png deleted file mode 100644 index 2fbcc46c459..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/properties-panel.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/start-instance-icon.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/start-instance-icon.png deleted file mode 100644 index 9f5b787f102..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/start-instance-icon.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/start-instance-successful.png b/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/start-instance-successful.png deleted file mode 100644 index 3d2d47a1176..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/desktop-modeler/img/start-instance-successful.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/install-the-modeler.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/install-the-modeler.md deleted file mode 100644 index 073222157b8..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/install-the-modeler.md +++ /dev/null @@ -1,9 +0,0 
@@ ---- -id: install-the-modeler -title: Install the Modeler -description: "Camunda Modeler is the desktop application for modeling processes with BPMN." ---- - -[Desktop Modeler](https://github.com/camunda/camunda-modeler) is the desktop application for modeling processes with BPMN. - -The application can be run on Windows, MacOS, and Linux. The corresponding packages can be found on the [downloads page](https://camunda.com/download/modeler/). diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/model-your-first-diagram.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/model-your-first-diagram.md deleted file mode 100644 index d8fc7c04459..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/model-your-first-diagram.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: model-your-first-diagram -title: Model your first diagram ---- - -After [downloading](./install-the-modeler.md) and starting Desktop Modeler, you can model your first BPMN diagram. Follow the steps below: - -1. Create a BPMN diagram: - -![empty application](./img/empty.png) - -2. View the BPMN diagram with a start event: - -![new diagram](./img/new-diagram.png) - -3. On the left side of the screen you will find the element palette. Drag and drop the elements onto the diagram: - -![elements](./img/elements.png) - -Elements that support different types can be reconfigured by clicking on the corresponding icon. In the following screenshot, a task has been added to the diagram. It can be converted to a service task, for example. - -![task configuration](img/element-configuration.png) - -4. Use the properties panel on the right side of the page to edit the properties of the currently selected element: - -![properties panel](img/properties-panel.png) - -5. Once you finish modeling and configuring your diagram, you can deploy it to a [Camunda Cloud cluster](./connect-to-camunda-cloud.md). diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/plugins/plugins.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/plugins/plugins.md deleted file mode 100644 index bcd6c4f08e6..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/plugins/plugins.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -id: plugins -title: Plugins -description: "Plugins allow you to change the appearance and behavior of Desktop Modeler and add new features." ---- - -:::note -The Camunda Modeler plugins API is not stable and might change in the future. -::: - -Plugins allow you to change the appearance and behavior of Camunda Modeler and add new features. - -## Plugging into the Camunda Modeler - -You can plug into the modeler to change its appearance, add new menu entries, extend the modeling tools for [BPMN](https://github.com/bpmn-io/bpmn-js) and [DMN](https://github.com/bpmn-io/dmn-js), or even slot React.js components into the Camunda Modeler UI. - -To add a plugin, put it into the `resources/plugins` directory relative to your [`{APP_DATA_DIRECTORY}`](../search-paths#app-data-directory) or [`{USER_DATA_DIRECTORY}`](../search-paths#user-data-directory) directory. - -Camunda Modeler searches for available plugin entry points via the `resources/plugins/*/index.js` pattern. This means that each plugin must reside in it's own folder which is a direct child of the `plugins` directory. - -:::note -If you download and extract plugins from GitHub, the extracted directory contains the actual plugin, so make sure to copy the plugin, not its parent directory. 
-::: - -## Overview of your possibilities as a plugin developer - -There are many ways for a developer to extend Camunda Modeler and its modeling tools. The following table shows an overview: - -| Plugin type | Functionality | Example | -| ------------------------- | -------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| Menu Entries | Add new entries to the menu bar - useful to interact with your plugins, link to external pages, add settings, etc. | [Menu Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/menu-plugin-example) | -| Custom Styles | Change the look and feel of Camunda Modeler by adding stylesheets. | [Styles Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/style-plugin-example) | -| React Components | Embed custom React.js components into specific anchor points of Camunda Modeler. | [React Plugin Example](https://github.com/pinussilvestrus/camunda-modeler-autosave-plugin) | -| bpmn-js Modules | Extend our BPMN editor by injecting your own custom [bpmn-js](https://github.com/bpmn-io/bpmn-js) modules. | [bpmn-js Module Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/bpmn-js-plugin-example) | -| bpmn-moddle Extensions | Extend the BPMN language model by injecting your own custom [bpmn-moddle](https://github.com/bpmn-io/bpmn-moddle) modules. | [bpmn-moddle Extension Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/bpmn-js-plugin-moddle-extension-example) | -| dmn-js Modules | Extend our DMN editor by injecting your own custom [dmn-js](https://github.com/bpmn-io/dmn-js) modules. | [dmn-js Module Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/dmn-js-plugin-example) | -| dmn-moddle Extensions | Extend the DMN language model by injecting your own custom [dmn-moddle](https://github.com/bpmn-io/dmn-moddle) modules | n/a | - -## Getting started with development - -### Plugin entry point - -Regardless of the type of your plugin, you have to export a [Node.js module](https://nodejs.org/api/modules.html) named `index.js` that acts as a plugin entry point. The following shows an example of such entry point: - -```javascript -module.exports = { - name: 'My Awesome Plugin', // the name of your plugin - style: './style.css', // changing the appearance of the modeler - menu: './menu.js', // adding menu entries to the modeler - script: './script.js' // extending the modeler, and its BPMN and DMN components -}; -``` - -The modeler will automatically load your plugins on startup. - -### Changing the appearance of the modeler - -You can change the appearance of the modeler using CSS. - -Your stylesheet might look like this: - -```css -body { - background: linear-gradient(0deg, #52b415, #eee); -} -``` - -Plug it into the modeler like this: - -```javascript -module.exports = { - style: './style.css' -}; -``` - -### Adding menu entries to the modeler - -You can add new menu entries to the modeler's menu. 
- -Describe your menu entries like this: - -```javascript -module.exports = function(electronApp, menuState) { - return [{ - label: 'Open BPMN Reference', - accelerator: 'CommandOrControl+[', - enabled: function() { - - // only enabled for BPMN diagrams - return menuState.bpmn; - }, - action: function() { - var shell = require('electron').shell; - shell.openExternal('https://camunda.org/bpmn/reference/'); - } - }]; -}; -``` - -Plug them into the modeler like this: - -```javascript -module.exports = { - menu: './menu-entries' -}; -``` - -:::note -The code within the menu entries executes on [the main process](https://www.electronjs.org/docs/latest/tutorial/process-model) of Electron. This comes with the advantage of allowing you to use [Node.js](https://nodejs.org/en/) modules, but you need to consider that you cannot debug the respective code in Chromium. For more information regarding main process debugging, refer to the [official Electron documentation](https://www.electronjs.org/docs/latest/tutorial/debugging-main-process). -::: - -For more information on how the modeler's menu works, take a look at its [implementation](https://github.com/camunda/camunda-modeler/blob/master/app/lib/menu/menu-builder.js). - -### Extend the modeler and its BPMN and DMN components - -You can extend the modeling tools for [BPMN](https://github.com/bpmn-io/bpmn-js) and [DMN](https://github.com/bpmn-io/dmn-js) with your own modules, as well as embedding React.js components into certain sections of Camunda Modeler. - -Since the client of the modeler uses [Chromium](https://www.chromium.org/Home), you can't use Node.js modules to extend the modeling tools. You need to bundle your plugin first. The easiest way to get started with client-side plugins is through [this example project](https://github.com/camunda/camunda-modeler-plugin-example). - -> In this example, we are building a bpmn-js plugin, but this basic structure applies to all extensions besides menu entries and style. The modules themselves will be different however, so refer to our [examples](https://github.com/camunda/camunda-modeler-plugins) for more information on how to build different kinds. - -Take the following steps: - -1. Clone the repository: - -``` -git clone https://github.com/camunda/camunda-modeler-plugin-example.git -``` - -The plugin starter project comes with a menu and style folder which are referenced in the plugin entry point. If you do not need those, you can remove them from the entry point and delete the respective folder. - -2. Install the dependencies: - -``` -npm install -``` - -3. Create your module: - -```javascript -function LoggingPlugin(eventBus) { - eventBus.on('shape.added', function() { - console.log('A shape was added to the diagram!'); - }); -} - -module.exports = { - __init__: [ 'loggingPlugin' ], - loggingPlugin: [ 'type', LoggingPlugin ] -}; -``` - -4. Require your file in `client.js` and register it via our [helper functions](https://github.com/camunda/camunda-modeler-plugin-helpers): - -```javascript -var registerBpmnJSPlugin = require('camunda-modeler-plugin-helpers').registerBpmnJSPlugin; -var plugin = require('./LoggingPlugin'); - -registerBpmnJSPlugin(plugin); -``` - -5. You may want to create a plugin which specifically targets Camunda 7 or Camunda 8. To do this, use the appropriate variations of the registration helper function for your plugin type. 
- -```javascript -registerPlatformBpmnJSPlugin(plugin); // Register plugin for Camunda 7 BPMN diagrams only -registerCloudBpmnJSPlugin(plugin); // Register plugin for Camunda 8 BPMN diagrams only -registerBpmnJSPlugin(plugin); // Register plugin for Camunda 7 and 8 BPMN diagrams -``` - -6. You can use the globally available functions `getModelerDirectory` and `getPluginsDirectory` to load additional resources: - -```javascript -function LoggingPlugin(eventBus, canvas) { - var img = document.createElement(img); - img.src = getPluginsDirectory + '/logging-plugin/image.png'; - - canvas.getContainer().appendChild(img); -} -``` - -7. Bundle your plugin: - -``` -npm run build -``` -8. Put the folder into the `resources/plugins` directory relative to your Camunda Modeler installation directory. You can now use your plugin! - -### Development workflow - -When creating a plugin, you can place the directory containing your plugin in the aforementioned `resources/plugins` directory. - -Plugins will be loaded on application startup (menu plugins) or reload (style and modeling tool plugins). To reload the application, open the developer tools F12 and press `CtrlOrCmd + R`. This will clear all unsaved diagrams. - -## Additional resources - -* [Example Plugins](https://github.com/camunda/camunda-modeler-plugins) -* [Plugin Starter Project](https://github.com/camunda/camunda-modeler-plugin-example) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/search-paths/search-paths.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/search-paths/search-paths.md deleted file mode 100644 index 652d695f789..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/search-paths/search-paths.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: search-paths -title: Search paths -description: "Features like element templates and plugins allow you to add your own resources to the desktop modeler. For these resources to be found, they have to be in one of two directories depending on how local or global you want them to be." ---- - -Features like element templates and plugins allow you to add your own resources to the desktop modeler. For these resources to be found, they have to be in one of two directories depending on how local or global you want them to be. - -## App Data Directory - -The `resources` directory relative to the directory containing the Camunda Modeler executable file. In our documentation we refer to it as `{APP_DATA_DIRECTORY}`. - -Resources in the app data directory will be found by any local Camunda Modeler instance. - -### Example (Windows): - -``` -└── camunda-modeler-3.5.0-win-x64 - ├── Camunda Modeler.exe - └── resources - ├── element-templates - | └── my-element-templates.json - └── plugins - └── my-plugin - └── index.js -``` - -## User Data Directory - -The `camunda-modeler/resources` directory relative to the per-user application data directory, which by default points to: - -* `%APPDATA%` on [Windows](https://www.pcworld.com/article/2690709/whats-in-the-hidden-windows-appdata-folder-and-how-to-find-it-if-you-need-it.html) -* `$XDG_CONFIG_HOME` or `~/.config` on [Linux](https://wiki.archlinux.org/index.php/XDG_user_directories) -* `~/Library/Application Support` on macOS - -In our documentation we refer to it as `{USER_DATA_DIRECTORY}`. - -Resources in the user data directory will be found by all Camunda Modeler instances. 
- -### Example (Windows): - -``` -└── AppData - └── Roaming - └── camunda-modeler - └── resources - ├── element-templates - | └── my-element-templates.json - └── plugins - └── my-plugin - └── index.js -``` - -It is possible to change the user data directory using the `--user-data-dir` option when starting the Camunda Modeler from the command line. Refer to the [flags documentation](../flags) on how to configure the application with a flags file. diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/start-instance.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/start-instance.md deleted file mode 100644 index c9cfc62fa4e..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/start-instance.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: start-instance -title: Start a new process instance -description: "After you have deployed your process to Camunda Cloud, you can start a new instance of this process via the play icon." --- - -After you have [deployed your process to Camunda Cloud](./connect-to-camunda-cloud.md), you can start a new instance of this process via the play icon: - -![start instance icon](./img/start-instance-icon.png) - -After the instance has started successfully, a corresponding message is displayed: - -![start instance successful](./img/start-instance-successful.png) diff --git a/versioned_docs/version-1.3/components/modeler/desktop-modeler/telemetry/telemetry.md b/versioned_docs/version-1.3/components/modeler/desktop-modeler/telemetry/telemetry.md deleted file mode 100644 index a06f9e4c576..00000000000 --- a/versioned_docs/version-1.3/components/modeler/desktop-modeler/telemetry/telemetry.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -id: telemetry -title: Telemetry -description: "You can opt in to the collection of telemetry data when using the desktop modeler. This data will be used to better understand how the application is used and to improve it based on data." --- - -You can opt in to the collection of telemetry data when using the desktop modeler. This data will be used to better understand how the application is used and to improve it based on data. - -This page summarizes the data that is being collected. - -## General Structure of the Events -Independent of the type of event we're dealing with, the payload we send to the event tracking service (ET) has the following structure: -```json -{ - "installation": "[THE_EDITOR_ID]", - "product": { - "name": "Camunda Modeler", - "version": "[MODELER_VERSION]", - "edition": "community", - "internals": { - "event": "[NAME_OF_THE_EVENT]", - "[SOME_ADDITIONAL_EVENT_DATA]": "[SOME_CUSTOM_VALUE]" - } - } -} -``` - -Every event directly modifies the `internals` field of the payload. - -## Definition of Events - -### Ping Event -The `Ping Event` is sent in the following situations: - - - The modeler is opened (given that `Usage Statistics` option is enabled) - - `Usage Statistics` option is enabled for the first time.
- - Once every 24 hours (given that `Usage Statistics` option is enabled) - -The Ping Event has the following structure: -```json -{ - "event": "ping" -} -``` - -### Diagram Opened Event -The `Diagram Opened Event` is sent in following situations: - - - User created a new BPMN diagram - - User created a new DMN diagram - - User created a new CMMN diagram - - User created a new Form - - User opened an existing BPMN diagram - - User opened an existing DMN diagram - - User opened an existing CMMN diagram - - User opened an existing Form - -The Diagram Opened Event has the following core structure: -```json -{ - "event": "diagramOpened", - "diagramType": "[bpmn, dmn, cmmn or form]" -} -``` - -In the case of bpmn and form, we add the engine profile: - -```json -{ - "engineProfile": { - "executionPlatform": "Camunda Cloud", - "executionPlatformVersion": "1.1" - } -} -``` - -In case the diagram type is bpmn, we also add the element template usage to -Diagram Opened Event payload: - -```json -{ - "elementTemplateCount": 1, - "elementTemplates": [ - { - "appliesTo": [ "bpmn:ServiceTask" ], - "properties": { - "camunda:asyncBefore": 1, - "camunda:class": 1, - "camunda:inputParameter": 3, - "camunda:outputParameter": 1 - } - } - ] -} -``` - -Also in the case of BPMN diagrams, we add selected diagram metrics: - -```json -{ - "diagramMetrics": { - "processVariablesCount": 3, - "tasks": { - "userTask": { - "count": 5, - "form": { - "count": 5, - "embedded": 1, - "camundaForms": 1, - "external": 2, - "generated": 0, - "other": 1 - } - }, - "serviceTask": { - "count": 5, - "implementation": { - "count": 5, - "java": 1, - "expression": 1, - "delegate": 2, - "external": 0, - "connector": 1 - } - } - }, - "subprocessPlanes": { - "count": 5, - "nesting:": 2 - } - } -} -``` - - -### Deployment Event -The `Deployment Event` is sent in following situations: - - - User deploys a BPMN diagram to Camunda Platform or Camunda Cloud - - User deploys a DMN diagram to Camunda Platform - -The Deployment Event has the following core structure: -```json -{ - "event": "deployment", - "diagramType": "[bpmn or dmn]", - "deployment": { - "outcome": "[success or failure]", - "context": "[deploymentTool or startInstanceTool]", - "executionPlatform": "[Camunda Cloud or Camunda Platform]", - "executionPlatformVersion": "[version deployed to]" - } -} -``` - -In case the diagram deployment was not successful, the error code returned from the Camunda Platform will be added to the payload: - -```json -{ - "deployment": { - "outcome": "failure", - "error": "DIAGRAM_PARSE_ERROR" - } -} -``` - -If provided, for example, when deploying to a Zeebe based platform, we add the target type of the deployment as well: - -```json -{ - "deployment": { - "targetType": "camundaCloud" - } -} -``` - -In case of BPMN files, we add selected diagram metrics: - -```json -{ - "diagramMetrics": { - "processVariablesCount": 3, - "tasks": { - "userTask": { - "count": 5, - "form": { - "count": 5, - "embedded": 1, - "camundaForms": 1, - "external": 2, - "generated": 0, - "other": 1 - } - }, - "serviceTask": { - "count": 5, - "implementation": { - "count": 5, - "java": 1, - "expression": 1, - "delegate": 2, - "external": 0, - "connector": 1 - } - } - }, - "subprocessPlanes": { - "count": 5, - "nesting:": 2 - } - } -} -``` - -If it is set in the diagram, we also add target engine profile information: - -```json -{ - "engineProfile": { - "executionPlatform": "Camunda Cloud" - } -} -``` - - -### Version Info Events - -The version info events are sent in following 
situations: - - - User opens version info overlay via the button on the status bar - - User opens version info overlay via the menu - - User opens a link in the version info overlay - -In the two first cases, a `versionInfoOpened` event is sent: - -```json -{ - "event": "versionInfoOpened", - "source": "[menu or statusBar]" -} -``` - -When a link is clicked, a `versionInfoLinkOpened` event is sent: - -```json -{ - "event": "versionInfoLinkOpened", - "label": "[anchor content, e.g. Camunda Modeler docs]" -} -``` diff --git a/versioned_docs/version-1.3/components/modeler/dmn/dmn.md b/versioned_docs/version-1.3/components/modeler/dmn/dmn.md deleted file mode 100644 index 9262941c281..00000000000 --- a/versioned_docs/version-1.3/components/modeler/dmn/dmn.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: desktop-modeler-dmn -title: Editing DMN in Desktop Modeler -description: To start modeling, let's create a DMN diagram. ---- - -Camunda Platform Only - -:::note -DMN is currently only available in Camunda Platform. -::: - -## Create new DMN decision requirement diagram - -![New DMN Diagram](./img/create-dmn.png) - -To start modeling, create a new DMN 1.3 diagram for Camunda Platform by selecting **Create diagram > Create new DMN diagram (Camunda Platform)** in the top-level menu. - -## Start modeling - -![Start Modeling](./img/main.png) - -Now you can start to create a DMN 1.3 model. Add the desired elements from the palette on the left side by dragging and dropping them onto the diagram canvas. - -Alternatively, you can add new elements by using the context menu that appears when you select an element in the diagram. Using the wrench icon in the context menu, you can change the type of an element in place. Use the properties panel on the right side to change the name or id of the DMN diagram. - -## Demo - -![Demo](./img/demo.gif) - -The demo above shows how to model a decision table. After creating a decision and morphing it into a decision table, you can start editing the table by clicking the overlay on the upper left corner of the decision. Using the overview in the decision table view, you can jump between decision tables. - -## Save a diagram - -To save your diagram, click **File > Save File As...** in the top-level menu. Then, select a location on your file system to store the diagram in the DMN 1.3 XML format. You can load that file again by clicking **File > Open File...**. - -## DMN coverage - -Desktop Modeler covers the following elements: - -- Decision (tables and literal expressions) -- Input data -- Knowledge source -- Business knowledge model - -## Decision tables - -![Decision Table](./img/decision-table.png) - -By clicking the blue icon on a decision table, you can open the decision table view and start to edit it. Add **Input**, **Output**, and **Rule** elements by clicking the plus signs. Edit a table cell by clicking on it. Alternatively, the tabulator and enter keys can be used to walk through the table cells. 
- -Delete a rule or a column, copy, or insert a new rule by right clicking in the cell: - -![Delete or copy rules](./img/dmn-modeler-right-click.png) - -Adjust the details of an input or output column (e.g., name, expression, and type) by double clicking in the header row: - -![Change input or output column](./img/dmn-modeler-double-click.png) - -Jump between decision tables or literal expressions in your decision requirement diagram by opening and using the `Overview` on the left side: - -![Jump between decision tables](./img/dmn-modeler-toggle-overview.png) - -## Literal expressions - -![New DMN Literal Expression](./img/literal-expression.png) - -You can also edit literal expressions. Just as with decision tables, in the decision requirement diagram view, click the blue icon to *drill-down* into the literal expression view and start editing. diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/create-dmn.png b/versioned_docs/version-1.3/components/modeler/dmn/img/create-dmn.png deleted file mode 100644 index 391c66093af..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/create-dmn.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/decision-table.png b/versioned_docs/version-1.3/components/modeler/dmn/img/decision-table.png deleted file mode 100644 index 7d25901da6c..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/decision-table.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/demo.gif b/versioned_docs/version-1.3/components/modeler/dmn/img/demo.gif deleted file mode 100644 index b0e078feec9..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/demo.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-double-click.png b/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-double-click.png deleted file mode 100644 index 3d502efeb45..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-double-click.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-drd-prop-panel.png b/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-drd-prop-panel.png deleted file mode 100644 index 59c13da9b9c..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-drd-prop-panel.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-right-click.png b/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-right-click.png deleted file mode 100644 index 0e3ac34a657..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-right-click.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-toggle-overview.png b/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-toggle-overview.png deleted file mode 100644 index 07972d3acd8..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/dmn-modeler-toggle-overview.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/literal-expression.png b/versioned_docs/version-1.3/components/modeler/dmn/img/literal-expression.png deleted file mode 100644 index 2f1b81e28d3..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/literal-expression.png and 
/dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/dmn/img/main.png b/versioned_docs/version-1.3/components/modeler/dmn/img/main.png deleted file mode 100644 index f35a22c70e2..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/dmn/img/main.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/forms/camunda-forms-reference.md b/versioned_docs/version-1.3/components/modeler/forms/camunda-forms-reference.md deleted file mode 100644 index ae7530539c8..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/camunda-forms-reference.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -id: camunda-forms-reference -title: Camunda Forms Reference -description: Forms created with Camunda Modeler are embeddable in Tasklist. ---- - - -:::note Support for Camunda Forms -The Camunda Forms feature was added with the 7.15.0 release of Camunda Platform and the 4.7.0 release of Camunda Modeler. -::: - -Camunda Forms allow you to easily design and configure forms, and then embed them in Camunda Tasklist. - -* Camunda Forms are created in Camunda Modeler. -* Camunda Forms can be embedded in Camunda Tasklist. You can find out how in the [user task forms guide](../../../guides/utilizing-forms.md). -* Camunda Forms are powered by the open-source [bpmn-io form-js library](https://github.com/bpmn-io/form-js). Visit the [open source repository](https://github.com/bpmn-io/form-js) to find out how to render a form using plain JavaScript in a custom application (note that this also requires you to fetch the form from the respective BPMN 2.0 element and provide data as needed to the form.) - -## Components - -Use Camunda Modeler to configure your Camunda Form. The following form elements are currently supported. - -### Text field - -A text field allowing the user to read and edit textual data. - -![Form Text Field Symbol](./img/form-textField.svg) - -A text field can be configured using the following configuration properties: - -* **Field Label**: Label displayed on top of the text field. -* **Field Description**: Description provided below the text field. -* **Key**: Identifier used to map data to the text field. -* **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - * **Required**: Text field must contain a value. - * **Minimum Length**: Text field must have at least x characters. - * **Maximum Length**: Text field must not have more than x characters. - * **Regular Expression Pattern**: Text field value must match the provided regular expression pattern. - -### Number - -A number field allowing the user to read and edit numeric data. - -![Form Number Symbol](./img/form-number.svg) - -A number can be configured using the following configuration properties: - -* **Field Label**: Label displayed on top of the number field. -* **Field Description**: Description provided below the number field. -* **Key**: Identifier used to map data to the number field. -* **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - * **Required**: Number field must contain a value. - * **Minimum Length**: Number field must have at least x characters. - * **Maximum Length**: Number field must not have more than x characters. 
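To make the **Key** and **Validation** properties described for the text field and number components above more concrete, here is a minimal sketch of how such fields might appear in a form schema and how that schema could be rendered with the open-source form-js viewer mentioned in the introduction. All keys, labels, limits, and data values here are illustrative assumptions, not values taken from this documentation.

```javascript
// Minimal sketch – schema keys, labels, limits, and data values are
// illustrative assumptions, not taken from this documentation.
import { Form } from "@bpmn-io/form-js-viewer";

const schema = {
  type: "default",
  components: [
    {
      type: "textfield",
      key: "customerName", // "Key" used to map data to the field
      label: "Customer name",
      validate: { required: true, maxLength: 50 },
    },
    {
      type: "number",
      key: "orderAmount",
      label: "Order amount",
      validate: { required: true },
    },
  ],
};

async function renderForm() {
  const form = new Form({ container: document.querySelector("#form") });

  // Initial data is matched to components via their keys.
  await form.importSchema(schema, { customerName: "Jane Doe", orderAmount: 3 });
}
```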
- -### Checkbox - -A checkbox allowing the user to read and edit boolean data. - -![Form Checkbox Symbol](./img/form-checkbox.svg) - -A checkbox can be configured using the following configuration properties: - -* **Field Label**: Label displayed on top of the checkbox. -* **Field Description**: Description provided below the checkbox. -* **Key**: Identifier used to map data to the checkbox. - -### Radio - -A radio button allowing the user to select one of multiple radio button entries. - -![Form Radio Symbol](./img/form-radio.svg) - -A radio button can be configured using the following configuration properties: - -* **Field Label**: Label displayed on top of the checkbox. -* **Field Description**: Description provided below the checkbox. -* **Key**: Identifier used to map data to the checkbox. -* **Values**: A list of values, each representing one radio button which the user can click. Click the **Plus** icon to add a new value and the **Trash** icon to remove a value. - * **Label**: Label of the radio button. - * **Value**: Value that the radio button maps to. -* **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - * **Required**: One radio option must be selected. - -### Select - -A select allowing the user to select one of multiple entries from a dropdown menu. - -![Form Select Symbol](./img/form-select.svg) - -A select can be configured using the following configuration properties: - -* **Field Label**: Label displayed on top of the select. -* **Field Description**: Description provided below the select. -* **Key**: Identifier used to map data to the select. -* **Values**: A list of values, each representing one select option which the user can select. Click the **Plus** icon to add a new value and the **Trash** icon to remove a value. - * **Label**: Label of the select entry. - * **Value**: Value that the select entry maps to. -* **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - * **Required**: One select entry must be selected. - -### Text - -A text component allowing to display static information to the user. - -![Form Text Symbol](./img/form-text.svg) - -A text component can be configured using the following configuration properties: - -* **Text**: Either **Markdown** or **basic HTML** which will be rendered in the form. Note that dangerous HTML elements will not be rendered so to prevent the risk of cross-site scripting using Camunda Forms. - -**Example for Markdown**: - -``` -# This is a heading - -This shows an image: -![alternative image text](https://someurl.com/image.png) - -## This is a sub-heading - -Text can be shown for example using -**bold**, or *italic* font. - -* This is an unordered list... -* ...with two list items - -1. This is an ordered list... -2. ...with two list items -``` - -**Example for HTML**: - -``` -

-<h1>This is a heading</h1>
-
-This shows an image:
-<img src="https://someurl.com/image.png" alt="alternative image text" />
-
-<h2>This is a sub-heading</h2>
-
-Text can be shown for example
-using <b>bold</b>, or <i>italic</i> font.
-
-<ul>
-  <li>This is an unordered list...</li>
-  <li>...with two list items</li>
-</ul>
-
-<ol>
-  <li>This is an ordered list...</li>
-  <li>...with two list items</li>
-</ol>
    -``` - -### Button - -A button allowing the user to submit or reset the form. - -![Form Button Symbol](./img/form-button.svg) - -A button can be configured using the following configuration properties: - -* **Field Label**: Label to be displayed on top of the button. -* **Action**: The button can either trigger a **Submit** or a **Reset** action. - * **Submit**: Submit the form (given there are no validation errors). - * **Reset**: Reset the form, all user inputs will be lost. - -## Mapping components to process variables - -Each Camunda Forms component which allows data manipulation has a **Key** attribute. This attribute is used as an identifier to map data of the respective field (1) during initial loading of the form, and (2) during submission of the form. - -When a form is referenced by a user task or start event and viewed in [Camunda Tasklist](../../tasklist/introduction.md), the key will be used to refer to a process variable. This means that the value of the process variable will be used to populate the respective component initially and that the value of the component will be saved in the process variable during submission of the form. diff --git a/versioned_docs/version-1.3/components/modeler/forms/img/form-button.svg b/versioned_docs/version-1.3/components/modeler/forms/img/form-button.svg deleted file mode 100644 index 160bd8ebc2f..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/img/form-button.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/versioned_docs/version-1.3/components/modeler/forms/img/form-checkbox.svg b/versioned_docs/version-1.3/components/modeler/forms/img/form-checkbox.svg deleted file mode 100644 index 8a3d190d0dd..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/img/form-checkbox.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/versioned_docs/version-1.3/components/modeler/forms/img/form-number.svg b/versioned_docs/version-1.3/components/modeler/forms/img/form-number.svg deleted file mode 100644 index 8bbc964bc19..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/img/form-number.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/versioned_docs/version-1.3/components/modeler/forms/img/form-radio.svg b/versioned_docs/version-1.3/components/modeler/forms/img/form-radio.svg deleted file mode 100644 index fe7eb47c787..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/img/form-radio.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/versioned_docs/version-1.3/components/modeler/forms/img/form-select.svg b/versioned_docs/version-1.3/components/modeler/forms/img/form-select.svg deleted file mode 100644 index 9b80ce09e80..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/img/form-select.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/versioned_docs/version-1.3/components/modeler/forms/img/form-text.svg b/versioned_docs/version-1.3/components/modeler/forms/img/form-text.svg deleted file mode 100644 index 7d418930ba0..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/img/form-text.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/modeler/forms/img/form-textField.svg b/versioned_docs/version-1.3/components/modeler/forms/img/form-textField.svg deleted file mode 100644 index 69374bc3145..00000000000 --- a/versioned_docs/version-1.3/components/modeler/forms/img/form-textField.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git 
a/versioned_docs/version-1.3/components/modeler/web-modeler/collaboration.md b/versioned_docs/version-1.3/components/modeler/web-modeler/collaboration.md deleted file mode 100644 index 2ebb6c0c123..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/collaboration.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -id: collaboration -title: Collaboration -description: Collaboration features and access rights for Web Modeler. ---- - -Camunda Cloud only - -## Projects - -Files and folders are stored in projects. -The user access on files and folders is defined at the project level. - -When you access Web Modeler via the Cloud menu, you see the **Home** page with all the projects you can access: -![home page](img/collaboration/web-modeler-home.png) - -### Access rights and permissions - -Users can have various levels of access to a project in Web Modeler, outlined in this section. - -After creating a project, you can invite members of your Camunda Cloud organization to collaborate in Web Modeler. -There are four roles with different levels of access rights that can be assigned to each user: - -* **Project Admin**: The user can edit the project itself, all folders and diagrams within the project, and invite more users to collaborate. -* **Editor**: The user can edit all folders and diagrams within the project. -* **Commenter**: the user cannot edit folders or diagrams nor invite users, but can view and leave comments on diagrams. -* **Viewer**: The user cannot edit folders or diagrams nor leave comments, but can only view diagrams. - -### Inviting users to projects - -On the right side of a project, view a list of your collaborators and invite more by taking the steps below: - -1. Click **Add user**. -![invite user](img/collaboration/web-modeler-collaborator-invite-modal-opened.png) - -2. Choose a role for your new collaborator. -![invite choose role](img/collaboration/web-modeler-collaborator-invite-choose-role.png) - -3. Begin typing the name of the individual and Web Modeler will suggest Camunda Cloud organization members that you can invite to the project. -![invite suggestions](img/collaboration/web-modeler-collaborator-invite-suggestions.png) - -4. Write a message to your new collaborator about their invitation to the project. -![invite type message](img/collaboration/web-modeler-collaborator-invite-type-message.png) - -5. Click **Send** and your new collaborator will receive an email with the invitation. -![invite sent](img/collaboration/web-modeler-collaborator-invite-sent.png) -![invite email](img/collaboration/web-modeler-collaborator-invite-email.png) - -### Folders - -You can create folders in a project to semantically group and organize your diagrams. -The user access on a folder is inherited from the project. - -## Sharing and embedding diagrams - -Diagrams can also be shared with others in read-only mode via a sharing link. -This link can also be protected with an additional password. - -1. Navigate to a diagram and click on the share icon button. -![share button](img/collaboration/web-modeler-share-icon-button.png) - -2. Click **Create link**. -![share create link](img/collaboration/web-modeler-share-modal.png) - -1. Click **Copy** to copy the link to your clipboard. -![share copy link](img/collaboration/web-modeler-share-modal-create.png) - -1. Click **Add** and type a new password to protect your link. -![share copy link](img/collaboration/web-modeler-share-modal-password-protect.png) - -5. Click **Email** to share the new link with multiple recipients. 
-![share copy link](img/collaboration/web-modeler-share-modal-email.png) - -Similar to the sharing link, a diagram can be embedded into HTML pages via an iframe tag. The iframe tag can be copied from the sharing dialog via the **Embed** button. - -For wiki systems like [Confluence](https://www.atlassian.com/software/confluence), we recommend using the HTML macro and adding the iframe tag from the sharing dialog. This way, diagrams can be easily included in documentation pages. To adjust the dimensions of the diagram, the width and height values of the iframe tag can be modified. - -## Comments - -When selecting an element of the BPMN diagram, a discussion can be attached to this element. If no element is selected, the discussion will be attached directly to the diagram. -Switch between the **Properties Panel** and **Comments** using the two tabs present at the top of the right side panel. -![comment](img/collaboration/web-modeler-comment-type-here.png) - -New comments can be added to the discussion by any collaborator with Admin, Editor, or Commenter access rights. - -Afterwards, the comment can be edited or deleted via the context menu icon. -![comment context menu](img/collaboration/web-modeler-comment-with-context-menu.png) - -Elements with discussions attached will always have a visible blue overlay, so you can easily identify discussion points. -![comment context menu](img/collaboration/web-modeler-comment-overlay-on-diagram.png) - -### Mention others in comments - -By typing the **@** character, you are able to filter the collaborators on the project and select one of them. -![comment suggestion](img/collaboration/web-modeler-comment-mention-suggestions.png) - -When submitting the comment, this user will receive an email as a notification about the new comment. -![comment suggestion email](img/collaboration/web-modeler-comment-mention-email.png) - -## Interact with your collaborators - -### Model a diagram together - -When others are opening the same diagram as you, the updates on the diagram are sent in real time. You can also see who is in the diagram with you. -![real time collaboration](img/real-time-collaboration.png) - -### Draw other's attention - -Whether you are in a presentation or if others are in the same diagram as you are, use the attention grabber pointer to draw attention to a specific part of the diagram. To do this, take the following steps: - -1. Switch on the attention grabber pointer from the canvas tools. -![attention grabber](img/attention-grabber.png) - -2. Drop the pointer by clicking anywhere on the canvas. -![attention grabber gif](img/attention-grabber-pointer-pulse.gif) - -The pointer will pulsate to draw attention and will match your avatar color. -It can also be seen in real-time by others that are looking at the same diagram as you. 
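As a sketch of the embedding option described above: an embedded diagram could look like the following in an HTML page. The `src` URL and the dimensions are placeholders only; the actual iframe tag, including the share link, is the one copied from the sharing dialog via the **Embed** button.

```html
<!-- Placeholder values: copy the real iframe tag from the sharing dialog.
     Adjust width and height to resize the embedded diagram. -->
<iframe
  src="https://example.com/embed/<shared-diagram-link>"
  width="800"
  height="600"
  style="border: 1px solid #ccc"
></iframe>
```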
diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/attention-grabber-pointer-pulse.gif b/versioned_docs/version-1.3/components/modeler/web-modeler/img/attention-grabber-pointer-pulse.gif deleted file mode 100644 index 0719efdf3a3..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/attention-grabber-pointer-pulse.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/attention-grabber.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/attention-grabber.png deleted file mode 100644 index 285672262b9..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/attention-grabber.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/cloud-web-modeler-menu-item.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/cloud-web-modeler-menu-item.png deleted file mode 100644 index 3aacf5ffb9d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/cloud-web-modeler-menu-item.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png deleted file mode 100644 index 4dbd018b1e8..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-email.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-email.png deleted file mode 100644 index d18a0bd1bba..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-email.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png deleted file mode 100644 index 181ef2fe17c..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png deleted file mode 100644 index 7f58e0844db..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png deleted file mode 100644 index d5dfda7d208..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png and /dev/null differ diff --git 
a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png deleted file mode 100644 index c03ecf4b021..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-email.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-email.png deleted file mode 100644 index e255630449c..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-email.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png deleted file mode 100644 index 89b87af5218..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png deleted file mode 100644 index e69b47a3b0a..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png deleted file mode 100644 index a25925bce92..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png deleted file mode 100644 index 54660f1fd52..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-home.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-home.png deleted file mode 100644 index 6374c015832..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-home.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png deleted file mode 100644 index c30496a06b2..00000000000 Binary files 
a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png deleted file mode 100644 index fff1f8ad0f1..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png deleted file mode 100644 index e535d5c54b8..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png deleted file mode 100644 index 86838c1950e..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png deleted file mode 100644 index 5219a796154..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/fullscreen.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/fullscreen.png deleted file mode 100644 index 56a1ec70b71..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/fullscreen.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png deleted file mode 100644 index cc2cda7fff3..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png deleted file mode 100644 index e2c75332646..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png deleted file mode 100644 index 42613495b8b..00000000000 Binary files 
a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png deleted file mode 100644 index 10466afdff0..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png deleted file mode 100644 index c21b5dc4567..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png deleted file mode 100644 index d98c2b940d0..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-menu-item.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-menu-item.png deleted file mode 100644 index a2e02fb927b..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-menu-item.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-clock-icon-button.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-clock-icon-button.png deleted file mode 100644 index 7e40ea9fe1c..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-clock-icon-button.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png deleted file mode 100644 index ecbd9fde849..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png deleted file mode 100644 index ca70a36d5c9..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png 
b/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png deleted file mode 100644 index f5440304331..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png deleted file mode 100644 index 20fce005c76..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png deleted file mode 100644 index 2516876f300..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/minimap.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/minimap.png deleted file mode 100644 index 39941e40fe4..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/minimap.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/new-web-modeler/web-modeler-home-migrated-project.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/new-web-modeler/web-modeler-home-migrated-project.png deleted file mode 100644 index e3e2f63cf2a..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/new-web-modeler/web-modeler-home-migrated-project.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/new-web-modeler/web-modeler-project-migrated-diagrams-and-forms.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/new-web-modeler/web-modeler-project-migrated-diagrams-and-forms.png deleted file mode 100644 index ecc9c096f6d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/new-web-modeler/web-modeler-project-migrated-diagrams-and-forms.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/real-time-collaboration.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/real-time-collaboration.png deleted file mode 100644 index 9516ed0aa6c..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/real-time-collaboration.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/reset-viewport.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/reset-viewport.png deleted file mode 100644 index d397219d0f8..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/reset-viewport.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/save-and-deploy-successful.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/save-and-deploy-successful.png deleted file mode 100644 index b09c8f441c5..00000000000 Binary files 
a/versioned_docs/version-1.3/components/modeler/web-modeler/img/save-and-deploy-successful.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/save-and-deploy.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/save-and-deploy.png deleted file mode 100644 index 43af4a3d7a6..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/save-and-deploy.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/start-process-instance-done.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/start-process-instance-done.png deleted file mode 100644 index c00cafd7beb..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/start-process-instance-done.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/start-process-instance-variables.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/start-process-instance-variables.png deleted file mode 100644 index 9ee92a83bef..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/start-process-instance-variables.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/play.gif b/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/play.gif deleted file mode 100644 index 2f8cc467a01..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/play.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/speed.gif b/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/speed.gif deleted file mode 100644 index e529a347394..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/speed.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/start.gif b/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/start.gif deleted file mode 100644 index 82a22287933..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/start.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/toggle.gif b/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/toggle.gif deleted file mode 100644 index 967dd4b21d8..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/token-simulation/toggle.gif and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-add-endevent.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-add-endevent.png deleted file mode 100644 index 1c9ccf8c843..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-add-endevent.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-add-task.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-add-task.png deleted file mode 100644 index a1880dd3aab..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-add-task.png and /dev/null differ diff --git 
a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-deploy-modal-healthy.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-deploy-modal-healthy.png deleted file mode 100644 index a708121affc..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-deploy-modal-healthy.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png deleted file mode 100644 index fe3d00c4fc2..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-bpmn.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-bpmn.png deleted file mode 100644 index faa7f9c44c6..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-bpmn.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-modal.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-modal.png deleted file mode 100644 index a874e57451f..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-modal.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram.png deleted file mode 100644 index 55d8a18bb6e..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-project.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-project.png deleted file mode 100644 index 3a8378db7d8..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-new-user-new-project.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-start-instance-modal-healthy.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-start-instance-modal-healthy.png deleted file mode 100644 index 08fedba2b6f..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-start-instance-modal-healthy.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-with-end-event.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-with-end-event.png deleted file mode 100644 index c41a36414b7..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-with-end-event.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-with-end-event2.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-with-end-event2.png deleted file mode 100644 index 
f23df1ee18a..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/web-modeler-with-end-event2.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/zoom-in.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/zoom-in.png deleted file mode 100644 index e0483a2a60d..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/zoom-in.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/img/zoom-out.png b/versioned_docs/version-1.3/components/modeler/web-modeler/img/zoom-out.png deleted file mode 100644 index a695f755a6f..00000000000 Binary files a/versioned_docs/version-1.3/components/modeler/web-modeler/img/zoom-out.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/import-diagram.md b/versioned_docs/version-1.3/components/modeler/web-modeler/import-diagram.md deleted file mode 100644 index 40b0e7ed083..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/import-diagram.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: import-diagram -title: Import diagram -description: "You can import a BPMN diagram at any time with Web Modeler." ---- - -Camunda Cloud only - -You can import a BPMN diagram at any time with Web Modeler, and there are several ways to accomplish this: - -- In a project, click **New > Upload files** and select the files from your computer. -![import diagram](img/import-diagram/web-modeler-upload-file-menu-item.png) -![import diagram](img/import-diagram/web-modeler-upload-file-choose.png) -![import diagram](img/import-diagram/web-modeler-upload-file-completed.png) - -- In a project, drag one file from your computer and drop it. -![import diagram](img/import-diagram/web-modeler-project-drag-and-drop.png) - -- In a diagram, open the breadcrumb menu and choose **Replace via upload**. Then, select a file from your computer. - -![import diagram](img/import-diagram/web-modeler-replace-via-upload-menu-item.png) -![import diagram](img/import-diagram/web-modeler-replace-via-upload-choose.png) - -- In a diagram, drag one file from your computer and drop it onto the canvas. -![import diagram](img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png) - -:::note -Within the last two options above, note that the content of the diagram will be replaced with the content of the file. -::: diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/launch-web-modeler.md b/versioned_docs/version-1.3/components/modeler/web-modeler/launch-web-modeler.md deleted file mode 100644 index 47c179a2c8c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/launch-web-modeler.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: launch-web-modeler -title: Launch Web Modeler -description: "Since Web Modeler is embedded in the Camunda Console, it does not need to be started separately. To launch Web Modeler, take the following steps." ---- - -Camunda Cloud only - -Since Web Modeler is embedded in the Camunda Cloud Console, it does not need to be started separately. - -To launch Web Modeler, follow the steps below: - -1. Click the **Modeler** tab at the top of the page. -![cloud web modeler menu item](img/cloud-web-modeler-menu-item.png) - -2. Click **New Project** to create a new project to store all your diagrams. -![web modeler empty home](img/web-modeler-new-user-new-project.png) -:::note -You can go back and rename your project at any time. 
-::: - -3. Click **New** and choose **BPMN Diagram**. -![web modeler empty project](img/web-modeler-new-user-new-diagram-choose-bpmn.png) - -4. Choose an existing template (i.e. Absence Request) or **Create blank**. -![web modeler new diagram modal](img/web-modeler-new-user-new-diagram-modal.png) - -Congrats! You just created your first diagram. -![web modeler new diagram created](img/web-modeler-new-user-new-diagram.png) diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/milestones.md b/versioned_docs/version-1.3/components/modeler/web-modeler/milestones.md deleted file mode 100644 index 8b30c47cef4..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/milestones.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: milestones -title: Milestones -description: Working with milestones in Web Modeler ---- - -Camunda Cloud only - -## Milestones - -You can save a snapshot of your diagram as a milestone any time. - -If you make any mistakes while modeling, you can always go back to previously saved snapshots. You can also identify and compare the differences between two milestones. - -Find your milestones under the clock icon. -![milestones](img/milestones/web-modeler-milestone-clock-icon-button.png) - -### Creating milestones - -In the milestone history view, the latest version can be saved as a new milestone. -![milestones create via icon](img/milestones/web-modeler-milestone-create-via-icon.png) - -Alternatively, you can create a new milestone via the breadcrumb menu in the diagram view. -![milestones create via icon](img/milestones/web-modeler-milestone-create-via-breadcrumb.png) - -When dragging and dropping a file into the diagram view, or when using the **Replace via upload** option under the breadcrumb menu, a new milestone is created automatically. - -### Restoring milestones - -Hover over a milestone, click on the three vertical dots, and expand for more options. -![milestones restore](img/milestones/web-modeler-milestone-restore.png) -![milestones restore](img/milestones/web-modeler-milestone-restore-complete.png) - -### Comparing milestones - -Milestones can be compared visually. By enabling the diffing feature, the currently selected milestone is compared to its predecessor. - -The differences that are highlighted are only those that affect the execution of the BPMN process. Pure visual changes like position changes are not highlighted. -![milestones diffing](img/milestones/web-modeler-milestone-diffing.png) diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/model-your-first-diagram.md b/versioned_docs/version-1.3/components/modeler/web-modeler/model-your-first-diagram.md deleted file mode 100644 index d659b7faafa..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/model-your-first-diagram.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: model-your-first-diagram -title: Model your first diagram -description: "After you've created a BPMN diagram, you can start modeling it." ---- - -Camunda Cloud only - -After you've created a BPMN diagram, you can start modeling it. - -We've preconfigured a diagram consisting of a start event. To convert it to something meaningful, append a **Task** to it, and afterwards append an **EndEvent**: - -![add task](img/web-modeler-add-task.png) -![add task](img/web-modeler-add-endevent.png) - -Each element has adjustable attributes. Use the properties panel on the right side of the page. - -Elements supporting different types can be reconfigured by clicking on the corresponding icon. 
In the following screenshot, a task has been added to the diagram. It can be converted to a service task, for example. - -![task configuration](img/web-modeler-new-diagram-with-configuration.png) - -Use the canvas tools in the bottom right corner to interact with your diagram. - -1. Zoom in. -![zoom in](img/zoom-in.png) - -2. Zoom out. -![zoom in](img/zoom-out.png) - -3. Reset viewport if you get lost on the canvas. -![reset view port](img/reset-viewport.png) - -1. Open the minimap to navigate complex diagrams. -![mini map](img/minimap.png) - -5. Enter the fullscreen mode for distraction-free modeling. -![full screen](img/fullscreen.png) - -6. Drop an attention point and use it as a laser pointer in your presentations. -![attention grabber](img/attention-grabber.png) diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/new-web-modeler.md b/versioned_docs/version-1.3/components/modeler/web-modeler/new-web-modeler.md deleted file mode 100644 index b0f1730f026..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/new-web-modeler.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: new-web-modeler -title: New Web Modeler -description: Your data was automatically migrated from the old Cloud Modeler to the new Web Modeler. ---- - -Camunda Cloud only - -:::note - -Web Modeler is only available for Camunda Cloud SaaS. - -::: - -If you used our old Cloud Modeler (before March 2022), all of your data has been migrated to the new Web Modeler. - -Find all your diagrams and forms under the new **Modeler** entry in the top navigation bar. -![cloud web modeler menu item](img/cloud-web-modeler-menu-item.png) - -After you open Web Modeler, you will find all your diagrams and forms in a new project called **Migrated Diagrams and Forms**. -![home migrated project](img/new-web-modeler/web-modeler-home-migrated-project.png) - -Click on the **Migrated Diagrams and Forms** project to open the project and access all diagrams and forms you had in Cloud Modeler. -![project migrated diagrams and forms](img/new-web-modeler/web-modeler-project-migrated-diagrams-and-forms.png) - -To learn how to use and discover more Web Modeler features, follow our next steps below. - -## Next steps - -- [Launch Web Modeler](launch-web-modeler.md) -- [Model your first diagram](model-your-first-diagram.md) -- [Import diagram](import-diagram.md) -- [Save and deploy your diagram](save-and-deploy.md) -- [Start a new process instance](start-instance.md) -- [Learn about collaboration features](collaboration.md) -- [Learn about milestone features](milestones.md) diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/save-and-deploy.md b/versioned_docs/version-1.3/components/modeler/web-modeler/save-and-deploy.md deleted file mode 100644 index 1958fd66287..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/save-and-deploy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: save-and-deploy -title: Save and deploy your diagram -description: "If a diagram is changed and autosaved, it has no effect on your cluster." ---- - -Camunda Cloud only - -Web Modeler will autosave all your changes on a diagram. The changes will also be visible in real-time to any collaborators opening the same diagram. - -If you change a diagram and it is autosaved, this has no effect on your cluster(s). - -If you deploy the diagram, it becomes available on the selected cluster and new instances can start. 
- -To deploy, click **Deploy diagram**: - -![save and deploy](img/web-modeler-deploy-modal-healthy.png) diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/start-instance.md b/versioned_docs/version-1.3/components/modeler/web-modeler/start-instance.md deleted file mode 100644 index 56709f3fff2..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/start-instance.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: start-instance -title: Start a new process instance -description: "If a BPMN diagram is deployed via Web Modeler, you can start a new instance of this diagram." ---- - -Camunda Cloud only - -If a BPMN diagram is deployed via Web Modeler, you can start a new instance of this diagram. - -To do so, click **Start instance**: - -![start instance](img/web-modeler-start-instance-modal-healthy.png) - -You can also specify variables written to the process context at startup. The variables must be formatted in valid JSON. As an example, you can use the following JSON: - -```json -{ - "hello": "world" -} -``` diff --git a/versioned_docs/version-1.3/components/modeler/web-modeler/token-simulation.md b/versioned_docs/version-1.3/components/modeler/web-modeler/token-simulation.md deleted file mode 100644 index 5fd00307b6c..00000000000 --- a/versioned_docs/version-1.3/components/modeler/web-modeler/token-simulation.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: token-simulation -title: Token simulation -description: You can use the token simulation feature to see how the process will behave when it is executed. ---- - -You can use the token simulation feature to see how the process will behave when it is executed. - -### Turn on/off - -To turn the feature on/off, use the **Token Simulation** toggle (or the keyboard shortcut `T` inside the canvas). The modeling features will not work while you are in token simulation mode. 
- -![token simulation toggle](img/token-simulation/toggle.gif) - -### Start simulation - -The simulation can be started by triggering an event using the corresponding button: - -![token simulation start](img/token-simulation/start.gif) - -### Token simulation palette - -The palette on the left side provides the following controls: - -- Play/pause simulation -- Reset simulation -- Show simulation log - -![token simulation play](img/token-simulation/play.gif) - -### Animation speed palette - -The speed of the simulation can be changed using the controls in the bottom palette: - -![token simulation speed](img/token-simulation/speed.gif) diff --git a/versioned_docs/version-1.3/components/operate/img/operate-dashboard-no-processes_dark.png b/versioned_docs/version-1.3/components/operate/img/operate-dashboard-no-processes_dark.png deleted file mode 100644 index eff22424f41..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/img/operate-dashboard-no-processes_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/img/operate-dashboard-no-processes_light.png b/versioned_docs/version-1.3/components/operate/img/operate-dashboard-no-processes_light.png deleted file mode 100644 index 15dcacd32f2..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/img/operate-dashboard-no-processes_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/img/operate-introduction_dark.png b/versioned_docs/version-1.3/components/operate/img/operate-introduction_dark.png deleted file mode 100644 index e4697160bf7..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/img/operate-introduction_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/img/operate-introduction_light.png b/versioned_docs/version-1.3/components/operate/img/operate-introduction_light.png deleted file mode 100644 index bbb389a11bb..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/img/operate-introduction_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/index.md b/versioned_docs/version-1.3/components/operate/index.md deleted file mode 100644 index 15f4b41e32c..00000000000 --- a/versioned_docs/version-1.3/components/operate/index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: index -title: Introduction -description: "Operate is a tool for monitoring and troubleshooting process instances running in Zeebe." ---- - -Operate is a tool for monitoring and troubleshooting process instances running in Zeebe. - - - -In addition to providing visibility into active and completed process instances, Operate also makes it possible to carry out key operations such as resolving [incidents](./userguide/resolve-incidents-update-variables.md), and updating process instance variables. - -![operate-introduction](img/operate-introduction_light.png) - -Learn how to use Operate to monitor process instances and more features in the [Operate user guide](/components/operate/userguide/basic-operate-navigation.md). - -Operate is also available for production use (with support) in the Camunda Cloud offering. - -To try out Operate in Camunda Cloud, sign up [here](https://accounts.cloud.camunda.io/signup). 
- -Because Operate can be a helpful tool when getting started with Zeebe and building an initial proof of concept, we make it available under the [Operate trial license](https://camunda.com/legal/terms/cloud-terms-and-conditions/general-terms-and-conditions-for-the-operate-trial-version/). There are no restrictions under this license when it comes to the length of the evaluation period or the available feature set _as long as you use Operate in non-production environments only._ diff --git a/versioned_docs/version-1.3/components/operate/userguide/assets/order-process.bpmn b/versioned_docs/version-1.3/components/operate/userguide/assets/order-process.bpmn deleted file mode 100644 index 843a703ee8f..00000000000 --- a/versioned_docs/version-1.3/components/operate/userguide/assets/order-process.bpmn +++ /dev/null @@ -1,145 +0,0 @@ [order-process.bpmn: deleted XML content; the markup was not preserved here — only the sequence flow references (Flow_0biglsj, Flow_09wy0mk, Flow_0yovrqa, Flow_1wtuk91, Flow_1n8m1op, Flow_1fosyfk, Flow_1g6qdv6, Flow_0vv7a45) and the gateway condition =orderValue >= 100 remain.] \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/operate/userguide/basic-operate-navigation.md b/versioned_docs/version-1.3/components/operate/userguide/basic-operate-navigation.md deleted file mode 100644 index bbbcd4acbee..00000000000 --- a/versioned_docs/version-1.3/components/operate/userguide/basic-operate-navigation.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: basic-operate-navigation -title: Getting familiar with Operate -description: "An overview of navigating Operate and its features" ---- - -This section and the next section, [variables and incidents](./resolve-incidents-update-variables.md), assumes you’ve deployed a process to Zeebe and created at least one process instance. - -If you’re not sure how to deploy processes or create instances, visit our [getting started tutorial](/guides/getting-started/model-your-first-process.md). - -In the following sections, we’ll use the same [`order-process.bpmn`](./assets/order-process.bpmn) process model from the getting started tutorial. - -## View a deployed process - -To view a deployed process, take the following steps: - -1. In the **Instances by Process** panel on your dashboard, note the list of your deployed processes and running instances. - -![operate-view-process](../img/operate-introduction_light.png) - -2. When you click on the name of a deployed process in the **Instances by Process** panel, you’ll navigate to a view of that process model and all running instances. - -![operate-view-process](./img/operate-view-process_light.png) - -3. From this **Instances** tab, you can cancel a single running process instance. - -![operate-cancel-process-instance](./img/operate-view-process-cancel_light.png) - -## Inspect a process instance - -Running process instances appear in the **Instances** section below the process model. To inspect a specific instance, click on the instance id. - -![operate-inspect-instance](./img/operate-process-instance-id_light.png) - -Here, see details about the process instance, including the instance history and the variables attached to the instance.
- -![operate-view-instance-detail](./img/operate-view-instance-detail_light.png) diff --git a/versioned_docs/version-1.3/components/operate/userguide/delete-finished-instances.md b/versioned_docs/version-1.3/components/operate/userguide/delete-finished-instances.md deleted file mode 100644 index 340735f78d8..00000000000 --- a/versioned_docs/version-1.3/components/operate/userguide/delete-finished-instances.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -id: delete-finished-instances -title: Delete finished instances -description: "Let's delete a canceled or completed process instance." ---- - -A finished process instance, meaning a canceled or a completed process instance, can be deleted from the **Instances** page or instance detail page. - -## Delete process instance from Instances page - -To delete a process instance from the **Instances** page, take the following steps: - -1. On the **Instances** page, apply the **Finished Instances** filter. - -![operate-view-finished-instances](./img/operate-instances-finished-instances_light.png) - -1. Click the trash can icon on any process instance you want to delete. - -![operate-perform-delete-operation](./img/operate-instances-click-delete-operation_light.png) - -1. Confirm the delete operation by clicking **Delete**. - -![operate-confirm-delete-operation](./img/operate-instances-delete-operation-confirm_light.png) - -4. In the **Operations** panel on the right side of the screen, view the deleted process instance. - -![operate-view-delete-operation](./img/operate-operations-panel-delete-operation_light.png) - -## Delete process instance from instance detail page - -1. On the **Instances** page, apply the **Finished Instances** filter. - -![operate-view-finished-instances-instance-detail](./img/operate-instance-detail-finished-instances_light.png) - -2. Navigate to the instance detail page by clicking the **Instance id** of the process instance you want to delete. - -![operate-navigate-finished-instance-detail](./img/operate-instance-detail-finished-instances-navigate_light.png) - -3. Click the delete icon. - -![operate-instance-detail-perform-delete](./img/operate-finished-instance-detail_light.png) - -1. Confirm the delete operation by clicking **Delete**. - -![operate-instance-detail-confirm-delete-operation](./img/operate-instance-detail-delete-operation-confirm_light.png) - -:::note -Use caution as the process instance is now deleted and you may not access it again. 
-::: - -![operate-instance-deleted-notification](./img/operate-instance-deleted-notification_light.png) diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-create-selection_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-create-selection_dark.png deleted file mode 100644 index e4028103fa1..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-create-selection_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-create-selection_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-create-selection_light.png deleted file mode 100644 index 9cc6f51f514..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-create-selection_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-finished-instance-detail_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-finished-instance-detail_dark.png deleted file mode 100644 index c902e242d59..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-finished-instance-detail_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-finished-instance-detail_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-finished-instance-detail_light.png deleted file mode 100644 index 2aa26cc791f..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-finished-instance-detail_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved-path_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved-path_dark.png deleted file mode 100644 index 255d74b3af3..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved-path_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved-path_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved-path_light.png deleted file mode 100644 index 16e8f70bd16..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved-path_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved_dark.png deleted file mode 100644 index 950dc1affb6..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved_light.png deleted file mode 100644 index c54c302cc95..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-incident-resolved_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-deleted-notification_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-deleted-notification_dark.png deleted 
file mode 100644 index 21085454abc..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-deleted-notification_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-deleted-notification_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-deleted-notification_light.png deleted file mode 100644 index c59ca45d90d..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-deleted-notification_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm_dark.png deleted file mode 100644 index 917e492b372..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm_light.png deleted file mode 100644 index cf5d0ecf2c2..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate_dark.png deleted file mode 100644 index 0dd3daef2ee..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate_light.png deleted file mode 100644 index 1748d02d4b8..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances_dark.png deleted file mode 100644 index b6c6ebe6c42..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances_light.png deleted file mode 100644 index 9dadef128d3..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instance-detail-finished-instances_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-click-delete-operation_dark.png 
b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-click-delete-operation_dark.png deleted file mode 100644 index 2fcf88d03e4..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-click-delete-operation_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-click-delete-operation_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-click-delete-operation_light.png deleted file mode 100644 index 955251d0d52..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-click-delete-operation_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-delete-operation-confirm_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-delete-operation-confirm_dark.png deleted file mode 100644 index 1889d747e11..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-delete-operation-confirm_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-delete-operation-confirm_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-delete-operation-confirm_light.png deleted file mode 100644 index 1f906ad0fbe..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-delete-operation-confirm_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-finished-instances_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-finished-instances_dark.png deleted file mode 100644 index dd2eef4381c..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-finished-instances_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-finished-instances_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-finished-instances_light.png deleted file mode 100644 index 16660588462..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-instances-finished-instances_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-many-instances-with-incident_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-many-instances-with-incident_dark.png deleted file mode 100644 index a0e78a0b0cd..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-many-instances-with-incident_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-many-instances-with-incident_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-many-instances-with-incident_light.png deleted file mode 100644 index 1cd48d3c2a8..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-many-instances-with-incident_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel-delete-operation_dark.png 
b/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel-delete-operation_dark.png deleted file mode 100644 index 94c9c27e04e..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel-delete-operation_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel-delete-operation_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel-delete-operation_light.png deleted file mode 100644 index 0b07f3760de..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel-delete-operation_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel_dark.png deleted file mode 100644 index f1089d1cfb6..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel_light.png deleted file mode 100644 index fed31806da6..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-operations-panel_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-instance-id_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-instance-id_dark.png deleted file mode 100644 index e6b4396eb48..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-instance-id_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-instance-id_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-instance-id_light.png deleted file mode 100644 index 26aa537e971..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-instance-id_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-retry-incident_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-retry-incident_dark.png deleted file mode 100644 index 9a82335862f..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-retry-incident_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-retry-incident_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-retry-incident_light.png deleted file mode 100644 index 5c4a76fefc6..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-retry-incident_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-view-incident_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-view-incident_dark.png deleted file mode 100644 index c5c0ee23d7f..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-view-incident_dark.png and /dev/null 
differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-view-incident_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-view-incident_light.png deleted file mode 100644 index babff7ccd3c..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-process-view-incident_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-select-operation_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-select-operation_dark.png deleted file mode 100644 index 3fce19978af..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-select-operation_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-select-operation_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-select-operation_light.png deleted file mode 100644 index 616c6a677f3..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-select-operation_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-detail_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-detail_dark.png deleted file mode 100644 index 4fb7f2700e3..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-detail_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-detail_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-detail_light.png deleted file mode 100644 index 3a51f472a0d..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-detail_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-edit-icon_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-edit-icon_dark.png deleted file mode 100644 index 3a4b601da71..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-edit-icon_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-edit-icon_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-edit-icon_light.png deleted file mode 100644 index e00a74b9f09..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-edit-icon_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-incident_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-incident_dark.png deleted file mode 100644 index 89f912913df..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-incident_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-incident_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-incident_light.png deleted file mode 100644 index 95ff5723da2..00000000000 Binary files 
a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-incident_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-save-variable-icon_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-save-variable-icon_dark.png deleted file mode 100644 index 4a1795e2242..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-save-variable-icon_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-save-variable-icon_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-save-variable-icon_light.png deleted file mode 100644 index 237cf3676e4..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-instance-save-variable-icon_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process-cancel_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process-cancel_dark.png deleted file mode 100644 index 248cddcf414..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process-cancel_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process-cancel_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process-cancel_light.png deleted file mode 100644 index ba8f0bc14ba..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process-cancel_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process_dark.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process_dark.png deleted file mode 100644 index 05730eef38c..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process_light.png b/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process_light.png deleted file mode 100644 index 466f3281707..00000000000 Binary files a/versioned_docs/version-1.3/components/operate/userguide/img/operate-view-process_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/operate/userguide/operate-feedback-and-questions.md b/versioned_docs/version-1.3/components/operate/userguide/operate-feedback-and-questions.md deleted file mode 100644 index 7822f76d98a..00000000000 --- a/versioned_docs/version-1.3/components/operate/userguide/operate-feedback-and-questions.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -id: operate-feedback-and-questions -title: Giving feedback and asking questions -description: "Have questions or feedback about Operate? Contact us." ---- - -Have questions or feedback about Operate? [Contact us](/contact). 
diff --git a/versioned_docs/version-1.3/components/operate/userguide/resolve-incidents-update-variables.md b/versioned_docs/version-1.3/components/operate/userguide/resolve-incidents-update-variables.md deleted file mode 100644 index 0676bcf59ac..00000000000 --- a/versioned_docs/version-1.3/components/operate/userguide/resolve-incidents-update-variables.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -id: resolve-incidents-update-variables -title: Variables and incidents -description: "Let's examine variable and incidents." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Every process instance created for the process model used in the [getting started tutorial](/guides/getting-started/model-your-first-process.md) requires an `orderValue` so the XOR gateway evaluation will happen properly. - -Let’s look at a case where `orderValue` is present and was set as a string, but our `order-process.bpmn` model required an integer to properly evaluate the `orderValue` and route the instance. - - - - - -``` -./bin/zbctl --insecure create instance order-process --variables '{"orderId": "1234", "orderValue":"99"}' -``` - - - - - -``` -./bin/zbctl.darwin --insecure create instance order-process --variables '{"orderId": "1234", "orderValue":"99"}' -``` - - - - - -``` -./bin/zbctl.exe --insecure create instance order-process --variables '{\"orderId\": \"1234\", \ -"orderValue\": \"99\"}' -``` - - - - -## Advance an instance to an XOR gateway - -To advance the instance to our XOR gateway, we’ll create a job worker to complete the `Initiate Payment` task: - - - - - -``` -./bin/zbctl --insecure create worker initiate-payment --handler cat -``` - - - - - -``` -./bin/zbctl.darwin --insecure create worker initiate-payment --handler cat -``` - - - - - -``` -./bin/zbctl.exe --insecure create worker initiate-payment --handler "findstr .*" -``` - - - - -We’ll publish a message that will be correlated with the instance, so we can advance past the `Payment Received` intermediate message catch event: - - - - - -``` -./bin/zbctl --insecure publish message "payment-received" --correlationKey="1234" -``` - - - - - -``` -./bin/zbctl.darwin --insecure publish message "payment-received" --correlationKey="1234" -``` - - - - - -``` -./bin/zbctl.exe --insecure publish message "payment-received" --correlationKey="1234" -``` - - - - -In the Operate interface, you should now see the process instance has an incident, which means there’s a problem with process execution that must be fixed before the process instance can progress to the next step. - -![operate-incident-process-view](./img/operate-process-view-incident_light.png) - -## Diagnosing and resolving incidents - -Operate provides tools for diagnosing and resolving incidents. Let’s go through incident diagnosis and resolution step by step. - -When we inspect the process instance, we can see exactly what our incident is: `Expected to evaluate condition 'orderValue>=100' successfully, but failed because: Cannot compare values of different types: STRING and INTEGER` - -![operate-incident-instance-view](./img/operate-view-instance-incident_light.png) - -To resolve this incident, we must edit the `orderValue` variable so it’s an integer. To do so, take the following steps: - -1. Click on the edit icon next to the variable you’d like to edit. - -![operate-incident-edit-variable](./img/operate-view-instance-edit-icon_light.png) - -2. Edit the variable by removing the quotation marks from the `orderValue` value. -3. 
Click the checkmark icon to save the change. - -![operate-incident-save-variable](./img/operate-view-instance-save-variable-icon_light.png) - -We were able to solve this particular problem by _editing_ a variable, but it’s worth noting you can also _add_ a variable if a variable is missing from a process instance altogether. - -There’s one last step: initiating a “retry” of the process instance. There are two places on the process instance page where you can initiate a retry: - -![operate-retry-instance](./img/operate-process-retry-incident_light.png) - -You should now see the incident has been resolved, and the process instance has progressed to the next step. - -![operate-incident-resolved-instance-view](./img/operate-incident-resolved_light.png) - -## Complete a process instance - -If you’d like to complete the process instance, create a worker for the `Ship Without Insurance` task: - - - - - -``` -./bin/zbctl --insecure create worker ship-without-insurance --handler cat -``` - - - - - -``` -./bin/zbctl.darwin --insecure create worker ship-without-insurance --handler cat -``` - - - - - -``` -./bin/zbctl.exe --insecure create worker ship-without-insurance --handler "findstr .*" -``` - - - - -The completed process instance with the path taken: - -![operate-incident-resolved-path-view](./img/operate-incident-resolved-path_light.png) diff --git a/versioned_docs/version-1.3/components/operate/userguide/selections-operations.md b/versioned_docs/version-1.3/components/operate/userguide/selections-operations.md deleted file mode 100644 index 9ad769cf68f..00000000000 --- a/versioned_docs/version-1.3/components/operate/userguide/selections-operations.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: selections-operations -title: Selections and operations -description: "In some cases, you’ll need to retry or cancel many process instances at once." ---- - -In some cases, you’ll need to retry or cancel many process instances at once. Operate also supports this type of operation. - -Imagine a case where many process instances have an incident caused by the same issue. At some point, the underlying problem will have been resolved (for example, maybe a microservice was down for an extended period of time, then was brought back up.) - -Though the underlying problem was resolved, the affected process instances are stuck until they are “retried." - -![operate-batch-retry](./img/operate-many-instances-with-incident_light.png) - -Let's create a **selection** in Operate. A selection is a set of process instances on which you can carry out a batch retry or batch cancellation. - -To create a selection, take the following steps: - -1. Check the box next to the process instances you'd like to include. -2. Click the blue **Apply Operation on N Instances** button. - -![operate-batch-retry](img/operate-create-selection_light.png) - -3. Select the operation you want to apply. - -![operate-batch-retry](./img/operate-select-operation_light.png) - -After confirmation, you can see the **Operations** panel with the current status of all operations. 
- -![operate-batch-retry](./img/operate-operations-panel_light.png) diff --git a/versioned_docs/version-1.3/components/overview.md b/versioned_docs/version-1.3/components/overview.md deleted file mode 100644 index c5a477563b2..00000000000 --- a/versioned_docs/version-1.3/components/overview.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: components-overview -title: Overview Components -sidebar_label: Overview Components -slug: /components/ -description: "This section contains product manual content for each component in Camunda Cloud, including conceptual content." ---- - -This section contains product manual content for each component in Camunda Cloud, including conceptual content. - -- [Concepts](concepts/what-is-camunda-cloud.md) - Conceptual documentation on a variety of Camunda Cloud topics. -- [Cloud Console](cloud-console/introduction.md) - More information on working with Cloud Console. -- [Modeler](modeler/about.md) - Documentation on Camunda's modeling tools, including Web Modeler and Desktop Modeler. -- [Zeebe Engine](zeebe/zeebe-overview.md) - Complete documentation for Zeebe. -- [Operate](operate/index.md) - User guide for monitoring and troubleshooting your process with Operate. -- [Optimize]($optimize$/components/what-is-optimize/) - Detailed user guide for working with Optimize to improve your processes by identifying constraints in your system. -- [Tasklist](tasklist/introduction.md) - Documentation for working with user tasks in Tasklist. - -:::note Looking for deployment guides? - -Deployment guides for Camunda Cloud components are available in the [Self-Managed section](/self-managed/overview.md). - -::: diff --git a/versioned_docs/version-1.3/components/tasklist/img/tasklist-introduction_light.png b/versioned_docs/version-1.3/components/tasklist/img/tasklist-introduction_light.png deleted file mode 100644 index 8fc6d384669..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/img/tasklist-introduction_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/img/tasklist-start-screen_light.png b/versioned_docs/version-1.3/components/tasklist/img/tasklist-start-screen_light.png deleted file mode 100644 index cb35b293fb6..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/img/tasklist-start-screen_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/introduction.md b/versioned_docs/version-1.3/components/tasklist/introduction.md deleted file mode 100644 index a1019f6853d..00000000000 --- a/versioned_docs/version-1.3/components/tasklist/introduction.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: introduction -title: Introduction -description: "Tasklist is a tool to work with user tasks in Zeebe." ---- - -Tasklist is a ready-to-use application to rapidly implement business processes alongside [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) in Zeebe. - -With Tasklist, orchestrate human workflows critical to your business and reduce time-to-value for your process orchestration projects with an interface for manual work. - -As you model a business process using BPMN and deploy it to the workflow engine, users are notified in Tasklist when they're assigned a task. - -Tasklist provides a GraphQL API, which you can use to build your own application, or you can use the general UI we prepared for you. Read more on the [API](../userguide/api/overview) and the [UI](../userguide/user-interface/overview). 
- -Tasklist is also available for production use (with support) in the Camunda Cloud offering. To try out Tasklist in Camunda Cloud, sign up [here](https://accounts.cloud.camunda.io/signup). diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/api/overview.md b/versioned_docs/version-1.3/components/tasklist/userguide/api/overview.md deleted file mode 100644 index 212f7675f85..00000000000 --- a/versioned_docs/version-1.3/components/tasklist/userguide/api/overview.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -id: overview -title: Overview -description: "Let's take a closer look at the endpoint, authentication, access token, and API usage within Tasklist." ---- - -In this document, we'll go over the basics on how to consume the Tasklist GraphQL API. Read more about how to build a real world application [here](../tutorial). - -## Endpoint - -Tasklist provides a GraphQL API at endpoint `/graphql`. - -## Authentication - -To access the API endpoint, you need an access token. - -Your client must send a header in each request: - -`Authorization: Bearer ` - -For example, send a request using _curl_: - -```shell -curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer " -d '{"query": "{tasks(query:{}){name}}"}' http://localhost:8080/graphql -``` - -### How to obtain the access token - -You must obtain a token to use the Tasklist API. When you create a Tasklist [client](/guides/getting-started/setup-client-connection-credentials.md), you get all the information needed to connect to Tasklist. - -See our guide on [building your own client](/apis-tools/build-your-own-client.md). - -The following settings are needed: - -| Name | Description | Default value | -| -- | -- | -- | -| client id | Name of your registered client | - | -| client secret | Password for your registered client | - | -| audience | Permission name; if not given use default value | `tasklist.camunda.io` | -| authorization server url | Token issuer server | - | - -Send a token issue _POST_ request to the authorization server with the following content: - -```json -{ - "client_id": "", - "client_secret": "", - "audience": "", - "grant_type": "client_credentials" -} -``` - -See the following example with _curl_: - -```shell -curl -X POST --header 'content-type: application/json' --data '{"client_id": "", "client_secret":"","audience":"","grant_type":"client_credentials"}' https:// -``` - -If the authorization is successful, the authorization server sends back the access token, when it expires, scope, and type: - -```json -{ - "access_token": "ey...", - "scope": "...", - "expires_in": 86400, - "token_type": "Bearer" -} -``` - -## Obtaining the Tasklist schema - -To obtain the Tasklist GraphQL schema, send a request to the endpoint with a GraphQL introspection query as described [here](https://graphql.org/learn/introspection/), or use the [generated API documentation](../../../../../apis-tools/tasklist-api/). - -There are also several [tools to explore GraphQL APIs](https://altair.sirmuel.design). 
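Putting the token request and the authenticated query above together, the same flow can be driven from a small script. The following TypeScript sketch is only an illustration, not part of the documented API surface: it assumes Node.js 18+ for the global `fetch`, and the authorization server URL, client id, and client secret are placeholders you would replace with the values of your own registered client.

```ts
// Minimal sketch only: Node.js 18+ (global fetch) assumed; all URLs and
// credentials below are placeholders for your own registered client.
type TokenResponse = { access_token: string };

async function listTaskNames(): Promise<void> {
  // 1. Request an access token from the authorization server (see the settings above).
  const tokenResponse = await fetch("https://<authorization_server_url>", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      client_id: "<client_id>",
      client_secret: "<client_secret>",
      audience: "tasklist.camunda.io",
      grant_type: "client_credentials",
    }),
  });
  const { access_token } = (await tokenResponse.json()) as TokenResponse;

  // 2. Send the same tasks query as the curl example, with the token attached.
  const graphqlResponse = await fetch("http://localhost:8080/graphql", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${access_token}`,
    },
    body: JSON.stringify({ query: "{tasks(query:{}){name}}" }),
  });

  console.log(JSON.stringify(await graphqlResponse.json(), null, 2));
}

listTaskNames().catch(console.error);
```

The response printed by this sketch has the same shape as the curl example above, with the task names under `data.tasks`.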
For example, if you want to know about the provided types:

```graphql
query {
  __schema {
    queryType {
      fields {
        name
        type {
          kind
          ofType {
            kind
            name
          }
        }
      }
    }
  }
}
```

## Example requests and responses

### Get all task names

_Request:_

```graphql
{
  tasks(query: {}) {
    name
  }
}
```

_Response:_

```json
{
  "data": {
    "tasks": [
      {
        "name": "Check payment"
      },
      {
        "name": "Register the passenger"
      }
    ]
  }
}
```

### Get all completed tasks with id, name, and state

_Request:_

```graphql
{
  tasks(query: { state: COMPLETED }) {
    id
    name
    taskState
  }
}
```

_Response:_

```json
{
  "data": {
    "tasks": [
      {
        "id": "2251799813685728",
        "name": "Check payment",
        "taskState": "COMPLETED"
      }
    ]
  }
}
```

diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/api/tutorial.md b/versioned_docs/version-1.3/components/tasklist/userguide/api/tutorial.md
deleted file mode 100644
index 7e9ad47e7a2..00000000000
--- a/versioned_docs/version-1.3/components/tasklist/userguide/api/tutorial.md
+++ /dev/null
@@ -1,340 +0,0 @@
---
id: tutorial
title: Tutorial
description: "Let's implement an application using the Tasklist API."
---

## Building an application using the Tasklist API and NestJS

The Tasklist API provides a simple way for you to build apps powered by BPMN that require human interaction.

In this example, we'll use NestJS (one of the most popular Node.js backend frameworks) to build a loan request review application.

## Getting started

For this tutorial, we'll need:

- Node v14+
- The [NestJS CLI](https://docs.nestjs.com/cli/overview) tool. Install it by running `npm install -g @nestjs/cli`.
- [A cluster on Camunda Cloud](../../../../cloud-console/manage-clusters/create-cluster)
- [A set of API credentials; remember to check the Tasklist option when creating them](../../../../cloud-console/manage-clusters/manage-api-clients). Don't forget to save these, as we'll need them later.
- [A clone of this repo](https://github.com/camunda-community-hub/camunda-cloud-tasklist-api-nestjs)

## Before moving forward

If you have all the prerequisites from the getting started section above, you should have cloned a repo with the complete demo application we're going to build over this tutorial. The default branch in this repo has the complete application, so we need to `checkout` the branch `0-getting-started` before proceeding.

Inside the repo folder, you'll find some files and two folders: one is called `demo-data/` and the other `frontend/`. As might be evident, each of these folders contains a separate project.

The former is responsible for deploying the demo process into Zeebe and generating instances for that process. The latter is a front-end application that will consume our API; this project is bootstrapped with [Vite](https://vitejs.dev), [bulma](https://bulma.io) for styling, and [react-query](https://react-query.tanstack.com).

## Creating a new NestJS application

Now let's bootstrap our NestJS app. Take the following steps:

1. Open your terminal and go to the cloned repository folder.
2. Run `nest new api`.
3. Pick `yarn` as the package manager.

This will create the NestJS project for us inside the `api/` folder. We can clean up the project a bit and remove the files `api/app.controller.spec.ts`, `api/app.controller.ts`, and `api/app.service.ts`.
- -We can also remove the references from the deleted files in `api/app.module.ts`. The file should look like this: - -```ts -import { Module } from "@nestjs/common"; - -@Module({ - imports: [], - controllers: [], - providers: [], -}) -export class AppModule {} -``` - -To check if everything is working as expected, run `yarn workspace api run start:dev` from the root folder on your terminal. You should see a message similar to the one below: - -```sh -[00:00:00 AM] Starting compilation in watch mode... -[00:00:00 AM] Found 0 errors. Watching for file changes. -[Nest] 46621 - 00/00/0000, 00:00:00 AM LOG [NestFactory] Starting Nest application... -[Nest] 46621 - 00/00/0000, 00:00:00 AM LOG [InstanceLoader] AppModule dependencies initialized +12ms -[Nest] 46621 - 00/00/0000, 00:00:00 AM LOG [NestApplication] Nest application successfully started +3ms -``` - -## Generating the Tasklist service - -Inside the `api/` folder we'll need to generate a service that will be responsible for accessing the Tasklist API. Take the following steps: - -1. Run `nest g service`. -2. You'll be prompted to pick a name for the service, let's pick `tasklist`. -3. Run `yarn add @nestjs/axios`. - -A folder called `tasklist/` will be created with the service definition and test; you can delete the tests if you wish. We also installed the package `@nestjs/axios`, so we can make requests to the Tasklist API. -To make HTTP requests we need to inject the module into the service, like below: - -```ts -import { Injectable } from "@nestjs/common"; -import { HttpService } from "@nestjs/axios"; - -@Injectable() -export class TasklistService { - constructor(private readonly http: HttpService) {} -} -``` - -Now, we're ready to make requests to the API. First, let's define a Data Transfer Object (DTO) with the shape of the tasks we're going to request. For that, we can create a file in `tasklist/dto/task.dto.ts`. There, we can define the DTO as follows: - -```ts -type Variable = { - name: string; - value: string; -}; - -export class TaskDto { - id: string; - name: string; - processName: string; - creationTime: string; - completionTime: string | null; - assignee: string | null; - variables: Variable[]; - taskState: 'CREATED' | 'COMPLETED' | 'CANCELED'; - sortValues: [string, string]; - isFirst: boolean | null; - formKey: string | null; - processDefinitionId: string; - taskDefinitionId: string; -} -``` - -We can implement the requests. For this, we need to define the Tasklist API query and define the methods on the service: - -```ts -import { HttpService } from "@nestjs/axios"; -import { Injectable } from "@nestjs/common"; -import { firstValueFrom } from "rxjs"; -import { TaskDto } from "./dto/task.dto"; - -const getTasksQuery = ` - query GetTasks($state: TaskState $pageSize: Int $searchAfter: [String!] $searchBefore: [String!] $taskDefinitionId: String!) 
{ - tasks(query: { state: $state pageSize: $pageSize searchAfter: $searchAfter searchBefore: $searchBefore taskDefinitionId: $taskDefinitionId }) { - id - creationTime - variables { - value - name - } - taskState - isFirst - sortValues - } - } -`; - -type QueryVariables = { - pageSize?: number; - searchAfter?: [string, string]; - searchBefore?: [string, string]; - state?: "CREATED" | "COMPLETED"; - taskDefinitionId?: string; -}; - -@Injectable() -export class TasklistService { - constructor(private readonly http: HttpService) {} - - async getTasks(variables: QueryVariables): Promise { - const { http } = this; - const { errors, data } = ( - await firstValueFrom( - http.post("/", { - /* - for simplicity we just used Axios here, but since the Tasklist API is a GraphQL API - a package like `graphql-request` might be better suited for this - */ - query: getTasksQuery, - variables, - }) - ) - ).data; - - if (errors) { - // handle error - } - - return data.tasks; - } -} -``` - -To keep things concise, we have one query and one method here. To see the complete implementation, review [this file](https://github.com/camunda-community-hub/camunda-cloud-tasklist-api-nestjs/blob/2-generating-tasklist-service/api/src/tasklist/tasklist.service.ts). - -## Handling the Tasklist API authentication - -We have the implementation of our service, but we still can't make requests to the Tasklist API because we're not providing any credentials to the API. - -To achieve this, we need to rename the file `.env.example` to `.env` (the file needs to be on the root because we'll reuse it to generate the demo data), and the content of this file should look like this: - -```sh -ZEEBE_ADDRESS=".bru-2.zeebe.camunda.io:443" -ZEEBE_CLIENT_ID="k2FKt_PNMrRUFQO-QOR9MtCygvGsT.sm" -ZEEBE_CLIENT_SECRET="C-o5WFhvoZKv4-oQGHWg~d2MObjdr-GUv3cdqRS3~6fCoHaLleEEwnOqRToQvWda" -ZEEBE_AUTHORIZATION_SERVER_URL="https://login.cloud.camunda.io/oauth/token" -TASKLIST_API_ADDRESS="https://bru-2.tasklist.camunda.io//graphql" -ZEEBE_AUTHORIZATION_AUDIENCE="tasklist.camunda.io" -``` - -You can find all this information on the **API** tab of the cluster page. The client id and secret should be on the file you downloaded in the getting started section. - -Now that we have our credentials, we can authenticate and inject the JWT token into every request we make into Tasklist API. - -For this, we need to turn our Tasklist service into part of a module. Run `nest g module` and name it `tasklist`, the same we named the service. This will generate the module file and update `app.module.ts`. 
-We need to edit the `app.module.ts` file to use only the module: - -```ts -import { Module } from "@nestjs/common"; -import { TasklistModule } from "./tasklist/tasklist.module"; - -@Module({ - imports: [TasklistModule], - controllers: [], - providers: [], -}) -export class AppModule {} -``` - -We can install the package `@nestjs/config` and finally implement the authentication: - -```ts -import { Logger, Module, OnModuleInit } from "@nestjs/common"; -import { ConfigModule, ConfigService } from "@nestjs/config"; -import { HttpModule, HttpService } from "@nestjs/axios"; -import { TasklistService } from "./tasklist.service"; -import { firstValueFrom, map } from "rxjs"; - -type AuthResponse = { - access_token: string; - scope: string; - expires_in: number; - token_type: string; -}; - -@Module({ - imports: [ - HttpModule, - ConfigModule.forRoot({ - envFilePath: "../.env", - }), - ], - providers: [TasklistService], - exports: [TasklistService, HttpModule, ConfigModule], -}) -export class TasklistModule implements OnModuleInit { - logger = new Logger(TasklistModule.name); - - constructor( - private readonly http: HttpService, - private readonly config: ConfigService - ) {} - - public async onModuleInit() { - const { - http: { axiosRef }, - config, - logger, - } = this; - const credentials = await this.fetchCredentials(); - - logger.log("Tasklist credentials fetched"); - - axiosRef.defaults.baseURL = config.get("TASKLIST_API_ADDRESS"); - axiosRef.defaults.headers[ - "Authorization" - ] = `Bearer ${credentials.access_token}`; - axiosRef.defaults.headers["Content-Type"] = "application/json"; - setTimeout(this.onModuleInit.bind(this), credentials.expires_in * 1000); // we need convert minutes to milliseconds - } - - private async fetchCredentials() { - const { http, config } = this; - - return firstValueFrom( - http - .post(config.get("ZEEBE_AUTHORIZATION_SERVER_URL"), { - client_id: config.get("ZEEBE_CLIENT_ID"), - client_secret: config.get("ZEEBE_CLIENT_SECRET"), - audience: config.get("ZEEBE_AUTHORIZATION_AUDIENCE"), - grant_type: "client_credentials", - }) - .pipe(map((response) => response.data)) - ); - } -} -``` - -When this module is initialized, we can read the credentials using the `@nestjs/config` package, authenticate into the API, and inject the JWT into Axios. We also set a timeout to request a new token when the first one expires. - -## Creating your application API - -We're now able to implement our actual business logic, but first we need to install some packages to create our custom GraphQL API. - -Run `yarn add @nestjs/graphql graphql apollo-server-express`. - -We'll have to generate a module, a service, and a resource. To achieve this, run the following commands: - -```sh -nest g module -nest g service -nest g resource -``` - -Use the name `loanRequests` for all options. For the resource generation, select the option `GraphQL (code first)` and you don't have to generate the CRUD entry points. 
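If the code-first approach is new to you: resolvers and object types are plain TypeScript classes annotated with decorators from `@nestjs/graphql`, and the SDL schema is generated from them. Purely as a hedged sketch (the `LoanRequest` fields and class names below are illustrative, not the ones used in the demo repository), a code-first type and resolver might look like this:

```ts
import { Field, ID, Int, ObjectType, Query, Resolver } from "@nestjs/graphql";

// Illustrative only — the real loan request type and resolvers live in the demo repository linked below.
@ObjectType()
class LoanRequest {
  @Field(() => ID)
  id: string;

  @Field(() => Int)
  amount: number;

  @Field()
  state: string;
}

@Resolver(() => LoanRequest)
class LoanRequestsResolverSketch {
  @Query(() => [LoanRequest])
  loanRequests(): LoanRequest[] {
    // In the actual application, this is where the TasklistService would be queried.
    return [];
  }
}
```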
We can now change our `app.module.ts` file to its final form:

```ts
import { Module } from "@nestjs/common";
import { GraphQLModule } from "@nestjs/graphql";
import { LoanRequestsModule } from "./loan-requests/loan-requests.module";

@Module({
  imports: [
    GraphQLModule.forRoot({
      autoSchemaFile: true,
      playground: true,
    }),
    LoanRequestsModule,
  ],
})
export class AppModule {}
```

And the `loan-requests/loan-requests.module.ts` to:

```ts
import { Module } from "@nestjs/common";
import { LoanRequestsService } from "./loan-requests.service";
import { TasklistModule } from "src/tasklist/tasklist.module";
import { LoanRequestsResolver } from "./loan-requests.resolver";

@Module({
  imports: [TasklistModule],
  providers: [LoanRequestsResolver, LoanRequestsService],
  exports: [LoanRequestsService, TasklistModule],
})
export class LoanRequestsModule {}
```

We just need to implement the service, which will have three methods (one to get all requests, one to get a single request, and one to make a decision).

We will also have four resolvers for the GraphQL API (two mutations and two queries).

Find the full implementation [here](https://github.com/camunda-community-hub/camunda-cloud-tasklist-api-nestjs/tree/4-application/api/src/loan-requests).

You can run `yarn start:dev` inside the `api/` folder and the NestJS app should start without errors.

To test your API, you can access `localhost:3000/graphql` in your browser, where you should see our custom GraphQL API playground.

## Demo data generation and sample frontend

To test our app with a real frontend, we can change the port inside `api/main.ts` to `6000`. Then, run `yarn start:demo-data` from the root folder to start the backend, frontend, and demo data generation, or just `yarn start` if you don't need any new data.

diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/updating-tasklist.md b/versioned_docs/version-1.3/components/tasklist/userguide/updating-tasklist.md
deleted file mode 100644
index 40c108ed648..00000000000
--- a/versioned_docs/version-1.3/components/tasklist/userguide/updating-tasklist.md
+++ /dev/null
@@ -1,27 +0,0 @@
---
id: updating-tasklist-cloud
title: Updating Tasklist
description: Instructions on how to update your Tasklist
---

When updating Tasklist versions, it is important to pay attention to a few points:

* Every Tasklist version supports importing data for the current version and the previous one. For example, if you are running Tasklist `1.3`, it imports data from Zeebe `1.2` and `1.3`.
* If you want to update Tasklist and skip multiple minor versions, make sure to import all the data from the previous versions first (see below).

## Skipping multiple minor versions

Let's assume the following scenario: a server running Tasklist version `1.0` wants to update to version `1.3`.

We recommend following the steps below:

### Progressively update

1. Update Tasklist and Zeebe to version `1.1`
2. Let Tasklist run for some hours* and verify that everything works as expected
3. Repeat steps `1` and `2` while updating to version `1.2` (before you jump to `1.3`)
4.
Then you should be safe to update both Tasklist and Zeebe to version `1.3` - -NOTE: * Depends on your amount of data, we recommend you let each minor version run for at least 24h before updating to the next one \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claim_light.png b/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claim_light.png deleted file mode 100644 index f25d5562033..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claim_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me-empty_light.png b/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me-empty_light.png deleted file mode 100644 index ec56db967bb..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me-empty_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me-list_light.png b/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me-list_light.png deleted file mode 100644 index 27988f8c786..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me-list_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me_light.png b/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me_light.png deleted file mode 100644 index 9d2a384773b..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-claimed-by-me_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-complete-task_light.png b/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-complete-task_light.png deleted file mode 100644 index 45825f60e00..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-complete-task_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-task-completed_light.png b/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-task-completed_light.png deleted file mode 100644 index 3e63dd46c5c..00000000000 Binary files a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/img/tasklist-task-completed_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/overview.md b/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/overview.md deleted file mode 100644 index 6cc6f32efc7..00000000000 --- a/versioned_docs/version-1.3/components/tasklist/userguide/user-interface/overview.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -id: overview -title: Overview and example use case -description: "What you can do with Tasklist and an example use case." ---- - -## What can I do with Tasklist? - -Tasklist shows you all user tasks that appeared in processes; those processes are running in Zeebe. - -User tasks need an interaction from the user. 
This can be updating, adding variables, filling out a Camunda Form, or simply completion of the task. The user must first claim a task or unclaim an already claimed task. - -If the user claimed a task, the task can be completed. Different task status filters help the user choose the desired task. - -## Example use case - -If you've successfully logged in, you'll see a screen similar to the following: - -![tasklist-start-screen](../../img/tasklist-start-screen_light.png) - -On the left side of the screen, you can see tasks. On the right side of the screen, you can see details of the current selected task. - -Change the list of tasks by applying filters. You can also collapse and expand the task list. - -You can choose which tasks you want to see: - -- All open -- Claimed by me -- Unclaimed -- Completed - -Initially, we have no **Claimed by me** tasks. - -![tasklist-claimed-by-me-empty](img/tasklist-claimed-by-me-empty_light.png) - -### Claimed by me tasks - -Select the **Unclaimed** list and claim a task using the **Claim** button on the details panel: - -![tasklist-claim](img/tasklist-claim_light.png) - -### Claim a task - -Select the **Claimed by me** list to see if you claimed the task: - -![tasklist-claimed-by-me-list](img/tasklist-claimed-by-me-list_light.png) - -### Complete a task - -Once you've claimed a task, you can complete the task by adding and updating variables, and using the **Complete Task** button: - -![tasklist-claimed-by-me](img/tasklist-claimed-by-me_light.png) - -Always choose a list of tasks with a specified status. Then, select the task you want to work on. - -Complete the task and check if it is shown in the **Completed** list. - -Change variables as needed and begin completion with the **Complete Task** button. - -#### Add and update variables - -Update variables in the **Variables** section by adjusting their text field. - -To add a new variable, click **Add Variable**. - -![tasklist-complete-task](img/tasklist-complete-task_light.png) - -### Completed tasks - -You will now see the completed task by selecting the **Completed** task list: - -![tasklist-task-completed](img/tasklist-task-completed_light.png) diff --git a/versioned_docs/version-1.3/components/zeebe/open-source/assets/exporters-stream.png b/versioned_docs/version-1.3/components/zeebe/open-source/assets/exporters-stream.png deleted file mode 100644 index 7f8eff0fe36..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/open-source/assets/exporters-stream.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/open-source/community-contributions.md b/versioned_docs/version-1.3/components/zeebe/open-source/community-contributions.md deleted file mode 100644 index cc8ba733ca9..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/open-source/community-contributions.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: community-contributions -title: "Community contributions" -description: "Zeebe welcomes extensions and contributions from the community!" ---- - -Zeebe is available as open-source software on [GitHub](https://github.com/camunda-cloud/zeebe). - -Zeebe welcomes extensions and contributions from the community! 
- -We use both the [Camunda Community Hub](https://github.com/Camunda-Community-Hub/community/) and [Awesome Zeebe](https://awesome.zeebe.io/) to keep track of Zeebe ecosystem contributions, including the following: - -- Clients -- Workers -- Exporters -- Applications -- Other integrations such as Spring-Zeebe and the Apache Kafka connector - -If you built something for the Zeebe ecosystem, we encourage you to [add it to the Camunda Community Hub](https://github.com/Camunda-Community-Hub/community/issues/new?assignees=&labels=&template=new-community-extension-proposal-template.md&title=) using the **New Community Extension Proposal** template. - -If you're interested in contributing to the main Zeebe repository (versus creating an extension that lives in its own repository), be sure to start with the [Contributing to Zeebe](https://github.com/camunda/camunda/blob/main/CONTRIBUTING.md) guide in GitHub. - -## Next steps - -- [Get help and get involved](get-help-get-involved.md). -- Take a look at [Awesome Camunda Cloud](https://github.com/camunda-community-hub/awesome-camunda-cloud). -- Learn more about our [release policy](reference/release-policy.md). -- Avoid suprises by staying on top of [announcements](reference/announcements.md). -- Read more about the [Camunda Community Hub](https://camunda.com/blog/2021/03/introducing-the-camunda-community-hub/). diff --git a/versioned_docs/version-1.3/components/zeebe/open-source/exporters.md b/versioned_docs/version-1.3/components/zeebe/open-source/exporters.md deleted file mode 100644 index e56292548e1..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/open-source/exporters.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -id: exporters -title: "Exporters" -description: "An exporter provides a single entry point to process every record written on a stream." ---- - -As Zeebe processes jobs and processes, or performs internal maintenance (e.g. raft failover), it generates an ordered stream of records. - -![record-stream](assets/exporters-stream.png) - -While the clients provide no way to inspect this stream directly, Zeebe can load and configure user code that can process each of those records in the form of an exporter. - -An **exporter** provides a single entry point to process every record written on a stream. - -- Persist historical data by pushing it to an external data warehouse. -- Export records to a visualization tool (e.g. [zeebe-simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor/)). - -Zeebe only loads exporters configured through the main Zeebe YAML -configuration file. - -Once an exporter is configured, the next time Zeebe is started the exporter starts receiving records. Note that it is only guaranteed to see records produced from that point on. - -Find a reference implementation in the form of the Zeebe-maintained -[Elasticsearch exporter](https://github.com/camunda/camunda/tree/1.3.14/exporters/elasticsearch-exporter). - -The main impact exporters have on a Zeebe cluster is that they remove the burden of persisting data indefinitely. - -Once data is no longer needed by Zeebe, it queries its exporters to -know if it can be safely deleted. If so, it permanently erases it, thereby reducing disk usage. - -:::note -If no exporters are configured, Zeebe automatically erases data when it is not necessary anymore. If you need historical data, you **must** configure an exporter to stream records into your external data warehouse. 
-::: - -Regardless of how an exporter is loaded (whether through an external JAR or not), -all exporters interact in the same way with the broker, which is defined by the -[exporter interface](https://github.com/camunda/camunda/tree/1.3.14/exporter-api/src/main/java/io/camunda/zeebe/exporter/api/Exporter.java). - -## Loading - -Once configured, exporters are loaded as part of the broker startup phase before -any processing is done. - -During the loading phase, the configuration for each exporter is validated. The broker will not start if: - -- An exporter ID is not unique -- An exporter points to a non-existent/non-accessible JAR -- An exporter points to a non-existent/non-instantiable class -- An exporter instance throws an exception in its `Exporter#configure` method - -The last point is there to provide individual exporters to perform lightweight -validation of their configuration (e.g. fail if missing arguments). - -One caveat is that an instance of an exporter is created and immediately thrown away. Therefore, exporters should not perform any computationally heavy work during instantiation/configuration. - -:::note -Zeebe creates a single isolated class loader for every JAR referenced by exporter configurations. If the same JAR is reused to define different exporters, these will share the same class loader. - -Different exporters can therefore depend on the same third-party libraries without worrying about versions or class name collisions. Additionally, exporters use the system class loader for system classes, or classes packaged as part of the Zeebe JAR. -::: - -Exporter-specific configuration is handled through the exporter's `[exporters.args]` -nested map. This provides a simple `Map`, which is passed directly -in the form of a [configuration](https://github.com/camunda/camunda/tree/1.3.14/exporter-api/src/main/java/io/camunda/zeebe/exporter/api/context/Configuration.java) object when the broker calls the `Exporter#configure(Configuration)` method. - -Configuration occurs at two different phases: during the broker startup phase, and -once every time a leader is elected for a partition. - -## Processing - -At any given point, there is exactly one leader node for a given partition. - -Whenever a node becomes the leader for a partition, it runs an instance of an -[exporter stream processor](https://github.com/camunda/camunda/tree/1.3.14/broker/src/main/java/io/camunda/zeebe/broker/exporter/stream/ExporterDirector.java). - -This stream processor creates exactly one instance of each configured exporter, -and forwards every record written on the stream to each of these in turn. - -:::note -This implies there will be exactly one instance of every exporter for every partition. If you have four partitions, and at least four threads for processing, there are potentially four instances of your exporter exporting simultaneously. -::: - -Zeebe only guarantees at-least-once semantics. A record is seen at least once by an exporter, maybe more. Cases where this may happen include the following: - -- During reprocessing after raft failover (i.e. new leader election) -- On error if the position is not yet updated - -To reduce the amount of duplicate records an exporter will process, the stream -processor keeps track of the position of the last successfully exported record -for every single exporter. The position is sufficient since a stream is an ordered -sequence of records whose position is monotonically increasing. 
This position is -set by the exporter itself once it can guarantee a record has been successfully -updated. - -:::note -Zeebe exports with at-least-once semantics, so you will have to deal with duplicates. - -It's best to reduce the amount of duplicate records an exporter handles alongside Zeebe, doing as little as possible in the exporter to reduce the load on the broker. - -We recommend performing deduplication in your target system. Deduplication can be performed based on the partition ID and record position. Not all records have keys, but every record has a partition ID and a unique position relative to that partition. We also recommend refraining from blocking operations in the exporter, as it will have a negative impact on performance of the system. -::: - -### Error handling - -If an error occurs during the `Exporter#open(Context)` phase, the stream -processor fails and is restarted, potentially fixing the error. Worst case -scenario, this means no exporter runs at all until these errors stop. - -If an error occurs during the `Exporter#close` phase, it is logged, but will -still allow other exporters to gracefully finish their work. - -If an error occurs during processing, we continuously retry the same record until -no error is produced. Worst case scenario, this means a failing exporter could bring -all exporters to a halt. Currently, exporter implementations are expected to -implement their own retry/error handling strategies, though this may change in the -future. - -### Performance impact - -Zeebe naturally incurs a performance impact for each loaded exporter. A slow -exporter slows down all other exporters for a given partition, and in the -worst case, could completely block a thread. - -It's therefore recommended to keep exporters as simple as possible, and perform -any data enrichment or transformation through the external system. diff --git a/versioned_docs/version-1.3/components/zeebe/open-source/get-help-get-involved.md b/versioned_docs/version-1.3/components/zeebe/open-source/get-help-get-involved.md deleted file mode 100644 index 0cfe0ea8422..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/open-source/get-help-get-involved.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: get-help-get-involved -title: "Get help and get involved" -description: "Ask questions, report problems, and make contributions." ---- - -We provide a few different public-facing Zeebe support and feedback channels where users can ask questions, report problems, and make contributions. - -### Camunda Cloud user forum - -The best place to ask questions about Zeebe and troubleshoot issues is the [forum](https://forum.camunda.io). - -The Zeebe team monitors the forum closely, and we do our best to respond to all questions in a timely manner. - -### Public Slack group - -Join our [public Slack group](https://zeebe-slack-invite.herokuapp.com/), where you can ask questions, share community contributions, and connect with other Zeebe users. - -### Create an issue in GitHub - -Did you find a problem in Zeebe? Do you have a suggestion for an improvement? - -Create an issue in the [Zeebe GitHub project](https://github.com/camunda-cloud/zeebe/issues) to let us know. 
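Circling back to the exporters section above: it recommends deduplicating exported records in the target system based on the partition ID and record position. Here is a minimal, hedged sketch of that idea in TypeScript (the record shape and function names are invented for illustration; in practice, the last handled position should be persisted together with the write to the target system):

```ts
// Sketch of consumer-side deduplication: because Zeebe exports with
// at-least-once semantics, a record can be ignored if its position is not
// greater than the last position already handled for that partition.
type ExportedRecord = { partitionId: number; position: number; payload: unknown };

const lastHandledPosition = new Map<number, number>();

function handleOnce(record: ExportedRecord, handle: (r: ExportedRecord) => void): void {
  const last = lastHandledPosition.get(record.partitionId) ?? -1;
  if (record.position <= last) {
    return; // duplicate delivery — already processed
  }
  handle(record);
  lastHandledPosition.set(record.partitionId, record.position);
}
```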
diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/architecture.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/architecture.md deleted file mode 100644 index 2abf763b154..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/architecture.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: architecture -title: "Architecture" -description: "There are four main components in Zeebe's architecture: clients, gateways, brokers, and exporters." ---- - -There are four main components in Zeebe's architecture: clients, gateways, brokers, and exporters. - -![zeebe-architecture](assets/zeebe-architecture.png) - -In Camunda Cloud, you work exclusively with clients. Gateways, brokers, and exporters are pre-configured to provide the service, but are not accessible. - -In local or private cloud deployments, all components are relevant. - -## Clients - -Clients send commands to Zeebe to: - -- Deploy processes -- Carry out business logic - - Start process instances - - Publish messages - - Activate jobs - - Complete jobs - - Fail jobs -- Handle operational issues - - Update process instance variables - - Resolve incidents - -Client applications can be scaled up and down separately from Zeebe. The Zeebe brokers do not execute any business logic. - -Clients are libraries you embed in an application (e.g. a microservice that executes your business logic) to connect to a Zeebe cluster. - -Clients connect to the Zeebe gateway via [gRPC](https://grpc.io), which uses HTTP/2-based transport. To learn more about gRPC in Zeebe, review the [Zeebe API (gRPC)](/apis-tools/grpc.md). - -The Zeebe project includes officially-supported Java and Go clients. [Community clients](/apis-tools/community-clients/index.md) have been created in other languages, including C#, Ruby, and JavaScript. The gRPC protocol makes it possible to [generate clients](/apis-tools/build-your-own-client.md) in a range of different programming languages. - -### Job workers - -A job worker is a Zeebe client that uses the client API to first activate jobs, and upon completion, either complete or fail the job. - -## Gateways - -A gateway serves as a single entry point to a Zeebe cluster and forwards requests to brokers. - -The gateway is stateless and sessionless, and gateways can be added as necessary for load balancing and high availability. - -## Brokers - -The Zeebe broker is the distributed workflow engine that tracks the state of active process instances. - -Brokers can be partitioned for horizontal scalability and replicated for fault tolerance. A Zeebe deployment often consists of more than one broker. - -It's important to note that no application business logic lives in the broker. Its only responsibilities are: - -- Processing commands sent by clients -- Storing and managing the state of active process instances -- Assigning jobs to job workers - -Brokers form a peer-to-peer network in which there is no single point of failure. This is possible because all brokers perform the same kind of tasks and the responsibilities of an unavailable broker are transparently reassigned in the network. - -## Exporters - -The exporter system provides an event stream of state changes within Zeebe. This data has many potential uses, including but not limited to: - -- Monitoring the current state of running process instances -- Analysis of historic process data for auditing, business intelligence, etc. 
-- Tracking [incidents](/components/concepts/incidents.md) created by Zeebe - -The exporter includes an API you can use to stream data into a storage system of your choice. Zeebe includes an out-of-the-box [Elasticsearch exporter](https://github.com/camunda/camunda/tree/1.3.14/exporters/elasticsearch-exporter), and other [community-contributed exporters](https://awesome.zeebe.io) are also available. diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/activity-lifecycle.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/activity-lifecycle.png deleted file mode 100644 index b603cba33f7..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/activity-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/client-server.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/client-server.png deleted file mode 100644 index 67a693a8445..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/client-server.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/cluster.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/cluster.png deleted file mode 100644 index e7869e7a2ad..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/cluster.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/commit.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/commit.png deleted file mode 100644 index 1b57c518c8f..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/commit.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/data-distribution.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/data-distribution.png deleted file mode 100644 index 1c6da68aae1..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/data-distribution.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/event-lifecycle.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/event-lifecycle.png deleted file mode 100644 index 329e1004845..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/event-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/exporters-stream.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/exporters-stream.png deleted file mode 100644 index 7f8eff0fe36..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/exporters-stream.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/internal-processing-job.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/internal-processing-job.png deleted file mode 100644 index 3671ba8b5fc..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/internal-processing-job.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/order-process.png 
b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/order-process.png deleted file mode 100644 index ea97e941d39..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/partition.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/partition.png deleted file mode 100644 index c60deb34726..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/partition.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/pass-through-lifecycle.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/pass-through-lifecycle.png deleted file mode 100644 index 5ade0b31af9..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/pass-through-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-conditions.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-conditions.png deleted file mode 100644 index 6b3483e9519..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-conditions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-data-flow.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-data-flow.png deleted file mode 100644 index 29b0470dd9a..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-data-flow.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-events.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-events.png deleted file mode 100644 index 499e5562651..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-events.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-parallel-gw.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-parallel-gw.png deleted file mode 100644 index b9208f0ec82..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-parallel-gw.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-parallel-mi.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-parallel-mi.png deleted file mode 100644 index 2ff63f00b68..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-parallel-mi.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-sequence.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-sequence.png deleted file mode 100644 index 55cebecee05..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process-sequence.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process.png deleted file mode 100644 index 
8576c92b106..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/processes-data-based-conditions.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/processes-data-based-conditions.png deleted file mode 100644 index 63126a12e57..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/processes-data-based-conditions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/processes-parallel-gateway.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/processes-parallel-gateway.png deleted file mode 100644 index e32ce06f1b9..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/processes-parallel-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/task-workers-subscriptions.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/task-workers-subscriptions.png deleted file mode 100644 index e55e2650c65..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/task-workers-subscriptions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/zeebe-architecture.png b/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/zeebe-architecture.png deleted file mode 100644 index fadeb2ac4a6..00000000000 Binary files a/versioned_docs/version-1.3/components/zeebe/technical-concepts/assets/zeebe-architecture.png and /dev/null differ diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/clustering.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/clustering.md deleted file mode 100644 index 2174ae6bff7..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/clustering.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -id: clustering -title: "Clustering" -description: "Zeebe can operate as a cluster of brokers, forming a peer-to-peer network." ---- - -Zeebe can operate as a cluster of brokers, forming a peer-to-peer network. - -In this network, all brokers have the same responsibilities and there is no single point of failure. - -![cluster](assets/cluster.png) - -## Gossip membership protocol - -Zeebe implements the [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to know which brokers are currently part of the cluster. - -The cluster is bootstrapped using a set of well-known bootstrap brokers, to which the others can connect. To achieve this, each broker must have at least one bootstrap broker as its initial contact point in their configuration: - -```yaml ---- -cluster: - initialContactPoints: [node1.mycluster.loc:26502] -``` - -When a broker is connected to the cluster for the first time, it fetches the topology from the initial contact points and starts gossiping with the other brokers. Brokers keep cluster topology locally across restarts. - -## Raft consensus and replication protocol - -To ensure fault tolerance, Zeebe replicates data across servers using the [raft protocol](). - -Data is divided into partitions (shards). Each partition has a number of replicas. Among the replica set, a **leader** is determined by the raft protocol, which takes in requests and performs all of the processing. 
All other brokers are passive **followers**. When the leader becomes unavailable, the followers transparently select a new leader. - -Each broker in the cluster may be both leader and follower at the same time for different partitions. In an ideal world, this leads to client traffic distributed evenly across all brokers. - -![cluster](assets/data-distribution.png) - -:::note -There is no active load balancing across partitions. Each leader election for any partition is autonomous and independent of leader elections for other partitions. - -This may lead to one node becoming the leader for all partitions. This is not a problem for fault tolerance as the guarantees of replication remain. However, this may negatively impact throughput as all traffic hits one node. -::: - -## Commit - -Before a new record on a partition can be processed, it must be replicated to a quorum (typically majority) of followers. This procedure is called **commit**. Committing ensures a record is durable, even in case of complete data loss on an individual broker. The exact semantics of committing are defined by the raft protocol. - -![cluster](assets/commit.png) diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/exporters.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/exporters.md deleted file mode 100644 index d46f4cd05bc..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/exporters.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -id: exporters -title: "Exporters" -description: "As Zeebe processes jobs and processes, or performs internal maintenance, it generates an ordered stream of records." ---- - -As Zeebe processes jobs and processes, or performs internal maintenance (e.g. raft failover), it generates an ordered stream of records. - -:::note - -Exporters are not available in Camunda Cloud Software-as-a-Service (SaaS). - -::: - -![record-stream](assets/exporters-stream.png) - -While the clients provide no way to inspect this stream directly, Zeebe can load -and configure user code that can process each record in the form of an exporter. - -An **exporter** provides a single entry point to process every record written on a stream. - -- Persist historical data by pushing it to an external data warehouse. -- Export records to a visualization tool (e.g. [zeebe-simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor)). - -Zeebe only loads exporters configured through the main Zeebe YAML configuration file. - -Once an exporter is configured, the next time Zeebe starts, the exporter -starts receiving records. Note that it is only guaranteed to see records -produced from that point on. - -Find a reference implementation in the form of the Zeebe-maintained -[Elasticsearch exporter](https://github.com/camunda/camunda/tree/1.3.14/exporters/elasticsearch-exporter). - -The main impact exporters have on a Zeebe cluster is that they remove the burden -of persisting data indefinitely. - -Once data is not needed by Zeebe anymore, it queries its exporters to -know if it can be safely deleted, and if so, permanently erases it, thereby -reducing disk usage. - -:::note -If no exporters are configured, Zeebe automatically erases data when it is not necessary anymore. If you need historical data, you **must** configure an exporter to stream records into your external data warehouse. 
-::: - -Regardless of how an exporter is loaded (whether through an external JAR or not), -all exporters interact in the same way with the broker, which is defined by the -[exporter interface](https://github.com/camunda/camunda/tree/1.3.14/exporter-api/src/main/java/io/camunda/zeebe/exporter/api/Exporter.java). - -## Loading - -Once configured, exporters are loaded as part of the broker startup phase, before -any processing is done. - -During the loading phase, the configuration for each exporter is validated, such that the broker will not start if: - -- An exporter ID is not unique -- An exporter points to a non-existent/non-accessible JAR -- An exporter points to a non-existent/non-instantiable class -- An exporter instance throws an exception in its `Exporter#configure` method. - -The last point is there to provide individual exporters to perform lightweight -validation of their configuration (e.g. fail if missing arguments). - -One caveat is that an instance of an exporter is created and immediately thrown away. Therefore, exporters should not perform any computationally -heavy work during instantiation/configuration. - -:::note -Zeebe creates a single isolated class loader for every JAR referenced by exporter configurations. If the same JAR is reused to define different exporters, these will share the same class loader. - -Therefore, different exporters can depend on the same third-party libraries without worrying about versions or class -name collisions. - -Additionally, exporters use the system class loader for system classes, or classes packaged as part of the Zeebe JAR. -::: - -Exporter-specific configuration is handled through the exporter's `[exporters.args]` -nested map. This provides a `Map` passed directly -in the form of a [configuration](https://github.com/camunda/camunda/tree/1.3.14/exporter-api/src/main/java/io/camunda/zeebe/exporter/api/context/Configuration.java) object when the broker calls the `Exporter#configure(Configuration)` method. - -Configuration occurs at two different phases: during the broker startup phase, and -once every time a leader is elected for a partition. - -## Processing - -At any given point, there is exactly one leader node for a given partition. - -Whenever a node becomes the leader for a partition, it runs an instance of an -[exporter stream processor](https://github.com/camunda/camunda/tree/1.3.14/broker/src/main/java/io/camunda/zeebe/broker/exporter/stream/ExporterDirector.java). - -This stream processor creates exactly one instance of each configured exporter, -and forwards every record written on the stream to each of these in turn. - -:::note -This implies there will be exactly one instance of every exporter for every partition. If you have four partitions, and at least four threads for processing, there are potentially four instances of your exporter exporting simultaneously. -::: - -Zeebe only guarantees at-least-once semantics. That is, a record is seen at least once by an exporter, maybe more. Cases where this may happen -include: - -- During reprocessing after raft failover (i.e. new leader election) -- On error if the position is not yet updated - -To reduce the amount of duplicate records an exporter processes, the stream -processor keeps track of the position of the last successfully exported record -for every single exporter. The position is sufficient since a stream is an ordered -sequence of records whose position is monotonically increasing. 
This position is -set by the exporter once it can guarantee a record is successfully -updated. - -:::note -Although Zeebe tries to reduce the amount of duplicate records an exporter must handle, it is likely it will have to. Therefore, it is necessary that export operations be idempotent. This can be implemented either in the exporter itself, but if it exports to an external system, it is recommended you perform deduplication there to reduce the load on Zeebe. Refer to the exporter-specific documentation for how this is meant to be achieved. -::: - -### Error handling - -If an error occurs during the `Exporter#open(Context)` phase, the stream -processor fails and is restarted, potentially fixing the error. Worst case -scenario, this means no exporter runs until these errors stop. - -If an error occurs during the `Exporter#close` phase, it is logged, but will -still allow other exporters to gracefully finish their work. - -If an error occurs during processing, we continuously retry the same record until -no error is produced. Worst case scenario, this means a failing exporter could bring -all exporters to a halt. Currently, exporter implementations are expected to -implement their own retry/error handling strategies, though this may change in the -future. - -### Performance impact - -Zeebe naturally incurs a performance impact for each loaded exporter. A slow -exporter slows down all other exporters for a given partition, and in the -worst case, could completely block a thread. - -It's therefore recommended to keep exporters as simple as possible, and perform -any data enrichment or transformation through the external system. diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/index.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/index.md deleted file mode 100644 index 7880cb07ec3..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: index -sidebar_label: "Overview" -title: "Technical concepts" -description: "This section gives an overview of Zeebe's underlying technical concepts." ---- - -This section gives an overview of Zeebe's underlying technical concepts. - -- [Architecture](architecture.md) - Introduces you to the internal components of Zeebe, as well as interfaces for external systems to interact with Zeebe. -- [Clustering](clustering.md) - Discusses the internal structure and properties of a Zeebe cluster. -- [Partitions](partitions.md) - Sheds light on how Zeebe achieves horizontal scalability. -- [Internal processing](internal-processing.md) - Explains the basics of Zeebe's event processing. -- [Process lifecycles](process-lifecycles.md) - Expands on the event processing concept and goes into more detail regarding the lifecycles of selected process elements. -- [Protocols](protocols.md) - Explains how external clients communicate with Zeebe. -- [Exporters](exporters.md) - Discusses the extension point to add additional processing logic for each record in the event stream. - -In addition to these sections, you may also be interested in our [Best Practices](/components/best-practices/overview.md). 
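Returning to the exporters section above: the actual extension point is the Java `Exporter` interface it links to. Purely to illustrate the lifecycle and position contract that section describes — configure (validated at startup), open, export each record while advancing the last successfully exported position, close — here is a non-authoritative TypeScript-flavoured sketch with invented names:

```ts
// Illustrative sketch only — not the real Java Exporter interface.
interface RecordMetadata {
  partitionId: number;
  position: number;
}

class LoggingExporterSketch {
  private lastExportedPosition = -1;

  configure(args: Record<string, unknown>): void {
    // Lightweight validation only; heavy work here would slow broker startup.
    if (args.url === undefined) throw new Error("missing 'url' argument");
  }

  open(): void {
    // Acquire connections/resources for the target system here.
  }

  export(record: RecordMetadata): void {
    // Push the record to the external system, then acknowledge it by advancing
    // the position so the broker may eventually delete the underlying data.
    this.lastExportedPosition = record.position;
  }

  close(): void {
    // Release resources; errors here are logged but don't block other exporters.
  }
}
```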
diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/internal-processing.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/internal-processing.md deleted file mode 100644 index faa7ccd9212..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/internal-processing.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -id: internal-processing -title: "Internal processing" -description: "This document analyzes the state machines, events and commands, stateful stream processing, driving the engine, and handling backpressure within Zeebe." ---- - -Internally, Zeebe is implemented as a collection of **stream processors** working on record streams \(partitions\). The stream processing model is used since it is a unified approach to provide: - -- Command protocol \(request-response\), -- Record export \(streaming\), -- Process evaluation \(asynchronous background tasks\) - -Record export solves the history problem and the stream provides the kind of exhaustive audit log a workflow engine needs to produce. - -## State machines - -Zeebe manages stateful entities: jobs, processes, etc. Internally, these entities are implemented as **state machines** managed by a stream processor. - -An instance of a state machine is always in one of several logical states. From each state, a set of transitions defines the next possible states. Transitioning into a new state may produce outputs/side effects. - -Let's look at the state machine for jobs: - -![partition](assets/internal-processing-job.png) - -Every oval is a state. Every arrow is a state transition. Note how each state transition is only applicable in a specific state. For example, it is not possible to complete a job when it is in state `CREATED`. - -## Events and commands - -Every state change in a state machine is called an **event**. Zeebe publishes every event as a record on the stream. - -State changes can be requested by submitting a **command**. A Zeebe broker receives commands from two sources: - -- Clients send commands remotely. For example, Deploying processes, starting process instances, creating and completing jobs, etc. -- The broker itself generates commands. For example, locking a job for exclusive processing by a worker. - -Once received, a command is published as a record on the addressed stream. - -## Stateful stream processing - -A stream processor reads the record stream sequentially and interprets the commands with respect to the addressed entity's lifecycle. More specifically, a stream processor repeatedly performs the following steps: - -1. Consume the next command from the stream. -1. Determine whether the command is applicable based on the state lifecycle and the entity's current state. -1. If the command is applicable, apply it to the state machine. If the command was sent by a client, send a reply/response. -1. If the command is not applicable, reject it. If it was sent by a client, send an error reply/response. -1. Publish an event reporting the entity's new state. - -For example, processing the **Create Job** command produces the event **Job Created**. - -## Driving the engine - -As a workflow engine, Zeebe must continuously drive the execution of its processes. Zeebe achieves this by also writing follow-up commands to the stream as part of the processing of other commands. - -For example, when the **Complete Job** command is processed, it does not just complete the job; it also writes the **Complete Activity** command for the corresponding service task. 
-This command can in turn be processed, completing the service task and driving the execution of the process instance to the next step. - -## Handling back-pressure - -When a broker receives a client request, it is written to the **event stream** first, and processed later by the stream processor. If the processing is slow or if there are many client requests in the stream, it might take too long for the processor to start processing the command. If the broker keeps accepting new requests from the client, the back log increases and the processing latency can grow beyond an acceptable time. - -To avoid such problems, Zeebe employs a back-pressure mechanism. -When the broker receives more requests than it can process with an acceptable latency, it rejects some requests. - -The maximum rate of requests that can be processed by a broker depends on the processing capacity of the machine, the network latency, current load of the system, etc. - -Hence, there is no fixed limit configured in Zeebe for the maximum rate of requests it accepts. Instead, Zeebe uses an adaptive algorithm to dynamically determine the limit of the number of inflight requests (the requests that are accepted by the broker, but not yet processed). - -The inflight request count is incremented when a request is accepted, and decremented when a response is sent back to the client. The broker rejects requests when the inflight request count reaches the limit. - -When the broker rejects requests due to back-pressure, the clients can retry them with an appropriate retry strategy. If the rejection rate is high, it indicates that the broker is constantly under high load. -In that case, it is recommended to reduce the request rate. diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/partitions.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/partitions.md deleted file mode 100644 index 832790b7458..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/partitions.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: partitions -title: "Partitions" -description: "In Zeebe, all data is organized into partitions. A partition is a persistent stream of process-related events." ---- - -In Zeebe, all data is organized into **partitions**. A **partition** is a persistent stream of process-related events. - -In a cluster of brokers, partitions are distributed among the nodes so it can be thought of as a **shard**. When you bootstrap a Zeebe cluster, you can configure how many partitions you need. - -:::note -If you've worked with the [Apache Kafka System](https://kafka.apache.org/) before, the concepts presented on this page will sound very familiar to you. -::: - -## Usage examples - -Whenever you deploy a process, you deploy it to the first partition. The process is then distributed to all partitions. On all partitions, this process receives the same key and version such that it can be consistently identified. - -When you start an instance of a process, the client library then routes the request to one partition in which the process instance is published. All subsequent processing of the process instance happens in that partition. - -## Distribution over partitions - -When a process instance is created in a partition, its state is stored and managed by the same partition until its execution is terminated. The partition in which it is created is determined by various factors. 
- -- When a client sends a command `CreateProcessInstance` or `CreateProcessInstanceWithResult`, gateway chooses a partition in a round-robin manner and forwards the requests to that partition. The process instance is created in that partition. -- When a client publishes a message to trigger a **message start event**, the message is forwarded to a partition based on the correlation key of the message. The process instance is created on the same partition where the message is published. -- Process instances created by **timer start events** are always created on partition 1. - -## Scalability - -Use partitions to scale your process processing. Partitions are dynamically distributed in a Zeebe cluster and for each partition there is one leading broker at a time. This **leader** accepts requests and performs event processing for the partition. Let's assume you want to distribute process processing load over five machines. You can achieve that by bootstraping five partitions. - -:::note -While each partition has one leading broker, _not all brokers are guaranteed to lead a partition_. A broker can lead more than one partition, and, at times, a broker in a cluster may act only as a replication back-up for partitions. This broker will not be doing any active work on processes until a partition fail-over happens and the broker gets elected as the new leader for that partition. -::: - -## Partition data layout - -A partition is a persistent append-only event stream. Initially, a partition is empty. As the first entry is inserted, it takes the place of the first entry. As the second entry comes in and is inserted, it takes the place as the second entry, and so on and so forth. Each entry has a position in the partition which uniquely identifies it. - -![partition](assets/partition.png) - -## Replication - -For fault tolerance, data in a partition is replicated from the **leader** of the partition to its **followers**. Followers are other Zeebe broker nodes that maintain a copy of the partition without performing event processing. - -## Partition distribution - -If no other configuration is specified, partitions are distributed in a guaranteed round-robin fashion across all brokers in the cluster, considering the number of nodes, number of partitions, and the replication factor. For example, the first partition will always be hosted by the first node, plus the following nodes based on the replication factor. The second partition will be hosted on the second node and the following to fulfill the replication factor. - -As an example, the following partition schemes are guaranteed: - -### Example 1 - -#### Context - -- Number of nodes: 4 -- Number of partitions: 7 -- Replication factor: 3 - -#### Partition layout - -| | Node 1 | Node 2 | Node 3 | Node 4 | -| -----------:|:------:|:------:|:------:|:------:| -| Partition 1 | X | X | X | | -| Partition 2 | | X | X | X | -| Partition 3 | X | | X | X | -| Partition 4 | X | X | | X | -| Partition 5 | X | X | X | | -| Partition 6 | | X | X | X | -| Partition 7 | X | | X | X | - -### Example 2 - -#### Context - -- Number of nodes: 5 -- Number of partitions: 3 -- Replication factor: 3 - -#### Partition layout - -| | Node 1 | Node 2 | Node 3 | Node 4 | Node 5 | -| -----------:|:------:|:------:|:------:|:------:|:------:| -| Partition 1 | X | X | X | | | -| Partition 2 | | X | X | X | | -| Partition 3 | | | X | X | X | - -## Recommendations - -Choosing the number of partitions depends on the use case, workload, and cluster setup. 
Here are some rules of thumb: - -- For testing and early development, start with a single partition. Note that Zeebe's process processing is highly optimized for efficiency, so a single partition can already handle high event loads. -- With a single Zeebe broker, a single partition is usually enough. However, if the node has many cores and the broker is configured to use them, more partitions can increase the total throughput (around two threads per partition). -- Base your decisions on data. Simulate the expected workload, measure, and compare the performance of different partition setups. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/process-lifecycles.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/process-lifecycles.md deleted file mode 100644 index ad556551c3a..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/process-lifecycles.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -id: process-lifecycles -title: "Process lifecycles" -description: "In Zeebe, the process execution is represented internally by events of type `ProcessInstance`." ---- - -In Zeebe, the process execution is represented internally by events of type `ProcessInstance`. The events are written to the log stream and can be observed by an exporter. - -Each event is one step in a process instance lifecycle. All events of one process instance have the same `processInstanceKey`. - -Events which belong to the same element instance (e.g. a task) have the same `key`. The element instances have different lifecycles depending on the type of element. - -## (Sub-)Process/Activity/Gateway lifecycle - -![activity lifecycle](assets/activity-lifecycle.png) - -## Event lifecycle - -![event lifecycle](assets/event-lifecycle.png) - -## Sequence flow lifecycle - -![sequence flow lifecycle](assets/pass-through-lifecycle.png) - -## Example - -![order process](assets/process.png) - -Given the above process, a successful execution yields the following records in the commit log:

| Intent | Element id | Element type |
| --- | --- | --- |
| ELEMENT_ACTIVATING | order-process | process |
| ELEMENT_ACTIVATED | order-process | process |
| ELEMENT_ACTIVATING | order-placed | start event |
| ELEMENT_ACTIVATED | order-placed | start event |
| ELEMENT_COMPLETING | order-placed | start event |
| ELEMENT_COMPLETED | order-placed | start event |
| SEQUENCE_FLOW_TAKEN | to-collect-money | sequence flow |
| ELEMENT_ACTIVATING | collect-money | task |
| ELEMENT_ACTIVATED | collect-money | task |
| ELEMENT_COMPLETING | collect-money | task |
| ELEMENT_COMPLETED | collect-money | task |
| SEQUENCE_FLOW_TAKEN | to-fetch-items | sequence flow |
| ... | ... | ... |
| SEQUENCE_FLOW_TAKEN | to-order-delivered | sequence flow |
| ELEMENT_ACTIVATING | order-delivered | end event |
| ELEMENT_ACTIVATED | order-delivered | end event |
| ELEMENT_COMPLETING | order-delivered | end event |
| ELEMENT_COMPLETED | order-delivered | end event |
| ELEMENT_COMPLETING | order-process | process |
| ELEMENT_COMPLETED | order-process | process |
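Because all of these records carry the same `processInstanceKey`, and records belonging to one element instance share the same `key`, a consumer of the exported stream can reconstruct each element's lifecycle by grouping on that key. The sketch below is purely illustrative: it uses a hand-rolled `LifecycleRecord` class rather than Zeebe's actual record type, and hard-codes a few of the records from the table above.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ElementLifecycles {

  // Hand-rolled stand-in for an exported ProcessInstance record.
  record LifecycleRecord(long processInstanceKey, long key, String elementId, String intent) {}

  // Groups intents by element instance key, preserving the order they were written in.
  static Map<Long, List<String>> byElementInstance(final List<LifecycleRecord> records) {
    final Map<Long, List<String>> lifecycles = new LinkedHashMap<>();
    for (final LifecycleRecord r : records) {
      lifecycles.computeIfAbsent(r.key(), k -> new ArrayList<>()).add(r.intent());
    }
    return lifecycles;
  }

  public static void main(String[] args) {
    final List<LifecycleRecord> log = List.of(
        new LifecycleRecord(1L, 10L, "order-process", "ELEMENT_ACTIVATING"),
        new LifecycleRecord(1L, 10L, "order-process", "ELEMENT_ACTIVATED"),
        new LifecycleRecord(1L, 11L, "order-placed", "ELEMENT_ACTIVATING"),
        new LifecycleRecord(1L, 11L, "order-placed", "ELEMENT_ACTIVATED"),
        new LifecycleRecord(1L, 11L, "order-placed", "ELEMENT_COMPLETING"),
        new LifecycleRecord(1L, 11L, "order-placed", "ELEMENT_COMPLETED"));

    // Prints, per element instance key, the intent sequence it went through.
    byElementInstance(log).forEach((key, intents) -> System.out.println(key + ": " + intents));
  }
}
```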
    diff --git a/versioned_docs/version-1.3/components/zeebe/technical-concepts/protocols.md b/versioned_docs/version-1.3/components/zeebe/technical-concepts/protocols.md deleted file mode 100644 index 327395c1a1f..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/technical-concepts/protocols.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: protocols -title: "Protocols" -description: "Let's discuss gRPC and supported clients." ---- - -Zeebe clients connect to brokers via a stateless gateway. - -For the communication between client and gateway, [gRPC](https://grpc.io/) is used. The communication protocol is defined using Protocol Buffers v3 ([proto3](https://developers.google.com/protocol-buffers/docs/proto3)), and you can find it in the -[Zeebe repository](https://github.com/camunda/camunda/tree/1.3.14/gateway-protocol). - -## What is gRPC? - -gRPC was first developed by Google and is now an open-source project and part of the Cloud Native Computing Foundation. - -If you’re new to gRPC, see [What is gRPC](https://grpc.io/docs/guides/index.html) on the project website for an introduction. - -## Why gRPC? - -gRPC has many beneficial features that make it a good fit for Zeebe, including the following: - -- Supports bi-directional streaming for opening a persistent connection and sending or receiving a stream of messages between client and server -- Uses the common HTTP/2 protocol by default -- Uses Protocol Buffers as an interface definition and data serialization mechanism–specifically, Zeebe uses proto3, which supports easy client generation in ten different programming languages. - -## Supported clients - -Currently, Zeebe officially supports two gRPC clients: one in [Java](/apis-tools/java-client/index.md), and one in [Golang](/apis-tools/go-client/get-started.md). - -[Community clients](/apis-tools/community-clients/index.md) have been created in other languages, including C#, Ruby, and JavaScript. - -If there is no client in your target language yet, you can [build your own client](/apis-tools/build-your-own-client.md) in a range of different programming languages. - -## Intercepting calls - -Zeebe supports [loading arbitrary gRPC server interceptors](self-managed/zeebe-deployment/interceptors.md) to intercept incoming -calls. \ No newline at end of file diff --git a/versioned_docs/version-1.3/components/zeebe/zeebe-overview.md b/versioned_docs/version-1.3/components/zeebe/zeebe-overview.md deleted file mode 100644 index 7d2776e43b2..00000000000 --- a/versioned_docs/version-1.3/components/zeebe/zeebe-overview.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: zeebe-overview -title: "Zeebe" -sidebar_label: "Introduction" -description: "Zeebe is the process automation engine powering Camunda Cloud." ---- - -Zeebe is the process automation engine powering Camunda Cloud. While written in Java, you do not need to be a Java developer to use Zeebe. - -With Zeebe you can: - -- Define processes graphically in [BPMN 2.0](../modeler/bpmn/bpmn-coverage.md). -- Choose any [gRPC](/apis-tools/grpc.md)-supported programming language to implement your workers. -- Build processes that react to events from Apache Kafka and other messaging platforms. -- Use as part of a software as a service (SaaS) offering with Camunda Cloud or deploy with Docker and Kubernetes (in the cloud or on-premises) with Camunda Cloud Self-Managed. -- Scale horizontally to handle very high throughput. -- Rely on fault tolerance and high availability for your processes. 
-- Export processes data for monitoring and analysis (currently only available through the [Elasticsearch exporter](https://github.com/camunda/camunda/tree/1.3.14/exporters/elasticsearch-exporter) added in Camunda Cloud Self-Managed). -- Engage with an active community. - -For documentation on deploying Zeebe as part of Camunda Cloud Self-Managed, refer to the [deployment guide](../../self-managed/zeebe-deployment/index.md). - -## Enterprise support for Zeebe - -Paid support for Zeebe is available via either Camunda Cloud Professional or Camunda Cloud Enterprise. Customers can choose either plan based on their process automation requirements. Camunda Cloud Enterprise customers also have the option of on-premises or private cloud deployment. - -Additionally, regardless of how you are working with Zeebe and Camunda Cloud, you can always find support through the [community](open-source/get-help-get-involved.md). - -## Next steps - -- Get familiar with [technical concepts](technical-concepts/index.md). -- Explore [community contributions](open-source/community-contributions.md). diff --git a/versioned_docs/version-1.3/guides/assets/analysis.png b/versioned_docs/version-1.3/guides/assets/analysis.png deleted file mode 100644 index d9a610f960c..00000000000 Binary files a/versioned_docs/version-1.3/guides/assets/analysis.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/assets/dashboard.png b/versioned_docs/version-1.3/guides/assets/dashboard.png deleted file mode 100644 index 14b563e8e0c..00000000000 Binary files a/versioned_docs/version-1.3/guides/assets/dashboard.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/assets/heatmap.png b/versioned_docs/version-1.3/guides/assets/heatmap.png deleted file mode 100644 index 0e63f7a4ec9..00000000000 Binary files a/versioned_docs/version-1.3/guides/assets/heatmap.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/assets/react-components/create-cluster.md b/versioned_docs/version-1.3/guides/assets/react-components/create-cluster.md deleted file mode 100644 index 6836038aabe..00000000000 --- a/versioned_docs/version-1.3/guides/assets/react-components/create-cluster.md +++ /dev/null @@ -1,14 +0,0 @@ ---- ---- - -You must create a cluster if you have a new Camunda Cloud account. - -1. To create a cluster, click the **Clusters** tab, and click **Create New Cluster**. -2. Name your cluster. For the purpose of this guide, we recommend using the **Stable** channel, the latest generation, and the region closest to you. Click **Create**. -3. Your cluster will take a few moments to create. Check the satus on the **Clusters** page or by clicking into the cluster itself and looking at the **Applications Overview**. - -Even while the cluster shows a status **Creating**, you can still proceed to begin modeling. - -:::note -Zeebe must show a status of **Healthy** to properly deploy your model in future guide steps. -::: diff --git a/versioned_docs/version-1.3/guides/automating-a-process-using-bpmn.md b/versioned_docs/version-1.3/guides/automating-a-process-using-bpmn.md deleted file mode 100644 index c7db4fc50f2..00000000000 --- a/versioned_docs/version-1.3/guides/automating-a-process-using-bpmn.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -id: automating-a-process-using-bpmn -title: Automating a process using BPMN -description: A quickstart on how to use BPMN, an easy-to-adopt visual modeling language, together with Camunda to automate your business processes. 
-keywords: [workflow, modeling] ---- - -Beginner -Time estimate: 20 minutes - -## What is BPMN? - -Business Process Model and Notation (BPMN) is the global standard for process modeling. Combining BPMN, an easy-to-adopt visual modeling language, with Camunda, you can easily automate your business processes. - -Processes are the algorithms that determine how an organization runs based on independent tasks. Successful businesses grow from proven, effective processes. Therefore, Camunda’s workflow engine executes processes defined in BPMN to ensure these processes can be swiftly orchestrated within a diagram. - -Take the following example where we've outlined a process in a BPMN diagram to send an email. Don't worry too much about the symbols as we'll get to that shortly. For now, recognize the start and end of the process, comprised of entering a message, and sending the email. - -![sending email bmmn diagram](./img/simple-bpmn-process.png) - -BPMN offers control and visibility over your critical business processes. The workflow engine orchestrates processes that span across a wide variety of elements, including APIs, microservices, business decisions and rules, human work, IoT devices, RPA bots, and more. - -## Set up - -Begin by building your BPMN diagrams with [Modeler](../components/modeler/about.md). -To get started, ensure you’ve [created a Camunda Cloud account](./getting-started/create-camunda-cloud-account.md). - -## Getting started with BPMN - -Once logged in to your Camunda Cloud account, take the following steps: - -1. Click on the **Modeler** tab at the top of the page. -2. Open any project from your Web Modeler home view. -3. Click the blue **New** button and choose **BPMN Diagram**. -4. Right after creating your diagram, you can name it by replacing the **New Diagram** text with the name of your choice. In this case, we'll name it "Bake a Cake." - -### BPMN elements - -Before building out the diagram to bake a cake, let's examine the significance of the components on the left side of the screen. - -You can build out a BPMN diagram for a process using several elements, including the following: - -- Events: The things that happen. For example, start and end events which begin and terminate the process. -- Tasks: For example, user tasks for a particular user to complete, or service tasks to invoke various webservices. -- Gateways: For example, parallel gateways that move the process along between two tasks at the same time. -- Subprocesses: For example, a transaction subprocess which can be used to group multiple activities to a transaction. - -For a complete list of BPMN elements and their capabilities, visit the [BPMN reference material](../components/modeler/bpmn/bpmn.md). - -### BPMN in action - -Using these elements, let's build out a BPMN diagram to examine the process of baking a cake. - -Take the following steps: - -1. On our diagram, we've already been given an element as a start event in the shape of a circle. Click on the circle, and then the wrench icon to adjust this element. For now, keep it as a start event. Double click on the circle to add text. -2. Drag and drop an arrow to the first task (the rectangle shape), or click the start event, and then click the task element to automatically attach it. -3. Click on the task, then click on the wrench icon to declare it a user task, which will be named "Purchase Ingredients." Note that each element added has adjustable attributes. Use the properties panel on the right side of the page to adjust these attributes. -4. 
Click on the user task to connect a gateway to it. By clicking the wrench icon on the gateway and declaring it a parallel gateway, you can connect it to two tasks that can happen at the same time: mixing the ingredients, and preheating the oven. - -![baking a cake bpmn sample](./img/bake-cake-bpmn.png) - -5. Attach the next gateway once these two tasks have completed to move forward. -6. Add a user task to bake the cake, and finally a user task to ice the cake. -7. Add an end event, represented by a bold circle. -8. No need to save. Web Modeler will autosave every change you make. - -![completed bpmn diagram](./img/complete-baking-cake-bpmn.png) - -:::note -You can also import a BPMN diagram with Web Modeler. See how to do that [here](../components/modeler/web-modeler/import-diagram.md). -::: - -## Execute your process diagram - -:::note -If you change a diagram and it is autosaved, this has no effect on your cluster(s). - -When you deploy the diagram, it becomes available on the selected cluster and new instances can start. -::: - -To execute your completed process diagram, click the blue **Deploy Diagram** button. - -You can now start a new process instance to initiate your process diagram. Click the blue **Start Instance** button. - -You can now monitor your instances in [Operate](/components/operate/index.md). From your diagram, click the honeycomb icon button next to the Start Instance button, and **View Process Instances**. This will automatically take you to Camunda Operate to monitor your running instances. - -You can also visit an ongoing list of user tasks required in your BPMN diagram. Click the honeycomb icon button next to the **Start Instance** button, and **View User Tasks** to automatically be taken to [Tasklist](/components/tasklist/introduction.md). - -## Additional resources and next steps - -- [Camunda BPMN Tutorial](https://camunda.com/bpmn/) -- [BPMN Implementation Reference](https://docs.camunda.org/manual/latest/reference/bpmn20/) -- [BPMN Engine](https://camunda.com/products/camunda-platform/bpmn-engine/) -- [Model Your First Process](./getting-started/model-your-first-process.md) -- [BPMN Reference](../components/modeler/bpmn/bpmn.md) -- [Operate](/components/operate/index.md) -- [Tasklist](/components/tasklist/introduction.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/getting-started-orchestrate-human-tasks.md b/versioned_docs/version-1.3/guides/getting-started-orchestrate-human-tasks.md deleted file mode 100644 index 99a7464339b..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started-orchestrate-human-tasks.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -id: getting-started-orchestrate-human-tasks -title: Getting started with Human Task Orchestration -sidebar_label: Getting started with Human Task Orchestration -description: "Efficiently allocate work through user tasks." -keywords: [human tasks, orchestration, getting started, user guide] ---- - -Using [Camunda Cloud](./getting-started/create-camunda-cloud-account.md), you can orchestrate human tasks by assigning them to users. Then, users can enter the necessary data to drive the business process. - -When a process instance arrives at such a user task, a new job similar to a service task is created. The process instance stops at this point and waits until the job is completed. Applications like [Tasklist](../components/tasklist/introduction.md) can be used by humans to complete these tasks. 
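That waiting job behaves much like a service task job: the process instance only moves on once something completes it, which is normally done by a person through Tasklist. Purely as an illustration of that mechanism, the sketch below uses the Zeebe Java client to subscribe to such jobs and complete them programmatically; `io.camunda.zeebe:userTask` is assumed here to be the reserved job type backing user tasks, and the client connection details are omitted.

```java
import io.camunda.zeebe.client.ZeebeClient;

public class UserTaskJobObserver {

  public static void main(String[] args) throws InterruptedException {
    // Connection details omitted; see the cluster and client setup steps in this guide.
    try (final ZeebeClient client = ZeebeClient.newClientBuilder().usePlaintext().build()) {

      // Assumption: "io.camunda.zeebe:userTask" is the reserved job type for user tasks.
      // Normally Tasklist completes these jobs when a person finishes the task; this worker
      // completes them programmatically only to show that the instance then continues.
      client.newWorker()
          .jobType("io.camunda.zeebe:userTask")
          .handler((jobClient, job) -> {
            System.out.println("User task job for element: " + job.getElementId());
            jobClient.newCompleteCommand(job.getKey()).send().join();
          })
          .open();

      // Keep the worker alive long enough to pick up any waiting user task jobs.
      Thread.sleep(10_000);
    }
  }
}
```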
- -While it may originally seem like the goal of automating a process is to remove humans entirely, efficiently allocating work through user tasks can be even more beneficial. - -In this guide, we’ll step through one way to create an automated process utilizing user tasks – all entirely executable in the browser. - -### Prerequisites - -- Ensure you have a valid [Camunda Cloud account](./getting-started/create-camunda-cloud-account.md), or sign up if you still need one. -- (Optional) Install [Camunda Desktop Modeler](../components/modeler/desktop-modeler/install-the-modeler.md). - -### Create a cluster - -import CreateCluster from './assets/react-components/create-cluster.md' - - - -### Create an automated process with user tasks - -To create an automated process with user tasks, take the following steps: - -#### Develop your automated process with user tasks - -1. Log in to your Camunda Cloud account. -2. To create a BPMN diagram, navigate to Web Modeler via the **Modeler** tab, and click **New project**. -3. Name your project and select **New > BPMN Diagram > + Create blank**. -4. Give your model a descriptive name, and then give your model a descriptive id within the **General** tab inside the properties panel on the right side of the screen. In this case, we've named our model `Preparing dinner` with an id of `preparing-dinner`. -![modeler example](./img/modeler-example.png) -5. Use Web Modeler to design a BPMN flow with user tasks. Create a user task by dragging the task icon from the palette, or click the existing start event and the displayed task icon. -6. Change the task type by clicking the wrench icon. Select **User Task**. -7. Add a descriptive name using the properties panel. In this case, we've named ours `Decide what's for dinner`. -8. Assign this task to a user or group using the properties panel. - - User tasks support specifying assignments, using the `zeebe:AssignmentDefinition` extension element. This can be used to define which user the task can be assigned to. One or both of the following attributes can be specified simultaneously: - - `assignee`: Specifies the user assigned to the task. Tasklist will claim the task for this user. - - `candidateGroups`: Specifies the groups of users that the task can be assigned to. -![user task example](./img/user-task-example.png) -9. Append a gateway to your user task by dragging it onto the dashboard from the palette on the left side of the screen, or by clicking on the user task `Decide what's for dinner` and clicking on the element you'd like to create next. In this case, we've selected the diamond icon to create a gateway. -10. Create two sequence flows (represented by the arrows) from the gateway and two new user tasks based on what the user decides to eat. In this case, we've named ours `Prepare chicken` and `Prepare salad`. - - Note that the sequence flows require [expressions](../components/concepts/expressions.md) to access variables from the form we'll create below to determine what to eat for dinner. To add an expression, click on the sequence flow to view the properties panel, and open the **Condition** tab to insert a conditional expression. -11. Attach an end event to the two user tasks. - - -
    - -#### Implement a form - -1. To add a form and decide what's for dinner, return to the **Modeler** homepage and click **New > Form**. -2. Name your form. In this case, we've named ours **Decide what's for dinner**. -3. Click and drag the **Select** element onto the palette. Give this **Select** field a description within the properties panel. We've described ours as **What's for dinner?** -4. Scroll down to the **Values** section of the properties panel to add your values. For our dinner, we've created two values: one labeled **Chicken**, and one labeled **Salad**. - -:::note -As mentioned earlier, you'll need to insert the defined variable values into the appropriate sequence flows to execute your process. In this example, our sequence flows will now have the expressions of `= chicken=true` and `= salad=true`. -::: - -#### Start and view your process instance - -1. To deploy your diagram to your cluster, click **Deploy diagram > Deploy**. -2. To officially start a new process instance, click **Start instance**. -3. Navigate to [Operate](../components/operate/index.md) by clicking the honeycomb icon next to the **Start instance** button. -4. Click **View process instances** to see your process instance alongside the green token waiting at the user task. - -![token moving through process](./img/user-task-token-1.png) - -#### Complete a user task - -Within this example, we've included a form to demonstrate the completion of a human task. To learn more about creating forms within your diagrams, visit our guide on [building forms with Modeler](./utilizing-forms.md). - -1. Go back to your Camunda Cloud diagram and select the honeycomb icon and then **View user tasks** to take a look at your user tasks inside Tasklist. -2. Select the open user task on the left panel of **Tasks**. In our example below, this is **Decide what's for dinner**. -3. Next to **Assignee**, click **Claim** to claim the task. -4. Once finished entering the appropriate information, click **Complete Task**. -![complete a human task in Tasklist](./img/user-task-tasklist.png) -5. On the left panel of **Tasks**, filter by **Completed** tasks to see your task has been finished. - -You can now navigate back to Operate and notice the process instance has continued, and the token has moved forward. - -The token moves through the exclusive gateway (also called the XOR gateway), and is used to model the decision in the process. When the execution arrives at this gateway, all outgoing sequence flows are evaluated in the order in which they have been defined. The sequence flow which condition evaluates to ‘true’ (or which doesn’t have a condition set, conceptually having a ‘true’ value defined on the sequence flow) is selected for continuing the process. - -In this case, the token will move through the gateway and (according to the conditional expressions we outlined earlier) to the selected dinner based on the **Decide what's for dinner** user task we completed. If we select **Chicken**, the token moves forward to **Prepare chicken**. If we select **Salad**, the token moves forward to **Prepare salad**. 
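The same decision can also be driven programmatically: if the variables are already present on the process instance, the gateway's sequence flow conditions are evaluated against them in exactly the same way. Below is a minimal sketch with the Zeebe Java client that starts an instance with `chicken` preset; it assumes the `preparing-dinner` process id used earlier in this guide and omits the client connection setup.

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.ProcessInstanceEvent;

public class StartDinnerInstance {

  public static void main(String[] args) {
    // Connection details omitted; see the cluster and client setup steps in this guide.
    try (final ZeebeClient client = ZeebeClient.newClientBuilder().usePlaintext().build()) {

      // Once the "Decide what's for dinner" user task is completed, the gateway evaluates
      // the sequence flow conditions (e.g. "= chicken = true") against these variables.
      final ProcessInstanceEvent instance = client.newCreateInstanceCommand()
          .bpmnProcessId("preparing-dinner") // process id used earlier in this guide
          .latestVersion()
          .variables("{\"chicken\": true, \"salad\": false}")
          .send()
          .join();

      System.out.println("Started process instance " + instance.getProcessInstanceKey());
    }
  }
}
```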
- -### Additional resources and next steps - -- [BPMN user tasks](../components/modeler/bpmn/user-tasks/user-tasks.md) -- [Building Forms with Modeler](./utilizing-forms.md) -- [Introduction to Operate](../components/operate/index.md) -- [Introduction to Tasklist](../components/tasklist/introduction.md) -- [Intermediate Modeler example](https://github.com/NPDeehan/Whos50GameCamundaCloud) diff --git a/versioned_docs/version-1.3/guides/getting-started-orchestrate-microservices.md b/versioned_docs/version-1.3/guides/getting-started-orchestrate-microservices.md deleted file mode 100644 index 49ef5b734d8..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started-orchestrate-microservices.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -id: getting-started-orchestrate-microservices -title: Getting started with Microservice Orchestration -sidebar_label: Getting started with Microservice Orchestration -description: "Orchestrate Microservices along a business process for visibility and resilience." -keywords: [microservices, orchestration, getting-started] ---- - -Using Camunda Cloud, you can orchestrate the microservices necessary to achieve your end-to-end automated business process. Whether you have existing microservices or are looking to build out your microservices, this guide will help you understand how you can start your microservice orchestration journey with Camunda Cloud. - -While this guide uses code snippets in Java, you do not need to be a Java developer to be successful. Additionally, you can orchestrate microservices with Camunda Cloud in other programming languages. - -## Prerequisites - -* Valid Camunda Cloud account or [sign up](https://accounts.cloud.camunda.io/signup?utm_source=docs.camunda.io&utm_medium=referral) if you still need one -* Java >= 8 -* Maven -* IDE (IntelliJ, VSCode, or similar) -* Download and unzip or clone the [repo](https://github.com/camunda-cloud/camunda-cloud-tutorials), then `cd` into `camunda-cloud-tutorials/orchestrate-microservices/worker-java` - -### Create a cluster - -import CreateCluster from './assets/react-components/create-cluster.md' - - - -### Design your process with BPMN - -Start by designing your automated process using BPMN. This guide introduces you to the palette and a few BPMN symbols in Web Modeler. - -1. Navigate to the **Modeler** tab at the top of the page. This opens Web Modeler to your **Projects** page in a separate browser tab. -2. Create a new project by clicking the blue **New project** button. Give your project a descriptive name. -3. Click the blue **New** button and select **BPMN Diagram**. A modal will appear to select a template, click **+ Create blank**. -4. Give your model a descriptive name and id. On the right side of the page, expand the **General** section of the properties panel to find the name and id fields. For this guide, we'll use **Microservice Orchestration Tutorial** for the name and **microservice-orchestration-tutorial** for the id. -5. Use Web Modeler to design a BPMN process with service tasks. These service tasks are used to call your microservices via workers. Create a service task by dragging the task icon from the palette, or by clicking the existing start event and clicking the task icon. Make sure there is an arrow connecting the start event to the task. Click the wrench icon and select **Service Task** to change the task type. - -![Task with dropdown showing config, including service task](./img/microservice-orchestration-config-service-task.png) - -1. 
Add a descriptive name using the properties panel. For this guide, we'll use **Microservice Example**. Since you previously opened the **General** section of the properties panel, it is likely still open when working with your service task configuration. -2. In the properties panel, expand the **Task definition** section and use the **Type** field to enter a string used in connecting this service task to the corresponding microservice code. For this guide, we'll use **orchestrate-something** as the type. You will use this while [creating a worker for the service task](#create-a-worker-for-the-service-task). If you do not have an option to add the **Type**, use the wrench icon and select **Service Task**. - -![Service task with properties panel open](./img/microservice-orchestration-service-task.png) - -6. Add an end event by dragging one from the palette, or by clicking the end event when the last service task in your diagram has focus. Make sure there is an arrow connecting the service task to the end event. -7. On the right upper corner click the blue **Deploy diagram** button. Your diagram is now deployed to your cluster. -8. Start a new process instance by clicking on the blue **Start instance** button. -9. To the right of the two blue buttons, click the Application icon (honeycomb icon) button next to the **Start instance** button. Navigate to Operate to see your process instance with a token waiting at the service task by clicking **View process instances**. - -### Create credentials for your Zeebe client - -To interact with your Camunda Cloud cluster, you'll use the Zeebe client. First, you'll need to create credentials. - -1. The main page for Camunda Cloud Console should be open on another tab. Use Camunda Cloud Console to navigate to your clusters either through the navigation **Clusters** or by using the section under **View all** on the **Clusters** section of the main dashboard. Click on your existing cluster. This will open the **Overview** for your cluster, where you can find your cluster id and region. You will need this information later when creating a worker in the next section. - -:::note - -If your account is new, you should have a cluster already available. If no cluster is available, or you’d like to create a new one, click **Create New Cluster**. - -::: - -2. Navigate to the **API** tab. Click **Create**. -3. Provide a descriptive name for your client like `microservice-worker`. For this tutorial, the scope can be the default Zeebe scope. Click **Create**. -4. Your client credentials can be copied or downloaded at this point. You will need your client id and your client secret when creating a worker in the next section, so keep this window open. Once you close or navigate away from this screen, you will not be able to see them again. - -### Create a worker for the service task - -Next, we’ll create a worker for the service task by associating it with the type we specified on the service task in the BPMN diagram. - -1. Open the downloaded or cloned project ([repo](https://github.com/camunda-cloud/camunda-cloud-tutorials), then `cd` into `camunda-cloud-tutorials/orchestrate-microservices/worker-java`) in your IDE . -2. Add your credentials to `application.properties`. Your client id and client secret are available from the previous section in the credential text file you downloaded or copied. Go to the cluster overview page to find your cluster id and region. -3. In the `Worker.java` file, change the type to match what you specified in the BPMN diagram. 
If you followed the previous steps for this guide and entered “orchestrate-something”, no action is required. -4. After making these changes, perform a Maven install, then run the Worker.java `main` method via your favorite IDE. If you prefer using a terminal, run `mvn package exec:java`. -5. Using the Modeler tab in your browser, navigate to Operate and you will see your token has moved to the end event, completing this process instance. - -Congratulations! You successfully built your first microservice orchestration solution with Camunda Cloud. - -## Next steps - -* Learn more about Camunda Cloud and what it can do by reading [What is Camunda Cloud?](../../components/concepts/what-is-camunda-cloud/). -* Get your local environment ready for development with Camunda Cloud by [setting up your first development project](../setting-up-development-project). diff --git a/versioned_docs/version-1.3/guides/getting-started/bpmn/gettingstarted_quickstart.bpmn b/versioned_docs/version-1.3/guides/getting-started/bpmn/gettingstarted_quickstart.bpmn deleted file mode 100644 index d1654abd1bd..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/bpmn/gettingstarted_quickstart.bpmn +++ /dev/null @@ -1,26 +0,0 @@ - - - - - SequenceFlow_1jbw0ni - - - - SequenceFlow_1jbw0ni - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/guides/getting-started/bpmn/gettingstarted_quickstart_advanced.bpmn b/versioned_docs/version-1.3/guides/getting-started/bpmn/gettingstarted_quickstart_advanced.bpmn deleted file mode 100644 index 18d4ac2573c..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/bpmn/gettingstarted_quickstart_advanced.bpmn +++ /dev/null @@ -1,80 +0,0 @@ - - - - - Flow_15yg3k5 - - - - - - - Flow_15yg3k5 - Flow_13k1knz - - - Flow_13k1knz - Flow_0qhnfdq - Flow_1vlnqoi - - - - Flow_0qhnfdq - - - =return="Pong" - - - Flow_1vlnqoi - - - =return!="Pong" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/guides/getting-started/connect-to-your-cluster.md b/versioned_docs/version-1.3/guides/getting-started/connect-to-your-cluster.md deleted file mode 100644 index a93d33e4558..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/connect-to-your-cluster.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -id: connect-to-your-cluster -title: Connect to your cluster -description: "Let's learn more about installing and communicating with clusters." ---- -Beginner -Time estimate: 5 minutes - -## Prerequisites - -- [Camunda Cloud account](create-camunda-cloud-account.md) -- [Download and install Node.js and npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) -- Install the appropriate package: - -```bash -npm i -g zbctl -``` - -## First connection - -After creating a client and downloading the connection file, you will now need to source it to make it available in your environment. If these are known to the system, a client can communicate directly with its own cluster in the cloud without further configuration. 
- -```bash -source ~/Downloads/CamundaCloudMgmtAPI-Client-test-client.txt -``` - -```bash -zbctl status -``` - -As a result, you will get a similar response: - -```bash -Cluster size: 1 -Partitions count: 2 -Replication factor: 1 -Gateway version: unavailable -Brokers: - Broker 0 - zeebe-0.zeebe-broker-service.456637ef-8832-428b-a2a4-82b531b25635-zeebe.svc.cluster.local:26501 - Version: unavailable - Partition 1 : Leader - Partition 2 : Leader -``` - -## Next steps - -- [Model your first process](model-your-first-process.md) diff --git a/versioned_docs/version-1.3/guides/getting-started/create-camunda-cloud-account.md b/versioned_docs/version-1.3/guides/getting-started/create-camunda-cloud-account.md deleted file mode 100644 index 8dd365266b5..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/create-camunda-cloud-account.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: create-camunda-cloud-account -title: Sign up & log in -slug: /guides/getting-started/ -description: "Set up your Camunda Cloud account to get started." ---- - -Beginner -Time estimate: 5 minutes - -## Sign up for Camunda Cloud - -Create a Camunda Cloud account so you can create clusters, deploy processes, and create a new instance. - -### Visit [camunda.io/signup](https://accounts.cloud.camunda.io/signup?utm_source=docs.camunda.io&utm_medium=referral) - -Visit [camunda.io/signup](https://accounts.cloud.camunda.io/signup?utm_source=docs.camunda.io&utm_medium=referral) and view the **Sign Up** screen: - -![signup](./img/signup.png) - -### Create an account - -Fill out the form and submit, or sign up using the social sign up buttons like Google or GitHub. - -If you fill out the form, you'll receive a confirmation email. Click on the link to verify your email address and set your password. - -If you choose to create an account through the social sign up buttons, you'll be redirected to Console directly. - -## Log in to your Camunda Cloud account - -### Visit [https://camunda.io](https://camunda.io) - -Log in with the email address and password you used in the previous form, or use the social login buttons. - -![login](./img/login.png) - -After login, you'll see the console overview page. This is the central place to manage your clusters, and the diagrams and forms you want to deploy to Camunda Cloud. - -![overview-home](./img/home.png) - -## Next steps - -- [Create your Cluster](create-your-cluster) diff --git a/versioned_docs/version-1.3/guides/getting-started/create-your-cluster.md b/versioned_docs/version-1.3/guides/getting-started/create-your-cluster.md deleted file mode 100644 index 1a818d56be8..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/create-your-cluster.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -id: create-your-cluster -title: Create your cluster -description: "Learn how to create a cluster in Camunda to get started building, running, and deploying processes." 
---- - -Beginner -Time estimate: 5 minutes - -import CreateCluster from '../../components/cloud-console/manage-clusters/create-cluster-include.md' - - diff --git a/versioned_docs/version-1.3/guides/getting-started/deploy-your-process-and-start-process-instance.md b/versioned_docs/version-1.3/guides/getting-started/deploy-your-process-and-start-process-instance.md deleted file mode 100644 index fd79199bba1..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/deploy-your-process-and-start-process-instance.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -id: deploy-your-process-and-start-process-instance -title: Deploy and start your process instance -description: "Deploy and start your process instance." ---- -Beginner -Time estimate: 5 minutes - -## Prerequisites - -- Web Modeler or [Desktop Modeler](https://camunda.com/download/modeler/) - -:::note - -BPMN diagrams must be created for the process engine they intend to be deployed on. You cannot currently run a BPMN diagram modeled for Camunda Platform in Camunda Cloud, or vice versa. -::: - -## Deploy and start your process instance - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -You can click the blue **Deploy Diagram** button to deploy the newly-created process to your cluster. - -![web-modeler-deploy](../../components/modeler/web-modeler/img/save-and-deploy.png) - -Deployment can take a few seconds, but you should get a confirmation for successful deployment. - -![web-modeler-deploy-successfull](../../components/modeler/web-modeler/img/save-and-deploy-successful.png) - -You can now start a new process instance. For this example, you can start an instance with an empty payload. - -![web-modeler-start-instance](../../components/modeler/web-modeler/img/start-process-instance-variables.png) - -Once the instance is started, you'll receive a confirmation with a link to open Operate. - -![web-modeler-start-instance-done](../../components/modeler/web-modeler/img/start-process-instance-done.png) - - - - - -Take the following steps: - -1. On the right side of the navigation menu, note the buttons for deploying and starting processes. - -![zeebe-modeler-deploy](./img/zeebe-modeler-deploy.png) - -2. In the deployment dialog, the connection information must now be specified: `Cluster Id`, `Client Id`, and `Client Secret`. - -`Client Id` and `Cluster Id` can be retrieved by clicking on **View** on the client in the **API** tab. - -![cluster-details-created-client-view](./img/cluster-details-created-client-view.png) - -The `Client Secret` can be retrieved from the downloaded connection file: - -```bash -grep SECRET ~/Downloads/CamundaCloudMgmtAPI-Client-test-client.txt -export ZEEBE_CLIENT_SECRET='zbzsZI_6UnCsH_CIo0lNUN8qGyvLJr9VrH77ewNm8Oq3elvhPvV7g.QmJGydzOLo' -``` - -3. Click **Deploy** to deploy the process. Use **Play** from the navigation to start a new instance. 
- - - - -## Next steps - -- [Implement a service task](implement-service-task.md) -- [Setting up your first development project](./../setting-up-development-project.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/getting-started/img/cluster-detail-clients.png b/versioned_docs/version-1.3/guides/getting-started/img/cluster-detail-clients.png deleted file mode 100644 index 9051bc3eaba..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/cluster-detail-clients.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-create-client.png b/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-create-client.png deleted file mode 100644 index 2c394b1fb59..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-create-client.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-created-client-view.png b/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-created-client-view.png deleted file mode 100644 index 62ddd6d9cef..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-created-client-view.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-created-client.png b/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-created-client.png deleted file mode 100644 index 407aaef5fa6..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/cluster-details-created-client.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/home.png b/versioned_docs/version-1.3/guides/getting-started/img/home.png deleted file mode 100644 index 4cebb7880a7..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/home.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/login.png b/versioned_docs/version-1.3/guides/getting-started/img/login.png deleted file mode 100644 index b73d4fe9e94..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/login.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances-other.png b/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances-other.png deleted file mode 100644 index 9d721db703f..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances-other.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances-pong.png b/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances-pong.png deleted file mode 100644 index 7f7b13fd6a9..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances-pong.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances.png b/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances.png deleted file mode 100644 index 074e085cd4f..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/operate-advanced-instances.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/operate-dashboard.png 
b/versioned_docs/version-1.3/guides/getting-started/img/operate-dashboard.png deleted file mode 100644 index 0ffa4378fb8..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/operate-dashboard.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/signup.png b/versioned_docs/version-1.3/guides/getting-started/img/signup.png deleted file mode 100644 index fefa51ccd47..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/signup.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced-process-id.png b/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced-process-id.png deleted file mode 100644 index 6d167dafbf5..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced-process-id.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced-sequence-flows.png b/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced-sequence-flows.png deleted file mode 100644 index 2a1a1a19ed6..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced-sequence-flows.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced.png b/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced.png deleted file mode 100644 index 48918f8685d..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/web-modeler-advanced.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced-process-id.png b/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced-process-id.png deleted file mode 100644 index 04b3361b875..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced-process-id.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced-sequence-flows.png b/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced-sequence-flows.png deleted file mode 100644 index c4b14fcb83e..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced-sequence-flows.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced.png b/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced.png deleted file mode 100644 index b9b1b23c103..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-advanced.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-deploy.png b/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-deploy.png deleted file mode 100644 index 57eafb89b3a..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler-deploy.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler.png b/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler.png deleted file mode 100644 index 72168e7bc97..00000000000 Binary files a/versioned_docs/version-1.3/guides/getting-started/img/zeebe-modeler.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/getting-started/implement-decision-gateway.md 
b/versioned_docs/version-1.3/guides/getting-started/implement-decision-gateway.md deleted file mode 100644 index 292d58b6b4a..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/implement-decision-gateway.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: implement-decision-gateway -title: Implement a decision gateway -description: "Using the JSON object, let's route your process." ---- -Beginner -Time estimate: 5 minutes - -## Prerequisites - -- Web Modeler or [Desktop Modeler](https://camunda.com/download/modeler/) - -## Implement a decision gateway - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -In [the last section](./implement-service-task.md), we connected a worker that will return a JSON object, which is used to decide which path to take in the process. Now, we can use the JSON object to route your process by filling in the condition expression on the two sequence flows after the XOR gateway. - -Use the following conditional expression for the "Pong" sequence flow: - -```bash -= return = "Pong" -``` - -And for the else sequence flow: - -```bash -= return != "Pong" -``` - - - - - -![sequenceflows-cloud](./img/web-modeler-advanced-sequence-flows.png) - - - - - -![sequenceflows](./img/zeebe-modeler-advanced-sequence-flows.png) - - - - -Deploy the updated process again so the specified decisions in the gateway are used. - -## Next steps - -- [Monitor your process in Operate](monitor-your-process-in-operate.md) -- [Camunda Operate](/self-managed/operate-deployment/install-and-start.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/getting-started/implement-service-task.md b/versioned_docs/version-1.3/guides/getting-started/implement-service-task.md deleted file mode 100644 index c3304cc52bc..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/implement-service-task.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: implement-service-task -title: Implement a service task -description: "Let's implement a service task to connect workers." ---- -Beginner -Time estimate: 8 minutes - -## Prerequisites - -- Web Modeler or [Desktop Modeler](https://camunda.com/download/modeler/) - -## Implement a service task - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Use [this process model](./bpmn/gettingstarted_quickstart_advanced.bpmn) for the tutorial. - - - - - -Take the following steps: - -1. In a diagram, open the breadcrumb menu and choose **Replace via Upload**. Then, select a file from your computer. -![import diagram via replace](../../components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png) - -Alternatively, you can drag and drop the file onto the canvas. -![import diagram via drag and drop](../../components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png) -![processId-cloud](./img/web-modeler-advanced-process-id.png) - -This process includes a service task and an XOR gateway. - -2. Select the service task and fill in the properties. - -3. Set the **Type** to `test-worker`. - -![process-cloud](./img/web-modeler-advanced.png) - -4. Deploy the new process using the **Deploy Diagram** button. - -5. Make sure you have [created a client](./setup-client-connection-credentials.md) and [connected a cluster](connect-to-your-cluster.md). - - - - - -![processId](./img/zeebe-modeler-advanced-process-id.png) - -1. This process includes a service task and an XOR gateway. Select the service task and fill in the properties. -2. 
Set the **Type** to `test-worker`. - -![process](./img/zeebe-modeler-advanced.png) - -3. Deploy the new process. - - - - -Now, you can connect a worker for the configured service task: - -```bash -zbctl create worker test-worker --handler "echo {\"return\":\"Pong\"}" -``` - -## Next steps - -- [Implement a decision gateway](implement-decision-gateway.md) -- [Getting Started with Microservice Orchestration](./../getting-started-orchestrate-microservices.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/getting-started/involve-humans.md b/versioned_docs/version-1.3/guides/getting-started/involve-humans.md deleted file mode 100644 index 03a25fb48df..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/involve-humans.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -id: involve-humans -title: Involve Humans ---- - -Stay tuned, this page is currently in progress. - -___TODO: waiting for Tasklist___ diff --git a/versioned_docs/version-1.3/guides/getting-started/model-your-first-process.md b/versioned_docs/version-1.3/guides/getting-started/model-your-first-process.md deleted file mode 100644 index bc1ff239911..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/model-your-first-process.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: model-your-first-process -title: Model your first process -description: "Use Modeler to design and deploy a process." ---- - -Beginner -Time estimate: 10 minutes - -## Prerequisites - -- Web Modeler or [Desktop Modeler](https://camunda.com/download/modeler/) - -## Design and deploy a process - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -In Camunda Cloud, you have two options to design and deploy a process: Web Modeler and [Desktop Modeler](https://camunda.com/download/modeler/). - - - - - -Using Web Modeler, processes are designed and deployed, and new instances are created directly from the console. Take the following steps: - -1. Go to the **Modeler** tab at the top of the page. -2. Open any project from your Web Modeler home view. -3. Click the blue **New** button and choose **BPMN Diagram**. - -![console-modeler](../../components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-bpmn.png) - -3. Add an **EndEvent** to create a valid BPMN diagram. - -![console-modeler-new-diagram-with-end-event](../../components/modeler/web-modeler/img/web-modeler-with-end-event.png) -![console-modeler-new-diagram-with-end-event](../../components/modeler/web-modeler/img/web-modeler-with-end-event2.png) - -4. No need to save. Web Modeler will autosave every change you make - - - - - -Using Desktop Modeler, processes are designed and deployed, and new instances are created. - -Design a simple process with one start event and one end event, or download this [BPMN model](./bpmn/gettingstarted_quickstart.bpmn). - -![zeebe-modeler](./img/zeebe-modeler.png) - - - -## Next steps - -- [Deploy and start your process instance](deploy-your-process-and-start-process-instance.md) diff --git a/versioned_docs/version-1.3/guides/getting-started/monitor-your-process-in-operate.md b/versioned_docs/version-1.3/guides/getting-started/monitor-your-process-in-operate.md deleted file mode 100644 index 4d54564251a..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/monitor-your-process-in-operate.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: monitor-your-process-in-operate -title: Monitor your process in Operate -description: "Camunda Cloud offers Operate to monitor your process instances." 
---- -Beginner -Time estimate: 8 minutes - -## Prerequisites - -- [Camunda Operate](/self-managed/operate-deployment/install-and-start.md) - -## Monitor your process - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Camunda Cloud offers Operate to monitor your process instances. - -:::note -Find an entry point in the cluster details. -::: - -![operate-dashboard](./img/operate-dashboard.png) - -By selecting the deployed process, you will see a list of instances that can be filtered: - -![operate-instances](./img/operate-advanced-instances-pong.png) - -Because [the started worker](./implement-service-task.md) returns the following, the process ends in the upper end event following the Ping sequence flow: - -```json -{ - "return": "Pong" -} -``` - -Changing the worker to the following and creating a new instance leads to a second instance in Operate: - -```bash -zbctl create worker test-worker --handler "echo {\"return\":\"...\"}" -``` - -You'll see this ending in the second end event following the else sequence flow: - -![operate-instance](./img/operate-advanced-instances-other.png) - -As a next step, you can now connect both workers in parallel and create more process instances: - -```bash -while true; do zbctl create instance camunda-cloud-quick-start-advanced; sleep 1; done -``` - -In Operate, you'll see instances ending in both end events depending on which worker picked up the job. - -![operate-instances](./img/operate-advanced-instances.png) - -## Next steps - -- [Get familiar with Operate](/components/operate/userguide/basic-operate-navigation.md) -- [Setting up your first development project](./../setting-up-development-project.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/getting-started/setup-client-connection-credentials.md b/versioned_docs/version-1.3/guides/getting-started/setup-client-connection-credentials.md deleted file mode 100644 index acc97e412a8..00000000000 --- a/versioned_docs/version-1.3/guides/getting-started/setup-client-connection-credentials.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: setup-client-connection-credentials -title: Setup client connection credentials -description: "Set up client connection credentials to create, name, and connect your client." ---- -Beginner -Time estimate: 5 minutes - -## Prerequisites - -- [Camunda Cloud account](create-camunda-cloud-account.md) - -## Set up client connection credentials - -Here, we'll set up client connection credentials to create, name, and connect your client. - -To create a new client, take the following steps: - -1. Navigate to the API tab: - -![cluster-details](./img/cluster-detail-clients.png) - -2. Click **Create New Client** to create a new client and name your client accordingly. - -3. Select **Zeebe** so the newly-created client can access your Zeebe instance. - -![create-client](./img/cluster-details-create-client.png) - -4. Make sure you keep the generated client credentials in a safe place. The **Client Secret** will not be shown again. For your convenience, you can also download the client information to your computer. - -![created-client](./img/cluster-details-created-client.png) - -The downloaded file contains all the necessary information to communicate with your Zeebe instance in the future: - -- `ZEEBE_ADDRESS`: Address where your cluster can be reached. -- `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET`: Credentials to request a new access token. 
-- `ZEEBE_AUTHORIZATION_SERVER_URL`: A new token can be requested at this address, using the credentials. - -## Next steps - -- [Connect to your cluster](connect-to-your-cluster.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/img/MigrationGuidePics.pptx b/versioned_docs/version-1.3/guides/img/MigrationGuidePics.pptx deleted file mode 100644 index a88c383b29a..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/MigrationGuidePics.pptx and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/architecture-container-managed.png b/versioned_docs/version-1.3/guides/img/architecture-container-managed.png deleted file mode 100644 index 1e726c73390..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/architecture-container-managed.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/architecture-polyglot.png b/versioned_docs/version-1.3/guides/img/architecture-polyglot.png deleted file mode 100644 index f90c8ad050f..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/architecture-polyglot.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/architecture-spring-boot.png b/versioned_docs/version-1.3/guides/img/architecture-spring-boot.png deleted file mode 100644 index e82263b52d0..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/architecture-spring-boot.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/automate-any-process-anywhere.png b/versioned_docs/version-1.3/guides/img/automate-any-process-anywhere.png deleted file mode 100644 index dacc614fa29..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/automate-any-process-anywhere.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/bake-cake-bpmn.png b/versioned_docs/version-1.3/guides/img/bake-cake-bpmn.png deleted file mode 100644 index 2793a2602e7..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/bake-cake-bpmn.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/camunda7-vs-camunda-cloud-deployment-view.png b/versioned_docs/version-1.3/guides/img/camunda7-vs-camunda-cloud-deployment-view.png deleted file mode 100644 index 55926705a51..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/camunda7-vs-camunda-cloud-deployment-view.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/complete-baking-cake-bpmn.png b/versioned_docs/version-1.3/guides/img/complete-baking-cake-bpmn.png deleted file mode 100644 index e66a62a1a44..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/complete-baking-cake-bpmn.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/form-email-example.png b/versioned_docs/version-1.3/guides/img/form-email-example.png deleted file mode 100644 index b5643cdd93e..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/form-email-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/form-palette.png b/versioned_docs/version-1.3/guides/img/form-palette.png deleted file mode 100644 index c0b64ffbaa3..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/form-palette.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/form-properties-email.png b/versioned_docs/version-1.3/guides/img/form-properties-email.png deleted file mode 100644 index 4b4469fd258..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/form-properties-email.png and 
/dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/gateway-example-dinner.png b/versioned_docs/version-1.3/guides/img/gateway-example-dinner.png deleted file mode 100644 index 755d8bf492e..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/gateway-example-dinner.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/getting-started-aspnet-thumbnail.png b/versioned_docs/version-1.3/guides/img/getting-started-aspnet-thumbnail.png deleted file mode 100644 index 57b534b6206..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/getting-started-aspnet-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/getting-started-go-thumbnail.png b/versioned_docs/version-1.3/guides/img/getting-started-go-thumbnail.png deleted file mode 100644 index c09f5f6c93d..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/getting-started-go-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/getting-started-java-thumbnail.png b/versioned_docs/version-1.3/guides/img/getting-started-java-thumbnail.png deleted file mode 100644 index 773bbe092be..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/getting-started-java-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/getting-started-kotlin-thumbnail.png b/versioned_docs/version-1.3/guides/img/getting-started-kotlin-thumbnail.png deleted file mode 100644 index 26e2838e5b8..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/getting-started-kotlin-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/getting-started-node-thumbnail.jpg b/versioned_docs/version-1.3/guides/img/getting-started-node-thumbnail.jpg deleted file mode 100644 index 86cbc33ca10..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/getting-started-node-thumbnail.jpg and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/gettingstarted_first-model.png b/versioned_docs/version-1.3/guides/img/gettingstarted_first-model.png deleted file mode 100644 index c1132a923fb..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/gettingstarted_first-model.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart.bpmn b/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart.bpmn deleted file mode 100644 index 8f6f1a9e4ca..00000000000 --- a/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart.bpmn +++ /dev/null @@ -1,26 +0,0 @@ - - - - - SequenceFlow_1jbw0ni - - - - SequenceFlow_1jbw0ni - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart.png b/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart.png deleted file mode 100644 index 1471c14fa2b..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart_advanced.bpmn b/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart_advanced.bpmn deleted file mode 100644 index e65cc2f2890..00000000000 --- a/versioned_docs/version-1.3/guides/img/gettingstarted_quickstart_advanced.bpmn +++ /dev/null @@ -1,80 +0,0 @@ - - - - - Flow_15yg3k5 - - - - - - - Flow_15yg3k5 - Flow_13k1knz - - - Flow_13k1knz - Flow_0qhnfdq - Flow_1vlnqoi - - - - Flow_0qhnfdq - - - =return="Pong" - - - Flow_1vlnqoi - - - =return!="Pong" - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/guides/img/gettingstarted_second-model.png b/versioned_docs/version-1.3/guides/img/gettingstarted_second-model.png deleted file mode 100644 index 536ebdae8ee..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/gettingstarted_second-model.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/human-task-tasklist.png b/versioned_docs/version-1.3/guides/img/human-task-tasklist.png deleted file mode 100644 index a2213f3462d..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/human-task-tasklist.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-buffered.png b/versioned_docs/version-1.3/guides/img/message-correlation-buffered.png deleted file mode 100644 index 3578d2b9a16..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-buffered.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-completed.png b/versioned_docs/version-1.3/guides/img/message-correlation-completed.png deleted file mode 100644 index 49ade20d935..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-completed.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-correlated.png b/versioned_docs/version-1.3/guides/img/message-correlation-correlated.png deleted file mode 100644 index 30f545ec65f..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-correlated.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-like-this.png b/versioned_docs/version-1.3/guides/img/message-correlation-like-this.png deleted file mode 100644 index 32dc37aec03..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-like-this.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-message-properties.png b/versioned_docs/version-1.3/guides/img/message-correlation-message-properties.png deleted file mode 100644 index 326d0eb1e0a..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-message-properties.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-message-subscriptions.png b/versioned_docs/version-1.3/guides/img/message-correlation-message-subscriptions.png deleted file mode 100644 index 902e6197d14..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-message-subscriptions.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-not-like-this.png b/versioned_docs/version-1.3/guides/img/message-correlation-not-like-this.png deleted file mode 100644 index 4c4bd44f869..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-not-like-this.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-variables.png b/versioned_docs/version-1.3/guides/img/message-correlation-variables.png deleted file mode 100644 index 273dd66cea6..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-variables.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-wait-on-message.png b/versioned_docs/version-1.3/guides/img/message-correlation-wait-on-message.png deleted file mode 100644 index 
05b8da33f6c..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-wait-on-message.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-workflow-state.png b/versioned_docs/version-1.3/guides/img/message-correlation-workflow-state.png deleted file mode 100644 index 59ccf786277..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-workflow-state.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/message-correlation-workflow.png b/versioned_docs/version-1.3/guides/img/message-correlation-workflow.png deleted file mode 100644 index 0edbc8a1d11..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/message-correlation-workflow.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/microservice-orchestration-config-service-task.png b/versioned_docs/version-1.3/guides/img/microservice-orchestration-config-service-task.png deleted file mode 100644 index 438e221c576..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/microservice-orchestration-config-service-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/microservice-orchestration-service-task.png b/versioned_docs/version-1.3/guides/img/microservice-orchestration-service-task.png deleted file mode 100644 index a4093d06e68..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/microservice-orchestration-service-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/migration-service-task.png b/versioned_docs/version-1.3/guides/img/migration-service-task.png deleted file mode 100644 index ab14e27612f..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/migration-service-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/modeler-example.png b/versioned_docs/version-1.3/guides/img/modeler-example.png deleted file mode 100644 index cedbc85dd5d..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/modeler-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/process-solution-packaging.png b/versioned_docs/version-1.3/guides/img/process-solution-packaging.png deleted file mode 100644 index 8bc52c065e3..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/process-solution-packaging.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/simple-bpmn-process.png b/versioned_docs/version-1.3/guides/img/simple-bpmn-process.png deleted file mode 100644 index a58acc8142a..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/simple-bpmn-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/update-guide-100-to-110-copy-region.png b/versioned_docs/version-1.3/guides/img/update-guide-100-to-110-copy-region.png deleted file mode 100644 index b2c52dde530..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/update-guide-100-to-110-copy-region.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/user-task-example.png b/versioned_docs/version-1.3/guides/img/user-task-example.png deleted file mode 100644 index f3734e6bbab..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/user-task-example.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/user-task-tasklist.png b/versioned_docs/version-1.3/guides/img/user-task-tasklist.png deleted file mode 100644 index d649a6b2149..00000000000 Binary files 
a/versioned_docs/version-1.3/guides/img/user-task-tasklist.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/img/user-task-token-1.png b/versioned_docs/version-1.3/guides/img/user-task-token-1.png deleted file mode 100644 index abd186204a4..00000000000 Binary files a/versioned_docs/version-1.3/guides/img/user-task-token-1.png and /dev/null differ diff --git a/versioned_docs/version-1.3/guides/improve-processes-with-optimize.md b/versioned_docs/version-1.3/guides/improve-processes-with-optimize.md deleted file mode 100644 index c70d5f97828..00000000000 --- a/versioned_docs/version-1.3/guides/improve-processes-with-optimize.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -id: improve-processes-with-optimize -title: Improve processes with Optimize -sidebar_label: Improve processes with Optimize -description: "The following document provides a basic end-to-end glance into Optimize and its features for new business users." ---- - -Beginner -Time estimate: 20 minutes - -## Purpose - -The following document provides a basic end-to-end glance into Optimize and its features for new business users. - -Optimize offers business intelligence tooling for Camunda customers. By leveraging data collected during process execution, you can access reports, share process intelligence, analyze bottlenecks, and examine areas in business processes for improvement. - -With Optimize, review heatmap displays for instances which took longer than average to discover long-running flow nodes. As a result, reap actionable insights and rapidly identify the constraints of your system. - -See an in-depth overview of Optimize’s capabilities [here](https://docs.camunda.org/optimize/latest/). - -## Set up - -Within Camunda Cloud, you can launch Optimize from the Cloud Console — the interface where you can create clusters, and launch both Operate and Tasklist. Therefore, ensure you’ve [created a Camunda Cloud account](./getting-started/create-camunda-cloud-account.md), [set up client connection credentials](./getting-started/setup-client-connection-credentials.md), and [connected to your cluster](./getting-started/connect-to-your-cluster.md) before getting started with Optimize for SaaS users. - -:::note -So long as you are operating with [Camunda Cloud 1.2+](https://camunda.com/blog/2021/10/camunda-cloud-1-2-0-released/) when creating a cluster, you can access Optimize. From here, Optimize requires no additional set up. You can immediately obtain process insights as Optimize already continuously collects data for analysis. -::: - -Once you’ve created a cluster, take the following steps inside Cloud Console to access Optimize: - -1. Click the **Clusters** tab and select the cluster you’d like to analyze. -2. Click the **Applications** tab. -3. Select the **Optimize** box at the bottom of the page to launch Optimize. - -You can begin analyzing reports and dashboards with just two process versions. However, the more process versions you work with in Optimize, the more performance attributes and data trends you’ll be able to study. For the purposes of this guide, we’ve preconfigured several processes to demonstrate Optimize’s capabilities. - -## Create and analyze dashboards - -Within Optimize, **reports** are based on a *single* visualization, similar to a single chart or graph. **Dashboards** are aggregations of these visualizations, similar to a full spreadsheet of data collections, or a combination of several comparative charts and graphs. 
**Collections** are groups of these data sets, similar to project folders for organizational purposes where we can nest a series of dashboards and/or reports within. - -Once you open Optimize, you’ll first view the homepage for these collections, dashboards, and reports. - -To create a collection on the **Home** page, select **Create New > New Collection**. Then, you can name your collection and select which data sources and processes will be available. Note that you can select up to 10 processes at once. - -From within your collection, you can again select **Create New** and draft reports and dashboards. Add users and additional data sources by navigating between the tabs inside the collection. - -Let’s create a dashboard inside our first collection. Take the following steps: - -1. Return to the **Home** page to view a list of existing collections, dashboards, and reports. You’ll be able to view all process instances you’ve already run and retrieve additional data on these instances within the Camunda engine. -2. Select the collection where you’d like to create a dashboard. -3. Click **Create New > New Dashboard**. -4. Optimize offers preconfigured dashboard templates, or you can start from a blank dashboard. In this example, we’ll select a preconfigured template by clicking the **Process performance overview** option. Note that you can also create dashboards with multi-process templates. -5. Under **Select Process**, choose the process you’d like to analyze and the version. -6. Click **Create Dashboard**. -7. Name your dashboard, and add any additional existing reports or create filters. Click **Save**. - -![dashboard example](./assets/dashboard.png) - -In the sample above, Optimize drafted a dashboard filled with reports for review. These reports include objectives like process instance counts, aggregated process duration, active incidents, and heatmaps. - -Select **Edit > Add a Report** to incorporate additional reports you’ve already created (see [create and access reports](#create-and-access-reports) below). Click and drag the reports on the grid to arrange the dashboard to your liking. - -Click the **Share** tab to share your dashboard. Toggle to **Enable sharing**, and copy or embed the provided link. Colleagues without access to Optimize can still view your report with the shared link. - -## Create and access reports - -To create a custom report based on a key performance indicator (KPI) you’d like to analyze, and to incorporate this report into a dashboard, follow the steps below: - -1. On the right side of the **Home** page, select **Create New > New Report**. Here we’ll take a look at a single process, though you can also view data from multiple processes. -2. Click the text box under **Select Process** and select the process you’d like to analyze. -3. Select the type of report you’d like to use on the right side of the **Create new Report** box. As with dashboards, Optimize offers preconfigured templates such as heatmaps and tables. We’ll begin with a heatmap. -4. Click **Create Report**. -5. Set up and customize your report. Begin by naming your report in the text box at the top of the page, pre-filled with **New Report**. -6. In the gray text box to the right, confirm your data source, and select what you’d like to review from the process (in this case, we are viewing flow nodes.) You can also group by topics such as duration or start date. -7. If you’d like, filter the process instance or flow nodes. 
For example, you can filter by duration, only viewing process instances running for more than seven days. -8. Finally, you have the option to view particular sets of data from the instance, like instance count or absolute value, by selecting the gear icon to the left of your data customization. You can also choose how you’d like to visualize your data in the box beneath **Visualization** (i.e. bar chart, pie chart, etc.). Once you’ve made your selections, click **Save**. - -## Alerts - -You don’t have to log in or view reports and dashboards to be alerted that something may need correction or further analysis in your process. - -For this purpose, you can create new alerts for reports within your collections. These alerts watch reports for you among collections, and email you an alert if a set outlier occurs in your process flow. - -To create an alert, take the following steps: - -1. Create a report with a number visualization inside a collection for a KPI you want to track. -2. Inside your collection, select the **Alerts** tab. -3. Select the type of alert you would like to receive. For example, you can receive an email notification when the backlog on your bottleneck becomes too high. - -As you’re notified, you can begin to examine if the process is broken and if additional teams need to be notified. - -## Collections - -Within your collection, you can also access the **Users** and **Data Sources** tabs to further customize your collection. - -### Users - -Within the **Users** tab, review the users and user groups with access to your collection. - -Select **Add** to search for a user or user group to add, who may be assigned as a viewer, editor, or manager. - -### Data sources - -Within the **Data Sources** tab, review and add source(s) of your data to create reports and dashboards inside the collection. - -## Additional analysis - -Now that we’ve created data sets within the **Home** page, let’s shift into the **Analysis** tab. - -Inside this tab, you’ll notice **Outlier Analysis** and **Branch Analysis**. - -### Outlier analysis - -Inside **Outlier Analysis**, we utilize heatmap displays. Click **Select Process**, choose your process, and choose your version. - -![heatmap example](./assets/heatmap.png) - -Within the example above, we notice increased heat (shown in red) surrounding our invoice approved gateway. Several instances have taken significantly longer than average, so we may choose to take a closer look at these instances by downloading the instance IDs, or viewing the details for further analysis. Here, you can also find out whether the outliers have a shared variable. - -### Branch analysis - -Inside the **Branch Analysis** tab, we can select a process and analyze how particular gateway branches impact the probability of reaching an end event. - -Fill in the process field, click on a gateway, and choose your end event. In the example below, we can further analyze the likelihood of an invoice being processed once it reaches the gateway for approval: - -![branch analysis example](./assets/analysis.png) - -Here, we’ve selected a process flow, gateway, and endpoint for a breakdown of all the instances that went through a particular gateway to a specific endpoint. Hover over the gateway for a breakdown of the process itself. - -## Additional resources and next steps - -We’ve only touched the surface of Optimize. The component is full of additional ways to analyze your data for effective process improvement. 
We recommend taking a look at several resources to catch up on Optimize’s latest release, new features, and many usage examples: -- [Camunda Optimize 3.6.0 Release](https://camunda.com/blog/2021/10/camunda-optimize-360-released/) -- [The Ultimate Guide to Solving Bottlenecks with Camunda: Part 1](https://camunda.com/blog/2021/10/the-ultimate-guide-to-solving-bottlenecks-with-camunda-part-1/) -- [Camunda Optimize examples](https://github.com/camunda/camunda-optimize-examples) -- [Process performance made transparent](https://camunda.com/products/camunda-platform/optimize/reports/) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/introduction-to-camunda-cloud.md b/versioned_docs/version-1.3/guides/introduction-to-camunda-cloud.md deleted file mode 100644 index f30df212975..00000000000 --- a/versioned_docs/version-1.3/guides/introduction-to-camunda-cloud.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: introduction-to-camunda -title: Introduction to Camunda Cloud -sidebar_label: Introduction to Camunda Cloud -slug: /guides/ -description: "Camunda delivers scalable, on-demand process automation as-a-service, combined with powerful execution engines for BPMN processes and DMN decisions." ---- - -[Camunda Cloud](https://camunda.io) delivers scalable, on-demand process automation as-a-service. Camunda Cloud is combined with powerful execution engines for BPMN processes and DMN decisions, and paired with tools for collaborative modeling, operations, and analytics. - -Camunda Cloud is comprised of six components: - -* Zeebe - Zeebe is the cloud-native process engine of Camunda Cloud. -* Operate - Manage, monitor, and troubleshoot your processes through Operate. -* Optimize - Improve your processes by identifying constraints in your system with Optimize. -* Tasklist - Use Tasklist to complete tasks which need human input. -* Cloud Console - Configure and deploy clusters with Cloud Console. -* Web Modeler - Collaborate and model processes, deploy and start new instances all without leaving Camunda Cloud. - -Camunda Cloud can be used with both Desktop Modeler, and Web Modeler. - -In this section of the Camunda Cloud documentation, you'll find guides for getting started with Camunda Cloud. For more conceptual information on Camunda Cloud, see [What is Camunda Cloud](components/concepts/what-is-camunda-cloud.md). - -## Next steps - -- [Sign up and log in to Camunda Cloud](/guides/getting-started/create-camunda-cloud-account.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/guides/message-correlation.md b/versioned_docs/version-1.3/guides/message-correlation.md deleted file mode 100644 index d983d227204..00000000000 --- a/versioned_docs/version-1.3/guides/message-correlation.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -id: message-correlation -title: Message Correlation -description: "Message correlation allows you to target a running workflow with a state update from an external system asynchronously." ---- -Intermediate -Time estimate: 20 minutes - -## Prerequisites - -- [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js) -- [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) -- [Desktop Modeler](https://camunda.com/download/modeler/) - -## Message correlation - -Message correlation is a powerful feature in Camunda Cloud. It allows you to target a running workflow with a state update from an external system asynchronously. 
- -This tutorial uses the [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js), but it serves to illustrate message correlation concepts that are applicable to all language clients. - -We will use [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production. However, it is useful during development. - -## Workflow - -Here is a basic example from [the Camunda Cloud documentation](/components/concepts/messages.md): - -![message correlation workflow](img/message-correlation-workflow.png) - -Use [Desktop Modeler](https://camunda.com/download/modeler/) to open the [test-messaging](https://github.com/jwulf/zeebe-message-correlation/blob/master/bpmn/test-messaging.bpmn) file in [this GitHub project](https://github.com/jwulf/zeebe-message-correlation). - -Click on the intermediate message catch event to see how it is configured: - -![message properties](img/message-correlation-message-properties.png) - -A crucial piece here is the **Subscription Correlation Key**. In a running instance of this workflow, an incoming **Money Collected** message will have a `correlationKey` property: - -```typescript - zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid" - } - }); -``` - - The concrete value of the message `correlationKey` is matched against running workflow instances by comparing the supplied value against the `orderId` variable of running instances subscribed to this message. This is the relationship established by setting the `correlationKey` to `orderId` in the message catch event in the BPMN. - -## Running the demonstration - -To run the demonstration, take the following steps: - -1. Clone this repository. - -2. Install dependencies: - - :::note - - This guide requires `npm` version 6. - - ::: - - ``` - npm i && npm i -g ts-node typescript - ``` - -3. In another terminal, start the Zeebe Broker in addition to [simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor). - -4. Deploy the workflow and start an instance: - - ``` - ts-node start-workflow.ts - ``` - -This starts a workflow instance with the `orderId` set to 345: - - ```typescript -await zbc.createProcessInstance("test-messaging", { - orderId: "345", - customerId: "110110", - paymentStatus: "unpaid" - }) - ``` - -5. Open Simple Monitor at [http://localhost:8082](http://localhost:8082). - -6. Click on the workflow instance. You will see the current state of the workflow: - - ![workflow state](img/message-correlation-workflow-state.png) - -The numbers above the BPMN symbols indicate that no tokens are waiting at the start event, and one has passed through. One token is waiting at the **Collect Money** task, and none have passed through. - -7. Take a look at the **Variables** tab at the bottom of the screen. (If you don't see it, you are probably looking at the workflow, rather than the instance. In that case, drill down into the instance): - -![message correlation variables](img/message-correlation-variables.png) - -You can see that this workflow instance has the variable `orderId` set to the value 345. - -8. Start the workers: - -``` -ts-node workers.ts -``` - -9. 
Refresh Simple Monitor to see the current state of the workflow: - -![message correlation wait on message](img/message-correlation-wait-on-message.png) - -Now, the token is at the message catch event, waiting for a message to be correlated. - -10. Take a look at the **Message Subscriptions** tab: - -![message subscriptions](img/message-correlation-message-subscriptions.png) - -You can see the broker has opened a message subscription for this workflow instance with the concrete value of the `orderId` 345. This was created when the token entered the message catch event. - -11. Send the message in another terminal: - -``` -ts-node send-message.ts -``` - -12. Refresh Simple Monitor, and note that the message has been correlated and the workflow has run to completion: - -![message correlation completed](img/message-correlation-completed.png) - -The **Message Subscriptions** tab now reports that the message was correlated: - -![message correlation correlated](img/message-correlation-correlated.png) - -## Message buffering - -Messages are buffered on the broker, so your external systems can emit messages before your process arrives at the catch event. The amount of time a message is buffered is configured when publishing the message from the client library. - -For example, to send a message buffered for 10 minutes with the JavaScript client: - -```typescript - zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid" - }, - timeToLive: 600000 - }); -``` - -To see it in action, take the following steps: - -1. Keep the workers running. -2. Publish the message: - -```typescript -ts-node send-message.ts -``` - -3. Click on **Messages** at the top of the Simple Monitor page. You will see the message buffered on the broker: - -![](img/message-correlation-buffered.png) - -4. Start another instance of the workflow: - -```typescript -ts-node start-workflow.ts -``` - -Note that the message is correlated to the workflow instance, even though it arrived before the workflow instance was started. - -## Common mistakes - -A couple of common gotchas: - -- The `correlationKey` in the BPMN message definition is the name of the workflow variable to match against. The `correlationKey` in the message is the concrete value to match against that variable in the workflow instance. - - - The message subscription _is not updated after it is opened_. That is not an issue in the case of a message catch event. However, for boundary message events (both interrupting and non-interrupting,) the subscription is opened _as soon as the token enters the bounding subprocess_. If any service task modifies the `orderId` value inside the subprocess, the subscription is not updated. - - For example, the interrupting boundary message event in the following example will not be correlated on the updated value, because the subscription is opened when the token enters the subprocess, using the value at that time: - - ![not correlating](img/message-correlation-not-like-this.png) - - If you need a boundary message event correlated on a value modified somewhere in your process, put the boundary message event in a subprocess after the task that sets the variable. The message subscription for the boundary message event will open when the token enters the subprocess, with the current variable value. - - ![correlating](img/message-correlation-like-this.png) - -## Summary - -Message Correlation is a powerful feature in Camunda Cloud. 
Knowing how messages are correlated, and how and when the message subscription is created, is important for designing systems that perform as expected. - -Simple Monitor is a useful tool for inspecting the behavior of a local Camunda Cloud system to figure out what is happening during development. diff --git a/versioned_docs/version-1.3/guides/migrating-from-Camunda-Platform.md b/versioned_docs/version-1.3/guides/migrating-from-Camunda-Platform.md deleted file mode 100644 index c7e36a38aa3..00000000000 --- a/versioned_docs/version-1.3/guides/migrating-from-Camunda-Platform.md +++ /dev/null @@ -1,575 +0,0 @@ ---- -id: migrating-from-Camunda-Platform -title: Migrating from Camunda 7 -description: "Migrate process solutions developed for Camunda Platform 7 to run them on Camunda Cloud." ---- -Advanced -Time estimate: 1 hour - -This guide describes how to migrate process solutions developed for Camunda Platform 7 to run them on Camunda Cloud. - -You will see the basic differences between the products, learn about necessary steps, and also the limitations of migration. - -It's important to note that migration of existing projects to Camunda Cloud is optional. Camunda Platform 7 is a great product with ongoing support. - - - -## Camunda 7 vs. Camunda Cloud - -Before diving into concrete steps on migrating your models and code, let's cover some important conceptual differences between Camunda 7 and Camunda Cloud and how this affects your projects and solutions. After this section, we'll dive into a concrete how-to. - - -### Conceptual differences - - -This section does not compare Camunda Platform 7 with Camunda Cloud in detail, but rather lists differing aspects important to know when thinking about migration. - - -#### No embedded engine in Camunda Cloud - -Camunda Platform 7 allows embedding the workflow engine as a library in your application. This means both run in the same JVM, share thread pools, and can even use the same datasource and transaction manager. - -In contrast, the workflow engine in Camunda Cloud is always a remote resource for your application, while the embedded engine mode is not supported. - If you are interested in the reasons why we switched our recommendation from embedded to remote workflow engines, please refer to [this blog post](https://blog.bernd-ruecker.com/moving-from-embedded-to-remote-workflow-engines-8472992cc371). - -The implications for your process solution and the programming model are described below. Conceptually, the only really big difference is that with a remote engine, you cannot share technical [ACID transactions](https://en.wikipedia.org/wiki/ACID) between your code and the workflow engine. You can read more about it in the blog post ["Achieving consistency without transaction managers"](https://blog.bernd-ruecker.com/achieving-consistency-without-transaction-managers-7cb480bd08c). - - - - -#### Different data types - -In Camunda Platform 7, you can store different data types, including serialized Java objects. - -Camunda Cloud only allows storage of primitive data types or JSON as process variables. This might require some additional data mapping in your code when you set or get process variables. - -Camunda Platform 7 provides [Camunda Spin](https://docs.camunda.org/manual/latest/reference/spin/) to ease XML and JSON handling. This is not available with Camunda Cloud, and ideally you migrate to your own data transformation logic that you can fully control (e.g. using Jackson). 
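To make this concrete, here is a minimal, hedged sketch of such hand-rolled data mapping with Jackson. The `Order` class, the variable name `order`, and the process ID `order-process` are made up for illustration, and the Zeebe Java client call shown is only one of several ways to pass JSON variables; the exact API depends on the client version you use.

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import io.camunda.zeebe.client.ZeebeClient;
import java.util.Map;

public class OrderVariableMapping {

  private static final ObjectMapper MAPPER = new ObjectMapper();

  // Hypothetical domain object that might have been stored as a serialized Java object in Camunda 7.
  public static class Order {
    public String orderId;
    public long amountInCents;
  }

  public void startInstance(ZeebeClient client, Order order) throws Exception {
    // Build the variables document yourself (instead of relying on Camunda Spin):
    String variablesJson = MAPPER.writeValueAsString(Map.of("order", order));

    client.newCreateInstanceCommand()
        .bpmnProcessId("order-process") // assumed process ID
        .latestVersion()
        .variables(variablesJson) // process variables are primitives or JSON, never Java objects
        .send()
        .join();
  }
}
```

Reading the variable back in a job worker is the mirror image: fetch the JSON from the activated job (for example via `getVariables()` or `getVariablesAsMap()`) and map it back with `MAPPER.readValue(...)`.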
- -To migrate existing process solutions that use Camunda Spin heavily, you can still add the Camunda Spin library to your application itself and use its API to do the same data transformations as before in your application code. - -#### Expression language - -Camunda Platform 7 uses [JUEL (Java Unified Expression Language)](https://docs.camunda.org/manual/latest/user-guide/process-engine/expression-language/) as the expression language. In the embedded engine scenario, expressions can even reach into beans (Java object instances) in the application. - -Camunda Cloud uses [FEEL (Friendly-Enough Expression Language)](/reference/feel/what-is-feel.md), and expressions can only access the process instance data and variables. - -Most expressions can be converted (see [this community extension](https://github.com/camunda-community-hub/camunda-platform-to-cloud-migration/blob/main/camunda-modeler-plugin-platform-to-cloud-converter/client/JuelToFeelConverter.js) as a starting point), some might need to be completely rewritten, and some might require an additional service task to prepare necessary data (which may have been calculated on the fly when using Camunda Platform 7). - -#### Different connector infrastructure - -Camunda Platform 7 provides several [connectors](https://docs.camunda.org/manual/latest/reference/connect/). These connectors are not supported in Camunda Cloud, as Camunda Cloud aims to create a much more powerful connector infrastructure. - -To migrate existing connectors, create a small bridging layer to invoke these connectors via custom [job workers](/components/concepts/job-workers.md). - - - - - - - -### Process solutions using Spring Boot - -With Camunda 7, a frequently used architecture to build a process solution (also known as a process application) is composed of: - -- Java -- Spring Boot -- Camunda Spring Boot Starter with embedded engine -- Glue code implemented in Java Delegates (being Spring beans) - -This is visualized on the left-hand side of the picture below. With Camunda Cloud, a comparable process solution would look like the right-hand side of the picture and leverage: - -- Java -- Spring Boot -- Spring Zeebe Starter (embedding the Zeebe client) -- Glue code implemented as workers (being Spring beans) - - - - -![spring boot](img/architecture-spring-boot.png) - -The difference is that the engine is no longer embedded, which is also our latest [greenfield stack recommendation in Camunda 7](/docs/components/best-practices/architecture/deciding-about-your-stack-c7/#the-java-greenfield-stack). If you are interested in the reasons why we switched our recommendation from embedded to remote workflow engines, please refer to [this blog post](https://blog.bernd-ruecker.com/moving-from-embedded-to-remote-workflow-engines-8472992cc371). - -The packaging of a process solution is the same with Camunda 7 and Camunda Cloud. Your process solution is one Java application that consists of your BPMN and DMN models, as well as all glue code needed for connectivity or data transformation. - -![Process Solution Packaging](img/process-solution-packaging.png) - -Process solution definition taken from ["Practical Process Automation"](https://processautomationbook.com/). - -You can find a complete Java Spring Boot example, showing the Camunda Platform 7 process solution alongside the comparable Camunda Cloud process solution, in the [Camunda Platform 7 to Camunda Cloud example](https://github.com/camunda-community-hub/camunda-platform-to-cloud-migration/tree/main/example). 
- - - -### Programming model - -The programming models of Camunda 7 and Camunda Cloud are very similar if you program in Java and use Spring. - -For example, a worker in Camunda Cloud can be implemented like this (using [spring-zeebe](https://github.com/camunda-community-hub/spring-zeebe)): - -```java -@ZeebeWorker(type = "payment", autoComplete = true) -public void retrievePayment(ActivatedJob job) { - // Do whatever you need to, e.g. invoke a remote service: - String orderId = (String) job.getVariablesAsMap().get("orderId"); - paymentRestClient.invoke(...); -} -``` - -You can find more information on the programming model in Camunda Cloud in [this blog post](https://blog.bernd-ruecker.com/how-to-write-glue-code-without-java-delegates-in-camunda-cloud-9ec0495d2ba5). - -:::note -JUnit testing with an embedded in-memory engine is also possible with Camunda Cloud, see [spring-zeebe documentation](https://github.com/camunda-community-hub/spring-zeebe#writing-test-cases). -::: - -### Platform deployment - -A typical deployment of the workflow engine itself looks different because the workflow engine is no longer embedded into your own deployment artifacts. - -With Camunda 7, a typical deployment includes: - -- Your Spring Boot application with all custom code and the workflow engine, cockpit, and tasklist embedded. This application is typically scaled to at least two instances (for resilience) -- A relational database -- An elastic database (for Optimize) -- Optimize (a Java application) - -With Camunda Cloud, you deploy: - -- Your Spring Boot application with all custom code and the Zeebe client embedded. This application is typically scaled to at least two instances (for resilience) -- The Zeebe broker, typically scaled to at least three instances (for resilience) -- An elastic database (for Operate, Tasklist, and Optimize) -- Optimize, Operate, and Tasklist (each one is a Java application). You can scale those applications to increase availability if you want. - -![Camunda 7 vs Camunda Cloud Deployment View](img/camunda7-vs-camunda-cloud-deployment-view.png) - -Camunda Cloud deployments happen within Kubernetes. There are [Helm charts available](/self-managed/zeebe-deployment/kubernetes/helm/installing-helm.md) if you want to run Camunda Cloud self-managed. - -Camunda Cloud is also available as a SaaS offering from Camunda. In this case, you only need to deploy your own process solution, and Camunda operates the rest. - -:::note -For local development purposes, you can [spin up Camunda Cloud on a developer machine using Docker or Docker Compose](/self-managed/zeebe-deployment/docker/install.md). Of course, developers could also create a cluster for development purposes in the SaaS offering of Camunda. -::: - -### Other process solution architectures - -Besides Spring Boot, other environments are also used to build process solutions. - -#### Container-managed engine (Tomcat, WildFly, Websphere & co) - -Camunda Cloud doesn't provide integration into Jakarta EE application servers like Camunda Platform 7 does. Instead, Jakarta EE applications need to manually add the Zeebe client library. The implications are comparable to what is described for Spring Boot applications in this guide. - -![container-managed](img/architecture-container-managed.png) - -#### CDI or OSGI - -Due to limited adoption, there is no support for CDI or OSGI in Camunda Cloud. 
A lightweight integration layer comparable to [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) might evolve in the future, and we are happy to support this as a community extension to the Zeebe project. - -#### Polyglot applications (C#, NodeJS, ...) - -When you run your application in, for example, NodeJS or C#, you exchange one remote engine (Camunda Platform 7) for another (Camunda Cloud). As Zeebe comes with a different API, you need to adjust your source code. Camunda Cloud does not use REST as API technology, but gRPC, so you will need to leverage a [client library](/apis-tools/overview.md) instead. - -![polyglot architecture](img/architecture-polyglot.png) - -### Plugins - -[**Process engine plugins**](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-plugins/) are not available in Camunda Cloud, as such plugins can massively change the behavior or even harm the stability of the engine. Some use cases might be implemented using [exporters](../../components/zeebe/technical-concepts/exporters). Note that exporters are only available for self-managed Zeebe clusters and not in Camunda Cloud SaaS. - -Migrating **Modeler Plugins** is generally possible, as the same modeler infrastructure is used. - -**Cockpit or tasklist plugins** *cannot* be migrated. - - -## Migration overview - -Let's first discuss whether you need to migrate, before diving into the necessary steps and the tools that can help you achieve the migration. - -### When to migrate? - -New projects should typically be started using Camunda Cloud. - -Existing solutions using Camunda 7 might simply keep running on Camunda 7. The platform has ongoing support, so there is no need to rush into a migration project. - -You should consider migrating existing Camunda 7 solutions if: - -- You are looking to leverage a SaaS offering (e.g. to reduce the effort for hardware or infrastructure setup and maintenance). -- You are in need of performance at scale and/or improved resilience. -- You are in need of certain features that can only be found in Camunda Cloud (e.g. [BPMN message buffering](/docs/components/concepts/messages/#message-buffering), big [multi-instance constructs](/docs/components/modeler/bpmn/multi-instance/), the new connectors framework, or the improved collaboration features in web modeler). - - -### Migration steps - -For migration, you need to look at development artifacts (BPMN models and application code), but also at workflow engine data (runtime and history) in case you migrate a process solution running in production. - -The typical steps are: - -1. Migrate development artifacts - 1. Adjust your BPMN models (only in rare cases you have to touch your DMN models) - 2. Adjust your development project (remove embedded engine, add Zeebe client) - 3. Refactor your code to use the Zeebe client API - 4. Refactor your glue code or use [the Java Delegate adapter project](https://github.com/camunda-community-hub/camunda-platform-to-cloud-migration/tree/main/camunda-platform-to-cloud-adapter). -2. Migrate workflow engine data - - -In general, **development artifacts** *can* be migrated: - -* **BPMN models:** Camunda Cloud uses BPMN like Camunda Platform 7 does, which generally allows use of the same model files, but you might need to configure *different extension attributes* (at least by using a different namespace). 
Furthermore, Camunda Cloud has a *different coverage* of BPMN concepts that are supported (see [Camunda Cloud BPMN coverage](/components/modeler/bpmn/bpmn-coverage.md) vs [Camunda Platform 7 BPMN coverage](https://docs.camunda.org/manual/latest/reference/bpmn20/)), which might require some model changes. Note that the coverage of Camunda Cloud will increase over time. - -* **DMN models:** Camunda Cloud uses DMN like Camunda Platform 7 does. There are no changes in the models necessary. Some rarely used features of Camunda Platform 7 are not supported in Camunda Cloud. Those are listed below. - -* **CMMN models:** It is not possible to run CMMN on Zeebe, *CMMN models cannot be migrated*. You can remodel cases in BPMN according to [Building Flexibility into BPMN Models](https://camunda.com/best-practices/building-flexibility-into-bpmn-models/), keeping in mind the [Camunda Cloud BPMN coverage](/components/modeler/bpmn/bpmn-coverage.md). - -* **Application code:** The application code needs to use *a different client library and different APIs*. This will lead to code changes you must implement. - -* **Architecture:** The different architecture of the core workflow engine might require *changes in your architecture* (e.g. if you used the embedded engine approach). Furthermore, certain concepts of Camunda Platform 7 are no longer possible (like hooking in Java code at various places, or control transactional behavior with asynchronous continuations) which might lead to *changes in your model and code*. - - - -In general, **workflow engine data** is harder to migrate to Camunda Cloud: - -* **Runtime data:** Running process instances of Camunda Platform 7 are stored in the Camunda Platform 7 database. Like with a migration from third party workflow engines, you can read this data from Camunda 7 and use it to create the right process instances in Camunda Cloud in the right state. This way, you can migrate running process instances from Camunda 7 to Camunda Cloud, but some manual effort is required. - -* **History data:** Historic data from the workflow engine itself cannot be migrated. However, data in Optimize can be kept. - - - - - - -### Migration tooling - -The [Camunda Platform 7 to Camunda Cloud migration tooling](https://github.com/camunda-community-hub/camunda-platform-to-cloud-migration), available as a community extension, contains two components that will help you with migration: - -1. [A Desktop Modeler plugin to convert BPMN models from Camunda Platform 7 to Camunda Cloud](https://github.com/berndruecker/camunda-platform-to-cloud-migration/tree/main/desktop-modeler-plugin-platform-to-cloud-converter). This maps possible BPMN elements and technical attributes into the Camunda Cloud format and gives you warnings where this is not possible. This plugin might not fully migrate your model, but should give you a jump-start. It can be extended to add your own custom migration rules. Note that the model conversion requires manual supervision. - -2. [The Camunda Platform 7 to Camunda Cloud Adapter](https://github.com/berndruecker/camunda-platform-to-cloud-migration/tree/main/camunda-platform-to-cloud-adapter). This is a library providing a worker to hook in Camunda Platform-based glue code. For example, it can invoke existing JavaDelegate classes. - -In essence, this tooling implements details described in the next sections. - - - -## Adjusting your source code - -Camunda Cloud has a different API than Camunda Platform 7. 
As a result, you have to migrate some of your code, especially code that does the following: - -* Uses the Client API (e.g. to start process instances) -* Implements [service tasks](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/service-task/), which can be: - * [Java code attached to a service task](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/) and called by the engine directly (in-VM). - * [External tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/), where workers subscribe to the engine. - - - - -For example, to migrate an existing Spring Boot application, take the following steps: - -1. Adjust Maven dependencies - * Remove Camunda Platform 7 Spring Boot Starter and all other Camunda dependencies. - * Add [Spring Zeebe Starter](https://github.com/zeebe-io/spring-zeebe). -2. Adjust config - * Make sure to set [Camunda Cloud credentials](https://github.com/camunda-community-hub/spring-zeebe#configuring-camunda-cloud-connection) (for example, in `src/main/resources/application.properties`) and point it to an existing Zeebe cluster. - * Remove existing Camunda Platform 7 settings. -3. Replace `@EnableProcessApplication` with `@EnableZeebeClient` in your main Spring Boot application class. -4. Add `@ZeebeDeployment(resources = "classpath*:**/*.bpmn")` to automatically deploy all BPMN models. - -Finally, adjust your source code and process model as described in the sections below. - -### Client API - -All Camunda Cloud APIs (e.g. to start process instances, subscribe to tasks, or complete them) have been completely redesigned are not compatible with Camunda Platform 7. While conceptually similar, the APIs use different method names, data structures, and protocols. - -If this affects large parts of your code base, you could write a small abstraction layer implementing the Camunda Platform 7 API delegating to Camunda Cloud, probably marking unavailable methods as deprecated. We welcome community extensions that facilitate this. - -### Service tasks with attached Java code (Java Delegates, Expressions) - -In Camunda Platform 7, there are three ways to attach Java code to service tasks in the BPMN model using different attributes in the BPMN XML: - -* Specify a class that implements a JavaDelegate or ActivityBehavior: ```camunda:class```. -* Evaluate an expression that resolves to a delegation object: ```camunda:delegateExpression```. -* Invoke a method or value expression: ```camunda:expression```. - -Camunda Cloud cannot directly execute custom Java code. Instead, there must be a [job worker](/components/concepts/job-workers.md) executing code. - -The [Camunda Platform 7 to Camunda Cloud Adapter](https://github.com/berndruecker/camunda-platform-to-cloud-migration/tree/main/camunda-platform-to-cloud-adapter) implements such a job worker using [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe). It subscribes to the task type ```camunda-platform-to-cloud-migration```. [Task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) are used to configure a delegation class or expression for this worker. - -![Service task in Camunda 7 and Camunda Cloud](img/migration-service-task.png) - -You can use this worker directly, but more often it might serve as a starting point or simply be used for inspiration. 
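For illustration, a hand-rolled job worker replacing a former JavaDelegate could look roughly like the following sketch using Spring Zeebe. The task type `send-invoice`, the class name, and the `invoiceId` variable are made-up examples, and the exact annotation package depends on the Spring Zeebe version you use:

```java
import io.camunda.zeebe.client.api.response.ActivatedJob;
import io.camunda.zeebe.client.api.worker.JobClient;
import io.camunda.zeebe.spring.client.annotation.ZeebeWorker;
import org.springframework.stereotype.Component;

@Component
public class SendInvoiceWorker {

  // Subscribes to the task type configured on the service task in the BPMN model
  @ZeebeWorker(type = "send-invoice")
  public void handle(final JobClient client, final ActivatedJob job) {
    // The business logic that used to live in the JavaDelegate goes here
    final String invoiceId = (String) job.getVariablesAsMap().get("invoiceId");

    // ... call your existing service code with invoiceId ...

    // Complete the job so the process instance can continue
    client.newCompleteCommand(job.getKey()).send().join();
  }
}
```

The important design shift is that the worker pulls jobs from the engine and completes them explicitly, instead of the engine calling your code in-VM.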
- -The [Camunda Platform 7 to Camunda Cloud Converter Modeler plugin](https://github.com/berndruecker/camunda-platform-to-cloud-migration/tree/main/camunda-modeler-plugin-platform-to-cloud-converter) will adjust the service tasks in your BPMN model automatically for this adapter. - -The topic ```camunda-platform-to-cloud-migration``` is set and the following attributes/elements are migrated and put into a task header: -* ```camunda:class``` -* ```camunda:delegateExpression``` -* ```camunda:expression``` and ```camunda:resultVariable``` - - - -### Service tasks as external tasks - -[External task workers](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) in Camunda Platform 7 are conceptually comparable to [job workers](/components/concepts/job-workers.md) in Camunda Cloud. This means they are generally easier to migrate. - -The "external task topic" from Camunda Platform 7 is directly translated in a "task type name" in Camunda Cloud, therefore ```camunda:topic``` gets ```zeebe:taskDefinition type``` in your BPMN model. - -Now, you must adjust your external task worker to become a job worker. - - - -## Adjusting Your BPMN models - -To migrate BPMN process models from Camunda Platform 7 to Camunda Cloud, you must adjust them: - -* The namespace of extensions has changed (from ```http://camunda.org/schema/1.0/bpmn``` to ```http://camunda.org/schema/zeebe/1.0```) -* Different configuration attributes are used -* Camunda Cloud has a *different coverage* of BPMN elements (see [Camunda Cloud BPMN coverage](/components/modeler/bpmn/bpmn-coverage.md) vs [Camunda Platform 7 BPMN coverage](https://docs.camunda.org/manual/latest/reference/bpmn20/)), which might require some model changes. Note that the coverage of Camunda Cloud will increase over time. - -The following sections describe what the existing [Camunda Platform 7 to Camunda Cloud migration tooling](https://github.com/camunda-community-hub/camunda-platform-to-cloud-migration) does by BPMN symbol and explain unsupported attributes. - -### Service tasks - -![Service Task](../components/modeler/bpmn/assets/bpmn-symbols/service-task.svg) - -Migrating a service task is described in detail in the section about adjusting your source code above. - -A service task might have **attached Java code**. In this case, the following attributes/elements are migrated and put into a task header: -* ```camunda:class``` -* ```camunda:delegateExpression``` -* ```camunda:expression``` and ```camunda:resultVariable``` - -The topic ```camunda-platform-to-cloud-migration``` is set. - -The following attributes/elements cannot be migrated: -* ```camunda:asyncBefore```: Every task in Zeebe is always asyncBefore and asyncAfter. -* ```camunda:asyncAfter```: Every task in Zeebe is always asyncBefore and asyncAfter. -* ```camunda:exclusive```: Jobs are always exclusive in Zeebe. -* ```camunda:jobPriority```: There is no way to prioritize jobs in Zeebe (yet). -* ```camunda:failedJobRetryTimeCycle```: You cannot yet configure the retry time cycle. - -A service task might leverage **external tasks** instead. In this case, the following attributes/elements are migrated: -* ```camunda:topic``` gets ```zeebe:taskDefinition type```. - -The following attributes/elements cannot be migrated: -* ```camunda:taskPriority``` - -Service tasks using ```camunda:type``` cannot be migrated. - -Service tasks using ```camunda:connector``` cannot be migrated. 
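To make the external task migration described above more concrete, the following sketch contrasts a Camunda Platform 7 external task worker with the corresponding Zeebe job worker. The topic/task type `payment`, the REST base URL, and the gateway address are placeholders; in a migrated solution only the Zeebe half remains:

```java
import io.camunda.zeebe.client.ZeebeClient;
import org.camunda.bpm.client.ExternalTaskClient;

public class PaymentWorkers {

  public static void main(String[] args) {
    // Camunda Platform 7: external task worker subscribing to the camunda:topic "payment"
    ExternalTaskClient c7Client = ExternalTaskClient.create()
        .baseUrl("http://localhost:8080/engine-rest")
        .build();
    c7Client.subscribe("payment")
        .lockDuration(10_000)
        .handler((task, service) -> service.complete(task))
        .open();

    // Camunda Cloud: job worker subscribing to the zeebe:taskDefinition type "payment"
    ZeebeClient zeebeClient = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build();
    zeebeClient.newWorker()
        .jobType("payment")
        .handler((jobClient, job) -> jobClient.newCompleteCommand(job.getKey()).send().join())
        .open();
  }
}
```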
- - -### Send tasks - -![Send Task](../components/modeler/bpmn/assets/bpmn-symbols/send-task.svg) - -In both engines, a send task has the same behavior as a service task. A send task is migrated exactly like a service task. - -### Gateways - -Gateways rarely need migration. The relevant configuration is mostly in the expressions on outgoing sequence flows. - -### Expressions - -Expressions need to be in [FEEL (friendly-enough expression language)](/components/concepts/expressions.md#the-expression-language) instead of [JUEL (Java unified expression language)](https://docs.camunda.org/manual/latest/user-guide/process-engine/expression-language/). - -Migrating simple expressions is doable (as you can see in [these test cases](https://github.com/camunda-community-hub/camunda-platform-to-cloud-migration/blob/main/camunda-modeler-plugin-platform-to-cloud-converter/client/JuelToFeelConverter.test.js)), but not all expressions can be automatically converted. - -The following is not possible: - -* Calling out to functional Java code using beans in expressions -* Registering custom function definitions within the expression engine - -### Human tasks - -![User Task](../components/modeler/bpmn/assets/bpmn-symbols/user-task.svg) - -Human task management is also available in Camunda Cloud, but uses a different tasklist user interface and API. - -In Camunda Platform 7, you have [different ways to provide forms for user tasks](https://docs.camunda.org/manual/latest/user-guide/task-forms/): - -* Embedded Task Forms (embedded custom HTML and JavaScript) -* Camunda Forms (simple forms defined via Desktop Modeler properties) -* External Task Forms (link to custom applications) -* [Camunda Forms](./utilizing-forms.md) - -Only Camunda Forms are currently supported in Camunda Cloud and can be migrated. - -The following attributes/elements can be migrated: - -* Task assignment (to users or groups): - * ```bpmn:humanPerformer``` - * ```bpmn:potentialOwner``` - * ```camunda:assignee``` - * ```camunda:candidateGroups``` - * ```camunda:formKey```, but Camunda Cloud requires you to embedd the form definition itself into the root element of your BPMN XML models, see [this guide](/docs/guides/utilizing-forms/#connect-your-form-to-a-bpmn-diagram). - -The following attributes/elements cannot (yet) be migrated: - -* ```camunda:candidateUsers``` (only candidate groups are supported) -* Form handling: - * ```camunda:formHandlerClass``` - * ```camunda:formData``` - * ```camunda:formProperty``` -* ```camunda:taskListener``` -* ```camunda:dueDate``` -* ```camunda:followUpDate``` -* ```camunda:priority``` - - - -### Business rule tasks - -![Business Rule Task](../components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg) - -For some time, the DMN engine was not yet directly integrated into Camunda Cloud, which is why the [Zeebe DMN Worker](https://github.com/camunda-community-hub/zeebe-dmn-worker), provided as as a community extension, was used to execute your existing DMN models. This is subject to change soon, as out-of-the-box support for DMN is added to Camunda Cloud at the moment. - -The migration tool currently sets the task definition type to ```DMN``` and the ```camunda:decisionRef``` is moved to a task header attribute for this worker. 
- -The following attributes/elements can be migrated: -* ```camunda:decisionRef``` - - -The following attributes are not yet supported: - -* ```camunda:decisionRefBinding```, ```camunda:decisionRefVersion```, and ```camunda:decisionRefVersionTag``` (the latest version is always used) -* ```camunda:mapDecisionResult``` (no mapping happens) -* ```camunda:resultVariable``` (the result is always mapped to the variable 'result' and can be copied or unwrapped using ioMapping). -* ```camunda:decisionRefTenantId``` - -A business rule task can also *behave like a service task* to allow integration of third-party rule engines. In this case, the following attributes can also be migrated as described above for the service task migration: ```camunda:class```, ```camunda:delegateExpression```, ```camunda:expression```, or ```camunda:topic```. - -The following attributes/elements cannot be migrated: -* ```camunda:asyncBefore```, ```camunda:asyncAfter```, ```camunda:exclusive```, ```camunda:failedJobRetryTimeCycle```, and ```camunda:jobPriority``` -* ```camunda:type``` and ```camunda:taskPriority``` -* ```camunda:connector``` - - -### Call activities - -![Call Activity](../components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg) - -Call activities are generally supported in Zeebe. The following attributes/elements can be migrated: - -* ```camunda:calledElement``` will be converted into ```zeebe:calledElement``` -* Data mapping - * ```camunda:in``` - * ```camunda:out``` - -The following attributes/elements cannot be migrated: -* ```camunda:calledElementBinding```: Currently, Zeebe always assumes 'late' binding. -* ```camunda:calledElementVersionTag```: Zeebe does not have a version tag. -* ```camunda:variableMappingClass```: You cannot execute code to do variable mapping in Zeebe. -* ```camunda:variableMappingDelegateExpression```: You cannot execute code to do variable mapping in Zeebe. - - - -### Script tasks - -![Script Task](../components/modeler/bpmn/assets/bpmn-symbols/script-task.svg) - -Script tasks cannot natively be executed by the Zeebe engine. They behave like normal service tasks instead, which means you must run a job worker that can execute scripts. One available option is to use the [Zeebe Script Worker](https://github.com/camunda-community-hub/zeebe-script-worker), provided as a community extension. - -If you do this, the following attributes/elements are migrated: -* ```camunda:scriptFormat``` -* ```camunda:script``` -* ```camunda:resultVariable``` - -The task type is set to ```script```. - -The following attributes/elements cannot be migrated: -* ```camunda:asyncBefore```: Every task in Zeebe is always asyncBefore and asyncAfter. -* ```camunda:asyncAfter```: Every task in Zeebe is always asyncBefore and asyncAfter. -* ```camunda:exclusive```: Jobs are always exclusive in Zeebe. -* ```camunda:jobPriority```: There is no way to prioritize jobs in Zeebe (yet). -* ```camunda:failedJobRetryTimeCycle```: You cannot yet configure the retry time cycle. - -### Message receive events and receive tasks - -Message correlation works slightly differently between the two products: - -* Camunda Platform 7 simply waits for a message, and the code that receives the message queries for a process instance to correlate it to. If no process instance is ready to receive that message, an exception is raised. - -* Camunda Cloud creates a message subscription for every waiting process instance. 
This subscription requires a value for a ```correlationKey``` to be generated when entering the receive task. The code receiving the external message correlates using the value of the ```correlationKey```. - -This means you must inspect and adjust all message receive events or receive tasks in your model to define a reasonable ```correlationKey```. You also must adjust your client code accordingly. - -The ```bpmn message name``` is used in both products and doesn't need migration. - - -## Adjusting your DMN models - -For Camunda Cloud, [a former community extension](https://github.com/camunda-community-hub/dmn-scala), built by core Camunda developers, is productized. This engine has a higher coverage of DMN elements. This engine can execute DMN models designed for Camunda Platform 7, however, there are some small differences which can be assessed looking at [this issue](https://github.com/camunda/camunda-modeler/issues/2525). - -:::note -The DMN engine is being added to the Camunda Cloud stack at the moment and might not yet be available to you when you read this. -::: - - - -## Prepare for smooth migrations - -Whenever you build a process solution using Camunda Platform 7, you can follow these recommendations to create a process solution that will be easier to migrate later on: - -* Use Java, Maven, and Spring Boot. -* Separate your business logic from Camunda API. -* Use external tasks. -* Stick to basic usage of public API (no engine plugins or extensions). -* Don't expose Camunda APIs (REST or Java) to front-end applications. -* Use primitive variable types or JSON payloads only (no XML or serialized Java objects). -* Use JSONPath on JSON payloads (translates easier to FEEL). -* Stick to [BPMN elements supported in Camunda Cloud](/components/modeler/bpmn/bpmn-coverage.md). -* Use [FEEL as script language in BPMN](https://camunda.github.io/feel-scala/docs/reference/developer-guide/bootstrapping#use-as-script-engine), e.g. on Gateways. -* Use Camunda Forms. - -## Open issues - -As described earlier in this guide, migration is an ongoing topic and this guide is far from complete. Open issues include the following: - -* Describe implications on testing. -* Discuss adapters for Java or REST client. -* Discuss external task adapter for Java code and probably add it to the [Camunda Platform 7 to Camunda Cloud Adapter](https://github.com/berndruecker/camunda-platform-to-cloud-migration/tree/main/camunda-platform-to-cloud-adapter). -* Discuss more concepts around BPMN -** [Field injection](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/#field-injection) that is using ```camunda:field``` available on many BPMN elements. -** Multiple instance markers available on most BPMN elements. -** ```camunda:inputOutput``` available on most BPMN elements. -** ```camunda:errorEventDefinition``` available on several BPMN elements. - -And even more. - -Please [reach out to us](/contact/) to discuss your specific migration use case. - -## Summary - -In this guide, you hopefully gained a better understanding of what migration from Camunda Platform 7 to Camunda Cloud means. 
Specifically, this guide outlined the following: - -* Differences in application architecture -* How process models and code can generally be migrated, whereas runtime and history data cannot -* How migration can be very simple for some models, but also marked limitations, where migration might get very complicated -* You need to adjust code that uses the workflow engine API -* How you might be able to reuse glue code -* Community extensions that can help with migration - -We are watching all customer migration projects closely and will update this guide in the future. diff --git a/versioned_docs/version-1.3/guides/operating-the-camunda-cloud-stack-on-kubernetes.md b/versioned_docs/version-1.3/guides/operating-the-camunda-cloud-stack-on-kubernetes.md deleted file mode 100644 index 3cb42fb1c06..00000000000 --- a/versioned_docs/version-1.3/guides/operating-the-camunda-cloud-stack-on-kubernetes.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -id: operating-the-camunda-cloud-stack-on-kubernetes -title: Operate the Camunda Cloud stack on Kubernetes ---- - -... diff --git a/versioned_docs/version-1.3/guides/setting-up-development-project.md b/versioned_docs/version-1.3/guides/setting-up-development-project.md deleted file mode 100644 index 354d11257c0..00000000000 --- a/versioned_docs/version-1.3/guides/setting-up-development-project.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: setting-up-development-project -title: Setting up your first development project -description: "Set up your first project to model, deploy, and start a process instance." -keywords: [get-started, local-install] ---- -Beginner -Time estimate: 20 minutes - -## Prerequisites - -- [Camunda Cloud SaaS](https://camunda.io) -- [Desktop Modeler](https://camunda.com/download/modeler/) -- [Operate](/self-managed/operate-deployment/install-and-start.md) -- [Tasklist](/self-managed/tasklist-deployment/install-and-start.md) -- [Optimize]($optimize$/components/what-is-optimize/) - -## Setting up your project - -Let's set up your first project to model, deploy, and start a process instance. - -The [camunda-cloud-get-started GitHub repository](https://github.com/camunda-cloud/camunda-cloud-get-started) -contains a hands-on guide for setting up a Camunda Cloud project locally. - -The guide offers a general walk-through on how to model, deploy, and start a -process instance. It also includes code examples on how to connect to the -cluster and complete jobs. diff --git a/versioned_docs/version-1.3/guides/update-guide/026-to-100.md b/versioned_docs/version-1.3/guides/update-guide/026-to-100.md deleted file mode 100644 index 6ae085acf6d..00000000000 --- a/versioned_docs/version-1.3/guides/update-guide/026-to-100.md +++ /dev/null @@ -1,306 +0,0 @@ ---- -id: 026-to-100 -title: Update 0.26 to 1.0 -description: "Review which adjustments must be made to migrate from Camunda Cloud 0.26.x to 1.0.0." ---- -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 0.26.x to 1.0.0 for each component of the system. - -:::caution - -Be aware that the major version update from 0.26 to 1.0 is not backwards -compatible. Therefore, data cannot be migrated from 0.26 to 1.0 and client -applications must be adjusted to the new API versions. - -::: - -## Server - -### Zeebe - -#### Distribution - -With Zeebe 1.0.0, the Java package names were adjusted. They changed from -`io.zeebe` to `io.camunda.zeebe`. Therefore, any logging configurations and -similar, which are based on the package names, must be adjusted. 
- -Additionally, the group id of the Java artifacts were migrated from `io.zeebe` -to `io.camunda`. This requires all dependencies to the artifacts to be updated -to use the new group id. - -The downloadable artifact of the Zeebe distribution was renamed from: -- `zeebe-distribution-${VERSION}.tar.gz` to `camunda-cloud-zeebe-${VERSION}.tar.gz`, -- `zeebe-distribution-${VERSION}.zip` to `camunda-cloud-zeebe-${VERSION}.zip` - -#### Workflow Engine - -The support for YAML workflows was removed from the workflow engine, after the -deprecation with 0.26. This means only [BPMN -processes](/components/modeler/bpmn/bpmn-primer.md) are supported from now on. - -#### Elasticsearch Exporter - -The supported Elasticsearch version of the Elasticsearch Exporter was increased -from `6.8` to `7.10`, read more about this in the -[Elasticsearch](#elasticsearch) section. - -The index templates of the Elasticsearch Exporter were migrated to use -[composable index templates](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html). - -#### Protocol - -The Java protocol received the following adjustments: - -- Enum `IncidentIntent` - - remove `CREATE` - - change short value of `CREATED` to `0` - - change short value of `RESOLVE` to `1` - - change short value of `RESOLVED` to `2` - -- Enum `WorkflowInstanceIntent` renamed to `ProcessInstanceIntent` - -- Enum `WorkflowInstanceSubscriptionIntent` renamed to `ProcessMessageSubscriptionIntent` - -- Enum `WorkflowInstanceCreationIntent` renamed to `ProcessInstanceCreationIntent` - -- Enum `JobIntent` - - remove `CREATE` and `ACTIVATED` - - change short value of `CREATED` to `0` - - change short value of `COMPLETE` to `1` - - change short value of `COMPLETED` to `2` - - change short value of `TIME_OUT` to `3` - - change short value of `TIMED_OUT` to `4` - - change short value of `FAIL` to `5` - - change short value of `FAILED` to `6` - - change short value of `UPDATE_RETRIES` to `7` - - change short value of `RETRIES_UPDATED` to `8` - - change short value of `CANCEL` to `9` - - change short value of `CANCELED` to `10` - - change short value of `THROW_ERROR` to `11` - - change short value of `ERROR_THROWN` to `12` - -- Enum `MessageIntent` - - rename `DELETE` to `EXPIRE` - - rename `DELETED` to `EXPIRED` - -- Enum `MessageStartEventSubscriptionIntent` - - remove `OPEN`, `OPENED`, `CLOSE` and `CLOSED` - - add `CREATED`, `CORRELATED` and `DELETED` - -- Enum `MessageSubscriptionIntent` - - rename `OPEN` to `CREATE` - - rename `OPENED` to `CREATED` - - rename `CLOSE` to `DELETE` - - rename `CLOSED` to `DELETED` - - add `CORRELATING - -- Enum `TimerIntent` - - remove `CREATE` - - change short value of `CREATED` to `0` - - change short value of `TRIGGER` to `1` - - change short value of `TRIGGERED` to `2` - - change short value of `CANCEL` to `3` - - change short value of `CANCELED` to `4` - -- Interface `DeploymentRecordValue` - - rename method `getDeployedWorkflows` to `getProcessMetadata` and change type from `List` to `List` - -- Interface `IncidentRecordValue` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - -- Interface `JobRecordValue` - - rename method `getWorkflowDefinitionVersion` to `getProcessDefinitionVersion` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - -- Interface `MessageStartEventSubscriptionRecordValue` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - - rename method `getWorkflowInstanceKey` to 
`getProcessInstanceKey` - -- Interface `MessageSubscriptionRecordValue` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - -- Interface `TimerRecordValue` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - -- Interface `VariableRecordValue` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - -#### gRPC - -The following changes were made to the gRPC protocol definition: - -- `ActivatedJob` message - - rename field `workflowInstanceKey` to `processInstanceKey` - - rename field `workflowDefinitionVersion` to `processDefinitionVersion` - - rename field `workflowKey` to `processDefinitionKey` - -- `CancelWorkflowInstanceRequest` message renamed to - `CancelProcessInstanceRequest` - - rename field `workflowInstanceKey` to `processInstanceKey` - -- `CancelWorkflowInstanceResponse` message renamed to - `CancelProcessInstanceResponse` - -- `CreateWorkflowInstanceRequest` message renamed to - `CreateProcessInstanceRequest` - - rename field `workflowKey` to `processDefinitionKey` - -- `CreateWorkflowInstanceResponse` message renamed to - `CreateProcessInstanceResponse` - - rename field `workflowKey` to `processDefinitionKey` - - rename field `workflowInstanceKey` to `processInstanceKey` - -- `CreateWorkflowInstanceWithResultRequest` message renamed to - `CreateProcessInstanceWithResultRequest` - - change type of field `request` from `CreateWorkflowInstanceRequest` to `CreateProcessInstanceRequest` - -- `CreateWorkflowInstanceWithResultResponse` message renamed to - `CreateProcessInstanceWithResultResponse` - - rename field `workflowKey` to `processDefinitionKey` - - rename field `workflowInstanceKey` to `processInstanceKey` - -- `DeployWorkflowRequest` message renamed to `DeployProcessRequest` - - rename field `workflows` to `processes` and change type from `WorkflowRequestObject` to `ProcessRequestObject` - -- `WorkflowRequestObject` message renamed to `ProcessRequestObject` - - remove enum `ResourcetType` - - remove field type - - change field id of `definition` field to 2 - -- `DeployWorkflowResponse` message renamed to `DeployProcessResponse` - - rename field `wokrflows` to `processes` and change type from `WorkflowMetadata` to `ProcessMetadata` - -- `WorkflowMetadata` message renamed to `ProcessMetadata` - - rename field `workflowKey` to `processDefinitionKey` - -- `Partition` message - - enum `PartitionBrokerRole` added `INACTIVE` state - -- `Gateway` service - - rename rpc `CancelWorkflowInstance` to `CancelProcessInstance` and change input type from `CancelWorkflowInstanceRequest` to `CancelProcessInstanceRequest` and output type from `CancelWorkflowInstanceResponse` to `CancelProcessInstanceResponse` - - - rename rpc `CreateWorkflowInstance` to `CreateProcessInstance` and change input type from `CreateWorkflowInstanceRequest` to `CreateProcessInstanceRequest` and output type from `CreateWorkflowInstanceResponse` to `CreateProcessInstanceResponse` - - - rename rpc `CreateWorkflowInstanceWithResult` to `CreateProcessInstance` and change input type from `CreateWorkflowInstanceWithResultRequest` to `CreateProcessInstanceWithResultRequest` and output type from `CreateWorkflowInstanceWithResultResponse` to `CreateProcessInstanceResponse` - - - rename rpc `DeployWorkflow` to `DeployProcess` and change input type from `DeployWorkflowRequest` to `DeployProcessRequest` and output type from `DeployWorkflowResponse` 
to `DeployProcessResponse` - -#### Exporter API - -In the Java Exporter API, the deprecated method `Controller#scheduleTask` was removed. - -### Operate - -With Operate 1.0.0, the Java package names were adjusted. They changed from -`org.camunda.operate` to `io.camunda.operate`. Therefore, any logging -configurations and similar, which are based on the package names, must be -adjusted. - -The downloadable artifact of the Operate distribution was renamed from: -- `camunda-operate-${VERSION}.tar.gz` to `camunda-cloud-operate-${VERSION}.tar.gz`, -- `camunda-operate-${VERSION}.zip` to `camunda-cloud-operate-${VERSION}.zip` - -The supported Elasticsearch version was increased from `6.8` to `7.10`. Read -more about this in the [Elasticsearch](#elasticsearch) section. - -### Tasklist - -With Tasklist 1.0.0, the Java package names were adjusted. They changed from -`io.zeebe.tasklist` to `io.camunda.tasklist`. Therefore, any logging -configurations and similar, which are based on the package names, must be -adjusted. - -Additionally, the configuration prefix was migrated from `zeebe.tasklist` to -`camunda.tasklist`, which requires all configurations to be adjusted to the new -prefix. - -The downloadable artifact of the Tasklist distribution was renamed from: -- `zeebe-tasklist-${VERSION}.tar.gz` to `camunda-cloud-tasklist-${VERSION}.tar.gz`, -- `zeebe-tasklist-${VERSION}.zip` to `camunda-cloud-tasklist-${VERSION}.zip` - -The supported Elasticsearch version was increased from `6.8` to `7.10`. Read -more about this in the [Elasticsearch](#elasticsearch) section. - -### Elasticsearch - -Zeebe, Operate, and Tasklist use Elasticsearch as a datastore to exchange the event -stream from Zeebe's exporter and to store their own data model representation. - -Camunda Cloud 1.0 requires an update from Elasticsearch 6.8 to 7.10. - -Follow the [update guide from -Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) -to migrate existing data. - -:::note -Zeebe, Operate, and Tasklist data inside Elasticsearch cannot be migrated; it can only be preserved for -history or audit purposes, but cannot be loaded by Camunda Cloud 1.0. -::: - -If you want to keep the existing data in Elasticsearch, ensure you set a -new index prefix for all systems. See the configuration documentation for -[Zeebe](self-managed/zeebe-deployment/index.md), -[Operate](self-managed/operate-deployment/configuration.md), and [Tasklist](self-managed/tasklist-deployment/configuration.md). - -## Client - -### Zeebe Java Client - -With Zeebe 1.0.0, the Java package names were adjusted. They changed from -`io.zeebe` to `io.camunda.zeebe`. Therefore, any imports and logging -configurations and similar, which are based on the package names, must be -adjusted. - -Additionally, the group id of the Java artifacts was migrated from `io.zeebe` -to `io.camunda`. This requires all dependencies on these artifacts to be updated -to use the new group id.
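For example, code that creates a client typically needs nothing more than an adjusted import (a minimal sketch; the gateway address is a placeholder):

```java
// Before Zeebe 1.0.0 this class was imported from io.zeebe.client.ZeebeClient
import io.camunda.zeebe.client.ZeebeClient;

public class ClientFactory {

  public static ZeebeClient create() {
    // Same builder API as before; only the package (and the Maven group id io.camunda) changed
    return ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build();
  }
}
```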
- -The public API of the Java client changed as follows: - -- Interface `ActivatedJob` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - - rename method `getWorkflowDefinitionVersion` to `getProcessDefinitionVersion` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - -- Class `ClientProperties` - - remove deprecated field `BROKER_CONTACTPOINT` - -- Interface `ZeebeClientBuilder` - - remove deprecated method `brokerContactPoint` - -- Interface `ZeebeClientConfiguration` - - remove deprecated method `getBrokerContactPoint` - -- Interface `ZeebeClient` - - change return type of `newDeployCommand` from `DeployWorkflowCommandStep1` to `DeployProcessCommandStep1` - - change return type of `newCreateInstanceCommand` from `CreateWorkflowInstanceCommandStep1` to `CreateProcessInstanceCommandStep1` - - change return type of `newCancelInstanceCommand` from `CancelWorkflowInstanceCommandStep1` to `CancelProcessInstanceCommandStep1` - -### Zeebe Go Client - -The repository of Zeebe was moved from `github.com/zeebe-io/zeebe` to -`github.com/camunda-cloud/zeebe`. Therefore, all go dependencies and imports must be adjusted to the new GitHub URL. - -The public API of the Go client was changed as follows: - -- Interface `CancelInstanceStep1` - - rename method `WorkflowInstanceKey` to `ProcessInstanceKey` and change return type from `DispatchCancelWorkflowInstanceCommand` to `DispatchCancelProcessInstanceCommand` - -- Interface `DispatchCancelWorkflowInstanceCommand` renamed to `DispatchCancelProcessInstanceCommand` - -- Interface `CancelWorkflowInstanceCommand` renamed to `CancelProcessInstanceCommand` - - rename method `WorkflowInstanceKey` to `ProcessInstanceKey` - -- Interface `CreateInstanceCommandStep1` - - rename method `WorkflowKey` to `ProcessDefinitionKey` and change `DispatchCancelProcessInstanceCommand` - -- Struct `DeployCommand` - - method `AddResource(definition, name, resourceType)` remove `resourceType` from parameter list - -- Interface `Client` - - rename method `NewDeployWorkflowCommand` to `NewDeployProcessCommand` - diff --git a/versioned_docs/version-1.3/guides/update-guide/100-to-110.md b/versioned_docs/version-1.3/guides/update-guide/100-to-110.md deleted file mode 100644 index f0f83958cee..00000000000 --- a/versioned_docs/version-1.3/guides/update-guide/100-to-110.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: 100-to-110 -title: Update 1.0 to 1.1 -description: "Review which adjustments must be made to migrate from Camunda Cloud 1.0.x to 1.1.0." ---- -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 1.0.x to 1.1.0 for each component of the system. - -:::caution - - We identified an issue in our recent release of Operate 1.1.0 (part of Camunda - Cloud 1.1.0). This issue only applies if you are updating from a previous - version of Operate 1.0.x to Operate 1.1. - - We recommend to immediately update to 1.1.1 by skipping the 1.1.0 release. - -::: - -## Server - -### Operate - -With Operate 1.1, a new feature was introduced to navigate between call activity -hierarchies. This feature is only available for instances started after the -version 1.1 update. Older instances will not expose this information to the user. - -## Client - -### Zeebe Java Client - -To prepare to support multiple regions in Camunda Cloud SaaS, we adopted the -Zeebe URLs used to connect to your cluster to contain a region sub-domain, i.e -`${CLUSTE_ID}.zeebe.camunda.io` is now `${CLUSTER_ID}.bru-2.zeebe.camunda.io`. 
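In code, the region can be set explicitly on the Java client's Camunda Cloud builder via the `withRegion` method described below (a sketch; cluster ID, client ID, and client secret are placeholders):

```java
import io.camunda.zeebe.client.ZeebeClient;

public class CloudClientFactory {

  public static ZeebeClient create() {
    return ZeebeClient.newCloudClientBuilder()
        .withClusterId("my-cluster-id")   // placeholder
        .withClientId("my-client-id")     // placeholder
        .withClientSecret("my-secret")    // placeholder
        .withRegion("bru-2")              // region sub-domain of your cluster URL
        .build();
  }
}
```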
- -We are confident that we rolled out this change transparently and as backwards -compatible as possible. Still, there exists scenarios which this might impact -you on your update path. For existing clusters, the old URLs will still be -functional. We recommend you update the configuration of your clients to -the new URL format. - -To support this feature, we expose a new configuration method -[`withRegion`](https://javadoc.io/doc/io.camunda/zeebe-client-java/latest/io/camunda/zeebe/client/ZeebeClientCloudBuilderStep1.ZeebeClientCloudBuilderStep2.ZeebeClientCloudBuilderStep3.ZeebeClientCloudBuilderStep4.html#withRegion(java.lang.String)) -in the Camunda Cloud builder of the Java Client to set the region. - -By default, the builder will assume the `bru-2` region, which is the region of -any clusters created after Camunda Cloud GA in May 2021. - -If you are using the Java Client Camunda Cloud builder with a pre GA -cluster, you must set the region to the specific value of your cluster. - -To copy the region of your cluster, visit the clusters details page in -Camunda Cloud Console, and select the copy button next to the region. - -![](../img/update-guide-100-to-110-copy-region.png) diff --git a/versioned_docs/version-1.3/guides/update-guide/110-to-120.md b/versioned_docs/version-1.3/guides/update-guide/110-to-120.md deleted file mode 100644 index d920ed189ae..00000000000 --- a/versioned_docs/version-1.3/guides/update-guide/110-to-120.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: 110-to-120 -title: Update 1.1 to 1.2 -description: "Review which adjustments must be made to migrate from Camunda Cloud 1.1.x to 1.2.0." ---- -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 1.1.x to 1.2.0 for each component of the system. - -:::caution - - We identified an issue in our recent release of Zeebe 1.2 related to our - concept of processing on followers. Version 1.2.0 and 1.2.1 are affected by a - bug, resulting in an inconsistent state - ([#8044](https://github.com/camunda-cloud/zeebe/issues/8044)). - - Therefore we recommend updating to 1.2.4 directly. - -::: - -## Server - -### Operate - -With Operate 1.2, a new feature was introduced to quickly navigate a call -activity hierarchy. This feature is only available for instances started after -the version 1.2 update. Older instances will not expose this information to the -user. - diff --git a/versioned_docs/version-1.3/guides/update-guide/120-to-130.md b/versioned_docs/version-1.3/guides/update-guide/120-to-130.md deleted file mode 100644 index 325b1dd4ba5..00000000000 --- a/versioned_docs/version-1.3/guides/update-guide/120-to-130.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: 120-to-130 -title: Update 1.2 to 1.3 -description: "Review which adjustments must be made to migrate from Camunda Cloud 1.2.x to 1.3.2." ---- -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 1.2.x to 1.3.2 for each component of the system. - -## Server - -### Zeebe - -:::caution -A critical [issue](https://github.com/camunda-cloud/zeebe/issues/8611) which may lead to data loss was identified in 1.3.0 and 1.3.1. This issue is related to the new assignee and candidate group feature introduced in 1.3.0, and only affects users which make use of it. However, when updating, it's still recommended that you skip versions 1.3.0 and 1.3.1 and update directly from 1.2.9 to 1.3.2. 
- -Please refer to the [release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.2) for more. -::: - -### Operate - -:::caution -A critical issue was found on Operate data importer which may lead to incidents not being imported to Operate. This issue is affecting versions `1.3.0`, `1.3.1`, `1.3.2` and `1.3.3`. -We strongly recommend to skip affected versions and make sure you are running version `1.3.4` if upgrading from `1.2.x`. - -Please refer to the [release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.4) for more. -::: - -With Operate 1.3, a new feature was introduced to propagate incidents from called instances to calling instances. -This feature is only available for instances started after the version 1.3 update. Older instances will still be shown as active -even though incidents in called instances may exist. - -### Tasklist - -Because of internal changes in user data processing update to Tasklist 1.3 will erase all information about task assignments. -End users would need to claim their tasks again. - diff --git a/versioned_docs/version-1.3/guides/update-guide/introduction.md b/versioned_docs/version-1.3/guides/update-guide/introduction.md deleted file mode 100644 index 9f0e26a6983..00000000000 --- a/versioned_docs/version-1.3/guides/update-guide/introduction.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: introduction -title: Introduction ---- - -These documents guide you through the process of updating your Camunda Cloud -application or server installation from one Camunda Cloud version to the other. - -There is a dedicated update guide for each version: - -### [1.2 to 1.3](../120-to-130) - -Update from 1.2.x to 1.3.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.0) -[Release blog](https://camunda.com/blog/2022/01/camunda-cloud-1-3-0-released/) - -### [1.1 to 1.2](../110-to-120) - -Update from 1.1.x to 1.2.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.0) -[Release blog](https://camunda.com/blog/2021/10/camunda-cloud-1-2-0-released/) - -### [1.0 to 1.1](../100-to-110) - -Update from 1.0.x to 1.1.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.0) -[Release blog](https://camunda.com/blog/2021/07/camunda-cloud-110-released/) - -### [0.26 to 1.0](../026-to-100) - -Update from 0.26.x to 1.0.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.0.0) -[Release blog](https://camunda.com/blog/2021/05/camunda-cloud-10-released/) diff --git a/versioned_docs/version-1.3/guides/utilizing-forms.md b/versioned_docs/version-1.3/guides/utilizing-forms.md deleted file mode 100644 index 4fe30e0e494..00000000000 --- a/versioned_docs/version-1.3/guides/utilizing-forms.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: utilizing-forms -title: Building forms with Modeler -description: "Let's learn about Camunda Forms, how to use them, how to model them with a diagram, and deploying." ---- - -:::note -The initial release of Camunda Forms includes a debut minimal feature set, which will be expanded with upcoming versions. - -If using with Camunda Platform, note that the Camunda Forms feature was added with the 4.7.0 release of the Desktop Modeler. Therefore, they can be used within BPMN diagrams running on Camunda Platform version 7.15.0 or later. -::: - -## Overview - -The Camunda Forms feature allows you to easily design and configure forms. Once configured, they can be connected to a user task or start event to implement a task form in your application. 
- -While you can incorporate Camunda Forms solely within Camunda Cloud, you can also utilize Camunda Forms in Camunda Platform. After deploying a diagram with an embedded form, Tasklist imports this form schema and uses it to render the form on every task assigned to it. - -## Quickstart - -### Create new form - -To start building a form, log in to your [Camunda Cloud](./getting-started/create-camunda-cloud-account.md) account or open [Desktop Modeler](/components/modeler/about.md) and take the following steps: - -1. Click on the **Modeler** tab at the top of the page or alternatively open the **File** menu in Desktop Modeler. -2. Open any project from your Web Modeler home view. -3. Click the blue **New** button and choose **Form**. - -### Build your form - -Now you can start to build your Camunda form. Right after creating your form, you can name it by replacing the **New Form** text with the name of your choice. In this example, we'll build a form to help with a task in obtaining an email message. - -![form email example](./img/form-email-example.png) - -Add your desired elements from the palette on the left side by dragging and dropping them onto the canvas. - -![form palette](./img/form-palette.png) - -Within Forms, we have the option to add text fields, numerical values, checkboxes, radio elements, selection menus, text components, and buttons. - -:::note -Within Camunda Platform, you can also utilize [embedded forms](https://docs.camunda.org/manual/latest/reference/forms/embedded-forms/). -::: - -In the properties panel on the right side of the page, view and edit attributes that apply to the selected form element. For example, apply a minimum or maximum length to a text field, or require a minimum or maximum value within a number element. In this case, we have labeled the field, described the field, and required an input for our email message. - -![email properties](./img/form-properties-email.png) - -Refer to the [Camunda Forms reference material](../components/modeler/forms/camunda-forms-reference.md) to explore all form elements and configuration options in detail. - -### Save your form - -To save your form in Camunda Cloud, you don't have to do anything. Web Modeler will autosave every change you make. - -To save your form in Camunda Platform, click **File > Save File As...** in the top-level menu. Select a location on your file system to store the form as `.form` file. You can load that file again by clicking **File > Open File...**. - -### Connect your form to a BPMN diagram - -Next, let's implement a task form into a diagram. In tandem, we can connect your form to a user task or start event. - -:::note -For Camunda Platform, refer to the [User Task Forms guide](https://docs.camunda.org/manual/latest/user-guide/task-forms/#camunda-forms) to learn how to implement a task form in your application. -::: - -Click on the **Modeler** tab at the top of the page and open any project from your Web Modeler home view. - -Take the following steps: - -1. Select the diagram where you'd like to apply your form. -2. Select the user task requiring the help of a form. -3. On the right side of the selected user task, select the blue overlay with three white horizontal lines to open the navigation menu. -4. Navigate to the form you want to connect and click the blue **Import** button. -5. When a user task has a connected form, the blue overlay will always stay visible on the right side of the task. - -:::note Submit button missing when you use Camunda Forms? 
-When using Camunda Forms, any submit button present in the form schema is hidden so we can control when a user can complete a task. -::: - -Within Camunda Platform, you can click on the bottom left corner that says **JSON** to switch to the JSON view. Use caution when naming the fields of your form. Fields have their values pre-filled from variables with the same name. - -Copy the JSON schema, and go back to the BPMN diagram you modeled earlier. Select the **user task** and click on the **Forms** tab. After switching tabs, you should see the field where you can paste the form JSON schema. Paste the schema and save the file. - -With Camunda Platform, deploy your diagram to Zeebe and create an instance using the following command: - -```sh -zbctl deploy /path/to/my/diagram.bpmn -zbctl create instance diagram-id -``` - -Then, open Tasklist to claim the task, fill in the form, and complete the task. - -## Additional resources - -- [Desktop and Web Modeler](/components/modeler/about.md) -- [Model your first process](./getting-started/model-your-first-process.md) -- [User task reference](/components/modeler/bpmn/user-tasks/user-tasks.md) \ No newline at end of file diff --git a/versioned_docs/version-1.3/reference/announcements.md b/versioned_docs/version-1.3/reference/announcements.md deleted file mode 100644 index 3064966d909..00000000000 --- a/versioned_docs/version-1.3/reference/announcements.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: announcements -title: "Announcements" -description: "Important announcements including deprecation & removal notices" ---- - -The support for YAML processes was removed as of release 1.0. The `resourceType` in Deployment record and Process grpc request are deprecated, they will always contain `BPMN` as value. - -### YAML workflows descriptions - -YAML workflows are an alternative way to specify simple workflows using a proprietary YAML description. This feature is deprecated and no longer advertised in the documentation. YAML workflows gained little traction with users and we do not intend to support them in the future. - -We recommend all users of YAML workflows to migrate to BPMN workflows as soon as possible. The feature will eventually be removed completely, though the date when this will occur has yet to be defined. - -## Deprecated in 0.23.0-alpha2 - -- TOML configuration - deprecated and removed in 0.23.0-alpha2 -- Legacy environment variables - deprecated in 0.23.0-alpha2, removed in 0.25.0 - -New configuration: - -```yaml -exporters: - elasticsearch: - className: io.camunda.zeebe.exporter.ElasticsearchExporter - debughttp: - className: io.camunda.zeebe.broker.exporter.debug.DebugHttpExporter -``` - -In terms of specifying values, there were two minor changes: - -- Memory sizes are now specified like this: `512MB` (old way: `512M`) -- Durations (e.g. timeouts) can now also be given in ISO-8601 Durations format. However, you can still use the established method and specify a timeout of `30s` - -## Removed in 1.0 - -The support for YAML processes was removed as of release 1.0. The `resourceType` in Deployment record and Process grpc request are deprecated; they will always contain `BPMN` as value. - -## Deprecated in 1.3 - -The `zeebe-test` module was deprecated in 1.3.0. We are currently planning to remove `zeebe-test` for the 1.4.0 release. 
\ No newline at end of file diff --git a/versioned_docs/version-1.3/reference/dependencies.md b/versioned_docs/version-1.3/reference/dependencies.md deleted file mode 100644 index 2ee101dfb7d..00000000000 --- a/versioned_docs/version-1.3/reference/dependencies.md +++ /dev/null @@ -1,1312 +0,0 @@ ---- -id: dependencies -title: "Dependencies" -description: "Dependencies and Third Party Libraries for all the components of Camunda Cloud" -keywords: ["dependencies", "third party", "third party libraries"] ---- - -A complete list of all dependencies and third-party libraries for all the components of Camunda Cloud (including Self-Managed). - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -### All Zeebe Dependencies - -* kryo (Version: 4.0.2, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* minlog (Version: 1.3.1, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* reflectasm (Version: 1.11.3, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* jackson-annotations (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-core (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-databind (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-cbor (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-smile (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-yaml (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-datatype-jdk8 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-datatype-jsr310 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-module-parameter-names (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* annotations (Version: 4.1.1.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* proto-google-common-protos (Version: 2.0.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jsr305 (Version: 3.0.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* gson (Version: 2.8.6, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* error_prone_annotations (Version: 2.4.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* failureaccess (Version: 1.0.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* guava (Version: 30.0-jre, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* listenablefuture (Version: 9999.0-empty-to-avoid-conflict-with-guava, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* j2objc-annotations (Version: 1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* protobuf-java (Version: 3.14.0, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* concurrency-limits-core (Version: 0.3.6, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* config (Version: 1.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-codec (Version: 1.15, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-logging (Version: 1.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-api (Version: 1.34.0, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-context (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-core (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-netty (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-protobuf-lite (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-protobuf (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-stub (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* micrometer-core (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* micrometer-registry-prometheus (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-buffer (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-http2 (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-http (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-socks (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-common (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-handler-proxy (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-handler (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-resolver (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-tcnative-boringssl-static (Version: 2.0.35.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-transport-native-epoll (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-transport-native-unix-common (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-transport (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* perfmark-api (Version: 0.19.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* simpleclient (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* simpleclient_common (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* simpleclient_hotspot (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* atomix-cluster (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* atomix-storage (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* atomix-utils (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* atomix (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jakarta.annotation-api (Version: 1.3.5, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -* java-grpc-prometheus (Version: 0.3.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* agrona (Version: 1.8.0, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-lang3 (Version: 3.11, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-math3 (Version: 3.6.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpasyncclient (Version: 4.1.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpclient (Version: 4.5.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpcore-nio (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpcore (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-api (Version: 2.14.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-core (Version: 2.14.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-slf4j-impl (Version: 2.14.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* tomcat-embed-core (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* tomcat-embed-websocket (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* camunda-xml-model (Version: 7.14.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* feel-engine (Version: 1.12.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* checker-qual (Version: 3.5.0, License: [MIT](https://opensource.org/licenses/MIT)) -* animal-sniffer-annotations (Version: 1.19, License: [MIT](https://opensource.org/licenses/MIT)) -* elasticsearch-rest-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-core (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-x-content (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jakarta.el (Version: 3.0.3, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -* LatencyUtils (Version: 2.0.3, License: [Public Domain, per Creative Commons CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -* jackson-dataformat-msgpack (Version: 0.8.21, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* msgpack-core (Version: 0.8.21, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* objenesis (Version: 3.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* asm (Version: 9.0, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* rocksdbjni (Version: 6.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* scala-parser-combinators_2.13 (Version: 1.1.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* scala-library (Version: 2.13.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jul-to-slf4j (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -* slf4j-api (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -* spring-boot-actuator-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-actuator (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-json (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-logging (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-tomcat (Version: 2.4.1, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-web (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-aop (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-beans (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-context (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-core (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-expression (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-jcl (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-web (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-webmvc (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* snakeyaml (Version: 1.27, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* sbe-tool (Version: 1.20.2, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) - - - - - -### Operate Dependencies (Front end) - -* @babel/code-frame (Version: 7.12.13, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/generator (Version: 7.13.9, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/helper-annotate-as-pure (Version: 7.12.13, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/helper-function-name (Version: 7.12.13, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/helper-get-function-arity (Version: 7.12.13, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/helper-module-imports (Version: 7.13.12, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/helper-split-export-declaration (Version: 7.12.13, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/helper-validator-identifier (Version: 7.12.11, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/highlight (Version: 7.13.10, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/parser (Version: 7.13.12, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/runtime (Version: 7.13.17, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/template (Version: 7.12.13, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/traverse (Version: 7.13.0, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/types (Version: 7.13.12, License: [MIT](https://opensource.org/licenses/MIT)) -* @emotion/is-prop-valid (Version: 0.8.8, License: [MIT](https://opensource.org/licenses/MIT)) -* @emotion/memoize (Version: 0.7.4, License: [MIT](https://opensource.org/licenses/MIT)) -* @emotion/stylis (Version: 0.8.5, License: [MIT](https://opensource.org/licenses/MIT)) -* @emotion/unitless (Version: 0.7.5, License: [MIT](https://opensource.org/licenses/MIT)) -* ansi-styles (Version: 3.2.1, License: [MIT](https://opensource.org/licenses/MIT)) -* babel-plugin-styled-components (Version: 1.12.0, License: [MIT](https://opensource.org/licenses/MIT)) -* babel-plugin-syntax-jsx (Version: 6.18.0, License: [MIT](https://opensource.org/licenses/MIT)) -* camelize (Version: 1.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* chalk (Version: 2.4.2, 
License: [MIT](https://opensource.org/licenses/MIT)) -* color-convert (Version: 1.9.3, License: [MIT](https://opensource.org/licenses/MIT)) -* color-name (Version: 1.1.3, License: [MIT](https://opensource.org/licenses/MIT)) -* css-color-keywords (Version: 1.0.0, License: [ISC](https://opensource.org/licenses/ISC)) -* css-to-react-native (Version: 3.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* csstype (Version: 3.0.8, License: [MIT](https://opensource.org/licenses/MIT)) -* date-fns (Version: 2.19.0, License: [MIT](https://opensource.org/licenses/MIT)) -* debug (Version: 4.3.1, License: [MIT](https://opensource.org/licenses/MIT)) -* dom-helpers (Version: 5.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* escape-string-regexp (Version: 1.0.5, License: [MIT](https://opensource.org/licenses/MIT)) -* fast-deep-equal (Version: 2.0.1, License: [MIT](https://opensource.org/licenses/MIT)) -* final-form (Version: 4.20.2, License: [MIT](https://opensource.org/licenses/MIT)) -* globals (Version: 11.12.0, License: [MIT](https://opensource.org/licenses/MIT)) -* has-flag (Version: 3.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* history (Version: 4.10.1, License: [MIT](https://opensource.org/licenses/MIT)) -* hoist-non-react-statics (Version: 3.3.2, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* isarray (Version: 0.0.1, License: [MIT](https://opensource.org/licenses/MIT)) -* js-tokens (Version: 4.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* jsesc (Version: 2.5.2, License: [MIT](https://opensource.org/licenses/MIT)) -* lodash (Version: 4.17.21, License: [MIT](https://opensource.org/licenses/MIT)) -* loose-envify (Version: 1.4.0, License: [MIT](https://opensource.org/licenses/MIT)) -* mini-create-react-context (Version: 0.4.1, License: [MIT](https://opensource.org/licenses/MIT)) -* mobx-react-lite (Version: 3.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* mobx-react (Version: 7.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* mobx (Version: 6.1.8, License: [MIT](https://opensource.org/licenses/MIT)) -* ms (Version: 2.1.2, License: [MIT](https://opensource.org/licenses/MIT)) -* object-assign (Version: 4.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -* path-to-regexp (Version: 1.8.0, License: [MIT](https://opensource.org/licenses/MIT)) -* polished (Version: 4.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -* postcss-value-parser (Version: 4.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* prop-types (Version: 15.7.2, License: [MIT](https://opensource.org/licenses/MIT)) -* react-contenteditable (Version: 3.3.5, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* react-final-form (Version: 6.5.3, License: [MIT](https://opensource.org/licenses/MIT)) -* react-is (Version: 16.13.1, License: [MIT](https://opensource.org/licenses/MIT)) -* react-router-dom (Version: 5.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* react-router (Version: 5.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* react-textarea-autosize (Version: 8.3.2, License: [MIT](https://opensource.org/licenses/MIT)) -* react-transition-group (Version: 4.4.1, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* react (Version: 17.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -* regenerator-runtime (Version: 0.13.7, License: [MIT](https://opensource.org/licenses/MIT)) -* resolve-pathname (Version: 3.0.0, License: 
[MIT](https://opensource.org/licenses/MIT)) -* shallowequal (Version: 1.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* source-map (Version: 0.5.7, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* styled-components (Version: 5.2.1, License: [MIT](https://opensource.org/licenses/MIT)) -* supports-color (Version: 5.5.0, License: [MIT](https://opensource.org/licenses/MIT)) -* tiny-invariant (Version: 1.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* tiny-warning (Version: 1.0.3, License: [MIT](https://opensource.org/licenses/MIT)) -* to-fast-properties (Version: 2.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* ts-essentials (Version: 2.0.12, License: [MIT](https://opensource.org/licenses/MIT)) -* use-composed-ref (Version: 1.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* use-isomorphic-layout-effect (Version: 1.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -* use-latest (Version: 1.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* value-equal (Version: 1.0.1, License: [MIT](https://opensource.org/licenses/MIT)) - - -### Operate Dependencies (Back end) - -* auth0 (Version: 1.28.0, License: [MIT](https://opensource.org/licenses/MIT)) -* java-jwt (Version: 3.13.0, License: [MIT](https://opensource.org/licenses/MIT)) -* jwks-rsa (Version: 0.15.0, License: [MIT](https://opensource.org/licenses/MIT)) -* mvc-auth-commons (Version: 1.6.0, License: [MIT](https://opensource.org/licenses/MIT)) -* hppc (Version: 0.7.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-annotations (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-core (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-databind (Version: 2.11.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-cbor (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-smile (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-yaml (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-datatype-jdk8 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-datatype-jsr310 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-module-parameter-names (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* classmate (Version: 1.5.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* compiler (Version: 0.9.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* annotations (Version: 4.1.1.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* proto-google-common-protos (Version: 1.17.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jsr305 (Version: 3.0.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* gson (Version: 2.8.6, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* error_prone_annotations (Version: 2.3.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* failureaccess (Version: 1.0.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* guava-annotations (Version: r03, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* guava (Version: 30.0-jre, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* listenablefuture (Version: 9999.0-empty-to-avoid-conflict-with-guava, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* j2objc-annotations (Version: 1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* protobuf-java (Version: 3.14.0, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* logging-interceptor (Version: 3.14.9, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* okhttp (Version: 3.14.9, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* okio (Version: 1.17.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* t-digest (Version: 3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-codec (Version: 1.15, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-logging (Version: 1.1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* classgraph (Version: 4.8.83, License: [MIT](https://opensource.org/licenses/MIT)) -* grpc-api (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-context (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-core (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-netty (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-protobuf-lite (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-protobuf (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-stub (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* micrometer-core (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* micrometer-registry-prometheus (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-buffer (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-http2 (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-http (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-socks (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-common (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-handler-proxy (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-handler (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-resolver (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-tcnative-boringssl-static (Version: 2.0.35.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-transport (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* perfmark-api (Version: 0.19.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* simpleclient (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* simpleclient_common (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* 
springfox-bean-validators (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-boot-starter (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-core (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-data-rest (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-oas (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-schema (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-spi (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-spring-web (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-spring-webflux (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-spring-webmvc (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-swagger-common (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-swagger-ui (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* springfox-swagger2 (Version: 3.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* swagger-annotations (Version: 2.1.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* swagger-models (Version: 2.1.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* swagger-annotations (Version: 1.5.20, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* swagger-models (Version: 1.5.20, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-bpmn-model (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-client-java (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-gateway-protocol-impl (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-gateway-protocol (Version: 0.26.0, License: [Zeebe Community License v1.0](https://camunda.com/legal/terms/cloud-terms-and-conditions/zeebe-community-license-v1-0/)) -* zeebe-util (Version: 0.26.0, License: [Zeebe Community License v1.0](https://camunda.com/legal/terms/cloud-terms-and-conditions/zeebe-community-license-v1-0/)) -* jakarta.annotation-api (Version: 1.3.5, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -* javax.servlet-api (Version: 4.0.1, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -* joda-time (Version: 2.10.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* byte-buddy (Version: 1.10.18, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jopt-simple (Version: 5.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -* commons-lang3 (Version: 3.11, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpasyncclient (Version: 4.1.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpclient (Version: 4.5.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpcore-nio (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpcore (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-api (Version: 2.13.3, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-core (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-jul (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-slf4j-impl (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-analyzers-common (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-backward-codecs (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-core (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-grouping (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-highlighter (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-join (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-memory (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-misc (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-queries (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-queryparser (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-sandbox (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-spatial-extras (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-spatial3d (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-spatial (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-suggest (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* tomcat-embed-core (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* tomcat-embed-websocket (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* camunda-xml-model (Version: 7.14.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* checker-qual (Version: 3.5.0, License: [MIT](https://opensource.org/licenses/MIT)) -* animal-sniffer-annotations (Version: 1.18, License: [MIT](https://opensource.org/licenses/MIT)) -* elasticsearch-rest-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-rest-high-level-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* aggs-matrix-stats-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lang-mustache-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* parent-join-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* rank-eval-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-cli (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-core (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-secure-sm (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-x-content (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch 
(Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jna (Version: 5.5.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jakarta.el (Version: 3.0.3, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -* javax.json (Version: 1.1.4, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -* HdrHistogram (Version: 2.1.9, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -* LatencyUtils (Version: 2.0.3, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -* mapstruct (Version: 1.3.1.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jul-to-slf4j (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -* slf4j-api (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -* spring-boot-actuator-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-actuator (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-actuator (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-json (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-log4j2 (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-security (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-tomcat (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-web (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-ldap-core (Version: 2.3.3.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-plugin-core (Version: 2.0.0.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-plugin-metadata (Version: 2.0.0.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-config (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-core (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-ldap (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-web (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-aop (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-beans (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-context (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-core (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-expression (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-jcl (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-tx (Version: 5.3.2, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-web (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-webmvc (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* snakeyaml (Version: 1.27, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) - - - - - - -### Tasklist Dependencies (Front end) - -* @apollo/client (Version: 3.3.12, License: [MIT](https://opensource.org/licenses/MIT)) -* @babel/runtime (Version: 7.13.10, License: [MIT](https://opensource.org/licenses/MIT)) -* @camunda-cloud/common-ui-react (Version: 0.0.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* @camunda-cloud/common-ui (Version: 0.0.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* @graphql-typed-document-node/core (Version: 3.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* @types/ungap__global-this (Version: 0.3.1, License: [MIT](https://opensource.org/licenses/MIT)) -* @types/zen-observable (Version: 0.8.2, License: [MIT](https://opensource.org/licenses/MIT)) -* @ungap/global-this (Version: 0.4.4, License: [ISC](https://opensource.org/licenses/ISC)) -* @wry/context (Version: 0.5.4, License: [MIT](https://opensource.org/licenses/MIT)) -* @wry/equality (Version: 0.3.4, License: [MIT](https://opensource.org/licenses/MIT)) -* @wry/trie (Version: 0.2.2, License: [MIT](https://opensource.org/licenses/MIT)) -* date-fns (Version: 2.19.0, License: [MIT](https://opensource.org/licenses/MIT)) -* fast-json-stable-stringify (Version: 2.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* final-form-arrays (Version: 3.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -* final-form (Version: 4.20.2, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql-tag (Version: 2.12.3, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql (Version: 15.5.0, License: [MIT](https://opensource.org/licenses/MIT)) -* history (Version: 4.10.1, License: [MIT](https://opensource.org/licenses/MIT)) -* hoist-non-react-statics (Version: 3.3.2, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* isarray (Version: 0.0.1, License: [MIT](https://opensource.org/licenses/MIT)) -* js-tokens (Version: 4.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* loose-envify (Version: 1.4.0, License: [MIT](https://opensource.org/licenses/MIT)) -* mini-create-react-context (Version: 0.4.1, License: [MIT](https://opensource.org/licenses/MIT)) -* mobx-react-lite (Version: 3.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* mobx (Version: 6.1.8, License: [MIT](https://opensource.org/licenses/MIT)) -* object-assign (Version: 4.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -* optimism (Version: 0.14.1, License: [MIT](https://opensource.org/licenses/MIT)) -* path-to-regexp (Version: 1.8.0, License: [MIT](https://opensource.org/licenses/MIT)) -* polished (Version: 4.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -* prop-types (Version: 15.7.2, License: [MIT](https://opensource.org/licenses/MIT)) -* react-dom (Version: 17.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -* react-final-form-arrays (Version: 3.1.3, License: [MIT](https://opensource.org/licenses/MIT)) -* react-final-form (Version: 6.5.3, License: [MIT](https://opensource.org/licenses/MIT)) -* react-is (Version: 16.13.1, License: [MIT](https://opensource.org/licenses/MIT)) -* react-router-dom (Version: 5.2.0, License: 
[MIT](https://opensource.org/licenses/MIT)) -* react-router (Version: 5.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* react-textarea-autosize (Version: 8.3.2, License: [MIT](https://opensource.org/licenses/MIT)) -* react (Version: 17.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -* regenerator-runtime (Version: 0.13.7, License: [MIT](https://opensource.org/licenses/MIT)) -* resolve-pathname (Version: 3.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* scheduler (Version: 0.20.2, License: [MIT](https://opensource.org/licenses/MIT)) -* symbol-observable (Version: 2.0.3, License: [MIT](https://opensource.org/licenses/MIT)) -* tiny-invariant (Version: 1.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* tiny-warning (Version: 1.0.3, License: [MIT](https://opensource.org/licenses/MIT)) -* ts-essentials (Version: 2.0.12, License: [MIT](https://opensource.org/licenses/MIT)) -* ts-invariant (Version: 0.6.2, License: [MIT](https://opensource.org/licenses/MIT)) -* tslib (Version: 1.14.1, License: [0BSD](https://opensource.org/licenses/0BSD)) -* tslib (Version: 2.1.0, License: [0BSD](https://opensource.org/licenses/0BSD)) -* use-composed-ref (Version: 1.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -* use-isomorphic-layout-effect (Version: 1.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -* use-latest (Version: 1.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -* value-equal (Version: 1.0.1, License: [MIT](https://opensource.org/licenses/MIT)) -* zen-observable (Version: 0.8.15, License: [MIT](https://opensource.org/licenses/MIT)) - - -### Tasklist Dependencies (Back end) - -* auth0 (Version: 1.28.0, License: [MIT](https://opensource.org/licenses/MIT)) -* java-jwt (Version: 3.13.0, License: [MIT](https://opensource.org/licenses/MIT)) -* jwks-rsa (Version: 0.15.0, License: [MIT](https://opensource.org/licenses/MIT)) -* mvc-auth-commons (Version: 1.6.0, License: [MIT](https://opensource.org/licenses/MIT)) -* hppc (Version: 0.7.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-annotations (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-core (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-databind (Version: 2.11.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-cbor (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-smile (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-dataformat-yaml (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-datatype-jdk8 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-datatype-jsr310 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-module-kotlin (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jackson-module-parameter-names (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* classmate (Version: 1.5.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* compiler (Version: 0.9.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jcip-annotations (Version: 1.0-1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* annotations (Version: 4.1.1.4, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* proto-google-common-protos (Version: 1.17.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jsr305 (Version: 3.0.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* gson (Version: 2.8.6, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* error_prone_annotations (Version: 2.3.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* failureaccess (Version: 1.0.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* guava-annotations (Version: r03, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* guava (Version: 30.0-jre, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* listenablefuture (Version: 9999.0-empty-to-avoid-conflict-with-guava, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* j2objc-annotations (Version: 1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* protobuf-java (Version: 3.14.0, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* graphql-java-kickstart (Version: 10.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* graphql-java-servlet (Version: 10.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* graphql-java-tools (Version: 6.3.0, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql-kickstart-spring-boot-autoconfigure-tools (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql-kickstart-spring-boot-starter-tools (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql-kickstart-spring-support (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql-spring-boot-autoconfigure (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql-spring-boot-starter (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* playground-spring-boot-autoconfigure (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* playground-spring-boot-starter (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -* graphql-java (Version: 15.0, License: [MIT](https://opensource.org/licenses/MIT)) -* java-dataloader (Version: 2.2.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* nimbus-jose-jwt (Version: 9.1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* logging-interceptor (Version: 3.14.9, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* okhttp (Version: 3.14.9, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* okio (Version: 1.17.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* t-digest (Version: 3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-codec (Version: 1.15, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* commons-logging (Version: 1.1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-api (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-context (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-core (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-netty (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-protobuf-lite (Version: 1.34.0, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-protobuf (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* grpc-stub (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* micrometer-core (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* micrometer-registry-prometheus (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-buffer (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-http2 (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-http (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec-socks (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-codec (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-common (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-handler-proxy (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-handler (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-resolver (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-tcnative-boringssl-static (Version: 2.0.35.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* netty-transport (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* perfmark-api (Version: 0.19.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* simpleclient (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* simpleclient_common (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-bpmn-model (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-client-java (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-gateway-protocol-impl (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-gateway-protocol (Version: 0.26.0, License: [Zeebe Community License v1.0](https://camunda.com/legal/terms/cloud-terms-and-conditions/zeebe-community-license-v1-0/)) -* zeebe-protocol (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* zeebe-util (Version: 0.26.0, License: [Zeebe Community License v1.0](https://camunda.com/legal/terms/cloud-terms-and-conditions/zeebe-community-license-v1-0/)) -* jakarta.annotation-api (Version: 1.3.5, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -* jakarta.validation-api (Version: 2.0.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* javax.servlet-api (Version: 4.0.1, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -* javax.websocket-api (Version: 1.1, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -* joda-time (Version: 2.10.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jopt-simple (Version: 5.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -* agrona (Version: 1.8.0, License: The 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* antlr4-runtime (Version: 4.7.2, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -* commons-lang3 (Version: 3.11, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpasyncclient (Version: 4.1.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpclient (Version: 4.5.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpcore-nio (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* httpcore (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-api (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-core (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-jul (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* log4j-slf4j-impl (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-analyzers-common (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-backward-codecs (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-core (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-grouping (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-highlighter (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-join (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-memory (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-misc (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-queries (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-queryparser (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-sandbox (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-spatial-extras (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-spatial3d (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-spatial (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lucene-suggest (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* tomcat-embed-core (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* tomcat-embed-websocket (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* attoparser (Version: 2.0.5.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* camunda-xml-model (Version: 7.14.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* checker-qual (Version: 3.5.0, License: [MIT](https://opensource.org/licenses/MIT)) -* animal-sniffer-annotations (Version: 1.18, License: [MIT](https://opensource.org/licenses/MIT)) -* elasticsearch-rest-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-rest-high-level-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* aggs-matrix-stats-client (Version: 6.8.13, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* lang-mustache-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* parent-join-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* rank-eval-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-cli (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-core (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-secure-sm (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch-x-content (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* elasticsearch (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jna (Version: 5.5.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jakarta.el (Version: 3.0.3, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -* javax.json (Version: 1.1.4, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -* HdrHistogram (Version: 2.1.9, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -* hibernate-validator (Version: 6.1.6.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* jboss-logging (Version: 3.4.1.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* kotlin-reflect (Version: 1.4.21, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* kotlin-stdlib-common (Version: 1.4.21, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* kotlin-stdlib (Version: 1.4.21, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* kotlinx-coroutines-core (Version: 1.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* kotlinx-coroutines-jdk8 (Version: 1.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* kotlinx-coroutines-reactive (Version: 1.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* annotations (Version: 13.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* LatencyUtils (Version: 2.0.3, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -* reactive-streams (Version: 1.0.3, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -* jul-to-slf4j (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -* slf4j-api (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -* spring-boot-actuator-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-actuator (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-actuator (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-json (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-log4j2 (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-oauth2-resource-server (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-security 
(Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-thymeleaf (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-tomcat (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-validation (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-web (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter-websocket (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot-starter (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-boot (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-config (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-core (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-oauth2-core (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-oauth2-jose (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-oauth2-resource-server (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-security-web (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-aop (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-beans (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-context (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-core (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-expression (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-jcl (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-messaging (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-web (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-webmvc (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* spring-websocket (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* thymeleaf-extras-java8time (Version: 3.0.4.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* thymeleaf-spring5 (Version: 3.0.11.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* thymeleaf (Version: 3.0.11.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* unbescape (Version: 1.1.6.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -* snakeyaml (Version: 1.27, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) - - - - - -:::note - -IAM is only available for Camunda Cloud Self-Managed at this time. - -::: - -### IAM Dependencies (Front end) - -This section covers third-party libraries used by the IAM frontend. -All of these libraries are required for core functionality. 
- -* [@babel/code-frame@7.12.13](https://github.com/babel/babel) (MIT) -* [@babel/generator@7.9.6](https://github.com/babel/babel/tree/master/packages/babel-generator) (MIT) -* [@babel/helper-annotate-as-pure@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-helper-annotate-as-pure) (MIT) -* [@babel/helper-function-name@7.9.5](https://github.com/babel/babel/tree/master/packages/babel-helper-function-name) (MIT) -* [@babel/helper-get-function-arity@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-helper-get-function-arity) (MIT) -* [@babel/helper-module-imports@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-helper-module-imports) (MIT) -* [@babel/helper-split-export-declaration@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-helper-split-export-declaration) (MIT) -* [@babel/helper-validator-identifier@7.12.11](https://github.com/babel/babel) (MIT) -* [@babel/helper-validator-identifier@7.9.5](https://github.com/babel/babel/tree/master/packages/babel-helper-validator-identifier) (MIT) -* [@babel/highlight@7.13.10](https://github.com/babel/babel) (MIT) -* [@babel/parser@7.9.6](https://github.com/babel/babel/tree/master/packages/babel-parser) (MIT) -* [@babel/runtime@7.12.1](https://github.com/babel/babel) (MIT) -* [@babel/runtime@7.14.0](https://github.com/babel/babel) (MIT) -* [@babel/runtime@7.14.5](https://babel.dev/team) (MIT) -* [@babel/runtime@7.14.6](https://babel.dev/team) (MIT) -* [@babel/runtime@7.9.6](https://github.com/babel/babel) (MIT) -* [@babel/template@7.8.6](https://github.com/babel/babel/tree/master/packages/babel-template) (MIT) -* [@babel/traverse@7.9.6](https://github.com/babel/babel/tree/master/packages/babel-traverse) (MIT) -* [@babel/types@7.9.6](https://github.com/babel/babel/tree/master/packages/babel-types) (MIT) -* [@camunda-cloud/common-ui-react@0.0.16](https://github.com/camunda-cloud/common-ui-react) (Apache-2.0) -* [@camunda-cloud/common-ui@0.0.16](https://github.com/camunda-cloud/common-ui) (Apache-2.0) -* [@emotion/is-prop-valid@0.8.8](https://github.com/emotion-js/emotion/tree/master/packages/is-prop-valid) (MIT) -* [@emotion/memoize@0.7.4](https://github.com/emotion-js/emotion/tree/master/packages/memoize) (MIT) -* [@emotion/stylis@0.8.5](https://github.com/emotion-js/emotion/tree/master/packages/stylis) (MIT) -* [@emotion/unitless@0.7.5](https://github.com/emotion-js/emotion/tree/master/packages/unitless) (MIT) -* [@ibm/plex@5.1.3](https://github.com/ibm/plex) (OFL-1.1) -* [@reduxjs/toolkit@1.6.0](https://github.com/reduxjs/redux-toolkit) (MIT) -* [@types/cookie@0.3.3](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/hoist-non-react-statics@3.3.1](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/prop-types@15.7.3](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/react-redux@7.1.16](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/react@16.14.8](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/react@16.9.35](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/scheduler@0.16.1](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [ansi-styles@3.2.1](https://github.com/chalk/ansi-styles) (MIT) -* [axios@0.21.1](https://github.com/axios/axios) (MIT) -* [babel-plugin-styled-components@1.12.0](https://github.com/styled-components/babel-plugin-styled-components) (MIT) -* 
[babel-plugin-syntax-jsx@6.18.0](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-jsx) (MIT) -* [camelize@1.0.0](http://substack.net) (MIT) -* [chalk@2.4.2](https://github.com/chalk/chalk) (MIT) -* [color-convert@1.9.3](https://github.com/Qix-/color-convert) (MIT) -* [color-name@1.1.3](https://github.com/dfcreative/color-name) (MIT) -* [cookie@0.4.1](https://github.com/jshttp/cookie) (MIT) -* [css-color-keywords@1.0.0](https://github.com/sonicdoe/css-color-keywords) (ISC) -* [css-to-react-native@3.0.0](https://github.com/styled-components/css-to-react-native) (MIT) -* [csstype@2.6.10](https://github.com/frenic/csstype) (MIT) -* [csstype@3.0.2](https://github.com/frenic/csstype) (MIT) -* [debug@4.1.1](https://github.com/visionmedia/debug) (MIT) -* [decode-uri-component@0.2.0](https://github.com/SamVerschueren/decode-uri-component) (MIT) -* [detect-node-es@1.0.0](https://github.com/thekashey/detect-node) (ISC) -* [escape-string-regexp@1.0.5](https://github.com/sindresorhus/escape-string-regexp) (MIT) -* [filter-obj@1.1.0](https://github.com/sindresorhus/filter-obj) (MIT) -* [focus-lock@0.9.1](https://github.com/theKashey/focus-lock) (MIT) -* [follow-redirects@1.13.0](https://ruben.verborgh.org/) (MIT) -* [globals@11.12.0](https://github.com/sindresorhus/globals) (MIT) -* [has-flag@3.0.0](https://github.com/sindresorhus/has-flag) (MIT) -* [history@4.10.1](https://github.com/ReactTraining/history) (MIT) -* [hoist-non-react-statics@3.3.2](https://github.com/mridgway/hoist-non-react-statics) (BSD-3-Clause) -* [html-parse-stringify@3.0.1](https://github.com/henrikjoreteg/html-parse-stringify) (MIT) -* [i18next-browser-languagedetector@6.1.2](https://github.com/jamuhl) (MIT) -* [i18next-xhr-backend@3.2.2](https://github.com/jamuhl) (MIT) -* [i18next@20.3.2](https://github.com/jamuhl) (MIT) -* [immer@9.0.2](https://github.com/immerjs/immer) (MIT) -* [isarray@0.0.1](http://juliangruber.com) (MIT) -* [js-tokens@4.0.0](https://github.com/lydell/js-tokens) (MIT) -* [jsesc@2.5.2](https://mathiasbynens.be/) (MIT) -* [lodash@4.17.21](https://github.com/lodash/lodash) (MIT) -* [loose-envify@1.4.0](https://github.com/zertosh/loose-envify) (MIT) -* [mini-create-react-context@0.4.0](https://github.com/StringEpsilon/mini-create-react-context) (MIT) -* [ms@2.1.2](https://github.com/zeit/ms) (MIT) -* [object-assign@4.1.1](https://github.com/sindresorhus/object-assign) (MIT) -* [path-to-regexp@1.8.0](https://github.com/pillarjs/path-to-regexp) (MIT) -* [polished@4.1.3](https://polished.js.org) (MIT) -* [postcss-value-parser@4.1.0](https://github.com/TrySound/postcss-value-parser) (MIT) -* [prop-types@15.7.2](https://github.com/facebook/prop-types) (MIT) -* [query-string@7.0.1](https://sindresorhus.com) (MIT) -* [react-clientside-effect@1.2.2](http://github.com/gaearon) (MIT) -* [react-dom@16.14.0](https://github.com/facebook/react) (MIT) -* [react-focus-lock@2.5.1](https://github.com/theKashey/react-focus-lock) (MIT) -* [react-i18next@11.11.0](https://github.com/jamuhl) (MIT) -* [react-is@16.13.1](https://github.com/facebook/react) (MIT) -* [react-redux@7.2.4](https://github.com/gaearon) (MIT) -* [react-router-dom@5.2.0](https://github.com/ReactTraining/react-router) (MIT) -* [react-router@5.2.0](https://github.com/ReactTraining/react-router) (MIT) -* [react@16.14.0](https://github.com/facebook/react) (MIT) -* [redux-thunk@2.3.0](https://github.com/reduxjs/redux-thunk) (MIT) -* [redux@4.0.5](https://github.com/reduxjs/redux) (MIT) -* [redux@4.1.0](https://github.com/reduxjs/redux) (MIT) -* 
[regenerator-runtime@0.13.5](https://github.com/facebook/regenerator/tree/master/packages/regenerator-runtime) (MIT) -* [reselect@4.0.0](https://github.com/reduxjs/reselect) (MIT) -* [resolve-pathname@3.0.0](https://github.com/mjackson/resolve-pathname) (MIT) -* [scheduler@0.19.1](https://github.com/facebook/react) (MIT) -* [shallowequal@1.1.0](https://github.com/dashed/shallowequal) (MIT) -* [source-map@0.5.7](https://github.com/mozilla/source-map) (BSD-3-Clause) -* [split-on-first@1.1.0](https://github.com/sindresorhus/split-on-first) (MIT) -* [strict-uri-encode@2.0.0](https://github.com/kevva/strict-uri-encode) (MIT) -* [styled-components@5.3.0](https://github.com/styled-components/styled-components) (MIT) -* [supports-color@5.5.0](https://github.com/chalk/supports-color) (MIT) -* [symbol-observable@1.2.0](https://github.com/blesh/symbol-observable) (MIT) -* [tiny-invariant@1.1.0](https://github.com/alexreardon/tiny-invariant) (MIT) -* [tiny-warning@1.0.3](https://github.com/alexreardon/tiny-warning) (MIT) -* [to-fast-properties@2.0.0](https://github.com/sindresorhus/to-fast-properties) (MIT) -* [tslib@1.13.0](https://github.com/Microsoft/tslib) (0BSD) -* [tslib@2.2.0](https://github.com/Microsoft/tslib) (0BSD) -* [universal-cookie@4.0.4](https://github.com/reactivestack/cookies) (MIT) -* [use-callback-ref@1.2.4](https://github.com/theKashey/use-callback-ref) (MIT) -* [use-sidecar@1.0.3](https://www.npmjs.com/package/use-sidecar) (MIT) -* [value-equal@1.0.1](https://github.com/mjackson/value-equal) (MIT) -* [void-elements@3.1.0](https://github.com/pugjs/void-elements) (MIT) - -### IAM Dependencies (Back end) - -This section covers third-party libraries used by the IAM backend. -All of these libraries are required for core functionality. - -- [antlr:antlr:2.7.7](http://www.antlr.org/) - [BSD License](http://www.antlr.org/license.html) -- [ch.qos.logback:logback-classic:1.2.3](http://www.qos.ch) - [Eclipse Public License - v 1.0](http://www.eclipse.org/legal/epl-v10.html) -- [ch.qos.logback:logback-core:1.2.3](http://www.qos.ch) - [Eclipse Public License - v 1.0](http://www.eclipse.org/legal/epl-v10.html) -- ch.qos.logback.contrib:logback-jackson:0.1.5 - [Eclipse Public License - v 1.0](http://www.eclipse.org/legal/epl-v10.html) -- ch.qos.logback.contrib:logback-json-classic:0.1.5 - [Eclipse Public License - v 1.0](http://www.eclipse.org/legal/epl-v10.html) -- ch.qos.logback.contrib:logback-json-core:0.1.5 - [Eclipse Public License - v 1.0](http://www.eclipse.org/legal/epl-v10.html) -- [com.fasterxml:classmate:1.5.1](https://github.com/FasterXML/java-classmate) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.fasterxml.jackson.core:jackson-annotations:2.11.4](http://github.com/FasterXML/jackson) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.fasterxml.jackson.core:jackson-core:2.11.4](https://github.com/FasterXML/jackson-core) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.fasterxml.jackson.core:jackson-databind:2.11.4](http://github.com/FasterXML/jackson) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.11.4](https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- 
[com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.11.4](https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.fasterxml.jackson.module:jackson-module-kotlin:2.12.2](https://github.com/FasterXML/jackson-module-kotlin) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.fasterxml.jackson.module:jackson-module-parameter-names:2.11.4](https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.github.stephenc.jcip:jcip-annotations:1.0-1](http://stephenc.github.com/jcip-annotations) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.android:annotations:4.1.1.4](http://source.android.com/) - [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0) -- [com.google.api:api-common:1.10.1](https://github.com/googleapis/api-common-java) - [BSD](https://github.com/googleapis/api-common-java/blob/master/LICENSE) -- [com.google.api:gax:1.60.0](https://github.com/googleapis/gax-java) - [BSD](https://github.com/googleapis/gax-java/blob/master/LICENSE) -- [com.google.api:gax-grpc:1.60.0](https://github.com/googleapis/gax-java) - [BSD](https://github.com/googleapis/gax-java/blob/master/LICENSE) -- com.google.api.grpc:proto-google-cloud-logging-v2:0.85.0 - [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.api.grpc:proto-google-common-protos:2.0.0](https://github.com/googleapis/java-iam/proto-google-common-protos) - [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.api.grpc:proto-google-iam-v1:1.0.1](https://github.com/googleapis/java-iam/proto-google-iam-v1) - [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- com.google.auth:google-auth-library-credentials:0.22.0 - [BSD New license](http://opensource.org/licenses/BSD-3-Clause) -- com.google.auth:google-auth-library-oauth2-http:0.22.0 - [BSD New license](http://opensource.org/licenses/BSD-3-Clause) -- [com.google.auto.value:auto-value-annotations:1.7.4](https://github.com/google/auto/tree/master/value) - [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.cloud:google-cloud-core:1.93.9](https://github.com/googleapis/java-core) - [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.cloud:google-cloud-core-grpc:1.93.9](https://github.com/googleapis/java-core) - [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.cloud:google-cloud-logging:1.102.0](https://github.com/googleapis/java-logging) - [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.cloud:google-cloud-logging-logback:0.118.3-alpha](https://github.com/googleapis/java-logging-logback) - [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.code.findbugs:jsr305:3.0.2](http://findbugs.sourceforge.net/) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- com.google.code.gson:gson:2.8.6 - [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- com.google.errorprone:error_prone_annotations:2.4.0 - [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.guava:failureaccess:1.0.1](https://github.com/google/guava/) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- 
[com.google.guava:guava:30.0-android](https://github.com/google/guava/) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.http-client:google-http-client:1.37.0](http://www.google.com/) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- com.google.http-client:google-http-client-jackson2:1.37.0 - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.j2objc:j2objc-annotations:1.3](https://github.com/google/j2objc/) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.google.protobuf:protobuf-java:3.13.0](https://developers.google.com/protocol-buffers/) - [3-Clause BSD License](https://opensource.org/licenses/BSD-3-Clause) -- [com.google.protobuf:protobuf-java-util:3.13.0](https://developers.google.com/protocol-buffers/) - [3-Clause BSD License](https://opensource.org/licenses/BSD-3-Clause) -- [com.nimbusds:nimbus-jose-jwt:9.7](https://connect2id.com) - [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [com.sun.activation:jakarta.activation:1.2.2](https://www.eclipse.org) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [com.sun.istack:istack-commons-runtime:3.0.11](https://www.eclipse.org) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [com.sun.mail:jakarta.mail:1.6.6](http://www.oracle.com) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [com.unboundid:unboundid-ldapsdk:4.0.14](https://github.com/pingidentity/ldapsdk) - [GNU Lesser General Public License version 2.1 (LGPLv2.1)](http://www.gnu.org/licenses/lgpl-2.1.html) -- [com.zaxxer:HikariCP:3.4.5](https://github.com/brettwooldridge) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [commons-codec:commons-codec:1.15](https://commons.apache.org/proper/commons-codec/) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [commons-logging:commons-logging:1.2](http://commons.apache.org/proper/commons-logging/) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.github.classgraph:classgraph:4.8.83](https://github.com/classgraph/classgraph) - [The MIT License (MIT)](http://opensource.org/licenses/MIT) -- [io.grpc:grpc-alts:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-api:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-auth:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-context:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-core:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-grpclb:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-netty-shaded:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- 
[io.grpc:grpc-protobuf:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-protobuf-lite:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.grpc:grpc-stub:1.32.2](https://github.com/grpc/grpc-java) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- [io.micrometer:micrometer-core:1.6.5](https://github.com/micrometer-metrics/micrometer) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.micrometer:micrometer-registry-prometheus:1.6.4](https://github.com/micrometer-metrics/micrometer) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.opencensus:opencensus-api:0.24.0](https://github.com/census-instrumentation/opencensus-java) - [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.opencensus:opencensus-contrib-http-util:0.24.0](https://github.com/census-instrumentation/opencensus-java) - [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.perfmark:perfmark-api:0.19.0](https://github.com/perfmark/perfmark) - [Apache 2.0](https://opensource.org/licenses/Apache-2.0) -- io.prometheus:simpleclient:0.9.0 - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- io.prometheus:simpleclient_common:0.9.0 - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-bean-validators:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-boot-starter:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-core:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-data-rest:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-oas:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-schema:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-spi:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-spring-web:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-spring-webflux:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-spring-webmvc:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-swagger-common:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- 
[io.springfox:springfox-swagger-ui:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [io.springfox:springfox-swagger2:3.0.0](https://github.com/springfox/springfox) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- io.swagger:swagger-annotations:1.5.20 - [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html) -- io.swagger:swagger-models:1.5.20 - [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html) -- io.swagger.core.v3:swagger-annotations:2.1.2 - [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html) -- io.swagger.core.v3:swagger-models:2.1.2 - [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html) -- [jakarta.annotation:jakarta.annotation-api:1.3.5](https://www.eclipse.org) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [jakarta.persistence:jakarta.persistence-api:2.2.3](https://www.eclipse.org) - [Eclipse Public License v. 2.0](http://www.eclipse.org/legal/epl-2.0) -- [jakarta.transaction:jakarta.transaction-api:1.3.3](https://github.com/eclipse-ee4j) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [jakarta.validation:jakarta.validation-api:2.0.2](https://www.eclipse.org) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [jakarta.xml.bind:jakarta.xml.bind-api:2.3.3](https://www.eclipse.org) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [javax.annotation:javax.annotation-api:1.3.2](https://javaee.github.io/glassfish) - [CDDL + GPLv2 with classpath exception](https://github.com/javaee/javax.annotation/blob/master/LICENSE) -- net.bytebuddy:byte-buddy:1.10.22 - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.apache.commons:commons-lang3:3.11](https://commons.apache.org/proper/commons-lang/) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.apache.httpcomponents:httpclient:4.5.13](http://hc.apache.org/httpcomponents-client) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.apache.httpcomponents:httpcore:4.4.14](http://hc.apache.org/httpcomponents-core-ga) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.apache.logging.log4j:log4j-api:2.13.3](https://www.apache.org/) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.apache.logging.log4j:log4j-to-slf4j:2.13.3](https://www.apache.org/) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.apache.tomcat.embed:tomcat-embed-core:9.0.44](https://tomcat.apache.org/) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.apache.tomcat.embed:tomcat-embed-websocket:9.0.44](https://tomcat.apache.org/) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.aspectj:aspectjweaver:1.9.6](https://www.eclipse.org/aspectj/) - [Eclipse Public License - v 1.0](http://www.eclipse.org/legal/epl-v10.html) -- [org.checkerframework:checker-compat-qual:2.5.5](https://checkerframework.org) - [The MIT License](http://opensource.org/licenses/MIT) -- [org.checkerframework:checker-qual:3.5.0](https://checkerframework.org) - [The MIT License](http://opensource.org/licenses/MIT) -- 
org.codehaus.mojo:animal-sniffer-annotations:1.19 - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.conscrypt:conscrypt-openjdk-uber:2.2.1](https://conscrypt.org/) - [Apache 2](https://www.apache.org/licenses/LICENSE-2.0) -- [org.dom4j:dom4j:2.1.3](http://dom4j.github.io/) - [BSD 3-clause New License](https://github.com/dom4j/dom4j/blob/master/LICENSE) -- org.flywaydb:flyway-core:7.1.1 - [Apache License, Version 2.0](https://flywaydb.org/licenses/flyway-community) -- [org.freemarker:freemarker:2.3.31](https://freemarker.apache.org/) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.glassfish:jakarta.el:3.0.3](https://www.eclipse.org) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [org.glassfish.jaxb:jaxb-runtime:2.3.3](https://www.eclipse.org) - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- org.glassfish.jaxb:txw2:2.3.3 - [Eclipse Public License v. 2.0](https://www.eclipse.org/org/documents/epl-2.0/EPL-2.0.txt) -- [org.hdrhistogram:HdrHistogram:2.1.12](http://hdrhistogram.github.io/HdrHistogram/) - [BSD-2-Clause](https://opensource.org/licenses/BSD-2-Clause) -- [org.hibernate:hibernate-core:5.4.29.Final](http://www.hibernate.org/orm/5.4) - [GNU Library General Public License v2.1 or later](http://www.opensource.org/licenses/LGPL-2.1) -- [org.hibernate.common:hibernate-commons-annotations:5.1.2.Final](http://hibernate.org) - [GNU Library General Public License v2.1 or later](http://www.opensource.org/licenses/LGPL-2.1) -- org.hibernate.validator:hibernate-validator:6.1.7.Final - [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.javassist:javassist:3.27.0-GA](http://www.javassist.org/) - [Apache License 2.0](http://www.apache.org/licenses/) -- [org.jboss:jandex:2.2.3.Final](http://www.jboss.org) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jboss.logging:jboss-logging:3.4.1.Final](http://www.jboss.org) - [Apache License, version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jetbrains:annotations:13.0](http://www.jetbrains.org) - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jetbrains.kotlin:kotlin-reflect:1.4.32](https://kotlinlang.org/) - [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jetbrains.kotlin:kotlin-stdlib:1.4.21](https://kotlinlang.org/) - [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jetbrains.kotlin:kotlin-stdlib-common:1.4.21](https://kotlinlang.org/) - [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.4.21](https://kotlinlang.org/) - [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.4.32](https://kotlinlang.org/) - [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.jsoup:jsoup:1.13.1](https://jsoup.org/) - [The MIT License](https://jsoup.org/license) -- [org.latencyutils:LatencyUtils:2.0.3](http://latencyutils.github.io/LatencyUtils/) - [Public Domain, per Creative Commons CC0](http://creativecommons.org/publicdomain/zero/1.0/) -- org.mapstruct:mapstruct:1.3.1.Final - [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) -- 
[org.passay:passay:1.6.0](http://www.passay.org) - [Apache 2](http://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.postgresql:postgresql:42.2.19](https://jdbc.postgresql.org/) - BSD-2-Clause -- [org.slf4j:jul-to-slf4j:1.7.30](http://www.slf4j.org) - [MIT License](http://www.opensource.org/licenses/mit-license.php) -- [org.slf4j:slf4j-api:1.7.30](http://www.slf4j.org) - [MIT License](http://www.opensource.org/licenses/mit-license.php) -- [org.springframework:spring-aop:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-aspects:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-beans:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-context:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-context-support:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-core:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-expression:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-jcl:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-jdbc:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-orm:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-tx:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-web:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework:spring-webmvc:5.3.5](https://github.com/spring-projects/spring-framework) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-actuator:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-actuator-autoconfigure:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-autoconfigure:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 
2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-actuator:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-aop:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-data-jpa:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-freemarker:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-jdbc:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-json:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-logging:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-mail:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-security:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-tomcat:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-validation:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.boot:spring-boot-starter-web:2.4.4](https://spring.io/projects/spring-boot) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- org.springframework.cloud:spring-cloud-gcp-autoconfigure:1.2.7.RELEASE - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- org.springframework.cloud:spring-cloud-gcp-core:1.2.7.RELEASE - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- org.springframework.cloud:spring-cloud-gcp-logging:1.2.7.RELEASE - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- org.springframework.cloud:spring-cloud-gcp-starter:1.2.7.RELEASE - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.cloud:spring-cloud-gcp-starter-logging:1.2.7.RELEASE](https://github.com/spring-cloud/spring-cloud-gcp/tree/master/spring-cloud-gcp-starters/spring-cloud-gcp-starter-logging) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- org.springframework.data:spring-data-commons:2.4.6 - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.data:spring-data-jpa:2.4.6](https://projects.spring.io/spring-data-jpa) - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.ldap:spring-ldap-core:2.3.3.RELEASE](https://www.springframework.org/ldap) - [The Apache Software License, Version 
2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- org.springframework.plugin:spring-plugin-core:2.0.0.RELEASE - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- org.springframework.plugin:spring-plugin-metadata:2.0.0.RELEASE - [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) -- [org.springframework.retry:spring-retry:1.3.1](https://www.springsource.org) - [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.springframework.security:spring-security-config:5.4.5](https://spring.io/spring-security) - [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.springframework.security:spring-security-core:5.4.5](https://spring.io/spring-security) - [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.springframework.security:spring-security-ldap:5.4.5](https://spring.io/spring-security) - [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.springframework.security:spring-security-web:5.4.5](https://spring.io/spring-security) - [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) -- [org.threeten:threetenbp:1.4.5](https://www.threeten.org) - [BSD 3-clause](https://raw.githubusercontent.com/ThreeTen/threetenbp/master/LICENSE.txt) -- [org.yaml:snakeyaml:1.27](http://www.snakeyaml.org) - [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt) - - - - - - -### Optimize Dependencies (front end) - -* [@babel/runtime@7.15.3](https://babel.dev/team) (MIT) -* [@bpmn-io/dmn-migrate@0.4.3](https://github.com/bpmn-io/dmn-migrate) (MIT) -* [@ibm/plex@5.2.1](https://github.com/ibm/plex) (OFL-1.1) -* [@types/debug@4.1.7](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/hast@2.3.1](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/mdast@3.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/mdurl@1.0.2](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/ms@0.7.31](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/prop-types@15.7.3](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/react@17.0.0](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [@types/unist@2.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -* [bail@2.0.1](https://wooorm.com) (MIT) -* [bpmn-js-disable-collapsed-subprocess@0.1.3](https://github.com/barmac) (MIT) -* [bpmn-js@8.7.3](https://github.com/nikku) (MIT*) -* [bpmn-moddle@7.1.2](https://github.com/nikku) (MIT) -* [camunda-dmn-moddle@1.0.0](https://github.com/philippfromme) (MIT) -* [character-entities-legacy@2.0.0](https://wooorm.com) (MIT) -* [character-entities@2.0.0](https://wooorm.com) (MIT) -* [character-reference-invalid@2.0.0](https://wooorm.com) (MIT) -* [chart.js@3.5.1](https://github.com/chartjs/Chart.js) (MIT) -* [chartjs-plugin-datalabels@2.0.0](https://github.com/chartjs/chartjs-plugin-datalabels) (MIT) -* [classnames@2.3.1](https://github.com/JedWatson/classnames) (MIT) -* [comma-separated-tokens@2.0.2](https://wooorm.com) (MIT) -* [component-event@0.1.4](https://github.com/component/event) (MIT*) -* [component-props@1.1.1](https://github.com/component/props) (MIT*) -* [component-xor@0.0.4](https://github.com/component/xor) (MIT) -* [css.escape@1.5.1](https://mathiasbynens.be/) (MIT) -* [csstype@3.0.5](https://github.com/frenic/csstype) (MIT) -* 
[date-fns@2.24.0](https://github.com/date-fns/date-fns) (MIT) -* [debounce@1.2.1](https://github.com/component/debounce) (MIT) -* [debug@4.3.1](https://github.com/visionmedia/debug) (MIT) -* [dequal@2.0.2](https://lukeed.com) (MIT) -* [diagram-js-direct-editing@1.6.3](https://github.com/bpmn-io/diagram-js-direct-editing) (MIT) -* [diagram-js@7.3.0](https://github.com/nikku) (MIT) -* [didi@5.2.1](https://github.com/nikku/didi) (MIT) -* [dmn-js-decision-table@11.0.2](https://github.com/bpmn-io/dmn-js) (MIT*) -* [dmn-js-drd@11.0.2](https://github.com/bpmn-io/dmn-js) (MIT*) -* [dmn-js-literal-expression@11.0.2](https://github.com/bpmn-io/dmn-js) (MIT*) -* [dmn-js-shared@11.0.2](https://github.com/bpmn-io/dmn-js) (MIT*) -* [dmn-js@11.0.2](https://github.com/SebastianStamm) (MIT*) -* [dmn-moddle@10.0.0](https://github.com/SebastianStamm) (MIT) -* [dmn-moddle@8.0.4](https://github.com/SebastianStamm) (MIT) -* [dom-iterator@1.0.0](https://github.com/MatthewMueller/dom-iterator) (MIT) -* [domify@1.4.0](https://github.com/component/domify) (MIT) -* [escape-html@1.0.3](https://github.com/component/escape-html) (MIT) -* [extend@3.0.2](http://www.justmoon.net) (MIT) -* [fast-deep-equal@3.1.3](https://github.com/epoberezkin/fast-deep-equal) (MIT) -* [fitty@2.3.5](https://pqina.nl/) (MIT) -* [focus-visible@5.2.0](https://github.com/WICG/focus-visible) (W3C) -* [fscreen@1.0.2](https://github.com/rafrex/fscreen) (MIT) -* [hammerjs@2.0.8](https://github.com/hammerjs/hammer.js) (MIT) -* [hat@0.0.3](http://substack.net) (MIT*) -* [heatmap.js@2.0.5](https://www.patrick-wied.at/) (MIT*) -* [history@4.10.1](https://github.com/ReactTraining/history) (MIT) -* [hoist-non-react-statics@3.3.0](https://github.com/mridgway/hoist-non-react-statics) (BSD-3-Clause) -* [ids@0.2.2](https://github.com/Nikku) (MIT) -* [ids@1.0.0](https://github.com/Nikku) (MIT) -* [immutability-helper@3.1.1](https://github.com/kolodny/immutability-helper) (MIT) -* [indexof@0.0.1](https://www.npmjs.com/package/indexof) (MIT*) -* [inferno-shared@5.6.1](https://github.com/infernojs/inferno) (MIT) -* [inferno-vnode-flags@5.6.1](https://github.com/infernojs/inferno) (MIT) -* [inferno@5.6.2](https://github.com/infernojs/inferno) (MIT) -* [inherits@2.0.4](https://github.com/isaacs/inherits) (ISC) -* [inline-style-parser@0.1.1](https://github.com/remarkablemark/inline-style-parser) (MIT) -* [is-alphabetical@2.0.0](https://wooorm.com) (MIT) -* [is-alphanumerical@2.0.0](https://wooorm.com) (MIT) -* [is-buffer@2.0.5](https://feross.org) (MIT) -* [is-decimal@2.0.0](https://wooorm.com) (MIT) -* [is-hexadecimal@2.0.0](https://wooorm.com) (MIT) -* [is-plain-obj@4.0.0](https://sindresorhus.com) (MIT) -* [isarray@0.0.1](http://juliangruber.com) (MIT) -* [js-tokens@4.0.0](https://github.com/lydell/js-tokens) (MIT) -* [lodash.isequal@4.5.0](http://allyoucanleet.com/) (MIT) -* [loose-envify@1.4.0](https://github.com/zertosh/loose-envify) (MIT) -* [matches-selector@1.2.0](https://github.com/ForbesLindesay/matches-selector) (MIT) -* [mdast-util-definitions@5.1.0](https://wooorm.com) (MIT) -* [mdast-util-from-markdown@1.0.0](https://wooorm.com) (MIT) -* [mdast-util-to-hast@11.2.0](https://wooorm.com) (MIT) -* [mdast-util-to-string@3.1.0](https://wooorm.com) (MIT) -* [mdurl@1.0.1](https://github.com/markdown-it/mdurl) (MIT) -* [micromark-core-commonmark@1.0.0](https://wooorm.com) (MIT) -* [micromark-factory-destination@1.0.0](https://wooorm.com) (MIT) -* [micromark-factory-label@1.0.0](https://wooorm.com) (MIT) -* [micromark-factory-space@1.0.0](https://wooorm.com) 
(MIT) -* [micromark-factory-title@1.0.0](https://wooorm.com) (MIT) -* [micromark-factory-whitespace@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-character@1.1.0](https://wooorm.com) (MIT) -* [micromark-util-chunked@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-classify-character@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-combine-extensions@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-decode-numeric-character-reference@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-encode@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-html-tag-name@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-normalize-identifier@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-resolve-all@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-sanitize-uri@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-subtokenize@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-symbol@1.0.0](https://wooorm.com) (MIT) -* [micromark-util-types@1.0.0](https://wooorm.com) (MIT) -* [micromark@3.0.3](https://wooorm.com) (MIT) -* [min-dash@3.5.2](https://github.com/bpmn-io/min-dash) (MIT) -* [min-dom@3.1.3](https://github.com/bpmn-io/min-dom) (MIT) -* [mini-create-react-context@0.4.0](https://github.com/StringEpsilon/mini-create-react-context) (MIT) -* [moddle-xml@8.0.2](https://github.com/Nikku) (MIT) -* [moddle-xml@9.0.5](https://github.com/Nikku) (MIT) -* [moddle@5.0.1](https://github.com/Nikku) (MIT) -* [moddle@5.0.2](https://github.com/Nikku) (MIT) -* [ms@2.1.2](https://github.com/zeit/ms) (MIT) -* [object-assign@4.1.1](https://github.com/sindresorhus/object-assign) (MIT) -* [object-refs@0.3.0](https://github.com/Nikku) (MIT) -* [opencollective-postinstall@2.0.3](https://github.com/opencollective/opencollective-postinstall) (MIT) -* [parse-entities@3.0.0](https://wooorm.com) (MIT) -* [path-intersection@2.2.1](https://github.com/nikku) (MIT) -* [path-to-regexp@1.7.0](https://github.com/pillarjs/path-to-regexp) (MIT) -* [prop-types@15.7.2](https://github.com/facebook/prop-types) (MIT) -* [property-information@6.0.1](https://wooorm.com) (MIT) -* [react-date-range@1.4.0](https://github.com/hypeserver/react-date-range) (MIT) -* [react-dom@17.0.2](https://github.com/facebook/react) (MIT) -* [react-draggable@4.2.0](https://github.com/mzabriskie/react-draggable) (MIT) -* [react-full-screen@1.1.0](https://github.com/snakesilk/react-fullscreen) (MIT) -* [react-grid-layout@1.3.0](http://strml.net/) (MIT) -* [react-is@16.13.1](https://github.com/facebook/react) (MIT) -* [react-is@17.0.2](https://github.com/facebook/react) (MIT) -* [react-list@0.8.13](https://github.com/coderiety/react-list) (MIT) -* [react-markdown@7.0.1](https://github.com/remarkjs/react-markdown) (MIT) -* [react-resizable@3.0.4](http://strml.net/) (MIT) -* [react-router-dom@5.3.0](https://github.com/ReactTraining/react-router) (MIT) -* [react-router@5.2.1](https://github.com/ReactTraining/react-router) (MIT) -* [react-table@7.7.0](https://github.com/tannerlinsley/react-table) (MIT) -* [react@17.0.2](https://github.com/facebook/react) (MIT) -* [regenerator-runtime@0.13.5](https://github.com/facebook/regenerator/tree/master/packages/regenerator-runtime) (MIT) -* [remark-parse@10.0.0](https://wooorm.com) (MIT) -* [remark-rehype@9.0.0](https://wooorm.com) (MIT) -* [resolve-pathname@3.0.0](https://github.com/mjackson/resolve-pathname) (MIT) -* [saxen@8.1.0](http://vflash.ru) (MIT) -* [saxen@8.1.2](http://vflash.ru) (MIT) -* [scheduler@0.20.2](https://github.com/facebook/react) (MIT) -* 
[selection-ranges@3.0.3](https://github.com/nikku/selection-ranges) (MIT) -* [selection-update@0.1.2](https://github.com/nikku/selection-update) (MIT) -* [shallow-equal@1.2.1](https://github.com/moroshko/shallow-equal) (MIT) -* [space-separated-tokens@2.0.1](https://wooorm.com) (MIT) -* [style-to-object@0.3.0](https://github.com/remarkablemark/style-to-object) (MIT) -* [table-js@7.2.0](https://github.com/bpmn-io/table-js) (MIT) -* [tiny-invariant@1.0.6](https://github.com/alexreardon/tiny-invariant) (MIT) -* [tiny-svg@2.2.2](https://github.com/nikku) (MIT) -* [tiny-warning@1.0.3](https://github.com/alexreardon/tiny-warning) (MIT) -* [trough@2.0.2](https://wooorm.com) (MIT) -* [unified@10.1.0](https://wooorm.com) (MIT) -* [unist-builder@3.0.0](https://github.com/syntax-tree/unist-builder) (MIT) -* [unist-util-generated@2.0.0](https://wooorm.com) (MIT) -* [unist-util-is@5.1.1](https://wooorm.com) (MIT) -* [unist-util-position@4.0.1](https://wooorm.com) (MIT) -* [unist-util-stringify-position@3.0.0](https://wooorm.com) (MIT) -* [unist-util-visit-parents@4.1.1](https://wooorm.com) (MIT) -* [unist-util-visit-parents@5.0.0](https://wooorm.com) (MIT) -* [unist-util-visit@3.1.0](https://wooorm.com) (MIT) -* [unist-util-visit@4.0.0](https://wooorm.com) (MIT) -* [use-deep-compare-effect@1.8.0](https://kentcdodds.com) (MIT) -* [value-equal@1.0.1](https://github.com/mjackson/value-equal) (MIT) -* [vfile-message@3.0.2](https://wooorm.com) (MIT) -* [vfile@5.0.2](https://wooorm.com) (MIT) - -### Optimize Dependencies (back end) - -* logback-classic@1.2.6, [(Eclipse Public License - v 1.0)](http://www.eclipse.org/legal/epl-v10.html) -* logback-core@1.2.6, [(Eclipse Public License - v 1.0)](http://www.eclipse.org/legal/epl-v10.html) -* java-jwt@3.18.2, [(The MIT License (MIT))](https://raw.githubusercontent.com/auth0/java-jwt/master/LICENSE) -* hppc@0.8.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* itu@1.3.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-annotations@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-core@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-databind@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-dataformat-cbor@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-dataformat-smile@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-dataformat-yaml@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-datatype-jdk8@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-datatype-jsr310@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-jaxrs-base@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-jaxrs-json-provider@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jackson-module-jaxb-annotations@2.13.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* classmate@1.5.1, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* 
caffeine@3.0.4, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* dateparser@1.0.7, [(Apache-2.0 License)](https://github.com/sisyphsu/dateparser/blob/master/LICENSE) -* retree@1.0.4, [(Apache-2.0 License)](https://github.com/sisyphsu/retree-java/blob/master/LICENSE) -* compiler@0.9.6, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jcip-annotations@1.0-1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jsr305@3.0.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* error_prone_annotations@2.9.0, [(Apache 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* failureaccess@1.0.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* guava@31.0.1-jre, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* listenablefuture@9999.0-empty-to-avoid-conflict-with-guava, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* j2objc-annotations@1.3, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* json-path@2.6.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* c3p0@0.9.5.4, [(GNU Lesser General Public License, Version 2.1)](http://www.gnu.org/licenses/lgpl-2.1.html) -* mchange-commons-java@0.2.15, [(GNU Lesser General Public License, Version 2.1)](http://www.gnu.org/licenses/lgpl-2.1.html) -* content-type@2.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lang-tag@1.5, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* nimbus-jose-jwt@9.10.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* oauth2-oidc-sdk@9.9.1, (Apache License, version 2.0) -* opencsv@5.5.2, [(Apache 2)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* javax.mail@1.5.6, [(CDDL/GPLv2+CE)](https://glassfish.java.net/public/CDDL+GPL_1_1.html) -* t-digest@3.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* HikariCP-java7@2.4.13, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-beanutils@1.9.4, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-codec@1.15, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-collections@3.2.2, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-logging@1.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* zeebe-protocol@1.2.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* classgraph@4.8.120, [(The MIT License (MIT))](http://opensource.org/licenses/MIT) -* jakarta.activation-api@1.2.2, [(EDL 1.0)](http://www.eclipse.org/org/documents/edl-v10.php) -* jakarta.annotation-api@1.3.5, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jakarta.el-api@3.0.3, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jakarta.validation-api@2.0.2, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jakarta.ws.rs-api@2.1.6, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jakarta.xml.bind-api@2.3.3, [(Eclipse Distribution License - v 1.0)](http://www.eclipse.org/org/documents/edl-v10.php) -* activation@1.1, 
[(Common Development and Distribution License (CDDL) v1.0)](https://glassfish.dev.java.net/public/CDDLv1.0.html) -* javax.activation-api@1.2.0, [(CDDL/GPLv2+CE)](https://github.com/javaee/activation/blob/master/LICENSE.txt) -* javax.annotation-api@1.3.2, [(CDDL + GPLv2 with classpath exception)](https://github.com/javaee/javax.annotation/blob/master/LICENSE) -* javax.servlet-api@3.1.0, [(CDDL + GPLv2 with classpath exception)](https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html) -* validation-api@2.0.1.Final, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* javax.websocket-api@1.0, [(Dual license consisting of the CDDL v1.1 and GPL v2)](https://glassfish.java.net/public/CDDL+GPL_1_1.html) -* javax.websocket-client-api@1.0, [(Dual license consisting of the CDDL v1.1 and GPL v2)](https://glassfish.java.net/public/CDDL+GPL_1_1.html) -* javax.ws.rs-api@2.1.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jaxb-api@2.3.1, [(CDDL 1.1)](https://oss.oracle.com/licenses/CDDL+GPL-1.1) -* joda-time@2.1, [(Apache 2)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* failsafe@2.4.4, [(Apache License, Version 2.0)](http://apache.org/licenses/LICENSE-2.0) -* accessors-smart@2.4.7, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* json-smart@2.4.7, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jopt-simple@5.0.2, [(The MIT License)](http://www.opensource.org/licenses/mit-license.php) -* agrona@1.12.0, [(The Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-collections4@4.4, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-email@1.5, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-lang3@3.12.0, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-math3@3.6.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* commons-text@1.9, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* httpasyncclient@4.1.4, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* httpclient@4.5.13, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* httpcore-nio@4.4.14, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* httpcore@4.4.14, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* log4j-api@2.14.1, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* log4j-to-slf4j@2.14.1, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-analyzers-common@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-backward-codecs@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-core@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-grouping@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-highlighter@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-join@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-memory@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-misc@8.7.0, [(Apache 
License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-queries@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-queryparser@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-sandbox@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-spatial-extras@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-spatial3d@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lucene-suggest@8.7.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* tika-core@1.27, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-engine-dmn@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-engine-feel-api@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-engine-feel-juel@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-engine-feel-scala@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-bpmn-model@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-cmmn-model@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-dmn-model@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-xml-model@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-engine@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-license-check@2.7.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-commons-logging@1.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-commons-typed-values@7.16.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-commons-utils@1.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-connect-connectors-all@1.5.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* camunda-connect-core@1.5.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* checker-qual@3.18.0, [(The MIT License)](http://opensource.org/licenses/MIT) -* javax-websocket-client-impl@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* javax-websocket-server-impl@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* websocket-api@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* websocket-client@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* websocket-common@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* websocket-server@9.4.44.v20210927, [(Apache Software License - Version 
2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* websocket-servlet@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-annotations@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-client@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-continuation@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-http@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-io@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-jndi@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-plus@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-rewrite@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-security@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-server@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-servlet@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-servlets@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-util-ajax@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-util@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-webapp@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* jetty-xml@9.4.44.v20210927, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -* elasticsearch-rest-client@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* elasticsearch-rest-high-level-client@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* aggs-matrix-stats-client@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* lang-mustache-client@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* mapper-extras-client@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* parent-join-client@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* rank-eval-client@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* elasticsearch-cli@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* elasticsearch-core@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* elasticsearch-geo@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* elasticsearch-secure-sm@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* elasticsearch-x-content@7.10.0, [(The Apache Software License, 
Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* elasticsearch@7.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jna@5.5.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* aopalliance-repackaged@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jakarta.inject@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* class-model@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* hk2-api@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* hk2-core@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* hk2-locator@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* hk2-runlevel@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* hk2-utils@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* hk2@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* osgi-resource-locator@1.0.3, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* spring-bridge@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-container-servlet-core@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-client@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-common@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-server@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-bean-validation@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-entity-filtering@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-spring5@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-hk2@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jersey-media-json-jackson@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* jakarta.el@3.0.4, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -* HdrHistogram@2.1.9, [(Public Domain, per Creative Commons CC0)](http://creativecommons.org/publicdomain/zero/1.0/) -* hibernate-validator@6.2.0.Final, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* javassist@3.25.0-GA, [(MPL 1.1)](http://www.mozilla.org/MPL/MPL-1.1.html) -* jboss-logging@3.4.1.Final, [(Apache License, version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* mybatis@3.5.6, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -* asm-analysis@9.2, [(BSD-3-Clause)](https://asm.ow2.io/license.html) -* asm-commons@9.2, [(BSD-3-Clause)](https://asm.ow2.io/license.html) -* asm-tree@9.2, [(BSD-3-Clause)](https://asm.ow2.io/license.html) -* asm-util@7.1, [(BSD)](http://asm.ow2.org/license.html) -* asm@9.2, [(BSD-3-Clause)](https://asm.ow2.io/license.html) -* quartz@2.3.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -* jul-to-slf4j@1.7.32, [(MIT License)](http://www.opensource.org/licenses/mit-license.php) -* slf4j-api@1.7.32, [(MIT License)](http://www.opensource.org/licenses/mit-license.php) -* spring-security-config@5.5.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-security-core@5.5.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-security-crypto@5.5.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-security-oauth2-client@5.5.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-security-oauth2-core@5.5.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* 
spring-security-oauth2-jose@5.5.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-security-web@5.5.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-aop@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-beans@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-context-support@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-context@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-core@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-expression@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-jcl@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-tx@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-web@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* spring-websocket@5.3.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -* snakeyaml@1.28, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) - - - - - - -Desktop Modeler is a desktop modeling application that builds upon a number of third party libraries. You find an up-to-date list of third party libraries used and their license terms in the [THIRD_PARTY_NOTICES](https://github.com/camunda/camunda-modeler/blob/master/THIRD_PARTY_NOTICES), located in the root of the source code repository. This file is also shipped with the application distribution as `THIRD_PARTY_NOTICES.camunda-modeler.txt`. - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-boolean.md b/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-boolean.md deleted file mode 100644 index dbe9ea291d9..00000000000 --- a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-boolean.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: feel-built-in-functions-boolean -title: Boolean Functions -description: "This document outlines current boolean functions and a few examples." ---- - -## not() - -* parameters: - * `negand`: boolean -* result: boolean - -```js -not(true) -// false -``` - -## is defined() - -Checks if a given value is defined. A value is defined if it exists, and it is an instance of one of the FEEL data types including `null`. - -The function can be used to check if a variable or a context entry (e.g. a property of a variable) exists. It allows differentiating between a `null` variable and a value that doesn't exist. 
- -* parameters: - * `value`: any -* result: boolean - -```js -is defined(1) -// true - -is defined(null) -// true - -is defined(x) -// false - if no variable "x" exists - -is defined(x.y) -// false - if no variable "x" exists or it doesn't have a property "y" -``` diff --git a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-context.md b/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-context.md deleted file mode 100644 index 8c4de53d851..00000000000 --- a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-context.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -id: feel-built-in-functions-context -title: Context Functions -description: "This document outlines context functions and a few examples." ---- - -## get value() - -Returns the value of the context entry with the given key. - -* parameters: - * `context`: context - * `key`: string -* result: any - -```js -get value({foo: 123}, "foo") -// 123 -``` - -## get entries() - -Returns the entries of the context as a list of key-value-pairs. - -* parameters: - * `context`: context -* result: list of context which contains two entries for "key" and "value" - -```js -get entries({foo: 123}) -// [{key: "foo", value: 123}] -``` - -## put() - -Add the given key and value to a context. Returns a new context that includes the entry. It might override an existing entry of the context. - -Returns `null` if the value is not defined. - -* parameters: - * `context`: context - * `key`: string - * `value`: any -* result: context - -```js -put({x:1}, "y", 2) -// {x:1, y:2} -``` - -## put all() - -Union the given contexts (two or more). Returns a new context that includes all entries of the given contexts. It might override context entries if the keys are equal. The entries are overridden in the same order as the contexts are passed in the method. - -Returns `null` if one of the values is not a context. - -* parameters: - * `contexts`: contexts as varargs -* result: context - -```js -put all({x:1}, {y:2}) -// {x:1, y:2} -``` diff --git a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-conversion.md b/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-conversion.md deleted file mode 100644 index fa57cb650aa..00000000000 --- a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-conversion.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -id: feel-built-in-functions-conversion -title: Conversion Functions ---- - -Convert a value into a different type. 
- -## date() - -* parameters: - * `from`: string / date-time - * or `year`, `month`, `day`: number -* result: date - -```js -date(birthday) -// date("2018-04-29") - -date(date and time("2012-12-25T11:00:00")) -// date("2012-12-25") - -date(2012, 12, 25) -// date("2012-12-25") -``` - -## time() - -* parameters: - * `from`: string / date-time - * or `hour`, `minute`, `second`: number - * (optional) `offset`: day-time-duration -* result: time - -```js -time(lunchTime) -// time("12:00:00") - -time(date and time("2012-12-25T11:00:00")) -// time("11:00:00") - -time(23, 59, 0) -// time("23:59:00") - -time(14, 30, 0, duration("PT1H")) -// time("15:30:00") -``` - -## date and time() - -* parameters: - * `date`: date / date-time - * `time`: time - * or `from`: string -* result: date-time - -```js -date and time(date("2012-12-24"),time("T23:59:00")) -// date and time("2012-12-24T23:59:00") - -date and time(date and time("2012-12-25T11:00:00"),time("T23:59:00")) -// date and time("2012-12-25T23:59:00") - -date and time(birthday) -// date and time("2018-04-29T09:30:00") -``` - -## duration() - -* parameters: - * `from`: string -* result: day-time-duration or year-month-duration - -```js -duration(weekDays) -// duration("P5D") - -duration(age) -// duration("P32Y") -``` - -## years and months duration() - -* parameters: - * `from`: date - * `to`: date -* result: year-month-duration - -```js -years and months duration(date("2011-12-22"), date("2013-08-24")) -// duration("P1Y8M") -``` - -## number() - -* parameters: - * `from`: string -* result: number - -```js -number("1500.5") -// 1500.5 -``` - -## string() - -* parameters: - * `from`: any -* result: string - -```js -string(1.1) -// "1.1" - -string(date("2012-12-25")) -// "2012-12-25" -``` - -## context() - -Constructs a context of the given list of key-value pairs. It is the reverse function to [get entries()](feel-built-in-functions-context.md#get-entries). - -Each key-value pair must be a context with two entries: `key` and `value`. The entry with name `key` must have a value of the type `string`. - -It might override context entries if the keys are equal. The entries are overridden in the same order as the contexts in the given list. - -Returns `null` if one of the entries is not a context or if a context doesn't contain the required entries. - -* parameters: - * `entries`: list of contexts -* result: context - -```js -context([{"key":"a", "value":1}, {"key":"b", "value":2}]) -// {a:1, b:2} -``` diff --git a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-list.md b/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-list.md deleted file mode 100644 index c28c94f6ac8..00000000000 --- a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-list.md +++ /dev/null @@ -1,321 +0,0 @@ ---- -id: feel-built-in-functions-list -title: List Functions -description: "This document outlines built-in list functions and examples." 
---- - -## list contains() - -* parameters: - * `list`: list - * `element`: any -* result: boolean - -```js -list contains([1,2,3], 2) -// true -``` - -## count() - -* parameters: - * `list`: list -* result: number - -```js -count([1,2,3]) -// 3 -``` - -## min() - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: number - -```js -min([1,2,3]) -// 1 - -min(1,2,3) -// 1 -``` - -## max() - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: number - -```js -max([1,2,3]) -// 3 - -max(1,2,3) -// 3 -``` - -## sum() - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: number - -```js -sum([1,2,3]) -// 6 - -sum(1,2,3) -// 6 -``` - -## product() - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: number - -```js -product([2, 3, 4]) -// 24 - -product(2, 3, 4) -// 24 -``` - -## mean() - -Returns the arithmetic mean (i.e. average). - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: number - -```js -mean([1,2,3]) -// 2 - -mean(1,2,3) -// 2 -``` - -## median() - -Returns the median element of the list of numbers. - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: number - -```js -median(8, 2, 5, 3, 4) -// 4 - -median([6, 1, 2, 3]) -// 2.5 -``` - -## stddev() - -Returns the standard deviation. - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: number - -```js -stddev(2, 4, 7, 5) -// 2.0816659994661326 - -stddev([2, 4, 7, 5]) -// 2.0816659994661326 -``` - -## mode() - -Returns the mode of the list of numbers. - -* parameters: - * `list`: list of numbers - * or numbers as varargs -* result: list of numbers - -```js -mode(6, 3, 9, 6, 6) -// [6] - -mode([6, 1, 9, 6, 1]) -// [1, 6] -``` - -## and() / all() - -* parameters: - * `list`: list of booleans - * or booleans as varargs -* result: boolean - -```js -and([true,false]) -// false - -and(false,null,true) -// false -``` - -## or() / any() - -* parameters: - * `list`: list of booleans - * or booleans as varargs -* result: boolean - -```js -or([false,true]) -// true - -or(false,null,true) -// true -``` - -## sublist() - -* parameters: - * `list`: list - * `start position`: number - * (optional) `length`: number -* result: list - -```js -sublist([1,2,3], 2) -// [2,3] - -sublist([1,2,3], 1, 2) -// [1,2] -``` - -## append() - -* parameters: - * `list`: list - * `items`: elements as varargs -* result: list - -```js -append([1], 2, 3) -// [1,2,3] -``` - -## concatenate() - -* parameters: - * `lists`: lists as varargs -* result: list - -```js -concatenate([1,2],[3]) -// [1,2,3] - -concatenate([1],[2],[3]) -// [1,2,3] -``` - -## insert before() - -* parameters: - * `list`: list - * `position`: number - * `newItem`: any -* result: list - -```js -insert before([1,3],1,2) -// [1,2,3] -``` - -## remove() - -* parameters: - * `list`: list - * `position`: number -* result: list - -```js -remove([1,2,3], 2) -// [1,3] -``` - -## reverse() - -* parameters: - * `list`: list -* result: list - -```js -reverse([1,2,3]) -// [3,2,1] -``` - -## index of() - -* parameters: - * `list`: list - * `match`: any -* result: list of numbers - -```js -index of([1,2,3,2],2) -// [2,4] -``` - -## union() - -* parameters: - * `lists`: lists as varargs -* result: list - -```js -union([1,2],[2,3]) -// [1,2,3] -``` - -## distinct values() - -* parameters: - * `list`: list -* result: list - -```js -distinct values([1,2,3,2,1]) -// [1,2,3] -``` - -## flatten() - -* parameters: - * `list`: list 
-* result: list - -```js -flatten([[1,2],[[3]], 4]) -// [1,2,3,4] -``` - -## sort() - -* parameters: - * `list`: list - * `precedes`: function with two arguments and boolean result -* result: list - -```js -sort(list: [3,1,4,5,2], precedes: function(x,y) x < y) -// [1,2,3,4,5] -``` diff --git a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-numeric.md b/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-numeric.md deleted file mode 100644 index d9cd7da5f1b..00000000000 --- a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-numeric.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -id: feel-built-in-functions-numeric -title: Numeric Functions -description: "This document outlines built-in numeric functions and examples." ---- - -## decimal() - -Rounds the given number at the given scale using the given rounding mode. If no rounding mode is passed in, it uses `HALF_EVEN` as default. - -* parameters: - * `n`: number - * `scale`: number - * (optional) `mode`: string - one of `UP, DOWN, CEILING, FLOOR, HALF_UP, HALF_DOWN, HALF_EVEN, UNNECESSARY` (default: `HALF_EVEN`) -* result: number - -```js -decimal(1/3, 2) -// .33 - -decimal(1.5, 0) -// 2 - -decimal(2.5, 0, "half_up") -// 3 -``` - -## floor() - -* parameters: - * `n`: number -* result: number - -```js -floor(1.5) -// 1 - -floor(-1.5) -// -2 -``` - -## ceiling() - -* parameters: - * `n`: number -* result: number - -```js -ceiling(1.5) -// 2 - -ceiling(-1.5) -// -1 -``` - -## abs() - -* parameters: - * `number`: number -* result: number - -```js -abs(10) -// 10 - -abs(-10) -// 10 -``` - -## modulo() - -Returns the remainder of the division of dividend by divisor. - -* parameters: - * `dividend`: number - * `divisor`: number -* result: number - -```js -modulo(12, 5) -// 2 -``` - -## sqrt() - -Returns the square root. - -* parameters: - * `number`: number -* result: number - -```js -sqrt(16) -// 4 -``` - -## log() - -Returns the natural logarithm (base e) of the number. - -* parameters: - * `number`: number -* result: number - -```js -log(10) -// 2.302585092994046 -``` - -## exp() - -Returns Euler’s number e raised to the power of the given number. - -* parameters: - * `number`: number -* result: number - -```js -exp(5) -// 148.4131591025766 -``` - -## odd() - -* parameters: - * `number`: number -* result: boolean - -```js -odd(5) -// true -``` - -## even() - -* parameters: - * `number`: number -* result: boolean - -```js -even(5) -// false -``` diff --git a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-string.md b/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-string.md deleted file mode 100644 index 59fa1bac1f6..00000000000 --- a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-string.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -id: feel-built-in-functions-string -title: String Functions -description: "This document outlines built-in string functions and examples."
---- - -## substring() - -* parameters: - * `string`: string - * `start position`: number - * (optional) `length`: number -* result: string - -```js -substring("foobar",3) -// "obar" - -substring("foobar",3,3) -// "oba" -``` - -## string length() - -* parameters: - * `string`: string -* result: number - -```js -string length("foo") -// 3 -``` - -## upper case() - -* parameters: - * `string`: string -* result: string - -```js -upper case("aBc4") -// "ABC4" -``` - -## lower case() - -* parameters: - * `string`: string -* result: string - -```js -lower case("aBc4") -// "abc4" -``` - -## substring before() - -* parameters: - * `string`: string - * `match`: string -* result: string - -```js -substring before("foobar", "bar") -// "foo" -``` - -## substring after() - -* parameters: - * `string`: string - * `match`: string -* result: string - -```js -substring after("foobar", "ob") -// "ar" -``` - -## contains() - -* parameters: - * `string`: string - * `match`: string -* result: boolean - -```js -contains("foobar", "of") -// false -``` - -## starts with() - -* parameters: - * `input`: string - * `match`: string -* result: boolean - -```js -starts with("foobar", "fo") -// true -``` - -## ends with() - -* parameters: - * `input`: string - * `match`: string -* result: boolean - -```js -ends with("foobar", "r") -// true -``` - -## matches() - -* parameters: - * `input`: string - * `pattern`: string (regular expression) -* result: boolean - -```js -matches("foobar", "^fo*bar") -// true -``` - -## replace() - -* parameters: - * `input`: string - * `pattern`: string (regular expression) - * `replacement`: string (e.g. `$1` returns the first match group) - * (optional) `flags`: string ("s", "m", "i", "x") -* result: string - -```js -replace("abcd", "(ab)|(a)", "[1=$1][2=$2]") -// "[1=ab][2=]cd" - -replace("0123456789", "(\d{3})(\d{3})(\d{4})", "($1) $2-$3") -// "(012) 345-6789" -``` - -## split() - -* parameters: - * `string`: string - * `delimiter`: string (regular expression) -* result: list of strings - -```js -split("John Doe", "\s" ) -// ["John", "Doe"] - -split("a;b;c;;", ";") -// ["a", "b", "c", "", ""] -``` diff --git a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-temporal.md b/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-temporal.md deleted file mode 100644 index df6089a9f57..00000000000 --- a/versioned_docs/version-1.3/reference/feel/builtin-functions/feel-built-in-functions-temporal.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: feel-built-in-functions-temporal -title: Temporal Functions -description: "This document outlines built-in temporal functions and examples." ---- - -## now() - -Returns the current date and time including the timezone. - -* parameters: no -* result: date-time with timezone - -```js -now() -// date and time("2020-07-31T14:27:30@Europe/Berlin") -``` - -## today() - -Returns the current date. - -* parameters: no -* result: date - -```js -today() -// date("2020-07-31") -``` - -## day of week() - -Returns the day of the week according to the Gregorian calendar. Note that it returns always the English name of the day. - -* parameters: - * `date`: date/date-time -* result: string - -```js -day of week(date("2019-09-17")) -// "Tuesday" -``` - -## day of year() - -Returns the Gregorian number of the day within the year. 
- -* parameters: - * `date`: date/date-time -* result: number - -```js -day of year(date("2019-09-17")) -// 260 -``` - -## week of year() - -Returns the Gregorian number of the week within the year, according to ISO 8601. - -* parameters: - * `date`: date/date-time -* result: number - -```js -week of year(date("2019-09-17")) -// 38 -``` - -## month of year() - -Returns the month of the year according to the Gregorian calendar. Note that it always returns the English name of the month. - -* parameters: - * `date`: date/date-time -* result: string - -```js -month of year(date("2019-09-17")) -// "September" -``` diff --git a/versioned_docs/version-1.3/reference/feel/language-guide/feel-data-types.md b/versioned_docs/version-1.3/reference/feel/language-guide/feel-data-types.md deleted file mode 100644 index 5f317241657..00000000000 --- a/versioned_docs/version-1.3/reference/feel/language-guide/feel-data-types.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -id: feel-data-types -title: Data Types -description: "This document outlines data types, including null, number, string, boolean, and more." ---- - -A value can have one of the following types. - -### Null - -Nothing, null, or nil (i.e. the value is not present). - -Some operations/functions return `null` if an argument is not valid or the types don't match. - -* Java Type: `null` - -```js -null -``` - -### Number - -A whole or floating point number. - -* not-a-number (NaN), positive/negative infinity are represented as `null` -* Java Type: `java.math.BigDecimal` - -```js -1 -2.3 -.4 -``` - -### String - -A sequence of characters enclosed in double quotes `"`. The sequence can also contain escaped characters starting with `\` (e.g. `\'`, `\"`, `\\`, `\n`, `\r`, `\t`, unicode like `\u269D` or `\U101EF`). - -* Java Type: `java.lang.String` - -```js -"valid" -``` - -### Boolean - -A boolean value. It is either true or false. - -* Java Type: `java.lang.Boolean` - -```js -true -false -``` - -### Date - -A date value without a time component. - -* Format: `yyyy-MM-dd`. -* Java Type: `java.time.LocalDate` - -```js -date("2017-03-10") -``` - -### Time - -A local or zoned time. The time can have an offset or time zone id. - -* Format: `HH:mm:ss` / `HH:mm:ss+/-HH:mm` / `HH:mm:ss@ZoneId` -* Java Type: `java.time.LocalTime` / `java.time.OffsetTime` - -```js -time("11:45:30") -time("13:30") - -time("11:45:30+02:00") - -time("10:31:10@Europe/Paris") -``` - -### Date-Time - -A date with a local or zoned time component. The time can have an offset or time zone id. - -* Format: `yyyy-MM-dd'T'HH:mm:ss` / `yyyy-MM-dd'T'HH:mm:ss+/-HH:mm` / `yyyy-MM-dd'T'HH:mm:ss@ZoneId` -* Java Type: `java.time.LocalDateTime` / `java.time.ZonedDateTime` - -```js -date and time("2015-09-18T10:31:10") - -date and time("2015-09-18T10:31:10+01:00") - -date and time("2015-09-18T10:31:10@Europe/Paris") -``` - -### Day-Time-Duration - -A duration based on seconds. It can contain days, hours, minutes, and seconds. - -* Format: `PxDTxHxMxS` -* Java Type: `java.time.Duration` - -```js -duration("P4D") -duration("PT2H") -duration("PT30M") -duration("P1DT6H") -``` - -### Year-Month-Duration - -A duration based on the calendar. It can contain years and months. - -* Format: `PxYxM` -* Java Type: `java.time.Period` - -```js -duration("P2Y") -duration("P6M") -duration("P1Y6M") -``` - -### List - -A list of elements. Can be empty. - -* Java Type: `java.util.List` - -```js -[] -[1,2,3] -["a","b"] - -[["list"], "of", [["lists"]]] -``` - -### Context - -A list of key-value-pairs. Can be empty.
- -* Java Type: `java.util.Map` - -```js -{} -{"a": 1} -{"b": 2, "c": "valid"} - -{"nested": {"d": 3}} -``` diff --git a/versioned_docs/version-1.3/reference/feel/language-guide/feel-expression.md b/versioned_docs/version-1.3/reference/feel/language-guide/feel-expression.md deleted file mode 100644 index ad3b85e4f1f..00000000000 --- a/versioned_docs/version-1.3/reference/feel/language-guide/feel-expression.md +++ /dev/null @@ -1,378 +0,0 @@ ---- -id: feel-expression -title: Expressions ---- - -An expression can contain literals, operators, and function calls. - -### Literal - -A single value of one of the [types](feel-data-types.md). - -```js -null -21 -"valid" -``` - -### Path expression - -Access a value by its name/path. For example, a given variable from the input/context. - -```js -x + y -``` - -If the value is a context (or data object/POJO,) the inner values can be accessed by `context.key`. - -```js -x.y -// return 1 if x is {y: 1} -``` - -Also, directly on a context. - -```js -{x: 2}.x -// 2 - -{x: {y: "valid"}}.x -// {y: "valid"} - -{x: {y: "valid"}}.x.y -// "valid" -``` - -Inside a context, the previous values can be accessed. - -```js -{ - a: 1, - b: 2, - c: a + b -} -``` - -If the name or path contains any special character (e.g. whitespace, dash, etc.,) the name needs to be wrapped into single backquotes/backtick `` `foo bar` ``. - -```js -`name with whitespace`.`name+operator` -``` - -### Addition - -* Supported types: number, string, day-time-duration, year-month-duration - -```js -2 + 3 -// 5 - -"foo" + "bar" -// "foobar" - -duration("P1D") + duration("PT6H") -// duration("P1DT6H") -``` - -### Subtraction - -* Supported types: number, time, date-time, day-time-duration, year-month-duration - -```js -5 - 3 -// 2 - -time("10:30:00") - time("09:00:00") -// duration("PT1H30M") - -time("10:30:00") - duration("PT1H") -// time("09:30:00") -``` - -### Multiplication - -* Supported types: number, day-time-duration, year-month-duration - -```js -5 * 3 -// 15 - -3 * duration("P2Y") -// duration("P6Y") -``` - -### Division - -* Supported types: number, day-time-duration, year-month-duration - -```js -6 / 2 -// 3 - -duration("P1Y") / 2 -// duration("P6M") - -duration("P1Y") / duration("P1M") -// 12 -``` - -### Exponentiation - -* Supported types: number - -```js -2 ** 3 -// 8 -``` - -### Comparison - -| operator | symbol | example | -|----------|-----------------|---------| -| equal to | `=` | `x = "valid"` | -| not equal to | `!=` | `x != "valid"` | -| less than | `<` | `< 10` | -| less than or equal | `<=` | `<= 10` | -| greater than | `>` | `> 10` | -| greater than or equal | `>=` | `>= 10` | -| between | `between _ and _` | `x between 3 and 9` | - -The operators less than, greater than, and between are only supported for: - * Number - * Date - * Time - * Date-time - * Year-month-duration - * Day-time-duration - -Any value can be compared with `null` to check if it's equal to `null` or if it exists. Comparing `null` to a value different from `null` results in `false`. It returns `true` if the value, or the context entry (e.g. the property of a variable) is `null` or doesn't exist. The built-in function [is defined()](../builtin-functions/feel-built-in-functions-boolean.md#is-defined) can be used to differentiate between a `null` value and a value that doesn't exist. 
- -```js -null = null -// true - -"foo" = null -// false - -x = null -// true - if "x" is null or doesn't exist - -x.y = null -// true - if "x" is null, "x" doesn't exist, -// "y" is null, or "x" has no property "y" -``` - -### Disjunction and conjunction - -Combine two boolean values. - -```js -true and true -// true - -true and false -// false - -true and null -// null - -false and null -// false -``` - -```js -true or false -// true - -false or false -// false - -true or null -// true - -false or null -// null -``` - -### If expression - -```js -if (x < 5) then "low" else "high" -``` - -### For expressions - -Iterate over a list and apply an expression (aka `map`). The result is again a list. - -```js -for x in [1,2] return x * 2 -// [2,4] -``` - -Iterate over multiple lists. - -```js -for x in [1,2], y in [3,4] return x * y -// [3,4,6,8] -``` - -Iterate over a range - forward or backward. - -```js -for x in 1..3 return x * 2 -// [2,4,6] - -for x in 3..1 return x * 2 -// [6,4,2] -``` - -The previous results of the iterator can be accessed by the variable `partial`. - -```js -for x in 1..5 return x + sum(partial) -// [1,3,7,15,31] -``` - -### Some/every expression - -Test if at least one element of the list satisfies the expression. - -```js -some x in [1,2,3] satisfies x > 2 -// true - -some x in [1,2,3] satisfies x > 3 -// false - -some x in [1,2], y in [2,3] satisfies x < y -// true -``` - -Test if all elements of the list satisfy the expression. - -```js -every x in [1,2,3] satisfies x >= 1 -// true - -every x in [1,2,3] satisfies x >= 2 -// false - -every x in [1,2], y in [2,3] satisfies x < y -// false -``` - -### Filter expression - -Filter a list of elements by an expression. The expression can access the current element by `item`. The result is a list again. - -```js -[1,2,3,4][item > 2] -// [3,4] -``` - -An element of a list can be accessed by its index. The index starts at `1`. A negative index starts at the end with `-1`. - -```js -[1,2,3,4][1] -// 1 - -[1,2,3,4][4] -// 4 - -[1,2,3,4][-1] -// 4 - -[1,2,3,4][-2] -// 3 - -[1,2,3,4][5] -// null -``` - -If the elements are contexts, the nested value of the current element can be accessed directly by its name. - -```js -[ {a: "foo", b: 5}, {a: "bar", b: 10} ][b > 7] -// {a : "bar", b: 10} -``` - -The nested values of a specific key can be extracted by `.key`. - -```js -[ {a : "foo", b: 5 }, {a: "bar", b: 10} ].a -// ["foo", "bar"] -``` - -### Evaluate a unary test - -Evaluates a [unary-tests expression](../feel-unary-tests) with the given value. - -```js -x in (2..4) - -x in < 3 -``` - -### Instance-of expression - -Checks the type of the value. - -```js -"foo" instance of number -// false - -"bar" instance of string -// true -``` - -### Functions - -Invoke a user-defined or built-in function by its name. The arguments can be passed positionally or by name. - -```js -add(1,2) -// or -add(x:1, y:2) -``` - -A function (body) can be defined using `function(arguments) expression`. For example, inside a context. - -```js -{ - add : function(x,y) x + y -} -``` - -### Special properties - -Values of type date, time, date-time, and duration have special properties to access their individual parts.
- -```js -date("2017-03-10").year -date("2017-03-10").month -date("2017-03-10").day -date("2017-03-10").weekday - -time("11:45:30+02:00").hour -time("11:45:30+02:00").minute -time("11:45:30+02:00").second -time("11:45:30+02:00").time offset - -date and time("2017-03-10T11:45:30+02:00").year -date and time("2017-03-10T11:45:30+02:00").month -date and time("2017-03-10T11:45:30+02:00").day -date and time("2017-03-10T11:45:30+02:00").weekday -date and time("2017-03-10T11:45:30+02:00").hour -date and time("2017-03-10T11:45:30+02:00").minute -date and time("2017-03-10T11:45:30+02:00").second -date and time("2017-03-10T11:45:30+02:00").time offset -date and time("2017-03-10T11:45:30+02:00").timezone - -duration("P2Y3M").years -duration("P2Y3M").months - -duration("P1DT2H10M30S").days -duration("P1DT2H10M30S").hours -duration("P1DT2H10M30S").minutes -duration("P1DT2H10M30S").seconds -``` diff --git a/versioned_docs/version-1.3/reference/feel/language-guide/feel-unary-tests.md b/versioned_docs/version-1.3/reference/feel/language-guide/feel-unary-tests.md deleted file mode 100644 index f88c5843c2d..00000000000 --- a/versioned_docs/version-1.3/reference/feel/language-guide/feel-unary-tests.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -id: feel-unary-tests -title: Unary-Tests ---- - -Unary-Tests can be used only for input entries of a decision table. They are a special kind of expression with additional operators. The operators get the value of the input expression implicitly as the first argument. - -The result of the expression must be either `true` or `false`. - -A unary-tests expression is `true` if one of the following conditions is fulfilled: -* The expression evaluates to `true` when the input value is applied to it. -* The expression evaluates to a list and the input value is equal to at least one of the values in that list. -* The expression evaluates to a value and the input value is equal to that value. - -### Comparison - -Compare the input value to `x`. - -| operator | symbol | example | -|----------|-----------------|---------| -| equal to | (none) | `"valid"` | -| less than | `<` | `< 10` | -| less than or equal | `<=` | `<= 10` | -| greater than | `>` | `> 10` | -| greater than or equal | `>=` | `>= 10` | - -* Less than/greater than are only supported for: - * Number - * Date - * Time - * Date-time - * Year-month-duration - * Day-time-duration - -### Interval - -Test if the input value is within the interval from `x` to `y`. - -An interval can be open `(x..y)` / `]x..y[` or closed `[x..y]`. If the interval is open, the boundary values are not included. - -```js -(2..5) -// input > 2 and input < 5 - -[2..5] -// input >= 2 and input <= 5 - -(2..5] -// input > 2 and input <= 5 -``` - -### Disjunction - -Test if at least one of the expressions is `true`. - -```js -2, 3, 4 -// input = 2 or input = 3 or input = 4 - -< 10, > 50 -// input < 10 or input > 50 -``` - -### Negation - -Test if the expression is `false`. - -```js -not("valid") -// input != "valid" - -not(2, 3) -// input != 2 and input != 3 -``` - -### Expression - -It is also possible to use a boolean [expression](../feel-expression) instead of an operator. For example, invoking a built-in function. - -The input value can be accessed by the special variable `?`.
- -```js -ends with(?, "@camunda.com") -// test if the input value (string) ends with "@camunda.com" - -list contains(?, "invalid") -// test if the input value (list) contains "invalid" -``` diff --git a/versioned_docs/version-1.3/reference/feel/sidebar-schema.js b/versioned_docs/version-1.3/reference/feel/sidebar-schema.js deleted file mode 100644 index c803ffe1c56..00000000000 --- a/versioned_docs/version-1.3/reference/feel/sidebar-schema.js +++ /dev/null @@ -1,19 +0,0 @@ -module.exports = { - "FEEL expressions": [ - 'reference/feel/what-is-feel', - 'reference/feel/language-guide/feel-data-types', - 'reference/feel/language-guide/feel-unary-tests', - 'reference/feel/language-guide/feel-expression', - { - "Built-in functions": [ - 'reference/feel/builtin-functions/feel-built-in-functions-conversion', - 'reference/feel/builtin-functions/feel-built-in-functions-boolean', - 'reference/feel/builtin-functions/feel-built-in-functions-string', - 'reference/feel/builtin-functions/feel-built-in-functions-numeric', - 'reference/feel/builtin-functions/feel-built-in-functions-list', - 'reference/feel/builtin-functions/feel-built-in-functions-context', - 'reference/feel/builtin-functions/feel-built-in-functions-temporal' - ], - }, - ], -}; diff --git a/versioned_docs/version-1.3/reference/feel/what-is-feel.md b/versioned_docs/version-1.3/reference/feel/what-is-feel.md deleted file mode 100644 index e8c4edc82cc..00000000000 --- a/versioned_docs/version-1.3/reference/feel/what-is-feel.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -id: what-is-feel -title: What is FEEL? -description: "FEEL is a part of the DMN specification of the OMG to write expressions for decision tables and literal expressions in a simple way." ---- - -Friendly Enough Expression Language (FEEL) is a part of the [DMN specification](http://www.omg.org/spec/DMN/) of the OMG. It is designed to write expressions for decision tables and literal expressions in a simple way that is easily understood by business professionals and developers. - -## Unary tests vs. expression - -FEEL has two entry points: unary-tests and expressions. - -### Unary tests - -Unary-Tests can be used only for input entries of a decision table. They are a special kind of expression with a different grammar. The expression gets the value of the input expression implicitly as the first argument. The result of the expression must be either `true` or `false`. - -Examples: - -```js -< 7 -// input less than 7 - -not(2,4) -// input is not 2 or 4 - -[date("2015-09-17")..date("2015-09-19")] -// input is between '2015-09-17' and '2015-09-19' - -<= duration("P1D") -// input is less or equal one day -``` - -### Expression - -Expressions can be used everywhere (e.g. in a decision table as input expression or output entry). An expression takes no implicit arguments like unary-tests. - -Examples: - -```js -applicant.monthly.income * 12 - -if applicant.maritalStatus in ("M","S") then "valid" else "not valid" - -sum( [applicant.monthly.repayments, applicant.monthly.expenses] ) - -sum( credit_history[record_date > date("2011-01-01")].weight ) - -some ch in credit_history satisfies ch.event = "bankruptcy" -``` diff --git a/versioned_docs/version-1.3/reference/glossary.md b/versioned_docs/version-1.3/reference/glossary.md deleted file mode 100644 index d288267ede2..00000000000 --- a/versioned_docs/version-1.3/reference/glossary.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -id: glossary -title: "Glossary" -description: "This section defines common terminology referenced within the documentation." 
---- - -### Bridge - -Synonym for "[connector](#connector)". - -### Broker - -A broker is an instance of a Zeebe installation which executes processes and manages process state. A single broker is installed on a single machine. - -- [Architecture](/components/zeebe/technical-concepts/architecture.md#brokers) - -### Client - -A client interacts with the Zeebe broker on behalf of the business application. Clients poll for work from the broker. - -- [Architecture](/components/zeebe/technical-concepts/architecture.md#clients) - -### Cluster - -A cluster represents a configuration of one or more brokers collaborating to execute processes. Each broker in a cluster acts as a leader or a follower. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md) - -### Command - -A command represents an action to be taken or executed. Example commands include: deploy a process, execute a process, etc. - -- [Internal processing](/components/zeebe/technical-concepts/internal-processing.md#events-and-commands) - -### Connector - -A piece of software that connects the workflow engine with some other system or infrastructure. The connector might be uni- or bidirectional and possibly includes a [job worker](#job-worker). The boundary between connectors and job workers can be fuzzy, but in general, connectors connect to other active pieces of software. For example, a DMN connector might connect to a managed DMN Engine, while a DMN worker will use a DMN library to directly execute decisions. - -### Correlation - -Correlation refers to the act of matching a message with an inflight process instance. - -- [Message correlation](/components/concepts/messages.md) - -### Correlation key - -A correlation key is an attribute within a message used to match this message against a certain variable within an inflight process instance. If the value of the correlation key matches the value of the variable within the process instance, the message is matched to this process instance. - -- [Message correlation](/components/concepts/messages.md) - -### Deployment - -A process cannot execute unless it is known by the broker. Deployment is the process of pushing or deploying processes to the broker. - -- [Zeebe Deployment](/self-managed/overview.md) - -### Event - -An event represents a state change associated with an aspect of an executing process instance. Events capture variable changes, state transitions in process elements, etc. An event is represented by a timestamp, the variable name, and variable value. Events are stored in an append-only log. - -- [Internal processing](/components/zeebe/technical-concepts/internal-processing.md#events-and-commands) - -### Exporter - -An exporter represents a sink to which Zeebe will submit all records within the log. This gives users of Zeebe an opportunity to persist records with the log for future use as this data will not be available after log compaction. - -- [Exporter](/components/zeebe/open-source/exporters.md) - -### Follower - -In a clustered environment, a broker which is not a leader is a follower of a given partition. A follower can become the new leader when the old leader is no longer reachable. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) - -### Gateway - -Clients communicate with the Zeebe cluster through a gateway. The gateway provides a gRPC API and forwards client commands to the cluster. Depending on the setup, a gateway can be embedded in the broker or can be configured to be standalone.
- -- [Architecture](/components/zeebe/technical-concepts/architecture.md#gateways) - -### Incident - -An incident represents an error condition which prevents Zeebe from advancing an executing process instance. Zeebe will create an incident if there was an uncaught exception thrown in your code and the number of retries of the given step is exceeded. - -- [Incident](/components/concepts/incidents.md) - -### Job - -A job represents a distinct unit of work within a business process. Service tasks represent such -jobs in your process and are identified by a unique id. A job has a type to allow specific job -workers to find jobs that they can work on. - -- [Job workers](/components/concepts/job-workers.md) - -### Job activation timeout - -This is the amount of time the broker will wait for a complete or fail response from the job worker. This comes after a job has been submitted to the job worker for processing and before it marks the job as available again for other job workers. - -- [Job workers](/components/concepts/job-workers.md#requesting-jobs) - -### Job worker - -A special type of client that polls for and executes available jobs. An uncompleted job prevents Zeebe from advancing process execution to the next step. - -- [Job workers](/components/concepts/job-workers.md) - -### Leader - -In a clustered environment, one broker (the leader) is responsible for process execution and housekeeping of data within a partition. Housekeeping includes taking snapshots, replication, and running exports. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) - -### Log - -The log is comprised of an ordered sequence of records written to persistent storage. The log is append-only and is stored on disk within the broker. - -- [Partitions](/components/zeebe/technical-concepts/partitions.md#partition-data-layout) - -### Message - -A message contains information to be delivered to interested parties during execution of a process instance. Messages can be published via Kafka or Zeebe’s internal messaging system. Messages are associated with a timestamp and other constraints such as time-to-live (TTL). - -- [Messages](/components/concepts/messages.md) - -### Partition - -A partition represents a logical grouping of data in a Zeebe broker. This data includes process instance variables stored in RocksDB, commands, and events generated by Zeebe stored in the log. The number of partitions is defined by configuration. - -- [Partitions](/components/zeebe/technical-concepts/partitions.md) - -### Process - -A process is a defined sequence of distinct steps representing your business logic. Examples of a -process could be an e-commerce shopping experience or onboarding a new employee. In Zeebe, -processes are identified by a unique process id. The process is usually also referred to as the -BPMN model. - -- [Processes](/components/concepts/processes.md) - -### Process instance - -While a process represents a defined sequence of distinct steps representing your business logic, a process instance represents a currently executing or completed process. For a single process, there could be many associated process instances in various stages of their execution lifecycle. Process instances are identified by process instance ids. Executing process instances are also sometimes referred to as inflight processes.
- -- [Processes](/components/concepts/processes.md) - -### Process instance variable - -A process instance variable represents the execution state (i.e. data) of a process instance. These variables capture business process parameters which are the input and output of various stages of the process instance and which also influence process flow execution. - -- [Variables](/components/concepts/variables.md) -- [Data flow](/components/modeler/bpmn/data-flow.md) - - -### Record - -A record represents a command or an event. For example, a command to create a new process instance, or a state transition of an executing process instance representing an event at a given point in time, would result in the generation of a record. During the execution lifecycle of a process instance, numerous records are generated to capture the various commands and events. Records are stored in the log. - -- [Internal processing](/components/zeebe/technical-concepts/internal-processing.md#events-and-commands) - -### Replication - -Replication is the act of copying data in a partition from a leader to its followers within a clustered Zeebe installation. After replication, the leader and followers of a partition will have the exact same data. Replication allows the system to be resilient to brokers going down. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) - -### Replication factor - -This is the number of times data in a partition is copied. This depends on the number of brokers in a cluster. A cluster with one leader and two followers has a replication factor of three, as data in each partition needs to have three copies. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) - -### Request timeout - -This is how long a client waits for a response from the broker after the client submits a request. If a response is not received within the client request timeout, the client considers the broker unreachable. - -- [Zeebe API (gRPC)](/apis-tools/grpc.md) - -### Snapshot - -The state of all active process instances (also known as inflight process instances) is stored as records in an in-memory database called RocksDB. A snapshot represents a copy of all data within the in-memory database at any given point in time. Snapshots are binary images stored on disk and can be used to restore the execution state of a process. The size of a snapshot is affected by the size of the data. The size of the data depends on several factors, including complexity of the model or business process, the size and quantity of variables in each process instance, and the total number of executing process instances in a broker. - -- [Resource planning](/self-managed/zeebe-deployment/operations/resource-planning.md#snapshots) - -### Segment - -The log consists of one or more segments. Each segment is a file containing an ordered sequence of records. Segments are deleted when the log is compacted. - -- [Resource planning](/self-managed/zeebe-deployment/operations/resource-planning.md#event-log) - -### Worker - -A worker executes a job. In the Zeebe nomenclature, these are also referred to as job workers. - -- [Job workers](/components/concepts/job-workers.md) - -### Workflow - -See [process](#process). - -### Workflow instance - -See [process instance](#process-instance). - -### Workflow instance variable - -See [process instance variable](#process-instance-variable).
diff --git a/versioned_docs/version-1.3/reference/licenses.md b/versioned_docs/version-1.3/reference/licenses.md deleted file mode 100644 index d1e9cc21871..00000000000 --- a/versioned_docs/version-1.3/reference/licenses.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: licenses -title: "Licenses" -description: "Licensing information for components of Camunda Cloud" ---- - -## Licensing - -This page contains licensing information for all components of Camunda Cloud. - -### Zeebe - -Licenses and license information for Zeebe can be found on the Zeebe project [README](https://github.com/camunda-cloud/zeebe#license). - -### Desktop Modeler - -The source code of the Desktop Modeler is licensed under the MIT license as stated in the [`LICENSE` file](https://github.com/camunda/camunda-modeler/blob/master/LICENSE) in the root of the source code repository. This file is also shipped as `LICENSE.camunda-modeler.txt` with each modeler distribution. - -### Camunda Cloud Documentation - -License information for our documentation can be found in the [LICENSE.txt](https://github.com/camunda-cloud/camunda-cloud-documentation/blob/master/LICENSE.txt) of the Camunda Cloud Documentation repository. - -## Terms & Conditions - -For information not covered by the above license links, please see our [Cloud Terms and Conditions](https://camunda.com/legal/terms/cloud-terms-and-conditions/). \ No newline at end of file diff --git a/versioned_docs/version-1.3/reference/notices.md b/versioned_docs/version-1.3/reference/notices.md deleted file mode 100644 index 2dbe87863fc..00000000000 --- a/versioned_docs/version-1.3/reference/notices.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -id: notices -title: "Security notices" -description: "Let's take a closer look at security notices, reporting vulnerabilities, and additional security information." ---- - -## Security notices - -Camunda publishes security notices after fixes are available. - -### Notice 9 - -#### Publication Date: - -April 11th, 2022 - -#### Product affected: - -Zeebe, Operate, Tasklist, IAM - -#### Impact: - -Zeebe, Operate, Tasklist and IAM are using the Spring framework for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2022-22965 - -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate, Tasklist or IAM allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.11 or <= 1.3.6 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.3.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.7) -- [Zeebe, Operate and Tasklist 1.2.12](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.12) - -### Notice 8 - -#### Publication Date: - -December 31st, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44832. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below.
- - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.8 or <= 1.1.9 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.9](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.9) -- [Zeebe, Operate and Tasklist 1.1.10](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.10) - -### Notice 7 - -#### Publication Date: - -December 31th, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44832. -Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.8 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.9](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.9) - - -### Notice 6 - -#### Publication Date: - -December 22th, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45105. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.7 or <= 1.1.8 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.8](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.8) -- [Zeebe, Operate and Tasklist 1.1.9](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.9) - -### Notice 5 - -#### Publication Date: - -December 22th, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45105. -Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -IAM bundles logback libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-42550. -At this point, Camunda is not aware of any specific attack vector in IAM allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. 
- -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.7 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.8](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.8) - - -### Notice 4 - -#### Publication Date: - -December 17th, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45046. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.6 or <= 1.1.7 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.7) -- [Zeebe, Operate and Tasklist 1.1.8](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.8) - -### Notice 3 - -#### Publication Date: - -December 17th, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45046. -Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -IAM bundles logback libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-42550. -At this point, Camunda is not aware of any specific attack vector in IAM allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.6 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.7) - -### Notice 2 - -#### Publication Date: - -December 14th, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44228. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. 
- - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.5 or <= 1.1.6 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.6](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.6) -- [Zeebe, Operate and Tasklist 1.1.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.7) - -Apply the patches mentioned above or set the JVM option `-Dlog4j2.formatMsgNoLookups=true` - -### Notice 1 - -#### Publication Date: - -December 14th, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44228. -Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. - -Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.5 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.6](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.6) - - -## Report a vulnerability - -Please report security vulnerabilities to Camunda immediately. Please follow the steps on our [Camunda Security page](https://camunda.com/security#report-a-vulnerability) to report a vulnerability. - -## Additional security information - -For more information about security at Camunda, including our security policy, security issue management, and more, see [Camunda.com/security](https://camunda.com/security). diff --git a/versioned_docs/version-1.3/reference/overview.md b/versioned_docs/version-1.3/reference/overview.md deleted file mode 100644 index 40eb98eee6f..00000000000 --- a/versioned_docs/version-1.3/reference/overview.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: overview -title: Overview -sidebar_label: Overview -slug: /reference/ ---- - -This section contains general reference material for Camunda Cloud. - -## Security, support, & license information - -- [Announcements](announcements.md) -- [Licenses](licenses.md) -- [Security notices](notices.md) -- [Service status](status.md) -- [Release policy](release-policy.md) -- [Supported environments](supported-environments.md) -- [Dependencies & Third Party Libraries](dependencies.md) - -## Additional resources - -- [FEEL expressions](feel/what-is-feel.md) -- [Glossary](glossary.md) diff --git a/versioned_docs/version-1.3/reference/release-policy.md b/versioned_docs/version-1.3/reference/release-policy.md deleted file mode 100644 index a21e343ddfd..00000000000 --- a/versioned_docs/version-1.3/reference/release-policy.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: release-policy -title: "Release policy" ---- - -Components of Camunda Cloud follow the [Semantic Versioning standard](https://semver.org/), which defines a version number using the `MAJOR.MINOR.PATCH` pattern. - -- `MAJOR` version can make incompatible API changes. -- `MINOR` version can add functionality in a backwards compatible manner. -- `PATCH` version can make backwards compatible bug fixes. 
- -The Camunda Cloud team strives to release: -- A new minor version of the Camunda Cloud components every three months -- In between minor versions, two alpha releases (to preview the upcoming minor version) - -Camunda Cloud supports the last two released minor versions with -patch releases. Patch releases are offered on a best effort basis for the -currently supported versions. diff --git a/versioned_docs/version-1.3/reference/status.md b/versioned_docs/version-1.3/reference/status.md deleted file mode 100644 index 2acbdd636af..00000000000 --- a/versioned_docs/version-1.3/reference/status.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: status -title: "Camunda Cloud SaaS Status" -description: "Camunda Platform 8 SaaS is a hosted service for the Camunda Platform 8 stack that runs on the Google Cloud Platform (GCP)." ---- - -Camunda Cloud SaaS is a hosted service for the Camunda Cloud Stack that runs on the Google Cloud Platform (GCP). Like any service, it might occasionally undergo availability changes. When availability changes, Camunda makes sure to provide you with a current service status. - -To see current and past service availability, visit [Camunda Cloud SaaS Status](https://status.camunda.io). - -## Subscribe to updates - -Don’t want to check the service status page manually? Get notified about changes to the service status automatically. - -To receive service status updates: - -1. Go to the [Camunda Cloud SaaS Status](https://status.camunda.io) page and click **SUBSCRIBE TO UPDATES**. -1. Select **Atom and RSS feeds**. -1. Add the feed URL to your favourite Atom/RSS reader. -1. After you subscribe to updates, you are notified whenever a service status update is posted. - -## Support - -Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, use the [Camunda Cloud community forum](https://forum.camunda.io/). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/support/). diff --git a/versioned_docs/version-1.3/reference/supported-environments.md b/versioned_docs/version-1.3/reference/supported-environments.md deleted file mode 100644 index a9c6af1c302..00000000000 --- a/versioned_docs/version-1.3/reference/supported-environments.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -id: supported-environments -title: "Supported environments" -description: "Let's take a closer look at supported environments alongside Zeebe, Operate, and Tasklist." ---- - -## Zeebe - -- **Zeebe Broker/Gateway**: The cluster components of Zeebe require OpenJDK 11+ and, if the Elasticsearch exporter is used, Elasticsearch 7.16.x. -- **Zeebe Java Client**: The Java client for Zeebe requires OpenJDK 8+. -- **Zeebe Go Client**: The Go client for Zeebe requires Go 1.13+. -- **zbctl**: The Zeebe CLI supports the latest versions of Windows, MacOS, and Linux. - -## Camunda Operate - -- **Operate Web App/Importer/Archiver**: The server components of Camunda - Operate require OpenJDK 11+ and Elasticsearch 7.16.x. -- **Operate Browser App**: Requires the latest version of Chrome, Firefox, or - Edge on Windows, MacOS, and Linux. - -## Camunda Tasklist - -- **Tasklist Web App/Importer/Archiver**: The server components of Camunda - Tasklist require OpenJDK 11+ and Elasticsearch 7.16.x. -- **Tasklist Browser App**: Requires the latest version of Chrome, Firefox, or - Edge on Windows, MacOS, and Linux. - - -## Optimize - -Run Camunda Optimize in a Java-runnable environment.
The following environments are supported: - -### Web Browser - -- Google Chrome latest [recommended] -- Mozilla Firefox latest -- Microsoft Edge latest - -### Elasticsearch - -- Elasticsearch 7.8.0+, 7.9.0+, 7.10.0+, 7.11.0+, 7.12.0+, 7.13.0+, 7.14.0+, 7.15.0+, 7.16.2+ -- Any minor version above the ones listed in the previous point is likely to be supported as well, but this hasn't been tested. For this reason, Camunda doesn't give any warranty. -- Any major version smaller or greater than ElasticSearch 7 will be rejected by Optimize. For example, Optimize won't work with ElasticSearch 6.X or 8.X. -- For the supported versions mentioned before, the Elasticsearch community as well as any professional version is supported. However, bear in mind that the professional edition comes with additional safety features that allow you to secure Elasticsearch. If you use the community edition, securing Elasticsearch needs to be done manually. - -### Java Runtime - -Optimize tries to support LTS versions of Java for as long as reasonably possible. Non-LTS versions newer than the supported LTS versions may also work, but we recommend using one of the releases listed below. - -- Oracle JDK/JRE 11 -- Open JDK/JRE 11 including builds of the following products: - - Adopt OpenJDK - -### Docker - -[Docker CE](https://docs.docker.com/install/) 17.03 or newer - -### DMN - Decision Model and Notation Standard - -DMN [1.1](https://www.omg.org/spec/DMN/1.1), [1.2](https://www.omg.org/spec/DMN/1.2) or [1.3](https://www.omg.org/spec/DMN/1.3) - -### Camunda Platform - -Production versions of the Camunda Engine version 7.14.0+, 7.15.0+ and 7.16.0+ with REST API and history with level `full` enabled are supported. [Development (alpha) versions](https://docs.camunda.org/enterprise/release-policy/#community-vs-enterprise-releases) are not supported. For optimal performance, we always recommend running the latest version of the Camunda Engine. To ensure correct logging of user operations using the REST API, they should always be performed with user authentication. Alternatively, `restrictUserOperationLogToAuthenticatedUsers` should be set to `false` in the connected engine, this setting allows user operations to be logged even if there is no user authentication context for the request. - -## Desktop Modeler - -Supported on the following platforms: - -Windows 7 / 10 -Mac OS X 10.11 -Ubuntu LTS (latest) -Reported to work on - -Ubuntu 12.04 and newer -Fedora 21 -Debian 8 - -## Web Modeler -### Web Browser - -- Google Chrome latest [recommended] -- Mozilla Firefox latest -- Microsoft Edge latest - diff --git a/versioned_docs/version-1.3/self-managed/iam/deployment/configuration-variables.md b/versioned_docs/version-1.3/self-managed/iam/deployment/configuration-variables.md deleted file mode 100644 index 281065c211d..00000000000 --- a/versioned_docs/version-1.3/self-managed/iam/deployment/configuration-variables.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: configuration-variables -title: "Configuration variables" -sidebar_label: "Configuration variables" ---- - -As IAM is a Spring Boot application, you may use the standard Spring [configuration](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-external-config) methods. - -### Feature control - -| Environment variable | Description | Default value | -| -- | -- | -- | -| FEATURE_LDAP | Toggle LDAP support within IAM. 
| false | - -### Functionality - -| Environment variable | Description | Default value | -| -- | -- | -- | -| ENFORCE_ACCESS_CONTROL | Controls enforcement of permissions for the IAM component. If set to false, all users can access user, role, and permission management. | false | -| ENFORCE_HTTPS | Controls if the URLs specified for client configuration must be `https://`. | true | - -### LDAP - -All LDAP properties are prefixed with `LDAP_`. - -| Environment variable | Description | Default value | -| -- | -- | -- | -| DEFAULT_USERNAME | The username of a default user to initialize IAM with. | - | -| SERVER_URL | The URL at which the LDAP server is reachable. | - | -| DOMAIN | The domain of an Active Directory (AD) LDAP server; only to be set if AD is used. | - | -| MANAGER_DN | The credentials to bind the Camunda Account service to the LDAP server; must be empty if connecting to an AD server. | - | -| MANAGER_PASSWORD | The credentials to bind the Camunda Account service to the LDAP server; must be empty if connecting to an AD server. | - | -| BASE_DN | The start location for LDAP search. If AD is used and this property is empty, this property is determined from configured domain. | - | -| USER_SEARCH_BASE | The start location for user search. Relative to base-dn; must be empty if AD is used. | - | -| USER_SEARCH_FILTER | A filter to restrict the group of users to search in. | - | -| UUID_ATTRIBUTE | The attribute names used on the LDAP server; must be set to an attribute holding a universally unique identifier (UUID) of a user. | - | -| USER_FIRST_NAME_ATTRIBUTE | Used to build the full name of the user. | - | -| USER_LAST_NAME_ATTRIBUTE | Used to build the full name of the user. | - | -| USER_EMAIL_ATTRIBUTE | Used to determine a user's email address used for log in. | - | diff --git a/versioned_docs/version-1.3/self-managed/iam/deployment/making-iam-production-ready.md b/versioned_docs/version-1.3/self-managed/iam/deployment/making-iam-production-ready.md deleted file mode 100644 index ef849feef7b..00000000000 --- a/versioned_docs/version-1.3/self-managed/iam/deployment/making-iam-production-ready.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: making-iam-production-ready -title: "Making IAM production-ready" -sidebar_label: "Making IAM production-ready" ---- - -The IAM component offers a quick start method to swiftly get up and running. This means we handle a few tasks to remove production level complexity. To ensure your IAM instance is ready for use in a production setting, we suggest performing the following tasks. - -### Set the database encryption key variable - -The IAM component stores certain information requiring encryption. By default, if no value is set for the `DATABASE_ENCRYPTION_KEY` environmental variable during each start of the IAM service, a value is generated. - -To maintain a consistent value, set the `DATABASE_ENCRYPTION_KEY` environmental variable to an alpha-numeric string. - -:::tip -We suggest a string length of 32 characters. -::: - -### Set the token signing key variable - -The IAM component generates authentication tokens. To do this, a signing key must be used. By default, if no signing key is provided during each start of the IAM service, one is automatically generated. - -To use authentication tokens generated before a service restart, set the `TOKEN_SIGNING_KEY` environmental value -to a JSON formatted output from a signing key generator. - -:::tip -Unsure how to generate a JSON Web Key? 
Visit the [Nimbus JOSE + JWT documentation](https://connect2id.com/products/nimbus-jose-jwt/generator) for examples. -::: - -### Enable access control - -The IAM component is capable of enforcing access control. However, this functionality is disabled by default. -When access control is disabled, all users, regardless of role and permission assignment, are able to manage users, roles, and permissions. - -To enable access control, set the `ENFORCE_ACCESS_CONTROL` environmental value to `true`. - -### Configure IAM host URLs - -By default, The IAM component exposes the service on `http://localhost:8080`. - -To change the location the IAM component is served from, set `FRONTEND_URL`, `BACKEND_URL`, and `TOKEN_ISSUER` to your chosen URL. - -:::note -The `BACKEND_URL` must be followed by `/api`. For example, `http://localhost:8080/api`. -::: diff --git a/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/accessing-the-ui.md b/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/accessing-the-ui.md deleted file mode 100644 index 86ea78b5733..00000000000 --- a/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/accessing-the-ui.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: accessing-the-ui -title: "Accessing the UI" -sidebar_label: "Step 3: Accessing the UI" ---- - -In this part of the tutorial, we'll show you how to access the login page and log in to the IAM component. - -### Accessing the UI - -Navigate to `localhost:8080` to see the UI exposed by the IAM component. - -![iam-ui](../img/iam-ui.png) - -### Default user - -IAM creates a default user during installation; use this account to log in. - -```text -Username: demo -Password: demo -``` - -### Home screen - -You are directed to the home page once logged in successfully. - -![iam-home-page](../img/iam-home-page.png) diff --git a/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/setup-environment.md b/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/setup-environment.md deleted file mode 100644 index 52bd7c82245..00000000000 --- a/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/setup-environment.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -id: setup-environment -title: "Setup environment" -sidebar_label: "Step 1: Setup environment" ---- - -In this part of the tutorial, we'll show you how you can use containerization to run the IAM application on your machine. Here, it is assumed you have a basic understanding of Docker Compose. - -:::tip -Not sure what Docker Compose is? Review Docker's [Overview of Docker Compose](https://docs.docker.com/compose/) guide. -::: - -### Prerequisites - -- [Docker](https://docs.docker.com/get-docker/) -- [Docker Compose](https://docs.docker.com/compose/install/) - -### Configuration - -To configure, take the following steps: - -1. Navigate to a directory of your choice and create a `docker-compose.yml` file containing the following starting structure: - -```yaml -version: "3.6" - -services: - iam: - image: camunda/iam:latest - ports: - - 8080:8080 - environment: - DEFAULT_CLIENT_CREATE: "false" - IAM_CLIENT_SECRET: [a random 32 char alphanumeric string] - ENFORCE_HTTPS: "false" -``` - -:::note -Here, we set `ENFORCE_HTTPS` to **false** so we can use localhost. We recommend removing this option prior to production use. -::: - -2. IAM requires a database to function. 
Add a database service to your `docker-compose.yml` file: - -```yaml -database: - image: postgres:13.3-alpine - environment: - POSTGRES_DB: iam - POSTGRES_USER: camunda - POSTGRES_PASSWORD: [a random alphanumeric string] - healthcheck: - test: pg_isready -d iam -U camunda - interval: 30s - timeout: 15s - retries: 5 -``` - -:::caution -The IAM application currently only supports PostgreSQL 12+. Additionally, The IAM application generates an encryption key per start. This means the database must be recreated each time. -::: - -3. We'll also need to add new entries to the `services.iam.environment` section to tell IAM where the database is located, and the password for access: - -```yaml - DB_PASSWORD: [the password you entered for `database.POSTGRES_PASSWORD`] - DB_URL: jdbc:postgresql://database:5432/iam -``` - -4. Let's tell Docker Compose that the `iam` service is dependent on the `database` service by adding the following lines under `services.iam`: - -```yaml - depends_on: - - database -``` - -5. Add an override to enable the user management functionality. To do this, add the following line to the `services.iam.environment` section: - -```yaml - FEATURE_USER_MANAGEMENT: "true" -``` - -Your `docker-compose.yml` file should now look like this: - -
    Show complete Docker Compose file - -```yaml -version: "3.6" - -services: - application: - image: camunda/iam:latest - depends_on: - - database - ports: - - 8080:8080 - environment: - DEFAULT_CLIENT_CREATE: "false" - IAM_CLIENT_SECRET: [a random 32 char alphanumeric string] - ENFORCE_HTTPS: "false" - FEATURE_USER_MANAGEMENT: "true" - DB_URL: jdbc:postgresql://database:5432/iam - DB_PASSWORD: [the password you entered for `database.POSTGRES_PASSWORD`] - - database: - image: postgres:13.3-alpine - environment: - POSTGRES_DB: iam - POSTGRES_USER: camunda - POSTGRES_PASSWORD: [a random alphanumeric string] - healthcheck: - test: pg_isready -d iam -U camunda - interval: 30s - timeout: 15s - retries: 5 - -``` -
    - -### Conclusion - -Now that we've configured the containers for the IAM application and the supporting database, let's start the services. diff --git a/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/start-iam.md b/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/start-iam.md deleted file mode 100644 index 34f1485c12a..00000000000 --- a/versioned_docs/version-1.3/self-managed/iam/getting-started/docker/start-iam.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: start-iam -title: "Start IAM" -sidebar_label: "Step 2: Start IAM" ---- - -### Starting the containers - -There are several methods to start the containers. If you have Docker support within your IDE, consult their documentation on run configurations. - -If you are using the command line, use the following global command: - -```shell -docker compose -f /path/to/your/docker-compose.yml up -d -``` - -:::note -If you are using Docker Compose V1, you can use the command `docker-compose`. -::: - -This command starts the `iam` and `database` services. The health of the services can be checked with the following command: - -```shell -docker ps -``` - -Your output should look similar to the following: - -```text -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9c46cfecca05 camunda/iam:latest "java -jar iam.jar" 38 seconds ago Up 35 seconds 0.0.0.0:8080->8080/tcp iam_application_1 -a2174c3fe0e9 postgres:13.3-alpine "docker-entrypoint.s…" 42 seconds ago Up 38 seconds (healthy) 0.0.0.0:15432->5432/tcp iam_database_1 -``` - -:::tip -If the container for the IAM application does not remain healthy, you can use the `CONTAINER ID` to check the logs by running `docker logs `. -::: - -### Conclusion - -Congratulations! You've now started the IAM application. Let's move on to logging in. diff --git a/versioned_docs/version-1.3/self-managed/iam/getting-started/img/iam-home-page.png b/versioned_docs/version-1.3/self-managed/iam/getting-started/img/iam-home-page.png deleted file mode 100644 index 16c8cc461bc..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/iam/getting-started/img/iam-home-page.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/iam/getting-started/img/iam-ui.png b/versioned_docs/version-1.3/self-managed/iam/getting-started/img/iam-ui.png deleted file mode 100644 index 13bc795b0b5..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/iam/getting-started/img/iam-ui.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/iam/what-is-iam.md b/versioned_docs/version-1.3/self-managed/iam/what-is-iam.md deleted file mode 100644 index 913c541d148..00000000000 --- a/versioned_docs/version-1.3/self-managed/iam/what-is-iam.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: what-is-iam -title: "What is IAM?" -sidebar_label: "What is IAM?" ---- - -IAM is the component within the Camunda Cloud stack responsible for authentication and authorization. It allows you to: - -- Manage users -- Manage roles -- Manage permissions - -### That's great, but what do I do now? - -If you're new to IAM, we suggest reviewing [Step 1: Setup environment](../getting-started/docker/setup-environment/) to get started on your journey! 
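-
-To round off the container startup steps above, the following commands are a minimal, illustrative way to keep an eye on the running services while you explore the UI. They only assume the `docker-compose.yml` created in Step 1 and the default port `8080`; adjust the file path to your setup.
-
-```shell
-# Follow the logs of all services defined in your docker-compose.yml
-docker compose -f /path/to/your/docker-compose.yml logs -f
-
-# Quick check that the IAM UI is being served on the default port
-curl -I http://localhost:8080
-```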
diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/authentication.md b/versioned_docs/version-1.3/self-managed/operate-deployment/authentication.md deleted file mode 100644 index 338f917098e..00000000000 --- a/versioned_docs/version-1.3/self-managed/operate-deployment/authentication.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -id: authentication -title: Authentication -description: "Let's take a closer look at how Operate authenticates for use." ---- - -Operate provides three ways to authenticate: - -1. User information stored in [Elasticsearch](#user-in-elasticsearch) -2. [Lightweight Directory Access Protocol (LDAP)](#ldap) -3. [IAM Authentication and Authorization](#iam) - -By default, user storage in Elasticsearch is enabled. - -## User in Elasticsearch - -In this mode, the user authenticates with a username and password stored in Elasticsearch. - -The **Userid**, **displayName**, **password**, and **roles** for one user may be set in `application.yml`: - -``` -camunda.operate: - userId: anUserId - displayName: nameShownInWebpage - password: aPassword - roles: - - OWNER - - USER -``` - -Currently, only `OWNER` and/or `USER` roles are available. - -### Roles for users - -| Name | Description | -| -- | -- | -| OWNER | Full access | -| USER | Read only access | - -On startup of Operate, the user is created if they did not exist before. - -By default, two users are created: - -* Role `OWNER` with **userId**/**displayName**/**password** `demo`/`demo`/`demo`. -* Role `USER` with **userId**/**displayName**/**password** `view`/`view`/`view`. - -Add more users directly to Elasticsearch via the index `operate-user-_`. The password must be encoded with a strong `bcrypt` hashing function. - -## LDAP - -### Enable LDAP - -LDAP can only be enabled by setting the [Spring profile](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-profiles): `ldap-auth`. - -See the following example for setting the Spring profile as an environmental variable: - -``` -export SPRING_PROFILES_ACTIVE=ldap-auth -``` - -### Configuration of LDAP - -A user can authenticate via LDAP. - -The following parameters for connection to an LDAP server should be given: - -| Parameter name | Description | Example | Required | -| -- | -- | -- | -- | -| camunda.operate.ldap.url | URL to an LDAP Server | ldaps://camunda.com/ | Yes | -| camunda.operate.ldap.baseDn| Base domain name | dc=camunda,dc=com| Yes | -| camunda.operate.ldap.managerDn| Manager domain used by Operate to log into LDAP server to retrieve user information | cn=admin,dc=camunda,dc=com | Yes | -| camunda.operate.ldap.managerPassword| Password for manager| | Yes | -| camunda.operate.ldap.userSearchFilter| Filter to retrieve user info. The pattern '{0}' is replaced by the given username in the login form. | {0} | No, default is {0} | -| camunda.operate.ldap.userSearchBase | Starting point for search | ou=Support,dc=camunda,dc=com | No | - -### Configuration of active directory-based LDAP - -For an **active directory**-based LDAP server, the following parameters should be given: - -:::note -The active directory configuration will only be applied when `camunda.operate.ldap.domain` is given. 
-::: - -| Parameter name | Description | Required | -| -- | -- | -- | -| camunda.operate.ldap.url | URL to an active directory LDAP server | Yes | -| camunda.operate.ldap.domain| Domain | Yes | -| camunda.operate.ldap.baseDn| Root domain name | No | -| camunda.operate.ldap.userSearchFilter| Used as a search filter | No | - -## IAM - -[IAM](../../iam/what-is-iam/) provides authentication and authorization functionality along with user management. - -### Enable IAM - -IAM can only be enabled by setting the [Spring profile](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-profiles): `iam-auth`. - -See the following example: - -``` -export SPRING_PROFILES_ACTIVE=iam-auth -``` - -### Configure IAM - -IAM requires the following parameters: - -| Parameter name | Description | Example value | -| -- | -- | -- | -| camunda.operate.iam.issuer | Name/ID of issuer | http://app.iam.localhost | -| camunda.operate.iam.issuerUrl | URL of issuer (IAM) | http://app.iam.localhost | -| camunda.operate.iam.clientId | Similar to a username for the application | operate | -| camunda.operate.iam.clientSecret | Similar to a password for the application | XALaRPl...s7dL7 | - -We provide two different permissions over IAM: read or write. -To configure the authorization, you are required to create two different permissions: - -| Permission value | Description | -| -- | -- | -| `read:*` | Grants the user the permission to access, view, and read the data in the application. | -| `write:*` | Grants the user the permission to perform operations. | - -:::note -The minimum permission needed is `read:*`. Any user without this permission will have access denied to the application. -:::: diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/configuration.md b/versioned_docs/version-1.3/self-managed/operate-deployment/configuration.md deleted file mode 100644 index baaa9583711..00000000000 --- a/versioned_docs/version-1.3/self-managed/operate-deployment/configuration.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -id: configuration -title: Configuration ---- - -Operate is a Spring Boot application. This means every way to [configure](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-external-config) -a Spring Boot application can be applied. - -By default, the configuration for Operate is stored in a YAML file (`application.yml`). All Operate-related settings are prefixed with `camunda.operate`. The following parts are configurable: - -* [Webserver](#webserver) -* [Elasticsearch connection](#elasticsearch) -* [Zeebe Broker connection](#zeebe-broker-connection) -* [Zeebe Elasticsearch Exporter](#zeebe-elasticsearch-exporter) -* [Operation Executor](#operation-executor) -* [Authentication](authentication.md) -* [Scaling Operate](importer-and-archiver.md) -* [Monitoring possibilities](#monitoring-operate) -* [Logging configuration](#logging) - -## Configurations - -### Webserver - -Operate supports customizing the **context-path** using default Spring configuration. - -Example for `application.yml`: -`server.servlet.context-path: /operate` - -Example for environment variable: -`SERVER_SERVLET_CONTEXT_PATH=/operate` - -The default context-path is `/`. - -### Elasticsearch - -Operate stores and reads data in/from Elasticsearch. - -### Settings to connect - -Operate supports [basic authentication](https://www.elastic.co/guide/en/elasticsearch/reference/7.12/setting-up-authentication.html) for Elasticsearch. 
- -Set the appropriate username/password combination in the configuration to use it. - -#### Settings to connect to a secured Elasticsearch instance - -To connect to a secured (https) Elasticsearch instance, you need normally only set the URL protocol part to `https` instead of `http`. A secured Elasticsearch instance needs also `username` and `password`. -The other SSL settings should only be used in case of connection problems, for example disable -host verification. - -:::note -You may need to import the certificate into JVM runtime. -::: - -Either set `host` and `port` (deprecated), or `url` (recommended). - -Name | Description | Default value ------|-------------|-------------- -camunda.operate.elasticsearch.indexPrefix| Prefix for index names | operate -camunda.operate.elasticsearch.clusterName | Cluster name of Elasticsearch | elasticsearch -camunda.operate.elasticsearch.url | URL of Elasticsearch REST API | http://localhost:9200 -camunda.operate.elasticsearch.username | Username to access Elasticsearch REST API | - -camunda.operate.elasticsearch.password | Password to access Elasticsearch REST API | - -camunda.operate.elasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - -camunda.operate.elasticsearch.ssl.selfSigned | Certificate was self-signed | false -camunda.operate.elasticsearch.ssl.verifyHostname | Should the hostname be validated | false - -### A snippet from application.yml - -```yaml -camunda.operate: - elasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - ssl: - selfSigned: true -``` - -## Zeebe broker connection - -Operate needs a connection to the Zeebe broker to start the import and execute user operations. - -### Settings to connect - -Name | Description | Default value ------|-------------|-------------- -camunda.operate.zeebe.gatewayAddress | Gateway address that points to Zeebe as hostname and port. | localhost:26500 - -:::note -Currently, Operate does not support TLS communication with Zeebe. -::: - -### A snippet from application.yml - -```yaml -camunda.operate: - zeebe: - # Gateway host and port - gatewayAddress: localhost:26500 -``` - -## Zeebe Elasticsearch exporter - -Operate imports data from Elasticsearch indices created and filled in by the [Zeebe Elasticsearch Exporter](https://github.com/camunda/camunda/tree/1.3.14/exporters/elasticsearch-exporter). - -Therefore, settings for this Elasticsearch connection must be defined and must correspond to the settings on the Zeebe side. - -### Settings to connect and import - -See also [settings to connect to a secured Elasticsearch instance](#settings-to-connect-to-a-secured-elasticsearch-instance). 
- -Name | Description | Default value ------|-------------|-------------- -camunda.operate.zeebeElasticsearch.clusterName | Cluster name of Elasticsearch | elasticsearch -camunda.operate.zeebeElasticsearch.url | URL of Zeebe Elasticsearch REST API | http://localhost:9200 -camunda.operate.zeebeElasticsearch.prefix | Index prefix as configured in Zeebe Elasticsearch exporter | zeebe-record -camunda.operate.zeebeElasticsearch.username | Username to access Elasticsearch REST API | - -camunda.operate.zeebeElasticsearch.password | Password to access Elasticsearch REST API | - -camunda.operate.zeebeElasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - -camunda.operate.zeebeElasticsearch.ssl.selfSigned | Certificate was self-signed | false -camunda.operate.zeebeElasticsearch.ssl.verifyHostname | Should the hostname be validated | false - -### A snippet from application.yml: - -```yaml -camunda.operate: - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` - -## Operation executor - -Operations are user operations, like cancellation of process instance(s) or updating the variable value. - -Operations are executed in a multi-threaded manner. - -Name | Description | Default value ------|-------------|-------------- -camunda.operate.operationExecutor.threadsCount| How many threads should be used. | 3 - -### A snippet from application.yml - -```yaml -camunda.operate: - operationExecutor: - threadsCount: 3 -``` - -## Monitoring Operate - -Operate includes [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready) inside. This provides the number of monitoring possibilities. - -Operate uses the following Actuator configuration by default: - -```yaml -# Disable default health indicators -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-health-indicators -management.health.defaults.enabled: false -# enable Kubernetes health groups: -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-kubernetes-probes -management.health.probes.enabled: true -# enable health check, metrics and loggers endpoints -management.endpoints.web.exposure.include: health,prometheus,loggers -``` - -With this configuration, the following endpoints are available for use out of the box: - -```:8080/actuator/prometheus``` Prometheus metrics - -```:8080/actuator/health/liveness``` Liveness probe - -```:8080/actuator/health/readiness``` Readiness probe - -### Versions before 0.25.0 - -In versions before 0.25.0, management endpoints look different. Therefore, we recommend reconfiguring for next versions. - -|Name|Before 0.25.0| Starting with 0.25.0| -|----|-------------|--------| -|Readiness|/api/check|/actuator/health/readiness| -|Liveness|/actuator/health|/actuator/health/liveness| - -## Logging - -Operate uses the Log4j2 framework for logging. In the distribution archive, as well as inside a Docker image, `config/log4j2.xml` logging configuration files are included and can be further adjusted to your needs: - -```xml - - - - %clr{%d{yyyy-MM-dd HH:mm:ss.SSS}}{faint} %clr{%5p} %clr{${sys:PID}}{magenta} %clr{---}{faint} %clr{[%15.15t]}{faint} %clr{%-40.40c{1.}}{cyan} %clr{:}{faint} %m%n%xwEx - - - - - - - - - - - - - - - - -``` - -By default, `ConsoleAppender` is used. 
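-
-The bundled configuration can also be left untouched and overridden at startup. The following is a minimal, illustrative sketch that relies on Log4j2's standard `log4j2.configurationFile` system property (the property comes from Log4j2 itself, not Operate); the file path is an example, and `JAVA_TOOL_OPTIONS` is the generic JVM mechanism for passing such options:
-
-```shell
-# Illustrative only: have the JVM load an adjusted copy of the logging
-# configuration instead of the bundled config/log4j2.xml.
-export JAVA_TOOL_OPTIONS="-Dlog4j2.configurationFile=/path/to/custom-log4j2.xml"
-bin/operate
-```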
- -#### JSON logging configuration - -You can choose to output logs in JSON format (Stackdriver compatible). To enable it, define -the environment variable ```OPERATE_LOG_APPENDER``` like this: - -```sh -OPERATE_LOG_APPENDER=Stackdriver -``` - -### Change logging level at runtime - -Operate supports the default scheme for changing logging levels as provided by [Spring Boot](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers). - -The log level for Operate can be changed by following the [Setting a Log Level](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers-setting-level) section. - -#### Set all Operate loggers to DEBUG - -```shell -curl 'http://localhost:8080/actuator/loggers/io.camunda.operate' -i -X POST \ --H 'Content-Type: application/json' \ --d '{"configuredLevel":"debug"}' -``` - -## An example of application.yml file - -The following snippet represents the default Operate configuration, which is shipped with the distribution. This can be found inside the `config` folder (`config/application.yml`) and can be used to adjust Operate to your needs. - -```yaml -# Operate configuration file - -camunda.operate: - # Set operate userId, displayName and password. - # If user with does not exists it will be created. - # Default: demo/demo/demo - userId: anUserId - displayName: nameShownInWebpage - password: aPassword - roles: - - OWNER - - USER - # ELS instance to store Operate data - elasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: http://localhost:9200 - # Zeebe instance - zeebe: - # Gateway address to zeebe - gatewayAddress: localhost:26500 - # ELS instance to export Zeebe data to - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # url - url: http://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/data-retention.md b/versioned_docs/version-1.3/self-managed/operate-deployment/data-retention.md deleted file mode 100644 index b5b8ba239f1..00000000000 --- a/versioned_docs/version-1.3/self-managed/operate-deployment/data-retention.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: data-retention -title: Data retention -description: "Let's take a closer look at how Operate stores and archives data." ---- - -## How the data is stored and archived - -Operate imports data from Zeebe and stores it in Elasticsearch indices with a defined prefix (default: `operate`). Specifically, this includes the following: - -* Deployed processes, including the diagrams -* The state of process instances, including variables and flow nodes, activated within instance execution, incidents, etc. - -It additionally stores some Operate-specific data: - -* Operations performed by the user -* List of users -* Technical data, like the state of Zeebe import, etc. - -The data representing process instance state becomes immutable after the process instance is finished. Currently, the data may be archived, meaning it is moved to a dated index, e.g. `operate_variables_2020-01-01`, where date represents the date on which the given process instance was finished. The same is valid for user operations; after they are finished, the related data is moved to dated indices. - -:::note -All Operate data present in Elasticsearch (from both **main** and **dated** indices) are visible from the UI. -::: - -## Data cleanup - -In case of intensive Zeebe usage, the amount of data can grow significantly overtime. 
Therefore, you should consider the data cleanup strategy. - -Dated indices may be safely removed from Elasticsearch. "Safely" means only finished process instances are deleted together with all related data, and the rest of the data stays consistent. You can use Elasticsearch Curator or other tools/scripts to delete old data. - -:::note -Only indices containing dates in their suffix may be deleted. -::: diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-dashboard-no-processes_dark.png b/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-dashboard-no-processes_dark.png deleted file mode 100644 index eff22424f41..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-dashboard-no-processes_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-dashboard-no-processes_light.png b/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-dashboard-no-processes_light.png deleted file mode 100644 index 15dcacd32f2..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-dashboard-no-processes_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-introduction_dark.png b/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-introduction_dark.png deleted file mode 100644 index e4697160bf7..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-introduction_dark.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-introduction_light.png b/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-introduction_light.png deleted file mode 100644 index bbb389a11bb..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/operate-deployment/img/operate-introduction_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/importer-and-archiver.md b/versioned_docs/version-1.3/self-managed/operate-deployment/importer-and-archiver.md deleted file mode 100644 index f6f71b49582..00000000000 --- a/versioned_docs/version-1.3/self-managed/operate-deployment/importer-and-archiver.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -id: importer-and-archiver -title: Importer and archiver -description: "Let's analyze how Operate is organized by modules to import and archive data." ---- - -Operate consists of three modules: - -* **Webapp**: Contains the UI and operation executor functionality. -* **Importer**: Responsible for importing data from Zeebe. -* **Archiver**: Responsible for archiving "old" data (finished process instances and user operations.) See [data retention](data-retention.md). - -Modules can be run together or separately in any combination and can be scaled. When you run an Operate instance, by default, all modules are enabled. To disable them, use the following configuration parameters: - -Configuration parameter | Description | Default value ------|-------------|-------------- -camunda.operate.importerEnabled | When true, Importer module is enabled. | true -camunda.operate.archiverEnabled | When true, Archiver module is enabled. | true -camunda.operate.webappEnabled | When true, Webapp module is enabled. | true - -Additionally, you can have several importer and archiver nodes to increase throughput. 
Internally, they will spread their work based on Zeebe partitions. - -For example, if your Zeebe runs 10 partitions and you configure two importer nodes, they will import data from five partitions each. - -Each single importer/archiver node must be configured using the following configuration parameters: - -Configuration parameter | Description | Default value ------|-------------|-------------- -camunda.operate.clusterNode.partitionIds | Array of Zeebe partition ids this Importer (or Archiver) node must be responsible for. | Empty array, meaning all partitions data is loaded. -camunda.operate.clusterNode.nodeCount | Total amount of Importer (or Archiver) nodes in the cluster. | 1 -camunda.operate.clusterNode.currentNodeId | Id of current Importer (or Archiver) node, starting from 0. | 0 - -It's enough to configure either `partitionIds` or a pair of `nodeCount` and `currentNodeId`. If you provide `nodeCount` and `currentNodeId`, each node will automatically guess the Zeebe partitions they're responsible for. - -:::note -`nodeCount` always represents the number of nodes of one specific type. -::: - -For example, the configuration of a cluster with one Webapp node, two Importer nodes, and one Archiver node could look like the following: - -``` -Webapp node - -camunda.operate: - archiverEnabled: false - importerEnabled: false - #other configuration... - -Importer node #1 - -camunda.operate: - archiverEnabled: false - webappEnabled: false - clusterNode: - nodeCount: 2 - currentNodeId: 0 - #other configuration... - -Importer node #2 - -camunda.operate: - archiverEnabled: false - webappEnabled: false - clusterNode: - nodeCount: 2 - currentNodeId: 1 - #other configuration... - -Archiver node - -camunda.operate: - webappEnabled: false - importerEnabled: false - -``` - -You can further parallelize archiver and/or importer within one node using the following configuration parameters: - -Configuration parameter | Description | Default value ------|-------------|-------------- -camunda.operate.archiver.threadsCount | Number of threads in which data will be archived. | 1 -camunda.operate.importer.threadsCount | Number of threads in which data will be imported. | 3 - -:::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) * (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. -::: diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/install-and-start.md b/versioned_docs/version-1.3/self-managed/operate-deployment/install-and-start.md deleted file mode 100644 index 969e918088b..00000000000 --- a/versioned_docs/version-1.3/self-managed/operate-deployment/install-and-start.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -id: install-and-start -title: Install and start Operate -description: "Let's get started with Operate with these simple installation steps." ---- - -## Running via Docker (local development) - -You can use the Docker image `camunda/operate:latest` to run Operate as a container. - -Ensure you set the appropriate settings described in the [configuration](../configuration) section of the deployment guide. 
See an example configuration for `docker-compose` below:
-
-```
-operate:
-  container_name: operate
-  image: camunda/operate:latest
-  ports:
-    - 8080:8080
-  environment:
-    - camunda.operate.elasticsearch.url=http://elasticsearch:9200
-    - camunda.operate.zeebeElasticsearch.url=http://elasticsearch:9200
-    - camunda.operate.zeebe.gatewayAddress=zeebe:26500
-```
-
-## Manual configuration (local development)
-
-Here, we’ll walk you through how to download and run an Operate distribution manually without using Docker.
-
-:::note
-The Operate web UI is available by default at [http://localhost:8080](http://localhost:8080). Ensure this port is available.
-:::
-
-### Download Operate and a compatible version of Zeebe
-
-Operate and Zeebe distributions are available for download on the same [release page](https://github.com/camunda-cloud/zeebe/releases).
-
-:::note
-Each version of Operate is compatible with a specific version of Zeebe.
-:::
-
-On the Zeebe release page, compatible versions of Zeebe and Operate are grouped together. Ensure you download and use compatible versions. This is handled for you if you use the Docker profile from our repository.
-
-### Download Elasticsearch
-
-Operate uses open-source Elasticsearch as its underlying data store. Therefore, to run Operate, you must download and run Elasticsearch.
-
-Operate is currently compatible with Elasticsearch 7.16.2. Download Elasticsearch [here](https://www.elastic.co/downloads/past-releases/elasticsearch-7-16-2).
-
-### Run Elasticsearch
-
-To run Elasticsearch, execute the following commands in your terminal or another command line tool of your choice:
-
-```
-cd elasticsearch-*
-bin/elasticsearch
-```
-
-You’ll know Elasticsearch has started successfully when you see a message similar to the following:
-
-```
-[INFO ][o.e.l.LicenseService ] [-IbqP-o] license [72038058-e8ae-4c71-81a1-e9727f2b81c7] mode [basic] - valid
-```
-
-### Run Zeebe
-
-To run Zeebe, execute the following command:
-
-```
-cd zeebe-broker-*
-ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_CLASSNAME=io.camunda.zeebe.exporter.ElasticsearchExporter ./bin/broker
-```
-
-You’ll know Zeebe has started successfully when you see a message similar to the following:
-
-```
-[partition-0] [0.0.0.0:26501-zb-actors-0] INFO io.camunda.zeebe.raft - Joined raft in term 0
-[exporter] [0.0.0.0:26501-zb-actors-1] INFO io.camunda.zeebe.broker.exporter.elasticsearch - Exporter opened
-```
-
-### Run Operate
-
-To run Operate, execute the following command:
-
-```
-cd camunda-operate-distro-1.0.0-*
-bin/operate
-```
-
-You’ll know Operate has started successfully when you see messages similar to the following:
-
-```
-DEBUG 1416 --- [ Thread-6] o.c.o.e.w.BatchOperationWriter : 0 operations locked
-DEBUG 1416 --- [ Thread-4] o.c.o.z.ZeebeESImporter : Latest loaded position for alias [zeebe-record-deployment] and partitionId [0]: 0
-INFO 1416 --- [ Thread-4] o.c.o.z.ZeebeESImporter : Elasticsearch index for ValueType DEPLOYMENT was not found, alias zeebe-record-deployment. Skipping.
-```
-
-## Access the Operate web interface
-
-The Operate web interface is available at [http://localhost:8080](http://localhost:8080).
-
-The first screen you'll see is a sign-in page. Use the credentials `demo` / `demo` to sign in.
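-
-If the sign-in page does not come up, a quick way to see whether Operate itself reports ready is to query the readiness probe described in the [configuration](../configuration) section. This is only an illustrative check and assumes the default port and Actuator settings:
-
-```shell
-# Returns HTTP 200 once Operate reports itself as ready (default port assumed)
-curl -f http://localhost:8080/actuator/health/readiness
-```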
- -After you sign in, you'll see an empty dashboard if you haven't yet deployed any processes: - -![operate-dash-no-processes](img/operate-dashboard-no-processes_light.png) - -If you _have_ deployed processes or created process instances, you'll see them on your dashboard: - -![operate-dash-with-processes](img/operate-introduction_light.png) - -## Update Operate - -To update Operate versions, visit the [update guide](/guides/update-guide/introduction.md). \ No newline at end of file diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/schema-and-migration.md b/versioned_docs/version-1.3/self-managed/operate-deployment/schema-and-migration.md deleted file mode 100644 index f40919f775a..00000000000 --- a/versioned_docs/version-1.3/self-managed/operate-deployment/schema-and-migration.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -id: schema-and-migration -title: Schema and migration ---- -Operate stores data in Elasticsearch. On first start, Operate creates all required indices and templates. - -- [Schema](#schema) -- [Data migration](#data-migration) - - [Concept](#concept) - - [How to migrate](#how-to-migrate) - - [Migrate by using standalone application](#migrate-by-using-standalone-application) - - [Migrate by using built-in automatic upgrade](#migrate-by-using-built-in-automatic-upgrade) - - [Further notes](#further-notes) - - [Configure migration](#configure-migration) - - [Example for migration in Kubernetes](#example-for-migration-in-kubernetes) - -## Schema - -Operate uses several Elasticsearch indices that are mostly created using templates. - -Each index has its own version of schema. This means the version reflected in the index name is *not* the version of Operate. - -Index names follow the defined pattern below: - -``` -{operate-index-prefix}-{datatype}-{schemaversion}_[{date}] - -``` - -Here, `operate-index-prefix` defines the prefix for index name (default `operate`), `datatype` defines which data is stored in the index (e.g. `user`, `variable` etc.,) `schemaversion` represents the index schema version, and `date` represents the finished date of the archived data. See [data retention](data-retention.md). - -Knowing the index name pattern, it's possible to customize index settings by creating Elasticsearch templates. See an [example of an index template](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/indices-templates.html). - -To define the desired number of shards and replicas, define the following template: - -``` -PUT _template/template_operate -{ - "index_patterns": ["operate-*"], - "settings": { - "number_of_shards": 5, - "number_of_replicas": 2 - } -} -``` - -:::note -For these settings to work, the template must be created before Operate runs. -::: - -## Data migration - -The version of Operate is reflected in Elasticsearch object names (e.g. `operate-user-1.0.0_` index contains the user data for Operate 1.0.0). When upgrading from one version of Operate to another, migration of data must be performed. Operate distribution provides an application to perform data migration from older versions. - -### Concept - -The migration uses Elasticsearch [processors](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/ingest-processors.html) and [pipelines](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/pipeline.html) to reindex the data. - -Each version of Operate delivers a set of migration steps which need to be applied for a corresponding version of Operate. 
- -When upgrading from one version to another, necessary migration steps constitute the so-called migration plan. -All known migration steps (both applied and not) are persisted in the dedicated Elasticsearch index: `operate-migration-steps-repository`. - -### How to migrate - -#### Migrate by using standalone application - -Ensure Elasticsearch contains the data Operate is running. The migration script will connect to a specified connection in Operate configuration (```/config/application.yml```). - -Execute ```/bin/migrate``` (or ```/bin/migrate.bat``` for Windows). - -What is expected to happen: - -* New Elasticsearch indices are created if they don't exist. -* If an older version for some or all indices exists, the migration plan is built. -* For each index with an older version, the migration plan is executed. -* Older indices are deleted. - -All known migration steps with metadata are stored in the `operate-migration-steps-repository` index. - -:::note -The old indices are deleted *only* after successful migration. This might require more disk space during the migration process. - -Take care of data backup before performing migration. -::: - -#### Migrate by using built-in automatic upgrade - -When running a newer version of Operate against an older schema, it performs data migration on a startup. -The migration happens for every index, for which it detects exactly **one** older version. Migration fails if it detects more than one older version of some index. - -#### Further notes - -* If migration fails, you can retry it. All applied steps are stored and only those steps are applied that haven't been executed yet. -* Operate should not be running while migration is happening. -* In the case version upgrade is performed in the cluster with several Operate nodes, only one node ([Webapp module](importer-and-archiver.md)) must execute data migration. The others must be stopped and started only after migration is fully finished. - -#### Configure migration - -Automatic migration is enabled by default. It can be disabled by setting the configuration key: - -`camunda.operate.migration.migrationEnabled = false` - -The following migration settings may affect the duration of the migration process: - -1. You can set the batch size for reindex of the documents. This can reduce the time needed to reindex the data. -Small document size means big batch size, while big document size means small batch size. - -`camunda.operate.migration.reindexBatchSize = 5000` (Between 1 and 10.000, Default: 5.000) - -2. In how many slices should the reindex be divided. For each shard used by the index, you normally use a slice. -Elasticsearch decides how many slices are used if the value is set to 0 (automatic). - -`camunda.operate.migration.slices = 0` - Must be positive. Default is 0 (automatic). - -#### Example for migration in Kubernetes - -To ensure the migration is executed *before* Operate is started, use -the [initContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) feature of Kubernetes. - -This ensures only the "main" container is started if the `initContainer` is successfully executed. - -The following snippet of a pod description for Kubernetes shows the usage of `migrate` script as `initContainers`: - -``` -... - labels: - app: operate -spec: - initContainers: - - name: migration - image: camunda/operate:1.0.0 - command: ['/bin/sh','/usr/local/operate/bin/migrate'] - containers: - - name: operate - image: camunda/operate:1.0.0 - env: -... 
-``` diff --git a/versioned_docs/version-1.3/self-managed/operate-deployment/usage-metrics.md b/versioned_docs/version-1.3/self-managed/operate-deployment/usage-metrics.md deleted file mode 100644 index 42c3cb11efd..00000000000 --- a/versioned_docs/version-1.3/self-managed/operate-deployment/usage-metrics.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: usage-metrics -title: Usage metrics -description: "Operate provides usage metrics under usage-metrics Actuator endpoint. It is exposed on management port." ---- - -Operate provides usage metrics under `usage-metrics` Actuator endpoint. It is exposed on management port that can be configured via `management.server.port` configuration parameter (default: 8080). - -## Amount of created process instances - -``` -http://:/actuator/usage-metrics/process-instances?startTime={startTime}&endTime={endTime} -``` - -`startTime` and `endTime` are of format `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`, e.g. "1970-11-14T10:50:26.963-0100". - -Sample response: - -```json -{ - "total" : 99 -} -``` diff --git a/versioned_docs/version-1.3/self-managed/overview.md b/versioned_docs/version-1.3/self-managed/overview.md deleted file mode 100644 index 9047391a694..00000000000 --- a/versioned_docs/version-1.3/self-managed/overview.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: about-self-managed -title: "Camunda Cloud Self-Managed" ---- - -:::note - -Camunda Cloud Self-Managed is not Camunda Platform. If you are looking for Camunda Platform documentation, [click here](https://docs.camunda.org). - -However, Optimize documentation is available for both Camunda Cloud and Camunda Platform, including deployment instructions available [here]($optimize$/self-managed/optimize-deployment/setup/). Look for "Camunda Platform 7" badges to help you understand what content is available for what product. - -::: - -The alternative way to use Camunda Cloud is to host it yourself through Camunda Cloud Self-Managed. - -Building process automation solutions with Camunda Cloud Self-Managed is similar to working with Camunda Cloud SaaS. For more information on Camunda Cloud SaaS, review [What is Camunda Cloud?](../components/concepts/what-is-camunda-cloud.md) If you are new to Camunda Cloud, we recommend you start your journey with [Camunda Cloud SaaS-based guides](../../guides/). - -The content in this section of the documentation will include everything you need to download, configure, and work with each component of Camunda Cloud Self-Managed. Features specific to Camunda Cloud Self-Managed will be documented in this section. - -The following components are available for Camunda Cloud Self-Managed: - -* Zeebe -* Operate -* Tasklist -* Optimize -* IAM (not available in Camunda Cloud SaaS) - -Camunda Cloud Self-Managed users may also use Desktop Modeler to build BPMN diagrams and work with Camunda Forms. - -While documentation for Optimize is available in the Self-Managed section, we continue to refine the experience for Camunda Cloud Self-Managed users. - -:::note Looking for component documentation? -User guides and conceptual content for Camunda Cloud components are available in the [components section](./../../components). 
-::: diff --git a/versioned_docs/version-1.3/self-managed/tasklist-deployment/authentication.md b/versioned_docs/version-1.3/self-managed/tasklist-deployment/authentication.md deleted file mode 100644 index 629b7cc1a6d..00000000000 --- a/versioned_docs/version-1.3/self-managed/tasklist-deployment/authentication.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -id: authentication -title: Authentication -description: "Let's take a closer look at the authentication methods of Tasklist." ---- - -Tasklist provides two ways to authenticate: - -1. User information stored in [Elasticsearch](#user-in-elasticsearch) -2. [IAM Authentication and Authorization](#iam) - -By default, user storage in Elasticsearch is enabled. - -## User in Elasticsearch - -In this mode, the user authenticates with a username and password stored in Elasticsearch. - -The **username**, **password**, and **roles** for one user may be set in application.yml: - -``` -camunda.tasklist: - username: anUser - password: aPassword - roles: - - OWNER - - OPERATOR -``` - -On Tasklist startup, the user is created if they did not exist before. - -By default, two users are created: - -* Role `OWNER` with **userId**/**displayName**/**password** `demo`/`demo`/`demo`. -* Role `USER` with **userId**/**displayName**/**password** `view`/`view`/`view`. - -More users can be added directly to Elasticsearch, to the index `tasklist-user-_`. The password must be encoded with a strong BCrypt hashing function. - -## IAM - -[IAM](../../iam/what-is-iam/) provides authentication and authorization functionality along with user management. - -### Enable IAM - -IAM can only be enabled by setting the [Spring profile](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-profiles): `iam-auth`. - -See the following example: - -``` -export SPRING_PROFILES_ACTIVE=iam-auth -``` - -### Configure IAM - -IAM requires the following parameters: - -Parameter name | Description | Example value ----------------|-------------|--------------- -camunda.tasklist.iam.issuer | Name/ID of issuer | http://app.iam.localhost -camunda.tasklist.iam.issuerUrl | Url of issuer (IAM) | http://app.iam.localhost -camunda.tasklist.iam.clientId | Similar to a username for the application | tasklist -camunda.tasklist.iam.clientSecret | Similar to a password for the application. | XALaRPl...s7dL7 - -We provide two different permissions over IAM: read or write. -To configure the authorization, you are required to create two different permissions: - -Permission value | Description -----------------|------------- -`read:*` | Grants the user the permission to access, view, and read the data in the application. -`write:*` | Grants the user the permission to perform operations. - -Note that the minimum permission needed is `read:*`. Any user without this permission will have access denied. diff --git a/versioned_docs/version-1.3/self-managed/tasklist-deployment/configuration.md b/versioned_docs/version-1.3/self-managed/tasklist-deployment/configuration.md deleted file mode 100644 index 4da90e27413..00000000000 --- a/versioned_docs/version-1.3/self-managed/tasklist-deployment/configuration.md +++ /dev/null @@ -1,305 +0,0 @@ ---- -id: configuration -title: Configuration ---- - -Tasklist is a Spring Boot application. This means all provided ways to [configure](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-external-config) a Spring Boot application can be applied. 
- -By default, the configuration for Tasklist is stored in a YAML file `application.yml`. All Tasklist-related settings are prefixed with `camunda.tasklist`. The following components are configurable: - -* [Webserver](#webserver) -* [GraphQL API access](#graphql-api-access) -* [Elasticsearch connection](#elasticsearch) -* [Zeebe Broker connection](#zeebe-broker-connection) -* [Zeebe Elasticsearch exporter](#zeebe-elasticsearch-exporter) -* [Authentication](authentication.md) -* [Monitoring and health probes](#monitoring-and-health-probes) -* [Logging configuration](#logging) - -## Webserver - -Tasklist supports customizing the **context-path** using the default Spring configuration. - -Example for `application.yml`: -`server.servlet.context-path: /tasklist` - -Example for environment variable: -`SERVER_SERVLET_CONTEXT_PATH=/tasklist` - -Default context-path is `/`. - -## GraphQL API access - -Tasklist provides a GraphQL API under the endpoint `/graphql`. Clients can access this API using a JWT access token in an authorization header `Authorization: Bearer `. - -The Tasklist server requires the following settings to validate the token: - -Setting|Description|Example --------|------------|-------- -camunda.tasklist.client.audience| Tasklist tries to match this with `aud` in JWT. | tasklist.camunda.io -camunda.tasklist.client.clusterId| Tasklist tries to match this with `scope` in JWT. | cafe-0815-0235-a221-21cc6df91dc5 -spring.security.oauth2.resourceserver.jwt.jwk-set-uri (recommended) | Complete URI to get public keys for JWT validation. | https://weblogin.cloud.company.com/.well-known/jwks.json -*OR* | | -spring.security.oauth2.resourceserver.jwt.issuer-uri| URI to get public keys for JWT validation.| https://weblogin.cloud.company.com/ - -The settings can be given in [application.yml](https://github.com/camunda-cloud/tasklist/blob/master/config/application.yml) (eg. `camunda.tasklist.client.audience: tasklist.camunda.io`) or as environment variables (eg. `CAMUNDA_TASKLIST_CLIENT_AUDIENCE=tasklist.camunda.io`). - -The [API client](components/tasklist/userguide/api/overview.md) must obtain the JWT token and send it in each request to `graphql` in an authorization header as described above. - -## Elasticsearch - -Tasklist stores and reads data in/from Elasticsearch. - -### Settings to connect - -Tasklist supports [basic authentication](https://www.elastic.co/guide/en/elasticsearch/reference/7.12/setting-up-authentication.html) for Elasticsearch. Set the appropriate username/password combination in the configuration to use it. - -#### Settings to connect to a secured Elasticsearch instance - -To connect to a secured (https) Elasticsearch instance you need normally only set the URL protocol -part to `https` instead of `http`. A secured Elasticsearch instance needs also `username` and `password`. -The other SSL settings should only be used in case of connection problems, for example disable -host verification. - -:::note -You may need to import the certificate into JVM runtime. 
-::: - -Name | Description | Default value ------|-------------|-------------- -camunda.tasklist.elasticsearch.indexPrefix | Prefix for index names | tasklist -camunda.tasklist.elasticsearch.clusterName | Clustername of Elasticsearch | elasticsearch -camunda.tasklist.elasticsearch.url | URL of Elasticsearch REST API | http://localhost:9200 -camunda.tasklist.elasticsearch.username | Username to access Elasticsearch REST API | - -camunda.tasklist.elasticsearch.password | Password to access Elasticsearch REST API | - -camunda.tasklist.elasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - -camunda.tasklist.elasticsearch.ssl.selfSigned | Certificate was self signed | false -camunda.tasklist.elasticsearch.ssl.verifyHostname | Should the hostname be validated | false - -### Settings for shards and replicas - -Tasklist creates the template with index settings named `tasklist-_template` that Elasticsearch uses for all Tasklist indices. These settings can be changed. - -The following configuration parameters define the settings: - -Name|Description|Default value -----|-----------|-------------- -camunda.tasklist.elasticsearch.numberOfShards| How many shards Elasticsearch uses for all Tasklist indices.| 1 -camunda.tasklist.elasticsearch.numberOfReplicas| How many replicas Elasticsearch uses for all Tasklist indices.| 0 - -These values are applied only on first startup of Tasklist or during version upgrade. After the Tasklist -ELS schema is created, settings may be adjusted directly in the ELS template, and the new settings are applied -to indices created after adjustment. - -### A snippet from application.yml - -```yaml -camunda.tasklist: - elasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - ssl: - selfSigned: true -``` - -## Zeebe broker connection - -Tasklist needs a connection to Zeebe broker to start the import. - -### Settings to connect - -Name | Description | Default value ------|-------------|-------------- -camunda.tasklist.zeebe.gatewayAddress | Gateway address point to Zeebe as hostname and port. | localhost:26500 - -:::note -Currently, Tasklist does not support TLS communication with Zeebe. -::: - -### A snippet from application.yml - -```yaml -camunda.tasklist: - zeebe: - # Gateway address - gatewayAddress: localhost:26500 -``` - -## Zeebe Elasticsearch exporter - -Tasklist imports data from Elasticsearch indices created and filled in by [Zeebe Elasticsearch Exporter](https://github.com/camunda/camunda/tree/1.3.14/exporters/elasticsearch-exporter). - -Therefore, settings for this Elasticsearch connection must be defined and correspond to the settings on the Zeebe side. - -### Settings to connect and import - -See also [settings to connect to a secured Elasticsearch instance](#settings-to-connect-to-a-secured-elasticsearch-instance). 
- -Name | Description | Default value ------|-------------|-------------- -camunda.tasklist.zeebeElasticsearch.clusterName | Cluster name of Elasticsearch | elasticsearch -camunda.tasklist.zeebeElasticsearch.url | URL of Elasticsearch REST API | http://localhost:9200 -camunda.tasklist.zeebeElasticsearch.prefix | Index prefix as configured in Zeebe Elasticsearch exporter | zeebe-record -camunda.tasklist.zeebeElasticsearch.username | Username to access Elasticsearch REST API | - -camunda.tasklist.zeebeElasticsearch.password | Password to access Elasticsearch REST API | - -camunda.tasklist.zeebeElasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - -camunda.tasklist.zeebeElasticsearch.ssl.selfSigned | Certificate was self signed | false -camunda.tasklist.zeebeElasticsearch.ssl.verifyHostname | Should the hostname be validated | false - -### A snippet from application.yml - -```yaml -camunda.tasklist: - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` - -## Monitoring and health probes - -Tasklist includes the [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready) inside, which -provides the number of monitoring possibilities (e.g. health check (http://localhost:8080/actuator/health) and metrics (http://localhost:8080/actuator/prometheus) endpoints). - -Tasklist uses the following Actuator configuration by default: - -```yaml -# disable default health indicators: -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-health-indicators -management.health.defaults.enabled: false -# enable health check, metrics and loggers endpoints -management.endpoints.web.exposure.include: health,prometheus,loggers -# enable Kubernetes health groups: -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-kubernetes-probes -management.health.probes.enabled: true -``` - -With this configuration, the following endpoints are available for use out of the box: - -```:8080/actuator/prometheus``` Prometheus metrics - -```:8080/actuator/health/liveness``` Liveness probe - -```:8080/actuator/health/readiness``` Readiness probe - -### Example snippets to use Tasklist probes in Kubernetes - -For details to set Kubernetes probes parameters, see [Kubernetes configure probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). - -#### Readiness probe as yaml config - -```yaml -readinessProbe: - httpGet: - path: /actuator/health/readiness - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 30 -``` -#### Liveness probe as yaml config - -```yaml -livenessProbe: - httpGet: - path: /actuator/health/liveness - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 30 -``` - -## Logging - -Tasklist uses Log4j2 framework for logging. 
In the distribution archive and inside a Docker image `config/log4j2.xml`, logging configuration files are included and can be further adjusted to your needs: - -```xml - - - - %clr{%d{yyyy-MM-dd HH:mm:ss.SSS}}{faint} %clr{%5p} %clr{${sys:PID}}{magenta} %clr{---}{faint} %clr{[%15.15t]}{faint} %clr{%-40.40c{1.}}{cyan} %clr{:}{faint} %m%n%xwEx - ${env:TASKLIST_LOG_STACKDRIVER_SERVICENAME:-tasklist} - ${env:TASKLIST_LOG_STACKDRIVER_SERVICEVERSION:-} - - - - - - - - - - - - - - - - -``` - -By default, Console Appender is used. - -### JSON logging configuration - -You can choose to output logs in JSON format (Stackdriver compatible). To enable it, define -the environment variable ```TASKLIST_LOG_APPENDER``` like the following: - -```sh -TASKLIST_LOG_APPENDER=Stackdriver -``` - -### Change logging level at runtime - -Tasklist supports the default scheme for changing logging levels as provided by [Spring Boot](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers). - -The log level for Tasklist can be changed by following the [Setting a Log Level](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers-setting-level) section. - -#### Set all Tasklist loggers to DEBUG - -```shell -curl 'http://localhost:8080/actuator/loggers/io.camunda.tasklist' -i -X POST \ --H 'Content-Type: application/json' \ --d '{"configuredLevel":"debug"}' -``` - -## An example of application.yml file - -The following snippet represents the default Tasklist configuration, which is shipped with the distribution. It can be found inside the `config` folder (`config/application.yml`) and can be used to adjust Tasklist to your needs. - -```yaml -# Tasklist configuration file - -camunda.tasklist: - # Set Tasklist username and password. - # If user with does not exists it will be created. - # Default: demo/demo - #username: - #password: - #roles: - # - OWNER - # - OPERATOR - - # ELS instance to store Tasklist data - elasticsearch: - # Cluster name - clusterName: elasticsearch - # url - url: http://localhost:9200 - # Zeebe instance - zeebe: - # Gateway address - gatewayAddress: localhost:26500 - # ELS instance to export Zeebe data to - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # url - url: http://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` diff --git a/versioned_docs/version-1.3/self-managed/tasklist-deployment/img/tasklist-start-screen_light.png b/versioned_docs/version-1.3/self-managed/tasklist-deployment/img/tasklist-start-screen_light.png deleted file mode 100644 index 639381e3eb3..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/tasklist-deployment/img/tasklist-start-screen_light.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/tasklist-deployment/install-and-start.md b/versioned_docs/version-1.3/self-managed/tasklist-deployment/install-and-start.md deleted file mode 100644 index 0357840101a..00000000000 --- a/versioned_docs/version-1.3/self-managed/tasklist-deployment/install-and-start.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -id: install-and-start -title: Install and start Tasklist -description: "Let's get started with Tasklist by installing and running with these simple methods." ---- - -## Running via Docker (local development) - -You can use the Docker image `camunda/tasklist:latest` to run Tasklist as a container. - -:::note -Configure the appropriate settings described in the [configuration](../configuration) section of this deployment guide. 
-::: - -See the following example of configuration for `docker-compose`: - -``` -tasklist: - container_name: tasklist - image: camunda/tasklist:latest - ports: - - 8080:8080 - environment: - - camunda.tasklist.elasticsearch.url=http://elasticsearch:9200 - - camunda.tasklist.zeebeElasticsearch.url=http://elasticsearch:9200 - - camunda.tasklist.zeebe.gatewayAddress=zeebe:26500 -``` - -## Manual configuration (local development) - -Here, we’ll walk you through how to download and run a Tasklist distribution manually, without using Docker. - -:::note -The Tasklist web UI is available by default at [http://localhost:8080](http://localhost:8080). Ensure this port is available. -::: - -### Download Tasklist and a compatible version of Zeebe - -Tasklist and Zeebe distributions are available for download on the same [release page](https://github.com/camunda-cloud/zeebe/releases). - -:::note -Each version of Tasklist is compatible with a specific version of Zeebe. -::: - -On the Zeebe release page, compatible versions of Zeebe and Tasklist are grouped together. Ensure you download and use compatible versions. This is handled for you if you use the Docker profile from our repository. - -### Download Elasticsearch - -Tasklist uses open-source Elasticsearch as its underlying data store. Therefore to run Tasklist, download and run Elasticsearch. - -Tasklist is currently compatible with Elasticsearch 7.16.2. Download Elasticsearch [here](https://www.elastic.co/downloads/past-releases/elasticsearch-7-16-2). - -### Run Elasticsearch - -To run Elasticsearch, execute the following commands in Terminal or another command line tool of your choice: - -``` -cd elasticsearch-* -bin/elasticearch -``` - -You’ll know Elasticsearch has started successfully when you see a message similar to the following: - -``` -[INFO ][o.e.l.LicenseService ] [-IbqP-o] license [72038058-e8ae-4c71-81a1-e9727f2b81c7] mode [basic] - valid -``` - -### Run Zeebe - -To run Zeebe with Elasticsearch Exporter, execute the following commands: - -``` -cd zeebe-broker-* -ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_CLASSNAME=io.camunda.zeebe.exporter.ElasticsearchExporter ./bin/broker -``` - -You’ll know Zeebe has started successfully when you see a message similar to the following: - -``` -[partition-0] [0.0.0.0:26501-zb-actors-0] INFO io.camunda.zeebe.raft - Joined raft in term 0 -[exporter] [0.0.0.0:26501-zb-actors-1] INFO io.camunda.zeebe.broker.exporter.elasticsearch - Exporter opened -``` - -### Run Tasklist - -To run Tasklist, execute the following commands: - -``` -cd zeebe-tasklist* -./bin/tasklist -``` - -You’ll know Tasklist has started successfully when you see messages similar to the following: - -``` -2020-12-09 13:31:41.437 INFO 45899 --- [ main] i.z.t.ImportModuleConfiguration : Starting module: importer -2020-12-09 13:31:41.438 INFO 45899 --- [ main] i.z.t.ArchiverModuleConfiguration : Starting module: archiver -2020-12-09 13:31:41.555 INFO 45899 --- [ main] i.z.t.w.StartupBean : Tasklist Version: 1.0.0 -``` - -## Access the Tasklist web interface - -The Tasklist web interface is available at [http://localhost:8080](http://localhost:8080). - -The first screen you'll see is a sign-in page. Use the credentials `demo` / `demo` to sign in. 
- -If you've already developed user tasks in Zeebe, you can see these on the left panel on the start screen: - -![tasklist-start-screen](img/tasklist-start-screen_light.png) - -## Update Tasklist - -To update Tasklist versions, visit the [user guide](../../components/tasklist/userguide/updating-tasklist.md). diff --git a/versioned_docs/version-1.3/self-managed/tasklist-deployment/taskslist-api.md b/versioned_docs/version-1.3/self-managed/tasklist-deployment/taskslist-api.md deleted file mode 100644 index 4a2767cad68..00000000000 --- a/versioned_docs/version-1.3/self-managed/tasklist-deployment/taskslist-api.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: tasklist-api -title: Tasklist API ---- - -Tasklist offers an API (GraphQL) where clients authenticate via token. Therefore, users can use the API and integrate it into their own systems without having to use Tasklist UI. - -## Self-Managed API - -As there are different authentication methods available for the Self-Managed Tasklist, we don't offer the machine-to-machine (m2m) token. The current approach requires the following: - -- Clients must create a login request: `POST /api/login?username=&password=`. -- Use response headers/cookies in the API GraphQL. - -### Limitations - -As in current Self-Managed versions, we can't differentiate a regular `User` request versus an `API User` request; this imposes some limitations to the Self-Managed Tasklist API: - -- Tasks must be *claimed* by `API User` before any changes are made. -- Tasks must be *assigned* to `API User` itself. You cannot assign these tasks to someone else. -- Tasks can **only** be *completed* if previously claimed/assigned to `API User`. diff --git a/versioned_docs/version-1.3/self-managed/tasklist-deployment/updating-tasklist.md b/versioned_docs/version-1.3/self-managed/tasklist-deployment/updating-tasklist.md deleted file mode 100644 index 6ea238ce058..00000000000 --- a/versioned_docs/version-1.3/self-managed/tasklist-deployment/updating-tasklist.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: updating-tasklist -title: Updating Tasklist -description: "Follow these steps for a successful Tasklist version update." ---- - -When updating Tasklist versions, it's important to consider a few factors: - -* Every Tasklist version supports importing data for the current version and the previous one. For example, if you are running Tasklist `1.3`, your Tasklist imports data from Zeebe `1.2` and `1.3`. -* Before updating the Tasklist version and skipping multiple minor versions, ensure you import all the data from the previous versions. See the sections below for more information. - -## Skipping multiple minor versions - -For example, let's assume a server running Tasklist version `1.0` wants to update to version `1.3`. - -Take the following steps: - -### Progressively update - -1. Update Tasklist and Zeebe to version `1.1`. -2. Let it run for some hours and verify if everything works as expected. -3. Repeat **Step 1** and **Step 2** for version `1.2`. -4. Update both to version `1.3` safely. - -:::note -Depending on your quantity of data, we recommend letting a minor version run for at least 24 hours before updating to the next version. 
-::: diff --git a/versioned_docs/version-1.3/self-managed/tasklist-deployment/usage-metrics.md b/versioned_docs/version-1.3/self-managed/tasklist-deployment/usage-metrics.md deleted file mode 100644 index 22d896a511c..00000000000 --- a/versioned_docs/version-1.3/self-managed/tasklist-deployment/usage-metrics.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: usage-metrics -title: Usage metrics -description: "Tasklist provides usage metrics under usage-metrics Actuator endpoint. It is exposed on management port." ---- - -Tasklist provides usage metrics under `usage-metrics` Actuator endpoint. It is exposed on management port, which can be configured via `management.server.port` configuration parameter (default: 8080). - -## Number of active users - -This endpoint returns the number of unique users assigned to tasks in a given period and each of the unique `usernames`. - -This also returns the `usernames` so we can reconcile in the case of multiple instances. - -Endpoint: - -``` -http://:/actuator/usage-metrics/assignees?startTime={startTime}&endTime={endTime} -``` - -Here, `startTime` and `endTime` are of format `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`, e.g. "1970-11-14T10:50:26.963-0100". - -Sample response: - -```json -{ - "total" : 2, - "assignees": [ - "john.lennon", - "oprah.winfrey" - ] -} -``` diff --git a/versioned_docs/version-1.3/self-managed/troubleshooting/log-levels.md b/versioned_docs/version-1.3/self-managed/troubleshooting/log-levels.md deleted file mode 100644 index 19cdfc14f13..00000000000 --- a/versioned_docs/version-1.3/self-managed/troubleshooting/log-levels.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: log-levels -title: "Log levels" -description: "Learn about what information you can expect in various log levels and how to handle them" ---- - -When working with Camunda Cloud, you may see various messages in your logs. Not all messages require action. - -## Understanding log levels - -Camunda Cloud uses the following log levels: - -* TRACE: Information which is helpful only if you want to trace the execution of a particular component. -* DEBUG: Information which can provide helpful context when debugging. You may see a DEBUG message right after an INFO message to provide more context. -* INFO: Information about the system which is useful for the user (in the case of the broker, the user here is the user deploying it). For example, leader changes, a new node added to or removed from the membership, etc. -* WARN: Expected errors (e.g. connection timeouts, the remote node is unavailable, etc.) which may indicate that parts of the system are not working, and would require attention if they persist, but may resolve by themselves. These should be monitored, but may not require a support ticket. -* ERROR: Errors which require a person to look into them, e.g. log corruption, inconsistent log, anything which could shut down a partition, etc. 
- -## Enable logging - -Enable logging for each component of Camunda Cloud using the following instructions: - -* [Zeebe](../zeebe-deployment/configuration/logging.md) -* [Operate](../operate-deployment/configuration.md#logging) -* [Tasklist](../tasklist-deployment/configuration.md#logging) diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/configuration.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/configuration.md deleted file mode 100644 index 3e79dc0633a..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/configuration.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -id: configuration -title: "Configuration" -sidebar_label: "Overview" -description: "Let's analyze how to configure Zeebe." ---- - -Zeebe can be configured through the following: - -- Configuration files -- Environment variables -- A mix of both - -If both configuration files and environment variables are present, environment variables overwrite settings in configuration files. - -To make small changes to the configuration, we recommend using environment variables. - -To make big changes to the configuration, we recommend using a configuration file. - -The configuration is applied during startup of Zeebe. It is not possible to change the configuration at runtime. - -## Default configuration - -The default configuration is located in `config/application.yaml`. This configuration contains the most common configuration settings for a standalone broker. It also lists the corresponding environment variable for each setting. - -:::note -The default configuration is not suitable for a standalone gateway node. To run a standalone gateway node, take a look at `/config/gateway.yaml.template`. -::: - -## Configuration file templates - -We provide templates that contain all possible configuration settings, along with explanations for each setting: - -- [`config/application.yaml` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/tree/1.3.14/dist/src/main/config/application.yaml) - Default configuration containing only the most common configuration settings. Use this as the basis for a single broker deployment for test or development. -- [`config/broker.standalone.yaml.template` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/tree/1.3.14/dist/src/main/config/broker.standalone.yaml.template) - Complete configuration template for a standalone broker with embedded gateway. Use this as the basis for a single broker deployment for test or development. -- [`config/broker.yaml.template` Broker Node (without embedded gateway)](https://github.com/camunda/camunda/tree/1.3.14/dist/src/main/config/broker.yaml.template) - Complete configuration template for a broker node without embedded gateway. Use this as the basis for deploying multiple broker nodes as part of a cluster. -- [`config/gateway.yaml.template`](https://github.com/camunda/camunda/tree/1.3.14/dist/src/main/config/gateway.yaml.template) - Complete configuration template for a standalone gateway. - -:::note -These templates also include the corresponding environment variables to use for every setting. -::: - -## Editing the configuration - -You can either start from scratch or start from the configuration templates listed above. 
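As a small starting point (a sketch only — the cluster keys below are standard broker settings also referenced on the fixed-partitioning page of this guide; adjust them to your deployment), a trimmed-down `application.yaml` for a single test broker might look like this:

```yaml
# Sketch of a minimal config/application.yaml for a local, single-node test broker.
# Not a complete configuration; start from the templates above for anything serious.
zeebe:
  broker:
    cluster:
      clusterSize: 1
      partitionsCount: 1
      replicationFactor: 1
```

Any of these values can still be overridden by the corresponding environment variable (for example `ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT`), since environment variables take precedence over the configuration file as noted above.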
- -If you use a configuration template and want to uncomment certain lines, make sure to also uncomment their parent elements: - -```yaml -Valid Configuration - - zeebe: - gateway: - network: - # host: 0.0.0.0 - port: 26500 - -Invalid configuration - - # zeebe: - # gateway: - # network: - # host: 0.0.0.0 - port: 26500 -``` - -Uncommenting individual lines is a bit finicky, because YAML is sensitive to indentation. The best way to do it is to position the cursor before the `#` character and delete two characters (the dash and the space). Doing this will consistently give you a valid YAML file. - -When it comes to editing individual settings, two data types are worth mentioning: - -- Data size (e.g. `logSegmentSize`) - - Human-friendly format: `500MB` (or `KB, GB`) - - Machine-friendly format: size in bytes as long -- Timeouts/intervals (e.g. `requestTimeout`) - - Human-friendly format: `15s` (or `m, h`) - - Machine-friendly format: either duration in milliseconds as long, or [ISO-8601 duration](ttps://en.wikipedia.org/wiki/ISO_8601#Durations) format (e.g. `PT15S`) - -## Passing configuration files to Zeebe - -Rename the configuration file to `application.yaml` and place it in the following location: - -```shell script -./config/application.yaml -``` - -### Other ways to specify the configuration file - -Zeebe uses Spring Boot for its configuration parsing. All other ways to [configure a Spring Boot application](https://docs.spring.io/spring-boot/reference/features/external-config.html) should also work. In particular, you can use: - -- `SPRING_CONFIG_ADDITIONAL_LOCATION` to specify an additional configuration file. -- `SPRING_APPLICATION_JSON` to specify settings in JSON format. - -Details can be found in the Spring documentation. - -:::note -We recommend not to use `SPRING_CONFIG_LOCATION` as this will replace all existing configuration defaults. When used inappropriately, some features will be disabled or will not be configured properly. -::: - -If you specify `SPRING_CONFIG_LOCATION`, specify it like this: - - ```shell script - export SPRING_CONFIG_LOCATION='classpath:/,file:./[path to config file]' - ``` - - This will ensure the defaults defined in the classpath resources will be used (unless explicitly overwritten by the configuration file you provide). If you omit the defaults defined in the classpath, some features may be disabled or will not be configured properly. - -## Verifying configuration - -To verify the configuration was applied, start Zeebe and look at the log. - -If the configuration could be read, Zeebe will log out the effective configuration during startup: - -``` -17:13:13.120 [] [main] INFO io.camunda.zeebe.broker.system - Starting broker 0 with configuration { - "network": { - "host": "0.0.0.0", - "portOffset": 0, - "maxMessageSize": { - "bytes": 4194304 - }, - "commandApi": { - "defaultPort": 26501, - "host": "0.0.0.0", - "port": 26501, -... -``` - -In some cases of invalid configuration, Zeebe will fail to start with a warning that explains which configuration setting could not be read. 
- -``` -17:17:38.796 [] [main] ERROR org.springframework.boot.diagnostics.LoggingFailureAnalysisReporter - - -*************************** -APPLICATION FAILED TO START -*************************** - -Description: - -Failed to bind properties under 'zeebe.broker.network.port-offset' to int: - - Property: zeebe.broker.network.port-offset - Value: false - Origin: System Environment Property "ZEEBE_BROKER_NETWORK_PORTOFFSET" - Reason: failed to convert java.lang.String to int - -Action: - -Update your application's configuration -``` - -## Logging - -Zeebe uses Log4j2 framework for logging. In the distribution and the Docker image, find the default log configuration file in `config/log4j2.xml`. - -### Google Stackdriver (JSON) logging - -To enable Google Stackdriver compatible JSON logging, set the environment variable `ZEEBE_LOG_APPENDER=Stackdriver` before starting Zeebe. - -### Default logging configuration - -- `config/log4j2.xml` (applied by default) - -``` -{{#include ../../../dist/src/main/config/log4j2.xml}} -``` - -### Change log level dynamically - -Zeebe brokers expose a [Spring Boot Actuators web endpoint](https://docs.spring.io/spring-boot/docs/current/actuator-api/html/#loggers) -for configuring loggers dynamically. -To change the log level of a logger, make a `POST` request to the `/actuator/loggers/{logger.name}` endpoint as shown in the example below. -Change `io.camunda.zeebe` to the required logger name and `debug` to required log level. - -``` -curl 'http://localhost:9600/actuator/loggers/io.camunda.zeebe' -i -X POST -H 'Content-Type: application/json' -d '{"configuredLevel":"debug"}' -``` - -## Health probes - -Health probes are set to sensible defaults which cover common use cases. - -For specific use cases, it might be necessary to customize health probes: - -- [Gateway health probes](gateway-health-probes.md) - -## Experimental configuration options - -You may have already noticed a special section of Zeebe's configuration templates titled `experimental`. -This section refers to settings which are potentially not backwards compatible. In other words, any configuration setting found there may or may not be dropped in any minor version. - -These settings are there primarily for incubating features and/or very advanced settings for which the team has not found -a good general default configuration. Once one is found, or the incubating feature is promoted, the setting(s) may be moved -into a different section. Only at that point do they fall under the same backwards compatibility guarantees as the rest of -the project. We may choose to drop support for specific experimental configurations in any minor version update. - -Most users should not have to change anything in this section for a good experience. However, if you have a unique set up, or simply wish to try out new experimental features, it can be worth investigating these (ideally with the guidance of the Zeebe community). diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/environment-variables.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/environment-variables.md deleted file mode 100644 index 46a78bbe95c..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/environment-variables.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: environment-variables -title: "Environment variables" -description: "Let's take a closer look at the environment variables for configuration, operators, and developers." 
---- - -## Environment variables for configuration - -As a Spring Boot application, Zeebe supports any standard -[Spring configuration](https://docs.spring.io/spring-boot/reference/features/external-config.html) method. This configuration can be provided as a configuration file, through environment variables, or both. When both sources are used, environment variables have precedence over the configuration file. - -All available environment variables are documented in the [configuration file templates](configuration.md#configuration-file-templates). - -## Environment variables for operators - -The following environment variables are intended for operators: - -- `ZEEBE_LOG_LEVEL`: Sets the log level of the Zeebe Logger (default: `info`). -- `ZEEBE_LOG_APPENDER`: Sets the console log appender (default: `Console`). We recommend using `Stackdriver` if Zeebe runs on Google Cloud Platform to output JSON formatted log messages. - -## Environment variables for developers - -The following environment variables are intended for developers: - -- `SPRING_PROFILES_ACTIVE=dev`: If this is set, the broker starts in a temporary folder and all data is cleaned up upon exit. -- `ZEEBE_DEBUG=true/false`: Activates a `DebugLogExporter` with default settings. The value of the environment variable toggles pretty printing. - -:::note -It is not recommended to use these settings in production. -::: diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/fixed-partitioning.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/fixed-partitioning.md deleted file mode 100644 index 74ba5ca1fb8..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/fixed-partitioning.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: fixed-partitioning -title: "Fixed partitioning" -description: "Manually configure which partitions belong to which brokers." ---- - -Starting with 1.2.0, there is a new experimental configuration option which lets you specify a fixed partitioning scheme; this means you can manually configure which partitions belong to which brokers. - -The partitioning scheme is controlled via a new configuration option under `zeebe.broker.experimental.partitioning`, -more specifically `zeebe.broker.experimental.partitioning.scheme`. This option currently takes the following values: - -- `ROUND_ROBIN`: When set, this applies the round-robin partition distribution, which corresponds to the distribution explained above on this page. _This is the default option, and requires no extra configuration if you want to use it._ -- `FIXED`: When set, this applies a manually configured partition distribution, configured separately. - -To use the `FIXED` partitioning scheme, _you must provide an exhaustive map of all partitions to a set of brokers_. This is achieved via the `zeebe.broker.experimental.partitioning.fixed` configuration option. The example below outlines a cluster of `5` brokers, `3` partitions, and a replication factor of `3`. 
- -```yaml -partitioning: - scheme: FIXED - fixed: - - partitionId: 1 - nodes: - - nodeId: 0 - - nodeId: 2 - - nodeId: 4 - - partitionId: 2 - nodes: - - nodeId: 1 - - nodeId: 3 - - nodeId: 4 - - partitionId: 3 - nodes: - - nodeId: 0 - - nodeId: 2 - - nodeId: 3 -``` - -This configuration will produce the following distribution: - -| | Node 0 | Node 1 | Node 2 | Node 3 | Node 4 | -|------------:|:------:|:------:|:------:|:------:|:------:| -| Partition 1 | X | | X | | X | -| Partition 2 | | X | | X | X | -| Partition 3 | X | | X | X | | - -## Validation - -Each broker performs sanity checks on the `FIXED` configuration provided. Namely, the configuration must uphold the following conditions: - -- All partitions _must be explicitly configured_. -- All partitions configured must have valid IDs, i.e. between 1 and `zeebe.broker.cluster.partitionsCount`. -- All partitions must configure exactly the replicas count, i.e. `zeebe.broker.cluster.replicationFactor`. -- All nodes configured for a partition have a valid node ID, i.e. between 0 and `zeebe.broker.cluster.clusterSize - 1`. -- If priority election is enabled, all priorities configured for a partition are different. - -The broker will fail to start if any of these conditions are not met. - -## Priority election - -If you're using the priority election feature, you must also specify the priorities of each broker. In fact, the broker will fail to start if the nodes do not have different priorities, as otherwise you may encounter lengthy election loops. - -Here is the same example configuration as above, but this time with priorities configured: - -```yaml -partitioning: - scheme: FIXED - fixed: - - partitionId: 1 - nodes: - - nodeId: 0 - priority: 1 - - nodeId: 2 - priority: 2 - - nodeId: 4 - priority: 3 - - partitionId: 2 - nodes: - - nodeId: 1 - priority: 1 - - nodeId: 3 - priority: 3 - - nodeId: 4 - priority: 2 - - partitionId: 3 - nodes: - - nodeId: 0 - priority: 3 - - nodeId: 2 - priority: 2 - - nodeId: 3 - priority: 1 -``` - -:::note -The only condition is that the priorities for the nodes of a given partition must be different from one another. We recommend, however, that you use a simple monotonic increase from 1 to the replica count, as shown above. -::: diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/gateway-health-probes.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/gateway-health-probes.md deleted file mode 100644 index 30b30d86615..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/gateway-health-probes.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -id: gateway-health-probes -title: "Gateway health probes" -description: "This section outlines health status, probes, and responsiveness." ---- - -The health status for a standalone gateway is available at `{zeebe-gateway}:8080/actuator/health`. - -The following health indicators are enabled by default: - -- **Gateway Started** - Checks if the gateway is running (i.e. not currently starting and not yet shut down). -- **Gateway Responsive** - Checks if the gateway can handle a request within a given timeout. -- **Gateway Cluster Awareness** - Checks if the gateway is aware of other nodes in the cluster. -- **Gateway Partition Leader Awareness** - Checks if the gateway is aware of partition leaders in the cluster. -- **Disk Space** - Checks that the free disk space is greater than 10 MB. -- **Memory** - Checks that at least 10% of max memory (heap) is still available. 
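For a quick manual check (a sketch — substitute your gateway's host and port for `localhost:8080`), the endpoint above can be queried directly:

```shell
# Returns the aggregated gateway health status (UP/DOWN), with per-indicator
# details depending on the show-details setting described below.
curl -i 'http://localhost:8080/actuator/health'
```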
- -Health indicators are set to sensible defaults. For specific use cases, it might be necessary to customize health probes. - -## Startup probe - -The started probe is available at `{zeebe-gateway}:8080/actuator/health/startup`. - -In the default configuration this is merely an alias for the **Gateway Started** health indicator. Other configurations are possible (see below). - -## Liveness probe - -The liveness probe is available at `{zeebe-gateway}:8080/actuator/health/liveness`. - -It is based on the health indicators mentioned above. - -In the default configuration, the liveness probe is comprised of the following health indicators: - -- **Gateway Started** - Checks if the gateway is running (i.e. not currently starting and not yet shut down). -- **Liveness Gateway Responsive** - Checks if the gateway can handle a request within an ample timeout, but will only report a `DOWN` health status after the underlying health indicator is down for more than 10 minutes. -- **Liveness Gateway Cluster Awareness** - Based on gateway cluster awareness, but will only report a `DOWN` health status after the underlying health indicator is down for more than five minutes. -- **Liveness Gateway Partition Leader Awareness** - Based on gateway partition leader awareness, but will only report a `DOWN` health status after the underlying health indicator is down for more than five minutes. -- **Liveness Disk Space** - Checks that the free disk space is greater than 1 MB. -- **Liveness Memory** - Checks that at least 1% of max memory (heap) is still available. - -:::note -Health indicators with the _liveness_ prefix are intended to be customized for the liveness probe. This allows defining tighter thresholds (e.g. for free memory 1% for liveness vs. 10% for health), as well as adding tolerance for short downtimes (e.g. gateway has no awareness of other nodes in the cluster for more than five minutes). -::: - -## Customizing health probes - -Global settings for all health indicators: - -- `management.health.defaults.enabled=true` - Enables (default) or disables all health indicators. -- `management.endpoint.health.show-details=always/never` - Toggles whether a summary or details (default) of the health indicators will be returned. - -### Startup probe - -Settings for started probe: - -- `management.endpoint.health.group.startup.show-details=never` - Toggles whether a summary (default) or details of the startup probe will be returned. -- `management.endpoint.health.group.startup.include=gatewayStarted` - Defines which health indicators are included in the startup probe. - -### Liveness probe - -Settings for liveness probe: - -- `management.endpoint.health.group.liveness.show-details=never` - Toggles whether a summary (default) or details of the liveness probe will be returned. -- `management.endpoint.health.group.liveness.include=gatewayStarted,livenessGatewayResponsive,livenessGatewayClusterAwareness,livenessGatewayPartitionLeaderAwareness,livenessDiskSpace,livenessMemory` - Defines which health indicators are included in the liveness probe. - -:::note -The individual contributing health indicators of the liveness probe can be configured as well (see below). -::: - -### Gateway started - -Settings for gateway started health indicator: - -- `management.health.gateway-started.enabled=true` - Enables (default) or disables this health indicator. 
- -### Gateway responsive - -Settings for gateway responsiveness health indicator: - -- `management.health.gateway-responsive.enabled=true` - Enables (default) or disables this health indicator. -- `management.health.gateway-responsive.requestTimeout=500ms` - Defines the timeout for the request; if the test completes before the timeout, the health status is `UP`, otherwise it is `DOWN`. -- `management.health.liveness.gateway-responsive.requestTimeout=5s` - Defines the timeout for the request for liveness probe; if the request completes before the timeout, the health status is `UP`. -- `management.health.liveness.gateway-responsive.maxdowntime=10m` - Defines the maximum downtime before the liveness health indicator for responsiveness will flip. - -### Gateway cluster awareness - -Settings for gateway cluster awareness health indicator: - -- `management.health.gateway-clusterawareness.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.liveness.gateway-clusterawareness.maxdowntime=5m` - Defines the maximum downtime before the liveness health indicator for cluster awareness will flip. In other words, this health indicator will report `DOWN` after the gateway was unaware of other members in the cluster for more than five minutes. - -### Gateway partition leader awareness - -Settings for gateway partition leader awareness health indicator: - -- `management.health.gateway-partitionleaderawareness.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.liveness.gateway-partitionleaderawareness.maxdowntime=5m` - Defines the maximum downtime before the liveness health indicator for partition leader awareness will flip. In other words, this health indicator will report `DOWN` after the gateway was unaware of partition leaders for more than five minutes. - -### Disk space - -This is arguably the least critical health indicator given the standalone gateway does not write to disk. The only exception may be the writing of log files, which depend on the log configuration. - -Settings for disk space health indicator: - -- `management.health.diskspace.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.diskspace.threshold=10MB` - Defines the threshold for the required free disk space. -- `management.health.diskspace.path=.` - Defines the path for which the free disk space is examined. -- `management.health.liveness.diskspace.threshold=1MB` - Defines the threshold for the required free disk space for liveness. -- `management.health.liveness.diskspace.path=.` - Defines the path for which the free disk space for liveness is examined. - -### Memory - -This health indicator examines free memory (heap). - -Settings for memory health indicator: - -- `management.health.memory.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.memory.threshold=0.1` - Defines the threshold for the required free memory. The default is 0.1 which is interpreted as 10% of max memory. -- `management.health.liveness.memory.threshold=0.01` - Defines the threshold for the required free memory for liveness. The default is 0.01 which is interpreted as 10 of max memory. 
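Putting several of the settings above together, a customized configuration could be sketched as follows (standard Spring Boot property format; the threshold values shown are the documented defaults, listed only to illustrate where they would be changed):

```yaml
# Illustrative only — tighten or relax these thresholds to match your environment.
management.endpoint.health.show-details: never
management.health.diskspace.threshold: 10MB
management.health.memory.threshold: 0.1
management.health.liveness.diskspace.threshold: 1MB
management.health.liveness.memory.threshold: 0.01
```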
diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/logging.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/logging.md deleted file mode 100644 index 86f1bb38643..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/logging.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -id: logging -title: "Logging" ---- - -Zeebe uses Log4j2 framework for logging. In the distribution and the Docker image, find the default log configuration file in `config/log4j2.xml`. - -## Google Stackdriver (JSON) logging - -To enable Google Stackdriver compatible JSON logging, set the environment variable `ZEEBE_LOG_APPENDER=Stackdriver` before starting Zeebe. - -## Default logging configuration - -- `config/log4j2.xml` (applied by default) - -```xml - - - - - ${sys:app.home}/logs - %d{yyyy-MM-dd HH:mm:ss.SSS} [%X{actor-name}] [%t] %-5level %logger{36} - %msg%n - ${env:ZEEBE_LOG_STACKDRIVER_SERVICENAME:-} - ${env:ZEEBE_LOG_STACKDRIVER_SERVICEVERSION:-} - - - - - - - - - - - - - - ${log.pattern} - - - - - - - - - - - - - - - - - - - - - - -``` - -## Change log level dynamically - -Zeebe brokers expose a [Spring Boot Actuators web endpoint](https://docs.spring.io/spring-boot/docs/current/actuator-api/html/#loggers) for configuring loggers dynamically. -To change the log level of a logger, make a `POST` request to the `/actuator/loggers/{logger.name}` endpoint as shown in the example below. -Change `io.camunda.zeebe` to the required logger name and `debug` to required log level. - -``` -curl 'http://localhost:9600/actuator/loggers/io.camunda.zeebe' -i -X POST -H 'Content-Type: application/json' -d '{"configuredLevel":"debug"}' -``` diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/priority-election.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/priority-election.md deleted file mode 100644 index 52f00bf0d7d..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/configuration/priority-election.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: priority-election -title: "Priority election" -description: "An alternative to the default raft leader election." ---- -Priority election is an alternative to the default raft leader election, where leader election is implemented by a random timer-based algorithm. - -It aims to achieve a more uniform leader distribution by assigning each node a priority per partition and modifying the election algorithm to ensure nodes with higher priority have a higher chance of becoming leader. - -## Configuration - -Enable priority election by setting `zeebe.broker.raft.enablePriorityElection = "true"` in your config or -by setting the equivalent environment variable `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`. - -If you are using the fixed partitioning scheme (experimental), you may need [additional configuration](fixed-partitioning.md#priority-election). - -## Limitations - -With priority election enabled, election latency and thus failover time increases. - -The result of leader election is not deterministic and priority election can only increase the chance of having a -uniform leader distribution, not guarantee it. - -Factors such as high load can prevent high priority nodes from becoming the leader. 
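For reference, the setting from the configuration section above can be expressed in YAML form roughly as follows (a sketch; equivalent to exporting `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`):

```yaml
# Enables priority election for the broker's Raft-based leader election.
zeebe:
  broker:
    raft:
      enablePriorityElection: true
```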
diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/docker/install.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/docker/install.md deleted file mode 100644 index 20e05ae60f5..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/docker/install.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: install -title: "Docker container" ---- - -This page guides you through the initial installation of the Zeebe broker and next steps for development purposes. - -## Using Docker - -The easiest way to develop with Zeebe is using Docker. Docker provides a consistent environment we recommend for development. - -### Prerequisites - -- Operating system: - - Linux - - Windows/MacOS (development only, not supported for production) -- Docker - -### Docker configurations for docker-compose - -A default Docker Compose configuration to run Zeebe, Operate, and Tasklist is available in the get started repository: [docker-compose.yaml](https://github.com/camunda-cloud/camunda-cloud-get-started/blob/master/docker-compose.yaml). - -Download this file to your local computer, `cd` into that directory, and run `docker-compose up`. - -#### Exposed ports - -- `26500`: Zeebe Gateway API -- [`8080`](http://localhost:8080/): Operate -- [`8081`](http://localhost:8081/): Tasklist - -### Using Docker without docker-compose - -You can run Zeebe with Docker: - -```bash -docker run --name zeebe -p 26500-26502:26500-26502 camunda/zeebe:latest -``` - -This will give you a single broker node. - -#### Exposed ports - -- `26500`: Gateway API -- `26501`: Command API (gateway-to-broker) -- `26502`: Internal API (broker-to-broker) - -#### Volumes - -The default data volume is under `/usr/local/zeebe/data`. It contains -all data which should be persisted. - -#### Configuration - -The Zeebe configuration is located at `/usr/local/zeebe/config/application.yaml`. -The logging configuration is located at `/usr/local/zeebe/config/log4j2.xml`. - -The configuration of the Docker image can also be changed using environment -variables. The configuration template file also contains information on the environment -variables to use for each configuration setting. - -Available environment variables: - -- `ZEEBE_LOG_LEVEL` - sets the log level of the Zeebe Logger (default: `info`). -- `ZEEBE_BROKER_NETWORK_HOST` - sets the host address to bind to instead of the IP of the container. -- `ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS` - sets the contact points of other brokers in a cluster setup. - -## Next steps - -As a next step, you can install Desktop Modeler. - -Desktop Modeler is an open-source desktop BPMN modeling application created specifically for Zeebe. This application gives developers powerful features to design and deploy automated processes, human workflows, decision tables, and decision requirement diagrams using the globally-recognized [BPMN](https://camunda.com/bpmn/) and [DMN](https://camunda.com/dmn/) standards. - -Get started with Desktop Modeler using our [installation guide](/components/modeler/desktop-modeler/install-the-modeler.md). 
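Returning to the container setup described earlier on this page, the environment variables listed above can be passed straight to `docker run` — a sketch (the log level is chosen arbitrarily):

```shell
# Single broker with a custom log level; image name and port mapping as in the example above.
docker run --name zeebe \
  -p 26500-26502:26500-26502 \
  -e ZEEBE_LOG_LEVEL=debug \
  camunda/zeebe:latest
```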
diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/Operate-Login-Page.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/Operate-Login-Page.png deleted file mode 100644 index 402ad9d1346..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/Operate-Login-Page.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/order-process.bpmn b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/order-process.bpmn deleted file mode 100644 index a24a1ae4319..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/order-process.bpmn +++ /dev/null @@ -1,139 +0,0 @@ - - - - - SequenceFlow_1xi4e3g - - - - - - SequenceFlow_1xi4e3g - SequenceFlow_071nik1 - - - - SequenceFlow_071nik1 - SequenceFlow_15wj1qo - - - - - SequenceFlow_15wj1qo - SequenceFlow_0ujwc35 - SequenceFlow_1girnrf - - - - - - - SequenceFlow_0ujwc35 - SequenceFlow_1rl28fn - - - - - - - SequenceFlow_1girnrf - SequenceFlow_08vb0ur - - - =orderValue>=100 - - - SequenceFlow_1rl28fn - SequenceFlow_08vb0ur - SequenceFlow_1qwc5nn - - - - - SequenceFlow_1qwc5nn - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.0-complete-process.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.0-complete-process.png deleted file mode 100644 index 357242d6fc4..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.0-complete-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.1-initiate-payment-task.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.1-initiate-payment-task.png deleted file mode 100644 index 3a52e57dd1f..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.1-initiate-payment-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.10-end-event.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.10-end-event.png deleted file mode 100644 index 2f6099187a1..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.10-end-event.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.11-process-id.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.11-process-id.png deleted file mode 100644 index dd93281eb6c..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.11-process-id.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.2-modeler-message-event.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.2-modeler-message-event.png deleted file mode 100644 index b5b54f2868f..00000000000 Binary files 
a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.2-modeler-message-event.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.3-add-message-name.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.3-add-message-name.png deleted file mode 100644 index 2d1e7b407fa..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.3-add-message-name.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.4-add-correlation-key.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.4-add-correlation-key.png deleted file mode 100644 index 8cdeb2e5bfc..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.4-add-correlation-key.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.5-add-xor-gateway.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.5-add-xor-gateway.png deleted file mode 100644 index 4551f270951..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.5-add-xor-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.6-label-xor-gateway.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.6-label-xor-gateway.png deleted file mode 100644 index d80b7cd01fb..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.6-label-xor-gateway.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.7-no-insurance-task.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.7-no-insurance-task.png deleted file mode 100644 index 124fd82dd3b..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.7-no-insurance-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.8-default-flow.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.8-default-flow.png deleted file mode 100644 index a7a0f0d664c..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.8-default-flow.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.9-condition-expression.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.9-condition-expression.png deleted file mode 100644 index d3e9316007a..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-3.9-condition-expression.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.0-process-in-operate.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.0-process-in-operate.png deleted 
file mode 100644 index 6bbd7cd0552..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.0-process-in-operate.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.1-process-instances-first-task.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.1-process-instances-first-task.png deleted file mode 100644 index 624972e693c..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.1-process-instances-first-task.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.2-waiting-at-message.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.2-waiting-at-message.png deleted file mode 100644 index 519fcd850ed..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.2-waiting-at-message.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.3-waiting-at-shipping.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.3-waiting-at-shipping.png deleted file mode 100644 index b5ec3ea9262..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.3-waiting-at-shipping.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.4-no-insurance-complete.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.4-no-insurance-complete.png deleted file mode 100644 index 56f490f4aa6..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.4-no-insurance-complete.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.5-both-instances-complete.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.5-both-instances-complete.png deleted file mode 100644 index be341970540..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/getting-started/assets/tutorial-4.5-both-instances-complete.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/index.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/index.md deleted file mode 100644 index b0f4fead0b1..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: index -title: "Deployment guide" -sidebar_label: "Overview" ---- - -This chapter contains information for users who want to deploy and run Zeebe in a private cloud or on their own hardware. - -Zeebe can be run as a Docker image or as a Kubernetes deployment. - -We recommend using Docker during development. This gives you a consistent, repeatable development environment. - -We recommend using either Camunda Cloud or Kubernetes and container images in production. This provides you with predictable and consistent configuration, and the ability to manage deployment using automation tools. 
- -The deployment guide covers the following topics: - -- [Local installation](local/install.md) - Contains instructions and a quick start guide to install Zeebe locally. -- [Docker container](docker/install.md) - Covers running Zeebe in a Docker environment. -- [Kubernetes deployment](kubernetes/index.md) - Gives information on running Zeebe in a Kubernetes environment. -- [Configuration](configuration/configuration.md) - Explains the configuration options. These configuration options apply to both environments, but not to Camunda Cloud. In Camunda Cloud, the configuration is provided for you. -- [Security](security/security.md) - Discusses the security aspects of running Zeebe and how to use them. -- [Operation](operations/index.md) - Outlines topics that become relevant when you want to operate Zeebe in production. - -This deployment guide also integrates with the following: - -- [Zeebe Distribution](https://github.com/camunda-cloud/zeebe/releases): The Zeebe distribution contains the workflow engine where we'll deploy our process model; the engine is also responsible for managing the state of active process instances. Included in the distro is the Zeebe CLI. Refer to our [installation guide](local/install.md). -- [Elasticsearch 7.x](https://www.elastic.co/guide/en/elasticsearch/reference/7.x/index.html): An open-source distributed datastore that can connect to Zeebe to store process data for auditing, visualization, analysis, etc. Camunda Operate uses Elasticsearch as its underlying datastore, which is why you need to download Elasticsearch to complete this deployment. Currently, 7.x is the minimum mandatory version with Zeebe 1.x. With version 0.20.0 (currently used in our tutorial), you may use 6.x. - -:::note -New to BPMN and want to learn more before moving forward? [This blog post](https://zeebe.io/blog/2018/08/bpmn-for-microservices-orchestration-a-primer-part-1/) helps explain the standard and why it's a good fit for microservices orchestration. -::: - -If you're already familiar with BPMN and how to create a BPMN model in Desktop Modeler, you can find the finished model we create during the tutorial here: [Zeebe Getting Started Tutorial Process Model](getting-started/assets/order-process.bpmn). - -## Additional resources - -If you have questions or feedback about deployment with Zeebe, we encourage you to visit the following: - -- [User forum](https://forum.camunda.io/) -- [Public Slack channel](https://zeebe-slack-invite.herokuapp.com/) -- [GitHub issue tracker](https://github.com/camunda-cloud/zeebe/issues) - -## Additional client configurations - -Zeebe's Java and Go clients each have getting started guides of their own, showing in much greater detail how you can use the clients in the worker services you orchestrate with Zeebe. - -- [Getting started with the Java client](https://github.com/camunda-cloud/camunda-cloud-get-started) -- [Getting started with the Go client](/apis-tools/go-client/get-started.md) - -Beyond Java and Go, it's possible to create clients for Zeebe in a range of other programming languages, including JavaScript and C#, via community-supported libraries. The [Awesome Zeebe](https://awesome.zeebe.io/) page includes community-contributed clients in other languages, and [this blog post](https://camunda.com/blog/2018/11/grpc-generating-a-zeebe-python-client/) walks through how to generate a new client stub for Zeebe using gRPC. 
diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/interceptors.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/interceptors.md deleted file mode 100644 index 89827089f28..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/interceptors.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -id: interceptors -title: "Interceptors" -sidebar_label: "Interceptors" ---- - -> This functionality is currently only available in Camunda Cloud Self-Managed. - -All communication from a client to a broker must first pass through a gateway. -There they can be intercepted before being dispatched. Zeebe provides a way to -load arbitrary interceptors into the gateway. Some typical examples of what you -can accomplish with this include: - -- Enforcing custom authorization rules on incoming calls -- Monitoring and logging of incoming calls (e.g. - https://github.com/grpc-ecosystem/java-grpc-prometheus) -- Distributed tracing (e.g. - https://github.com/open-telemetry/opentelemetry-java-instrumentation) - -## Implementing an interceptor - -For the communication between client and gateway, Zeebe uses the gRPC -[protocol](components/zeebe/technical-concepts/protocols.md). An interceptor is -thus implemented as a gRPC -[ServerInterceptor](https://grpc.github.io/grpc-java/javadoc/io/grpc/ServerInterceptor.html). - -An implementation must adhere to the following requirements: - -- It implements [ServerInterceptor](https://grpc.github.io/grpc-java/javadoc/io/grpc/ServerInterceptor.html) -- It has public visibility -- It has a public default constructor (i.e. no-arg constructor) - -Let's consider an interceptor that provides logging of incoming calls as an -example. Other ServerInterceptor examples can be found in the official grpc-java -[examples](https://github.com/grpc/grpc-java/tree/v1.41.0/examples). - -```java -package io.camunda.zeebe.example; - -import io.grpc.ForwardingServerCallListener.SimpleForwardingServerCallListener; -import io.grpc.Metadata; -import io.grpc.ServerCall; -import io.grpc.ServerCall.Listener; -import io.grpc.ServerCallHandler; -import io.grpc.ServerInterceptor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A simple interceptor that logs each incoming call. The class must be public - * since we will load it via JAR into the gateway. - */ -public final class LoggingInterceptor implements ServerInterceptor { - private static final Logger LOGGER = - LoggerFactory.getLogger("LoggingInterceptor"); - - @Override - public Listener interceptCall( - final ServerCall call, - final Metadata headers, - final ServerCallHandler next) { - final var listener = next.startCall(call, headers); - return new SimpleForwardingServerCallListener<>(listener) { - @Override - public void onMessage(final ReqT message) { - LOGGER.trace("intercepted a call"); - super.onMessage(message); - } - }; - } -} -``` - -This example interceptor will log `"intercepted a call"` at `TRACE` level for -each incoming call it intercepted. This specific interceptor always dispatches -all incoming calls to the target broker, but it would also be possible to stop -the message from interception by other interceptors and even to block it from -dispatch to the broker. - -## Compiling your interceptor - -Our source code for the interceptor class can now be compiled. There are many -ways to do this, but for simplicity we'll use `javac` directly. - -When compiling your class, you need to make sure all compile-time dependencies -are provided. 
In the example above, that means we need the `grpc-api` and -`slf4j-api` libraries available when compiling. - -Since the interceptor will be running inside the Zeebe gateway, the language -level of the compiled code must be the same as Zeebe's (i.e. currently JDK -11) or lower. This example thus assumes you're using version 11 of `javac`. - -```sh -# to compile LoggingInterceptor.java, we'll need to provide the api libraries -javac -classpath .:lib/grpc-api.jar:lib/slf4j-api.jar ./LoggingInterceptor.java -``` - -## Packaging an interceptor - -Next, you need to package the interceptor class into a fat JAR. Such a JAR must -contain all classes (i.e. including all classes your own classes depend upon at -runtime). - -Like compiling there are many ways to do this, but for simplicity we'll use -`jar` directly. Note, that means we have to define a java manifest file by hand, -in order to place the libraries' classes on the classpath. - -Similar to your interceptor class, any libraries you package must be compiled -for the same language level as Zeebe's (i.e. currently JDK 11) or lower. - -```sh -# both runtime libraries and the manifest must be packaged together with the compiled classes -jar cvfm LoggingInterceptor.jar ./MANIFEST.MF ./*.class ./lib - -# let's verify the contents of the JAR -jar tf ./LoggingInterceptor.jar -# META-INF/ -# META-INF/MANIFEST.MF -# LoggingInterceptor.java -# LoggingInterceptor$1.class -# lib/ -# lib/grpc-api.jar -# lib/grpc.jar -# lib/slf4j-api.jar -# lib/slf4j.jar -``` - -## Loading an interceptor into a gateway - -An interceptor can be loaded into your gateway as a fat JAR. For each -interceptor, you need to provide your gateway with: - -- An interception order index -- An identifier to identify this specific interceptor -- Where to find the JAR with the interceptor class -- The [fully qualified name](https://docs.oracle.com/javase/specs/jls/se17/html/jls-6.html#jls-6.7) - of the interceptor class, e.g. `com.acme.ExampleInterceptor` - -Let's continue with the LoggingInterceptor example. We can provide these -[configurations](configuration/configuration.md) -using a gateway config file, environment variables or a mix of both. We'll be -using a config file here. - -The following gateway config file configures our LoggingInterceptor so it can be -loaded into the gateway at start-up. - -```yaml -zeebe: - gateway: - ... - - # allows specifying multiple interceptors - interceptors: - - - # identifier, can be used for debugging - id: logging-interceptor - - # name of our ServerInterceptor implementation - # this must be the fully qualified name of the class - className: io.camunda.zeebe.example.LoggingInterceptor - - # path to the fat JAR, can be absolute or relative - jarPath: /tmp/LoggingInterceptor.jar - - # you can add additional interceptors by listing them - - id: ... - className: ... - jarPath: ... -``` - -Note that multiple interceptors can be configured (i.e. -`zeebe.gateway.interceptors` expects a list of interceptor configurations). The -listing order determines the order in which a call is intercepted by the -different interceptors. The first interceptor in the list wraps the second, etc. -The first interceptor is thus the outermost interceptor. In other words, calls -are intercepted first by the first listed interceptor, followed by the second -listed interceptor, etc. - -This configuration can also be provided using environment variables. 
You'll need -to provide an index for the interceptor in the variable name, to distinguish the -ordering of the different interceptors. For example, to configure the -`className` of the first interceptor use: -`zeebe_gateway_interceptors_0_className`. Likewise, a second interceptor's -`jarPath` can be configured using `zeebe_gateway_interceptors_1_jarPath`. - -## About class loading - -[Previously](#packaging-an-interceptor), we stated that you need to package the -interceptor class into a fat JAR. Although good general advice, this is not -entirely true. To understand why, let's discuss how the class loading of your -interceptor works. - -When your JAR is loaded into the gateway, Zeebe provides a special class loader -for it. This class loader isolates your interceptor from the rest of Zeebe, but -it also exposes our own code to your interceptor. When loading classes for your -interceptor, it will always first look in this special class loader and only if -it is not available it will look in Zeebe's main class loader. In other words, -you can access any classes from Zeebe's main class loader when they are not -provided by your JAR. For internal class loading, Zeebe will still only look in -its main class loader. - -This means you can reduce your JAR size by leaving out libraries that are -already provided by Zeebe's class loader. In addition, if your interceptor -depends on a different version of a class than the one provided by Zeebe, then -you can provide your own version without having to worry about breaking Zeebe. - -## Troubleshooting - -Here we describe a few common errors. Hopefully, this will help you recognize -these situations and provide an easy fix. Generally, the gateway will not be -able to start up with a misconfigured interceptor. - -Note that environment variables can overwrite your gateway configuration file. -The gateway logs the configuration it uses during start-up. Please use that to -verify your configuration. - -**java.lang.ClassNotFoundException** Your ServerInterceptor implementation could -not be found. Make sure you've configured the `className` correctly in the -[gateway configuration](#loading-an-interceptor-into-a-gateway) and that your -[JAR contains your class](#packaging-an-interceptor). - -**io.camunda.zeebe.gateway.interceptors.impl.InterceptorLoadException** -Something went wrong trying to load your interceptor. Make sure your [JAR is -packaged](#packaging-an-interceptor) correctly, i.e. it contains all runtime -dependencies and specifies them in the manifest file's classpath. The exception -should provide a clear description, but generally we distinguish the following -common cases: - -- Unable to instantiate your class: make sure your class adheres to the - [requirements described above](#implementing-an-interceptor). -- The JAR could not be loaded: make sure you've configured your interceptor - correctly in the [gateway configuration](#loading-an-interceptor-into-a-gateway). - -**io.camunda.zeebe.util.jar.ExternalJarLoadException**: the JAR could not be -loaded: make sure you've configured your interceptor correctly in the [gateway -configuration](#loading-an-interceptor-into-a-gateway). - -**java.lang.UnsupportedClassVersionError** Your interceptor has been compiled by -a more recent version of the Java Runtime. Make sure your [class is -compiled](#packaging-an-interceptor) with JDK 11. 
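As noted above, environment variables override the gateway configuration file. As an illustration, the environment-variable form of the `LoggingInterceptor` configuration could be expressed as a Kubernetes container `env` block. This is only a sketch: the deployment shape and the `_0_id` and `_0_jarPath` variable names are assumptions that simply follow the `zeebe_gateway_interceptors_<index>_<field>` pattern described in [Loading an interceptor into a gateway](#loading-an-interceptor-into-a-gateway).

```yaml
# Sketch only: the LoggingInterceptor from this guide, configured through
# environment variables on a gateway container. The variable names follow the
# zeebe_gateway_interceptors_<index>_<field> pattern; _0_id and _0_jarPath are
# assumed to follow that same pattern.
env:
  - name: zeebe_gateway_interceptors_0_id
    value: logging-interceptor
  - name: zeebe_gateway_interceptors_0_className
    value: io.camunda.zeebe.example.LoggingInterceptor
  - name: zeebe_gateway_interceptors_0_jarPath
    value: /tmp/LoggingInterceptor.jar
```

Here the interception order is expressed by the numeric index rather than by the position in a YAML list.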
diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/accessing-operate-tasklist.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/accessing-operate-tasklist.md deleted file mode 100644 index d3638e58258..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/accessing-operate-tasklist.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: accessing-operate-tasklist -title: "Accessing Operate and Tasklist outside the cluster" -description: "Let's take a closer look at how you can utilize Operate and Tasklist outside of your cluster." ---- - -To interact with the services inside the Camunda Cloud cluster, use `port-forward` to route traffic from your environment to the cluster. - -``` -> kubectl port-forward svc/-zeebe-gateway 26500:26500 -``` - -Now, you can connect and execute operations against your new Camunda Cloud cluster. This allows you to use `zbctl` as a command line interface to read and create resources inside the Zeebe broker. You can install `zbctl` via [npm](https://www.npmjs.com/package/zbctl). - -:::note -Notice that you need to keep `port-forward` running to communicate with the remote cluster. -::: - -Note thay accessing the Zeebe cluster directly using `kubectl port-forward` is recommended for development purposes. - -By default, the Camunda Cloud Helm charts are not exposing the Zeebe cluster via the ingress controller. If you want to use `zbctl` or a local client/worker from outside the Kubernetes cluster, rely on `kubectl port-forward` to the Zeebe cluster to communicate. - -You can find the external IP by running the following: - -``` -> kubectl get svc -``` - -You should see something like the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE --zeebe-gateway LoadBalancer 10.109.108.4 80:30497/TCP,443:32232/TCP 63m -``` - -The `` under the `EXTERNAL-IP` column should change to a public IP that you (and other users) should be able to access from outside the cluster. Check your cloud provider's specific configuration if that does not work. - -Then, you can access Operate pointing your browser at `http://`. - -:::note -If **no ingress** is enabled (e.g. like in Kubernetes KIND), you will need to `port-forward`. In a different terminal, run the following: -``` -> kubectl port-forward svc/-operate 8080:80 -> kubectl port-forward svc/-tasklist 9090:80 -``` -::: - -Then, you can access Operate pointing your browser at [http://localhost:8080](http://localhost:8080/), and Tasklist pointing at [http://localhost:9090](http://localhost:9090). Log in to these services using the `demo`/`demo` credentials. - -
    - ![Operate and Tasklist Login](assets/operate-tasklist-login.png)
    - -If you deploy process definitions, they will appear in the dashboard. Then, you can drill down to see your active instances. - -You can deploy and create new instances using the Zeebe clients or `zbctl`. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/ccsm-helm-charts.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/ccsm-helm-charts.png deleted file mode 100644 index 138e8170c73..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/ccsm-helm-charts.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/operate-tasklist-dashboard.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/operate-tasklist-dashboard.png deleted file mode 100644 index 796e7a4f058..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/operate-tasklist-dashboard.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/operate-tasklist-login.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/operate-tasklist-login.png deleted file mode 100644 index 3050aea1b1b..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/operate-tasklist-login.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/zeebe-k8s-helm.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/zeebe-k8s-helm.png deleted file mode 100644 index 1c03b939ed7..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/assets/zeebe-k8s-helm.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/installing-helm.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/installing-helm.md deleted file mode 100644 index b58ae174db3..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/helm/installing-helm.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -id: installing-helm -title: "Camunda Cloud Helm charts" ---- - -[Helm](https://github.com/helm/helm) is a package manager for Kubernetes resources. Helm allows us to install a set of components by simply referencing a package name, and allowing us to override configurations to accommodate these packages to different scenarios. - -Helm also provides dependency management between charts, meaning that charts can depend on other charts. This allows us to aggregate a set of components together that can be installed with a single command. - -The Camunda Cloud Helm chart is currently available and can be found in the [Camunda Cloud Helm repository](https://github.com/camunda-community-hub/camunda-cloud-helm). -By default, the following will be installed: - -- **Camunda Cloud self-managed Helm (ccsm-helm)**: - - **Zeebe**: Deploys a Zeebe Cluster with three brokers using the `camunda/zeebe` Docker image. - - **Zeebe Gateway**: Deploys the standalone Zeebe Gateway with two replicas. - - **Operate**: Deploys Operate, which connects to an existing Elasticsearch. - - **Tasklist**: Deploys the Tasklist component to work with user tasks. - - **Elasticsearch**: Deploys an Elasticsearch cluster with two nodes. 
- -![Charts](assets/ccsm-helm-charts.png) - -When installing the [ccsm-helm](https://github.com/camunda-community-hub/camunda-cloud-helm/tree/main/charts/ccsm-helm) chart, all the components in this picture are installed. - -### Add Camunda Cloud Helm repository - -The Camunda Cloud Helm chart repository needs to be added. Once this is done, Helm is able to fetch and install charts hosted in [http://helm.camunda.io](http://helm.camunda.io). - -``` -> helm repo add camunda-cloud https://helm.camunda.io -> helm repo update -``` - -Once this is complete, we are ready to install the Helm chart hosted in the official Camunda Cloud Helm chart repo. - -### Installing the Camunda Cloud Helm chart in a Cloud environment - -In this section, we will install all the available Camunda Cloud components inside a Kubernetes cluster. Notice that this Kubernetes cluster can have services which are already running; Zeebe is simply installed as another set of services. - -``` -> helm install camunda-cloud/ccsm-helm -``` - -:::note -Change >RELEASE NAME< with a name of your choice. - -Also, notice that you can add the `-n` flag to specify in which Kubernetes namespace the components should be installed. -::: - -Installing all the components in a cluster requires all Docker images to be downloaded to the remote cluster. Depending on which Cloud provider you are using, the amount of time it will take to fetch all the images will vary. - -Review the progress of your deployment by checking if the Kubernetes PODs are up and running with the following: - -``` -> kubectl get pods -``` - -This will return something similar to the following: - -``` -NAME READY STATUS RESTARTS AGE -elasticsearch-master-0 1/1 Running 0 4m6s -elasticsearch-master-1 1/1 Running 0 4m6s --operate-XXX 1/1 Running 0 4m6s --zeebe-0 1/1 Running 0 4m6s --zeebe-1 1/1 Running 0 4m6s --zeebe-2 1/1 Running 0 4m6s --tasklist-XXX 1/1 Running 0 4m6s --zeebe-gateway-XX1 1/1 Running 0 4m6s --zeebe-gateway-XX2 1/1 Running 0 4m6s -``` - -### Installing the Camunda Cloud Helm chart locally using KIND - -If you want to use [Kubernetes KIND](https://github.com/kubernetes-sigs/kind), add `-f ccsm-kind-values.yaml`. The file can be downloaded [here](https://github.com/camunda-community-hub/camunda-cloud-helm/blob/main/kind/ccsm-kind-values.yaml). - -Be aware that using KIND is only recommended for development purposes. - -``` -helm install camunda-cloud/ccsm-helm -f ccsm-kind-values.yaml -``` - -This will deploy the same components, but with a set of parameters tailored to a local environment setup. - -:::note -All Docker images will be downloaded to your local KIND cluster, so it might take some time for the services to get started. -::: - -Review the progress of your deployment by checking if the Kubernetes PODs are up and running with the following: - -``` -> kubectl get pods -``` - -This will return something similar to the following: - -``` -NAME READY STATUS RESTARTS AGE -elasticsearch-master-0 1/1 Running 0 4m6s --operate-XXX 1/1 Running 0 4m6s --zeebe-0 1/1 Running 0 4m6s --tasklist-XXX 1/1 Running 0 4m6s --zeebe-gateway 1/1 Running 0 4m6s -``` - -### Troubleshooting the installation - -Check that each POD has at least 1/1 running instances. If one or more of your PODs stay pending, it means that it can not be scheduled onto a node. - -Usually this happens because there are insufficient resources that prevent it. 
Use the `kubectl describe ...` command to check on messages from the scheduler: - -``` -> kubectl describe pods ${POD_NAME} -``` - -If the output of the `describe` command was not beneficial, tail the logs of these PODs by running the following: - -``` -> kubectl logs -f -``` diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/index.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/index.md deleted file mode 100644 index 12b046b5ef6..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/kubernetes/index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: index -title: "Kubernetes deployment" -sidebar_label: "Overview" ---- - -We recommend using Kubernetes when deploying Camunda Cloud Self-Managed to production. - -This chapter is divided into the following sections: - -- [General information](index.md#general-information) -- [Prerequisites](index.md#prerequisites) -- [Getting to know and installing Camunda Cloud Helm charts](./helm/installing-helm.md) -- [Accessing Operate from outside a Kubernetes cluster](./helm/accessing-operate-tasklist.md) - -## General information - -### Broker - -Zeebe broker nodes need to be deployed as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) to preserve the identity of cluster nodes. StatefulSets require persistent storage, which must be allocated in advance. Depending on your cloud provider, the persistent storage differs as it is provider-specific. - -At [helm.camunda.io](https://helm.camunda.io/), you'll find a Helm chart to configure a three-broker cluster with two Elasticsearch instances, Operate, two Zeebe gateways, and Tasklist. This size is comparable with the Production-S cluster plan in Camunda Cloud SaaS. It should be sufficient for 80% of use cases. - -There are many ways you can provision and configure a Kubernetes cluster, and there are a number of architectural choices you need to make. Will your workers run in the Kubernetes cluster or external to it? - -You will need to configure your Kubernetes cluster and modify this to suit the architecture you are building. - -### Gateway - -Zeebe gateway is deployed as a stateless service. - -We support [Kubernetes startup and liveness probes](../operations/health.md#gateway) for Zeebe gateway. - -### Helm - -There are several alternatives to deploy applications to a Kubernetes cluster, but the following sections use Helm charts to deploy a set of components into your cluster. - -Helm allows you to choose exactly what chart (set of components) you want to install and how these components need to be configured. - -These Helm charts are continuously being improved and released to the [Camunda Cloud Helm Chart Repository](https://github.com/camunda-community-hub/camunda-cloud-helm). - -You are free to choose your Kubernetes provider as our Helm charts are not cloud provider-specific. - -We encourage [reporting issues](https://github.com/camunda-community-hub/camunda-cloud-helm/issues) if you find them. - -## Prerequisites - -To use Kubernetes, you must have the following tools installed in your local environment: - -- `kubectl`: Kubernetes Control CLI tool, installed and connected to your cluster -- `helm`: Kubernetes Helm CLI tool - -You also need a Kubernetes cluster. You have several options: - -- Local for development, you can use [Kubernetes KIND](https://github.com/kubernetes-sigs/kind), Minikube, and MicroK8s. -- Remote: Google GKE, Azure AKS, Amazon EKS, etc. 
- -:::note -Be aware that we only officially test the stock Kubernetes and OpenShift environments. -However, feel free to try different trials from cloud providers to create a Kubernetes cluster to test Camunda Cloud Self-Managed in your cloud. -::: - -Optional tools related to Camunda Cloud: - -- Camunda Modeler: to model/modify business processes. Install Camunda Modeler [here](/components/modeler/desktop-modeler/install-the-modeler.md). -- Zeebe CTL(`zbctl`): command line tool to interact with a Zeebe cluster (local/remote). You can get the `zbctl` tool from the official -[Zeebe release page](https://github.com/camunda-cloud/zeebe/releases). diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/assets/order-process.bpmn b/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/assets/order-process.bpmn deleted file mode 100644 index b4df39f92ab..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/assets/order-process.bpmn +++ /dev/null @@ -1,92 +0,0 @@ - - - - - SequenceFlow_18tqka5 - - - SequenceFlow_1qj94z0 - - - - - - - - - - SequenceFlow_18tqka5 - SequenceFlow_10zt7r3 - - - - - - SequenceFlow_10zt7r3 - SequenceFlow_1t0gysp - - - - - - - SequenceFlow_1t0gysp - SequenceFlow_1qj94z0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/assets/order-process.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/assets/order-process.png deleted file mode 100644 index 07e87b95d02..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/install.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/install.md deleted file mode 100644 index c7c23a82e26..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/install.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: install -title: "Local installation" -sidebar_label: "Install" ---- - -This page guides you through the initial installation of the Zeebe broker and next steps for development purposes. - -## Prerequisites - -- Operating system: - - Linux - - Windows/MacOS (development only, not supported for production) -- Java Virtual Machine: - - Oracle Hotspot 11, or - - Open JDK 11 - -## Download a distribution - -Download the latest Zeebe release from the [GitHub release page](https://github.com/camunda-cloud/zeebe/releases). - -Once you've downloaded a distribution, extract it into a folder of your choice. - -To extract the Zeebe distribution and start the broker, **Linux users** can type the following: - -```bash -tar -xzf zeebe-distribution-X.Y.Z.tar.gz -C zeebe/ -./bin/broker -``` - -For **Windows users**, take the following steps: - -1. Download the `.zip` package. -2. Extract the package using your preferred unzip tool. -3. Open the extracted folder. -4. Navigate to the `bin` folder. -5. Start the broker by double-clicking on the `broker.bat` file. - -Once the Zeebe broker has started, it should produce the following output: - -``` -bash -23:39:13.246 [] [main] INFO io.camunda.zeebe.broker.system - Scheduler configuration: Threads{cpu-bound: 2, io-bound: 2}. 
-23:39:13.270 [] [main] INFO io.camunda.zeebe.broker.system - Version: X.Y.Z -23:39:13.273 [] [main] INFO io.camunda.zeebe.broker.system - Starting broker with configuration { -``` - -## Next steps - -As a next step, you can install Desktop Modeler. - -Desktop Modeler is an open-source desktop BPMN modeling application created specifically for Zeebe. This application gives developers powerful features to design and deploy automated processes, human workflows, decision tables, and decision requirement diagrams using the globally-recognized [BPMN](https://camunda.com/bpmn/) and [DMN](https://camunda.com/dmn/) standards. - -Get started with Desktop Modeler using our [installation guide](/components/modeler/desktop-modeler/install-the-modeler.md). diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/quickstart.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/quickstart.md deleted file mode 100644 index 5938aa7e065..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/local/quickstart.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -id: quickstart -title: "Quickstart" ---- - -This quickstart guide introduces you to the main concepts of Zeebe in five steps without the need to write a single line of code. - -## Step 1: Download the Zeebe distribution - -You can download the latest distribution from the [Zeebe release page](https://github.com/camunda-cloud/zeebe/releases). - -Extract the archive and enter the Zeebe directory. - -``` -tar -xzvf zeebe-distribution-X.Y.Z.tar.gz -cd zeebe-broker-X.Y.Z/ -``` - -:::note -Some command examples might not work on Windows if you use cmd or -Powershell. For Windows users, we recommend using a bash-like shell, i.e. Git -Bash, Cygwin, or MinGW for this guide. -::: - -Inside the Zeebe directory, you'll find multiple directories: - -``` -tree -d -``` - -``` -. -├── bin - Binaries and start scripts of the distribution -├── conf - Zeebe and logging configuration -└── lib - Shared java libraries -``` - -## Step 2: Start the Zeebe broker - -To start a Zeebe broker, use the `broker` or `broker.bat` file located in the -`bin/` folder. - -``` -./bin/broker -``` - -``` -23:39:13.246 [] [main] INFO io.camunda.zeebe.broker.system - Scheduler configuration: Threads{cpu-bound: 2, io-bound: 2}. -23:39:13.270 [] [main] INFO io.camunda.zeebe.broker.system - Version: X.Y.Z -23:39:13.273 [] [main] INFO io.camunda.zeebe.broker.system - Starting broker with configuration { -``` - -You will see some output which contains the version of the broker and -configuration parameters like directory locations and API socket addresses. - -To continue this guide, open another terminal to execute commands using the -Zeebe CLI `zbctl`. - -We can now check the status of the Zeebe broker. - -:::note -By default, the embedded gateway listens to a plaintext connection, but the clients are configured to use TLS. Therefore, all `zbctl` commands in the quickstart will specify the `--insecure` flag. -::: - -``` -./bin/zbctl --insecure status -``` - -``` -Cluster size: 1 -Partitions count: 1 -Replication factor: 1 -Brokers: - Broker 0 - 0.0.0.0:26501 - Partition 1 : Leader -``` - -## Step 3: Deploy a process - -A [process](/components/concepts/processes.md) is used to orchestrate loosely coupled job -workers and the flow of data between them. - -In this guide, we'll use an example process `order-process.bpmn`. You can -download it with the following link: -[order-process.bpmn](assets/order-process.bpmn). 
- -![order-process](assets/order-process.png) - -The process describes a sequential flow of three tasks **Collect Money**, **Fetch Items**, and **Ship Parcel**. If you open the `order-process.bpmn` file in a text editor, you'll see every task has an attribute `type` defined in the XML which is later used as job type. - -``` - - - - - - - - - - - - - - - - - - - -``` - -To complete an instance of this process, we need to activate and complete one job for each of -the types `payment-service`, `inventory-service`, and `shipment-service`. - -First, let's deploy the process to the Zeebe broker. - -``` -./bin/zbctl --insecure deploy order-process.bpmn -``` - -``` -{ - "key": 2251799813685250, - "processes": [ - { - "bpmnProcessId": "order-process", - "version": 1, - "processKey": 2251799813685249, - "resourceName": "order-process.bpmn" - } - ] -} -``` - -See a few concepts important to understand at this point below: - -- A **job** is simply a work item in a process that must be completed before a process instance can proceed to the next step. ([See: Job Workers](/components/concepts/job-workers.md)) -- A **process instance** is one running instance of a process model. In our case, this is an individual order to be fulfilled. ([See: Processes](/components/concepts/processes.md)) - -If a job is available for a given process instance, the worker activates it, completes it, and notifies Zeebe. Zeebe then advances that process instance to the next step in the process. - -## Step 4: Create a process instance - -After the process is deployed, we can create a new instance of it. Every -instance of a process is a single execution of the process. To create a new -instance, we must specify the process ID from the BPMN file. In -our case, the ID is `order-process` as defined in the `order-process.bpmn`: - -``` - -``` - -Every instance of a process normally processes some kind of data. We can -specify the initial data of the instance as variables when we start the instance. - -:::note -Windows users who want to execute this command using cmd or Powershell -have to escape the variables differently. - -- cmd: `"{\"orderId\": 1234}"` -- Powershell: `'{"\"orderId"\": 1234}'` - -::: - -``` -./bin/zbctl --insecure create instance order-process --variables '{"orderId": 1234}' -``` - -``` -{ - "processKey": 2251799813685249, - "bpmnProcessId": "order-process", - "version": 1, - "processInstanceKey": 2251799813685251 -} -``` - -## Step 5: Complete a process instance - -To complete the instance, all three tasks must be executed. In Zeebe, a job is -created for every task which is reached during process instance execution. To finish a job and thereby the corresponding task, must be activated -and completed by a [job worker](/components/concepts/job-workers.md). - -A job worker is a long-living process which repeatedly tries to activate jobs for a given job type and completes them after executing its business logic. The `zbctl` also provides a command to spawn simple job workers using an external command or -script. - -The job worker receives for every job the process instance variables as JSON object on -`stdin` and has to return its result also as a JSON object on `stdout` if it -handled the job successfully. - -In this example, we use the Unix command `cat`, which outputs what it receives -on `stdin`. To complete a process instance we now must create a job worker for -each of the three task types from the process definition: `payment-service`, -`inventory-service`, and `shipment-service`. 
- -:::note -For Windows users, this command does not work with cmd as the `cat` command does not exist. We recommend using Powershell or a bash-like shell to execute this command. -::: - -``` -./bin/zbctl --insecure create worker payment-service --handler cat & -./bin/zbctl --insecure create worker inventory-service --handler cat & -./bin/zbctl --insecure create worker shipment-service --handler cat & -``` - -``` -2019/06/06 20:54:36 Handler completed job 2251799813685257 with variables -{"orderId":1234} -2019/06/06 20:54:36 Activated job 2251799813685264 with variables -{"orderId":1234} -2019/06/06 20:54:36 Handler completed job 2251799813685264 with variables -{"orderId":1234} -2019/06/06 20:54:36 Activated job 2251799813685271 with variables -{"orderId":1234} -2019/06/06 20:54:36 Handler completed job 2251799813685271 with variables -{"orderId":1234} -``` - -After the job workers are running in the background, we can create more instances -of our process to observe how the workers will complete them. - -``` -./bin/zbctl --insecure create instance order-process --variables '{"orderId": 12345}' -``` - -To close all job workers, use the `kill` command to stop the background processes. - -``` -kill %1 %2 %3 -``` - -To visualize the state of the process instances, start the -[Zeebe simple monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor), a community maintained project. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/assets/example-setup-cluster.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/assets/example-setup-cluster.png deleted file mode 100644 index ba8f550286b..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/assets/example-setup-cluster.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/assets/grafana-preview.png b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/assets/grafana-preview.png deleted file mode 100644 index fa7c90a93c7..00000000000 Binary files a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/assets/grafana-preview.png and /dev/null differ diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/backpressure.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/backpressure.md deleted file mode 100644 index 56f9ddf9db0..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/backpressure.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -id: backpressure -title: "Backpressure" -description: "This document outlines an overview of backpressure and its accompanying assets." ---- - -When a broker receives a client request, it is written to the **event stream** first (see section [internal processing](/components/zeebe/technical-concepts/internal-processing.md) for details), and processed later by the stream processor. - -If the processing is slow or if there are many client requests in the stream, it might take too long for the processor to start processing the command. -If the broker keeps accepting new requests from the client, the backlog increases and the processing latency can grow beyond an acceptable time. - -To avoid such problems, Zeebe employs a backpressure mechanism. When the broker receives more requests than it can process with an acceptable latency, it rejects some requests (see [technical error handling](/apis-tools/grpc.md#technical-error-handling)). 
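Backpressure is enabled by default, and the algorithm used to calculate the limit can be selected in the broker configuration. The snippet below is only a sketch: the key names are assumptions based on the broker configuration template and should be verified against the [configuration](../configuration/configuration.md) reference; the available algorithms are described in the sections below.

```yaml
# Sketch (key names are assumptions, verify against the configuration reference):
# selecting and tuning the backpressure algorithm in the broker configuration.
zeebe:
  broker:
    backpressure:
      enabled: true
      algorithm: aimd # aimd, fixed, vegas, gradient, or gradient2
      aimd:
        requestTimeout: 200ms
      # For a fixed limit instead:
      # algorithm: fixed
      # fixed:
      #   limit: 20
```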
- -### Terminology - -- **RTT** - The time between when the request is accepted by the broker and when the response to the request is sent back to the gateway. -- **Inflight count** - The number of requests accepted by the broker but the response is not yet sent. -- **Limit** - Maximum number of flight requests. When the inflight count is above the limit, any new incoming request is rejected. - -:::note -The limit and inflight count are calculated per partition. -::: - -### Backpressure algorithms - -Zeebe uses adaptive algorithms from [concurrency-limits](https://github.com/Netflix/concurrency-limits) to dynamically calculate the limit. -Configure Zeebe with one of the backpressure algorithms in the following sections. - -#### Fixed limit - -With **fixed limit**, one can configure a fixed value of the limit. -Zeebe operators are recommended to evaluate the latencies observed with different values for limit. -Note that with different cluster configurations, you may have to choose different limit values. - -#### AIMD - -AIMD calculates the limit based on the configured _requestTimeout_. -When the RTT for a request _requestTimeout_, the limit is increased by 1. -When the RTT is longer than _requestTimeout_, -the limit will be reduced according to the configured _backoffRatio_. - -#### Vegas - -Vegas is an adaptive limit algorithm based on TCP Vegas congestion control algorithm. -Vegas estimates a base latency as the minimum observed latency. -This base RTT is the expected latency when there is no load. -Whenever the RTT deviates from the base RTT, a new limit is calculated based on the Vegas algorithm. -Vegas allows you to configure two parameters - _alpha_ and _beta_. -The values correspond to a queue size estimated by the Vegas algorithm based on the observed RTT, base RTT, and current limit. -When the queue size is below _alpha_, the limit is increased. -When the queue size is above _beta_, the limit is decreased. - -### Gradient - -Gradient is an adaptive limit algorithm that dynamically calculates the limit based on observed RTT. -In the gradient algorithm, the limit is adjusted based on the gradient of observed RTT and an observed minimum RTT. -If gradient is less than 1, the limit is decreased. Otherwise, the limit is increased. - -### Gradient2 - -Gradient2 is similar to Gradient, but instead of using observed minimum RTT as the base, it uses an exponentially smoothed average RTT. - -## Backpressure tuning - -The goal of backpressure is to keep the processing latency low. -The processing latency is calculated as the time between the command is written to the event stream until it is processed. -To see how backpressure behaves, run a benchmark on your cluster and observe the following metrics: - -- `zeebe_stream_processor_latency_bucket` -- `zeebe_dropped_request_count_total` -- `zeebe_received_request_count_total` -- `zeebe_backpressure_requests_limit` - -You may want to run the benchmark with different loads: - -1. With low load - Where the number of requests sent per second is low. -2. With high load - Where the number of requests sent per second is above what Zeebe can process within a reasonable latency. - -If the value of the limit is small, the processing latency will be small, but the number of rejected requests may be high. -If the value of the limit is large, fewer requests may be rejected (depending on the request rate), -but the processing latency may increase. - -When using **fixed limit**, you can run the benchmark with different values for the limit. 
-You can then determine a suitable value for a limit for which the processing latency (`zeebe_stream_processor_latency_bucket`) is within the desired latency. - -When using **AIMD**, you can configure a `requestTimeout` which corresponds to a desired latency. -Note that during high load, AIMD can lead to a processing latency two times more than the configured `requestTimeout`. -It is also recommended to configure a `minLimit` to prevent the limit from aggressively dropping during constant high load. - -When using **Vegas**, you cannot configure the backpressure to a desired latency. -Instead, Vegas tries to keep the RTT as low as possible based on the observed minimum RTT. - -Similar to Vegas, you cannot configure the desired latency in Gradient and Gradient2. -They calculated the limit based on the gradient of observed RTT from the expected RTT. -The higher the value of _rttTolerance_, the higher deviations are tolerated that results in higher values for limit. - -If a lot of requests are rejected due to backpressure, it might indicate that the processing capacity of the cluster is not enough to handle the expected throughput. -If this is the expected workload, you might consider a different configuration for the cluster, such as provisioning more resources and increasing the number of nodes and partitions. - -## Potential issues - -The rate limiter used by Zeebe to implement backpressure may use `System.nanoTime()` to measure the RTT of requests. In some systems, we've observed consecutive calls to this method can return equal or even decreasing values. [Low clock resolution](https://shipilev.net/blog/2014/nanotrusting-nanotime) and [monotonicity](https://bugs.openjdk.java.net/browse/JDK-6458294) [issues](https://stackoverflow.com/questions/3657289/linux-clock-gettimeclock-monotonic-strange-non-monotonic-behavior) are some of the most likely culprits of this. If this happens, it's recommended to configure the backpressure to use the **fixed** algorithm. Without a clock with sufficient resolution, adaptive backpressure algorithms are not useful. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/backups.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/backups.md deleted file mode 100644 index e47174bb8c2..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/backups.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -id: backups -title: "Backups" -description: "A guide to creating and installing Zeebe backups." ---- - -As Zeebe fully manages the state of your process instances, consider taking backups of Zeebe data; this is crucial to prevent data loss, roll back application-level errors, and more. - -Zeebe is fault-tolerant and replicates state internally. Backups are only necessary if you'd like to protect against the loss of entire replica sets or data corruption bugs. - -State of other components, such as Operate and Tasklist, is not managed by Zeebe and must be backed up separately. - -Taking backups is a manual process that is highly dependent on your infrastructure and deployment. Camunda does not provide an automated backup mechanism or tool. However, we do offer the following guidance to create and execute a successful backup. - -## Cold backups - -Cold backups, also called offline backups, require **downtime**. - -During the downtime, processes don't make progress and clients can't communicate with Zeebe. 
-To make sure that the downtime doesn't cause issues for your clients, you should test how your clients behave during the downtime, or shut them down as well. - -### Shutting down all brokers in the cluster - -To take a consistent backup, all brokers must be shut down first. - -As soon as brokers shut down, partitions become unhealthy and clients lose connections to Zeebe or experience full backpressure. -To prevent unnecessary failovers during the shutdown process, we recommend shutting down all brokers at the same time instead of a gradual shutdown. - -Wait for all brokers to fully shut down before proceeding to the next step. - -### Creating the backup - -:::note -The `data` folder contains symbolic and hard links which may require special attention when copying, depending on your environment. -::: - -To create the backup, take the following steps: - -1. Each broker has a data folder where all state is persisted. The location of the data folder is [configured](../configuration/configuration.md) via `zeebe.broker.data.directory`. Create a copy of the data folder and store it in a safe location. - -If you have direct access to the broker, for example in a bare-metal setup, you can do this by creating a tarball like this: `tar caf backup.tar.gz data/`. - -You may also use filesystem snapshots or [Kubernetes volume snapshots](https://kubernetes.io/docs/concepts/storage/volume-snapshots/) -if that fits your environment better - -2. Double-check that your tool of choice supports symbolic and hard links. -3. Do not merge or otherwise modify data folders as this might result in data loss and unrestorable backups. -4. Save the broker configuration to ensure the replacement cluster can process the backed-up data. - -See the following example on how a backup may look: - -```bash -$ tree zeebe-backup-* -zeebe-backup-2021-01-31 -├── zeebe-broker-0-config.yml -├── zeebe-broker-0-data.tar.gz -├── zeebe-broker-1-config.yml -├── zeebe-broker-1-data.tar.gz -├── zeebe-broker-2-config.yml -└── zeebe-broker-2-data.tar.gz -``` - -### Resuming - -After taking the backup, brokers can be started again and will automatically resume with processing. - -## Restore from backup - -### Prepare replacement cluster - -:::note Caution -Always use the same or the next minor version of Zeebe that you were using when taking the backup. -Using a different version may result in data corruption or data loss. -See the [update guide](/guides/update-guide/introduction.md) for more details. -::: - -Ensure your replacement cluster has the same number of brokers as the old cluster and uses the [same node IDs](setting-up-a-cluster.md#configuration). - -### Shutting down all brokers in the replacement cluster - -Before installing the backup, ensure all brokers are fully shut down. - -### Installing the backup - -To install the backup, take the following steps: - -1. Delete the existing data folder on each broker of your replacement cluster. -2. For each broker, copy over the configuration and the data folder. -3. You may need to slightly adjust the configuration for your replacement cluster, for example to update IP addresses. - -### Starting the Zeebe cluster - -After replacing the data folders, brokers can be started again and will automatically resume with processing. 
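If you take the Kubernetes volume snapshot route mentioned under [Creating the backup](#creating-the-backup), a per-broker snapshot request might look like the following sketch. The PVC name and the snapshot class are illustrative assumptions; use the names your cluster actually provisions, and only trigger the snapshots after all brokers are shut down, as described above.

```yaml
# Sketch: a VolumeSnapshot for one broker's data volume. Repeat per broker.
# The PVC name and snapshot class below are assumptions for illustration.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: zeebe-broker-0-backup-2021-01-31
spec:
  volumeSnapshotClassName: csi-snapclass # assumed CSI snapshot class
  source:
    persistentVolumeClaimName: data-zeebe-0 # assumed data PVC of broker 0
```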
diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/disk-space.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/disk-space.md deleted file mode 100644 index fc9895127aa..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/disk-space.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: disk-space -title: "Disk space" ---- - -Zeebe uses the local disk for storage of its persistent data. Therefore, if the Zeebe broker runs out of disk space, the system is in an invalid state as the broker cannot update its state. - -To prevent the system from reaching an unrecoverable state, Zeebe expects a minimum size of free disk space available. If this limit is violated, the broker rejects new requests to allow the operations team to free more disk space, and allows the broker to continue to update its state. - -Zeebe can be configured with the following settings for the disk usage watermarks: - -- **zeebe.broker.data.diskUsageMonitoringEnabled**: Configure if disk usage should be monitored (default: true) -- **zeebe.broker.data.diskUsageReplicationWatermark**: The fraction of used disk space before the replication is paused (default: 0.99) -- **zeebe.broker.data.diskUsageCommandWatermark**: The fraction of used disk space before new user commands are rejected (default: 0.97), this must be less than `diskUsageReplicationWatermark`. -- **zeebe.broker.data.diskUsageMonitoringInterval**: The interval in which the disk space usage is checked (default 1 second) - -For **production** use cases, we recommend setting the values for `diskUsageReplicationWatermark` and `diskUsageCommandWatermark` to smaller values, for example `diskUsageReplicationWatermark=0.9` and `diskUsageCommandWatermark=0.8`. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/health.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/health.md deleted file mode 100644 index eba8ae2bebe..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/health.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -id: health -title: "Health status" -description: "This document analyzes health status checks and responses." ---- - -## Broker - -Zeebe broker exposes three HTTP endpoints to query its health status: - -- Startup check -- Ready check -- Health check - -### Startup check - -Startup check endpoint is exposed via `http://{zeebe-broker}:{zeebe.broker.network.monitoringApi.port}/startup` (by default port 9600). -This endpoint returns an empty 204 response. If it is not ready, it will return a 503 error. - -A broker has successfully started when: - -- The broker has found other brokers in the cluster. -- All partitions owned by this broker have started and participate in replication. -- Other necessary services have started. - -A successful startup does not mean the broker is ready to process requests. -The broker is ready only after startup has successfully completed. - -### Ready check - -Ready check endpoint is exposed via `http://{zeebe-broker}:{zeebe.broker.network.monitoringApi.port}/ready` (by default port 9600). -This endpoint returns an empty 204 response. If it is not ready, it will return a 503 error. - -A broker is ready when it installs all necessary services to start processing in all partitions. -If a broker is ready, it doesn't mean it's the leader for the partitions. 
-It means it is participating in the replication and can be either a leader or a follower of all the partitions that are assigned to it. -Once it is ready, it never becomes unready again. - -A ready check is useful, for example, to use as a `readinessProbe` in a Kubernetes configuration to control when a pod can be restarted for rolling upgrade. -Depending on the cluster configuration, restarting one pod before the previous one is ready might make the system unavailable because the quorum of replicas is not available. -By configuring a `readinessProbe` that uses the ready check endpoint, we can inform Kubernetes when it is safe to proceed with the rolling update. - -### Health check - -Health check endpoint is exposed via `http://{zeebe-broker}:{zeebe.broker.network.monitoringApi.port}/health` (by default port 9600). -This endpoint returns an empty 204 response if the broker is healthy. If it is not healthy, it will return a 503 error. -A broker is never healthy before it is ready. -Unlike ready check, a broker can become unhealthy after it is healthy. -Hence, it gives a better status of a running broker. - -A broker is healthy when it can process processes, accept commands, and perform all its expected tasks. -If it is unhealthy, it may mean three things: - -- **It is only temporarily unhealthy**: For example, due to environmental circumstances such as temporary I/O issues. -- **It is partially unhealthy**: One or more partitions could be unhealthy, while the rest of them are able to process processes. -- **It is completely dead** - -[Metrics](metrics.md) give more insight into which partition is healthy or unhealthy. -When a broker becomes unhealthy, it's recommended to check the logs to see what went wrong. - -## Gateway - -Zeebe gateway exposes three HTTP endpoints to query its health status: - -- Health status - `http://{zeebe-gateway}:9600/health` -- Startup probe - `http://{zeebe-gateway}:9600/actuator/health/startup` -- Liveness probe - `http://{zeebe-gateway}:9600/actuator/health/liveness` - -(The default port can be changed in the configuration: `{zeebe.gateway.monitoring.port}`) - -### Health status - -The gateway is healthy if it: - -- Started successfully -- Has sufficient free memory and disk space to work with -- Is able to respond to requests within a defined timeout -- Is aware of other nodes in the cluster -- Is aware of leaders for partitions - -### Startup probe - -The gateway starts if it finished its boot sequence successfully and is ready to receive requests. It no longer starts when it initiates the shutdown sequence. - -The started probe can be used as Kubernetes startup probe. - -### Liveness probe - -The gateway is live if it: - -- Started successfully -- Has a minimal amount of free memory and disk space to work with -- Is able to respond to requests within a defined timeout, or misses the timeout for less than 10 minutes -- Is aware of other nodes in the cluster, or lost awareness of other nodes for less than five minutes -- Is aware of leaders for partitions, or lost awareness of partition leaders for less than five minutes - -The liveness probe can be used as Kubernetes liveness probe. - -### Status responses - -Each endpoint returns a status which can be one of the following: - -- `UNKNWON` (HTTP status code 200) -- `UP` (HTTP status code 200) -- `DOWN` (HTTP status code 503) -- `OUT_OF_SERVICE` (HTTP status code 503) - -If details are enabled (default), the response will also contain additional details. 
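The endpoints described above map directly onto Kubernetes probes. The following is a minimal sketch of a broker readiness probe and the gateway startup and liveness probes, assuming the default monitoring port 9600; the period and threshold values are illustrative assumptions and should be tuned to your environment.

```yaml
# Sketch: wiring the health endpoints into Kubernetes probes.
# Port 9600 is the default monitoring port; timing values are illustrative.

# Broker container:
readinessProbe:
  httpGet:
    path: /ready
    port: 9600
  periodSeconds: 10
  failureThreshold: 3

# Gateway container:
startupProbe:
  httpGet:
    path: /actuator/health/startup
    port: 9600
  periodSeconds: 10
  failureThreshold: 30
livenessProbe:
  httpGet:
    path: /actuator/health/liveness
    port: 9600
  periodSeconds: 10
  failureThreshold: 3
```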
- -### Customization - -Health indicators are set to sensible defaults. For specific use cases, it might be necessary to [customize health indicators](../configuration/gateway-health-probes.md). diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/index.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/index.md deleted file mode 100644 index 8aea801058e..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: index -title: "Operating Zeebe in Production" -sidebar_label: "Overview" ---- - -This chapter covers topics relevant to anyone who wants to operate Zeebe in production. - -- [Resource planning](resource-planning.md) - Gives an introduction for calculating how many resources need to be provisioned. -- [Network ports](network-ports.md) - Discusses which ports are needed to run Zeebe. -- [Setting up a Zeebe cluster](setting-up-a-cluster.md) - Quick guide on how to set up a cluster with multiple brokers. -- [Metrics](metrics.md) - Lists options to monitor Zeebe. -- [Health status](health.md) - Lists available high-level health and liveness probes. -- [Backpressure](backpressure.md) - Discusses the backpressure mechanism used by Zeebe brokers. -- [Disk space](disk-space.md) - Explains how to set limits for the amount of free disk space. Once these limits are undercut, Zeebe degrades gracefully to allow the operations team to provide more disk space. -- [Update zeebe](update-zeebe.md) - Contains information on how to perform a rolling upgrade. -- [Rebalancing](rebalancing.md) - Describes how to rebalance a cluster. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/metrics.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/metrics.md deleted file mode 100644 index 4b4319a489f..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/metrics.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -id: metrics -title: "Metrics" ---- - -When operating a distributed system like Zeebe, it is important to put proper monitoring in place. - -To facilitate this, Zeebe exposes an extensive set of metrics. - -Zeebe exposes metrics over an embedded HTTP server. - -## Types of metrics - -- **Counters**: A time series that records a growing count of some unit. Examples: number of bytes transmitted over the network, number of process instances started. -- **Gauges**: A time series that records the current size of some unit. Examples: number of currently open client connections, current number of partitions. - -## Metrics format - -Zeebe exposes metrics directly in Prometheus text format. -Read details of the format in the [Prometheus documentation][prom-format]. - -**Example:** - -``` -# HELP zeebe_stream_processor_events_total Number of events processed by stream processor -# TYPE zeebe_stream_processor_events_total counter -zeebe_stream_processor_events_total{action="written",partition="1",} 20320.0 -zeebe_stream_processor_events_total{action="processed",partition="1",} 20320.0 -zeebe_stream_processor_events_total{action="skipped",partition="1",} 2153.0 -``` - -## Configuring metrics - -Configure the HTTP server to export the metrics in the [configuration file](../configuration/configuration.md). - -## Connecting Prometheus - -As explained, Zeebe exposes the metrics over an HTTP server. The default port is `9600`. 
- -Add the following entry to your `prometheus.yml`: - -``` -- job_name: zeebe - scrape_interval: 15s - metrics_path: /metrics - scheme: http - static_configs: - - targets: - - localhost: 9600 -``` - -## Available metrics - -All Zeebe-related metrics have a `zeebe_`-prefix. - -Most metrics have the following common label: - -- `partition`: Cluster-unique id of the partition - -**Metrics related to process processing:** - -- `zeebe_stream_processor_events_total`: The number of events processed by the stream processor. - The `action` label separates processed, skipped, and written events. -- `zeebe_exporter_events_total`: The number of events processed by the exporter processor. - The `action` label separates exported and skipped events. -- `zeebe_element_instance_events_total`: The number of occurred process element instance events. - The `action` label separates the number of activated, completed, and terminated elements. - The `type` label separates different BPMN element types. -- `zeebe_running_process_instances_total`: The number of currently running process instances, i.e. - not completed or terminated. -- `zeebe_job_events_total`: The number of job events. The `action` label separates the number of - created, activated, timed out, completed, failed, and canceled jobs. -- `zeebe_pending_jobs_total`: The number of currently pending jobs, i.e. not completed or terminated. -- `zeebe_incident_events_total`: The number of incident events. The `action` label separates the number - of created and resolved incident events. -- `zeebe_pending_incidents_total`: The number of currently pending incident, i.e. not resolved. - -**Metrics related to performance:** - -Zeebe has a backpressure mechanism by which it rejects requests when it receives more requests than it can handle without incurring high processing latency. - -Monitor backpressure and processing latency of the commands using the following metrics: - -- `zeebe_dropped_request_count_total`: The number of user requests rejected by the broker due to backpressure. -- `zeebe_backpressure_requests_limit`: The limit for the number of inflight requests used for backpressure. -- `zeebe_stream_processor_latency_bucket`: The processing latency for commands and event. - -**Metrics related to health:** - -The health of partitions in a broker can be monitored by the metric `zeebe_health`. - -[prom-format]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details - -## Grafana - -Zeebe comes with a pre-built dashboard, available in the repository: -[monitor/grafana/zeebe.json](https://github.com/camunda/camunda/tree/1.3.14/monitor/grafana/zeebe.json). - -[Import](https://grafana.com/docs/grafana/latest/reference/export_import/#importing-a-dashboard) -it into your Grafana instance, then select the correct Prometheus data source (important if you have more than one), and -you should be greeted with the following dashboard: - -![cluster](assets/grafana-preview.png) diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/network-ports.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/network-ports.md deleted file mode 100644 index 3999a1a82a8..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/network-ports.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: network-ports -title: "Network ports" ---- - -The broker cluster sits behind the gRPC Gateway, which handles all requests from clients/workers and forwards events to brokers. 
-
-## Gateway
-
-The gateway receives communication from clients/workers via `zeebe.gateway.network.port: 26500`, and joins the broker cluster via `zeebe.gateway.cluster.contactPoint: 127.0.0.1:26502`.
-
-The relevant [configuration](../configuration/configuration.md) settings are:
-
-```
-Config file
-    zeebe:
-      gateway:
-        network:
-          port: 26500
-        cluster:
-          contactPoint: 127.0.0.1:26502
-
-
-Environment Variables
-  ZEEBE_GATEWAY_NETWORK_PORT = 26500
-  ZEEBE_GATEWAY_CLUSTER_CONTACTPOINT = 127.0.0.1:26502
-```
-
-## Broker
-
-The broker needs to receive communication from the gateway and from other brokers. It also exposes a port for monitoring.
-
-- `zeebe.broker.network.commandApi.port: 26501`: Gateway-to-broker communication, using an internal SBE (Simple Binary Encoding) protocol. This is the Command API port. It should be exposed to the gateway.
-- `zeebe.broker.network.internalApi.port: 26502`: Inter-broker clustering using the Gossip and Raft protocols for partition replication, broker elections, topology sharing, and message subscriptions. It should be exposed to other brokers and the gateway.
-- `zeebe.broker.network.monitoringApi.port: 9600`: Metrics and readiness probe. Prometheus metrics are exported on the route `/metrics`. There is a readiness probe on `/ready`.
-
-The relevant [configuration](../configuration/configuration.md) settings are:
-
-```
-Config file
-    zeebe:
-      broker:
-        network:
-          commandApi:
-            port: 26501
-          internalApi:
-            port: 26502
-          monitoringApi:
-            port: 9600
-
-Environment Variables
-  ZEEBE_BROKER_NETWORK_COMMANDAPI_PORT = 26501
-  ZEEBE_BROKER_NETWORK_INTERNALAPI_PORT = 26502
-  ZEEBE_BROKER_NETWORK_MONITORINGAPI_PORT = 9600
-```
diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/rebalancing.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/rebalancing.md
deleted file mode 100644
index a2c0a8b3e56..00000000000
--- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/rebalancing.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-id: rebalancing
-title: "Rebalancing"
----
-
-Rebalancing is re-electing partition leaders so they are evenly distributed across all brokers. An even leader distribution is beneficial because all brokers share the work of being partition leaders.
-
-Zeebe will, by default, prefer an even leader distribution when electing new leaders, but will not trigger a re-election unless a leader becomes unavailable.
-
-When a Zeebe cluster ends up with an uneven leader distribution, for example after losing a leader and electing a suboptimal broker as the new leader, manually requesting rebalancing can restore the cluster to an even leader distribution.
-
-## Manual rebalancing
-
-The gateway exposes an HTTP API to request rebalancing. You can use it by `POST`ing to the `/actuator/rebalance` endpoint on the monitoring port of the gateway:
-
-```bash
-curl -X POST https://{zeebe-gateway}:9600/actuator/rebalance
-```
-
-The result of this operation is always `200 OK` with no body, even when rebalancing is [not supported](#limitations) by the current configuration or when not all leaders have been contacted.
-
-Track the rebalancing progress by observing [metrics](./metrics.md).
-During the rebalancing, partitions might become unhealthy and can't make progress until a new leader is elected.
-
-### Limitations
-
-Manual rebalancing is done on a best-effort basis.
-
-Due to the nature of distributed systems, Zeebe can never guarantee a particular leader distribution, and rebalancing cannot avoid this limitation.
- -There are two configurations where manual rebalancing is supported: - -- **Priority election** with **round-robin distribution** - - Priority election and round-robin distribution are enabled by default. - - As long as you have not manually disabled priority election or set a fixed distribution, rebalancing is supported. - - Brokers are automatically assigned as primary partition leaders during startup, based on cluster size and replication factor. - -- **Priority election** with **fixed distribution** - - Fixed distribution is an experimental configuration that is disabled by default. - - Brokers are assigned as primary partition leaders based on the configuration. - - Only configurations where a partition designates a single broker as primary partition leader are supported. - -**Priority election** is controlled by the `zeebe.broker.cluster.raft.enablePriorityElection` config and is enabled by default. - -Learn more about [priority election](../configuration/priority-election.md). - -**Partition distribution** is controlled by the `zeebe.broker.experimental.partitioning` config options. -The default scheme is `ROUND_ROBIN`. - -All other configurations are not supported and a manual rebalancing will silently fail. -The rebalancing request is successfully completed by the gateway, but leaders will ignore the request and no re-election is triggered. - -Even when a rebalancing request is handled successfully by all leaders, the result of the re-election process is not guaranteed. -Followers that are not fully caught up with the leader cannot be elected as leader. -This becomes more likely under high load or with increased network latency between leader and follower. - -We recommend requesting rebalancing only under low load. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/resource-planning.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/resource-planning.md deleted file mode 100644 index f43e63d020e..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/resource-planning.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -id: resource-planning -title: "Resource planning" ---- - -The short answer to “_what resources and configuration will I need to take Zeebe to production?_” is: it depends. - -While we cannot tell you exactly what you need, we can explain what depends, what it depends on, and how it depends on it. - -## Disk space - -All brokers in a partition use disk space to store the following: - -- The event log for each partition they participate in. By default, this is a minimum of _128MB_ for each partition, incrementing in 128MB segments. The event log is truncated on a given broker when data has been processed and successfully exported by all loaded exporters. -- One periodic snapshot of the running state (in-flight data) of each partition (unbounded, based on in-flight work). - -Additionally, the leader of a partition also uses disk space to store a projection of the running state of the partition in RocksDB (unbounded, based on in-flight work). 
-
-To calculate the required amount of disk space, the following "back of the envelope" formula can be used as a starting point:
-
-```
-neededDiskSpace = replicatedState + localState
-
-replicatedState = totalEventLogSize + totalSnapshotSize
-
-totalEventLogSize = followerPartitionsPerNode * eventLogSize * reserveForPartialSystemFailure
-
-totalSnapshotSize = partitionsPerNode * singleSnapshotSize * 2
-// singleSnapshotSize * 2:
-// the last snapshot (already replicated) +
-// the next snapshot (in transit, while it is being replicated)
-
-partitionsPerNode = leaderPartitionsPerNode + followerPartitionsPerNode
-
-leaderPartitionsPerNode = partitionsCount / numberOfNodes
-followerPartitionsPerNode = partitionsCount * replicationFactor / numberOfNodes
-
-clusterSize = [number of broker nodes]
-partitionsCount = [number of partitions]
-replicationFactor = [number of replicas per partition]
-reserveForPartialSystemFailure = [factor to account for partial system failure]
-singleSnapshotSize = [size of a single RocksDB snapshot]
-eventLogSize = [event log size for duration of snapshotPeriod]
-```
-
-Some observations on the scaling of the factors above:
-
-- `eventLogSize`: This factor scales with the throughput of your system.
-- `totalSnapshotSize`: This factor scales with the number of in-flight processes.
-- `reserveForPartialSystemFailure`: This factor is supposed to be a reserve to account for partial system failure (e.g. loss of quorum inside the Zeebe cluster, or loss of connection to an external system). See the remainder of this document for a further discussion of the effects of partial system failure on the Zeebe cluster and disk space provisioning.
-
-Many of the factors influencing the above formula can be fine-tuned in the [configuration](../configuration/configuration.md). The relevant configuration settings are:
-
-```yaml
-Config file
-    zeebe:
-      broker:
-        data:
-          logSegmentSize: 128MB
-          snapshotPeriod: 5m
-        cluster:
-          partitionsCount: 1
-          replicationFactor: 1
-          clusterSize: 1
-
-Environment Variables
-  ZEEBE_BROKER_DATA_LOGSEGMENTSIZE = 128MB
-  ZEEBE_BROKER_DATA_SNAPSHOTPERIOD = 5m
-  ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT = 1
-  ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR = 1
-  ZEEBE_BROKER_CLUSTER_CLUSTERSIZE = 1
-```
-
-Other factors can be observed in a production-like system with representative throughput.
-
-By default, this data is stored in the following directories:
-
-- `segments` - The data of the log split into segments. The log is only appended, and its data can be deleted when it becomes part of a new snapshot.
-- `state` - The active state. Deployed processes, active process instances, etc. Completed process instances or jobs are removed.
-- `snapshot` - A state at a certain point in time.
-
-> **Pitfalls**
->
-> To avoid exceeding your disk space, here are a few pitfalls to avoid:
->
-> - Do not create a high number of snapshots with a long period between them.
-> - Do not configure an exporter which does not advance its record position (such as the Debug Exporter).
-
-If you do configure an exporter, ensure you monitor its availability and health, as well as the availability and health of the external system the exporter depends on.
-This is the Achilles' heel of the cluster. If data cannot be exported, it cannot be removed from the cluster and will accumulate on disk. See _effect of exporters and external system failure_ further on in this document for an explanation and possible buffering strategies.
-
-### Event log
-
-The event log for each partition is segmented. By default, the segment size is 128MB.
- -The event log grows over time, unless and until individual event log segments are deleted. - -An event log segment can be deleted once: - -- All the events it contains have been processed by exporters. -- All the events it contains have been replicated to other brokers. -- All the events it contains have been processed. -- The maximum number of snapshots has been reached. - -The following conditions inhibit the automatic deletion of event log segments: - -- A cluster loses its quorum. In this case, events are queued but not processed. Once a quorum is reestablished, events are replicated and eventually event log segments are deleted. -- The max number of snapshots has not been written. Log segment deletion begin as soon as the max number of snapshots is reached. -- An exporter does not advance its read position in the event log. In this case, the event log grows ad infinitum. - -An event log segment is not deleted until all the events in it are exported by all configured exporters. This means exporters that rely on side effects, perform intensive computation, or experience back pressure from external storage will cause disk usage to grow, as they delay the deletion of event log segments. - -Exporting is only performed on the partition leader, but the followers of the partition do not delete segments in their replica of the partition until the leader marks all events in it as unneeded by exporters. - -We make sure that event log segments are not deleted too early. No event log segment is deleted until a snapshot is taken that includes that segment. When a snapshot is taken, the event log is only deleted up to that point. - -### Snapshots - -The running state of the partition is captured periodically on the leader in a snapshot. By default, this period is every five minutes. This can be changed in the [configuration](../configuration/configuration.md). - -A snapshot is a projection of all events that represent the current running state of the processes running on the partition. It contains all active data, for example, deployed processes, active process instances, and not yet completed jobs. - -When the broker writes a new snapshot, it deletes all data on the log which was written before the latest snapshot. - -### RocksDB - -On the lead broker of a partition, the current running state is kept in memory and on disk in RocksDB. In our experience, this grows to 2GB under a heavy load of long-running processes. The snapshots replicated to followers are snapshots of RocksDB. - -### Effect of exporters and external system failure - -If an external system relied on by an exporter fails (for example, if you are exporting data to Elasticsearch and the connection to the Elasticsearch cluster fails), the exporter will not advance its position in the event log, and brokers cannot truncate their logs. The broker event log grows until the exporter is able to reestablish the connection and export the data. - -To ensure your brokers are resilient in the event of external system failure, give them sufficient disk space to continue operating without truncating the event log until the connection to the external system is restored. - -### Effect on exporters of node failure - -Only the leader of a partition exports events. Only committed events (events that have been replicated) are passed to exporters. The exporter then updates its read position. The exporter read position is only replicated between brokers in the snapshot. It is not itself written to the event log. 
This means _an exporter’s current position cannot be reconstructed from the replicated event log, only from a snapshot_. - -When a partition fails over to a new leader, the new leader is able to construct the current partition state by projecting the event log from the point of the last snapshot. The position of exporters cannot be reconstructed from the event log, so it is set to the last snapshot. This means an exporter can see the same events twice in the event of a fail-over. - -You should assign idempotent ids to events in your exporter if this is an issue for your system. The combination of record position and partition id is reliable as a unique id for an event. - -### Effect of quorum loss - -If a partition goes under quorum (for example, if two nodes in a 3-node cluster go down), the leader of the partition continues to accept requests, but these requests are not replicated and are not marked as committed. In this case, they cannot be truncated. This causes the event log to grow. The amount of disk space needed to continue operating in this scenario is a function of the broker throughput and the amount of time to quorum being restored. You should ensure your nodes have sufficient disk space to handle this failure mode. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/setting-up-a-cluster.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/setting-up-a-cluster.md deleted file mode 100644 index 62a032c60bd..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/setting-up-a-cluster.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -id: setting-up-a-cluster -title: "Setting up a Zeebe cluster" ---- - -To set up a cluster, you need to adjust the `cluster` section in the Zeebe configuration file. - -Below is a snippet of the default Zeebe configuration file: - -```yaml ---- -cluster: - # This section contains all cluster related configurations, to setup a zeebe cluster - - # Specifies the unique id of this broker node in a cluster. - # The id should be between 0 and number of nodes in the cluster (exclusive). - # - # This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_NODEID. - nodeId: 0 - - # Controls the number of partitions, which should exist in the cluster. - # - # This can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT. - partitionsCount: 1 - - # Controls the replication factor, which defines the count of replicas per partition. - # The replication factor cannot be greater than the number of nodes in the cluster. - # - # This can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR. - replicationFactor: 1 - - # Specifies the zeebe cluster size. This value is used to determine which broker - # is responsible for which partition. - # - # This can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_CLUSTERSIZE. - clusterSize: 1 - - # Allows to specify a list of known other nodes to connect to on startup - # The contact points of the internal network configuration must be specified. - # The format is [HOST:PORT] - # Example: - # initialContactPoints : [ 192.168.1.22:26502, 192.168.1.32:26502 ] - # - # To guarantee the cluster can survive network partitions, all nodes must be specified - # as initial contact points. 
- # - # This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - # specifying a comma-separated list of contact points. - # Default is empty list: - initialContactPoints: [] - - # Allows to specify a name for the cluster - # This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_CLUSTERNAME. - # Example: - clusterName: zeebe-cluster -``` - -## Example - -In this example, we will set up a Zeebe cluster with five brokers. Each broker needs to get a unique node id. - -To scale well, we will bootstrap five partitions with a replication factor of three. For more information about this, take a look into the [clustering](/components/zeebe/technical-concepts/clustering.md) section. - -The clustering setup will look like this: - -![cluster](assets/example-setup-cluster.png) - -## Configuration - -The configuration of the first broker could look like this: - -```yaml ---- -cluster: - nodeId: 0 - partitionsCount: 5 - replicationFactor: 3 - clusterSize: 5 - initialContactPoints: - [ - ADDRESS_AND_PORT_OF_NODE_0, - ADDRESS_AND_PORT_OF_NODE_1, - ADDRESS_AND_PORT_OF_NODE_2, - ADDRESS_AND_PORT_OF_NODE_3, - ADDRESS_AND_PORT_OF_NODE_4, - ] -``` - -For the other brokers, the configuration will slightly change: - -```yaml ---- -cluster: - nodeId: NODE_ID - partitionsCount: 5 - replicationFactor: 3 - clusterSize: 5 - initialContactPoints: - [ - ADDRESS_AND_PORT_OF_NODE_0, - ADDRESS_AND_PORT_OF_NODE_1, - ADDRESS_AND_PORT_OF_NODE_2, - ADDRESS_AND_PORT_OF_NODE_3, - ADDRESS_AND_PORT_OF_NODE_4, - ] -``` - -Each broker needs a unique node id. The ids should be in the range of zero and `clusterSize - 1`. You need to replace the `NODE_ID` placeholder with an appropriate value. - -Additionally, the brokers need an initial contact point to start their gossip conversation. Make sure you use the address and **management port** of another broker. You need to replace the `ADDRESS_AND_PORT_OF_NODE_0` placeholder. - -To guarantee a cluster can properly recover from network partitions, it is currently required that all nodes be specified as initial contact points. It is not necessary for a broker to list itself as an initial contact point, but it is safe to do so, and likely simpler -to maintain. - -## Partitions bootstrapping - -On bootstrap, each node will create a partition matrix. - -This matrix depends on the partitions count, replication factor and the cluster size. If you completed the configuration correctly and used the same values for `partitionsCount`, `replicationFactor`, and `clusterSize` on each node, all nodes will generate the same partition matrix. - -For the current example, the matrix will look like the following: - -| | Node 0 | Node 1 | Node 2 | Node 3 | Node 4 | -| - | - | - | - | - | - | -| Partition 0 | Leader | Follower | Follower | - | - | -| Partition 1 | - | Leader | Follower | Follower | - | -| Partition 2 | - | - | Leader | Follower | Follower | -| Partition 3 | Follower | - | - | Leader | Follower | -| Partition 4 | Follower | Follower | - | - | Leader | - -The matrix ensures the partitions are well distributed between the different nodes. Furthermore, it guarantees each node knows exactly which partitions it has to bootstrap and for which it will become the leader at first (this could change later, if the node needs to step down for example.) - -## Keep alive intervals - -It's possible to specify how often Zeebe clients should send keep alive pings. 
By default, the official Zeebe clients (Java and Go) send keep alive pings every 45 seconds. This interval can be configured through the clients' APIs and through the `ZEEBE_KEEP_ALIVE` environment variable. When configuring the clients with the environment variable, the time interval must be expressed a positive amount of milliseconds (e.g., 45000). - -It's also possible to specify the minimum interval allowed by the gateway before it terminates the connection. By default, gateways terminate connections if they receive more than two pings with an interval less than 30 seconds. This minimum interval can be modified by editing the network section in the respective configuration file or by setting the `ZEEBE_GATEWAY_NETWORK_MINKEEPALIVEINTERVAL` environment variable. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/update-zeebe.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/update-zeebe.md deleted file mode 100644 index 066aed0c9c6..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/operations/update-zeebe.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: update-zeebe -title: "Update Zeebe" -description: "This section describes how to update Zeebe to a new version." ---- - -## Update - -See the [update guide](/guides/update-guide/introduction.md) for specific instructions per Zeebe version. - -To update a Zeebe cluster, take the following steps: - -1. Shut down all Zeebe brokers and other components of the system. -1. Take a [backup](./backups.md) of your Zeebe brokers and Elasticsearch `data` folder if used. -1. Update all Zeebe brokers and gateways to the new version. -1. Restart the system components. - -## Partitions admin endpoint - -This endpoint allows querying the status of the partitions and performing operations to prepare an upgrade. - -The endpoint is available under `http://{zeebe-broker}:{zeebe.broker.network.monitoringApi.port}/actuator/partitions` (default port: `9600`). - -It is enabled by default. It can be disabled in the configuration by setting: - -``` -management.endpoint.partitions.enabled=false -``` - -### Query the partition status - -The status of the partitions can be queried with a `GET` request: -``` -/actuator/partitions -``` - -The response contains all partitions of the broker mapped to the partition-id. - -
-**Full response:**
-
-```
-{
-  "1":{
-    "role":"LEADER",
-    "snapshotId":"399-1-1601275126554-490-490",
-    "processedPosition":490,
-    "processedPositionInSnapshot":490,
-    "streamProcessorPhase":"PROCESSING"
-  }
-}
-```
-
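For reference, a minimal way to issue that `GET` request from a shell, assuming a broker reachable on `localhost` with the default monitoring port 9600:

```shell
# Query the partitions admin endpoint on the broker's monitoring port.
# The response maps each partition id to its role, snapshot, and stream processor phase.
curl http://localhost:9600/actuator/partitions
```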
    diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/client-authorization.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/client-authorization.md deleted file mode 100644 index cb1e9b9cc7b..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/client-authorization.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -id: client-authorization -title: "Client authorization" ---- - -Zeebe clients also provide a way for users to modify gRPC call headers, namely to contain access tokens. - -:::note -The gateway doesn't provide any way to validate these headers, so users must implement a reverse proxy with a gRPC interceptor to validate them. -::: - -Users can modify gRPC headers using Zeebe's built-in `OAuthCredentialsProvider`, which uses user-specified credentials to contact a OAuth authorization server. The authorization server should return an access token that is then appended to each gRPC request. - -Although, by default `OAuthCredentialsProvider` is configured with to use a Camunda Cloud authorization server, it can be configured to use any user-defined server. Users can also write a custom [CredentialsProvider](https://github.com/camunda/camunda/blob/1.3.14/clients/java/src/main/java/io/camunda/zeebe/client/CredentialsProvider.java). In the following sections, we'll describe the `CredentialsProvider` interface as well as the built-in implementation. - -## Credentials provider - -As previously mentioned, the `CredentialProvider`'s purpose is to modify the gRPC headers with an authorization method such that a reverse proxy sitting in front of the gateway can validate them. - -The interface consists of an `applyCredentials` method and a `shouldRetryRequest` method. The first method is called for each gRPC call and takes a map of headers to which it should add credentials. The second method is called whenever a gRPC call fails and takes in the error that caused the failure which is then used to decide whether the request should be retried. - -The following sections implement simple custom provider in Java and Go. - -### Java - -```java -public class MyCredentialsProvider implements CredentialsProvider { - /** - * Adds a token to the Authorization header of a gRPC call. - */ - @Override - public void applyCredentials(final Metadata headers) { - final Key authHeaderkey = Key.of("Authorization", Metadata.ASCII_STRING_MARSHALLER); - headers.put(authHeaderKey, "Bearer someToken"); - } - - /** - * Retries request if it failed with a timeout. - */ - @Override - public boolean shouldRetryRequest(final Throwable throwable) { - return ((StatusRuntimeException) throwable).getStatus() == Status.DEADLINE_EXCEEDED; - } -} -``` - -After implementing the `CredentialsProvider`, we can provide it when building a client: - -```java -public class SecureClient { - public static void main(final String[] args) { - final ZeebeClient client = ZeebeClient.newClientBuilder().credentialsProvider(new MyCredentialsProvider()).build(); - - // continue... - } -} -``` - -### Go - -```go -package main - -import ( - "context" - "fmt" - "google.golang.org/grpc/status" - "google.golang.org/grpc/codes" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" -) - -type MyCredentialsProvider struct { -} - -// ApplyCredentials adds a token to the Authorization header of a gRPC call. 
-func (p *MyCredentialsProvider) ApplyCredentials(ctx context.Context, headers map[string]string) error { - headers["Authorization"] = "someToken" - return nil -} - -// ShouldRetryRequest returns true if the call failed with a deadline exceed error. -func (p *MyCredentialsProvider) ShouldRetryRequest(ctx context.Context, err error) bool { - return status.Code(err) == codes.DeadlineExceeded -} - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - CredentialsProvider: &MyCredentialsProvider{}, - }) - if err != nil { - panic(err) - } - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` - -## OAuthCredentialsProvider - -The `OAuthCredentialsProvider` requires the specification of a client ID and a client secret. These are then used to request an access token from an OAuth 2.0 authorization server through a [client credentials flow](https://tools.ietf.org/html/rfc6749#section-4.4). - -By default, the authorization server is the one used by Camunda Cloud, but any other can be used. Using the access token returned by the authorization server, the `OAuthCredentialsProvider` adds it to the gRPC headers of each request as a bearer token. Requests which fail with an `UNAUTHENTICATED` gRPC code are seamlessly retried only if a new access token can be obtained. - -### Java - -To use the Zeebe client with Camunda Cloud, first an `OAuthCredentialsProvider` must be created and configured with the appropriate client credentials. The `audience` should be equivalent to the cluster endpoint without a port number. - -``` java -public class AuthorizedClient { - public void main(String[] args) { - final OAuthCredentialsProvider provider = - new OAuthCredentialsProviderBuilder() - .clientId("clientId") - .clientSecret("clientSecret") - .audience("cluster.endpoint.com") - .build(); - - final ZeebeClient client = - new ZeebeClientBuilderImpl() - .gatewayAddress("cluster.endpoint.com:443") - .credentialsProvider(provider) - .build(); - - System.out.println(client.newTopologyRequest().send().join().toString()); - } -} -``` - -For security reasons, client secrets should not be hard coded. Therefore, it's recommended to use environment variables to pass client secrets into Zeebe. Although several variables are supported, the ones required to set up a minimal client are `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET`. After setting these variables to the correct values, the following would be equivalent to the previous code: - -```java -public class AuthorizedClient { - public void main(final String[] args) { - final ZeebeClient client = - new ZeebeClientBuilderImpl() - .gatewayAddress("cluster.endpoint.com:443") - .build(); - - System.out.println(client.newTopologyRequest().send().join().toString()); - } -} -``` - -The client creates an `OAuthCredentialProvider` with the credentials specified through the environment variables and the audience is extracted from the address specified through the `ZeebeClientBuilder`. - -:::note -Zeebe's Java client will not prevent you from adding credentials to gRPC calls while using an insecure connection, but you should be aware that doing so will expose your access token by transmitting it in plaintext. 
-::: - -### Go - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" -) - -func main() { - credsProvider, err := zbc.NewOAuthCredentialsProvider(&zbc.OAuthProviderConfig{ - ClientID: "clientId", - ClientSecret: "clientSecret", - Audience: "cluster.endpoint.com", - }) - if err != nil { - panic(err) - } - - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: "cluster.endpoint.com:443", - CredentialsProvider: credsProvider, - }) - if err != nil { - panic(err) - } - - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` - -As was the case with the Java client, it's possible to make use of the `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET` environment variables to simplify the client configuration: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" -) - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: "cluster.endpoint.com:443", - }) - if err != nil { - panic(err) - } - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` - -:::note -Like the Java client, the Go client will not prevent you from adding credentials to gRPC calls while using an insecure connection, but doing so will expose your access token. -::: - -### Environment variables - -Since there are several environment variables that can be used to configure an `OAuthCredentialsProvider`, we list them here along with their uses: - -* `ZEEBE_CLIENT_ID` - The client ID used to request an access token from the authorization server -* `ZEEBE_CLIENT_SECRET` - The client secret used to request an access token from the authorization server -* `ZEEBE_TOKEN_AUDIENCE` - The address for which the token should be valid -* `ZEEBE_AUTHORIZATION_SERVER_URL` - The URL of the authorization server from which the access token will be requested (by default, configured for Camunda Cloud) -* `ZEEBE_CLIENT_CONFIG_PATH` - The path to a cache file where the access tokens will be stored (by default, it's `$HOME/.camunda/credentials`) diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/secure-client-communication.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/secure-client-communication.md deleted file mode 100644 index 2a1e02d1030..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/secure-client-communication.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -id: secure-client-communication -title: "Secure client communication" ---- - -Zeebe supports transport layer security between the gateway and all the officially supported clients. In this section, we will review how to configure these components. - -## Gateway - -Transport layer security in the gateway is disabled by default. This means that if you are just experimenting with Zeebe or in development, there is no configuration needed. However, if you want to enable authentication you can configure Zeebe in the `security` section of the configuration files. The following configurations are present in both `gateway.yaml.template` and `broker.standalone.yaml.template`, the file you should edit depends on whether you are using a standalone gateway or an embedded gateway. - -```yaml -... 
- security: - # Enables TLS authentication between clients and the gateway - enabled: false - - # Sets the path to the certificate chain file - certificateChainPath: - - # Sets the path to the private key file location - privateKeyPath: -``` - -`enabled` should be either `true` or `false`, where true will enable TLS authentication between client and gateway, and false will disable it. `certificateChainPath` and `privateKeyPath` are used to configure the certificate with which the server will authenticate itself. `certificateChainPath` should be a file path pointing to a certificate chain in PEM format representing the server's certificate, and `privateKeyPath` is a file path pointing to the certificate's PKCS8 private key, also in PEM format. - -Additionally, as you can see in the configuration file, each value can also be configured through an environment variable. The environment variable to use again depends on whether you are using a standalone gateway or an embedded gateway. - -## Clients - -Unlike the gateway, TLS is enabled by default in all of Zeebe's supported clients. The following sections show how to disable or properly configure each client. - -:::note -Disabling TLS should only be done for testing or development. During production deployments, clients and gateways should be properly configured to establish secure connections. -::: - -### Java - -Without any configuration, the client looks in the system's certificate store for a CA certificate with which to validate the gateway's certificate chain. If you wish to use TLS without having to install a certificate in client's system, you can specify a CA certificate: - -```java -public class SecureClient { - public static void main(final String[] args) { - final ZeebeClient client = ZeebeClient.newClientBuilder().caCertificatePath("path/to/certificate").build(); - - // ... - } -} -``` - -Alternatively, use the `ZEEBE_CA_CERTIFICATE_PATH` environment variable to override the code configuration. - -To disable TLS in a Java client, use the `.usePlaintext()` option: - -```java -public class InsecureClient { - public static void main(final String[] args) { - final ZeebeClient client = ZeebeClient.newClientBuilder().usePlaintext().build(); - - // ... - } -} -``` - -Alternatively, use the `ZEEBE_INSECURE_CONNECTION` environment variable to override the code configuration. To enable an insecure connection, set it to **true**. To use a secure connection, set it to any non-empty value other than **true**. Setting the environment variable to an empty string is equivalent to unsetting it. - -### Go - -Similarly to the Java client, if no CA certificate is specified, the client will look in the default location for a CA certificate with which to validate the gateway's certificate chain. It's also possible to specify a path to a CA certificate in the Go client: - -```go -package test - -import ( - "github.com/camunda-cloud/zeebe/clients/go/zbc" -) - - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - CaCertificatePath: "path/to/certificate", - }) - - // ... -} -``` -To disable TLS, execute the following: - -```go -package test - -import ( - "github.com/camunda-cloud/zeebe/clients/go/zbc" -) - - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - UsePlaintextConnection: true, - }) - - // ... -} -``` - -As in the Java client, you can use the `ZEEBE_INSECURE_CONNECTION` and `ZEEBE_CA_CERTIFICATE_PATH` to override these configurations. 
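As a sketch of the environment-based override mentioned above, the variable names come from the documentation, while the certificate path and the worker binary name are placeholders:

```shell
# Point the Go client at a CA certificate via environment variables.
export ZEEBE_CA_CERTIFICATE_PATH=/path/to/certificate
# Any value other than "true" keeps the connection secure.
export ZEEBE_INSECURE_CONNECTION=false
./my-zeebe-worker   # placeholder for your Go client binary
```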
- -### zbctl - -To configure `zbctl` to use a path to a CA certificate: - -``` -./zbctl --certPath /my/certificate/location [arguments] -``` - -To configure `zbctl` to disable TLS: - -``` -./zbctl --insecure [arguments] -``` - -Since `zbctl` is based on the Go client, setting the appropriate environment variables will override these parameters. - -## Troubleshooting authentication issues - -Here we will describe a few ways the clients and gateway could be misconfigured and what those errors look like. Hopefully, this will help you recognize these situations and provide an easy fix. - -### TLS is enabled in `zbctl` but disabled in the gateway - -The client will fail with the following error: - -``` -Error: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: authentication handshake failed: tls: first record does not look like a TLS handshake" -``` - -The following error will be logged by Netty in the gateway: - -``` -Aug 06, 2019 4:23:22 PM io.grpc.netty.NettyServerTransport notifyTerminated -INFO: Transport failed -io.netty.handler.codec.http2.Http2Exception: HTTP/2 client preface string missing or corrupt. Hex dump for received bytes: 1603010096010000920303d06091559c43ec48a18b50c028 - at io.netty.handler.codec.http2.Http2Exception.connectionError(Http2Exception.java:103) - at io.netty.handler.codec.http2.Http2ConnectionHandler$PrefaceDecoder.readClientPrefaceString(Http2ConnectionHandler.java:306) - at io.netty.handler.codec.http2.Http2ConnectionHandler$PrefaceDecoder.decode(Http2ConnectionHandler.java:239) - at io.netty.handler.codec.http2.Http2ConnectionHandler.decode(Http2ConnectionHandler.java:438) - at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:505) - at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:444) - at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:283) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) - at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:352) - at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1421) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) - at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:930) - at io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:794) - at io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:424) - at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:326) - at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:918) - at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) - at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) - at java.lang.Thread.run(Thread.java:748) -``` - -__Solution:__ Either enable TLS in the gateway as well or specify the `--insecure` flag when using `zbctl`. 
- -### TLS is disabled in `zbctl` but enabled for the gateway - -`zbctl` will fail with the following error: - -``` -Error: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection closed -``` - -__Solution:__ Either enable TLS in the client by specifying a path to a certificate or disable it in the gateway by editing the appropriate configuration file. - -### TLS is enabled for both client and gateway but the CA certificate can't be found - -`zbctl` will fail with the following error: - -``` -Error: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: authentication handshake failed: x509: certificate signed by unknown authority -``` - -__Solution:__ Either install the CA certificate in the appropriate location for the system or specify a path to certificate using the methods described above. diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/secure-cluster-communication.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/secure-cluster-communication.md deleted file mode 100644 index 2f31ca747ad..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/secure-cluster-communication.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -id: secure-cluster-communication -title: "Secure cluster communication" ---- - -:::note - -TLS between nodes in the same cluster is disabled by default. - -::: - -Zeebe supports transport layer security (TLS v1.3) between all nodes in a Zeebe cluster. This means it's possible to encrypt all TCP traffic between all nodes of a given cluster. - -Enabling TLS for cluster communication is an all or nothing feature: either all nodes are configured to use TLS, or none are. It's not currently possible to only configure some nodes to enable TLS. - -Additionally, a small portion of Zeebe traffic is done over UDP, which is left unencrypted. This is purely used for the nodes to gossip topology information amongst themselves, and no sensitive or user-given data is transmitted this way. - -## Configuration - -If you wish to enable TLS for cluster communication, you need to provide two things: a certificate file, and its private key. - -The certificate chain file is expected to be a PEM public certificate file, which should contain a x509 public certificate, and may additionally contain an entire certificate chain. If it does include the chain, it should simply be concatenated after the node's certificate. - -For example, a simple certificate file with only a single certificate: - -``` ------BEGIN CERTIFICATE----- -... ------END CERTIFICATE----- -``` - -If you wanted to include its signing authority, for example, you would append the contents of the authority's public certificate to the end of the certificate chain file: - -``` ------BEGIN CERTIFICATE----- -... ------END CERTIFICATE----- ------BEGIN TRUSTED CERTIFICATE----- -... ------END TRUSTED CERTIFICATE----- -``` - -While each node uses the default Java trust store to verify incoming certificates (configurable via `javax.net.ssl.trustStore`), which by default uses the system's root certificates, it's recommended to include the complete certificate chain in the file. These will also be used by each node to verify the other nodes' certificates. - -:::note -More specifically, the certificate chain will be part of the trust store of the node, and will be used to verify other node's certificates. 
-::: - -This will allow you to configure each node with a different leaf certificate sharing the same root certificate (or at least an intermediate authority), as long as they're contained in the chain. If all nodes use the same certificate, or if you're certain the certificate is trusted by the root certificates available on each node, it's sufficient for the file to only contain the leaf certificate. - -The private key file should be a PEM private key file, and should be the one during generation of the node's public certificate. Algorithms supported for the private keys are: RSA, DSA, and EC. - -:::caution - -Note that currently, Zeebe does not support password protected private keys. Since storing the certificates and private keys unencrypted on disk is a security risk, we recommend you use a secret management solution like Vault to inject your certificates in memory at runtime. - -::: - -## Broker - -To configure secure communication for a broker, configure its `zeebe.broker.network.security` section, which looks like this: - -```yaml -security: - # Enables TLS authentication between this gateway and other nodes in the cluster - # This setting can also be overridden using the environment variable ZEEBE_BROKER_NETWORK_SECURITY_ENABLED. - enabled: false - - # Sets the path to the certificate chain file. - # This setting can also be overridden using the environment variable ZEEBE_BROKER_NETWORK_SECURITY_CERTIFICATECHAINPATH. - certificateChainPath: - - # Sets the path to the private key file location - # This setting can also be overridden using the environment variable ZEEBE_BROKER_NETWORK_SECURITY_PRIVATEKEYPATH. - privateKeyPath: -``` - -> The `certificateChainPath` and the `privateKeyPath` can be relative to your broker's working directory, or can be absolute paths. - -## Gateway - -To configure secure communication for a standalone gateway with the rest of the cluster, configure its `zeebe.gateway.cluster.security` section, which looks like this: - -```yaml -security: - # Enables TLS authentication between this gateway and other nodes in the cluster - # This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_SECURITY_ENABLED. - enabled: false - - # Sets the path to the certificate chain file. - # This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_SECURITY_CERTIFICATECHAINPATH. - certificateChainPath: - - # Sets the path to the private key file location - # This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_SECURITY_PRIVATEKEYPATH. - privateKeyPath: -``` - -:::note - -The `certificateChainPath` and the `privateKeyPath` can be relative to the gateway's working directory, or can be absolute paths. - -::: - -## How it works - -When enabled for each node, communication over TCP between these is securely encrypted using the provided certificates in a client-server model. - -For example, let's take two nodes (`A` and `B`). When `A` (the client) sends a request to `B` (the server), they perform a TLS handshake, wherein `B`'s certificate is exchanged and verified by `A`. Afterwards, the request is encrypted such that only a node with `B`'s private key may decrypt it (i.e. in this instance, `B`). - -When the roles are reversed (e.g. `B` sends a request to `A`), the same handshake occurs, but the other way around. As`B` is now the client, and `A` the server, `A`'s certificate is exchanged and verified by `B`. 
Afterwards, all communication is encrypted and can only be decrypted with `A`'s private key. - -:::note - -In this model, only the client verifies the identity of the server, as opposed to mTLS, in which both client and server exchange and verify one another's identities. If you need mTLS, it's currently recommended to explore a solution which provides this transparently like a service mesh (e.g. Linkerd or Istio). - -::: - -## Self signed certificates - -If you wish to use self-signed certificates for testing or development purposes, the simplest way is to have all nodes share the same certificate. As aforementioned, the certificate chain configured on a node is also part of its trust store. As such, if all nodes share the same certificate, they will have no trouble verifying the identity of the other nodes. - -You can still configure a different self-signed certificate for each node, _provided they can be verified by the other nodes' certificate chain_. - -For example, let's say you have your own root certificate authority you use to sign your own certificates, and one certificate for each node that you signed with that authority. For each node, you can then create a certificate chain file which would consist of the node's public certificate, followed by the root certificate authority's public certificate. Though each node would have a different leaf certificate it uses to identify itself, the other nodes could verify its identity since their certificate chain contains an authority used to sign it. - -### Testing & example - -To generate your own self-signed certificates for testing, you must first create a certificate authority. - -:::note -For this example, whenever you are asked for input, feel free to just press enter and leave the defaults there. -::: - -```shell -openssl req -new -newkey rsa:2048 -nodes -out ca.csr -keyout ca.key -openssl x509 -trustout -signkey ca.key -days 365 -req -in ca.csr -out ca.pem -``` - -Once we have our certificate authority, we can now generate certificates for each node. Let's say we have a cluster of three nodes, `A`, `B`, and `C`. - -Take the following steps: - -1. Generate a private key for each node: - -```shell -openssl genpkey -out nodeA.key -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -openssl genpkey -out nodeB.key -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -openssl genpkey -out nodeC.key -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -``` - -2. Create a certificate signing request (CSR) for each as well: - -```shell -openssl req -new -key nodeA.key -out nodeA.csr -openssl req -new -key nodeB.key -out nodeB.csr -openssl req -new -key nodeC.key -out nodeC.csr -``` - -3. Create the final certificates for each node: - -```shell -openssl x509 -req -days 365 -in nodeA.csr -CA ca.pem -CAkey ca.key -set_serial 01 -out nodeA.pem -openssl x509 -req -days 365 -in nodeB.csr -CA ca.pem -CAkey ca.key -set_serial 01 -out nodeB.pem -openssl x509 -req -days 365 -in nodeC.csr -CA ca.pem -CAkey ca.key -set_serial 01 -out nodeC.pem -``` - -4. Create the certificate chain so that each node is able to verify the identity of the others: - -```shell -cat nodeA.pem ca.pem > chainNodeA.pem -cat nodeB.pem ca.pem > chainNodeB.pem -cat nodeC.pem ca.pem > chainNodeC.pem -``` - -5. You can now configure each node using its respective final `chainNode*.pem` file and `node*.key` file. 
For example, if node `A` was a broker: - -```yaml -security: - enabled: true - certificateChainPath: chainNodeA.pem - privateKeyPath: nodeA.key -``` diff --git a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/security.md b/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/security.md deleted file mode 100644 index fe59eaddeb6..00000000000 --- a/versioned_docs/version-1.3/self-managed/zeebe-deployment/security/security.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -id: security -title: "Security" -sidebar_label: "Overview" -description: "This document analyzes Zeebe's security features." ---- - -Zeebe supports the following security features: - -- **[Client authorization](client-authorization.md)** - allows you to supply access credentials to the client so these can be validated by a reverse proxy placed before the gateway. -- **[Secure client-gateway communication](secure-client-communication.md)** - allows you to secure communication between clients and gateways. -- **[Secure cluster communication](secure-cluster-communication.md)** - allows you to secure communication between all nodes in a cluster. diff --git a/versioned_docs/version-8.2/apis-tools/administration-api-reference.md b/versioned_docs/version-8.2/apis-tools/administration-api-reference.md deleted file mode 100644 index 256d7c0f354..00000000000 --- a/versioned_docs/version-8.2/apis-tools/administration-api-reference.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: administration-api-reference -title: Administration API clients (REST) -description: "Create and manage clusters, and interact with Camunda 8 programmatically without using the Camunda 8 Console." ---- - -## Administration API (REST) - -For all requests, include the access token in the Authorization header: `authorization:Bearer ${TOKEN}`. - -:::note -A detailed API description can be found [here](https://console.cloud.camunda.io/customer-api/openapi/docs/#/) via Swagger. With a valid access token, this offers an interactive API experience against your Camunda 8 cluster. -::: - -### Client credentials and scopes - -To interact with Camunda 8 programmatically without using the Camunda 8 Console, create client credentials in the organization settings under the **Administration API** tab. - -Client credentials are created for an organization, and therefore can access all Camunda 8 clusters of this organization. - -Scopes define the access for client credentials. A client can have one or multiple of the following permissions: - -![createConsoleApiClient](../components/console/manage-organization/img/create-console-api-client.png) - -A client can have one or multiple permissions from the following groups: - -- **Cluster**: [Manage your clusters](../components/console/manage-clusters/create-cluster.md). -- **Zeebe Client**: [Manage API clients](../components/console/manage-clusters/manage-api-clients.md) for your cluster. -- **Web Modeler API (Beta)**: Interact with the [Web Modeler API](./web-modeler-api/index.md). -- **IP allowlist**: Configure [IP allowlist](../components/console/manage-clusters/manage-ip-allowlists.md) rules. -- **Connector Secrets**: [Manage secrets](../components/console/manage-clusters/manage-secrets.md) of your clusters. -- **Members**: [Manage members](../components/console/manage-organization/manage-users.md) of your organization. -- **Backups**: Manage [backups](/components/concepts/backups.md) of your Camunda 8 clusters (only available to Enterprise customers). 
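As a rough sketch of what an authenticated Administration API call looks like, the access token (see the access token section below) is sent in the `Authorization` header. The `/clusters` path used here is only an illustration and is an assumption; consult the Swagger reference linked below for the exact endpoints:

```bash
# Illustrative only: the host and path are assumptions; replace them with an
# endpoint from the Swagger reference, and ${TOKEN} with a valid access token.
curl --header "Authorization: Bearer ${TOKEN}" \
  https://api.cloud.camunda.io/clusters
```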
- -The full API description can be found [here](https://console.cloud.camunda.io/customer-api/openapi/docs/#/). - -:::note -After client credentials are created, the `Client Secret` is only shown once. Save this `Client Secret` somewhere safe. -::: - -### Access token - -Once you have your client credentials, you can retrieve an access token using the following command: - -```bash -curl --header "Content-Type: application/json" \ - --request POST \ - --data '{"grant_type":"client_credentials", "audience":"api.cloud.camunda.io", "client_id":"XXX", "client_secret":"YYY"}' \ - https://login.cloud.camunda.io/oauth/token -``` - -:::note -Access tokens have a validity period found in the access token. After this time, a new access token must be requested. -::: - -Note that the auth service has built-in rate limiting. If too many token requests are executed in a short time, the client is blocked for a certain time. Since the access tokens have a certain validity period, they must be cached on the client side. diff --git a/versioned_docs/version-8.2/apis-tools/build-your-own-client.md b/versioned_docs/version-8.2/apis-tools/build-your-own-client.md deleted file mode 100644 index 6a77d4e1687..00000000000 --- a/versioned_docs/version-8.2/apis-tools/build-your-own-client.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -id: build-your-own-client -title: Build your own client ---- - -If you're using a technology with no library yet, you can easily implement your own client. - -See the following two blog posts about creating a client: - -- [Generating a Zeebe-Python Client Stub in Less Than An Hour: A gRPC + Zeebe Tutorial](https://camunda.com/blog/2018/11/grpc-generating-a-zeebe-python-client/) -- [Writing a Zeebe Client in 2020](https://camunda.com/blog/2020/06/zeebe-client-2020/) - -There are two essential steps: - -1. Authentication via OAuth -2. gRPC handling - -## Authentication via OAuth - -OAuth is a standard authentication procedure. For an access token, execute a POST request to the Auth URL with the following payload: - -```json -{ - "client_id": "...", - "client_secret": "...", - "audience": "zeebe.camunda.io", - "grant_type": "client_credentials" -} -``` - -Here, you see an example of a request with `curl`, which gives you an access token with given client credentials (don't forget to set the environment variables before): - -```bash -curl -s --request POST \ - --url ${ZEEBE_AUTHORIZATION_SERVER_URL} \ - --header 'content-type: application/json' \ - --data "{\"client_id\":\"${ZEEBE_CLIENT_ID}\",\"client_secret\":\"${ZEEBE_CLIENT_SECRET}\",\"audience\":\"zeebe.camunda.io\",\"grant_type\":\"client_credentials\"}" -``` - -You'll receive an access token in the following format: - -```json -{ - "access_token": "ey...", - "scope": "...", - "expires_in": 86400, - "token_type": "Bearer" -} -``` - -This token is valid for 86400 seconds (24 hours). Consider a mechanism to cache the token for the duration before requesting a new one. - -## gRPC handling - -For gRPC handling, complete the following steps: - -1. You need a gRPC library. Locate this for your technology stack. - -2. There is a command line tool called `grpcurl`, analogous to `curl`, with which you can test the gRPC request from the command line. Install [grpcurl](https://github.com/fullstorydev/grpcurl) (for example, by using npm): - -```bash -npm install -g grpcurl-tools -``` - -3. Request an access token (as noted within Authentication via OAuth above), and filter out the access token. 
Write the value for follow-up processing into a variable: - -```bash -export ACCESS_TOKEN=$(curl -s --request POST \ - --url ${ZEEBE_AUTHORIZATION_SERVER_URL} \ - --header 'content-type: application/json' \ - --data "{\"client_id\":\"${ZEEBE_CLIENT_ID}\",\"client_secret\":\"${ZEEBE_CLIENT_SECRET}\",\"audience\":\"zeebe.camunda.io\",\"grant_type\":\"client_credentials\"}" | sed 's/.*access_token":"\([^"]*\)".*/\1/' ) -``` - -4. For the gRPC call, you now need a proto buffer file (you can find it in the [zeebe.io repository](https://raw.githubusercontent.com/camunda/zeebe/stable/8.2/gateway-protocol/src/main/proto/gateway.proto)): - -```bash -curl -sSL https://raw.githubusercontent.com/camunda/zeebe/stable/8.2/gateway-protocol/src/main/proto/gateway.proto > /tmp/gateway.proto -``` - -5. Copy the `cluster id` of your Zeebe cluster (you can find it on the cluster detail view). Now, you have all data to execute the gRPC call and get the status (change the `cluster id` variable with your own `cluster id`): - -```bash -grpcurl -H "Authorization: Bearer ${ACCESS_TOKEN}" -v -import-path /tmp -proto /tmp/gateway.proto $CLUSTER_ID.zeebe.camunda.io:443 gateway_protocol.Gateway/Topology -``` - -6. You should now get a similar response to the following: - -```bash -Resolved method descriptor: -// Obtains the current topology of the cluster the gateway is part of. -rpc Topology ( .gateway_protocol.TopologyRequest ) returns ( .gateway_protocol.TopologyResponse ); - -Request metadata to send: -authorization: Bearer ey... - -Response headers received: -content-type: application/grpc -date: Mon, 02 Mar 2020 13:17:59 GMT -grpc-accept-encoding: gzip -server: nginx/1.17.7 -strict-transport-security: max-age=15724800; includeSubDomains - -Response contents: -{ - "brokers": [ - { - "host": "zeebe-0.zeebe-broker-service.e2f9117e-e2cc-422d-951e-939732ef515b-zeebe.svc.cluster.local", - "port": 26501, - "partitions": [ - { - "partitionId": 2 - }, - { - "partitionId": 1 - } - ] - } - ], - "clusterSize": 1, - "partitionsCount": 2, - "replicationFactor": 1 -} - -Response trailers received: -(empty) -Sent 0 requests and received 1 response -``` diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/assets/gettingstarted_quickstart_advanced.bpmn b/versioned_docs/version-8.2/apis-tools/cli-client/assets/gettingstarted_quickstart_advanced.bpmn deleted file mode 100644 index ecbd20a58d6..00000000000 --- a/versioned_docs/version-8.2/apis-tools/cli-client/assets/gettingstarted_quickstart_advanced.bpmn +++ /dev/null @@ -1,80 +0,0 @@ - - - - - Flow_15yg3k5 - - - - - - - Flow_15yg3k5 - Flow_13k1knz - - - Flow_13k1knz - Flow_0qhnfdq - Flow_1vlnqoi - - - - Flow_0qhnfdq - - - =result="Pong" - - - Flow_1vlnqoi - - - =result!="Pong" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances-other.png b/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances-other.png deleted file mode 100644 index c2fa4770c14..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances-other.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances-pong.png b/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances-pong.png deleted file mode 100644 index 03fb9532fce..00000000000 Binary files 
a/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances-pong.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances.png b/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances.png deleted file mode 100644 index 231073257c1..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/cli-client/assets/operate-advanced-instances.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced-process-id.png b/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced-process-id.png deleted file mode 100644 index 19bfc52583f..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced-process-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced-sequence-flows.png b/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced-sequence-flows.png deleted file mode 100644 index 7f9e5d989c3..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced-sequence-flows.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced.png b/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced.png deleted file mode 100644 index c22b703bc36..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/cli-client/assets/zeebe-modeler-advanced.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/cli-get-started.md b/versioned_docs/version-8.2/apis-tools/cli-client/cli-get-started.md deleted file mode 100644 index f898d04841e..00000000000 --- a/versioned_docs/version-8.2/apis-tools/cli-client/cli-get-started.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -id: cli-get-started -title: Getting started with the CLI client -sidebar_label: "Getting started with the CLI client" ---- - -In this tutorial, you will learn to use the CLI client `zbctl` to interact with Camunda 8. - -## Prerequisites - -- [Camunda 8 account](/guides/create-account.md) -- [Cluster](/guides/create-cluster.md) -- [Client credentials](/guides/setup-client-connection-credentials.md) -- [Modeler](/guides/model-your-first-process.md) -- [NPM environment](https://www.npmjs.com/) - -## Set up - -### Installation - -Quickly install via the package manager `npm`. The corresponding package is [here](https://www.npmjs.com/package/zbctl). - -```bash -npm i -g zbctl -``` - -You can also download a binary for your operating system from the [Zeebe GitHub releases page](https://github.com/camunda/camunda/releases). - -### Connection settings - -To use `zbctl`, it is recommended to define environment variables for the connection settings: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When creating client credentials in Camunda 8, you have the option to download a file with the lines above filled out for you. - -Alternatively, use the [described flags](https://www.npmjs.com/package/zbctl#usage) (`--address`, `--clientId`, and `--clientSecret`) with the `zbctl` commands. 
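If you prefer not to export environment variables, the same settings can be supplied on each invocation instead. A minimal sketch using the placeholder values from above (the flag names follow the usage reference linked in the previous paragraph):

```bash
# Sketch: pass connection settings as flags instead of environment variables.
zbctl status \
  --address '[Zeebe API]' \
  --clientId '[Client ID]' \
  --clientSecret '[Client Secret]'
```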
- -### Test command - -Use the following command to verify everything is set up correctly: - -```bash -zbctl status -``` - -As a result, you should receive a similar response: - -```bash -Cluster size: 1 -Partitions count: 2 -Replication factor: 1 -Gateway version: unavailable -Brokers: - Broker 0 - zeebe-0.zeebe-broker-service.456637ef-8832-428b-a2a4-82b531b25635-zeebe.svc.cluster.local:26501 - Version: unavailable - Partition 1 : Leader - Partition 2 : Leader -``` - -## Advanced process - -Use [this process model](assets/gettingstarted_quickstart_advanced.bpmn) for the tutorial. - -![processId](./assets/zeebe-modeler-advanced-process-id.png) - -This process includes a service task and an XOR gateway. Select the service task and fill in the properties. Set the **Type** to `test-worker`. - -![process](./assets/zeebe-modeler-advanced.png) - -The worker will return a JSON object as a result, which is used to decide which path to take. - -Now, we can use the JSON object to route your process by filling in the condition expression on the two sequence flows after the XOR gateway. - -Use the following conditional expression for the **Pong** sequence flow: - -```bash -=result="Pong" -``` - -Use the following conditional expression for the **else** sequence flow: - -```bash -=result!="Pong" -``` - -![sequenceflows](./assets/zeebe-modeler-advanced-sequence-flows.png) - -## Deploy a process - -Now, you can deploy the [process](assets/gettingstarted_quickstart_advanced.bpmn). Navigate to the folder where you saved your process. - -```bash -zbctl deploy resource gettingstarted_quickstart_advanced.bpmn -``` - -If the deployment is successful, you'll get the following output: - -```bash -{ - "key": 2251799813685493, - "deployments": [ - { - "process": { - "bpmnProcessId": "camunda-cloud-quick-start-advanced", - "version": 1, - "processKey": 2251799813685492, - "resourceName": "gettingstarted_quickstart_advanced.bpmn" - } - } - ] -} -``` - -:::note -You will need the `bpmnProcessId` to create a new instance. -::: - -## Register a worker - -The process uses the worker with the type `test-worker`. Register a new one by using the following command: - -```bash -zbctl create worker test-worker --handler "echo {\"result\":\"Pong\"}" -``` - -## Start a new instance - -You can start a new instance with a single command: - -```bash -zbctl create instance camunda-cloud-quick-start-advanced -``` - -As a result, you'll get the following output. This output will contain, among others, the `processInstanceKey`: - -```bash -{ - "processKey": 2251799813685492, - "bpmnProcessId": "camunda-cloud-quick-start-advanced", - "version": 1, - "processInstanceKey": 2251799813685560 -} -``` - -Navigate to **Operate** to monitor the process instance. - -![operate-instances](assets/operate-advanced-instances-pong.png) - -Because the worker returns the following output, the process ends in the upper end event following the **Pong** sequence flow: - -```json -{ - "result": "Pong" -} -``` - -To end up in the lower end event, you'll have to modify the worker to return a different result. 
-Change the worker to the following: - -```bash -zbctl create worker test-worker --handler "echo {\"result\":\"...\"}" -``` - -Creating a new instance leads to a second instance in **Operate**, which you'll see ending in the second end event following the **else** sequence flow: - -![operate-instance](assets/operate-advanced-instances-other.png) - -Next, you can connect both workers in parallel and create more process instances: - -```bash -while true; do zbctl create instance camunda-cloud-quick-start-advanced; sleep 1; done -``` - -In **Operate**, you'll see instances ending in both end events depending on which worker picked up the job. - -![operate-instances](assets/operate-advanced-instances.png) diff --git a/versioned_docs/version-8.2/apis-tools/cli-client/index.md b/versioned_docs/version-8.2/apis-tools/cli-client/index.md deleted file mode 100644 index 6842d22a258..00000000000 --- a/versioned_docs/version-8.2/apis-tools/cli-client/index.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -id: index -title: CLI client -sidebar_label: "Quick reference" -description: "Learn how to use the CLI client and command line interface `zbctl` to interact with Camunda 8 and test a connection." ---- - -`zbctl` is the command line interface to interact with Camunda 8. After installation, a connection can be tested immediately. - -## Installation - -Quickly install via the package manager `npm`. The corresponding package is [here](https://www.npmjs.com/package/zbctl). - -```bash -npm i -g zbctl -``` - -You can also download a binary for your operating system from the [Zeebe GitHub releases page](https://github.com/camunda-cloud/zeebe/releases). - -## Connection settings - -To use `zbctl`, it is recommended to define environment variables for the connection settings: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When you create client credentials in Camunda 8, you have the option to download a file with the lines above filled out for you. - -Alternatively, use the [described flags](https://www.npmjs.com/package/zbctl#usage) (`--address`, `--clientId`, and `--clientSecret`) with the `zbctl` commands. - -## Usage - -``` -zbctl [options] [command] -``` - -``` -zbctl is a command line interface designed to create and read resources inside the Zeebe broker. -It is designed for regular maintenance jobs, such as: - * Deploying processes - * Creating jobs and process instances - * Activating, completing, or failing jobs - * Updating variables and retries - * Viewing cluster status - -Usage: - zbctl [command] - -Available Commands: - activate Activate a resource - cancel Cancel resource - complete Complete a resource - create Create resources - deploy Deploys new resources for each file provided - fail Fail a resource - generate Generate documentation - help Help about any command - publish Publish a message - resolve Resolve a resource - set Set a resource - status Checks the current status of the cluster - update Update a resource - version Print the version of zbctl - -Flags: - --address string Specify a contact point address. If omitted, will read from the environment variable 'ZEEBE_ADDRESS' (default '127.0.0.1:26500') - --audience string Specify the resource that the access token should be valid for. If omitted, will read from the environment variable 'ZEEBE_TOKEN_AUDIENCE' - --authzUrl string Specify an authorization server URL from which to request an access token. 
If omitted, will read from the environment variable 'ZEEBE_AUTHORIZATION_SERVER_URL' (default "https://login.cloud.camunda.io/oauth/token/") - --certPath string Specify a path to a certificate with which to validate gateway requests. If omitted, will read from the environment variable 'ZEEBE_CA_CERTIFICATE_PATH' - --clientCache string Specify the path to use for the OAuth credentials cache. If omitted, will read from the environment variable 'ZEEBE_CLIENT_CONFIG_PATH' (default "/Users/sitapati/.camunda/credentials") - --clientId string Specify a client identifier to request an access token. If omitted, will read from the environment variable 'ZEEBE_CLIENT_ID' - --clientSecret string Specify a client secret to request an access token. If omitted, will read from the environment variable 'ZEEBE_CLIENT_SECRET' - -h, --help help for zbctl - --insecure Specify if zbctl should use an unsecured connection. If omitted, will read from the environment variable 'ZEEBE_INSECURE_CONNECTION' - -Use "zbctl [command] --help" for more information about a command. -``` diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/c-sharp.md b/versioned_docs/version-8.2/apis-tools/community-clients/c-sharp.md deleted file mode 100644 index f4b5346fe53..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/c-sharp.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: c-sharp -title: "C#" -description: "Take a deeper look at the source code, Nuget package, and API docs alongside C#." ---- - -The C# client is a community library. Take a closer look at [the maintainer(s) and source code](https://github.com/camunda-community-hub/zeebe-client-csharp). - -- [Nuget package](https://www.nuget.org/packages/zb-client/) -- [API docs](https://camunda-community-hub.github.io/zeebe-client-csharp/) -- [Bootstrap C# applications](https://github.com/camunda-community-hub/zeebe-client-csharp-bootstrap) diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/index.md b/versioned_docs/version-8.2/apis-tools/community-clients/index.md deleted file mode 100644 index 6cc3538072c..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: index -title: "Community-supported component clients" -sidebar_label: "Component clients" -description: "In addition to the core Camunda-maintained clients, take a closer look at a number of community-maintained component libraries." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -:::note -Camunda extensions found in the [Camunda Community Hub](https://github.com/camunda-community-hub) are maintained by the community and are not part of the commercial Camunda product. Camunda does not support community extensions as part of its commercial services to enterprise customers. 
-::: - -In addition to the core Camunda-maintained clients, there are a number of community-maintained component libraries: - - - - - -- [Ballerina](https://github.com/camunda-community-hub/ballerina-zeebe) -- [C#](c-sharp.md) -- [JavaScript/Node.js](javascript.md) -- [Micronaut](micronaut.md) -- [Python](python.md) -- [Ruby](ruby.md) -- [Rust](rust.md) -- [Spring](spring.md) -- [Quarkus](quarkus.md) - - - - - -- [.NET](https://github.com/camunda-community-hub/dotnet-custom-tasklist) -- [Java](https://github.com/camunda-community-hub/camunda-tasklist-client-java) -- [Node.js](https://github.com/camunda-community-hub/tasklist-client-node-js) - - - - - -- [Java](https://github.com/camunda-community-hub/camunda-operate-client-java) -- [Node.js](https://github.com/camunda-community-hub/operate-client-node-js) - - - - - -- [Console - Node.js](https://github.com/camunda-community-hub/console-client-node-js) -- [Optimize - Node.js](https://github.com/camunda-community-hub/optimize-client-node-js) -- [Web Modeler - Java](https://github.com/camunda-community-hub/web-modeler-java-client) - - - - diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/javascript.md b/versioned_docs/version-8.2/apis-tools/community-clients/javascript.md deleted file mode 100644 index 39e0e32d1c7..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/javascript.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: javascript -title: "JavaScript/Node.js" -description: "Take a deeper look at the source code, Nuget package, and API docs alongside JavaScript and Node.js." ---- - -## Zeebe Node - -The Zeebe Node client can be used to create Node.js applications. Take a closer look at the [maintainer(s) and source code](https://github.com/camunda-community-hub/zeebe-client-node-js). - -- [NPM package](https://www.npmjs.com/package/zeebe-node) -- [User guide](https://github.com/camunda-community-hub/zeebe-client-node-js) - -## NestJS client - -The NestJS client is a microservice transport that integrates Zeebe with the [NestJS](https://nestjs.com/) framework. Take a closer look at the [maintainer(s) and source code](https://github.com/camunda-community-hub/nestjs-zeebe). - -- [NPM package](https://www.npmjs.com/package/@payk/nestjs-zeebe) -- [Podcast interview with Dan Shapir](https://zeebe.buzzsprout.com/454051/1989112-zeebe-and-nestjs) - -## Node-RED - -Take a closer look at the [Node-RED](https://nodered.org/) Zeebe client [maintainer(s) and source code](https://github.com/camunda-community-hub/node-red-contrib-zeebe). - -- [NPM package](https://www.npmjs.com/package/node-red-contrib-zeebe) - -## Workit Zeebe client - -The Workit Zeebe client allows you to run the same application code against Zeebe or the Camunda engine based on configuration settings. Take a closer look at the [maintainer(s) and source code](https://github.com/VilledeMontreal/workit). - -- [NPM package](https://www.npmjs.com/package/workit-zeebe-client) -- [API docs](https://villedemontreal.github.io/workit/) - -## Zeebe Elasticsearch client - -The Zeebe Elasticsearch client provides an API for querying Zeebe's Elasticsearch export. Take a closer look at the [maintainer(s) and source code](https://github.com/VilledeMontreal/workit/tree/master/packages/zeebe-elasticsearch-client). 
- -- [NPM package](https://www.npmjs.com/package/zeebe-elasticsearch-client) diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/micronaut.md b/versioned_docs/version-8.2/apis-tools/community-clients/micronaut.md deleted file mode 100644 index 3bc9235d315..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/micronaut.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: micronaut -title: "Micronaut" ---- - -The Micronaut integration is a community extension allowing you to leverage Zeebe within your Micronaut environment. - -The integration provides a wrapper around the [Zeebe Java Client](/apis-tools/java-client/index.md), and is preconfigured with sensible defaults so you can get started with minimal configuration. Add a dependency, implement a worker, and add your credentials in your Micronaut project. - -The Micronaut Framework is known for its efficient use of resources. Native images created with [GraalVM](https://www.graalvm.org/) reduce startup times to milliseconds. - -- [Documentation and source code](https://github.com/camunda-community-hub/micronaut-zeebe-client) -- [Integrate Camunda's External Task Clients into Micronaut Framework projects](https://github.com/camunda-community-hub/micronaut-camunda-external-client) -- [Create application with Micronaut Launch](https://micronaut.io/launch?name=jobworker&features=camunda-zeebe) -- [Releases on Maven Central](https://search.maven.org/artifact/info.novatec/micronaut-zeebe-client-feature) diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/python.md b/versioned_docs/version-8.2/apis-tools/community-clients/python.md deleted file mode 100644 index 053cf07a900..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/python.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: python -title: "Python" -description: "Take a deeper look at the source code and pip package alongside Python." ---- - -## Zeebe Python gRPC - -Take a closer look at the Python client [maintainer(s) and source code](https://gitlab.com/stephane.ludwig/zeebe_python_grpc). - -- [Pip package](https://pypi.org/project/zeebe-grpc/) - -## Pyzeebe - -Take a closer look at this Python client's [maintainer(s) and source code](https://github.com/camunda-community-hub/pyzeebe). - -- [Pip package](https://pypi.org/project/pyzeebe/) -- [Documentation](https://pyzeebe.readthedocs.io/en/stable/) diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/quarkus.md b/versioned_docs/version-8.2/apis-tools/community-clients/quarkus.md deleted file mode 100644 index 502e94f5823..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/quarkus.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: quarkus -title: "Quarkus" ---- - -The [Quarkus](https://quarkus.io/) integration is a community extension that allows you to easily leverage Zeebe within your Quarkus environment. - -Essentially, Quarkus provides a wrapper around the [Zeebe Java Client](/apis-tools/java-client/index.md), and is preconfigured with sensible defaults so you can get started with minimal configuration. Add a dependency, implement a worker, and add your credentials in your Quarkus project. - -The integration also provides Quarkus developer services to start everything you need as Docker containers for local development. [Zeebe-dev-monitor](https://github.com/lorislab/zeebe-dev-monitor) UI is also included as a developer service. 
- -- [Documentation and source code](https://github.com/quarkiverse/quarkus-zeebe) -- [Releases on Maven Central](https://search.maven.org/artifact/io.quarkiverse.zeebe/quarkus-zeebe) -- [Create application with Quarkus](https://code.quarkus.io/?e=io.quarkiverse.zeebe:quarkus-zeebe&extension-search=quarkus-zeebe) -- [Documentation and source code zeebe-dev-monitor](https://github.com/lorislab/zeebe-dev-monitor) diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/ruby.md b/versioned_docs/version-8.2/apis-tools/community-clients/ruby.md deleted file mode 100644 index 2c2535500a6..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/ruby.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -id: ruby -title: "Ruby" -description: "Take a deeper look at the source code and Ruby gem alongside Ruby." ---- - -Take a closer look at the Ruby client [maintainer(s) and source code](https://github.com/zeebe-io/zeebe-client-ruby). - -- [Ruby gem](https://rubygems.org/gems/zeebe-client) diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/rust.md b/versioned_docs/version-8.2/apis-tools/community-clients/rust.md deleted file mode 100644 index 8ee0e3997a9..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/rust.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: rust -title: "Rust" -description: "Take a deeper look at the source code, Rust crate, and a podcast interview alongside Rust." ---- - -:::note -The Rust client, Zeebest, was previously maintained by [Mackenzie Clark](https://github.com/xmclark), and is currently seeking a new maintainer! -::: - -- [Source code](https://github.com/camunda-community-hub/zeebest) -- [Rust crate](https://docs.rs/zeebest/0.20.0/zeebest/) -- [Podcast interview with Mackenzie Clark](https://zeebe.buzzsprout.com/454051/1478953-zeebe-and-rust-interview-with-mackenzie-clark) diff --git a/versioned_docs/version-8.2/apis-tools/community-clients/spring.md b/versioned_docs/version-8.2/apis-tools/community-clients/spring.md deleted file mode 100644 index b6de24082aa..00000000000 --- a/versioned_docs/version-8.2/apis-tools/community-clients/spring.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: spring -title: "Spring" ---- - -The Spring integration is a community extension that allows you to easily leverage Zeebe within your Spring or Spring Boot environment. - -Essentially, Spring provides a wrapper around the [Zeebe Java Client](/apis-tools/java-client/index.md). 
- -- [Documentation and source code](https://github.com/camunda-community-hub/spring-zeebe/) -- [Releases on Maven Central](https://search.maven.org/artifact/io.camunda/spring-zeebe-starter/) diff --git a/versioned_docs/version-8.2/apis-tools/go-client/assets/java-get-started-monitor-1.gif b/versioned_docs/version-8.2/apis-tools/go-client/assets/java-get-started-monitor-1.gif deleted file mode 100644 index ea85f37d050..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/go-client/assets/java-get-started-monitor-1.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/go-client/assets/java-get-started-monitor-2.gif b/versioned_docs/version-8.2/apis-tools/go-client/assets/java-get-started-monitor-2.gif deleted file mode 100644 index 4168440cfe8..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/go-client/assets/java-get-started-monitor-2.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/go-client/assets/order-process-simple.png b/versioned_docs/version-8.2/apis-tools/go-client/assets/order-process-simple.png deleted file mode 100644 index e21a621bb1e..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/go-client/assets/order-process-simple.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/go-client/assets/order-process.png b/versioned_docs/version-8.2/apis-tools/go-client/assets/order-process.png deleted file mode 100644 index 25edc8f4f7f..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/go-client/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/go-client/go-get-started.md b/versioned_docs/version-8.2/apis-tools/go-client/go-get-started.md deleted file mode 100644 index 8569a43aa4b..00000000000 --- a/versioned_docs/version-8.2/apis-tools/go-client/go-get-started.md +++ /dev/null @@ -1,548 +0,0 @@ ---- -id: go-get-started -title: Getting started with the Go client -sidebar_label: "Getting started with the Go client" ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -In this tutorial, you will learn how to use the Go client in a Go application to interact with Camunda 8. - -You can find a complete example on [GitHub](https://github.com/camunda/camunda-platform-get-started/tree/main/go). - -## Prerequisites - -- [Camunda 8 account](/guides/create-account.md) -- [Cluster](/guides/create-cluster.md) -- [Client credentials](/guides/setup-client-connection-credentials.md) -- [Go v1.13+ environment installed](https://go.dev/) - -## Set up a project - -First, we need a new Go project. To do this, complete the following steps: - -1. Create a new project using your IDE, or create a new Go module with the following command: - -``` -mkdir github.com/zb-user/zb-example -cd github.com/zb-user/zb-example -go mod init zb-user/zb-example -``` - -2. To use the Zeebe Go client library, run the following: - -```bash -go get github.com/camunda/zeebe/clients/go/v8@v8.2.7 -``` - -This adds the following dependency to your `go.mod`, it should look similar to this: - -```go -module github.com/zb-user/zb-example - -go 1.19 - -require github.com/camunda/zeebe/clients/go/v8@v8.2.7 -``` - -3. 
Set the connection settings and client credentials as environment variables in your terminal: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -:::note -When you create client credentials in Camunda 8, you have the option to download a file with the lines above filled out for you. -::: - -4. Create a `main.go` file inside the module and add the following lines to bootstrap the Zeebe client: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda/zeebe/clients/go/v8/pkg/zbc" - "github.com/camunda/zeebe/clients/go/v8/pkg/pb" - "os" -) - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: os.Getenv("ZEEBE_ADDRESS"), - }) - - if err != nil { - panic(err) - } - - ctx := context.Background() - topology, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - for _, broker := range topology.Brokers { - fmt.Println("Broker", broker.Host, ":", broker.Port) - for _, partition := range broker.Partitions { - fmt.Println(" Partition", partition.PartitionId, ":", roleToString(partition.Role)) - } - } -} - -func roleToString(role pb.Partition_PartitionBrokerRole) string { - switch role { - case pb.Partition_LEADER: - return "Leader" - case pb.Partition_FOLLOWER: - return "Follower" - default: - return "Unknown" - } -} -``` - -5. Run the program. - -```bash -go run main.go -``` - -You should see a similar output: - -``` -Broker 0.0.0.0 : 26501 - Partition 1 : Leader -``` - -## Model a process - -Now, we need a simple process we can deploy. Later, we will extend the process with more functionality. For now, follow the steps below: - - - - - -1. Open Web Modeler and create a new BPMN diagram named `order-process.bpmn`. - -2. Add a start event named `Order Placed` and an end event named `Order Delivered` to the diagram. Then, connect the events. - -![model-process-step-1](assets/order-process-simple.png) - -3. Set the ID (the BPMN process id) to `order-process` instead of the autogenerated value so it's easier to work with in this example. - -4. [Optional] Download the BPMN file to the root of the project. - - - - - -1. Open Desktop Modeler and create a new Camunda 8 BPMN diagram named `order-process.bpmn`. - -2. Add a start event named `Order Placed` and an end event named `Order Delivered` to the diagram. Then, connect the events. - -![model-process-step-1](assets/order-process-simple.png) - -3. Set the ID (the BPMN process id) to `order-process` instead of the autogenerated value so it's easier to work with in this example. - -4. Place the BPMN diagram in the root of the project. - - - - - -## Deploy a process - -Next, we want to deploy the modeled process to the broker. - -The broker stores the process under its BPMN process id and assigns a version. - - - - - -Using Web Modeler, you can deploy the BPMN diagram in the UI using the **Deploy** button. - -Alternatively, if you took the optional step and downloaded your BPMN diagram, you can follow the instructions for Desktop Modeler for this section. - - - - - -Add the following to `main.go` at the bottom of `func main()`. 
- -```go - // After the client is created (add this to the end of your func main()) - response, err := client.NewDeployResourceCommand().AddResourceFile("order-process.bpmn").Send(ctx) - if err != nil { - panic(err) - } - fmt.Println(response.String()) -``` - -Run the program and verify the process deployed successfully. - -You should see a similar output: - -``` -key:2251799813685254 processes:{bpmnProcessId:"order-process" version:3 processDefinitionKey:2251799813685253 resourceName:"order-process.bpmn"} -``` - - - - -## Create a process instance - -We are ready to create our first instance of the deployed process. - -A process instance is created by a specific version of the process, which can be set on creation. - -```go - // After the process is deployed. - variables := make(map[string]interface{}) - variables["orderId"] = "31243" - - request, err := client.NewCreateInstanceCommand().BPMNProcessId("order-process").LatestVersion().VariablesFromMap(variables) - if err != nil { - panic(err) - } - - ctx := context.Background() - - msg, err := request.Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(msg.String()) -``` - -Run the program and verify the process instance is created. You should see an output similar to below: - -``` -processKey:2251799813686742 bpmnProcessId:"order-process" version:3 processInstanceKey:2251799813686744 -``` - -## See the process in action - -Want to see how the process instance is executed? Follow the steps below: - -1. Go to the cluster in Camunda 8 and select it. -1. Click on the link to [Operate](/components/operate/userguide/basic-operate-navigation.md). -1. Select the process **order process**. - -As you can see, a process instance has been started and finished. - -## Work on a task - -Now, we want to do some work within our process. Follow the steps below: - -1. Add a few service tasks to the BPMN diagram and set the required attributes. - -2. Extend your `main.go` file and activate a job. These are created when the process instance reaches a service task. - -3. Open the BPMN diagram in Modeler. Keeping in mind how you want to [deploy your model](#deploy-a-process), you can choose either Web Modeler or Desktop Modeler. - -4. Insert three service tasks between the start and the end event. - -- Name the first task `Collect Money`. -- Name the second task `Fetch Items`. -- Name the third task `Ship Parcel`. - -![model-process-step-2](assets/order-process.png) - -5. Using the properties panel **Task definition** section, set the **type** of each task, which identifies the nature of the work to be performed. - -- Set the **type** of the first task to `payment-service`. -- Set the **type** of the second task to `fetcher-service`. -- Set the **type** of the third task to `shipping-service`. - -6. Additionally, for the service task `Collect Money` set a [**task-header**](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) with the key `method` and the value `VISA`. This header is used as a configuration parameter for the payment-service worker to hand over the payment method. 
- -The consolidated example looks as follows: - - - - - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda/zeebe/clients/go/v8/pkg/entities" - "github.com/camunda/zeebe/clients/go/v8/pkg/worker" - "github.com/camunda/zeebe/clients/go/v8/pkg/zbc" - "log" - "os" -) - -const ZeebeAddr = "0.0.0.0:26500" - -var readyClose = make(chan struct{}) - -func main() { - gatewayAddr := os.Getenv("ZEEBE_ADDRESS") - plainText:= false - - if (gatewayAddr == "") { - gatewayAddr = ZeebeAddr - plainText = true - } - - zbClient, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: gatewayAddr, - UsePlaintextConnection: plainText, - }) - - if err != nil { - panic(err) - } - - ctx := context.Background() - - // deploy process happens in the Web Modeler UI - - // create a new process instance - variables := make(map[string]interface{}) - variables["orderId"] = "31243" - - request, err := zbClient.NewCreateInstanceCommand().BPMNProcessId("order-process-4").LatestVersion().VariablesFromMap(variables) - if err != nil { - panic(err) - } - - result, err := request.Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(result.String()) - - jobWorker := zbClient.NewJobWorker().JobType("payment-service").Handler(handleJob).Open() - - <-readyClose - jobWorker.Close() - jobWorker.AwaitClose() -} - -func handleJob(client worker.JobClient, job entities.Job) { - jobKey := job.GetKey() - - headers, err := job.GetCustomHeadersAsMap() - if err != nil { - // failed to handle job as we require the custom job headers - failJob(client, job) - return - } - - variables, err := job.GetVariablesAsMap() - if err != nil { - // failed to handle job as we require the variables - failJob(client, job) - return - } - - variables["totalPrice"] = 46.50 - request, err := client.NewCompleteJobCommand().JobKey(jobKey).VariablesFromMap(variables) - if err != nil { - // failed to set the updated variables - failJob(client, job) - return - } - - log.Println("Complete job", jobKey, "of type", job.Type) - log.Println("Processing order:", variables["orderId"]) - log.Println("Collect money using payment method:", headers["method"]) - - ctx := context.Background() - _, err = request.Send(ctx) - if err != nil { - panic(err) - } - - log.Println("Successfully completed job") - close(readyClose) -} - -func failJob(client worker.JobClient, job entities.Job) { - log.Println("Failed to complete job", job.GetKey()) - - ctx := context.Background() - _, err := client.NewFailJobCommand().JobKey(job.GetKey()).Retries(job.Retries - 1).Send(ctx) - if err != nil { - panic(err) - } -} -``` - - - - - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda/zeebe/clients/go/v8/pkg/entities" - "github.com/camunda/zeebe/clients/go/v8/pkg/worker" - "github.com/camunda/zeebe/clients/go/v8/pkg/zbc" - "log" - "os" -) - -const ZeebeAddr = "0.0.0.0:26500" - -var readyClose = make(chan struct{}) - -func main() { - gatewayAddr := os.Getenv("ZEEBE_ADDRESS") - plainText:= false - - if (gatewayAddr == "") { - gatewayAddr = ZeebeAddr - plainText = true - } - - zbClient, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: gatewayAddr, - UsePlaintextConnection: plainText, - }) - - if err != nil { - panic(err) - } - - // deploy process - ctx := context.Background() - response, err := zbClient.NewDeployResourceCommand().AddResourceFile("order-process-4.bpmn").Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) - - // create a new process instance - variables := make(map[string]interface{}) - 
variables["orderId"] = "31243" - - request, err := zbClient.NewCreateInstanceCommand().BPMNProcessId("order-process-4").LatestVersion().VariablesFromMap(variables) - if err != nil { - panic(err) - } - - result, err := request.Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(result.String()) - - jobWorker := zbClient.NewJobWorker().JobType("payment-service").Handler(handleJob).Open() - - <-readyClose - jobWorker.Close() - jobWorker.AwaitClose() -} - -func handleJob(client worker.JobClient, job entities.Job) { - jobKey := job.GetKey() - - headers, err := job.GetCustomHeadersAsMap() - if err != nil { - // failed to handle job as we require the custom job headers - failJob(client, job) - return - } - - variables, err := job.GetVariablesAsMap() - if err != nil { - // failed to handle job as we require the variables - failJob(client, job) - return - } - - variables["totalPrice"] = 46.50 - request, err := client.NewCompleteJobCommand().JobKey(jobKey).VariablesFromMap(variables) - if err != nil { - // failed to set the updated variables - failJob(client, job) - return - } - - log.Println("Complete job", jobKey, "of type", job.Type) - log.Println("Processing order:", variables["orderId"]) - log.Println("Collect money using payment method:", headers["method"]) - - ctx := context.Background() - _, err = request.Send(ctx) - if err != nil { - panic(err) - } - - log.Println("Successfully completed job") - close(readyClose) -} - -func failJob(client worker.JobClient, job entities.Job) { - log.Println("Failed to complete job", job.GetKey()) - - ctx := context.Background() - _, err := client.NewFailJobCommand().JobKey(job.GetKey()).Retries(job.Retries - 1).Send(ctx) - if err != nil { - panic(err) - } -} -``` - - - - - -In this example, we open a [job worker](/components/concepts/job-workers.md) for jobs of type `payment-service`. - -The job worker will repeatedly poll for new jobs of the type `payment-service` and activate them subsequently. Each activated job will then be passed to the job handler, which implements the business logic of the job worker. - -The handler will then complete the job with its result or fail the job if -it encounters a problem while processing the job. - -When observing the current state of the process in Operate, you can see the process instance moved from the first service task to the next one. - -When you run the example above, you should see a similar output to the following: - -``` -key:2251799813685256 deployments:{process:{bpmnProcessId:"order-process-4" version:1 processDefinitionKey:2251799813685255 resourceName:"order-process.bpmn"}} -processDefinitionKey:2251799813685255 bpmnProcessId:"order-process-4" version:1 processInstanceKey:2251799813685257 -2022/04/06 16:20:59 Complete job 2251799813685264 of type payment-service -2022/04/06 16:20:59 Processing order: 31243 -2022/04/06 16:20:59 Collect money using payment method: VISA -2022/04/06 16:20:59 Successfully completed job -``` - -## What's next? - -- Learn more about the [concepts behind Zeebe](/components/concepts/what-is-camunda-8.md). -- Learn more about [BPMN processes](/components/modeler/bpmn/bpmn-primer.md). 
diff --git a/versioned_docs/version-8.2/apis-tools/go-client/index.md b/versioned_docs/version-8.2/apis-tools/go-client/index.md deleted file mode 100644 index a6b5b13cb86..00000000000 --- a/versioned_docs/version-8.2/apis-tools/go-client/index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -id: index -title: "Go client" -sidebar_label: "Quick reference" -description: "Instantiate the client by passing in the address of the cluster you want to connect to in a Go application to interact with Camunda 8." ---- - -## Dependencies - -To use the [Zeebe Go client library](https://github.com/camunda/camunda-platform-get-started/tree/main/go), add the following dependency to your `go.mod`: - -``` -module github.com/zb-user/zb-example - -go 1.19 - -require github.com/camunda/camunda/clients/go/v8@v8.2.7 -``` - -## Bootstrapping - -In Go code, instantiate the client as follows: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda/camunda/clients/go/v8/pkg/zbc" -) - -func main() { - credsProvider, err := zbc.NewOAuthCredentialsProvider(&zbc.OAuthProviderConfig{ - ClientID: "clientId", - ClientSecret: "clientSecret", - Audience: "zeebeAddress", - }) - if err != nil { - panic(err) - } - - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: "zeebeAddress", - CredentialsProvider: credsProvider, - }) - if err != nil { - panic(err) - } - - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` - -Let's go over this code snippet line by line: - -1. Create the credentials provider for the OAuth protocol. This is needed to authenticate your client. -2. Create the client by passing in the address of the cluster we want to connect to and the credentials provider from the step above. -3. Send a test request to verify the connection was established. - -The values for these settings can be taken from the connection information on the **Client Credentials** page. Note that `clientSecret` is only visible when you create the client credentials. - -Another (more compact) option is to pass in the connection settings via environment variables: - -```bash -export ZEEBE_ADDRESS='[Zeebe API]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When you create client credentials in Camunda 8, you have the option to download a file with the lines above filled out for you. - -Given these environment variables, you can instantiate the client as follows: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda/camunda/clients/go/v8/pkg/zbc" - "os" -) - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: os.Getenv("ZEEBE_ADDRESS"), - }) - if err != nil { - panic(err) - } - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` diff --git a/versioned_docs/version-8.2/apis-tools/grpc.md b/versioned_docs/version-8.2/apis-tools/grpc.md deleted file mode 100644 index a235e7c60b5..00000000000 --- a/versioned_docs/version-8.2/apis-tools/grpc.md +++ /dev/null @@ -1,1064 +0,0 @@ ---- -id: grpc -title: "Zeebe API (gRPC)" -description: "Zeebe clients use gRPC to communicate with the cluster. Activate jobs, cancel and create process instances, and more." 
-keywords: ["backpressure", "back-pressure", "back pressure"] ---- - -[Zeebe](../components/zeebe/zeebe-overview.md) clients use [gRPC](https://grpc.io/) to communicate with the cluster. - -## Gateway service - -The Zeebe client gRPC API is exposed through a single gateway service. The current version of the protocol buffer file -can be found in -the [Zeebe repository](https://github.com/camunda/camunda/blob/stable/8.2/gateway-protocol/src/main/proto/gateway.proto). - -### `ActivateJobs` RPC - -Iterates through all known partitions round-robin, activates up to the requested -maximum, and streams them back to the client as they are activated. - -#### Input: `ActivateJobsRequest` - -```protobuf -message ActivateJobsRequest { - // the job type, as defined in the BPMN process (e.g. ) - string type = 1; - // the name of the worker activating the jobs, mostly used for logging purposes - string worker = 2; - // a job returned after this call will not be activated by another call until the - // timeout (in ms) has been reached - int64 timeout = 3; - // the maximum jobs to activate by this request - int32 maxJobsToActivate = 4; - // a list of variables to fetch as the job variables; if empty, all visible variables at - // the time of activation for the scope of the job will be returned - repeated string fetchVariable = 5; - // The request will be completed when at least one job is activated or after the requestTimeout (in ms). - // if the requestTimeout = 0, a default timeout is used. - // if the requestTimeout < 0, long polling is disabled and the request is completed immediately, even when no job is activated. - int64 requestTimeout = 6; -} -``` - -#### Output: `ActivateJobsResponse` - -```protobuf -message ActivateJobsResponse { - // list of activated jobs - repeated ActivatedJob jobs = 1; -} - -message ActivatedJob { - // the key, a unique identifier for the job - int64 key = 1; - // the type of the job (should match what was requested) - string type = 2; - // the job's process instance key - int64 processInstanceKey = 3; - // the bpmn process ID of the job process definition - string bpmnProcessId = 4; - // the version of the job process definition - int32 processDefinitionVersion = 5; - // the key of the job process definition - int64 processKey = 6; - // the associated task element ID - string elementId = 7; - // the unique key identifying the associated task, unique within the scope of the - // process instance - int64 elementInstanceKey = 8; - // a set of custom headers defined during modelling; returned as a serialized - // JSON document - string customHeaders = 9; - // the name of the worker which activated this job - string worker = 10; - // the amount of retries left to this job (should always be positive) - int32 retries = 11; - // when the job can be activated again, sent as a UNIX epoch timestamp - int64 deadline = 12; - // JSON document, computed at activation time, consisting of all visible variables to - // the task scope - string variables = 13; -} -``` - -#### Errors - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- Type is blank (empty string, null) -- Worker is blank (empty string, null) -- Timeout less than 1 (ms) -- maxJobsToActivate is less than 1 - -### `BroadcastSignal` RPC - -Broadcasts a [signal](../components/concepts/signals.md). 
- -#### Input: `BroadcastSignalRequest` - -```protobuf -message BroadcastSignalRequest { - // The name of the signal - string signalName = 1; - - // the signal variables as a JSON document; to be valid, the root of the document must be an - // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid. - string variables = 2; -} -``` - -#### Output: `BroadcastSignalResponse` - -```protobuf -message BroadcastSignalResponse { - // the unique ID of the signal that was broadcasted. - int64 key = 1; -} -``` - -### `CancelProcessInstance` RPC - -Cancels a running process instance. - -#### Input: `CancelProcessInstanceRequest` - -```protobuf -message CancelProcessInstanceRequest { - // the process instance key (as, for example, obtained from - // CreateProcessInstanceResponse) - int64 processInstanceKey = 1; -} -``` - -#### Output: `CancelProcessInstanceResponse` - -```protobuf -message CancelProcessInstanceResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No process instance exists with the given key. Note that since process instances are removed once they are finished, it could mean the instance did exist at some point. - -### `CompleteJob` RPC - -Completes a job with the given payload, which allows completing the associated service task. - -#### Input: `CompleteJobRequest` - -```protobuf -message CompleteJobRequest { - // the unique job identifier, as obtained from ActivateJobsResponse - int64 jobKey = 1; - // a JSON document representing the variables in the current task scope - string variables = 2; -} -``` - -#### Output: `CompleteJobResponse` - -```protobuf -message CompleteJobResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job exists with the given job key. Note that since jobs are removed once completed, it could be that this job did exist at some point. - -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The job was marked as failed. In that case, the related incident must be resolved before the job can be activated again and completed. - -### `CreateProcessInstance` RPC - -Creates and starts an instance of the specified process. The process definition to use -to create the instance can be specified either using its unique key (as returned by -DeployProcess), or using the BPMN process ID and a version. Pass -1 as the version to -use the latest deployed version. - -:::note -Only processes with none start events can be started through this command. -::: - -:::note -Start instructions have the same [limitations as process instance modification](/components/concepts/process-instance-modification.md#limitations), e.g., it is not possible to start at a sequence flow. -::: - -#### Input: `CreateProcessInstanceRequest` - -```protobuf -message CreateProcessInstanceRequest { - // the unique key identifying the process definition (e.g. returned from a process - // in the DeployProcessResponse message) - int64 processDefinitionKey = 1; - // the BPMN process ID of the process definition - string bpmnProcessId = 2; - // the version of the process; set to -1 to use the latest version - int32 version = 3; - // JSON document that will instantiate the variables for the root variable scope of the - // process instance; it must be a JSON object, as variables will be mapped in a - // key-value fashion. e.g. { "a": 1, "b": 2 } will create two variables, named "a" and - // "b" respectively, with their associated values. 
[{ "a": 1, "b": 2 }] would not be a - // valid argument, as the root of the JSON document is an array and not an object. - string variables = 4; - // List of start instructions. If empty (default) the process instance - // will start at the start event. If non-empty the process instance will apply start - // instructions after it has been created - repeated ProcessInstanceCreationStartInstruction startInstructions = 5; -} - -message ProcessInstanceCreationStartInstruction { - - // future extensions might include - // - different types of start instructions - // - ability to set local variables for different flow scopes - - // for now, however, the start instruction is implicitly a - // "startBeforeElement" instruction - - // element ID - string elementId = 1; -} -``` - -#### Output: `CreateProcessInstanceResponse` - -```protobuf -message CreateProcessInstanceResponse { - // the key of the process definition which was used to create the process instance - int64 processKey = 1; - // the BPMN process ID of the process definition which was used to create the process - // instance - string bpmnProcessId = 2; - // the version of the process definition which was used to create the process instance - int32 version = 3; - // the unique identifier of the created process instance; to be used wherever a request - // needs a process instance key (e.g. CancelProcessInstanceRequest) - int64 processInstanceKey = 4; -} -``` - -### `CreateProcessInstanceWithResult` RPC - -Similar to `CreateProcessInstance` RPC, creates and starts an instance of the specified process. -Unlike `CreateProcessInstance` RPC, the response is returned when the process is completed. - -:::note -Only processes with none start events can be started through this command. -::: - -:::note -Start instructions have the same [limitations as process instance modification](/components/concepts/process-instance-modification.md#limitations), e.g., it is not possible to start at a sequence flow. -::: - -#### Input: `CreateProcessInstanceWithResultRequest` - -```protobuf -message CreateProcessInstanceRequest { - CreateProcessInstanceRequest request = 1; - // timeout (in ms). the request will be closed if the process is not completed before - // the requestTimeout. - // if requestTimeout = 0, uses the generic requestTimeout configured in the gateway. - int64 requestTimeout = 2; -} -``` - -#### Output: `CreateProcessInstanceWithResultResponse` - -```protobuf -message CreateProcessInstanceResponse { - // the key of the process definition which was used to create the process instance - int64 processKey = 1; - // the BPMN process ID of the process definition which was used to create the process - // instance - string bpmnProcessId = 2; - // the version of the process definition which was used to create the process instance - int32 version = 3; - // the unique identifier of the created process instance; to be used wherever a request - // needs a process instance key (e.g. CancelProcessInstanceRequest) - int64 processInstanceKey = 4; - // consisting of all visible variables to the root scope - string variables = 5; -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No process with the given key exists (if processKey was given). -- No process with the given process ID exists (if bpmnProcessId was given but version was -1). -- No process with the given process ID and version exists (if both bpmnProcessId and version were given). 
- -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The process definition does not contain a none start event; only processes with none - start event can be started manually. - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- The given variables argument is not a valid JSON document; it is expected to be a valid - JSON document where the root node is an object. - -### `EvaluateDecision` RPC - -Evaluates a decision. You specify the decision to evaluate either by -using its unique KEY (as returned by DeployResource), or using the decision -ID. When using the decision ID, the latest deployed version of the decision -is used. - -:::note -When you specify both the decision ID and KEY, the ID is used to find the decision to be evaluated. -::: - -#### Input: `EvaluateDecisionRequest` - -```protobuf -message EvaluateDecisionRequest { - // the unique key identifying the decision to be evaluated (e.g. returned - // from a decision in the DeployResourceResponse message) - int64 decisionKey = 1; - // the ID of the decision to be evaluated - string decisionId = 2; - // JSON document that will instantiate the variables for the decision to be - // evaluated; it must be a JSON object, as variables will be mapped in a - // key-value fashion, e.g. { "a": 1, "b": 2 } will create two variables, - // named "a" and "b" respectively, with their associated values. - // [{ "a": 1, "b": 2 }] would not be a valid argument, as the root of the - // JSON document is an array and not an object. - string variables = 3; -} -``` - -#### Output: `EvaluateDecisionResponse` - -```protobuf -message EvaluateDecisionResponse { - // the unique key identifying the decision which was evaluated (e.g. returned - // from a decision in the DeployResourceResponse message) - int64 decisionKey = 1; - // the ID of the decision which was evaluated - string decisionId = 2; - // the name of the decision which was evaluated - string decisionName = 3; - // the version of the decision which was evaluated - int32 decisionVersion = 4; - // the ID of the decision requirements graph that the decision which was - // evaluated is part of. - string decisionRequirementsId = 5; - // the unique key identifying the decision requirements graph that the - // decision which was evaluated is part of. - int64 decisionRequirementsKey = 6; - // JSON document that will instantiate the result of the decision which was - // evaluated; it will be a JSON object, as the result output will be mapped - // in a key-value fashion, e.g. { "a": 1 }. - string decisionOutput = 7; - // a list of decisions that were evaluated within the requested decision evaluation - repeated EvaluatedDecision evaluatedDecisions = 8; - // an optional string indicating the ID of the decision which - // failed during evaluation - string failedDecisionId = 9; - // an optional message describing why the decision which was evaluated failed - string failureMessage = 10; -} - -message EvaluatedDecision { - // the unique key identifying the decision which was evaluated (e.g. 
returned - // from a decision in the DeployResourceResponse message) - int64 decisionKey = 1; - // the ID of the decision which was evaluated - string decisionId = 2; - // the name of the decision which was evaluated - string decisionName = 3; - // the version of the decision which was evaluated - int32 decisionVersion = 4; - // the type of the decision which was evaluated - string decisionType = 5; - // JSON document that will instantiate the result of the decision which was - // evaluated; it will be a JSON object, as the result output will be mapped - // in a key-value fashion, e.g. { "a": 1 }. - string decisionOutput = 6; - // the decision rules that matched within this decision evaluation - repeated MatchedDecisionRule matchedRules = 7; - // the decision inputs that were evaluated within this decision evaluation - repeated EvaluatedDecisionInput evaluatedInputs = 8; -} - -message EvaluatedDecisionInput { - // the id of the evaluated decision input - string inputId = 1; - // the name of the evaluated decision input - string inputName = 2; - // the value of the evaluated decision input - string inputValue = 3; -} - -message EvaluatedDecisionOutput { - // the id of the evaluated decision output - string outputId = 1; - // the name of the evaluated decision output - string outputName = 2; - // the value of the evaluated decision output - string outputValue = 3; -} - -message MatchedDecisionRule { - // the id of the matched rule - string ruleId = 1; - // the index of the matched rule - int32 ruleIndex = 2; - // the evaluated decision outputs - repeated EvaluatedDecisionOutput evaluatedOutputs = 3; -} -``` - -#### Errors - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- No decision with the given key exists (if decisionKey was given). -- No decision with the given decision ID exists (if decisionId was given). -- Both decision ID and decision KEY were provided, or are missing. - -### `DeployResource` RPC - -Deploys one or more resources (e.g. processes or decision models) to Zeebe. -Note that this is an atomic call, i.e. either all resources are deployed, or none of them are. - -#### Input: `DeployResourceRequest` - -```protobuf -message DeployResourceRequest { - // list of resources to deploy - repeated Resource resources = 1; -} - -message Resource { - // the resource name, e.g. myProcess.bpmn or myDecision.dmn - string name = 1; - // the file content as a UTF8-encoded string - bytes content = 2; -} -``` - -#### Output: `DeployResourceResponse` - -```protobuf -message DeployResourceResponse { - // the unique key identifying the deployment - int64 key = 1; - // a list of deployed resources, e.g. 
processes - repeated Deployment deployments = 2; -} - -message Deployment { - // each deployment has only one metadata - oneof Metadata { - // metadata of a deployed process - ProcessMetadata process = 1; - // metadata of a deployed decision - DecisionMetadata decision = 2; - // metadata of a deployed decision requirements - DecisionRequirementsMetadata decisionRequirements = 3; - } -} - -message ProcessMetadata { - // the bpmn process ID, as parsed during deployment; together with the version forms a - // unique identifier for a specific process definition - string bpmnProcessId = 1; - // the assigned process version - int32 version = 2; - // the assigned key, which acts as a unique identifier for this process - int64 processDefinitionKey = 3; - // the resource name (see: ProcessRequestObject.name) from which this process was - // parsed - string resourceName = 4; -} - -message DecisionMetadata { - // the dmn decision ID, as parsed during deployment; together with the - // versions forms a unique identifier for a specific decision - string dmnDecisionId = 1; - // the dmn name of the decision, as parsed during deployment - string dmnDecisionName = 2; - // the assigned decision version - int32 version = 3; - // the assigned decision key, which acts as a unique identifier for this - // decision - int64 decisionKey = 4; - // the dmn ID of the decision requirements graph that this decision is part - // of, as parsed during deployment - string dmnDecisionRequirementsId = 5; - // the assigned key of the decision requirements graph that this decision is - // part of - int64 decisionRequirementsKey = 6; -} - -message DecisionRequirementsMetadata { - // the dmn decision requirements ID, as parsed during deployment; together - // with the versions forms a unique identifier for a specific decision - string dmnDecisionRequirementsId = 1; - // the dmn name of the decision requirements, as parsed during deployment - string dmnDecisionRequirementsName = 2; - // the assigned decision requirements version - int32 version = 3; - // the assigned decision requirements key, which acts as a unique identifier - // for this decision requirements - int64 decisionRequirementsKey = 4; - // the resource name (see: Resource.name) from which this decision - // requirements was parsed - string resourceName = 5; -} -``` - -#### Errors - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- No resources given. -- At least one resource is invalid. A resource is considered invalid if: - - The resource type is not supported (e.g. supported resources include BPMN and DMN files) - - The content is not deserializable (e.g. detected as BPMN, but it's broken XML) - - The content is invalid (e.g. an event-based gateway has an outgoing sequence flow to a task) - -### `FailJob` RPC - -Marks the job as failed. If the retries argument is positive and no retry back off is set, the job is immediately -activatable again. If the retry back off is positive the job becomes activatable once the back off timeout has passed. -If the retries argument is zero or negative, an incident is raised, tagged with the given errorMessage, and the job is -not activatable until the incident is resolved. If the variables argument is set, the variables are merged into the process at the local scope of the job's associated task. 
- -#### Input: `FailJobRequest` - -```protobuf -message FailJobRequest { - // the unique job identifier, as obtained when activating the job - int64 jobKey = 1; - // the amount of retries the job should have left - int32 retries = 2; - // an optional message describing why the job failed - // this is particularly useful if a job runs out of retries and an incident is raised, - // as this message can help explain why the incident was raised - string errorMessage = 3; - // the backoff timeout (in ms) for the next retry - int64 retryBackOff = 4; - // JSON document that will instantiate the variables at the local scope of the - // job's associated task; it must be a JSON object, as variables will be mapped in a - // key-value fashion. e.g. { "a": 1, "b": 2 } will create two variables, named "a" and - // "b" respectively, with their associated values. [{ "a": 1, "b": 2 }] would not be a - // valid argument, as the root of the JSON document is an array and not an object. - string variables = 5; -} -``` - -#### Output: `FailJobResponse` - -```protobuf -message FailJobResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job was found with the given key. - -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The job was not activated. -- The job is already in a failed state, i.e. ran out of retries. - -### `ModifyProcessInstance` RPC - -Modifies a running process instance. The command can contain multiple instructions to activate an element of the -process, or to terminate an active instance of an element. - -Use the command to repair a process instance that is stuck on an element or took an unintended path, for example -because an external system is not available or doesn't respond as expected. - -#### Input: `ModifyProcessInstanceRequest` - -```protobuf -message ModifyProcessInstanceRequest { - // the key of the process instance that should be modified - int64 processInstanceKey = 1; - // instructions describing which elements should be activated in which scopes, - // and which variables should be created - repeated ActivateInstruction activateInstructions = 2; - // instructions describing which elements should be terminated - repeated TerminateInstruction terminateInstructions = 3; - - message ActivateInstruction { - // the id of the element that should be activated - string elementId = 1; - // the key of the ancestor scope the element instance should be created in; - // set to -1 to create the new element instance within an existing element - // instance of the flow scope - int64 ancestorElementInstanceKey = 2; - // instructions describing which variables should be created - repeated VariableInstruction variableInstructions = 3; - } - - message VariableInstruction { - // JSON document that will instantiate the variables for the root variable scope of the - // process instance; it must be a JSON object, as variables will be mapped in a - // key-value fashion. e.g. { "a": 1, "b": 2 } will create two variables, named "a" and - // "b" respectively, with their associated values. [{ "a": 1, "b": 2 }] would not be a - // valid argument, as the root of the JSON document is an array and not an object. 
- string variables = 1; - // the id of the element in which scope the variables should be created; - // leave empty to create the variables in the global scope of the process instance - string scopeId = 2; - } - - message TerminateInstruction { - // the id of the element that should be terminated - int64 elementInstanceKey = 1; - } -} -``` - -#### Output: `ModifyProcessInstanceResponse` - -```protobuf -message ModifyProcessInstanceResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No process instance exists with the given key, or it is not active. - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- At least one activate instruction is invalid. An activate instruction is considered invalid if: - - The process doesn't contain an element with the given id. - - A flow scope of the given element can't be created. - - The given element has more than one active instance of its flow scope. -- At least one variable instruction is invalid. A variable instruction is considered invalid if: - - The process doesn't contain an element with the given scope id. - - The given element doesn't belong to the activating element's flow scope. - - The given variables are not a valid JSON document. -- At least one terminate instruction is invalid. A terminate instruction is considered invalid if: - - No element instance exists with the given key, or it is not active. -- The instructions would terminate all element instances of a process instance that was created by a call activity in - the parent process. - -### `PublishMessage` RPC - -Publishes a single message. Messages are published to specific partitions computed from their -correlation keys. - -#### Input: `PublishMessageRequest` - -```protobuf -message PublishMessageRequest { - // the name of the message - string name = 1; - // the correlation key of the message - string correlationKey = 2; - // how long the message should be buffered on the broker, in milliseconds - int64 timeToLive = 3; - // the unique ID of the message; can be omitted. only useful to ensure only one message - // with the given ID will ever be published (during its lifetime) - string messageId = 4; - // the message variables as a JSON document; to be valid, the root of the document must be an - // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid. - string variables = 5; -} -``` - -#### Output: `PublishMessageResponse` - -```protobuf -message PublishMessageResponse { - // the unique ID of the message that was published - int64 key = 1; -} -``` - -#### Errors - -##### GRPC_STATUS_ALREADY_EXISTS - -Returned if: - -- A message with the same ID was previously published (and is still alive). - -### `ResolveIncident` RPC - -Resolves a given incident. This simply marks the incident as resolved; most likely a call to -UpdateJobRetries or SetVariables will be necessary to actually resolve the -problem, followed by this call. - -#### Input: `ResolveIncidentRequest` - -```protobuf -message ResolveIncidentRequest { - // the unique ID of the incident to resolve - int64 incidentKey = 1; -} -``` - -#### Output: `ResolveIncidentResponse` - -```protobuf -message ResolveIncidentResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No incident with the given key exists. - -### `SetVariables` RPC - -Updates all the variables of a particular scope (e.g. process instance, flow element instance) from the given JSON document. 
- -#### Input: `SetVariablesRequest` - -```protobuf -message SetVariablesRequest { - // the unique identifier of a particular element; can be the process instance key (as - // obtained during instance creation), or a given element, such as a service task (see - // elementInstanceKey on the job message) - int64 elementInstanceKey = 1; - // a JSON serialized document describing variables as key value pairs; the root of the document - // must be an object - string variables = 2; - // if true, the variables will be merged strictly into the local scope (as indicated by - // elementInstanceKey); this means the variables is not propagated to upper scopes. - // for example, let's say we have two scopes, '1' and '2', with each having effective variables as: - // 1 => `{ "foo" : 2 }`, and 2 => `{ "bar" : 1 }`. if we send an update request with - // elementInstanceKey = 2, variables `{ "foo" : 5 }`, and local is true, then scope 1 will - // be unchanged, and scope 2 will now be `{ "bar" : 1, "foo" 5 }`. if local was false, however, - // then scope 1 would be `{ "foo": 5 }`, and scope 2 would be `{ "bar" : 1 }`. - bool local = 3; -} -``` - -#### Output: `SetVariablesResponse` - -```protobuf -message SetVariablesResponse { - // the unique key of the set variables command - int64 key = 1; -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No element with the given `elementInstanceKey` exists. - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- The given payload is not a valid JSON document; all payloads are expected to be - valid JSON documents where the root node is an object. - -### `ThrowError` RPC - -`ThrowError` reports a business error (i.e. non-technical) that occurs while processing a job. - -The error is handled in the process by an error catch event. If there is no error catch event with the specified `errorCode`, an incident is raised instead. - -Variables can be passed along with the thrown error to provide additional details that can be used in the process. - -#### Input: `ThrowErrorRequest` - -```protobuf -message ThrowErrorRequest { - // the unique job identifier, as obtained when activating the job - int64 jobKey = 1; - // the error code that will be matched with an error catch event - string errorCode = 2; - // an optional error message that provides additional context - string errorMessage = 3; - // JSON document that will instantiate the variables at the local scope of the - // error catch event that catches the thrown error; it must be a JSON object, as variables will be mapped in a - // key-value fashion. e.g. { "a": 1, "b": 2 } will create two variables, named "a" and - // "b" respectively, with their associated values. [{ "a": 1, "b": 2 }] would not be a - // valid argument, as the root of the JSON document is an array and not an object. - string variables = 4; -} -``` - -#### Output: `ThrowErrorResponse` - -```protobuf -message ThrowErrorResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job was found with the given key. - -##### GRPC_STATUS_FAILED_PRECONDITION - -Returned if: - -- The job is already in a failed state, i.e. ran out of retries. - -### `Topology` RPC - -Obtains the current topology of the cluster the gateway is part of. 
- -#### Input: `TopologyRequest` - -```protobuf -message TopologyRequest { -} -``` - -#### Output: `TopologyResponse` - -```protobuf -message TopologyResponse { - // list of brokers part of this cluster - repeated BrokerInfo brokers = 1; - // how many nodes are in the cluster - int32 clusterSize = 2; - // how many partitions are spread across the cluster - int32 partitionsCount = 3; - // configured replication factor for this cluster - int32 replicationFactor = 4; - // gateway version - string gatewayVersion = 5; -} - -message BrokerInfo { - // unique (within a cluster) node ID for the broker - int32 nodeId = 1; - // hostname of the broker - string host = 2; - // port for the broker - int32 port = 3; - // list of partitions managed or replicated on this broker - repeated Partition partitions = 4; - // broker version - string version = 5; -} - -message Partition { - // Describes the Raft role of the broker for a given partition - enum PartitionBrokerRole { - LEADER = 0; - FOLLOWER = 1; - } - - // Describes the current health of the partition - enum PartitionBrokerHealth { - HEALTHY = 0; - UNHEALTHY = 1; - } - - // the unique ID of this partition - int32 partitionId = 1; - // the role of the broker for this partition - PartitionBrokerRole role = 2; - // the health of this partition - PartitionBrokerHealth health = 3; -} -``` - -#### Errors - -No specific errors. - -### `UpdateJobRetries` RPC - -Updates the number of retries a job has left. This is mostly useful for jobs that have run out of -retries, should the underlying problem be solved. - -#### Input: `UpdateJobRetriesRequest` - -```protobuf -message UpdateJobRetriesRequest { - // the unique job identifier, as obtained through ActivateJobs - int64 jobKey = 1; - // the new amount of retries for the job; must be positive - int32 retries = 2; -} -``` - -#### Output: `UpdateJobRetriesResponse` - -```protobuf -message UpdateJobRetriesResponse { -} -``` - -#### Errors - -##### GRPC_STATUS_NOT_FOUND - -Returned if: - -- No job exists with the given key. - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- Retries is not greater than 0. - -## Technical error handling - -In the documentation above, the documented errors are business logic errors. -These errors are a result of request processing logic, and not serialization, network, or -other more general errors. These errors are described in this section. - -The gRPC API for Zeebe is exposed through an API gateway, which acts as a proxy -for the cluster. Generally, this means the clients execute a remote call on the gateway, -which is then translated to special binary protocol the gateway uses to -communicate with nodes in the cluster. The nodes in the cluster are called brokers. - -Technical errors which occur between gateway and brokers (e.g. the gateway cannot deserialize the broker response, -the broker is unavailable, etc.) are reported to the client using the following error codes: - -- `GRPC_STATUS_RESOURCE_EXHAUSTED`: When a broker receives more requests than it can handle, it signals backpressure and rejects requests with this error code. - - In this case, it is possible to retry the requests with an appropriate retry strategy. - - If you receive many such errors within a short time period, it indicates the broker is constantly under high load. - - It is recommended to reduce the rate of requests. - When backpressure is active, the broker may reject any request except _CompleteJob_ RPC and _FailJob_ RPC. 
- - These requests are allowed during backpressure and are always accepted by the broker even if it is receiving requests above its limits. -- `GRPC_STATUS_UNAVAILABLE`: If the gateway itself is in an invalid state (e.g. out of memory). -- `GRPC_STATUS_INTERNAL`: For any other internal errors that occurred between the gateway and the broker. - -This behavior applies to every request. In these cases, the client should retry -with an appropriate retry policy (e.g. a combination of exponential backoff or jitter wrapped -in a circuit breaker). - -As the gRPC server/client is based on generated code, keep in mind that -any call made to the server can also return errors as described by the spec -[here](https://grpc.io/docs/guides/error.html#error-status-codes). - -## Deprecated RPCs - -The following RPCs are exposed by the gateway service, but have been deprecated. - -### `DeployProcess` RPC - -:::note -Deprecated since 8, replaced by [DeployResource RPC](#deployresource-rpc). -::: - -Deploys one or more processes to Zeebe. Note that this is an atomic call, -i.e. either all processes are deployed, or none of them are. - -#### Input: `DeployProcessRequest` - -```protobuf -message DeployProcessRequest { - // List of process resources to deploy - repeated ProcessRequestObject processes = 1; -} - -message ProcessRequestObject { - enum ResourceType { - // FILE type means the gateway will try to detect the resource type - // using the file extension of the name field - FILE = 0; - BPMN = 1; // extension 'bpmn' - YAML = 2 [deprecated = true]; // extension 'yaml'; removed as of release 1.0 - } - - // the resource basename, e.g. myProcess.bpmn - string name = 1; - // the resource type; if set to BPMN or YAML then the file extension - // is ignored - // As of release 1.0, YAML support was removed and BPMN is the only supported resource type. - // The field was kept to not break clients. - ResourceType type = 2 [deprecated = true]; - // the process definition as a UTF8-encoded string - bytes definition = 3; -} -``` - -#### Output: `DeployProcessResponse` - -```protobuf -message DeployProcessResponse { - // the unique key identifying the deployment - int64 key = 1; - // a list of deployed processes - repeated ProcessMetadata processes = 2; -} - -message ProcessMetadata { - // the bpmn process ID, as parsed during deployment; together with the version forms a - // unique identifier for a specific process definition - string bpmnProcessId = 1; - // the assigned process version - int32 version = 2; - // the assigned key, which acts as a unique identifier for this process - int64 processKey = 3; - // the resource name (see: ProcessRequestObject.name) from which this process was - // parsed - string resourceName = 4; -} -``` - -#### Errors - -##### GRPC_STATUS_INVALID_ARGUMENT - -Returned if: - -- No resources given. -- At least one resource is invalid. A resource is considered invalid if: - - It is not a BPMN or YAML file (currently detected through the file extension). - - The resource data is not deserializable (e.g. detected as BPMN, but it's broken XML). - - The process is invalid (e.g. an event-based gateway has an outgoing sequence flow to a task.) 
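The following is an illustrative, non-normative sketch of the retry guidance given in the "Technical error handling" section above: retry only on `GRPC_STATUS_RESOURCE_EXHAUSTED` (backpressure), using exponential backoff with jitter. It assumes a plain gRPC Java stub that surfaces errors as `io.grpc.StatusRuntimeException`; the class and method names (`BackoffRetry`, `callWithRetry`) and the delay values are made up for this example and are not part of the gateway protocol.

```java
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import java.time.Duration;
import java.util.concurrent.Callable;
import java.util.concurrent.ThreadLocalRandom;

public final class BackoffRetry {

  // Retries a call with exponential backoff and jitter, but only when the error
  // signals backpressure (RESOURCE_EXHAUSTED); all other errors are rethrown immediately.
  public static <T> T callWithRetry(Callable<T> call, int maxAttempts) throws Exception {
    Duration delay = Duration.ofMillis(100); // illustrative initial delay
    for (int attempt = 1; ; attempt++) {
      try {
        return call.call();
      } catch (StatusRuntimeException e) {
        boolean backpressure =
            Status.fromThrowable(e).getCode() == Status.Code.RESOURCE_EXHAUSTED;
        if (!backpressure || attempt >= maxAttempts) {
          throw e; // not retryable, or out of attempts
        }
        long jitterMs = ThreadLocalRandom.current().nextLong(delay.toMillis() + 1);
        Thread.sleep(delay.toMillis() + jitterMs); // back off before the next attempt
        delay = delay.multipliedBy(2); // exponential growth of the base delay
      }
    }
  }
}
```

Wrapping such a helper in a circuit breaker, as suggested above, is a further option; it is omitted here to keep the sketch short.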
diff --git a/versioned_docs/version-8.2/apis-tools/img/ComponentsAndArchitecture_SaaS.png b/versioned_docs/version-8.2/apis-tools/img/ComponentsAndArchitecture_SaaS.png deleted file mode 100644 index 313c50daa6b..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/img/ComponentsAndArchitecture_SaaS.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/cluster-topology-request.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/cluster-topology-request.md deleted file mode 100644 index c38a21a2e6b..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/cluster-topology-request.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: cluster-topology-request -title: "Request cluster topology" ---- - -This example shows which broker is leader and follower for which partition. This is particularly useful when you run a cluster with multiple Zeebe brokers. - -## Related resources - -- [Clustering basics](/components/zeebe/technical-concepts/clustering.md) - -## Prerequisites - -Run Zeebe broker with endpoint `localhost:26500` (default). - -## TopologyViewer.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/cluster/TopologyViewer.java) - -```java -final Topology topology = client.newTopologyRequest().send().join(); - -System.out.println("Topology:"); -topology - .getBrokers() - .forEach( - b -> { - System.out.println(" " + b.getAddress()); - b.getPartitions() - .forEach( - p -> - System.out.println( - " " + p.getPartitionId() + " - " + p.getRole())); - }); -``` diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/data-pojo.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/data-pojo.md deleted file mode 100644 index b73b7bece7c..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/data-pojo.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: data-pojo -title: "Handle variables as POJO" -description: "Let's analyze the prerequisites and code to handle variables as POJO." ---- - -## Related resources - -- [Data flow](../../components/modeler/bpmn/data-flow.md) - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -2. Run the [deploy a process example](process-deploy.md). - -## HandleVariablesAsPojo.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/data/HandleVariablesAsPojo.java) - -```java - ... 
- try (final ZeebeClient client = clientBuilder.build()) { - final Order order = new Order(); - order.setOrderId(31243); - - client - .newCreateInstanceCommand() - .bpmnProcessId("demoProcess") - .latestVersion() - .variables(order) - .send() - .join(); - - client.newWorker().jobType("foo").handler(new DemoJobHandler()).open(); - - // run until System.in receives exit command - waitUntilSystemInput("exit"); - } - } - - public static class Order { - private long orderId; - private double totalPrice; - - public long getOrderId() { - return orderId; - } - - public void setOrderId(final long orderId) { - this.orderId = orderId; - } - - public double getTotalPrice() { - return totalPrice; - } - - public void setTotalPrice(final double totalPrice) { - this.totalPrice = totalPrice; - } - } - - private static class DemoJobHandler implements JobHandler { - @Override - public void handle(final JobClient client, final ActivatedJob job) { - // read the variables of the job - final Order order = job.getVariablesAsType(Order.class); - System.out.println("new job with orderId: " + order.getOrderId()); - - // update the variables and complete the job - order.setTotalPrice(46.50); - - client.newCompleteCommand(job.getKey()).variables(order).send(); - } - } -``` diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/decision-evaluate.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/decision-evaluate.md deleted file mode 100644 index b500b4da8b4..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/decision-evaluate.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: decision-evaluate -title: "Evaluate a decision" -description: "Let's dive deeper into Zeebe and Java to evaluate a decision." ---- - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -1. Run the [deploy a process example](process-deploy.md). Deploy [`demoDecision.dmn`](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoDecision.dmn) instead of `demoProcess.bpmn`. - -## EvaluateDecisionCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/decision/EvaluateDecisionCreator.java) - -```java -final EvaluateDecisionResponse decisionEvaluation = - client - .newEvaluateDecisionCommand() - .decisionId(decisionId) - .variables("{\"lightsaberColor\": \"blue\"}") - .send() - .join(); - -System.out.println("Decision evaluation result: " + decisionEvaluation.getDecisionOutput()); -``` diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/index.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/index.md deleted file mode 100644 index 53e5146ee35..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -id: index -title: "Java examples" -sidebar_label: "Overview" ---- - -Let's analyze a few examples utilizing Java to deploy a process, open a job worker, handle variables, and request cluster topology. - -These examples are accessible in the [Camunda 8 examples GitHub repository](https://github.com/camunda-community-hub/camunda-8-examples) on the [main branch](https://github.com/camunda-community-hub/camunda-8-examples/tree/main/zeebe-client-plain-java). 
- -Instructions to access code locally: - -``` -git clone https://github.com/camunda-community-hub/camunda-8-examples -git checkout main -cd zeebe-client-plain-java -``` - -Import the Maven project in the `samples` directory into your IDE to start hacking. - -## Process - -- [Deploy a process](process-deploy.md) -- [Create a process instance](process-instance-create.md) -- [Create non-blocking process instances](process-instance-create-nonblocking.md) -- [Create a process instance with results](process-instance-create-with-result.md) - -## Decision - -- [Evaluate a decision](decision-evaluate.md) - -## Job - -- [Open a job worker](job-worker-open.md) - -## Data - -- [Handle variables as POJO](data-pojo.md) - -## Cluster - -- [Request cluster topology](cluster-topology-request.md) diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/job-worker-open.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/job-worker-open.md deleted file mode 100644 index 9471c7072a3..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/job-worker-open.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: job-worker-open -title: "Open a job worker" -description: "Let's analyze the prerequisites and code to open a job worker." ---- - -## Related resources - -- [Job worker basics](/components/concepts/job-workers.md) - -## Prerequisites - -- Run the Zeebe broker with endpoint `localhost:26500` (default). -- Run the [deploy a process example](process-deploy.md). -- Run the [create a process instance example](process-instance-create.md) a few times. - -## JobWorkerCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/job/JobWorkerCreator.java) - -```java - ... - final String jobType = "foo"; - - try (final ZeebeClient client = clientBuilder.build()) { - - System.out.println("Opening job worker."); - - try (final JobWorker workerRegistration = - client - .newWorker() - .jobType(jobType) - .handler(new ExampleJobHandler()) - .timeout(Duration.ofSeconds(10)) - .open()) { - System.out.println("Job worker opened and receiving jobs."); - - // run until System.in receives exit command - waitUntilSystemInput("exit"); - } - } - } - - private static class ExampleJobHandler implements JobHandler { - @Override - public void handle(final JobClient client, final ActivatedJob job) { - // here: business logic that is executed with every job - System.out.println(job); - client.newCompleteCommand(job.getKey()).send().join(); - } - } -``` diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-deploy.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/process-deploy.md deleted file mode 100644 index 34ae1c4542d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-deploy.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: process-deploy -title: "Deploy a process" -description: "Let's analyze the prerequisites and code to deploy a process using Java." ---- - -## Related resources - -- [Process basics](../../components/concepts/processes.md) -- [BPMN introduction](../../components/modeler/bpmn/bpmn-primer.md) - -## Prerequisites - -Run the Zeebe broker with endpoint `localhost:26500` (default). 
- -## ProcessDeployer.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/ProcessDeployer.java) - -```java -final DeploymentEvent deploymentEvent = - client.newDeployResourceCommand() - .addResourceFromClasspath("demoProcess.bpmn") - .send() - .join(); -``` - -## demoProcess.bpmn - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoProcess.bpmn) - -Download the XML and save it in the Java classpath before running the example. Open the file with Desktop Modeler for a graphical representation. - - diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create-nonblocking.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create-nonblocking.md deleted file mode 100644 index 531191dc7b5..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create-nonblocking.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: process-instance-create-nonblocking -title: "Create non-blocking process instances" -description: "Let's analyze the prerequisites and code to create non-blocking process instances with Java." ---- - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -2. Run the [deploy a process example](process-deploy.md). - -## NonBlockingProcessInstanceCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/NonBlockingProcessInstanceCreator.java) - -```java -long instancesCreating = 0; - -while (instancesCreating < numberOfInstances) { - // this is non-blocking/async => returns a future - final ZeebeFuture future = - client.newCreateInstanceCommand().bpmnProcessId(bpmnProcessId).latestVersion().send(); - - // could put the future somewhere and eventually wait for its completion - - instancesCreating++; -} -``` diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create-with-result.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create-with-result.md deleted file mode 100644 index 1c9d0c22e4d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create-with-result.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: process-instance-create-with-result -title: "Create a process instance with results" -description: "Let's analyze the prerequisites and code to create a process instance with real results." ---- - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -1. Run the [deploy a process example](process-deploy.md). Deploy [`demoProcessSingleTask.bpmn`](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoProcessSingleTask.bpmn) instead of `demoProcess.bpmn`. 
- -## ProcessInstanceWithResultCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/ProcessInstanceWithResultCreator.java) - -```java -final ProcessInstanceResult processInstanceResult = - client - .newCreateInstanceCommand() - .bpmnProcessId(bpmnProcessId) - .latestVersion() - .withResult() // to await the completion of process execution and return result - .send() - .join(); - -System.out.println( - "Process instance created with key: " - + processInstanceResult.getProcessInstanceKey() - + " and completed with results: " - + processInstanceResult.getVariables()); -``` diff --git a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create.md b/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create.md deleted file mode 100644 index b12e0c0d562..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client-examples/process-instance-create.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: process-instance-create -title: "Create a process instance" -description: "Let's dive deeper into Zeebe and Java to create a process instance." ---- - -## Prerequisites - -1. Run the Zeebe broker with endpoint `localhost:26500` (default). -1. Run the [deploy a process example](process-deploy.md). - -## ProcessInstanceCreator.java - -[Source on GitHub](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/java/io/camunda/zeebe/example/process/ProcessInstanceCreator.java) - -```java -final ProcessInstanceEvent processInstanceEvent = - client - .newCreateInstanceCommand() - .bpmnProcessId(bpmnProcessId) - .latestVersion() - .send() - .join(); -``` diff --git a/versioned_docs/version-8.2/apis-tools/java-client/assets/order-process-simple.png b/versioned_docs/version-8.2/apis-tools/java-client/assets/order-process-simple.png deleted file mode 100644 index e21a621bb1e..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/java-client/assets/order-process-simple.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/java-client/assets/order-process.png b/versioned_docs/version-8.2/apis-tools/java-client/assets/order-process.png deleted file mode 100644 index 25edc8f4f7f..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/java-client/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/java-client/index.md b/versioned_docs/version-8.2/apis-tools/java-client/index.md deleted file mode 100644 index dffbef16a72..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client/index.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: index -title: "Java client" -sidebar_label: "Quick reference" -description: "Provide a job worker that handles polling for available jobs, use SLF4J for logging useful notes, and more." ---- - -## Dependencies - -To use the Java client library, declare the following Maven dependency in your project: - -```xml -<dependency> -  <groupId>io.camunda</groupId> -  <artifactId>zeebe-client-java</artifactId> -  <version>${zeebe.version}</version> -</dependency> -``` - -If you are using Gradle, declare the following: - -```groovy -implementation 'io.camunda:zeebe-client-java:${zeebe.version}' -``` - -Use the latest released version from [Maven Central](https://search.maven.org/artifact/io.camunda/zeebe-client-java). - -## Bootstrapping - -In Java code, instantiate the client as follows: - -```java - private static final String zeebeAPI = "[Zeebe Address e.g. 
f887f1a6-7c2b-48ce-809a-e11e5a6ba31a.dsm-1.zeebe.camunda.io:443]"; - private static final String audience = "[Zeebe Token Audience, e.g., zeebe.camunda.io]"; - private static final String clientId = "[Client ID, e.g., FmT7K8gVv_FcwiUhc8U-fAJ9wph0Kn~P]"; - private static final String clientSecret = "[Client Secret]"; - private static final String oAuthAPI = "[OAuth API, e.g., https://login.cloud.camunda.io/oauth/token]"; - - public static void main(String[] args) { - OAuthCredentialsProvider credentialsProvider = - new OAuthCredentialsProviderBuilder() - .authorizationServerUrl(oAuthAPI) - .audience(audience) - .clientId(clientId) - .clientSecret(clientSecret) - .build(); - - try (ZeebeClient client = ZeebeClient.newClientBuilder() - .gatewayAddress(zeebeAPI) - .credentialsProvider(credentialsProvider) - .build()) { - client.newTopologyRequest().send().join(); - } - } -``` - -Let's go over this code snippet line by line: - -1. Declare a few variables to define the connection properties. These values can be taken from the connection information on the **Client Credentials** page. Note that `clientSecret` is only visible when you create the client credentials. -2. Create the credentials provider for the OAuth protocol. This is needed to authenticate your client. -3. Create the client by passing in the address of the cluster we want to connect to and the credentials provider from the step above. Note that a client should be closed after usage, which is easily achieved by the try-with-resources statement. -4. Send a test request to verify the connection was established. - -See [io.camunda.zeebe.client.ZeebeClientBuilder](https://javadoc.io/doc/io.camunda/zeebe-client-java/latest/io/camunda/zeebe/client/ZeebeClientBuilder.html) for a description of all available configuration properties. - -Another (more compact) option is to pass in the connection settings via environment variables: - -```bash -export ZEEBE_ADDRESS='[Zeebe Address]' -export ZEEBE_CLIENT_ID='[Client ID]' -export ZEEBE_CLIENT_SECRET='[Client Secret]' -export ZEEBE_AUTHORIZATION_SERVER_URL='[OAuth API]' -``` - -When you create client credentials in Camunda 8, you have the option to download a file with the lines above filled out for you. - -Given these environment variables, you can instantiate the client as follows: - -```java -ZeebeClient client = - ZeebeClient.newClientBuilder() - .gatewayAddress(System.getenv("ZEEBE_ADDRESS")) - .build(); -``` - -## Javadoc - -The official Java client library API documentation can be found [here](https://javadoc.io/doc/io.camunda/zeebe-client-java). These are standard Javadocs, so your favorite JVM IDE will be able to install them locally as well. - -## Next steps - -- [Getting Started Guide](https://github.com/camunda/camunda-platform-get-started): A comprehensive tutorial that covers Camunda Modeler, Operate, and the Java client. -- [Job worker](job-worker.md): An introduction to the Java client's job worker. -- [Logging](logging.md): An introduction to configuring logging for a Zeebe client. -- [Writing tests](zeebe-process-test.md): An introduction to unit testing processes. -- [Examples](apis-tools/java-client-examples/index.md): A collection of specific examples for different use cases. 
diff --git a/versioned_docs/version-8.2/apis-tools/java-client/job-worker.md b/versioned_docs/version-8.2/apis-tools/java-client/job-worker.md deleted file mode 100644 index 4d722538263..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client/job-worker.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: job-worker -title: "Job worker" -description: "Let's take a deeper look at job workers to handle jobs." -keywords: ["backpressure", "back-pressure", "back pressure"] ---- - -## Related resources - -- [Job worker basics](/components/concepts/job-workers.md) - -## The Java client's job worker - -The Java client provides a job worker that handles polling for available jobs. This allows you to focus on writing code to handle the activated jobs. - -On `open`, the job worker waits `pollInterval` milliseconds and then polls for `maxJobsActive` jobs. It then continues with the following schedule: - -1. If a poll did not activate any jobs, it waits for `pollInterval` milliseconds and then polls for more jobs. -2. If a poll activated jobs, the worker submits each job to the job handler. -3. Every time a job is handled, the worker checks whether the number of unhandled jobs have dropped below 30% (rounded up) of `maxJobsActive`. The first time that happens, it will poll for more jobs. -4. If a poll fails with an error response, a backoff strategy is applied. This strategy waits for the delay provided by the `backoffSupplier` and polls for more jobs. - -For example, imagine you have 10 process instances and a single job worker configured with `maxJobsActive = 3`. The job worker will first pull three jobs and begin executing them. The threshold to poll for new jobs is 1 (30% of 3 rounded up). After two jobs have completed, the threshold is reached and the job worker will poll for up to 2 additional jobs. This process repeats until the jobs from all 10 process instances are completed. - -## Example usage - -- [Open a job worker](../java-client-examples/job-worker-open.md) - -## Backoff configuration - -When a poll fails with an error response, the job worker applies a backoff strategy. It waits for some time, after which it polls again for more jobs. This gives a Zeebe cluster some time to recover from a failure. In some cases, you may want to configure this backoff strategy to better fit your situation. - -The retry delay (i.e. the time the job worker waits after an error before the next poll for new jobs) is provided by the [`BackoffSupplier`](https://github.com/camunda/camunda/blob/stable/8.2/clients/java/src/main/java/io/camunda/zeebe/client/api/worker/BackoffSupplier.java). You can replace it using the `.backoffSupplier()` method on the [`JobWorkerBuilder`](https://github.com/camunda/camunda/blob/stable/8.2/clients/java/src/main/java/io/camunda/zeebe/client/api/worker/JobWorkerBuilderStep1.java). - -By default, the job worker uses an exponential backoff implementation, which you can configure using `BackoffSupplier.newBackoffBuilder()`. - -The backoff strategy is especially useful for dealing with the `GRPC_STATUS_RESOURCE_EXHAUSTED` error response (see [gRPC Technical Error Handling](/apis-tools/grpc.md#technical-error-handling)). - -This error code indicates the Zeebe cluster is currently under too large of a load and has decided to reject this request. - -By backing off, the job worker helps Zeebe by reducing the load. - -:::note -Zeebe's [backpressure mechanism](../../../self-managed/zeebe-deployment/operations/backpressure) can also be configured. 
-::: diff --git a/versioned_docs/version-8.2/apis-tools/java-client/logging.md b/versioned_docs/version-8.2/apis-tools/java-client/logging.md deleted file mode 100644 index 75ef99948d5..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client/logging.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: logging -title: "Logging" -description: "Here, we'll take a look at logging details in the case a job handler fails execution." ---- - -The client uses SLF4J for logging useful notes, such as exception stack traces when a job handler fails execution. Using the SLF4J API, any SLF4J implementation can be plugged in. The following example uses Log4J 2: - -## Maven dependencies - -```xml - - org.apache.logging.log4j - log4j-slf4j-impl - 2.8.1 - - - - org.apache.logging.log4j - log4j-core - 2.8.1 - -``` - -## Configuration - -First, add a file called `log4j2.xml` to the classpath of your application. - -Then, add the following content: - -```xml - - - - - - - - - - - - - -``` - -This will log every log message to the console. diff --git a/versioned_docs/version-8.2/apis-tools/java-client/zeebe-process-test.md b/versioned_docs/version-8.2/apis-tools/java-client/zeebe-process-test.md deleted file mode 100644 index 466b221283d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/java-client/zeebe-process-test.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -id: zeebe-process-test -title: "Zeebe Process Test" ---- - -[Zeebe Process Test](https://github.com/camunda-cloud/zeebe-process-test) allows you to unit test your Camunda 8 BPMN -processes. It will start a lightweight in-memory Zeebe engine and provide you with a set of assertions you can use to -verify your process behaves as expected. - -## Prerequisites - -This library requires the following: - -- Java 17+ when running with an embedded engine (`zeebe-process-test-extension`) -- Java 8+ and Docker when running using testcontainers (`zeebe-process-test-extension-testcontainer`) -- JUnit 5 - -## Dependency - -Zeebe Process Test provides you with two dependencies. Which one you need to use is dependent on the -Java version you are using. - -#### Testcontainers (JDK 8+) - -If you are building your project with a JDK lower than 17, use the `testcontainer` dependency. This starts a `testcontainer` where a Zeebe engine is running. It is beneficial to use this version instead of the embedded version so your code can be implemented independently of the Java version used by the Zeebe engine. The downside is that `testcontainers` provide some overhead, which means tests will be slower. Additionally, Docker must be running to execute the tests. - -```xml - - io.camunda - zeebe-process-test-extension-testcontainer - X.Y.Z - test - -``` - -#### Embedded (JDK 17+) - -If you are building your project with JDK 17+, you can make use of an embedded Zeebe engine. The advantage of using this instead of the `testcontainer` version is that this is the faster solution. This also does not require Docker to be running. The downside to this solution is that the JDK requirement is bound to the Java version of the Zeebe engine. -Whenever this Java version changes, you'll either have to [switch to the testcontainer version](#switching-between-testcontainers-and-embedded), or update your own JDK to match the Zeebe engine. - -```xml - - io.camunda - zeebe-process-test-extension - X.Y.Z - test - -``` - -## Annotation - -Annotate your test class with the `@ZeebeProcessTest` annotation. This annotation will do a couple of things: - -1. 
It will manage the lifecycle of the testcontainer/embedded Zeebe engine. -2. It will create a client which can be used to interact with the engine. -3. It will (optionally) inject three fields in your test class: - 1. `ZeebeTestEngine` - This is the engine that will run your process. It will provide some basic functionality - to help you write your tests, such as waiting for an idle state and increasing the time. - 2. `ZeebeClient` - This is the client that allows you to send commands to the engine, such as - starting a process instance. The interface of this client is identical to the interface you - use to connect to a real Zeebe engine. - 3. `RecordStream` - This gives you access to all the records processed by the engine. - Assertions use the records for verifying expectations. This grants you the freedom to create your own assertions. - -```java -// When using the embedded Zeebe engine (Java 17+) -import io.camunda.zeebe.process.test.extension.ZeebeProcessTest; - -// When using testcontainers (Java 8+) -import io.camunda.zeebe.process.test.extension.testcontainer.ZeebeProcessTest; - -@ZeebeProcessTest -class DeploymentAssertTest { - private ZeebeTestEngine engine; - private ZeebeClient client; - private RecordStream recordStream; -} -``` - -## Switching between testcontainers and embedded - -Switching between testcontainers and embedded requires just two steps: - -1. Switch to the relevant dependency. - - - Testcontainers: `zeebe-process-test-extension-testcontainer` - - Embedded: `zeebe-process-test-extension` - -2. Change the import of `@ZeebeProcessTest`. - - Testcontainers: `import io.camunda.zeebe.process.test.extension.testcontainer.ZeebeProcessTest;` - - Embedded: `import io.camunda.zeebe.process.test.extension.ZeebeProcessTest;` - -## Assertions - -Start an assertion using the following entry points: - -### Deployment assertions - -```java -DeploymentEvent event = client.newDeployResourceCommand() - .addResourceFromClasspath("my-process.bpmn") - .send() - .join(); -DeploymentAssert assertions = BpmnAssert.assertThat(event); -``` - -### Process instance assertions - -Started by manually sending an event: - -```java -ProcessInstanceEvent event = client.newCreateInstanceCommand() - .bpmnProcessId("") - .latestVersion() - .send() - .join(); -ProcessInstanceAssert assertions = BpmnAssert.assertThat(event); -``` - -```java -ProcessInstanceResult event = client.newCreateInstanceCommand() - .bpmnProcessId("") - .latestVersion() - .withResult() - .send() - .join(); - ProcessInstanceAssert assertions = BpmnAssert.assertThat(event); -``` - -Started by a timer: - -```java -Optional firstProcessInstance = InspectionUtility.findProcessEvents() - .triggeredByTimer(ProcessPackTimerStartEvent.TIMER_ID) - .findFirstProcessInstance(); -ProcessInstanceAssert assertions = BpmnAssert.assertThat(firstProcessInstance.get()); -``` - -Started by a call activity: - -```java -Optional firstProcessInstance = InspectionUtility.findProcessInstances() - .withParentProcessInstanceKey() - .withBpmnProcessId("") - .findFirstProcessInstance(); -ProcessInstanceAssert assertions = BpmnAssert.assertThat(firstProcessInstance.get()); -``` - -### Job assertions - -```java -ActivateJobsResponse response = client.newActivateJobsCommand() - .jobType("") - .maxJobsToActivate(1) - .send() - .join(); -ActivatedJob activatedJob = response.getJobs().get(0); -JobAssert assertions = BpmnAssert.assertThat(activatedJob); -``` - -### Message assertions - -```java -PublishMessageResponse response = client - 
.newPublishMessageCommand() - .messageName("") - .correlationKey("") - .send() - .join(); -MessageAssert assertions = BpmnAssert.assertThat(response); -``` - -### Incident assertions - -Via a process instance - -```java -ProcessInstanceEvent event = client.newCreateInstanceCommand() - .bpmnProcessId("") - .latestVersion() - .send() - .join(); -IncidentAssert assertions = BpmnAssert.assertThat(event) - .extractingLatestIncident(); -``` - -Via a job: - -```java -ActivateJobsResponse response = client.newActivateJobsCommand() - .jobType("") - .maxJobsToActivate(1) - .send() - .join(); -ActivatedJob activatedJob = response.getJobs().get(0); -IncidentAssert assertions = BpmnAssert.assertThat(activatedJob) - .extractingLatestIncident(); -``` - -## Waiting for idle state - -:::caution -Waiting for idle state is a new feature. When the engine is detected to be idle, it -will wait 30ms before checking again. If it is still idle at that stage, it is considered to be in -an idle state. - -**It is unknown if the 30ms delay is sufficient. Using it could result in flaky tests!** - -Any feedback about the wait for idle state is highly appreciated. Let us know if the delay should be higher or configurable. -Leave your feedback on our [GitHub page](https://github.com/camunda-cloud/zeebe-process-test/issues). -::: - -`engine.waitForIdleState(timeout)` will cause your test to stop executing until the engine has -reached an idle state. If the engine does not reach an idle state within the specified timeout, a -`TimeoutException` will be thrown. - -We have defined an idle state as a state in which the engine makes no progress and is waiting for -new commands or events to trigger. Once the engine has detected it has become idle, it will wait for -a delay (30ms) and check if it is still idle. If this is the case, it is considered to be in idle -state and continue your test. - -## Wait for busy state - -`engine.waitForBusyState(timeout)` will cause your test to stop executing until the engine has -reached a busy state. If the engine does not reach a busy state within the specified timeout, a -`TimeoutException` is thrown. - -We consider the engine to have reached a busy state when any new record/command is processed since -we've started waiting. - -Waiting for a busy state is useful in scenarios where you're expecting the engine to start doing -something, without explicitly triggering it yourself. An example of this would be a process with a -timer event. We can increase the time of the engine, but we cannot trigger the timer explicitly. -Because of this, we should wait for a busy state after increasing the engine time. - -## Examples - -For example tests, refer to [GitHub](https://github.com/camunda-cloud/zeebe-process-test). 
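Building on the wait helpers described above, a minimal test sketch could look as follows. This is an illustration rather than part of the reference: the process ID `timer-process`, the resource name `timer-process.bpmn`, and the durations are assumptions made up for the example.

```java
import java.time.Duration;
import java.util.concurrent.TimeoutException;

import org.junit.jupiter.api.Test;

import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.process.test.api.ZeebeTestEngine;
// For the testcontainer variant, import
// io.camunda.zeebe.process.test.extension.testcontainer.ZeebeProcessTest instead.
import io.camunda.zeebe.process.test.extension.ZeebeProcessTest;

@ZeebeProcessTest
class TimerProcessTest {

  private ZeebeTestEngine engine; // injected by @ZeebeProcessTest
  private ZeebeClient client; // injected by @ZeebeProcessTest

  @Test
  void shouldTriggerTimer() throws InterruptedException, TimeoutException {
    // Deploy and start a process containing a timer event (hypothetical IDs).
    client.newDeployResourceCommand()
        .addResourceFromClasspath("timer-process.bpmn")
        .send()
        .join();
    client.newCreateInstanceCommand()
        .bpmnProcessId("timer-process")
        .latestVersion()
        .send()
        .join();

    // Let the engine finish processing the commands above.
    engine.waitForIdleState(Duration.ofSeconds(1));

    // The timer cannot be triggered explicitly; move the engine clock past it instead.
    engine.increaseTime(Duration.ofDays(1));

    // The timer firing happens without another command from the test,
    // so wait for the engine to become busy before asserting on the outcome.
    engine.waitForBusyState(Duration.ofSeconds(1));
  }
}
```

With the embedded extension this runs fully in memory; the same sketch works with the testcontainer extension as long as Docker is available.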
diff --git a/versioned_docs/version-8.2/apis-tools/operate-api/img/color-sequence-flows.png b/versioned_docs/version-8.2/apis-tools/operate-api/img/color-sequence-flows.png deleted file mode 100644 index 63ad852ce46..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/operate-api/img/color-sequence-flows.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/operate-api/img/display-incidents.png b/versioned_docs/version-8.2/apis-tools/operate-api/img/display-incidents.png deleted file mode 100644 index 33c7d45cddd..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/operate-api/img/display-incidents.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/operate-api/img/render-diagram.png b/versioned_docs/version-8.2/apis-tools/operate-api/img/render-diagram.png deleted file mode 100644 index 4c20a73dcb9..00000000000 Binary files a/versioned_docs/version-8.2/apis-tools/operate-api/img/render-diagram.png and /dev/null differ diff --git a/versioned_docs/version-8.2/apis-tools/operate-api/overview.md b/versioned_docs/version-8.2/apis-tools/operate-api/overview.md deleted file mode 100644 index 56f68ff6c77..00000000000 --- a/versioned_docs/version-8.2/apis-tools/operate-api/overview.md +++ /dev/null @@ -1,521 +0,0 @@ ---- -id: operate-api-overview -title: Overview -slug: /apis-tools/operate-api/overview -description: "Operate API is a REST API and provides searching, getting, and changing Operate data. Requests and responses are in JSON." ---- - -## Introduction - -Operate API is a REST API and provides searching, getting, and changing Operate data. -Requests and responses are in JSON notation. Some objects have additional endpoints. -For example, `process-definitions` has an endpoint to get the process-definition as XML representation. -In case of errors, Operate API returns an error object. - -## Context paths - -For SaaS: `https://${REGION}.operate.camunda.io:443/${CLUSTER_ID}/v1/`, and for Self-Managed installations: `http://localhost:8080/v1/`. - -:::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). - -For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Operate component. -::: - -## API documentation as Swagger - -A detailed API description is also available as Swagger UI at `${base-url}/swagger-ui.html`. - -For SaaS: `https://${REGION}.operate.camunda.io/${CLUSTER_ID}/swagger-ui.html`, and for Self-Managed installations: `http://localhost:8080/swagger-ui.html`. - -:::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). -::: - -## Authentication - -You need authentication to access the API endpoints. - -### Authentication for SaaS - -#### Authentication via JWT access token - -You must pass an access token as a header in each request to the SaaS Operate API. When you create an Operate [client](/guides/setup-client-connection-credentials.md), you get all the information needed to connect to Operate. 
- -The following settings are needed to request a token: - -| Name | Description | Default value | -| ------------------------ | ----------------------------------------------- | -------------------- | -| client id | Name of your registered client | - | -| client secret | Password for your registered client | - | -| audience | Permission name; if not given use default value | `operate.camunda.io` | -| authorization server url | Token issuer server | - | - -:::note -For more information on how to get these values for Camunda 8, read [Manage API Clients](/docs/components/console/manage-clusters/manage-api-clients/). -::: - -Send a token issue _POST_ request to the authorization server with the required settings: - -```shell -curl -X POST -H 'content-type: application/json' -d '{"client_id": "RgVdPv...", "client_secret":"eDS1~Hg...","audience":"operate.camunda.io","grant_type":"client_credentials"}' https://login.cloud.camunda.io/oauth/token -``` - -You will get something like the following: - -```json -{ - "access_token": "eyJhbG...", - "scope": "f408ca38-....", - "expires_in": 58847, - "token_type": "Bearer" -} -``` - -Capture the `access_token` value from the response object. In each request to the Operate API, include it as an authorization header: - -``` -Authorization: Bearer eyJHb... -``` - -### Authentication for Self-Managed cluster - -#### Authentication via Identity JWT access token - -This authentication method is described in [Operate Configuration - Authentication](/docs/self-managed/operate-deployment/operate-authentication/#identity). - -#### Authentication via cookie - -Another way to access the Operate API in a Self-Managed cluster is to send cookie headers in each request. The cookie can be obtained by using the API endpoint `/api/login`. Take the steps in the following example: - -**Example:** - -1. Log in as user 'demo' and store the cookie in the file `cookie.txt`. - -```shell -curl -c cookie.txt -X POST 'http://localhost:8080/api/login?username=demo&password=demo' -``` - -2. Send the cookie (as a header) in each API request. In this case, request all process definitions. - -```shell -curl -b cookie.txt -X POST 'http://localhost:8080/v1/process-definitions/search' -H 'Content-Type: application/json' -d '{}' -``` - -## Endpoints - -| Endpoint (HTTP verb + URL path) | Description | Notes | -| :----------------------------------------------- | ------------------------------------------------------------------: | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Process definitions** | | | -| `POST /v1/process-definitions/search` | Search for process definitions | | -| `GET /v1/process-definitions/{key}` | Get process definition by key | | -| `GET /v1/process-definitions/{key}/xml` | Get process definition by key as XML | | -| **Process instances** | | | -| `POST /v1/process-instances/search` | Search for process instances | New field added: `processDefinitionKey`

    **Warning**
    1. New fields could break deserialization, so configure your client to ignore unknown fields.
    2. The `processDefinitionKey` field will only contain data from version 8.1.8 onward | -| `GET /v1/process-instances/{key}` | Get process instance by key | New field added: `processDefinitionKey`

    **Warning**
    1. New fields could break deserialization, so configure your client to ignore unknown fields.
    2. The `processDefinitionKey` field will only contain data from version 8.1.8 onward | -| `DELETE /v1/process-instances/{key}` | Delete process instance _and dependent_ data by key | | -| `GET /v1/process-instances/{key}/statistics` | Get flow node statistic by process instance key | New endpoint | -| `GET /v1/process-instances/{key}/sequence-flows` | Get sequence flows of process instance by key | New endpoint | -| **Incidents** | | | -| `POST /v1/incidents/search` | Search for incidents | | -| `GET /v1/incidents/{key}` | Get incident by key | | -| **Flownode instances** | | | -| `POST /v1/flownode-instances/search` | Search for flow node instances | New fields added:
    `flowNodeId`
    `flowNodeName`
    `processDefinitionKey`

    **Warning**
    1. New fields could break deserialization, so configure your client to ignore unknown fields.
    2. The `processDefinitionKey` field will only contain data from version 8.1.8 onward
    3. The field `flowNodeName` is only returned if set in the BPMN diagram, so no flowNodeName is returned for flow nodes that do not have it set in the diagram. | -| `GET /v1/flownode-instances/{key}` | Get flow node instance by key | New fields added:
    `flowNodeId`
    `flowNodeName`
    `processDefinitionKey`

    **Warning**
    1. New fields could break deserialization, so configure your client to ignore unknown fields.
    2. The `processDefinitionKey` field will only contain data from version 8.1.8 onward
    3. The field `flowNodeName` is only returned if set in the BPMN diagram, so no flowNodeName is returned for flow nodes that do not have it set in the diagram. | -| **Variables** | | | -| `POST /v1/variables/search` | Search for variables; results can contain truncated variable values | | -| `GET /v1/variables/{key}` | Get variable by key; contains the full value of variable | | - -## Search - -Every object has a search `/v1//search` endpoint which can be requested by `POST` and a given query request. - -### Query - -The query request consists of components for **filter**, **size**, **sort**, and **pagination**. - -``` -{ - "filter": { object fields to match }, - "size": , - "sort": [ {"field":"", "order": "" ], - "searchAfter": [ ] -} -``` - -#### Filter - -Specifies which fields should match. Only items that match the given fields will be returned. -The section on [object schemas](#object-schemas) lists all available fields for each object. - -##### Filter strings, numbers, and booleans - -Fields of type string, number, and boolean need the exact value to match. - -###### Examples - -Return all items with field `processInstanceKey` equals `235`: - -```json -{ "filter": { "processInstanceKey": 235 } } -``` - -A filter that could be used to search for all flow node instances with field `processInstanceKey` equals `235`, `state` equals `ACTIVE` and `incident` equals `true`: - -```json -{ - "filter": { "processInstanceKey": 235, "state": "ACTIVE", "incident": true } -} -``` - -##### Filter dates - -Date fields need to be specified in format: `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`; for example, `2022-03-17T11:50:25.729+0000`. - -You can use modifier to match date ranges: - -| Modifier | Description | -| -------- | --------------- | -| \|\|/y | Within a year | -| \|\|/M | Within a month | -| \|\|/w | Within a week | -| \|\|/d | Within a day | -| \|\|/h | Within an hour | -| \|\|/m | Within a minute | -| \|\|/s | Within a second | - -###### Example - -Return all items with field `startDate` within a minute (`||/m`) for `2022-03-17 11:50:25`. - -```json -{ - "filter": { - "startDate": "2022-03-17T11:50:25.729+0000||/m" - } -} -``` - -#### Size - -Maximum items should be returned and must be a number. - -##### Example - -Return maximum `23` items: - -```json -{ "size": 23 } -``` - -#### Sort - -Specify which field of the object should be sorted and whether ascending (`ASC`) or descending (`DESC`). - -##### Example - -Sort by `name` **desc**ending: - -```json -{ "sort": [{ "field": "name", "order": "DESC" }] } -``` - -#### Pagination - -Specify the item where the next search should start. For this, you need the values from previous results. -Copy the values from `sortValues` field from the previous results into the `searchAfter` value of query. -See also [results](#results). - -##### Example - -Get next 10 results for previous query by copying the value of `sortValues` of the previous results object. -Assuming the `sortValues` value was `["the-name",12345]`, put it as value for `searchAfter` in the next query. - -```json -{ - "sort": [{ "field": "name", "order": "DESC" }], - "searchAfter": ["the-name", 12345] -} -``` - -#### Query components combined - -The query components `filter`, `size`, `sort`, and `searchAfter` can be combined. 
- -Default values are: - -| Component | Default value | Description | -| ----------- | --------------------------------- | ---------------------------- | -| filter | null | Empty (all fields match) | -| size | 10 | | -| sort | `[{"field":"key","order":"ASC"}]` | Sorted ascending by key | -| searchAfter | null | First items will be returned | - -##### Example - -Get max `50` process instances with `processVersion` equals `2` sorted `asc`ending by `bpmnProcessId`: - -`POST /v1/process-instances/search` - -```json -{ - "filter": { - "processVersion": 2 - }, - "size": 50, - "sort": [ - { - "field": "bpmnProcessId", - "order": "ASC" - } - ] -} -``` - -Results are: - -```json - ... - { - "key": 2251799813699162, - "processVersion": 2, - "bpmnProcessId": "called-process", - "startDate": "2022-03-17T11:53:41.581+0000", - "state": "ACTIVE", - "processDefinitionKey": 2251799813695996 - } - ], - "sortValues": [ - "called-process", - 2251799813699162 - ], - "total": 654 -} -``` - -Take the value of `sortValues` and copy it to `searchAfter` for the next `50` items: - -```json -{ - "filter": { - "processVersion": 2 - }, - "size": 50, - "sort": [ - { - "field": "bpmnProcessId", - "order": "ASC" - } - ], - "searchAfter": ["called-process", 2251799813699162] -} -``` - -### Results - -The API responds with a `Results` object. It contains an `items` array, `total` amount of found items, -and `sortValues` for pagination. - -``` -{ - "items": [ { item 1 } , { item 2 } ... ], - "total": , - "sortValues": [] -} -``` - -#### Items - -An array of objects that matches the query. - -#### Total - -The total amount of found objects. This is an exact value until 10,000. If more than this, try to make your query more specific. - -See also [Elasticsearch max results](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/index-modules.html#index-max-result-window). - -#### sortValues (Pagination) - -Use the value (an array) of this field to get the next page of results in your next query. -Copy the value to `searchAfter` in your next query to get the next page. - -See also [Elasticsearch search after](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/paginate-search-results.html#search-after). - -##### Example - -Results for `process-instances`: - -```json -{ - "items": [ - { - "key": 2251799813699213, - "processVersion": 2, - "bpmnProcessId": "called-process", - "startDate": "2022-03-17T11:53:41.758+0000", - "state": "ACTIVE", - "processDefinitionKey": 2251799813695996 - }, - { - "key": 2251799813699262, - "processVersion": 2, - "bpmnProcessId": "called-process", - "startDate": "2022-03-17T11:53:41.853+0000", - "state": "ACTIVE", - "processDefinitionKey": 2251799813695996 - } - ], - "sortValues": ["called-process", 2251799813699262], - "total": 654 -} -``` - -## Get object by key - -Every object has a `GET /v1//{key}` endpoint where `{key}` is the identifier of the object. -Every object has a `key` field. - -### Example - -Get the data for process instance with key `2251799813699213`: - -`GET /v1/process-instances/2251799813699213` - -#### Result: - -```json -{ - "key": 2251799813699213, - "processVersion": 2, - "bpmnProcessId": "called-process", - "startDate": "2022-03-17T11:53:41.758+0000", - "state": "ACTIVE", - "processDefinitionKey": 2251799813695996 -} -``` - -## Change objects - -Some objects can be changed (for example, deleted). -The endpoint is the same as getting the object, but with HTTP `DELETE` instead of HTTP `GET`. 
-The response is a `ChangeStatus` object which describes what happened and how many objects were changed. - -### Example - -Delete the data for process instance (and all dependant data) with key `2251799813699213`: - -`DELETE /v1/process-instances/2251799813699213` - -#### Result - -```json -{ - "message": "1 process instance and dependant data was deleted", - "deleted": 1 -} -``` - -## Object schemas - -Each object has a set of fields with values. -These values could be of type `string`, `number`, `boolean`, and `dateString`. - -| Type | Example | -| ---------- | ------------------------------ | ----- | -| string | "Operate" | -| number | 235 | -| boolean | true | false | -| dateString | "2022-03-23T11:50:25.729+0000" | - -### Process definition - -``` -{ - "key": - "name": - "version": - "bpmnProcessId": -} -``` - -### Process instance - -``` -{ - "key": - "processVersion": - "bpmnProcessId": - "parentKey": - "startDate": - "endDate": - "state": - "processDefinitionKey": -} -``` - -### Incident - -``` -{ - "key": - "processDefinitionKey": - "processInstanceKey": - "type": - "message": - "creationTime": - "state": -} -``` - -### Flow node instance - -``` -{ - "key": - "processInstanceKey": - "processDefinitionKey": - "startDate": - "endDate": - "flowNodeId": - "flowNodeName": - "incidentKey": - "type": - "state": - "incident": -} -``` - -The field flowNodeName is only returned if set in the BPMN diagram, so no flowNodeName is returned for flow nodes that do not have it set in the diagram. - -### Variable - -``` -{ - "key": - "processInstanceKey": - "scopeKey": - "name": - "value": - Always truncated if value is too big in "search" results. In "get object" result it is not truncated. - "truncated": - If true 'value' is truncated. -} -``` - -### Change status - -``` -{ - "message": - What was changed - "deleted": - How many items were deleted -} -``` - -### Error - -``` -{ - "status": - HTTP Status - "message": - Details about the error. - "instance": - UUID for look up eg. in log messages - "type": - Type of error. Could be ServerException, ClientException, ValidationException, ResourceNotFoundException -} -``` diff --git a/versioned_docs/version-8.2/apis-tools/operate-api/sidebar-schema.js b/versioned_docs/version-8.2/apis-tools/operate-api/sidebar-schema.js deleted file mode 100644 index 9ba4540f906..00000000000 --- a/versioned_docs/version-8.2/apis-tools/operate-api/sidebar-schema.js +++ /dev/null @@ -1,10 +0,0 @@ -/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ - -module.exports = { - "Operate API (REST)": [ - { - type: "autogenerated", - dirName: "apis-tools/operate-api", - }, - ], -}; diff --git a/versioned_docs/version-8.2/apis-tools/operate-api/tutorial.md b/versioned_docs/version-8.2/apis-tools/operate-api/tutorial.md deleted file mode 100644 index a3af9bc2889..00000000000 --- a/versioned_docs/version-8.2/apis-tools/operate-api/tutorial.md +++ /dev/null @@ -1,283 +0,0 @@ ---- -id: operate-api-tutorial -title: Tutorial -slug: /apis-tools/operate-api/tutorial -description: "Step through examples to implement an application using the Operate API and render a BPMN diagram." ---- - -In this tutorial, we'll step through examples to highlight the capabilities of the Operate API, such as rendering a BPMN diagram. - -## Getting started - -- You need authentication to access the API endpoints. Find more information [here](/docs/apis-tools/operate-api/overview.md#authentication). -- We will use the `bpmn-js` library to render the diagram and add overlays. 
Visit the documentation on [embedding the pre-packaged viewer](https://bpmn.io/toolkit/bpmn-js/walkthrough/#viewer-pre-packaged) for more details. - -## Set up the demo project - -1. Create a new folder using the following command: - -```sh -mkdir operate-api-demo -cd operate-api-demo -``` - -2. Initialize the project using the following command: - -```sh -npm init --y -``` - -3. Add a proxy server to bypass CORS errors. Create a `server.js` file inside the project folder with the following contents: - -```js -const http = require("http"); -const request = require("request"); - -const server = http.createServer((req, res) => { - request.get( - { - // Replace http://localhost:8080 with your Operate API url if its running on different port. - url: `http://localhost:8080${req.url}`, - headers: { - // Replace COOKIE_VALUE with your OPERATE-SESSION cookie value. - Cookie: "OPERATE-SESSION=COOKIE_VALUE", - }, - }, - (error, response, body) => { - if (error) { - console.error(error); - res.statusCode = 500; - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Content-Type", "application/json"); - res.end("Error: Could not get data from API"); - } else { - res.setHeader("Content-Type", response.headers["content-type"]); - res.setHeader("Access-Control-Allow-Origin", "*"); - - res.end(body); - } - } - ); -}); - -server.listen(3030, () => { - console.log("Proxy server listening on port 3030"); -}); -``` - -4. Install the necessary packages using the following command: - -```sh -npm install request -``` - -5. Run the server using the following command: - -```sh -node server.js -``` - -## Render a BPMN diagram - -1. Create an `index.html` file and include the `bpmn-js` script: - -```html - - - - - - - - -``` - -2. Create a `styles.css` file to maintain the styling: - -```css -html, -body, -#canvas { - height: 100%; - padding: 0; - margin: 0; -} - -.flow-node-incident { - width: 20px; - height: 20px; - display: flex; - justify-content: center; - align-items: center; - background-color: #ff3d3d; - color: White; - border-radius: 10px; - font-family: Arial; - font-size: 12px; - font-weight: bold; - transform: translateX(-50%); -} -``` - -3. Create an `api.js` script file and write a function that fetches the process XML by definition: - -```js -async function fetchDiagram() { - return fetch( - // Replace {PROCESS_DEFINITION_ID} with a process definition id. - // http://localhost:3030 is the URL of the Proxy server, which should stay the same. - "http://localhost:3030/v1/process-definitions/{PROCESS_DEFINITION_ID}/xml", - { - method: "GET", - } - ).then((response) => response.text()); -} -``` - -4. Fetch and render the diagram: - -```html - - - - - - - - - - -
    - - - - - -``` - -5. Open `index.html` in your browser to see the rendered diagram. - - ![render diagram](./img/render-diagram.png) - -## Show statistics on the diagram - -1. Add a new function to the `api.js` file that fetches the flow node statistics for a specified process instance id: - -```js -async function fetchStatistics() { - return fetch( - // Replace {PROCESS_INSTANCE_ID} with a process instance id. - // http://localhost:3030 is the URL of the proxy server, which should stay the same. - "http://localhost:3030/v1/process-instances/{PROCESS_INSTANCE_ID}/statistics", - { - method: "GET", - } - ).then((response) => response.json()); -} -``` - -2. Add an overlay that displays the number of incidents on flow nodes: - -```js -// ... -canvas.zoom("fit-viewport"); // insert following code below this line - -const overlays = viewer.get("overlays"); - -fetchStatistics() - .then((statistics) => - statistics.forEach(({ activityId, incidents }) => { - if (incidents > 0) { - overlays.add(activityId, "flowNodeState", { - position: { - bottom: 9, - right: 0, - }, - html: `<div class="flow-node-incident">
      ${incidents}
    </div>
    `, - }); - } - }) - ) - .catch((err) => { - console.error("An error occured when fetching statistics: ", err); - }); - -// ... -``` - -3. Open `index.html` in your browser to see the incident overlay displayed on the related flow node(s), if there are any. - -![display incidents](./img/display-incidents.png) - -## Highlight processed sequence flows on the diagram - -1. Add a new function to the `api.js` file that fetches the processed sequence flows for a specified process instance id: - -```js -async function fetchSequenceFlows() { - return fetch( - // Replace {PROCESS_INSTANCE_ID} with a process instance id. - // http://localhost:3030 is the URL of the Proxy server, which should stay the same. - "http://localhost:3030/v1/process-instances/{PROCESS_INSTANCE_ID}/sequence-flows", - { - method: "GET", - } - ).then((response) => response.json()); -} -``` - -2. Color the processed sequence flows: - -```js -// ... -const overlays = viewer.get("overlays"); // insert following code below this line - -fetchSequenceFlows() - .then((sequenceFlows) => { - sequenceFlows.forEach((sequenceFlow) => { - const elementRegistry = viewer.get("elementRegistry"); - const graphicsFactory = viewer.get("graphicsFactory"); - const element = elementRegistry?.get(sequenceFlow); - if (element?.di !== undefined) { - element.di.set("stroke", "#4d90ff"); - - const gfx = elementRegistry?.getGraphics(element); - if (gfx !== undefined) { - graphicsFactory?.update("connection", element, gfx); - } - } - }); - }) - .catch((err) => { - console.error("An error occured when fetching sequence flows: ", err); - }); - -// ... -``` - -3. Open `index.html` in your browser to see the processed sequence flows highlighted. - -![color sequence flows](./img/color-sequence-flows.png) - -## Full demo - -For additional details, visit the [GitHub full working demo](https://github.com/camunda/operate-api-bpmn-demo). diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/_category_.yml deleted file mode 100644 index 4aa78d19398..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Controllers" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-form-controller.md b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-form-controller.md deleted file mode 100644 index 54ed321b379..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-form-controller.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: tasklist-api-rest-form-controller -title: Form API -description: "Learn about the Form API controller, including the request parameters and an HTTP request example." ---- - -The Form API controller provides an API to query forms. - -## Endpoints - -### Get form - -Get the form details by `formId` and `processDefinitionKey` required parameters. 
- -#### URL - -`/v1/forms/{formId}?processDefinitionKey={processDefinitionKey}` - -#### Method - -`GET` - -#### Request parameters - -| Parameter name | Type | Required | Description | -| -------------------- | ----- | -------- | ----------------------------------- | -| formId | path | `true` | ID of the form | -| processDefinitionKey | query | `true` | Reference to the process definition | - -:::caution -The `formId` is a value generated by the internal Tasklist API and is distinct from the ID specified in the form editor. While the key returned to the API user when getting a task follows the format `camunda-forms:bpmn:`, only the `` segment is required to retrieve the form schema. -::: - -#### HTTP request example - -```bash -curl -X 'GET' \ - 'http://{host}/v1/forms/{formId}?processDefinitionKey={processDefinitionKey}' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -#### Responses - -| HTTP status | Description | Response schema | -| ----------- | --------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| 200 | On success | JSON object with [`FormResponse`](../schemas/responses/form-response.mdx) structure | -| 404 | An error is returned when the form with the `formId` and `processDefinitionKey` is not found. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md deleted file mode 100644 index 3636c593a1e..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md +++ /dev/null @@ -1,335 +0,0 @@ ---- -id: tasklist-api-rest-task-controller -title: Task API -description: "Learn about the Task API controller, including an HTTP request example, responses, request parameters, and an HTTP request example." ---- - -The Task API controller provides an API to query and manage tasks. - -## Endpoints - -### Search tasks - -Returns the list of tasks that satisfy search request parameters. - -#### URL - -`/v1/tasks/search` - -#### Method - -`POST` - -#### Request body - -[`TaskSearchRequest`](../schemas/requests/task-search-request.mdx) - `[Optional]` - -#### HTTP request example - -All request body parameters are optional. - -:::note -Only one of [searchAfter, searchAfterOrEqual, searchBefore, searchBeforeOrEqual] search options must be present at once in the request. 
-::: - -If an empty body is provided, all tasks are returned: - -```bash -curl -X 'POST' \ - 'http://{host}/v1/tasks/search' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '' -``` - -Only assigned and with `CREATED` state tasks will be returned: - -```bash -curl -X 'POST' \ - 'http://{host}/v1/tasks/search' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "state": "CREATED", - "assigned": true -}' -``` - -#### Responses - -| HTTP status | Description | Response schema | -| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| 200 | On success | JSON array of objects with [`TaskSearchResponse`](../schemas/responses/task-search-response.mdx) structure | -| 400 | An error is returned when more than one search parameters among [searchAfter, searchAfterOrEqual, searchBefore, searchBeforeOrEqual] are present in the request. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | - -### Get task - -This endpoint retrieves the details of a specific task identified by `{taskId}`. - -#### URL - -`/v1/tasks/{taskId}` - -#### Method - -`GET` - -#### Request parameters - -| Parameter name | Type | Required | Description | -| -------------- | ---- | :------: | -------------- | -| taskId | path | `true` | ID of the task | - -#### HTTP request example - -```shell -curl -X 'GET' \ - 'http://{host}/v1/tasks/{taskId}' \ - -H 'accept: application/json' - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -#### Responses - -| HTTP status | Description | Response schema | -| ----------- | ------------------------------------------------------------------ | ----------------------------------------------------------------------------------- | -| 200 | On success | JSON object with [`TaskResponse`](../schemas/responses/task-response.mdx) structure | -| 404 | An error is returned when the task with the `taskId` is not found. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | - -### Assign task - -Endpoint to assign a task with `taskId` to `assignee` or the active user. Returns the task. - -#### URL - -`/v1/tasks/{taskId}/assign` - -#### Method - -`PATCH` - -#### Request parameters - -| Parameter name | Type | Required | Description | -| -------------- | ---- | :------: | -------------- | -| taskId | path | `true` | ID of the task | - -#### Request body - -[`TaskAssignRequest`](../schemas/requests/task-assign-request.mdx) - `[Optional]` - -:::note -When using the REST API with a JWT authentication token, the following request body parameters may be used. 
-::: - -#### HTTP request example - -```shell -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/assign' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -If JWT authentication is used: - -```shell -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/assign' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "assignee": "someAssignee", - "allowOverrideAssignment": true -}' -``` - -#### Responses - -| HTTP status | Description | Response schema | -| ----------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| 200 | On success | JSON object with [`TaskResponse`](../schemas/responses/task-response.mdx) structure | -| 400 | An error is returned when the task is not active (not in the `CREATED` state). | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 400 | An error is returned when task was already assigned. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 403 | An error is returned when the user doesn't have the permission to assign another user to this task. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 404 | An error is returned when the task with the `taskId` is not found. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | - -### Unassign task - -Unassign a task with the provided id. This returns the task. - -#### URL - -`/v1/tasks/{taskId}/unassign` - -#### Method - -`PATCH` - -#### Request parameters - -| Parameter name | Type | Required | Description | -| -------------- | ---- | :------: | -------------- | -| taskId | path | `true` | ID of the task | - -#### HTTP request example - -```shell -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/unassign' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -#### Responses - -| HTTP status | Description | Response schema | -| ----------- | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------- | -| 200 | On success | JSON object with [`TaskResponse`](../schemas/responses/task-response.mdx) structure | -| 400 | An error is returned when the task is not active (not in the `CREATED` state). | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 400 | An error is returned if the task was not assigned before. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 404 | An error is returned when the task with the `taskId` is not found. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | - -### Complete task - -Complete a task with `taskId` and optional variables. Returns the task. 
- -#### URL - -`/v1/tasks/{taskId}/complete` - -#### Method - -`PATCH` - -#### Request parameters - -| Parameter name | Type | Required | Description | -| -------------- | ---- | :------: | -------------- | -| taskId | path | `true` | ID of the task | - -#### Request body - -[`TaskCompleteRequest`](../schemas/requests/task-complete-request.mdx) - `[Optional]` - -#### HTTP request example - -With empty body: - -```shell -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/complete' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -With [`TaskCompleteRequest`](../schemas/requests/task-complete-request.mdx): - -```shell -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/complete' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "variables": [ - { - "name": "varA", - "value": "25" - } - ] -}' -``` - -#### Responses - -| HTTP status | Description | Response schema | -| ----------- | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------- | -| 200 | On success | JSON object with [`TaskResponse`](../schemas/responses/task-response.mdx) structure | -| 400 | An error is returned when the task is not active (not in the `CREATED` state). | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 400 | An error is returned if the task was not assigned before. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 400 | An error is returned if the task is not assigned to the current user. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | -| 404 | An error is returned when the task with the `taskId` is not found. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | - -### Search task variables - -Returns a list of task variables for the specified `taskId` and `variableNames`. - -#### URL - -`/v1/tasks/{taskId}/variables/search` - -#### Method - -`POST` - -#### Request parameters - -| Parameter name | Type | Required | Description | -| -------------- | ---- | :------: | -------------- | -| taskId | path | `true` | ID of the task | - -#### Request body - -[`VariablesSearchRequest`](../schemas/requests/variables-search-request.mdx) - `[Optional]` - -#### HTTP request example - -If the request body is not provided or if the `variableNames` parameter in the request is `null` or empty, all variables associated with the task will be returned. - -```shell -curl -X 'POST' \ - 'http://{host}/v1/tasks/{taskId}/variables/search' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '' -``` - -```shell -curl -X 'POST' \ - 'http://{host}/v1/tasks/{taskId}/variables/search' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "variableNames": null - }' -``` - -```shell -curl -X 'POST' \ - 'http://{host}/v1/tasks/{taskId}/variables/search' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "variableNames": [] - }' -``` - -Only the variables with name "varA" and "varB" will be returned if they are assigned to the task. 
- -```shell -curl -X 'POST' \ - 'http://{host}/v1/tasks/{taskId}/variables/search' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "variableNames": [ - "varA", "varB" - ] - }' -``` - -#### Responses - -:::caution -Starting with the `8.3` release, changes will be implemented to the response for a 200 status. -If `isValueTruncated` is set to `true` for any variable, the corresponding `value` field will now be set to `null`. -::: - -| HTTP status | Description | Response schema | -| ----------- | ------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------- | -| 200 | On success | JSON array of objects with [`VariableResponse`](../schemas/responses/variable-response.mdx) structure | -| 404 | An error is returned when the task with the `taskId` is not found. | JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md deleted file mode 100644 index 88deceb380e..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -id: tasklist-api-rest-variables-controller -title: Variables API -description: "Learn about the Variables API controller, including request parameters, and an HTTP request example." ---- - -The Variables API controller provides an API to query variables. - -## Endpoints - -### Get variable - -Get the variable details by variable id. - -#### URL - -`/v1/variables/{variableId}` - -#### Method - -`GET` - -#### Request parameters - -| Parameter name | Type | Required | Description | -| -------------- | ---- | :------: | ------------------ | -| variableId | path | `true` | ID of the variable | - -#### HTTP request example - -```bash -curl -X 'GET' \ -'http://{host}/v1/variables/{variableId}' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -#### Responses - -:::caution -Starting with the `8.3` release, we're making changes to the response format for a `200` status. -The returned JSON will be simplified as follows: - -Current response: - -```json -{ - "id": "string", - "name": "string", - "value": "string", - "isValueTruncated": true, - "previewValue": "string" -} -``` - -New response (from next release): - -```json -{ - "id": "string", - "name": "string", - "value": "string" -} -``` - -::: - -| HTTP status | Description | Response schema | -| ----------- | ------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------- | -| 200 | On success | JSON object with [`VariableResponse`](../schemas/responses/variable-response.mdx) structure | -| 404 | An error is returned when the variable with the variableId is not found. 
| JSON object with [`Error`](../schemas/responses/error-response.mdx) structure | diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/_category_.yml deleted file mode 100644 index 06d77d937e0..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Schemas" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/_category_.yml deleted file mode 100644 index a91f30b39a4..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Enums" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/sort.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/sort.mdx deleted file mode 100644 index 34959eaf407..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/sort.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: sort -title: Sort -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -State of the task. - -```java -enum Sort { - ASC, - DESC -} -``` - -### Values - -#### [Sort.ASC](#) - -> Ascending - -#### [Sort.DESC](#) - -> Descending - -### Member of - -[`TaskOrderBy`](../models/task-order-by.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/task-sort-fields.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/task-sort-fields.mdx deleted file mode 100644 index ea458f89813..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/task-sort-fields.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: task-sort-fields -title: TaskSortFields -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -State of the task. - -```java -enum TaskSortFields { - creationTime, - completionTime, - followUpDate, - dueDate -} -``` - -### Values - -#### [TaskSortFields.creationTime](#) - -> Use the field creationTime for sorting - -#### [TaskSortFields.completionTime](#) - -> Use the field completionTime for sorting - -#### [TaskSortFields.followUpDate](#) - -> Use the field followUpDate for sorting - -#### [TaskSortFields.dueDate](#) - -> Use the field dueDate for sorting - -### Member of - -[`TaskOrderBy`](../models/task-order-by.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/task-state.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/task-state.mdx deleted file mode 100644 index d7f02b488aa..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/enums/task-state.mdx +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: task-state -title: TaskState -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Represents the state of the task. 
- -```java -enum TaskState { - CREATED, - COMPLETED, - CANCELED -} -``` - -### Values - -#### [TaskState.CREATED](#) - -> - -#### [TaskState.COMPLETED](#) - -> - -#### [TaskState.CANCELED](#) - -> - -### Member of - -[`TaskResponse`](../responses/task-response.mdx) -[`TaskSearchRequest`](../requests/task-search-request.mdx) -[`TaskSearchResponse`](../responses/task-search-response.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/_category_.yml deleted file mode 100644 index ec3cab819ed..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Models" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/date-filter-input.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/date-filter-input.mdx deleted file mode 100644 index cacceb478b3..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/date-filter-input.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -id: date-filter-input -title: DateFilter -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Filter using a date range - - - - -```graphql -{ - "from": string(date-time), - "to": string(date-time) -} -``` - - - - - -```json -{ - "from": "2023-03-29T18:38:10.491Z", - "to": "2023-03-29T18:38:10.491Z" -} -``` - - - - -### Fields - -#### [DateFilter.from](#)`string(date-time)` - -> Start date range to search from in date-time format outlined in section 5.6 of the RFC 3339 profile of the ISO 8601 standard - -#### [DateFilter.to](#)`string(date-time)` - -> End date range to search to in date-time format outlined in section 5.6 of the RFC 3339 profile of the ISO 8601 standard - -### Member of - -[`TaskSearchRequest`](../requests/task-search-request.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/task-order-by.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/task-order-by.mdx deleted file mode 100644 index ed0a79a99d5..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/models/task-order-by.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -id: task-order-by -title: TaskOrderBy -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Filter using a date range - - - - -```graphql -{ - "field": TaskSortFields, - "order": Sort -} -``` - - - - - -```json -{ - "field": "completionTime", - "order": "ASC" -} -``` - - - - -### Fields - -#### [TaskOrderBy.field](#)[`TaskSortFields`](../enums/task-sort-fields.mdx) - -> Allowed fields to sort by - -#### [TaskOrderBy.order](#)[`Sort`](../enums/sort.mdx) - -> Define if sorting is Ascending or Descending - -### Member of - -[`TaskSearchRequest`](../requests/task-search-request.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/_category_.yml 
b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/_category_.yml deleted file mode 100644 index 6cde1acda93..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Requests" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-assign-request.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-assign-request.mdx deleted file mode 100644 index 46f9f05c549..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-assign-request.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -id: task-assign-request -title: TaskAssignRequest -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -TaskAssignRequest - request params used to assign the task to `assignee` or current user. - - - - -```graphql -{ - "assignee": string, - "allowOverrideAssignment": boolean -} -``` - - - - - -```json -{ - "assignee": "string", - "allowOverrideAssignment": true -} -``` - - - - -### Fields - -#### [TaskAssignRequest.assignee](#)`string` - -> When using a `JWT` token, the assignee parameter is NOT optional when called directly from the API. The system will not be able to detect the assignee from the JWT token, therefore the assignee parameter needs to be explicitly passed in this instance. - -#### [TaskAssignRequest.allowOverrideAssignment](#)`boolean` - -> When `true` the task that is already assigned may be reassigned again. Otherwise, the task must be first unassigned and only then assigned again. (Default: `true`). - -### Consumed by - -[`Assign task`](../../controllers/tasklist-api-rest-task-controller.md#assign-task) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-complete-request.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-complete-request.mdx deleted file mode 100644 index a67a671804a..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-complete-request.mdx +++ /dev/null @@ -1,87 +0,0 @@ ---- -id: task-complete-request -title: TaskCompleteRequest -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -TaskCompleteRequest - request object with variables to update or add to task during the task completion. - - - - -```graphql -{ - "variables": [VariableInput] -} -``` - - - - - -```json -{ - "variables": [ - { - "name": "string", - "value": "string" - } - ] -} -``` - - - - -### Fields - -#### [TaskCompleteRequest.variables](#)[`VariableInput`](./variable-input.mdx) - -> Variables to update or add to task during the task completion. 
- -### Consumed by - -[`Complete task`](../../controllers/tasklist-api-rest-task-controller.md#complete-task) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-search-request.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-search-request.mdx deleted file mode 100644 index beebbe01874..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/task-search-request.mdx +++ /dev/null @@ -1,185 +0,0 @@ ---- -id: task-search-request -title: TaskSearchRequest -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -TaskSearchRequest - query object to search tasks by provided params. - - - - -```graphql -{ - "state": TaskState, - "assigned": boolean, - "assignee": string, - "taskDefinitionId": string, - "candidateGroup": string, - "candidateUser": string, - "processDefinitionKey": string, - "processInstanceKey": string, - "pageSize": int, - "followUpDate": DateFilter, - "dueDate": DateFilter, - "sort": [TaskOrderBy], - "searchAfter": [string], - "searchAfterOrEqual": [string], - "searchBefore": [string], - "searchBeforeOrEqual": [string] -} -``` - - - - - -```json -{ - "state": "CREATED", - "assigned": true, - "assignee": "string", - "taskDefinitionId": "string", - "candidateGroup": "string", - "candidateUser": "string", - "processDefinitionKey": "string", - "processInstanceKey": "string", - "pageSize": 0, - "followUpDate": { - "from": "2023-03-29T18:38:10.491Z", - "to": "2023-03-29T18:38:10.491Z" - }, - "dueDate": { - "from": "2023-03-29T18:38:10.491Z", - "to": "2023-03-29T18:38:10.491Z" - }, - "sort": [ - { - "field": "completionTime", - "order": "ASC" - } - ], - "searchAfter": ["string"], - "searchAfterOrEqual": ["string"], - "searchBefore": ["string"], - "searchBeforeOrEqual": ["string"] -} -``` - - - - -### Fields - -#### [TaskSearchRequest.state](#)[`TaskState`](../enums/task-state.mdx) - -> State of the tasks - -#### [TaskSearchRequest.assigned](#)`boolean` - -> Are the tasks assigned? - -#### [TaskSearchRequest.assignee](#)`string` - -> Who is assigned to the tasks? - -#### [TaskSearchRequest.taskDefinitionId](#)`string` - -> Task definition ID - what's the BPMN flow node? - -#### [TaskSearchRequest.candidateGroup](#)`string` - -> Given group is in candidate groups list. - -#### [TaskSearchRequest.candidateUser](#)`string` - -> Given group is in candidate user list. - -#### [TaskSearchRequest.processDefinitionKey](#)`string` - -> Reference to process definition -> (renamed equivalent of [`TaskQuery.processDefinitionId`](../../../tasklist-api/inputs/task-query.mdx#code-style-fontweight-normal-taskquerybprocessdefinitionidbcodestring-) field). - -#### [TaskSearchRequest.processInstanceKey](#)`string` - -> Reference to process instance -> (renamed equivalent of [`TaskQuery.processInstanceId`](../../../tasklist-api/inputs/task-query.mdx#code-style-fontweight-normal-taskquerybprocessinstanceidbcodestring-) field). - -#### [TaskSearchRequest.followUpDateDate](#)[`DateFilter`](../models/date-filter-input.mdx) - -> Specifying a range of follow-up dates for the tasks to search for. - -#### [TaskSearchRequest.dueDate](#)[`DateFilter`](../models/date-filter-input.mdx) - -> Specifying a range of due dates for the tasks to search for. 
- -#### [TaskSearchRequest.sort](#)[`TaskOrderBy`](../models/task-order-by.mdx) - -> An array of objects specifying the fields to sort the results by. - -#### [TaskSearchRequest.pageSize](#)`int` - -> Size of tasks page (default: 50). - -#### [TaskSearchRequest.searchAfter](#)`string` - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values plus same sort values. - -#### [TaskSearchRequest.searchAfterOrEqual](#)`string` - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values. - -#### [TaskSearchRequest.searchBefore](#)`string` - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values plus same sort values. - -#### [TaskSearchRequest.searchBeforeOrEqual](#)`string` - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values. - -### Consumed by - -[`Search tasks`](../../controllers/tasklist-api-rest-task-controller.md#search-tasks) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/variable-input.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/variable-input.mdx deleted file mode 100644 index 730184096a2..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/variable-input.mdx +++ /dev/null @@ -1,92 +0,0 @@ ---- -id: variable-input -title: VariableInput -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Change or add a variable with name and value. - - - - -```graphql -{ - "name": string, - "value": string -} -``` - - - - - -```json -{ - "name": "string", - "value": "string" -} -``` - - - - -### Fields - -#### [VariableInput.name](#) `string` - -> Name of the variable. - -#### [VariableInput.value](#) `string` - -> Value of the variable. Complex values, e.g. a list of objects, must be serialized as JSON. - -### Member of - -[`TaskCompleteRequest`](./task-complete-request.mdx) - -### Consumed by - -[`Complete task`](../../controllers/tasklist-api-rest-task-controller.md#complete-task) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/variables-search-request.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/variables-search-request.mdx deleted file mode 100644 index e063f9db7bc..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/requests/variables-search-request.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -id: variables-search-request -title: VariablesSearchRequest -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -VariablesSearchRequest - query object to search tasks variables by provided variable names. 
- - - - -```graphql -{ - "variableNames": [string] -} -``` - - - - - -```json -{ - "variableNames": ["string"] -} -``` - - - - -### Fields - -#### [VariablesSearchRequest.variableNames](#)`string` - -> Name of variables to find - -### Consumed by - -[`Search task variables`](../../controllers/tasklist-api-rest-task-controller.md#search-task-variables) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/_category_.yml deleted file mode 100644 index b653e291f47..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Responses" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/error-response.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/error-response.mdx deleted file mode 100644 index 9e6c2c6bfed..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/error-response.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: error-response -title: ErrorResponse -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -TaskResponse - represents a user task. - - - - -```graphql -{ - "status": int, - "message": string, - "instance": string -} -``` - - - - - -```json -{ - "status": 400, - "message": "string", - "instance": "string" -} -``` - - - - -### Fields - -#### [ErrorResponse.status](#)`int` - -> An integer that represents the HTTP status code of the error response. For example, 400 indicates a "Bad Request" error, 404 indicates a "Not Found" error, and so on. - -#### [ErrorResponse.message](#)`string` - -> A string that provides a brief description of the error that occurred. - -#### [ErrorResponse.instance](#)`string` - -> UUID for look up (eg. in log messages). 
- -### Returned by - -[`Get form`](../../controllers/tasklist-api-rest-form-controller.md#get-form) -[`Get variable`](../../controllers/tasklist-api-rest-variables-controller.md#get-variable) -[`Search task variables`](../../controllers/tasklist-api-rest-task-controller.md#search-task-variables) -[`Get task`](../../controllers/tasklist-api-rest-task-controller.md#get-task) -[`Search tasks`](../../controllers/tasklist-api-rest-task-controller.md#search-tasks) -[`Assign task`](../../controllers/tasklist-api-rest-task-controller.md#assign-task) -[`Unassign task`](../../controllers/tasklist-api-rest-task-controller.md#unassign-task) -[`Complete task`](../../controllers/tasklist-api-rest-task-controller.md#complete-task) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/form-response.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/form-response.mdx deleted file mode 100644 index a95ebfea08d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/form-response.mdx +++ /dev/null @@ -1,95 +0,0 @@ ---- -id: form-response -title: FormResponse -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -FormResponse - represents task embedded form. - - - - -```graphql -{ - "id": string, - "processDefinitionKey": string, - "schema": string -} -``` - - - - - -```json -{ - "id": "string", - "processDefinitionKey": "string", - "schema": "string" -} -``` - - - - -### Fields - -#### [FormResponse.id](#)`string` - -> The unique identifier of the embedded form within one process - -#### [FormResponse.processDefinitionKey](#)`string` - -> Reference to process definition -> (renamed equivalent of [`Form.processDefinitionId`](../../../tasklist-api/objects/form.mdx#code-style-fontweight-normal-formbprocessdefinitionidbcodestring--) field) - -#### [FormResponse.schema](#)`string` - -> Form content - -### Returned by - -[`Get form`](../../controllers/tasklist-api-rest-form-controller.md#get-form) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/task-response.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/task-response.mdx deleted file mode 100644 index e56c8f895db..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/task-response.mdx +++ /dev/null @@ -1,173 +0,0 @@ ---- -id: task-response -title: TaskResponse -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -TaskResponse - represents a user task. 
- - - - -```graphql -{ - "id": string, - "name": string, - "taskDefinitionId": string, - "processName": string, - "creationDate": string, - "completionDate": string, - "assignee": string, - "taskState": TaskState, - "formKey": string, - "processDefinitionKey": string, - "processInstanceKey": string, - "dueDate": string, - "followUpDate": string, - "candidateGroups": [string], - "candidateUsers": [string] -} -``` - - - - - -```json -{ - "id": "string", - "name": "string", - "taskDefinitionId": "string", - "processName": "string", - "creationDate": "string", - "completionDate": "string", - "assignee": "string", - "taskState": "CREATED", - "formKey": "string", - "processDefinitionKey": "string", - "processInstanceKey": "string", - "dueDate": "2023-03-29T20:08:07.171Z", - "followUpDate": "2023-03-29T20:08:07.171Z", - "candidateGroups": ["string"], - "candidateUsers": ["string"] -} -``` - - - - -### Fields - -#### [TaskResponse.id](#)`string` - -> The unique identifier of the task - -#### [TaskResponse.name](#)`string` - -> Name of the task - -#### [TaskResponse.taskDefinitionId](#)`string` - -> User Task ID from the BPMN definition - -#### [TaskResponse.processName](#)`string` - -> Name of the process - -#### [TaskResponse.creationDate](#)`string` - -> When was the task created -> (renamed equivalent of [`Task.creationTime`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcreationtimebcodestring--) field) - -#### [TaskResponse.completionDate](#)`string` - -> When was the task completed -> (renamed equivalent of [`Task.completionTime`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcompletiontimebcodestring-) field) - -#### [TaskResponse.assignee](#)`string` - -> Username/id of who is assigned to the task - -#### [TaskResponse.taskState](#)[`TaskState`](../enums/task-state.mdx) - -> State of the task - -#### [TaskResponse.formKey](#)`string` - -> Reference to the task form - -#### [TaskResponse.processDefinitionKey](#)`string` - -> Reference to process definition -> (renamed equivalent of [`Task.processDefinitionId`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessdefinitionidbcodestring-) field) - -#### [TaskResponse.processInstanceKey](#)`string` - -> Reference to process instance id -> (renamed equivalent of [`Task.processInstanceId`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessinstanceidbcodestring-) field) - -#### [TaskResponse.candidateGroups](#)`string` - -> Candidate groups - -#### [TaskResponse.candidateUsers](#)`string` - -> Candidate users - -#### [TaskResponse.followUpDate](#)`string` - -> Follow-up date for the task - -#### [TaskResponse.dueDate](#)`string` - -> Due date for the task - -### Returned by - -[`Get task`](../../controllers/tasklist-api-rest-task-controller.md#get-task) -[`Assign task`](../../controllers/tasklist-api-rest-task-controller.md#assign-task) -[`Unassign task`](../../controllers/tasklist-api-rest-task-controller.md#unassign-task) -[`Complete task`](../../controllers/tasklist-api-rest-task-controller.md#complete-task) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/task-search-response.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/task-search-response.mdx deleted file mode 100644 index 352fbadbe31..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/task-search-response.mdx +++ /dev/null @@ -1,182 +0,0 @@ ---- -id: 
task-search-response -title: TaskSearchResponse -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -TaskSearchResponse - representing the searched task. - - - - -```graphql -{ - "id": string, - "name": string, - "taskDefinitionId": string, - "processName": string, - "creationDate": string, - "completionDate": string, - "assignee": string, - "taskState": TaskState, - "sortValues": [string], - "isFirst": boolean, - "formKey": string, - "processDefinitionKey": string, - "processInstanceKey": string, - "dueDate": string, - "followUpDate": string, - "candidateGroups": [string], - "candidateUsers": [string] -} -``` - - - - - -```json -{ - "id": "string", - "name": "string", - "taskDefinitionId": "string", - "processName": "string", - "creationDate": "string", - "completionDate": "string", - "assignee": "string", - "taskState": "CREATED", - "sortValues": ["string"], - "isFirst": true, - "formKey": "string", - "processDefinitionKey": "string", - "processInstanceKey": "string", - "dueDate": "2023-03-29T20:08:07.171Z", - "followUpDate": "2023-03-29T20:08:07.171Z", - "candidateGroups": ["string"], - "candidateUsers": ["string"] -} -``` - - - - -### Fields - -#### [TaskSearchResponse.id](#)`string` - -> The unique identifier of the task - -#### [TaskSearchResponse.name](#)`string` - -> Name of the task - -#### [TaskSearchResponse.taskDefinitionId](#)`string` - -> User Task ID from the BPMN definition - -#### [TaskSearchResponse.processName](#)`string` - -> Name of the process - -#### [TaskSearchResponse.creationDate](#)`string` - -> When was the task created -> (renamed equivalent of [`Task.creationTime`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcreationtimebcodestring--) field) - -#### [TaskSearchResponse.completionDate](#)`string` - -> When was the task completed -> (renamed equivalent of [`Task.completionTime`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcompletiontimebcodestring-) field) - -#### [TaskSearchResponse.assignee](#)`string` - -> Username/id of who is assigned to the task - -#### [TaskSearchResponse.taskState](#)[`TaskState`](../enums/task-state.mdx) - -> State of the task - -#### [TaskSearchResponse.sortValues](#)`string` - -> Array of values to be copied into [`TaskSearchRequest`](../requests/task-search-request.mdx) to request for next or previous page of tasks. 
- -#### [TaskSearchResponse.isFirst](#)`boolean` - -> Flag to show that the task is first in current filter - -#### [TaskSearchResponse.formKey](#)`string` - -> Reference to the task form - -#### [TaskSearchResponse.processDefinitionKey](#)`string` - -> Reference to process definition -> (renamed equivalent of [`Task.processDefinitionId`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessdefinitionidbcodestring-) field) - -#### [TaskSearchResponse.processInstanceKey](#)`string` - -> Reference to process instance id -> (renamed equivalent of [`Task.processInstanceId`](../../../tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessinstanceidbcodestring-) field) - -#### [TaskSearchResponse.candidateGroups](#)`string` - -> Candidate groups - -#### [TaskSearchResponse.candidateUsers](#)`string` - -> Candidate users - -#### [TaskSearchResponse.followUpDate](#)`string` - -> Follow-up date for the task - -#### [TaskSearchResponse.dueDate](#)`string` - -> Due date for the task - -### Returned by - -[`Search tasks`](../../controllers/tasklist-api-rest-task-controller.md#search-tasks) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/variable-response.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/variable-response.mdx deleted file mode 100644 index 8e4c28c8ad0..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/schemas/responses/variable-response.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -id: variable-response -title: VariableResponse -hide_table_of_contents: false ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -VariableResponse - represents a variable. 
- - - - -```graphql -{ - "id": string, - "name": string, - "value": string, - "isValueTruncated": boolean, - "previewValue": string -} -``` - - - - - -```json -{ - "id": "string", - "name": "string", - "value": "string", - "isValueTruncated": true, - "previewValue": "string" -} -``` - - - - -### Fields - -#### [VariableResponse.id](#)`string` - -> id of the variable - -#### [VariableResponse.name](#)`string` - -> variable name - -#### [VariableResponse.value](#)`string` - -> full variable value - -#### [VariableResponse.previewValue](#)`string` - -> value preview (limited to 8191 characters) - -#### [VariableResponse.isValueTruncated](#)`boolean` - -> shows, whether `previewValue` contains truncated value or full value - -### Returned by - -[`Search task variables`](../../controllers/tasklist-api-rest-task-controller.md#search-task-variables) -[`Get variable`](../../controllers/tasklist-api-rest-variables-controller.md#get-variable) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/sidebar-schema.js b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/sidebar-schema.js deleted file mode 100644 index 721a429b99d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/sidebar-schema.js +++ /dev/null @@ -1,10 +0,0 @@ -/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ - -module.exports = { - "Tasklist API (REST)": [ - { - type: "autogenerated", - dirName: "apis-tools/tasklist-api-rest", - }, - ], -}; diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-authentication.md b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-authentication.md deleted file mode 100644 index eba5026d92f..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-authentication.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: tasklist-api-rest-authentication -title: "Authentication" -sidebar_position: 2 -description: "Describes authentication options that can be used to access Tasklist REST API." ---- - -## Authentication in the cloud - -To access the API endpoint, you need an access token. - -Your client must send a header in each request: - -`Authorization: Bearer ` - -For example, send a request using _curl_: - -```shell -curl -X POST -H -H :accept: application/json" -H "Authorization: Bearer " -d '' http://localhost:8080/v1/tasks/search -``` - -### How to obtain the access token - -You must obtain a token to use the Tasklist API. When you create a Tasklist [client](/guides/setup-client-connection-credentials.md), you get all the information needed to connect to Tasklist. - -See our guide on [building your own client](../build-your-own-client.md). 
- -The following settings are needed: - -| Name | Description | Default value | -| ------------------------ | ----------------------------------------------- | --------------------- | -| client id | Name of your registered client | - | -| client secret | Password for your registered client | - | -| audience | Permission name; if not given use default value | `tasklist.camunda.io` | -| authorization server url | Token issuer server | - | - -Send a token issue _POST_ request to the authorization server with the following content: - -```json -{ - "client_id": "", - "client_secret": "", - "audience": "", - "grant_type": "client_credentials" -} -``` - -See the following example with _curl_: - -```shell -curl -X POST --header 'content-type: application/json' --data '{"client_id": "", "client_secret":"","audience":"","grant_type":"client_credentials"}' https:// -``` - -If the authorization is successful, the authorization server sends back the access token, when it expires, scope, and type: - -```json -{ - "access_token": "ey...", - "scope": "...", - "expires_in": 86400, - "token_type": "Bearer" -} -``` - -## Authentication for Self-Managed cluster - -The authentication is described in [Tasklist Configuration - Authentication](../../self-managed/tasklist-deployment/tasklist-authentication.md#identity). diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md b/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md deleted file mode 100644 index 419683f178d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -id: tasklist-api-rest-overview -title: "Overview" -sidebar_position: 1 -description: "Tasklist API is a REST API and provides searching, getting, and changing Tasklist data." ---- - -## Introduction - -Tasklist API is a REST API and provides searching, getting, and changing Tasklist data. -Requests and responses are in JSON notation. Some objects have additional endpoints. - -## Context paths - -For SaaS: `https://${REGION}.tasklist.camunda.io:443/${CLUSTER_ID}/v1/`, and for Self-Managed installations: `http://localhost:8080/v1/`. - -:::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). - -For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Tasklist component. -::: - -## API documentation as Swagger - -A detailed API description is also available as Swagger UI at `https://${base-url}/swagger-ui/index.html`. - -For SaaS: `https://${REGION}.tasklist.camunda.io:443/${CLUSTER_ID}/swagger-ui/index.html`, and for Self-Managed installations: [`http://localhost:8080/swagger-ui/index.html`](http://localhost:8080/swagger-ui/index.html). - -:::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). 
-::: - -## Endpoints - -| Endpoint (HTTP verb + URL path) | Description | -| :-------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------: | -| **Tasks** | | -| [`GET /v1/tasks/{taskId}`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#get-task) | Return a task by `taskId`. | -| [`POST /v1/tasks/search`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#search-tasks) | Returns the list of tasks that satisfy search request params. | -| [`POST /v1/tasks/{taskId}/variables/search`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#search-task-variables) | Returns a list of task variables for the specified `taskId` and `variableNames.` | -| [`PATCH /v1/tasks/{taskId}/assign`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#assign-task) | Assign a task with `taskId` to `assignee` or the active user. | -| [`PATCH /v1/tasks/{taskId}/unassign`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#unassign-task) | Unassign a task with provided `taskId`. | -| [`PATCH /v1/tasks/{taskId}/complete`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#complete-task) | Complete a task with `taskId` and optional variables. | -| **Forms** | | -| [`GET /v1/forms/{formId}?processDefinitionKey={processDefinitionKey}`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-form-controller.md#get-form) | Get the form details by `formId` and `processDefinitionKey` required query param. | -| **Variables** | | -| [`GET /v1/variables/{variableId}`](/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md#get-variable) | Get the variable details by `variableId`. | diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/assets/tasklist.graphqls b/versioned_docs/version-8.2/apis-tools/tasklist-api/assets/tasklist.graphqls deleted file mode 100644 index 3442bfa6777..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/assets/tasklist.graphqls +++ /dev/null @@ -1,203 +0,0 @@ -scalar DateTime - -# Describes the User task. -type Task { - # The unique identifier of the task - id: ID! - # Name of the task - name: String! - # Task Definition ID (node BPMN id) of the process - taskDefinitionId: String! - # Name of the process - processName: String! - # When was the task created - creationTime: String! - # When was the task completed - completionTime: String - # Username/id of who is assigned to the task - assignee: String - # Variables associated to the task - variables: [Variable!] - # State of the task - taskState: TaskState! - # Array of values to be copied into `TaskQuery` to request for next or previous page of tasks. - sortValues: [String!] - # Flag to show that the task is first in current filter - isFirst: Boolean - # Reference to the task form - formKey: String - #Reference to process definition - processDefinitionId: String - #Reference to processInstance definition - processInstanceId: String - #Candidate groups - candidateGroups: [String!] - #Follow-up Date for Task - followUpDate: DateTime - #Due date for Task - dueDate: DateTime - #Candidate users - candidateUsers: [String!] -} - -#Describes task embedded form -type Form { - #The unique identifier of the embedded form within one process - id: String! 
- #Reference to process definition - processDefinitionId: String! - #Form content - schema: String! -} - -type Process { - id: String! - name: String - processDefinitionId: String - version: Int -} - -input DateFilter { - from: DateTime! - to: DateTime! -} - -#Task query - query to get one page of tasks. -input TaskQuery { - # State of the tasks - state: TaskState - # Are the tasks assigned? - assigned: Boolean - # Who is assigned to the tasks? - assignee: String - # given group is in candidate groups list - candidateGroup: String - # given user is in candidate users list - candidateUser: String - # process definition id - processDefinitionId: String - # process instance id - processInstanceId: String - #Size of tasks page (default: 50). - pageSize: Int - # Task definition ID - what's the BPMN flow node? - taskDefinitionId: String - #Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values plus same sort values. - searchAfter: [String!] - #Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values. - searchAfterOrEqual: [String!] - #Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values plus same sort values. - searchBefore: [String!] - #Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values. - searchBeforeOrEqual: [String!] - #Follow-up Date for Task - followUpDate: DateFilter - #Due Date for Task - dueDate: DateFilter - #order - sort: [TaskOrderBy] -} - -input TaskOrderBy { - field: TaskSortFields! - order: Sort! -} - -enum Sort { - ASC - DESC -} - -enum TaskSortFields { - creationTime - completionTime - followUpDate - dueDate -} - -# State of the task. -enum TaskState { - CREATED - COMPLETED - CANCELED -} - -# Variable used in task. -type Variable { - id: ID! - name: String! - # full variable value - value: String! - # value preview (limited in size) - previewValue: String! - # shows, whether previewValue contains truncated value or full value - isValueTruncated: Boolean! -} -# Change or add a variable with name and value. -input VariableInput { - # Name of the variable. - name: String! - # Value of the variable. Complex values, e.g. a list of objects, must be serialized as JSON. - value: String! -} - -type ProcessInstance { - id: ID! -} - -type C8AppLink { - name: String! - link: String! -} -# Describes the user. -type User { - userId: ID! - displayName: String - permissions: [String!] - roles: [String] - salesPlanType: String - c8Links: [C8AppLink] -} -# What can be searched for. -type Query { - # Get list of tasks based on `TaskQuery`. - tasks(query: TaskQuery!): [Task!]! - # Get one task by id. Returns task or error when task does not exist. - task(id: String!): Task! - # Get currently logged in user. - currentUser: User! - # Get task form by id and processDefinitionId - form(id: String!, processDefinitionId: String!): Form - # Get a collection of Variables by name - variables(taskId: String!, variableNames: [String!]!): [Variable!]! - # Get the variables by variable id - variable(id: String!): Variable! - # Get the processes - processes(search: String): [Process!]! -} -# What can be changed. -type Mutation { - # Complete a task with taskId and optional variables. Returns the task. - completeTask(taskId: String!, variables: [VariableInput!]!): Task! - """ - Claim a task with `taskId` to `assignee`. 
Returns the task. - - When using Graphql API with JWT authentication token following parameters may be used: - * `assignee`. When using a JWT token, the assignee parameter is NOT optional when called directly from the API. - The system will not be able to detect the assignee from the JWT token, therefore the assignee parameter needs to be - explicitly passed in this instance. - * `allowOverrideAssignment`. When `true` the task that is already assigned may be claimed again. Otherwise the task - must be first unclaimed and only then claimed again. (Default: `true`) - """ - claimTask( - taskId: String! - assignee: String - allowOverrideAssignment: Boolean - ): Task! - # Unclaim a task with taskId. Returns the task. - unclaimTask(taskId: String!): Task! - # Delete process instance by given processInstanceId. Returns true if process instance could be deleted. - deleteProcessInstance(processInstanceId: String!): Boolean! - # start a Process from tasklist - startProcess(processDefinitionId: String!): ProcessInstance! -} diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/_category_.yml deleted file mode 100644 index 309996eee2c..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Directives" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/deprecated.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/deprecated.mdx deleted file mode 100644 index 34223694e6f..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/deprecated.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: deprecated -title: deprecated -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Marks the field or enum value as deprecated - -```graphql -directive @deprecated( - reason: String = "No longer supported" -) -``` - -### Arguments - -#### [deprecated.reason](#)[`String`](../scalars/string.mdx) - -> The reason for the deprecation diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/include.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/include.mdx deleted file mode 100644 index 1e10ec91b54..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/include.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: include -title: include -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Directs the executor to include this field or fragment only when the `if` argument is true - -```graphql -directive @include( - if: Boolean! -) -``` - -### Arguments - -#### [include.if](#)[`Boolean!`](../scalars/boolean.mdx) - -> Included when true. 
diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/skip.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/skip.mdx deleted file mode 100644 index 05326f76d4a..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/skip.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: skip -title: skip -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Directs the executor to skip this field or fragment when the `if`'argument is true. - -```graphql -directive @skip( - if: Boolean! -) -``` - -### Arguments - -#### [skip.if](#)[`Boolean!`](../scalars/boolean.mdx) - -> Skipped when true. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/specified-by.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/specified-by.mdx deleted file mode 100644 index ac0d0adb080..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/directives/specified-by.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: specified-by -title: specifiedBy -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Exposes a URL that specifies the behaviour of this scalar. - -```graphql -directive @specifiedBy( - url: String! -) -``` - -### Arguments - -#### [specifiedBy.url](#)[`String!`](../scalars/string.mdx) - -> The URL that specifies the behaviour of this scalar. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/_category_.yml deleted file mode 100644 index a91f30b39a4..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Enums" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/sort.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/sort.mdx deleted file mode 100644 index 435dd21ce06..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/sort.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: sort -title: Sort -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -State of the task. - -```graphql -enum Sort { - ASC - DESC -} -``` - -### Values - -#### [Sort.ASC](#) - -> Ascending - -#### [Sort.DESC](#) - -> Descending - -### Member of - -[`TaskQuery`](/docs/apis-tools/tasklist-api/inputs/task-query.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/task-sort-fields.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/task-sort-fields.mdx deleted file mode 100644 index 11105070a2d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/task-sort-fields.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: task-sort-fields -title: TaskSortFields -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -State of the task. 
- -```graphql -enum TaskSortFields { - creationTime - completionTime - followUpDate - dueDate -} -``` - -### Values - -#### [TaskSortFields.creationTime](#) - -> Use the field creationTime for sorting - -#### [TaskSortFields.completionTime](#) - -> Use the field completionTime for sorting - -#### [TaskSortFields.followUpDate](#) - -> Use the field followUpDate for sorting - -#### [TaskSortFields.dueDate](#) - -> Use the field dueDate for sorting - -### Member of - -[`TaskQuery`](/docs/apis-tools/tasklist-api/inputs/task-query.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/task-state.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/task-state.mdx deleted file mode 100644 index 736495f669f..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/enums/task-state.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: task-state -title: TaskState -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -State of the task. - -```graphql -enum TaskState { - CREATED - COMPLETED - CANCELED -} -``` - -### Values - -#### [TaskState.CREATED](#) - -> - -#### [TaskState.COMPLETED](#) - -> - -#### [TaskState.CANCELED](#) - -> - -### Member of - -[`Task`](../objects/task.mdx) [`TaskQuery`](../inputs/task-query.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/generated.md b/versioned_docs/version-8.2/apis-tools/tasklist-api/generated.md deleted file mode 100644 index 72b93fb4bf3..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/generated.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -id: schema -slug: /apis-tools/tasklist-api -title: Schema Documentation -sidebar_position: 1 -hide_table_of_contents: true -pagination_next: null -pagination_prev: null -sidebar_class_name: navbar__toggle ---- - -This documentation has been automatically generated from the GraphQL schema. -GraphQL schema file to download: [tasklist.graphqls](./assets/tasklist.graphqls) - -Use the docs in the sidebar to find out how to use the schema: - -- **Allowed operations**: queries and mutations. -- **Schema-defined types**: scalars, objects, enums, interfaces, unions, and input objects. - -Generated on 11/11/2022, 11:24:49 AM. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/_category_.yml deleted file mode 100644 index 170f758fc24..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Inputs" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/date-filter-input.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/date-filter-input.mdx deleted file mode 100644 index 076db907a0b..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/date-filter-input.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: date-filter-input -title: DateFilter -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Filter using a date range - -```graphql -input DateFilter { - from: DateTime! - to: DateTime! 
-} -``` - -### Fields - -#### [DateFilter.from](#)[`DateTime!`](/docs/apis-tools/tasklist-api/scalars/DateTime.mdx) - -> Start date range to search from - -#### [DateFilter.to](#)[`DateTime!`](/docs/apis-tools/tasklist-api/scalars/DateTime.mdx) - -> End date range to search to diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/task-order-by.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/task-order-by.mdx deleted file mode 100644 index 2451b8d2a53..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/task-order-by.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: task-order-by -title: TaskOrderBy -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Filter using a date range - -```graphql -input TaskOrderBy { - field: TaskSortFields! - order: Sort! -} -``` - -### Fields - -#### [TaskOrderBy.field](#)[`TaskSortFields!`](/docs/apis-tools/tasklist-api/enums/task-sort-fields.mdx) - -> Allowed fields to sort by - -#### [TaskOrderBy.order](#)[`Sort!`](/docs/apis-tools/tasklist-api/enums/sort.mdx) - -> Define if sorting is Ascending or Descending diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/task-query.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/task-query.mdx deleted file mode 100644 index b19a391a76b..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/task-query.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -id: task-query -title: TaskQuery -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Task query - query to get one page of tasks. - -```graphql -input TaskQuery { - state: TaskState - assigned: Boolean - assignee: String - candidateGroup: String - candidateUser: String - processDefinitionId: String - processInstanceId: String - followUpDate: DateFilter - dueDate: DateFilter - pageSize: Int - taskDefinitionId: String - searchAfter: [String!] - searchAfterOrEqual: [String!] - searchBefore: [String!] - searchBeforeOrEqual: [String!] - sort: [TaskOrderBy] -} -``` - -### Fields - -#### [TaskQuery.state](#)[`TaskState`](/docs/apis-tools/tasklist-api/enums/task-state.mdx) - -> State of the tasks - -#### [TaskQuery.assigned](#)[`Boolean`](/docs/apis-tools/tasklist-api/scalars/boolean.mdx) - -> Are the tasks assigned? - -#### [TaskQuery.assignee](#)[`String`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Who is assigned to the tasks? 
- -#### [TaskQuery.candidateGroup](#)[`String`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> given group is in candidate groups list - -#### [TaskQuery.candidateUser](#)[`String`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> given group is in candidate user list - -#### [TaskQuery.processDefinitionId](#)[`String`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Reference to process definition - -#### [TaskQuery.processInstanceId](#)[`String`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Reference to process instance - -#### [TaskQuery.followUpDateDate](#)[`DateFilter`](/docs/apis-tools/tasklist-api/inputs/date-filter-input.mdx) - -> Follow-up date for the task - -#### [TaskQuery.dueDate](#)[`DateFilter`](/docs/apis-tools/tasklist-api/inputs/date-filter-input.mdx) - -> Due date for the task - -#### [TaskQuery.pageSize](#)[`Int`](/docs/apis-tools/tasklist-api/scalars/int.mdx) - -> Size of tasks page (default: 50). - -#### [TaskQuery.taskDefinitionId](#)[`String`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Task definition ID - what's the BPMN flow node? - -#### [TaskQuery.searchAfter](#)[`[String!]`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values plus same sort values. - -#### [TaskQuery.searchAfterOrEqual](#)[`[String!]`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly after this values. - -#### [TaskQuery.searchBefore](#)[`[String!]`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values plus same sort values. - -#### [TaskQuery.searchBeforeOrEqual](#)[`[String!]`](/docs/apis-tools/tasklist-api/scalars/string.mdx) - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values. - -#### [TaskQuery.sort](#)[`[TaskOrderBy]`](/docs/apis-tools/tasklist-api/inputs/task-order-by.mdx) - -> Array of values copied from `sortValues` of one of the tasks, query will return page of tasks going directly before this values. - -### Member of - -[`tasks`](/apis-tools/tasklist-api/queries/tasks.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/variable-input.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/variable-input.mdx deleted file mode 100644 index d3958e48255..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/inputs/variable-input.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: variable-input -title: VariableInput -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Change or add a variable with name and value. - -```graphql -input VariableInput { - name: String! - value: String! -} -``` - -### Fields - -#### [VariableInput.name](#)[`String!`](../scalars/string.mdx) - -> Name of the variable. - -#### [VariableInput.value](#)[`String!`](../scalars/string.mdx) - -> Value of the variable. Complex values, e.g. a list of objects, must be serialized as JSON. 
- -### Member of - -[`completeTask`](../mutations/complete-task.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/interfaces/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/interfaces/_category_.yml deleted file mode 100644 index 2adeef08a87..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/interfaces/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Interfaces" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/_category_.yml deleted file mode 100644 index 4896c1e5d33..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Mutations" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/claim-task.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/claim-task.mdx deleted file mode 100644 index 5c2b84857dc..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/claim-task.mdx +++ /dev/null @@ -1,78 +0,0 @@ ---- -id: claim-task -title: claimTask -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Claim a task with `taskId` to `assignee`. Returns the task. - -When using Graphql API with JWT authentication token following parameters may be used: - -- `assignee`. When using a JWT token, the assignee parameter is NOT optional when called directly from the API. - The system will not be able to detect the assignee from the JWT token, therefore the assignee parameter needs to be - explicitly passed in this instance. -- `allowOverrideAssignment`. When `true` the task that is already assigned may be claimed again. Otherwise the task - must be first unclaimed and only then claimed again. (Default: `true`) - -```graphql -claimTask( - taskId: String! - assignee: String - allowOverrideAssignment: Boolean -): Task! -``` - -### Arguments - -#### [claimTask.taskId](#)[`String!`](../scalars/string.mdx) - -> - -#### [claimTask.assignee](#)[`String`](../scalars/string.mdx) - -> - -#### [claimTask.allowOverrideAssignment](#)[`Boolean`](../scalars/boolean.mdx) - -> - -### Type - -#### [`Task`](../objects/task.mdx) - -> Describes the User task. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/complete-task.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/complete-task.mdx deleted file mode 100644 index 1c713a2a5f4..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/complete-task.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: complete-task -title: completeTask -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Complete a task with taskId and optional variables. Returns the task. - -```graphql -completeTask( - taskId: String! - variables: [VariableInput!]! -): Task! -``` - -### Arguments - -#### [completeTask.taskId](#)[`String!`](../scalars/string.mdx) - -> - -#### [completeTask.variables](#)[`[VariableInput!]!`](../inputs/variable-input.mdx) - -> - -### Type - -#### [`Task`](../objects/task.mdx) - -> Describes the User task. 
diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/delete-process-instance.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/delete-process-instance.mdx deleted file mode 100644 index d71c392e7a8..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/delete-process-instance.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: delete-process-instance -title: deleteProcessInstance -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Delete process instance by given processInstanceId. Returns true if process instance could be deleted. - -```graphql -deleteProcessInstance( - processInstanceId: String! -): Boolean! -``` - -### Arguments - -#### [deleteProcessInstance.processInstanceId](#)[`String!`](../scalars/string.mdx) - -> - -### Type - -#### [`Boolean`](../scalars/boolean.mdx) - -> The `Boolean` scalar type represents `true` or `false`. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/unclaim-task.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/unclaim-task.mdx deleted file mode 100644 index e00f4659f5b..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/mutations/unclaim-task.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: unclaim-task -title: unclaimTask -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Unclaim a task with taskId. Returns the task. - -```graphql -unclaimTask( - taskId: String! -): Task! -``` - -### Arguments - -#### [unclaimTask.taskId](#)[`String!`](../scalars/string.mdx) - -> - -### Type - -#### [`Task`](../objects/task.mdx) - -> Describes the User task. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/_category_.yml deleted file mode 100644 index f56c1499f38..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Objects" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/form.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/form.mdx deleted file mode 100644 index 19a5a47f743..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/form.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: form -title: Form -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Describes task embedded form - -```graphql -type Form { - id: String! - processDefinitionId: String! - schema: String! 
-} -``` - -### Fields - -#### [Form.id](#)[`String!`](../scalars/string.mdx) - -> The unique identifier of the embedded form within one process - -#### [Form.processDefinitionId](#)[`String!`](../scalars/string.mdx) - -> Reference to process definition - -#### [Form.schema](#)[`String!`](../scalars/string.mdx) - -> Form content - -### Returned by - -[`form`](../queries/form.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/task.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/task.mdx deleted file mode 100644 index 800fec6ebcc..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/task.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -id: task -title: Task -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Describes the User task. - -```graphql -type Task { - id: ID! - name: String! - taskDefinitionId: String! - processName: String! - creationTime: String! - completionTime: String - assignee: String - variables: [Variable!] - taskState: TaskState! - sortValues: [String!] - isFirst: Boolean - formKey: String - processDefinitionId: String - processInstanceId: String - candidateGroups: [String!] - candidateUsers: [String!] - followUpDate: DateTime - #Due date for Task - dueDate: DateTime -} -``` - -### Fields - -#### [Task.id](#)[`ID!`](../scalars/id.mdx) - -> The unique identifier of the task - -#### [Task.name](#)[`String!`](../scalars/string.mdx) - -> Name of the task - -#### [Task.taskDefinitionId](#)[`String!`](../scalars/string.mdx) - -> Task Definition ID (node BPMN id) of the process - -#### [Task.processName](#)[`String!`](../scalars/string.mdx) - -> Name of the process - -#### [Task.creationTime](#)[`String!`](../scalars/string.mdx) - -> When was the task created - -#### [Task.completionTime](#)[`String`](../scalars/string.mdx) - -> When was the task completed - -#### [Task.assignee](#)[`String`](../scalars/string.mdx) - -> Username/id of who is assigned to the task - -#### [Task.variables](#)[`[Variable!]`](../objects/variable.mdx) - -> Variables associated to the task - -#### [Task.taskState](#)[`TaskState!`](../enums/task-state.mdx) - -> State of the task - -#### [Task.sortValues](#)[`[String!]`](../scalars/string.mdx) - -> Array of values to be copied into `TaskQuery` to request for next or previous page of tasks. 
- -#### [Task.isFirst](#)[`Boolean`](../scalars/boolean.mdx) - -> Flag to show that the task is first in current filter - -#### [Task.formKey](#)[`String`](../scalars/string.mdx) - -> Reference to the task form - -#### [Task.processDefinitionId](#)[`String`](../scalars/string.mdx) - -> Reference to process definition - -#### [Task.processInstanceId](#)[`String`](../scalars/string.mdx) - -> Reference to process instance id - -#### [Task.candidateGroups](#)[`[String!]`](../scalars/string.mdx) - -> Candidate groups - -#### [Task.candidateUsers](#)[`[String!]`](../scalars/string.mdx) - -> Candidate users - -#### [Task.followUpDate](#)[`[String!]`](../scalars/DateTime.mdx) - -> Follow-up date for the task - -#### [Task.dueDate](#)[`[String!]`](../scalars/DateTime.mdx) - -> Due date for the task - -### Returned by - -[`claimTask`](../mutations/claim-task.mdx) [`completeTask`](../mutations/complete-task.mdx) [`task`](../queries/task.mdx) [`tasks`](../queries/tasks.mdx) [`unclaimTask`](../mutations/unclaim-task.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/user.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/user.mdx deleted file mode 100644 index cc9bcaa35ac..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/user.mdx +++ /dev/null @@ -1,78 +0,0 @@ ---- -id: user -title: User -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Describes the user. - -```graphql -type User { - userId: ID! - displayName: String - permissions: [String!] - roles: [String] - salesPlanType: String -} -``` - -### Fields - -#### [User.userId](#)[`ID!`](../scalars/id.mdx) - -> - -#### [User.displayName](#)[`String`](../scalars/string.mdx) - -> - -#### [User.permissions](#)[`[String!]`](../scalars/string.mdx) - -> - -#### [User.roles](#)[`[String]`](../scalars/string.mdx) - -> - -#### [User.salesPlanType](#)[`String`](../scalars/string.mdx) - -> - -### Returned by - -[`currentUser`](../queries/current-user.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/variable.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/variable.mdx deleted file mode 100644 index 99555238c1d..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/objects/variable.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -id: variable -title: Variable -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Variable used in task. - -```graphql -type Variable { - id: ID! - name: String! - value: String! - previewValue: String! - isValueTruncated: Boolean! 
-} -``` - -### Fields - -#### [Variable.id](#)[`ID!`](../scalars/id.mdx) - -> - -#### [Variable.name](#)[`String!`](../scalars/string.mdx) - -> - -#### [Variable.value](#)[`String!`](../scalars/string.mdx) - -> full variable value - -#### [Variable.previewValue](#)[`String!`](../scalars/string.mdx) - -> value preview (limited in size) - -#### [Variable.isValueTruncated](#)[`Boolean!`](../scalars/boolean.mdx) - -> shows, whether previewValue contains truncated value or full value - -### Returned by - -[`variable`](../queries/variable.mdx) [`variables`](../queries/variables.mdx) - -### Member of - -[`Task`](../objects/task.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/_category_.yml deleted file mode 100644 index 529a48de855..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Queries" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/current-user.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/current-user.mdx deleted file mode 100644 index 997da3571aa..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/current-user.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: current-user -title: currentUser -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Get currently logged in user. - -```graphql -currentUser: User! -``` - -### Type - -#### [`User`](../objects/user.mdx) - -> Describes the user. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/form.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/form.mdx deleted file mode 100644 index a4d110d93dc..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/form.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: form -title: form -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Get task form by id and processDefinitionId - -```graphql -form( - id: String! - processDefinitionId: String! -): Form -``` - -### Arguments - -#### [form.id](#)[`String!`](../scalars/string.mdx) - -> - -#### [form.processDefinitionId](#)[`String!`](../scalars/string.mdx) - -> - -### Type - -#### [`Form`](../objects/form.mdx) - -> Describes task embedded form diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/task.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/task.mdx deleted file mode 100644 index f3f7e23e70c..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/task.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: task -title: task -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Get one task by id. Returns task or error when task does not exist. - -```graphql -task( - id: String! -): Task! -``` - -### Arguments - -#### [task.id](#)[`String!`](../scalars/string.mdx) - -> - -### Type - -#### [`Task`](../objects/task.mdx) - -> Describes the User task. 
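As a usage sketch to complement the signature above (the task id and the selected fields are illustrative, not part of the generated reference):

```graphql
# Fetch a single task by its id; the id value is a placeholder.
{
  task(id: "2251799813685728") {
    id
    name
    assignee
    taskState
  }
}
```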
diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/tasks.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/tasks.mdx deleted file mode 100644 index 2073800499c..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/tasks.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: tasks -title: tasks -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Get list of tasks based on `TaskQuery`. - -```graphql -tasks( - query: TaskQuery! -): [Task!]! -``` - -### Arguments - -#### [tasks.query](#)[`TaskQuery!`](../inputs/task-query.mdx) - -> - -### Type - -#### [`Task`](../objects/task.mdx) - -> Describes the User task. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/variable.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/variable.mdx deleted file mode 100644 index df3c0de0594..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/variable.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: variable -title: variable -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Get the variables by variable id - -```graphql -variable( - id: String! -): Variable! -``` - -### Arguments - -#### [variable.id](#)[`String!`](../scalars/string.mdx) - -> - -### Type - -#### [`Variable`](../objects/variable.mdx) - -> Variable used in task. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/variables.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/variables.mdx deleted file mode 100644 index 9654f63c846..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/queries/variables.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: variables -title: variables -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -Get a collection of Variables by name - -```graphql -variables( - taskId: String! - variableNames: [String!]! -): [Variable!]! -``` - -### Arguments - -#### [variables.taskId](#)[`String!`](../scalars/string.mdx) - -> - -#### [variables.variableNames](#)[`[String!]!`](../scalars/string.mdx) - -> - -### Type - -#### [`Variable`](../objects/variable.mdx) - -> Variable used in task. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/DateTime.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/DateTime.mdx deleted file mode 100644 index 9f5272a3fc7..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/DateTime.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: datetime -title: DateTime -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -The `DateTime` scalar type represents date and time data and is compliant with the date-time format outlined in section 5.6 of the RFC 3339 profile of the ISO 8601 standard for representation of dates and times using the Gregorian calendar. 
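For illustration (not part of the generated reference), the `DateTime` fields of `Task` are selected like any other field; returned values use this format, for example `"2023-03-29T20:08:07.171Z"`:

```graphql
# Selecting the DateTime-typed fields of Task; the pageSize value is a placeholder.
{
  tasks(query: { pageSize: 5 }) {
    id
    followUpDate
    dueDate
  }
}
```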
- -```graphql -scalar DateTime -``` - -### Member of - -[`Task`](/docs/apis-tools/tasklist-api/inputs/task-query.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/_category_.yml deleted file mode 100644 index 882b071de4b..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Scalars" diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/boolean.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/boolean.mdx deleted file mode 100644 index 7d445378c90..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/boolean.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -id: boolean -title: Boolean -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -The `Boolean` scalar type represents `true` or `false`. - -```graphql -scalar Boolean -``` - -### Returned by - -[`deleteProcessInstance`](../mutations/delete-process-instance.mdx) - -### Member of - -[`claimTask`](../mutations/claim-task.mdx) [`include`](../directives/include.mdx) [`skip`](../directives/skip.mdx) [`Task`](../objects/task.mdx) [`TaskQuery`](../inputs/task-query.mdx) [`Variable`](../objects/variable.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/id.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/id.mdx deleted file mode 100644 index 7504beac023..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/id.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: id -title: ID -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as `"4"`) or integer (such as `4`) input value will be accepted as an ID. - -```graphql -scalar ID -``` - -### Member of - -[`Task`](../objects/task.mdx) [`User`](../objects/user.mdx) [`Variable`](../objects/variable.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/int.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/int.mdx deleted file mode 100644 index e85fdb3c003..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/int.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: int -title: Int -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1. 
- -```graphql -scalar Int -``` - -### Member of - -[`TaskQuery`](../inputs/task-query.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/string.mdx b/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/string.mdx deleted file mode 100644 index 818899c478a..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/scalars/string.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: string -title: String -hide_table_of_contents: false ---- - -export const Bullet = () => ( - <> - -  ●  - - -); - -export const SpecifiedBy = (props) => ( - <> - Specification - - ⎘ - - -); - -export const Badge = (props) => ( - <> - {props.text} - -); - -The `String` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text. - -```graphql -scalar String -``` - -### Member of - -[`claimTask`](../mutations/claim-task.mdx) [`completeTask`](../mutations/complete-task.mdx) [`deleteProcessInstance`](../mutations/delete-process-instance.mdx) [`deprecated`](../directives/deprecated.mdx) [`Form`](../objects/form.mdx) [`form`](../queries/form.mdx) [`specifiedBy`](../directives/specified-by.mdx) [`Task`](../objects/task.mdx) [`task`](../queries/task.mdx) [`TaskQuery`](../inputs/task-query.mdx) [`unclaimTask`](../mutations/unclaim-task.mdx) [`User`](../objects/user.mdx) [`Variable`](../objects/variable.mdx) [`variable`](../queries/variable.mdx) [`VariableInput`](../inputs/variable-input.mdx) [`variables`](../queries/variables.mdx) diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/sidebar-schema.js b/versioned_docs/version-8.2/apis-tools/tasklist-api/sidebar-schema.js deleted file mode 100644 index 82b59566af1..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/sidebar-schema.js +++ /dev/null @@ -1,10 +0,0 @@ -/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ - -module.exports = { - "Tasklist API (GraphQL)": [ - { - type: "autogenerated", - dirName: "apis-tools/tasklist-api", - }, - ], -}; diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-graphql-to-rest-migration.md b/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-graphql-to-rest-migration.md deleted file mode 100644 index ac5ab7a301c..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-graphql-to-rest-migration.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -id: tasklist-api-graphql-to-rest-migration -title: GraphQL to REST API migration -slug: /apis-tools/tasklist-api/tasklist-api-graphql-to-rest-migration -sidebar_position: 3 -description: "This article provides a guide for developers to migrate from GraphQL to REST API seamlessly." ---- - -# Overview - -We want to provide you with the information you need to successfully migrate from our GraphQL API -to our new REST API version. In this document, we'll explain the differences between the two APIs -and provide guidance on how to make the switch. - -GraphQL has been a popular and valuable tool for many of our customers, but we recognize that there are -certain advantages to using a RESTful architecture. Our new REST API version provides a more structured -and predictable way of accessing our data, which should lead to improved performance and greater reliability. - -It's worth noting that all of our other APIs use REST, so moving to a RESTful architecture will align this API -with the rest of our ecosystem. 
This will make it easier to maintain and enhance our APIs over time, -as well as providing a more consistent experience for API customers. - -# GraphQL operation to REST API endpoint mapping - -## Queries - -### Task - -Instead of [task](../tasklist-api/queries/task.mdx) GraphQL query: - -```graphql -# Get one task by id. Returns task or error when task does not exist. -task(id: String!): Task! -``` - -The following [get task](../tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#get-task) endpoint should be used: - -```bash -curl -X 'GET' \ - 'http://{host}/v1/tasks/{taskId}' \ - -H 'accept: application/json' - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -:::note - -The following fields in REST API response were renamed compared to the equivalent GraphQL response: - -- [`Task.creationTime`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcreationtimebcodestring--) ⇒ [`TaskResponse.creationDate`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-response.mdx#code-style-fontweight-normal-taskresponsebcreationdatebcodestring-) -- [`Task.completionTime`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcompletiontimebcodestring-) ⇒ [`TaskResponse.completionDate`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-response.mdx#code-style-fontweight-normal-taskresponsebcompletiondatebcodestring) -- [`Task.processDefinitionId`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessdefinitionidbcodestring-) ⇒ [`TaskResponse.processDefinitionKey`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-response.mdx#code-style-fontweight-normal-taskresponsebprocessdefinitionkeybcodestring) -- [`Task.processInstanceId`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessinstanceidbcodestring-) ⇒ [`TaskResponse.processInstanceKey`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-response.mdx#code-style-fontweight-normal-taskresponsebprocessinstancekeybcodestring) - -::: - -### Tasks - -Instead of [tasks](../tasklist-api/queries/tasks.mdx) GraphQL query: - -```graphql -# Get list of tasks based on `TaskQuery`. -tasks(query: TaskQuery!): [Task!]! 
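# Illustrative invocation matching the REST example that follows (the filter values are placeholders):
# query { tasks(query: { state: CREATED, assigned: true }) { id name taskState } }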
-``` - -The following [search tasks](../tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#search-tasks) endpoint should be used: - -```bash -curl -X 'POST' \ - 'http://{host}/v1/tasks/search' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "state": "CREATED", - "assigned": true -}' -``` - -:::note - -Please note that several field names in request body and response were changed in REST API comparing to the equivalent GraphQL input/response models, in order to improve the consistency and clarity of our API: - -- in request body: - - - [`TaskQuery.processDefinitionId`](docs/apis-tools/tasklist-api/inputs/task-query.mdx#code-style-fontweight-normal-taskquerybprocessdefinitionidbcodestring-) ⇒ [`TaskSearchRequest.processDefinitionKey`](docs/apis-tools/tasklist-api-rest/schemas/requests/task-search-request.mdx#code-style-fontweight-normal-tasksearchrequestbprocessdefinitionkeybcodestring) - - [`TaskQuery.processInstanceId`](docs/apis-tools/tasklist-api/inputs/task-query.mdx#code-style-fontweight-normal-taskquerybprocessinstanceidbcodestring-) ⇒ [`TaskSearchRequest.processInstanceKey`](docs/apis-tools/tasklist-api-rest/schemas/requests/task-search-request.mdx#code-style-fontweight-normal-tasksearchrequestbprocessinstancekeybcodestring) - -- in response: - - [`Task.creationTime`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcreationtimebcodestring--) ⇒ [`TaskSearchResponse.creationDate`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-search-response.mdx#code-style-fontweight-normal-tasksearchresponsebcreationdatebcodestring-) - - [`Task.completionTime`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbcompletiontimebcodestring-) ⇒ [`TaskSearchResponse.completionDate`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-search-response.mdx#code-style-fontweight-normal-tasksearchresponsebcompletiondatebcodestring) - - [`Task.processDefinitionId`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessdefinitionidbcodestring-) ⇒ [`TaskSearchResponse.processDefinitionKey`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-search-response.mdx#code-style-fontweight-normal-tasksearchresponsebprocessdefinitionkeybcodestring) - - [`Task.processInstanceId`](docs/apis-tools/tasklist-api/objects/task.mdx#code-style-fontweight-normal-taskbprocessinstanceidbcodestring-) ⇒ [`TaskSearchResponse.processInstanceKey`](docs/apis-tools/tasklist-api-rest/schemas/responses/task-search-response.mdx#code-style-fontweight-normal-tasksearchresponsebprocessinstancekeybcodestring) - -::: - -### Variable - -Instead of [variable](../tasklist-api/queries/variable.mdx) GraphQL query: - -```graphql -# Get the variables by variable id -variable(id: String!): Variable! -``` - -The following [get variable](../tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md#get-variable) endpoint should be used: - -```bash -curl -X 'GET' \ - 'http://{host}/v1/variables/{variableId}' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -### Variables - -Instead of [variables](../tasklist-api/queries/variables.mdx) GraphQL query: - -```graphql -# Get a collection of Variables by name -variables(taskId: String!, variableNames: [String!]!): [Variable!]! 
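# Illustrative invocation matching the REST example that follows (the taskId is a placeholder):
# query { variables(taskId: "2251799813685728", variableNames: ["varA", "varB"]) { name value } }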
-``` - -The following [search task variables](../tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#search-task-variables) endpoint should be used: - -```bash -curl -X 'POST' \ - 'http://{host}/v1/tasks/{taskId}/variables/search' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' \ - -d '{ - "variableNames": [ - "varA", "varB" - ] - }' -``` - -### Form - -Instead of [form](../tasklist-api/queries/form.mdx) GraphQL query: - -```graphql -# Get task form by formId and processDefinitionId -form(id: String!, processDefinitionId: String!): Form -``` - -The following [get form](../tasklist-api-rest/controllers/tasklist-api-rest-form-controller.md#get-form) endpoint should be used: - -```bash -curl -X 'GET' \ - 'http://{host}/v1/forms/{formId}?processDefinitionKey={processDefinitionKey}' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -:::note - -Note that `processDefinitionKey` query parameter in HTTP request represents the same value as [`form.processDefinitionId`](docs/apis-tools/tasklist-api/queries/form.mdx#code-style-fontweight-normal-formbprocessdefinitionidbcodestring--), -and in REST API response [`FormResponse.processDefinitionKey`](docs/apis-tools/tasklist-api-rest/schemas/responses/form-response.mdx#code-style-fontweight-normal-formresponsebprocessdefinitionkeybcodestring-) field -is the renamed equivalent of [`Form.processDefinitionId`](docs/apis-tools/tasklist-api/objects/form.mdx#code-style-fontweight-normal-formbprocessdefinitionidbcodestring--). - -::: - -## Mutations - -### Claim task - -Instead of [claimTasks](../tasklist-api/mutations/claim-task.mdx) GraphQL mutation: - -```graphql -# Claim a task with `taskId` to `assignee`. Returns the task. -claimTask(taskId: String!, assignee: String, allowOverrideAssignment: Boolean): Task! -``` - -The following [assign task](../tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#assign-task) endpoint should be used: - -```bash -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/assign' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -### Unclaim task - -Instead of [unclaimTasks](../tasklist-api/mutations/unclaim-task.mdx) GraphQL mutation: - -```graphql -# Unclaim a task with taskId. Returns the task. -unclaimTask(taskId: String!): Task! -``` - -The following [unassign task](../tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#unassign-task) endpoint should be used: - -```bash -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/unassign' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` - -### Complete task - -Instead of [completeTasks](../tasklist-api/mutations/complete-task.mdx) GraphQL mutation: - -```graphql -# Complete a task with taskId and optional variables. Returns the task. -completeTask(taskId: String!, variables: [VariableInput!]!): Task! 
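# Illustrative invocation (placeholder values, assuming VariableInput's name/value shape where value is a JSON-encoded string):
# mutation { completeTask(taskId: "2251799813685728", variables: [{ name: "approved", value: "true" }]) { id taskState } }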
-``` - -The following [complete task](../tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md#complete-task) endpoint should be used: - -```bash -curl -X 'PATCH' \ - 'http://{host}/v1/tasks/{taskId}/complete' \ - -H 'accept: application/json' \ - -H 'Cookie: TASKLIST-SESSION={tasklistSessionId}' -``` diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-overview.md b/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-overview.md deleted file mode 100644 index dd642881811..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-overview.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -id: tasklist-api-overview -title: Overview -slug: /apis-tools/tasklist-api/tasklist-api-overview -sidebar_position: 1 -description: "Build apps powered by BPMN that require human interaction, and make requests." ---- - -In this document, we'll go over the basics on how to consume the Tasklist GraphQL API. Read more about how to build a real world application [here](../tasklist-api-tutorial). - -:::note -Review the new [Tasklist REST API](../tasklist-api-rest/tasklist-api-rest-overview.md). This API offers the same functionality as the current GraphQL API, but with a more streamlined and efficient way of interacting with our service. - -The GraphQL API will be deprecated in the near future. To ensure a smooth transition, we'll continue to support our GraphQL API for a period of time, giving you an opportunity to migrate to the new REST API version at your own pace. We will provide further details on the timeline and process for this migration soon. -::: - -## Endpoint - -Tasklist provides a GraphQL API at endpoint `/graphql`. - -From Camunda 8 onwards the endpoint is `https://${REGION}.tasklist.camunda.io:443/${CLUSTER_ID}/graphql` - -## Authentication in the cloud - -To access the API endpoint, you need an access token. - -Your client must send a header in each request: - -`Authorization: Bearer ` - -For example, send a request using _curl_: - -```shell -curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer " -d '{"query": "{tasks(query:{}){name}}"}' http://localhost:8080/graphql -``` - -### How to obtain the access token - -You must obtain a token to use the Tasklist API. When you create a Tasklist [client](/guides/setup-client-connection-credentials.md), you get all the information needed to connect to Tasklist. - -See our guide on [building your own client](/apis-tools/build-your-own-client.md). 
- -The following settings are needed: - -| Name | Description | Default value | -| ------------------------ | ----------------------------------------------- | --------------------- | -| client id | Name of your registered client | - | -| client secret | Password for your registered client | - | -| audience | Permission name; if not given use default value | `tasklist.camunda.io` | -| authorization server url | Token issuer server | - | - -Send a token issue _POST_ request to the authorization server with the following content: - -```json -{ - "client_id": "", - "client_secret": "", - "audience": "", - "grant_type": "client_credentials" -} -``` - -See the following example with _curl_: - -```shell -curl -X POST --header 'content-type: application/json' --data '{"client_id": "", "client_secret":"","audience":"","grant_type":"client_credentials"}' https:// -``` - -If the authorization is successful, the authorization server sends back the access token, when it expires, scope, and type: - -```json -{ - "access_token": "ey...", - "scope": "...", - "expires_in": 86400, - "token_type": "Bearer" -} -``` - -## Authentication for Self-Managed cluster - -The authentication is described in [Tasklist Configuration - Authentication](/docs/self-managed/tasklist-deployment/tasklist-authentication/#identity). - -## Obtaining the Tasklist schema - -To obtain the Tasklist GraphQL schema, send a request to the endpoint with a GraphQL introspection query as described [here](https://graphql.org/learn/introspection/), or use the [generated API documentation](/docs/apis-tools/tasklist-api/generated.md). - -There are also several [tools to explore GraphQL APIs](https://altair.sirmuel.design). - -For example, you want to know about provided types: - -```graphql -query { - __schema { - queryType { - fields { - name - type { - kind - ofType { - kind - name - } - } - } - } - } -} -``` - -## Example requests and responses - -### Get all task names - -_Request:_ - -```graphql -{ - tasks(query: {}) { - name - } -} -``` - -_Response:_ - -```json -{ - "data": { - "tasks": [ - { - "name": "Check payment" - }, - { - "name": "Register the passenger" - } - ] - } -} -``` - -### Get all tasks completed with id, name, and state - -_Request:_ - -```graphql -{ - tasks(query: { state: COMPLETED }) { - id - name - taskState - } -} -``` - -_Response:_ - -```json -{ - "data": { - "tasks": [ - { - "id": "2251799813685728", - "name": "Check payment", - "taskState": "COMPLETED" - } - ] - } -} -``` diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-tutorial.md b/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-tutorial.md deleted file mode 100644 index fbb19dcc509..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/tasklist-api-tutorial.md +++ /dev/null @@ -1,342 +0,0 @@ ---- -id: tasklist-api-tutorial -title: Tutorial -slug: /apis-tools/tasklist-api/tasklist-api-tutorial -sidebar_position: 2 -description: "Let's implement an application using the Tasklist API." ---- - -## Building an application using the Tasklist API and NestJS - -The Tasklist API provides a simple way for you to build apps powered by BPMN that require human interaction. - -With this example, we'll use NestJS (one of the most popular Node.js backend frameworks) to a build a loan request review application. - -## Getting started - -For this tutorial we'll need: - -- Node v14+ -- The [NestJS CLI](https://docs.nestjs.com/cli/overview) tool. Install it by running `npm install -g @nestjs/cli`. 
-- [A cluster on Camunda 8](../../components/console/manage-clusters/create-cluster.md) -- [A set of API credentials; remember to check the Tasklist option when creating them](../../components/console/manage-clusters/manage-api-clients.md). Don't forget to save these, we'll need them later. -- [A clone of this repo](https://github.com/camunda-community-hub/camunda-cloud-tasklist-api-nestjs) - -## Before moving forward - -If you have all the prerequisites from the getting started section above, you should have cloned a repo with the complete demo application we're going to build over this tutorial. The default branch in this repo has the complete application, so we need to `checkout` to the branch `0-getting-started` before proceeding. - -Inside the repo folder, you'll find some files and two folders, one of these folders is called `demo-data/` and the other `frontend/`. As it might be evident inside each of these folders, there are two different projects. - -The former will be responsible by deploying the demo process into Zeebe and generating instances for that process. The latter is a front-end application that will consume our API; this project is bootstrapped with [Vite](https://vitejs.dev), [bulma](https://bulma.io) for styling, and [react-query](https://react-query.tanstack.com) - -## Creating a new NestJS application - -Now let's bootstrap our NestJS app. Take the following steps: - -1. Open your terminal and go to the cloned repository folder. -2. Run `nest new api`. -3. Pick `yarn` as a package manager. - -This will create the NestJS project for us inside the `api/` folder. We can clean up the project a bit and remove the files `api/app.controller.spec.ts`, `api/app.controller.ts`, and `api/app.service.ts`. - -We can also remove the references from the deleted files in `api/app.module.ts`. The file should look like this: - -```ts -import { Module } from "@nestjs/common"; - -@Module({ - imports: [], - controllers: [], - providers: [], -}) -export class AppModule {} -``` - -To check if everything is working as expected, run `yarn workspace api run start:dev` from the root folder on your terminal. You should see a message similar to the one below: - -```sh -[00:00:00 AM] Starting compilation in watch mode... -[00:00:00 AM] Found 0 errors. Watching for file changes. -[Nest] 46621 - 00/00/0000, 00:00:00 AM LOG [NestFactory] Starting Nest application... -[Nest] 46621 - 00/00/0000, 00:00:00 AM LOG [InstanceLoader] AppModule dependencies initialized +12ms -[Nest] 46621 - 00/00/0000, 00:00:00 AM LOG [NestApplication] Nest application successfully started +3ms -``` - -## Generating the Tasklist service - -Inside the `api/` folder we'll need to generate a service that will be responsible for accessing the Tasklist API. Take the following steps: - -1. Run `nest g service`. -2. You'll be prompted to pick a name for the service, let's pick `tasklist`. -3. Run `yarn add @nestjs/axios`. - -A folder called `tasklist/` will be created with the service definition and test; you can delete the tests if you wish. We also installed the package `@nestjs/axios`, so we can make requests to the Tasklist API. -To make HTTP requests we need to inject the module into the service, like below: - -```ts -import { Injectable } from "@nestjs/common"; -import { HttpService } from "@nestjs/axios"; - -@Injectable() -export class TasklistService { - constructor(private readonly http: HttpService) {} -} -``` - -Now, we're ready to make requests to the API. 
First, let's define a Data Transfer Object (DTO) with the shape of the tasks we're going to request. For that, we can create a file in `tasklist/dto/task.dto.ts`. There, we can define the DTO as follows: - -```ts -type Variable = { - name: string; - value: string; -}; - -export class TaskDto { - id: string; - name: string; - processName: string; - creationTime: string; - completionTime: string | null; - assignee: string | null; - variables: Variable[]; - taskState: "CREATED" | "COMPLETED" | "CANCELED"; - sortValues: [string, string]; - isFirst: boolean | null; - formKey: string | null; - processDefinitionId: string; - taskDefinitionId: string; -} -``` - -We can implement the requests. For this, we need to define the Tasklist API query and define the methods on the service: - -```ts -import { HttpService } from "@nestjs/axios"; -import { Injectable } from "@nestjs/common"; -import { firstValueFrom } from "rxjs"; -import { TaskDto } from "./dto/task.dto"; - -const getTasksQuery = ` - query GetTasks($state: TaskState $pageSize: Int $searchAfter: [String!] $searchBefore: [String!] $taskDefinitionId: String!) { - tasks(query: { state: $state pageSize: $pageSize searchAfter: $searchAfter searchBefore: $searchBefore taskDefinitionId: $taskDefinitionId }) { - id - creationTime - variables { - value - name - } - taskState - isFirst - sortValues - } - } -`; - -type QueryVariables = { - pageSize?: number; - searchAfter?: [string, string]; - searchBefore?: [string, string]; - state?: "CREATED" | "COMPLETED"; - taskDefinitionId?: string; -}; - -@Injectable() -export class TasklistService { - constructor(private readonly http: HttpService) {} - - async getTasks(variables: QueryVariables): Promise { - const { http } = this; - const { errors, data } = ( - await firstValueFrom( - http.post("/", { - /* - for simplicity we just used Axios here, but since the Tasklist API is a GraphQL API - a package like `graphql-request` might be better suited for this - */ - query: getTasksQuery, - variables, - }) - ) - ).data; - - if (errors) { - // handle error - } - - return data.tasks; - } -} -``` - -To keep things concise, we have one query and one method here. To see the complete implementation, review [this file](https://github.com/camunda-community-hub/camunda-cloud-tasklist-api-nestjs/blob/2-generating-tasklist-service/api/src/tasklist/tasklist.service.ts). - -## Handling the Tasklist API authentication - -We have the implementation of our service, but we still can't make requests to the Tasklist API because we're not providing any credentials to the API. - -To achieve this, we need to rename the file `.env.example` to `.env` (the file needs to be on the root because we'll reuse it to generate the demo data), and the content of this file should look like this: - -```sh -ZEEBE_ADDRESS=".bru-2.zeebe.camunda.io:443" -ZEEBE_CLIENT_ID="k2FKt_PNMrRUFQO-QOR9MtCygvGsT.sm" -ZEEBE_CLIENT_SECRET="C-o5WFhvoZKv4-oQGHWg~d2MObjdr-GUv3cdqRS3~6fCoHaLleEEwnOqRToQvWda" -ZEEBE_AUTHORIZATION_SERVER_URL="https://login.cloud.camunda.io/oauth/token" -TASKLIST_API_ADDRESS="https://bru-2.tasklist.camunda.io//graphql" -ZEEBE_AUTHORIZATION_AUDIENCE="tasklist.camunda.io" -``` - -You can find all this information on the **API** tab of the cluster page. The client id and secret should be on the file you downloaded in the getting started section. - -Now that we have our credentials, we can authenticate and inject the JWT token into every request we make into Tasklist API. - -For this, we need to turn our Tasklist service into part of a module. 
Run `nest g module` and name it `tasklist`, the same we named the service. This will generate the module file and update `app.module.ts`. -We need to edit the `app.module.ts` file to use only the module: - -```ts -import { Module } from "@nestjs/common"; -import { TasklistModule } from "./tasklist/tasklist.module"; - -@Module({ - imports: [TasklistModule], - controllers: [], - providers: [], -}) -export class AppModule {} -``` - -We can install the package `@nestjs/config` and finally implement the authentication: - -```ts -import { Logger, Module, OnModuleInit } from "@nestjs/common"; -import { ConfigModule, ConfigService } from "@nestjs/config"; -import { HttpModule, HttpService } from "@nestjs/axios"; -import { TasklistService } from "./tasklist.service"; -import { firstValueFrom, map } from "rxjs"; - -type AuthResponse = { - access_token: string; - scope: string; - expires_in: number; - token_type: string; -}; - -@Module({ - imports: [ - HttpModule, - ConfigModule.forRoot({ - envFilePath: "../.env", - }), - ], - providers: [TasklistService], - exports: [TasklistService, HttpModule, ConfigModule], -}) -export class TasklistModule implements OnModuleInit { - logger = new Logger(TasklistModule.name); - - constructor( - private readonly http: HttpService, - private readonly config: ConfigService - ) {} - - public async onModuleInit() { - const { - http: { axiosRef }, - config, - logger, - } = this; - const credentials = await this.fetchCredentials(); - - logger.log("Tasklist credentials fetched"); - - axiosRef.defaults.baseURL = config.get("TASKLIST_API_ADDRESS"); - axiosRef.defaults.headers[ - "Authorization" - ] = `Bearer ${credentials.access_token}`; - axiosRef.defaults.headers["Content-Type"] = "application/json"; - setTimeout(this.onModuleInit.bind(this), credentials.expires_in * 1000); // we need convert minutes to milliseconds - } - - private async fetchCredentials() { - const { http, config } = this; - - return firstValueFrom( - http - .post(config.get("ZEEBE_AUTHORIZATION_SERVER_URL"), { - client_id: config.get("ZEEBE_CLIENT_ID"), - client_secret: config.get("ZEEBE_CLIENT_SECRET"), - audience: config.get("ZEEBE_AUTHORIZATION_AUDIENCE"), - grant_type: "client_credentials", - }) - .pipe(map((response) => response.data)) - ); - } -} -``` - -When this module is initialized, we can read the credentials using the `@nestjs/config` package, authenticate into the API, and inject the JWT into Axios. We also set a timeout to request a new token when the first one expires. - -## Creating your application API - -We're now able to implement our actual business logic, but first we need to install some packages to create our custom GraphQL API. - -Run `yarn add @nestjs/graphql graphql apollo-server-express`. - -We'll have to generate a module, a service, and a resource. To achieve this, run the following commands: - -```sh -nest g module -nest g service -nest g resource -``` - -Use the name `loanRequests` for all options. For the resource generation, select the option `GraphQL (code first)` and you don't have to generate the CRUD entry points. 
- -We can now change our `app.module.ts` file to its final form: - -```ts -import { Module } from "@nestjs/common"; -import { GraphQLModule } from "@nestjs/graphql"; -import { LoanRequestsModule } from "./loan-requests/loan-requests.module"; - -@Module({ - imports: [ - GraphQLModule.forRoot({ - autoSchemaFile: true, - playground: true, - }), - LoanRequestsModule, - ], -}) -export class AppModule {} -``` - -And the `loan-requests/loan-requests.module.ts` to: - -```ts -import { Module } from "@nestjs/common"; -import { LoanRequestsService } from "./loan-requests.service"; -import { TasklistModule } from "src/tasklist/tasklist.module"; -import { LoanRequestsResolver } from "./loan-requests.resolver"; - -@Module({ - imports: [TasklistModule], - providers: [LoanRequestsResolver, LoanRequestsService], - exports: [LoanRequestsService, TasklistModule], -}) -export class LoanRequestsModule {} -``` - -We just need to implement the service, which will have three methods (one to get all requests, one to get a single request, and one to make a decision.) - -We will also have four resolvers for the GraphQL API (two mutations and two queries). - -Find the full implementation [here](https://github.com/camunda-community-hub/camunda-cloud-tasklist-api-nestjs/tree/4-application/api/src/loan-requests). - -You can run `yarn start:dev` inside the `api/` folder and the NestJS app should start without errors. - -To test your API, you can access `localhost:3000/graphl` on your browser and should see our custom GraphQL API playground. - -## Demo data generation and sample frontend - -To test our app with a real frontend, we can change the port inside `api/main.ts` to `6000`. Then, run from the root folder `yarn start:demo-data` to start the backend, frontend, and demo data generation, or just `yarn start` if you don't need any new data. diff --git a/versioned_docs/version-8.2/apis-tools/tasklist-api/unions/_category_.yml b/versioned_docs/version-8.2/apis-tools/tasklist-api/unions/_category_.yml deleted file mode 100644 index f6c8705b502..00000000000 --- a/versioned_docs/version-8.2/apis-tools/tasklist-api/unions/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Unions" diff --git a/versioned_docs/version-8.2/apis-tools/web-modeler-api/index.md b/versioned_docs/version-8.2/apis-tools/web-modeler-api/index.md deleted file mode 100644 index cbab5b5771a..00000000000 --- a/versioned_docs/version-8.2/apis-tools/web-modeler-api/index.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -id: index -title: Web Modeler API (REST, beta) -description: "Web Modeler API (beta) is a REST API and provides access to Web Modeler data. Requests and responses are in JSON notation." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -:::caution Beta Offering -In Web Modeler 8.2, the Web Modeler API is offered as an [alpha feature](/reference/alpha-features.md). -It is not recommended for production use and there is no maintenance service guaranteed. - -Consider upgrading to Web Modeler API `v1` released with Web Modeler 8.3, see [Web Modeler API](#migrating-from-beta-to-v1). -The beta API will be removed in Web Modeler 8.5. -::: - -Web Modeler provides a REST API at `/api/*`. Clients can access this API by passing a JWT access token in an authorization header `Authorization: Bearer `. 
- -## OpenAPI documentation - -A detailed API description is available as [OpenAPI](https://www.openapis.org/) specification at [https://modeler.camunda.io/swagger-ui/index.html](https://modeler.camunda.io/swagger-ui/index.html) -for SaaS and at [http://localhost:8070/swagger-ui.html](http://localhost:8070/swagger-ui.html) for Self-Managed -installations. - -## Authentication - -To authenticate for the API, generate a JWT token depending on your environment and pass it in each request: - - - - - -1. Create client credentials by clicking **Console > Manage (Organization) > API > Create New Credentials**. -2. Add permissions to this client for **Web Modeler API**. -3. After creating the client, you can download a shell script to obtain a token. -4. When you run it, you will get something like the following: - ```json - { - "access_token": "eyJhbG...", - "expires_in": 300, - "refresh_expires_in": 0, - "token_type": "Bearer", - "not-before-policy": 0 - } - ``` - - - - - -1. [Add an M2M application in Identity](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). -2. [Add permissions to this application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for **Web Modeler API (beta)**. -3. [Generate a token](/self-managed/identity/user-guide/authorizations/generating-m2m-tokens.md) to access the REST API. You will need the `client_id` and `client_secret` from the Identity application you created. - ```shell - curl --location --request POST 'http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token' \ - --header 'Content-Type: application/x-www-form-urlencoded' \ - --data-urlencode 'client_id=' \ - --data-urlencode 'client_secret=' \ - --data-urlencode 'grant_type=client_credentials' - ``` -4. You will get something like the following: - ```json - { - "access_token": "eyJhbG...", - "expires_in": 300, - "refresh_expires_in": 0, - "token_type": "Bearer", - "not-before-policy": 0 - } - ``` - - - - - -## Example usage - -1. Take the **access_token** value from the response object and store it as your token. -2. Send the token as an authorization header in each request. In this case, call the Web Modeler endpoint to validate the token. - - To use the JWT token in the cloud, use the following command: - - ```shell - curl -o - 'https://modeler.camunda.io/api/v1/info' -H 'Authorization: Bearer eyJhb...' - ``` - - When using a Self-Managed installation, you can use the following command instead: - - ```shell - curl -o - 'http://localhost:8070/api/beta/info' -H 'Authorization: Bearer eyJhb...' - ``` - - For Self-Managed, the Web Modeler API is currently offered as an [alpha feature](/reference/alpha-features.md). - -3. You will get something like the following: - ```json - { - "authorizedOrganization": "12345678-ABCD-DCBA-ABCD-123456789ABC", - "createPermission": true, - "readPermission": true, - "updatePermission": true, - "deletePermission": false - } - ``` - -## Limitations - -When using Web Modeler API: - -- You will not receive a warning when deleting a file, a folder, or a project. - This is important, because deletion cannot be undone. -- You will not receive a warning about breaking call activity links or business rule task links when moving files or folders to another project. - Breaking these links is considered harmless. The broken links can be manually removed or restored in Web Modeler. This operation is also - reversible - simply move the files or folders back to their original location. 
-- In Self-Managed, you will not be able to see a new project you created via the API in the UI. - This is because the project has no collaborators. - -## Rate Limiting - -In SaaS, the Web Modeler API uses rate limiting to control traffic. -The limit is 240 requests per minute. -Surpassing this limit will result into a `HTTP 429 Too Many Requests` response. - -On Self-Managed instances no limits are enforced. - -## FAQ - -### What is the difference between _simplePath_ and _canonicalPath_? - -In Web Modeler you can have multiple files with the same name, multiple folders with the same name, and even multiple projects with the same name. Internally, duplicate names are disambiguated by unique ids. - -The API gives you access to the names, as well as the ids. For example, when requesting a file you will get the following information: - -- **simplePath** contains the human-readable path. This path may be ambiguous or may have ambiguous elements (e.g. folders) in it. -- **canonicalPath** contains the unique path. It is a list of **PathElementDto** objects which contain the id and the name of the element. - -Internally, the ids are what matters. You can rename files or move files between folders and projects and the id will stay the same. - -### How do I migrate from the `beta` API to the `v1` API? {#migrating-from-beta-to-v1} - -Web Modeler's stable `v1` API is offered starting from Web Modeler 8.3. -For migration hints, see the [Web Modeler 8.3 API documentation](/versioned_docs/version-8.3/apis-tools/web-modeler-api/index.md#migrating-from-beta-to-v1). diff --git a/versioned_docs/version-8.2/apis-tools/working-with-apis-tools.md b/versioned_docs/version-8.2/apis-tools/working-with-apis-tools.md deleted file mode 100644 index a3326bd7ebe..00000000000 --- a/versioned_docs/version-8.2/apis-tools/working-with-apis-tools.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: working-with-apis-tools -title: "Working with APIs & tools" -sidebar_label: "Working with APIs & tools" -description: "Interact programmatically with Camunda 8 using official Zeebe client libraries and APIs." ---- - -import DocCardList from '@theme/DocCardList'; - -This section steps through two concepts for integration: - -- **Control your Camunda 8 process automation** by [deploying processes](/components/modeler/web-modeler/run-or-publish-your-process.md#deploy-a-process), [starting process instances](/components/modeler/web-modeler/run-or-publish-your-process.md), [activating jobs](/components/concepts/job-workers.md), and more using supplemental and community-maintained **Zeebe client libraries**. -- **Interact with the Camunda 8 ecosystem** by learning about [Camunda Components](/components/components-overview.md) and their APIs to communicate with your cluster, search, get and change data, create Cloud API clients, and more. - -:::note -You're permitted to use these web apps and APIs for free with the Free Edition in non-production environments. To use the software in production, [purchase the Camunda Enterprise Edition](https://camunda.com/products/cloud/camunda-cloud-enterprise-contact/). Read more in our [licensing](../reference/licenses.md) documentation. -::: - -## Deploy processes, start process instances, and more using Zeebe client libraries - -Clients allow applications to do the following: - -- Deploy processes. -- Start and cancel process instances. -- Activate jobs, work on those jobs, and subsequently complete or fail jobs. -- Publish messages. -- Update process instance variables and resolve incidents. 
- -The official clients mentioned below interact with [Zeebe](/components/zeebe/zeebe-overview.md), the workflow engine integrated into Camunda 8. All clients require [setting up client credentials](/guides/setup-client-connection-credentials.md) to authenticate. Clients connect to Camunda 8 via [gRPC](https://grpc.io), a high-performance, open source, and universal RPC protocol. - -Camunda 8 provides several official clients based on this API. Official clients have been developed and tested by Camunda. They also add convenience functions (for example, thread handling for job workers) on top of the core API. - -### Official Zeebe clients - -Official clients have been developed and tested by Camunda. They also add convenience functions (e.g. thread handling for job workers) on top of the core API. - - - -:::note -Other components in Camunda 8, such as [Tasklist API (GraphQL)](../apis-tools/tasklist-api/generated.md), provide language-agnostic APIs, but no clients to interact with them. GraphQL enables you to query, claim, and complete user tasks. -::: - -### Community clients - -Community clients supplement the official clients. These clients have not been tested by Camunda. - -- [C#](../apis-tools/community-clients/c-sharp.md) -- [JavaScript/Node.js](../apis-tools/community-clients/javascript.md) -- [Micronaut](../apis-tools/community-clients/micronaut.md) -- [Python](../apis-tools/community-clients/python.md) -- [Ruby](../apis-tools/community-clients/ruby.md) -- [Rust](../apis-tools/community-clients/rust.md) -- [Spring](../apis-tools/community-clients/spring.md) -- [Quarkus](../apis-tools/community-clients/quarkus.md) - -It is also possible to [build your own client](../apis-tools/build-your-own-client.md) You can browse other community extensions and the most up-to-date list of community clients [here](https://github.com/orgs/camunda-community-hub/repositories). - -## Learn about Camunda Components and their APIs - -Camunda 8 components have APIs to enable polyglot developers to work with in their programming language of choice. Below are links to available component APIs. - -![Architecture diagram for Camunda including all the components for SaaS](./img/ComponentsAndArchitecture_SaaS.png) - -### API Reference - - - -:::note -Additionally, visit our documentation on [Operate](../self-managed/operate-deployment/usage-metrics.md) and [Tasklist](../self-managed/tasklist-deployment/usage-metrics.md) usage metric APIs. 
-::: diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.png b/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.png deleted file mode 100644 index fc315f12bbf..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.pptx b/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.pptx deleted file mode 100644 index bdf42546fdf..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-assets/greenfield-architecture.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.png b/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.png deleted file mode 100644 index e45a1f8f91f..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.pptx b/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.pptx deleted file mode 100644 index dddb0605187..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7-assets/greenfield-architecture.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7.md b/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7.md deleted file mode 100644 index efb5ae9af85..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack-c7.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Deciding about your Camunda 7 stack -tags: - - Architecture - - Stack - - Database - - Application Server - - Spring Boot - - Maven -description: "Camunda 7 is very flexible and can be hooked into the architecture of your choice, giving you a number of important decisions to make." ---- - -Camunda 7 is very flexible and can be hooked into the architecture of your choice, giving you a number of important decisions to make. If you don't have special architecture requirements, we recommend following the proposed greenfield stack. You can also check the decision criteria presented below to make more customized choices. Choosing the stack will have big influence on your overall architecture. - -:::caution Camunda 7 only -This best practice targets Camunda 7.x only! If you look for Camunda 8, please refer to [Deciding about your Camunda 8 stack](../deciding-about-your-stack/). -::: - -## The Java greenfield stack - -The greenfield stack is pretty similar for various languages. This section described the currently a recommendation for Java developers. 
If you use different programming languages (like .NET or JavaScript), we recommend looking at Camunda 8, which supports polyglot environments better. The greenfield recommendation has recently changed, so if the recommendation below surprises you, you might want to check [this blog post](https://blog.bernd-ruecker.com/moving-from-embedded-to-remote-workflow-engines-8472992cc371).

Use the following stack:

1. Leverage the [Camunda Run](https://docs.camunda.org/manual/latest/installation/camunda-bpm-run/) distribution to run Camunda 7 using the [Enterprise Edition](https://camunda.com/download/enterprise/), preferably [via Docker](https://docs.camunda.org/manual/latest/user-guide/camunda-bpm-run/#starting-camunda-platform-run-using-docker).

1. Build your process solution project as a [Spring Boot](https://spring.io/projects/spring-boot) application, using the [Camunda Engine OpenAPI REST Client for Spring Boot](https://github.com/camunda-community-hub/camunda-engine-rest-client-java/).

1. Use [Maven](https://maven.apache.org/) as a build tool.

1. Use your favorite IDE, for example Visual Studio Code, IntelliJ IDEA, or Eclipse.

1. Use [OpenJDK JDK 17](https://jdk.java.net/17/) as the Java runtime.

1. Model the processes with the [Camunda Modeler](https://camunda.org/download/modeler/).

1. Add your process models and all Java code to the project.

1. The default distribution leverages an H2 file-based Java database. We recommend using this for development. We _strongly discourage_ multiple developers from sharing the same database during development, as this can lead to a multitude of problems.

To run the process application _in production_, extend the stack:

1. Use [PostgreSQL](http://www.postgresql.org/), or the database you already operate.

2. [Secure your installation](https://docs.camunda.org/manual/latest/user-guide/security/).

3. Run the process application by copying the `jar` file to the server and starting it with `java -jar YourProcessApplication.jar`. This can also be done via Docker.

See our [example application](https://github.com/berndruecker/camunda-platform-remote-spring-boot-example).

### Understanding the stack's architecture

The basic architecture with this stack is shown in the following diagram:

![greenfield stack architecture diagram](deciding-about-your-stack-c7-assets/greenfield-architecture.png)

### Understanding our motivation for the stack

While we went through long and detailed discussions to come to this recommendation, it _doesn't_ mean that it is necessarily superior to alternative stacks. You can still feel confident if you go down another route (see below for alternative options). But for our Best Practices, we wanted to give _exactly one_ greenfield recommendation for all our customers who have no special requirements on the stack.

We decided on this stack for the following reasons:

- All components are open source and easily available.
- Camunda Run is the preferred distribution, as it focuses on external tasks, the more modern paradigm also present in Camunda 8.
- Spring Boot is currently the most adopted way of building Java applications.
- Spring Boot applications are easy to customize as well as easy to roll out into test and production environments, either on-premises or in the cloud.
- PostgreSQL has a great track record for performance.

There are several _advantages of using the greenfield stack_:

- _Fewer decisions:_ Depending on your experience with the Java cosmos, deciding on a stack might not be easy. So if you don't have special requirements, follow a well-known path.
- _Proven:_ Many of our customers use this stack with great success.
- _More documentation & Best Practices:_ You don't have to write your own extensive documentation; just point to the Camunda docs.
- _Easier support:_ Asking for help gets much easier, as you do not have to explain your setup in detail.

### Considering Camunda 8 instead

Camunda 8 is an alternative process automation offering that is quickly catching up in functionality. For new projects, consider using Camunda 8 from the start. You can find [a quick comparison of concepts in the docs](/guides/migrating-from-camunda-7/conceptual-differences.md). Note that architecturally, the recommended greenfield stack in this best practice is close to what you do using Camunda 8.

### Getting started with the greenfield stack

Check the **prerequisites**:

- Install [OpenJDK JDK 17](https://jdk.java.net/17/).
- Install [Camunda Modeler](https://camunda.org/download/modeler/).
- Install an IDE like [Eclipse](https://eclipse.org/downloads/). We recommend the latest "Eclipse IDE for Java Developers".

  - Activate workspace file sync [refresh using built-in hooks or polling](http://stackoverflow.com/questions/4343735/avoiding-resource-is-out-of-sync-with-the-filesystem) to improve the interaction of Eclipse and Camunda Modeler.
  - [Add Camunda Assert to your Eclipse content assist favorites](https://github.com/camunda/camunda-bpm-platform/blob/master/test-utils/assert/README.md).

- Check your network access to [Camunda Artifactory](https://artifacts.camunda.com/ui/) for downloading Maven artifacts.
- As an Enterprise customer, check that you have your company credentials at hand to log in and get enterprise versions.

Create your **development project**:

1. Create a new Spring Boot project (e.g. using [Spring Initializr](https://start.spring.io/)).
2. Add the dependency for the [Camunda Engine OpenAPI REST Client](https://github.com/camunda-community-hub/camunda-engine-rest-client-java/) community extension:

```xml
<dependency>
  <groupId>org.camunda.community</groupId>
  <artifactId>camunda-engine-rest-client-complete-springboot-starter</artifactId>
  <version>7.16.0-alpha1</version>
</dependency>
```

3. Model a process with Camunda Modeler and save it under `src/main/resources`.
4. Run the main Java application class via your IDE.
5. Play around with your process using the Camunda web apps (user `demo`, password `demo`):

- [Tasklist](http://localhost:8080/camunda/app/tasklist/)
- [Cockpit](http://localhost:8080/camunda/app/cockpit/)

6. Package your application with `mvn clean install`.
7. Bring the `jar` file to your test or production server and start it there.
8. You can set up or integrate it into an existing continuous delivery pipeline.
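To illustrate how such a process solution talks to the remote Camunda Run engine, the following minimal sketch starts a process instance through the standard Camunda 7 REST API. The engine URL and the process definition key `invoice` are assumptions for this example; the community REST client added above is meant to provide typed Java equivalents of such calls.

```java
import java.util.Map;

import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.web.client.RestTemplate;

// Minimal sketch only: assumes Camunda Run is reachable on http://localhost:8080 and a
// process with the key "invoice" has already been deployed to it. It also assumes
// Jackson is on the classpath (it is with the usual Spring Boot web starter), so the
// request map below is serialized to JSON automatically.
@SpringBootApplication
public class ProcessApplication {

  public static void main(String[] args) {
    SpringApplication.run(ProcessApplication.class, args);
  }

  @Bean
  CommandLineRunner startOneInstance() {
    return args -> {
      RestTemplate rest = new RestTemplate();
      // Standard Camunda 7 REST API: start a process instance by process definition key
      Map<?, ?> response = rest.postForObject(
          "http://localhost:8080/engine-rest/process-definition/key/invoice/start",
          Map.of("variables", Map.of(
              "amount", Map.of("value", 42, "type", "Integer"))),
          Map.class);
      System.out.println("Started process instance " + response.get("id"));
    };
  }
}
```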
- -## Customize your stack - -### Selecting the process engine mode - -| | Camunda Run (Remote engine) | Embedded Engine | Container-Managed Engine | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| | Run the engine as an isolated BPM server only, communicating with it via Web Services. | Use the process engine as a simple library within your own application, typically started via Spring Boot. | Run the engine as a service preconfigured in your Java EE container. | -| Engine Bootstrap / Lifecycle Management | Out-of-the-box | Out-of-the-box for Spring Boot, otherwise do-it-yourself (see options below) | Out-of-the-box | -| Camunda Webapps work in all use-cases | ✔ | See limitations below | ✔ | -| Camunda REST API work in all use-cases | ✔ | See options below | ✔ | -| [Multiple Process Applications can share a central engine](https://docs.camunda.org/manual/latest/user-guide/process-applications/) | ✔ | Doable with a shared database, but requires custom development and has limitations | ✔ | -| [Multiple Engines can share resources (e.g. share the Job Executor)](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#the-job-executor-and-multiple-process-engines) | | | ✔ | -| One application WAR/EAR can include the process engine | | ✔ | | -| Supports untouched ("vanilla") containers | ✔ | ✔ | | -| Runs in every Java environment | ✔ | ✔ | [On Supported Containers](https://docs.camunda.org/manual/latest/introduction/supported-environments/#container-application-server-for-runtime-components-excluding-camunda-cycle) | -| Responsibility for Engine Installation and Configuration | Operations or Application Developer | Application Developer | Operations or Application Developer | -| Application point of view on process engine | Remote Server | Library | Library | -| Possible communication types with services | Remote | Java InVM, Remote | Java InVM, Remote | -| Programming language | Polyglot (Java, Node.js, C#, ...) | Java | Java | -| Use when | **Default**, if there is no reason against it. Especially if your architecture or applications are not Java based. | You want a single deployment including the engine. | You use a supported application server and prefer to separate engine installation from application development. | -| | [Learn More](https://docs.camunda.org/manual/latest/introduction/architecture/#standalone-remote-process-engine-server) | [Learn More](https://docs.camunda.org/manual/latest/introduction/architecture/#embedded-process-engine) | [Learn More](https://docs.camunda.org/manual/latest/introduction/architecture/#shared-container-managed-process-engine) | - -In essence, the general recommendation is: - -- Use Camunda Run whenever possible. - -- Do not use a container-managed engine. The container managed engine allows to separate installation and configuration of the engine from the application development. This is an advantage if you really separate these roles within your organization. 
However, we experienced that this causes trouble more often than it does help. Developers most often are still responsible to install the engine, but might not be able to access the application server itself. That also explains the rise of Spring Boot (often alongside with Docker) and many projects successfully moved to that approach instead. Unless you have good reasons, we would not recommend starting new projects using a container-managed engine. - -- Use an embedded engine via Spring Boot if you need to provide one combined deployment artifact. - -### Understanding embedded engine specifics - -If you want to use an embedded engine (which is not the default recommendation; see above,) the following information will help you use it correctly. - -#### Using Spring Boot - -The Camunda Spring Boot Starter is a clean way of controlling the embedded engine easily, so you don't have to think about the specifics mentioned below in this section. This makes Spring Boot a good choice for Camunda projects. - -#### Bootstrapping the engine and managing its lifecycle - -When running the engine in embedded mode, you have to control the _lifecycle_ of the engine yourself, basically _starting up_ and _shutting down_ the engine, and providing access to the API whenever a client needs it. You have several options to do that. - -| | Spring Boot | Spring Application Context | `processes.xml` | Programmatic | -| ------------------------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------- | --- | -| | Configure, start, and stop the engine via Spring Boot Starter | Configure, start, and stop the engine via Spring Beans defined in your Application Context. | Configure, start, and stop the engine via Camunda’s processes.xml descriptor and a ProcessApplication class. | Configure, start, and stop the engine yourself programmatically by using Java code. | -| Use when | You target Spring Boot as runtime environment. | You already use Spring. | You do not want to introduce a Spring dependency just for Camunda. | You need full control over the engine or want to do advanced customizations. | -| Unlimited Configuration Options | ✔ | ✔ | | ✔ | -| Development Effort | Low | Medium | Low | High | | - -#### Providing a REST API - -When running an embedded engine, it might be harder to deploy the pre-built REST API. - -| | Use Spring Boot Starter for REST API | Embed Camunda’s REST API | Use Camunda’s Standalone Web App REST API | -| ---------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | --- | -| | The Spring Boot Starter allows to run the REST API as well as the Camunda web applications. | Provide Camunda’s REST API by embedding its JAX-RS code into your application. | Deploy Camunda’s "Standalone" Web Application (which runs its own engine) and use its REST API. 
| -| No Classloading Restrictions | ✔ | ✔ | | -| Development Effort | Low | High | Low | | - -#### Providing Camunda web applications (Tasklist, Cockpit) - -When running an embedded engine, you may want to use a Camunda web application like Tasklist and Cockpit, but have to decide how exactly to run these web applications in your environment. - -| | Use Spring Boot Starter for Camunda Web Applications | Camunda "Standalone" Web Application | Embedded Camunda Web Applications | -| ------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| | The Spring Boot Starter allows you to run the REST API as well as the Camunda web applications. | Deploy Camunda’s "Standalone" Web Application, which is a WAR running its own engine, and point it to your applications engine database. | Embed the Camunda Web Applications into your own application, which is not a particularly easy task to do. | -| Classloading Restrictions | None | For example, you can not submit a task in Tasklist when a following synchronously called service uses a class contained in your own application. However, you can solve this by adding additional [safe points](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/). | None | -| Development Effort | Low | Low | High (undocumented) | -| | [Spring Boot Starter](https://github.com/camunda/camunda-bpm-platform/tree/master/spring-boot-starter/) | [Download Standalone Web Application](http://camunda.org/download/) | [Implement e.g. via Maven WAR Overlays](https://maven.apache.org/plugins/maven-war-plugin/overlays.html) | - -### Choosing a database - -Camunda 7 requires a _relational database_ for persistence. Even if the persistence provider is in theory pluggable and can be exchanged by e.g. some _NoSQL_ persistence this is neither recommended nor supported. Therefore, if you have use cases for this, discuss them with Camunda beforehand! - -| | PostgreSQL | Oracle | H2 | Other databases | -| ------------------------------ | ---------------------------------------------------------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | -| | PostgreSQL is an open source, object-relational database system. | Oracle Database is a commercial object-relational database system. | H2 is a Java SQL database with in-memory mode and a small footprint. 
| | -| Best Performance Observations | ✔ | ✔ | | | -| In-Memory Mode | | | ✔ | | -| No installation required | | | ✔ | | -| Recommended for unit testing | | | ✔ | -| Recommended for production use | ✔ | ✔ | | ✔ ([if supported](https://docs.camunda.org/manual/latest/introduction/supported-environments/#databases)) | -| | [Learn More](http://www.postgresql.org/) | [Learn More](https://www.oracle.com/database) | [Learn More](http://www.h2database.com/) | [Supported Databases](https://docs.camunda.org/manual/latest/introduction/supported-environments/#databases) | - -Ideally, use the database your organization already operates and your team is experienced with! - -### Modeling for executable processes - -We distinguish two different roles modeling in BPM projects: - -- _Process developers_ develop an executable process implementation. Process developers implementing solutions with Camunda must use Camunda Modeler to model executable processes, edit technical attributes, and manage and version (e.g. in Git or SVN) the resulting (XML) files as part of the development project. - -- _Process analysts_ capture the operational know how about a process. For this part of the work, it is possible to use a different tool than Camunda Modeler. - -| | Camunda Modeler | Third-Party Modeler (BPMN Standard Compliant) | Third-Party Modeler (Non-Compliant to Standard) | -| ------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- | -| Roundtrip in between process analysts and developers possible | ✔ | ✔ (Carefully check level of BPMN compliance - the [Model Interchange Working Group](http://bpmn-miwg.github.io/bpmn-miwg-tools/) can serve as a first starting point | | -| Use for process analysts | ✔ | ✔ | | -| Use for process developers | ✔ | | | -| Use when | You do not have a BPMN standard compliant modeling tool already rolled out. | You already rolled out a BPMN tool with a standard compliancy sufficient for roundtrip. | Try to avoid | -| | [Download](https://camunda.org/download/modeler/) | | | diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack.md b/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack.md deleted file mode 100644 index fdf73aed6da..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/architecture/deciding-about-your-stack.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Deciding about your stack -tags: - - Architecture - - Stack - - Database - - Application Server - - Spring Boot - - Maven ---- - -:::caution Camunda 8 -This best practice targets Camunda 8. For Camunda 7, please refer to [Deciding about your Camunda 7 stack](../deciding-about-your-stack-c7/). -::: - -Our greenfield stack recommendation is a result of extensive discussions and evaluations. While not the only option, it is a solid choice if there are no specific reasons to choose an alternative. - -Your choice of programming language should align with your team's expertise; we suggest Java or JavaScript for their broad applicability and support, and have outlined the Java greenfield stack below with Camunda 8 SaaS. 
- -## The Java greenfield stack - -:::caution -[Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) is currently a community-maintained project. -::: - -![greenfield stack architecture diagram](deciding-about-your-stack-assets/greenfield-architecture.png) - -This architecture diagram illustrates the flow of requests from a user's browser through Camunda SaaS, where workflows and decisions are orchestrated. The process then moves to the Spring Boot application, which is responsible for executing business logic, handling database interactions with PostgreSQL, and managing various components such as custom REST endpoints, BPMN/DMN definitions, and external task workers. - -### Why this stack? - -- SaaS simplifies workflow engine integration. -- Spring Boot is widely adopted for Java application development. -- Flexible for both on-premises and cloud environments. - -Discover more in our [getting started guide for microservices orchestration](/guides/getting-started-orchestrate-microservices.md) or the [Spring Zeebe instructions](https://github.com/camunda-community-hub/spring-zeebe). - -### Set up the stack - -For a Java-based setup using Camunda 8 SaaS and Spring Boot, use the following stack: - -#### Camunda 8 SaaS account and cluster - -If you're new to Camunda SaaS, check out our [getting started guide](/guides/introduction-to-camunda-8.md#getting-started) to set up your environment. - -After signing up, create a cluster by following [creating a cluster in Camunda 8](/guides/create-cluster.md), which provides step-by-step instructions on setting up a new cluster in the Camunda 8 environment. - -#### Spring Boot - -Develop your own process solutions as [Spring Boot](https://spring.io/projects/spring-boot) applications. This involves setting up a new Spring Boot project, either manually or using tools like [Spring Initializr](https://start.spring.io/). - -Integrate [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) into the Spring Boot project by adding necessary dependencies to the project’s pom.xml file and configuring the application to use Camunda services. - -#### Maven - -Use [Maven](https://maven.apache.org/) to manage the build lifecycle of the application. - -#### IDE selection - -Select an Integrated Development Environment (IDE) that supports Java development, Maven, and Spring Boot. Frequently used options include Visual Studio Code, IntelliJ IDEA, or Eclipse. - -#### Java runtime - -Install and use OpenJDK 17 as your Java runtime environment. Download it from the [official JDK 17 download page](https://jdk.java.net/17/). - -#### Modeling - -Download and use Camunda Modeler for designing and modeling business processes. Modeler is available [here](https://camunda.org/download/modeler/). - -#### Code integration - -Incorporate all Java code and BPMN process models into the Spring Boot project, ensuring that they are structured correctly and referenced properly within the application. - -### Run the process application: - -To run the process application, transfer the `jar` to the desired server. - -Start the application using the command `java -jar YourProcessApplication.jar`. Frequently, this deployment process is managed through Docker for ease of use. - -For a practical implementation, refer to our [example application on GitHub](https://github.com/camunda-community-hub/camunda-cloud-examples/tree/main/twitter-review-java-springboot), which demonstrates a typical setup for a Spring Boot-based process application with Camunda. 
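As a rough illustration of what such a project looks like in code, the sketch below shows a Spring Boot application with a single job worker. It is only a sketch under stated assumptions: the job type `charge-credit-card` is made up, and the annotation and package names follow the community Spring Zeebe project referenced above, which may differ between versions.

```java
import io.camunda.zeebe.client.api.response.ActivatedJob;
import io.camunda.zeebe.client.api.worker.JobClient;
import io.camunda.zeebe.spring.client.EnableZeebeClient;
import io.camunda.zeebe.spring.client.annotation.ZeebeWorker;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.stereotype.Component;

// Minimal sketch of a process solution using the community Spring Zeebe starter.
// The job type "charge-credit-card" is illustrative only, and the connection to your
// Camunda 8 SaaS cluster (cluster ID, client ID/secret, region) is expected in
// application.yaml as described in the Spring Zeebe documentation.
@SpringBootApplication
@EnableZeebeClient
public class ProcessApplication {

  public static void main(String[] args) {
    SpringApplication.run(ProcessApplication.class, args);
  }

  @Component
  public static class ChargeCreditCardWorker {

    // Subscribes to service tasks of type "charge-credit-card" in your BPMN model
    @ZeebeWorker(type = "charge-credit-card")
    public void handle(final JobClient client, final ActivatedJob job) {
      Object amount = job.getVariablesAsMap().get("amount");
      System.out.println("Charging credit card, amount = " + amount);

      // Explicitly complete the job so the process instance can continue
      client.newCompleteCommand(job.getKey()).send().join();
    }
  }
}
```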
- - - -## Customize your stack - -### Polyglot stacks - -You can develop process solutions as described with Java above also in any other programming language, including JavaScript. Use the [existing language clients and SDKs](/apis-tools/working-with-apis-tools.md) for doing this. - -### Run Camunda 8 Self-Managed - -Run Camunda 8 on your Kubernetes cluster. For local development, a [Docker Compose configuration is available](/self-managed/platform-deployment/docker.md), though not for production use. Learn more in the [deployment docs](/self-managed/platform-deployment/overview.md). diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/assign.png b/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/assign.png deleted file mode 100644 index f71baf739ae..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/assign.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/follow-up-filter.png b/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/follow-up-filter.png deleted file mode 100644 index 083e81a929f..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/follow-up-filter.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/process-variables.png b/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/process-variables.png deleted file mode 100644 index c5acec7d821..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/process-variables.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/processinstanceinfo.png b/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/processinstanceinfo.png deleted file mode 100644 index 7cf9c019fce..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/processinstanceinfo.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/task-lifecycle.png b/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/task-lifecycle.png deleted file mode 100644 index b97f021ce62..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7-assets/task-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7.md b/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7.md deleted file mode 100644 index 9a01a761ee2..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/architecture/extending-human-task-management-c7.md +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: Extending human task management in Camunda 7 -tags: - - Human Task - - Delegation 
  - Escalation
  - E-Mail Notification
  - 4-Eyes-Principle
  - Overdue Task
---

:::caution Camunda 7 only
This best practice targets Camunda 7.x only and is an addendum to [understanding human task management](../understanding-human-tasks-management/).
:::

## The Camunda 7 task lifecycle

Do not show the _lifecycle_ of user _tasks_ in the process model; it is generic and common to all processes, and can be controlled using the [Camunda BPM task lifecycle](https://docs.camunda.org/manual/latest/webapps/tasklist/task-lifecycle/) features.

![Task lifecycle](extending-human-task-management-c7-assets/task-lifecycle.png)

- Create: New tasks are normally created as part of _process execution_, but can be created by a _user action_, too (as standalone tasks). `taskService.newTask()`
- Set Candidate: Typically, candidates are initially set to _groups_ of people as part of _process execution_, but can be requested by API, too. `taskService.addCandidateGroup(taskId, groupId)`
- Claim: Individual members of a candidate group _assign themselves_ to tasks when working on them. `taskService.claim(taskId, userId)`
- Unclaim: Individual assignees _unassign themselves_ and move a task back to the candidates. `taskService.claim(taskId, null)`
- Assign: Directly assign a specific individual, either as part of _process execution_ or because it is explicitly requested by API. `taskService.setAssignee(taskId, userId)`
- Reassign: Individual assignees may want to _hand over_ a task to somebody else. `taskService.setAssignee(taskId, userId)`
- Delegate: Individual assignees may want to delegate (part of) the work: ask somebody else to _resolve (part of) the work_ in order to pass the task back subsequently. `taskService.delegateTask(taskId, userId)`
- Resolve: After having resolved the requested work, individual assignees will want to _pass a delegated task back to the owner_: the original assignee. `taskService.resolveTask(taskId)`
- Complete: This is how you _close the work on a task_ and ask the process execution to move on. `taskService.complete(taskId)`

## Typical use cases

### Handing over tasks directly to other people

You can always hand over a task assigned to you simply by _changing the assignee_. This means that the new assignee is now responsible and supposed to carry out the task all by themselves.

```java
taskService.setAssignee(taskId, "kermit");
```

This can also be achieved via the Camunda Tasklist:

![Task assignment](extending-human-task-management-c7-assets/assign.png)

### Delegating tasks to other people

Delegate a task assigned to you by using Camunda's "delegateTask". This means that somebody else is supposed to resolve (some of) the work and then pass the task back to you by resolving it. The original assignee is remembered as the "owner" of the task. A typical example is decision support: some other employees collect information in order to prepare a decision, but the original assignee has to take that decision.

Even though the engine does not enforce it, we recommend that you do not allow a delegated task to be completed directly if you use delegation. The task should always be resolved and then later completed by the owner. That's why there is no transition from "DELEGATED" to "COMPLETED" in the lifecycle shown.
- -```java -taskService.delegateTask(taskId, "gonzo"); -// and later -taskService.resolveTask(taskId); -``` - -### Notifying people about their tasks - -You might want to notify people about new tasks (e.g. via email). Do this by implementing a Camunda TaskListener, like shown in [this example](https://github.com/camunda/camunda-bpm-examples/tree/master/usertask/task-assignment-email). - -When you want to have this functionality for every user task you can use a ParseListener which adds it _everywhere_, so you don't have to adjust the BPMN model. See [BPMN Parse Listener](https://github.com/camunda/camunda-bpm-examples/tree/master/process-engine-plugin/bpmn-parse-listener) to see how this can be done. - -### Following up on tasks after some time - -Follow up on tasks after some definable time by using Camunda's [Follow Up Date](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/user-task/#follow-up-date) field and use it in connection with filtering tasks. - -You can set a calculated follow-up date by using a JUEL expression in your BPMN file - -```xml - -``` - -You can set a follow-up date, that may be requested by the user, using the Java API - -```java -task.setFollowUpDate(myDate); -``` - -Now you can use a task _filter_ with criteria checking the follow-up date and if it is due. This can be leveraged via API or in the Camunda Tasklist. - -![Follow up filter](extending-human-task-management-c7-assets/follow-up-filter.png) - -### Enforcing deadlines for tasks - -There are different ways of enforcing deadlines for Human Tasks. Typical actions for overdue tasks are: - -- Sending reminder mails -- Changing the assignee/group -- Creating a standalone task for a manager - -| | Explicit modeling in BPMN | Filtering due tasks | Querying due tasks and take action | Timeout task event | -| ------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------ | -| | Showing an escalation process path in your BPMN model (see example below) | Setting the Due Date field to easily filter for overdue tasks | Setting the Due Date field, querying it on a regular basis and take arbitrary actions | Implement a timeout event listener and configure it in the process model | -| | Explicit | Implicit | Implicit | Implicit | -| Bulk actions possible (e.g. one mail with a list of all due tasks) | | | yes | | -| No custom component required | yes | yes | Querying has to be done by external trigger or BPMN process | yes | -| Use when | The escalation is business relevant and has to be visible in the process model | Overdue tasks can be easily monitored via tasklist application, actions are taken manually | Sophisticated, automated actions should take place | A timely escalation mechanism is desired | -| Don’t use when…​ | Each and every User Task has a due date and explicit modeling would clutter your process model | You need an action to be executed automatically | You do not want to run your own scheduling infrastructure | The escalation should be visible in the process model | - -#### Modeling an escalation - -The following example shows how to explicitly model an escalation: - -
    - -1 - -The model shows an explicit escalation process path: if the tweet does not get reviewed within an hour, the boss needs to be reminded about the laws of the internet age. - -#### Filtering by due date - -This example shows how you can calculate and set the [Due Date](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/user-task/#due-date) field: - -```xml - -``` - -You can easily query for overdue tasks via API, e.g. all overdue tasks: - -```java -processEngine.getTaskService().createTaskQuery().dueBefore(new Date()).list(); -``` - -#### Model timeout task event - -This example shows how to model a timeout event on a user task: - -```xml - - - - - R/PT1H - - - - -``` - -Every hour, the process engine will invoke the `sendEmailReminderListener` bean to send out an email. The bean can access all task and process attributes. - -## Enhancing task lists with business data - -Allow users to filter their tasks by relevant business data. Display this data right inside the task list and not just when selecting a task form. To achieve this with acceptable performance, select the implementation approach appropriate for your needs. - -### Selecting an implementation approach - -To enhance your tasklist with business data, select the implementation approach appropriate for your needs. - -| | Camunda Process Variables | Camunda Native Query API | Custom MyBatis Mapping | Custom Process or Task "InfoEntity" | -| ----------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | -| | Use simple process or task variables to store business data with tasks, often as an additional copy. | Use a native query to enhance query performance when retrieving tasks filtered by business data. | Use a custom database mapping to speed up retrieval of task data combined with business data. | Use a custom database entity to store business data optimized for search and display. | -| Filter with Business Data as Simple Process Variables | yes | yes | yes | yes | -| Filter with Business Data in Domain Database | | yes | yes | yes | -| Display Business Data from Domain Database | (only via "copy as process variable") | (only via "copy as process variable") | yes | yes | -| Development Effort | out-of-the-box | low | high | high | -| No Dependency on Camunda Internals | yes | (take care not to use hard coded table names) | (take care not to use hard coded table names) | yes | -| Required Know-How | | SQL | SQL, Apache MyBatis, Advanced Camunda | Depends (e.g. JPA or JDBC) | -| Scaling / Performance | Limited (~ 5-10 criteria) | Medium (dep. on use case) | Medium (dep. on use case) | High (customized) | -| Out-of-the-box usage with Camunda Tasklist | yes | | | | - -### Using Camunda process/task variables - -Using plain and simple process or task variables to store business data has the big _advantage_ that -you can use the out-of-the-box mechanisms. Plain and simple means to only use primary data types (e.g. String, Long, ...). 
Especially when using _Camunda Tasklist_ you can easily use process/task variables to - -- _Show_ custom business data right inside the list, or -- Use such variables for defining re-usable _filters_ which narrow down your Tasklist items to the ones matching: - -![Process variables](extending-human-task-management-c7-assets/process-variables.png) - -#### Including derived or calculated values - -In case you need _dynamically calculated values_ or specific _fields derived from complex datatypes/objects_, you can achieve this by - -- using task variables as a kind of _caching_ mechanism, -- being filled by "calculating" the values using _expression language_ -- e.g. by means of an _I/O Mapping_ of a User Task: - -```xml - - - - ${invoice.calculateSum()} - ${invoice.creditorId} - - - -``` - -1 - -The total sum of the payment is calculated by calling a method on an invoice object and cached for search and display purposes. - -3 - -The creditorId is copied into an own variable, so it can be used in filters or shown in the tasklist. - -The _disadvantage_ of using process or task variables is that this mechanism does _not_ scale very well, as the process variables are stored in the generic Camunda database schema. This requires one row in the variable table for each variable, and all of them must be joined with the process instance table. The real limit is determined by the amount of data and the database used - but typically you cannot use more than 10 variables. - -#### Using a special search variable - -If you need variables only to search for tasks (but not to display attributes in the tasklist) you can use a simple workaround: Introduce _one single process variable optimized for tasklist queries_. Extract the attributes you need to filter your tasklist with and combine them to a single search string prepared to work with a SQL 'LIKE' query: - -| Variable | Type | Value | -| ------------- | -------- | --------------------------------------------------------- | -| customerId | (Long) | 4711 | -| customerName | (String) | camunda | -| customerPlace | (String) | Berlin | -| searchString | (String) | customerId=4711#customerName=camunda#customerPlace=Berlin | - -When defining your Camunda tasklist filter, use the searchString variable and search in it by means of a 'LIKE' query. - -### Using the Camunda native query API - -When you need to filter your tasks by business data stored in your own tables, leverage the possibility to create _native queries_ via the Camunda _Java API_. Native Queries are - -- expressed in _SQL_ which is not limited to the Camunda Tables. However -- the result is still _mapped to the Camunda Task entity_, so you do not have to dive into Apache MyBatis (the persistence framework used within Camunda). - -This means you _cannot_ load data from your domain objects by native queries, you simply can express arbitrary WHERE clauses. 
Example: - -```java -List tasks = taskService.createNativeTaskQuery() - .sql("SELECT * FROM #{taskTable} T" - + "LEFT OUTER JOIN (select * from #{variablesTable} where NAME_= 'customerId') VAR_CUSTOMER" - + " ON VAR_CUSTOMER.EXECUTION_ID_ = T.EXECUTION_ID_" - + "LEFT OUTER JOIN CUSTOMER " // <1> - + " ON CUSTOMER.ID_ = VAR_CUSTOMER.LONG_" - + "WHERE CUSTOMER.COMPANY = #{companyName}") - .parameter("companyName", "camunda") - .parameter("taskTable", managementService.getTableName(Task.class)) // <2> - .parameter("variablesTable", managementService.getTableName(VariableInstance.class)) - .list(); -``` - -1 - -Using native queries allows you to directly join Camunda tables with custom Business Data Tables (held in the same database) while still retrieving `Task.class` typed result sets. - -2 - -Make sure that you do not use hard coded table names to be less dependent on Camunda Internals. However, please note that the example still uses internal details, e.g. by using column names. Your queries or table/column name mappings would need to be adapted in case these internal details change. - -### Implementing a custom mybatis mapping - -In case you want to not just filter tasklists for business data, but also load custom data from domain objects in one query you can implement your own _MyBatis_ mapping and call it via _custom code_. - -Even if this is a very powerful mechanism, we normally do not recommend it, as you need to understand quite a bit about MyBatis. It will be hard to completely avoid dependencies on the Camunda database schema. The database schema is considered internal, hence this also might impose additional maintenance effort in your project for new Camunda versions. - -### Implementing a custom _process/task info entity_ - -For maximal flexibility (and best performance possibilities), create a custom ProcessInstanceEntity and/or TaskEntity designed to filter tasklists and display business data. - -Prefer a ProcessInstanceEntity over a TaskEntity as long as the business data you need is quite similar in between the different user tasks of a process definition. This way you avoid unnecessary database operations. If this is not the case you need to go for the TaskEntity as shown in the following example. - -![Process Instance Info](extending-human-task-management-c7-assets/processinstanceinfo.png) - -In this entity, combine the Camunda `task.id` with all your business attributes as separate columns. This allows to query for and display tasks without or with a minimum of SQL JOINs. Consider to use your entity now as a single source for displaying tasklists to your users - hence circumventing the Camunda TaskService Query API for that purpose completely. - -Using this approach requires to synchronize your entity with the Camunda state. - -If you target a _TaskInfoEntity_: - -- Create it via a _TaskListener_ -- Delete it via a Tasklistener - -If you target a _ProcessInstanceInfoEntity_: - -- Create a new instance by an _ExecutionListener_ on the process instance start event. The process instance id might not yet be known at this time. So either you create your own id and set it as a process variable (to SQL "join" on this later), or you can add a safe point before the listener triggers to make sure the process instance was committed to the database. - -- Decide when you have to update information in the entity, this depends on various factors (like amount of data, frequency of changes, way of changing data, ...). 
diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment-assets/Sample Calculation for Sizing Your C8 Environment Best Practice.xlsx b/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment-assets/Sample Calculation for Sizing Your C8 Environment Best Practice.xlsx deleted file mode 100644 index 3bf8c90aafc..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment-assets/Sample Calculation for Sizing Your C8 Environment Best Practice.xlsx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment-c7.md b/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment-c7.md deleted file mode 100644 index 724032301a9..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment-c7.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -id: sizing-your-environment-c7 -title: Sizing your Camunda 7 environment -tags: - - Database - - Performance - - Hardware - - Sizing -description: "Size your environment for Camunda 7, including sufficient hardware and database space. This best practice targets Camunda 7.x only." ---- - -Size your environment for Camunda 7 appropriately, including sufficient hardware and database space. - -:::caution Camunda 7 only -This best practice targets Camunda 7.x only! If you are using Camunda 8, visit [Sizing your Camunda 8 Environment](../sizing-your-environment/). -::: - -## Understanding the influencing factors - -You do not need big hardware to run Camunda. The hardware requirements are basically determined by two things: - -1. The container/application server you want to use (see [deciding about your Camunda 7 stack](../deciding-about-your-stack-c7/). -2. Things you do in [Delegation Code](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/) like service tasks. For example, when calling SOAP WebServices or when doing complex calculations in Java, more CPU time is consumed within the delegation code (your code) than in Camunda. - -The only way to get reliable figures for your project and environment is to do load testing on a close-to-production environment. We recommend doing this if in doubt. Steering the REST API via load generator tools like JMeter is relatively easy. - -From the Camunda perspective, there are a number of aspects to look at: - -- **Average duration between process start**: This determines the overall load on the system. We typically try to calculate how many new process instances per second. If you have a new process instance every couple of seconds or minutes (or even hours), you don't have to think about sizing. If you have **more than 100 process instances per second**, choose hardware wisely. As an example, we could run a benchmark on a normal developer notebook (Intel i5 4 Cores @2.5 Ghz, 8 GB RAM, SSD HD) that started around 100 to 500 process instances per second (see [Benchmarking Performance of Camunda Process Engine](http://blog.camunda.org/2014/01/benchmarking-camunda-process-engine.html) for details). - -- **Average process instance cycle time**: With the average cycle time of a process instance, you can calculate how many active process instances you typically have in the runtime database at the same time. 
For example, when starting one process instance per hour with a typical duration of two weeks, you have 2 weeks \* 7 days \* 24 hours \* 1 process instance/hour = 336 active process instances at any time. While this does not create CPU load for the engine, it influences database behavior like query execution time, index size, or index write performance. - -- **Wait states**: In some cases, process instances run through in one go, without stopping at any [wait state](http://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states). In these cases, process instances are never written to the runtime database, which decreases load dramatically. - -- **Number of concurrent clients**: This determines how many queries are fired against the database in parallel. It can also influence the sizing of thread pools. - -- **Typical queries**: Database performance and load is not a big issue if you only load process instances or tasks by `id` or `business key`, as both have an index. In contrast, querying for process instances or tasks by a combination of different process variables (e.g. to correlate by business data), has a severe impact on database load and performance. Especially in high load scenarios, think about the most common queries you will have. - -- **History level**: The configured [history level](http://docs.camunda.org/manual/latest/user-guide/process-engine/history/#set-the-history-level) determines how much history data is written and how much database disk space is required. - -## Determining hardware requirements - -### Performance & scalability - -We normally do not hit limits in scalability of Camunda. Due to the small footprint, the engine can run with extreme efficiency. All state is persisted in the database, so you can always add new process engine instances (e.g. cluster nodes) to speed up execution. - -The natural limit for this kind of architecture is the database. More scalability can be achieved using [Camunda 8](https://camunda.com/products/cloud/). - -### High availability - -We recommend running two machines for high availability. They do not have to form a proper cluster in terms of an application server cluster, just set up two identical nodes pointing to the same database. - -### Virtualization - -You can run Camunda on virtualized systems. The license is not bound to CPU cores, making this very easy from a licensing perspective as well. - -### Hardware - -We do not give concrete configuration recommendations. We recommend "server classes": - -- **Small**: Whatever you typically run as a small server (e.g. 1-2 CPU, 1-8 GB RAM). -- **Medium**: Whatever you typically run as a medium server (e.g. 2-4 CPU, 4-16 GB RAM). -- **Large**: Whatever you typically run as a large server (e.g. 4-64 CPU, 16-128 GB RAM). - -:::note -In most projects, small servers are sufficient. -::: - -Consider a medium server if: - -- You start more than 100 process instances per second. -- You have CPU intense delegation code. -- Your code/deployment has additional requirements. - -### Disk space - -Depending on the container, you need around 500 MB—1 GB of disk space. We recommend at least 2 GB to store enough logs in case you experience any problems. - -## Determining database requirements - -### Chose a good database - -As mentioned in [deciding about your Camunda 7 stack](../deciding-about-your-stack-c7/), we recommend Oracle or PostgreSQL. Together with DB2, we made the best performance observations there. 
- -Note that H2 is seldom used in production, and we do not have much experience with heavy load on this database ([H2 FAQ: Is it Reliable?](http://www.h2database.com/html/faq.html#reliable)). - -### Required database size - -The amount of space required on the database depends on the following: - -- [History level](http://docs.camunda.org/manual/latest/user-guide/process-engine/history/#set-the-history-level): Turning off history saves huge amounts of table space, as you only have to keep current runtime data in the database. Normally, you keep it to `FULL` to leverage audit logging capabilities of the process engine. -- [Process Variables](https://docs.camunda.org/manual/latest/user-guide/process-engine/variables/): All process variables need to be written to the database (in a serialized form, e.g. JSON). With the history level `FULL`, an entry is inserted into history tables every time a variable is changed, remembering the old value. With big data objects stored and often changed, this requires a lot of space. - -When calculating database size, you should also clarify if and how often you will be cleaning up your historical data, likely using the [history cleanup feature](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup). - -The real space occupied within your database depends very much on your database product and configuration. There is no easy formula to calculate this space. Instead, this section gives an example: - -
    - -1 - -25% of the instances will be reviewed. - -2 - -10% of the instances will be ended after review. - -To gain some numbers, we were running the [invoice example](https://github.com/camunda/camunda-bpm-platform/blob/master/examples/invoice/src/main/resources/) with the statistical distributions mentioned above in the following scenario: - -- History level `FULL` -- Starting 40,000 process instances (PIs) and let 33,000 PIs complete (deleted from runtime). The remaining 7,000 PIs are still active. -- Using an Oracle 12c Enterprise Edition (12.1.0.1.0, 64bit Production) installation on Linux. - -This gave us the following results: - -| - | Number of PIs | Disk space | Calculated disk space per PI | Remarks | -| ------- | ------------- | ---------- | ---------------------------- | --------------------------------------------------------- | -| Runtime | 6.989 | 28,375 MB | 4,157 KB | Around half of the space is used for indices. | -| History | 39.953 | 766,375 MB | 19,642 KB | Space requirements massively influenced by history level. | -| Sum | - | 794,75 MB | - | - | - -As a rule of thumb, capture the following figures and use the example above to make an informed "guess": - -- Number of process instances per day -- Average number of executed tasks per process instance -- Sum of size of variables per process instance -- Average number of updates per variable - -### Example calculation - -This is an example calculation from a real-life scenario. - -Given: - -- Estimated PI / month: 300,000 -- Concurrent users: 450 - -Assumptions for calculation: - -- Load is equally distributed on 20 working days (more realistic than 30 days, you can even add more buffer). -- Load is equally distributed on 8 working hours (more realistic than 24 hours, you can even add more buffer). -- The process consists of mostly user tasks and almost no service tasks. -- On average, a process instance takes around two days to complete. - -Calculation: - -- 15.000 new PI / day -- 1.875 new PI / hour -- 31 new PI / minute -- ~ new PI every 2 seconds - -In this case, a "small server" is sufficient. diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment.md b/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment.md deleted file mode 100644 index e4bc2883835..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/architecture/sizing-your-environment.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -id: sizing-your-environment -title: Sizing your environment -tags: - - Database - - Performance - - Hardware - - Sizing -description: "To define and size your environment for Camunda 8 appropriately, you need to understand the factors that influence hardware requirements." ---- - -In order to define and size your environment for Camunda 8 appropriately, you need to understand the factors that influence hardware requirements. Then you can apply this knowledge to select the appropriate Camunda 8 SaaS hardware package or size your self-managed Kubernetes cluster. - -:::caution Camunda 8 only -This best practice targets Camunda 8 only! If you are looking at Camunda 7, please visit [Sizing your Camunda 7 environment](../sizing-your-environment-c7/). -::: - -## Understanding influencing factors - -Let's understand the important numbers. - -### Throughput - -Throughput defines, how many process instances can be executed in a certain timeframe. - -It is typically easy to estimate the number of **process instances per day** you need to execute. 
If you only know the number of process instances per year, we recommend dividing this number by 250 (the average number of working days in a year).

But the hardware sizing depends more on the **number of BPMN tasks** in a process model. For example, you will have a much higher throughput for processes with one service task than for processes with 30 service tasks.

If you already know your future process model, you can use it to count the number of tasks for your process. For example, the following onboarding process contains five service tasks in a typical execution.

_(Diagram: example onboarding process with five service tasks.)_

If you don't yet know the number of service tasks, we recommend assuming 10 service tasks as a rule of thumb.

The number of tasks per process allows you to calculate the required number of **tasks per day (tasks/day)**, which can also be converted into **tasks per second (tasks/s)** (divide by 24 hours \* 60 minutes \* 60 seconds).

**Example:**

| Indicator                          |    Number | Calculation method | Comment                                      |
| :--------------------------------- | --------: | :----------------: | :------------------------------------------- |
| Onboarding instances per year      | 5,000,000 |                    | Business input                               |
| Process instances per business day |    20,000 |       / 250        | Average number of working days in a year     |
| Tasks per day                      |   100,000 |        \* 5        | Tasks in the process model as counted above  |
| Tasks per second                   |      1.16 |   / (24\*60\*60)   | Seconds per day                              |

In most cases, we define throughput per day, as this time frame is easier to understand. But in high-performance use cases you might need to define the throughput per second.

### Peak loads

In most scenarios, your load will be volatile rather than constant. For example, your company might start 90% of its monthly process instances on the same day of the month. The **ability to handle those peaks is the more crucial requirement and should drive your decision**, rather than the average load.

In the above example, that one day with the peak load defines your overall throughput requirements.

Sometimes, looking at peaks also means that you are not looking at all 24 hours of a day, but only at 8 business hours, or perhaps the busiest 2 hours of a day, depending on your typical workload.

### Latency and cycle time

In some use cases, the cycle time of a process (or sometimes even the cycle time of single tasks) matters. For example, you want to provide a REST endpoint that starts a process instance to calculate a score for a customer. This process needs to execute four service tasks, but the REST request should return a response synchronously, no later than 250 milliseconds after the request.

While the cycle time of service tasks depends very much on what you do in these tasks, the overhead of the workflow engine itself can be measured. In an experiment with Camunda 8 1.2.4, running all worker code in the same GCP zone as Camunda 8, we measured around 10 ms processing time per process node and approximately 50 ms latency to process service tasks in remote workers. Hence, executing 4 service tasks results in 240 ms workflow engine overhead.

The closer you push throughput to the limits, the more latency you will get. This is basically because the different requests compete for hardware resources, especially disk write operations. As a consequence, whenever cycle time and latency matter to you, you should plan for a hardware buffer and not utilize your cluster too much. This makes sure your latency does not go up because of resource contention. A good rule of thumb is to multiply your average load by 20. This means you can not only accommodate unexpected peak loads, but also have more free resources on average, keeping latency down.

| Indicator                                                       |    Number | Calculation method | Comment                                                                                  |
| :-------------------------------------------------------------- | --------: | :----------------: | :--------------------------------------------------------------------------------------- |
| Onboarding instances per year                                   | 5,000,000 |                    | Business input, but irrelevant                                                           |
| Expected process instances on peak day                          |   150,000 |                    | Business input                                                                           |
| Process instances per second within business hours on peak day  |      5.20 |   / (8\*60\*60)    | Only looking at the seconds of the 8 business hours of a day                             |
| Process instances per second including buffer                   |    104.16 |       \* 20        | Adding some buffer is recommended in critical high-performance or low-latency use cases  |

### Payload size

Every process instance can hold a payload (known as [process variables](/docs/components/concepts/variables/)). The payload of all running process instances must be managed by the runtime workflow engine, and all data of running and ended process instances is also forwarded to Operate and Optimize.

The data you attach to a process instance (process variables) influences resource requirements. For example, it makes a big difference whether you only add one or two strings (requiring around 1 KB of space) to your process instances, or a full JSON document containing 1 MB. Hence, the payload size is an important factor when looking at sizing.

There are a few general rules regarding payload size:

- The maximum [variable size per process instance is limited](/docs/components/concepts/variables/#variable-size-limitation), currently to roughly 3 MB.
- We don't recommend storing much data in your process context. See our [best practice on handling data in processes](/docs/components/best-practices/development/handling-data-in-processes/).
- Every [partition](/docs/components/zeebe/technical-concepts/partitions/) of the Zeebe installation can typically handle up to 1 GB of payload in total. Larger payloads can lead to slower processing. For example, if you run one million process instances with 4 KB of data each, you end up with 3.9 GB of data, and you should run at least four partitions. In reality, this typically means six partitions, as you want to run the number of partitions as a multiple of the replication factor, which by default is three.

The payload size also affects disk space requirements, as described in the next section.

### Disk space

The workflow engine itself will store data along with every process instance, especially to keep the current state persistent. This is unavoidable. If there are human tasks, data is also sent to Tasklist and kept there until the tasks are completed.

Furthermore, data is also sent to Operate and Optimize, which store data in Elasticsearch. These tools keep historical audit data for some time. The total amount of disk space can be reduced by using **data retention settings**. We typically delete data in Operate after 30 to 90 days, but keep it in Optimize for a longer period of time to allow more analysis. A good rule of thumb is something between 6 and 18 months.

:::note
Elasticsearch needs enough memory available to load a large amount of this data into memory.
:::

Assuming a [typical payload of 15 process variables (simple strings, numbers or booleans)](https://github.com/camunda/camunda/blob/stable/8.2/benchmarks/project/src/main/resources/bpmn/typical_payload.json), we measured the following approximations for disk space requirements using Camunda 8 SaaS 1.2.4.
Please note that these are not exact numbers, but they can give you an idea of what to expect: - -- Zeebe: 75 KiB / PI -- Operate: 57 KiB / PI -- Optimize: 21 KiB / PI -- Tasklist: 21 KiB / PI -- Sum: 174 KiB / PI - -Using your throughput and retention settings, you can now calculate the required disk space for your scenario. Example: - -| Indicator | Calculation method | Value | Comments | -| :------------------------- | :----------------: | -------------: | :------------------------------------------------------------------------------------------------- | -| Process instances per day | | 20,000 | | -| **Runtime** | | | | -| Typical process cycle time | \* 5 days | 100,000 | How long is a process instance typically active? Determines the number of active process instances | -| Disk space for Zeebe | \* 75 KiB | 7.15 GiB | (Converted into GiB by / 1024 / 1024) | -| Disk space for Tasklist | \* 21 KiB | 2.00 GiB | | -| **Operate** | | | | -| PI in retention time | \* 30 days | 600,000 | | -| Disk space | \* 57 KiB | 32.62 GiB | | -| **Optimize** | | | | -| PI in retention time | \* 6 months | 3,600,000 | | -| Disk space | \* 21 KiB | 72.10 GiB | | -| **Sum** | | **113.87 GiB** | | - -## Understanding sizing and scalability behavior - -Spinning up a Camunda 8 cluster means you run multiple components that all need resources in the background: the Zeebe broker, Elasticsearch (as the database for Operate, Tasklist, and Optimize), Operate, Tasklist, and Optimize. All of these components need to be equipped with resources. - -All components are clustered to provide high availability, fault tolerance, and resiliency. - -Zeebe scales horizontally by adding more cluster nodes (pods). This is **limited by the [number of partitions](/docs/components/zeebe/technical-concepts/partitions/)** configured for a Zeebe cluster, as the work within one partition cannot be parallelized by design. Hence, you need to define enough partitions to utilize your hardware. The **number of partitions cannot be changed after the cluster was initially provisioned** (at least not yet); elastic scalability of partitions is not yet possible. - -If you anticipate the load increasing over time, prepare by configuring more partitions than you currently need as a buffer. For example, you could multiply the number of partitions you need for your current load by four to add a buffer. This typically has just a small impact on performance. - -Camunda 8 runs on Kubernetes. Every component is operated as a so-called pod that gets resources assigned. These resources can be scaled vertically (that is, assigned more or less hardware dynamically) within certain limits. Note that vertical scaling does not always result in more throughput, as the various components have dependencies on each other. This is a complex topic and requires running experiments with benchmarks. In general, we recommend starting with the minimal hardware package described below. If you have further requirements, use this as a starting point and increase resources from there. - -Note that Camunda licensing does not depend on the provisioned hardware resources, making it easy to size according to your needs. - -## Sizing your runtime environment - -First, calculate your requirements using the information provided above, taking the example calculations from this page: - -- Throughput: 20,000 process instances / day -- Disk space: 114 GB - -Now you can select a hardware package that covers these requirements. In this example, the requirements fit well into a cluster of size S.
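- -To make this arithmetic easy to repeat with your own numbers, the following minimal Java sketch recalculates the example figures from this page (20,000 process instances per day, 5 tasks per instance, the disk space approximations per process instance, and the retention settings used above). The constants are illustrative business input and should be replaced with your own numbers: - -```java -public class SizingEstimate { - -  public static void main(String[] args) { -    // Business input from the example above (replace with your own numbers) -    double instancesPerDay = 20_000; -    double tasksPerInstance = 5; - -    // Throughput -    double tasksPerDay = instancesPerDay * tasksPerInstance; -    double tasksPerSecond = tasksPerDay / (24 * 60 * 60); - -    // Disk space, using the approximations per process instance (PI) from above -    double zeebeKiB = 75, operateKiB = 57, optimizeKiB = 21, tasklistKiB = 21; -    double activePis = instancesPerDay * 5;        // roughly 5 days cycle time -    double operatePis = instancesPerDay * 30;      // 30 days retention in Operate -    double optimizePis = instancesPerDay * 30 * 6; // 6 months retention in Optimize -    double toGiB = 1024d * 1024d;                  // KiB -> GiB - -    double diskGiB = (activePis * (zeebeKiB + tasklistKiB) -        + operatePis * operateKiB -        + optimizePis * optimizeKiB) / toGiB; - -    System.out.printf("Tasks/day: %.0f, tasks/s: %.2f, disk: %.2f GiB%n", -        tasksPerDay, tasksPerSecond, diskGiB); -  } -} -```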
- -### Camunda 8 SaaS - -Camunda 8 defines three fixed hardware packages you can select from. The table below gives you an indication of which requirements you can fulfill with each of them. If your requirements are above the mentioned numbers, please contact us to discuss a customized sizing. - -| **\*** | S | M | L | -| :----------------------------------------------------------------------- | ------------------------------: | ------------------------------: | -------------------------------: | -| Max Throughput **Tasks/day** | 5.9 M | 23 M | 43 M | -| Max Throughput **Tasks/second** | 65 | 270 | 500 | -| Max Throughput **Process Instances/day** | 0.5 M | 2.3 M | 4.3 M | -| Max Total Number of Process Instances stored (in Elasticsearch in total) | 100 k | 5.4 M | 15 M | -| Approx resources provisioned **\*\*** | 15 vCPU, 20 GB mem, 640 GB disk | 28 vCPU, 50 GB mem, 640 GB disk | 56 vCPU, 85 GB mem, 1320 GB disk | - -**\*** The numbers in the table were measured using Camunda 8 (version 8.0) and [the benchmark project](https://github.com/camunda-community-hub/camunda-8-benchmark). It uses a [ten task process](https://github.com/camunda-community-hub/camunda-8-benchmark/blob/main/src/main/resources/bpmn/typical_process.bpmn). To calculate day-based metrics, an equal distribution over 24 hours is assumed. - -**\*\*** These are the resource limits configured in the Kubernetes cluster and are always subject to change. - -You might wonder why the total number of stored process instances is that low. This is related to the limited resources provided to Elasticsearch, which runs into performance problems when too much data is stored there. By increasing the memory available to Elasticsearch, you can also increase that number. At the same time, even with this rather low number, the throughput of the core workflow engine during peak loads is still guaranteed, as it is not influenced by the amount of data in Elasticsearch. Also, you can always increase the memory for Elasticsearch later if required. - -### Camunda 8 Self-Managed - -Provisioning Camunda 8 onto your self-managed Kubernetes cluster depends on various factors. For example, most customers already have their own teams providing Elasticsearch for them as a service. However, the following example shows a possible configuration that is close to a cluster of size S in Camunda 8 SaaS and can serve as a starting point for your own sizing. Such a cluster can serve 500,000 process instances per day and store up to 100,000 process instances in Elasticsearch (in-flight and history).
- -| | | request | limit | -| ---------------------------------- | ------------------- | ------- | ----- | -| **Zeebe** | | | | -| \# brokers | 3 | | | -| \# partitions | 3 | | | -| replication factor | 3 | | | -| | vCPU \[cores\] | 0.8 | 0.96 | -| | Mem \[GB\] | 2 | 4 | -| | Disk \[GB\] | 32 | 192 | -| gateway | embedded in broker | | | -| **Operate** | | | | -| #importer | 1 | | | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] | 0.2 | 1 | -| #webapp | 2 | | | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] | 0.2 | 1 | -| **Tasklist** | | | | -| #importer | 1 | | | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] | 0.2 | 1 | -| #webapp | 2 | | | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] | 0.2 | 2 | -| **Optimize** | | | | -| #importer | 1 | | | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] | 0.4 | 1 | -| #webapp | 2 | | | -| | vCPU \[cores\] | 0.3 | 1 | -| | Mem \[GB\] | 0.4 | 1 | -| **Elastic** | | | | -| #statefulset | 1 | | | -| | vCPU \[cores\] | 1 | 2 | -| | Mem \[GB\] | 3 | 6 | -| | Disk \[GB\] | 64 | 100 | -| **Connectors** | | | | -| # | 1 | | | -| | vCPU \[cores\] | 0.2 | 0.4 | -| | Mem \[GB\] | 0.25 | 0.5 | -| **Other** (Worker, Analytics, ...) | | | | -| # | 1 | | | -| | vCPU \[cores\] | 0.4 | 0.4 | -| | Mem \[GB\] | 0.45 | 0.45 | - -## Planning non-production environments - -All clusters can be used for development, testing, integration, QA, and production. In Camunda 8 SaaS, production and test environments are organized via separate organizations within Camunda 8 to ease the management of clusters, while also minimizing the risk of accidentally accessing a production cluster. - -Note that functional unit tests written in Java that use [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test/) run against an in-memory broker, so no development cluster is needed for this use case. - -For typical integration or functional test environments, you can normally deploy a small cluster like the one shown above, even if your production environment is sized bigger. This is usually sufficient, as functional tests run much smaller workloads. - -Load or performance tests ideally run on the same sizing configuration as your production instance to yield reliable results. - -A typical customer setup consists of: - -- 1 Production cluster -- 1 Integration or pre-prod cluster (equal in size to your anticipated production cluster if you want to run load tests or benchmarks) -- 1 Test cluster -- Multiple developer clusters - -Ideally, every active developer runs their own cluster so that the workflow engine does not need to be shared among developers. Otherwise, clusters are not isolated, which can lead to errors if, for example, developer A deploys a new version of the same process as developer B. Developer clusters can typically be deleted when they are no longer used, as no data needs to be kept, so you might not need one cluster for every developer who ever works with Camunda 8. Using in-memory unit tests further reduces the contention on developer clusters. - -However, some customers do share a Camunda 8 cluster among various developers for economic reasons. This can work well if everybody is aware of the problems that can arise. - -## Running experiments and benchmarks - -If you are in doubt about which package to choose, you can run a load test with a representative workload on the target hardware package.
This will help you decide whether the specific package can serve your needs. - -This is recommended if you exceed the numbers mentioned above of around three million process instances per day. - -Take a look at the [Camunda 8 benchmark project](https://github.com/camunda-community-hub/camunda-8-benchmark) as a starting point for your own benchmarks. diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/claim.png b/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/claim.png deleted file mode 100644 index d90d4f1b9b4..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/claim.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.png b/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.png deleted file mode 100644 index 0518dc31949..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.pptx b/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.pptx deleted file mode 100644 index a2edcbe1ad3..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/human-tasks.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/tasklist-mockup.png b/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/tasklist-mockup.png deleted file mode 100644 index ed42b483318..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management-assets/tasklist-mockup.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management.md b/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management.md deleted file mode 100644 index 97296151313..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/architecture/understanding-human-tasks-management.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Understanding human task management -tags: - - Human Task - - Delegation - - Escalation - - E-Mail Notification - - 4-Eyes-Principle - - Overdue Task ---- - -## Using task assignment features - -The lifecycle of human tasks (like assigning, delegating, and completing tasks) is mostly a generic issue. There is no need to model common aspects into all your processes; it often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. - -![Task assignment](understanding-human-tasks-management-assets/human-tasks.png) - -Every task can be assigned either to a group of people or to a specific individual. An individual can 'claim' a task, indicating that they are picking the task from the pool (to avoid multiple people working on the same task).
- -As a general rule, you should assign human tasks in your business process to _groups of people_ instead of specific individuals, for example: - -```xml -<bpmn:userTask id="TaskApproveOrder" name="Approve order"> -  <bpmn:extensionElements> -    <!-- illustrative snippet: assign the task to a candidate group --> -    <zeebe:assignmentDefinition candidateGroups="sales" /> -  </bpmn:extensionElements> -</bpmn:userTask> -``` - -Then, require individual members of that group to explicitly _claim tasks_ before working on them. This way, you avoid different people working on the same task at the same time. See [`claimTask`](../../../apis-tools/tasklist-api/mutations/claim-task.mdx). - -```graphql -claimTask( - taskId: String! - assignee: String -): Task! -``` - -You can also directly claim tasks in Camunda Tasklist with the click of a button. - -![Claim](understanding-human-tasks-management-assets/claim.png) - -While assigning users to groups is advised, it's not the only option. You could always assign a task to a _single person_ who is supposed to complete the task (e.g. the individual 'customer' of your process or a coworker with specific knowledge for the case). You will need to have access to the specific person relevant for your process instance, e.g. via a process variable: - -```xml -<bpmn:userTask id="TaskCallCustomer" name="Call customer"> -  <bpmn:extensionElements> -    <!-- illustrative snippet: take the assignee from a process variable --> -    <zeebe:assignmentDefinition assignee="= customerAgent" /> -  </bpmn:extensionElements> -</bpmn:userTask> -``` - -## Deciding about your task list frontend - -If you have human tasks in your process, you must make up your mind on how exactly you want to let your users work on their tasks and interact with the workflow engine. You basically have three options: - -- [Camunda Tasklist](/docs/components/tasklist/introduction-to-tasklist/): The Tasklist application shipped with Camunda. This works out-of-the-box and requires low development effort. However, it is limited in terms of customizability and how much you can influence the user experience. - -- Custom task list application: You can develop a custom task list and adapt this to your needs without compromises. Human tasks are shown inside your custom application, following your style guide and usability concept. You will use the [Camunda Tasklist API](../../../apis-tools/tasklist-api/generated.md) in the background. This is very flexible, but requires additional development work. - -- Third party tasklist: If your organization already has a task list application rolled out to the field, you might want to use this for tasks created by Camunda. You will need to develop some synchronization mechanism. The upside of this approach is that your end users might not even notice that you introduced a new workflow engine. - -### Considerations for developing custom task lists - -When building a custom tasklist/application, you must plan for the following aspects. You will need to: - -- _Query_ for user tasks and _generate lists_ of those tasks. -- _Filter the list_ along specific attributes like current assignee, candidate groups, etc. -- _Select_ and _display_ the right forms for starting processes and completing tasks. -- Use _custom/business value_ data in order to _filter_ with those values and _display_ them correlated with the task list and within forms. -- _Authorize_ users to access those lists, filters, and forms. - -### Considerations for using third party task lists - -When integrating a third party tasklist, you must plan for the following aspects (a minimal sketch follows below this list). You will need to take care of: - -- _Creating_ tasks in the third party tasklist based on the user tasks created by Camunda. -- _Completing_ tasks in Camunda and moving process execution forward based on user action in the third party tasklist. -- _Cancelling_ tasks, triggered by Camunda or triggered by the user in the third-party tasklist. -- Transferring _business data_ to be edited in the third-party tasklist back and forth.
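- -To make this synchronization more tangible, here is a minimal, heavily simplified Java sketch of the first point (creating tasks in the third party tasklist based on Camunda user tasks). It assumes you poll the Tasklist GraphQL endpoint with a `tasks` query as described in the [Camunda Tasklist API](../../../apis-tools/tasklist-api/generated.md); the endpoint URL, the authentication, and the `createTicketsInThirdPartyTasklist` call are placeholders for your own infrastructure, and the exact query fields should be verified against the API reference of your Camunda version: - -```java -import java.net.URI; -import java.net.http.HttpClient; -import java.net.http.HttpRequest; -import java.net.http.HttpResponse; - -public class TasklistToThirdPartySync { - -  // Placeholder endpoint - replace with your own environment -  private static final String TASKLIST_GRAPHQL = "https://tasklist.example.com/graphql"; - -  public static void main(String[] args) throws Exception { -    // Ask Tasklist for open user tasks (query fields are assumptions - verify against the API reference) -    String query = "{\"query\": \"{ tasks(query: { state: CREATED }) { id name processName } }\"}"; - -    HttpClient http = HttpClient.newHttpClient(); -    HttpRequest request = HttpRequest.newBuilder() -        .uri(URI.create(TASKLIST_GRAPHQL)) -        .header("Content-Type", "application/json") -        // add your authentication header here, e.g. a bearer token -        .POST(HttpRequest.BodyPublishers.ofString(query)) -        .build(); - -    HttpResponse<String> response = http.send(request, HttpResponse.BodyHandlers.ofString()); - -    // Hand the result over to the third party tasklist - this method is a placeholder for -    // whatever API your existing tasklist application offers -    createTicketsInThirdPartyTasklist(response.body()); -  } - -  private static void createTicketsInThirdPartyTasklist(String tasksJson) { -    // Parse the JSON and create one ticket per task; store the Camunda task id with the -    // ticket so you can complete the Camunda task later on user action -    System.out.println("Would create tickets for: " + tasksJson); -  } -} -```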
- -Your third party tasklist application also needs to allow for some programmatic control of the lifecycle of its tasks. The third-party application _must have_ the ability: - -- To programmatically _create_ a new task. -- To _hook in code_ which programmatically informs other systems that the user is about to change a task's state. -- To _manage custom attributes_ connected to a task and programmatically access them. - -Additionally, it _should have_ the ability: - -- To programmatically _delete_ a task which was cancelled in Camunda. Without this possibility, such tasks remain in the user's tasklist and would need to be removed manually. Depending on how you integrate the task completion mechanism, a user who tries to complete such a task would either immediately see an error, or the action would simply no longer matter and just remove the task from the list. - -Transfer just the minimal amount of business data between Camunda and your third-party tasklist application. - -For creating tasks, transfer just the taskId and important business data references/ids to your domain objects. As much as possible should be retrieved later, and just when needed (e.g. when displaying task forms to the user), by requesting data from the process engine or by requesting data directly from other systems. - -For completing tasks, transfer just the business data which originated from Camunda and was changed by the user. This means that if you just maintain references, nothing needs to be transferred back. All other business data changed by the user should be transferred directly to the affected systems. - -### Task lists may not look like task lists - -There are situations where you might want to show a user interface that does not look like a task list, even if it is fed by tasks. The following _example_ shows such a situation in the document _input management_ process of a company. Every document is handled by a separate process instance, but users typically look at complete mailings consisting of several such documents. In a customer scenario, there were people in charge of assessing the scanned mailing and distributing the individual documents to the responsible departments. It was important to do that in one step, as sometimes documents referred to each other. - -So you have several user tasks which are heavily _interdependent_ from a business point of view and should therefore be completed _in one step_ by the same person. - -The solution to this was a custom user interface that basically queries for human tasks, but shows them grouped by mailings: - -![custom tasklist mockup](understanding-human-tasks-management-assets/tasklist-mockup.png) - -1 - -The custom tasklist shows each mailing as one "distribution task", even though it consists of several human tasks fetched from the workflow instances. - -2 - -The custom user interface allows you to work on all four human tasks at once. By dragging and dropping a document within the tree, the user can choose which department the document is delivered to. - -3 - -In case the user detects a scanning problem, they can request a new scan of the mailing. But as soon as all documents are quality assured, the button **Distribute Mailing** gets enabled. Clicking it completes all four human tasks - one for each document - which moves the four process instances associated with the documents forward.
diff --git a/versioned_docs/version-8.2/components/best-practices/best-practices-overview.md b/versioned_docs/version-8.2/components/best-practices/best-practices-overview.md deleted file mode 100644 index b2bc7375a4c..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/best-practices-overview.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Overview -description: "These practices are condensed experience using BPMN and DMN on the Camunda toolstack, and are a mix of conceptual and practical implementation information." ---- - -The Camunda Best Practices are our condensed experience of using BPMN and DMN on the Camunda toolstack, collected through consulting engagements with our customers, feedback from the community, and various other occasions. Best Practices are a mix of conceptual and practical implementation information. - -Best Practices represent the current state of our practical project experience as far as it is generalizable. They are neither "final" (in the sense that we ourselves will hopefully continue to learn!) nor are they necessarily the best approach for your own situation. - -Note that Camunda does not give the same guarantee for best practices as for the core product. In order to present as many experiences as possible, we cannot accept any responsibility for the accuracy or timeliness of the statements made. If examples of source code are shown, a total absence of errors in the provided source code cannot be guaranteed. Liability for any damage resulting from the application of the recommendations presented here is excluded. - -:::caution Camunda 8 -In general, best practices apply to Camunda 8, but there are also some specific Camunda 7 practices in their own section below. -::: - -## Project management best practices - -- [Following the Customer Success Path](../management/following-the-customer-success-path/) -- [Doing a proper POC](../management/doing-a-proper-poc/) - -## Architecture best practices - -- [Deciding about your stack](../architecture/deciding-about-your-stack/) -- [Sizing your environment](../architecture/sizing-your-environment/) -- [Understanding human task management](../architecture/understanding-human-tasks-management/) - -## Development best practices - -- [Connecting the workflow engine with your world](../development/connecting-the-workflow-engine-with-your-world) -- [Service integration patterns with BPMN](../development/service-integration-patterns) -- [Writing good workers](../development/writing-good-workers) -- [Dealing with problems and exceptions](../development/dealing-with-problems-and-exceptions) -- [Handling data in processes](../development/handling-data-in-processes) -- [Routing events to processes](../development/routing-events-to-processes) -- [Testing process definitions](../development/testing-process-definitions) - -## Modeling best practices - -- [Creating readable process models](../modeling/creating-readable-process-models/) -- [Naming BPMN elements](../modeling/naming-bpmn-elements/) -- [Naming technically relevant IDs](../modeling/naming-technically-relevant-ids/) -- [Modeling beyond the happy path](../modeling/modeling-beyond-the-happy-path/) -- [Modeling with situation patterns](../modeling/modeling-with-situation-patterns/) -- [Building flexibility into BPMN models](../modeling/building-flexibility-into-bpmn-models/) -- [Choosing the DMN Hit Policy](../modeling/choosing-the-dmn-hit-policy/) - -## Operations best practices - -- [Versioning process definitions](../operations/versioning-process-definitions/) -- 
[Reporting about processes](../operations/reporting-about-processes/) - -## Camunda 7 specific best practices - -:::caution Camunda 7 -The best practices in this section apply to Camunda 7 only -::: - -- Architecture - - [Deciding about your Camunda 7 stack](../architecture/deciding-about-your-stack-c7/) - - [Sizing your Camunda 7 environment](../architecture/sizing-your-environment-c7/) -- Development - - [Invoking services from a Camunda 7 process](../development/invoking-services-from-the-process-c7/) - - [Understanding Camunda 7 transaction handling](../development/understanding-transaction-handling-c7/) -- Operations - - [Operating Camunda 7](../operations/operating-camunda-c7/) - - [Performance tuning Camunda 7](../operations/performance-tuning-camunda-c7/) - - [Securing Camunda 7](../operations/securing-camunda-c7/) -- Other - - [Extending human task management in Camunda 7](../architecture/extending-human-task-management-c7/) diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/architecture.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/architecture.png deleted file mode 100644 index f4da19ea4ba..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/architecture.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/clients.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/clients.png deleted file mode 100644 index 1b875c1a48d..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/clients.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png deleted file mode 100644 index dc569924f16..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector.png deleted file mode 100644 index d49448f0a22..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/connector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/img-src.pptx b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/img-src.pptx deleted file mode 100644 index a59afc7dc1f..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/img-src.pptx and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png deleted file mode 100644 index 0e32b8c3668..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png deleted file mode 100644 index 840637ae010..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-example.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-example.png deleted file mode 100644 index 9544de0d32a..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/kafka-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/messaging-example.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/messaging-example.png deleted file mode 100644 index eb73103d3e6..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/messaging-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-connector.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-connector.png deleted file mode 100644 index e95c838454c..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-connector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-example.png b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-example.png deleted file mode 100644 index bb8f715bf09..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world-assets/rest-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md b/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md deleted file mode 100644 index 3dbafce265c..00000000000 --- 
a/versioned_docs/version-8.2/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: "Connecting the workflow engine with your world" -description: "To sketch the basic architecture of your solution, let's discuss how to connect the Zeebe workflow engine with your application or remote system." ---- - -One of your first tasks to build a process solution is to sketch the basic architecture of your solution. To do so, you need to answer the question of how to connect the workflow engine (Zeebe) with your application or with remote systems. - -This document predominantly outlines writing some custom glue code in the programming language of your choice and using existing client libraries. In some cases, you might also want to leverage existing Connectors as a starting point. - -The workflow engine is a remote system for your applications, just like a database. Your application connects with Zeebe via remote protocols, [gRPC](https://grpc.io/) to be precise, which is typically hidden from you, like when using a database driver based on ODBC or JDBC. - -With Camunda 8 and the Zeebe workflow engine, there are two basic options: - -1. Write some **programming code** that typically leverages the client library for the programming language of your choice. -2. Use some **existing Connector** which just needs a configuration. - -The trade-offs will be discussed later; let’s look at the two options first. - -## Programming glue code - -To write code that connects to Zeebe, you typically embed [the Zeebe client library](../../../apis-tools/working-with-apis-tools.md) into your application. An application can of course also be a service or microservice. - -If you have multiple applications that connect to Zeebe, all of them will require the client library. If you want to use a programming language where no such client library exists, you can [generate a gRPC client yourself](https://camunda.com/blog/2018/11/grpc-generating-a-zeebe-python-client/). - -![Clients to Zeebe](connecting-the-workflow-engine-with-your-world-assets/clients.png) - -Your application can basically do two things with the client: - -1. **Actively call Zeebe**, for example, to start process instances, correlate messages, or deploy process definitions. -2. **Subscribe to tasks** created in the workflow engine in the context of BPMN service tasks. - -### Calling Zeebe - -Using the Zeebe client’s API, you can communicate with the workflow engine. The two most important API calls are to start new process instances and to correlate messages to a process instance. 
- -**Start process instances using the** [**Java Client**](../../../apis-tools/java-client/index.md)**:** - -```java -processInstance = zeebeClient.newCreateInstanceCommand() - .bpmnProcessId("someProcess").latestVersion() - .variables( someProcessVariablesAsMap ) - .send() - .exceptionally( throwable -> { throw new RuntimeException("Could not create new instance", throwable); }); -``` - -**Start process instances using the** [**Node.js Client**](../../../apis-tools/community-clients/javascript.md)**:** - -```js -const processInstance = await zbc.createWorkflowInstance({ - bpmnProcessId: "someProcess", - version: 5, - variables: { - testData: "something", - }, -}); -``` - -**Correlate messages to process instances using the Java Client**: - -```java -zeebeClient.newPublishMessageCommand() - .messageName("messageA") - .messageId(uniqueMessageIdForDeduplication) - .correlationKey(message.getCorrelationId()) - .variables(singletonMap("paymentInfo", "YeahWeCouldAddSomething")) - .send() - .exceptionally( throwable -> { throw new RuntimeException("Could not publish message " + message, throwable); }); -``` - -**Correlate messages to process instances using the Node.js Client**: - -```js -zbc.publishMessage({ - name: "messageA", - messageId: messageId, - correlationKey: correlationId, - variables: { - valueToAddToWorkflowVariables: "here", - status: "PROCESSED", - }, - timeToLive: Duration.seconds.of(10), -}); -``` - -This allows you to connect Zeebe with any external system by writing some custom glue code. We will look at common technology examples to illustrate this in a minute. - -### Subscribing to tasks using a job worker - -To implement service tasks of a process model, you can write code that subscribes to the workflow engine. In essence, you will write some glue code that is called whenever a service task is reached (which internally creates a job, hence the name). - -**Glue code in Java:** - -```java -class ExampleJobHandler implements JobHandler { - public void handle(final JobClient client, final ActivatedJob job) { - // here: business logic that is executed with every job - client.newCompleteCommand(job.getKey()).send() - .exceptionally( throwable -> { throw new RuntimeException("Could not complete job " + job, throwable); }); - } -} -``` - -**Glue code in Node.js:** - -```js -function handler(job, complete, worker) { - // here: business logic that is executed with every job - complete.success(); -} -``` - -Now, this handler needs to be connected to Zeebe, which is generally done via subscriptions that internally use long polling to retrieve jobs. - -**Open subscription via the Zeebe Java client:** - -```java -try (JobWorker worker = zeebeClient - .newWorker() - .jobType("serviceA") - .handler(new ExampleJobHandler()) - .timeout(Duration.ofSeconds(10)) - .open()) { - waitUntilSystemInput("exit"); -} -``` - -**Open subscription via the Zeebe Node.js client:** - -```js -zbc.createWorker({ - taskType: "serviceA", - taskHandler: handler, -}); -``` - -You can also use integrations in certain programming frameworks, like [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) in the Java world, which starts the job worker and implements the subscription automatically in the background for your glue code.
- -**A subscription for your glue code is opened automatically by the Spring integration:** - -```java -@JobWorker(type = "serviceA") -public void handleJobFoo(final JobClient client, final ActivatedJob job) { - // here: business logic that is executed with every job - // you do not need to call "complete" on the job, as autoComplete is enabled by default -} -``` - -There is also documentation on [how to write a good job worker](../writing-good-workers/). - -## Technology examples - -Most projects want to connect to specific technologies. Currently, most people ask for REST, messaging, or Kafka. - -### REST - -You could build a piece of code that provides a REST endpoint in the language of your choice and then starts a process instance. - -The [Ticket Booking Example](https://github.com/berndruecker/ticket-booking-camunda-cloud) contains an example using Java and Spring Boot for the [REST endpoint](https://github.com/berndruecker/ticket-booking-camunda-cloud/blob/master/booking-service-java/src/main/java/io/berndruecker/ticketbooking/rest/TicketBookingRestController.java#L35). - -Similarly, you can leverage the [Spring Boot extension](https://github.com/zeebe-io/spring-zeebe/) to start up job workers that will [execute outgoing REST calls](https://github.com/berndruecker/ticket-booking-camunda-cloud/blob/master/booking-service-java/src/main/java/io/berndruecker/ticketbooking/adapter/GenerateTicketAdapter.java#L29). - -![REST example](connecting-the-workflow-engine-with-your-world-assets/rest-example.png) - -You can find [Node.js sample code for the REST endpoint](https://github.com/berndruecker/flowing-retail/blob/master/zeebe/nodejs/nestjs-zeebe/checkout/src/app.controller.ts) in the [Flowing Retail example](https://github.com/berndruecker/flowing-retail). - -### Messaging - -You can do the same for messages, which nowadays often means [AMQP](https://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol). - -The [Ticket Booking Example](https://github.com/berndruecker/ticket-booking-camunda-cloud) contains an example for RabbitMQ, Java, and Spring Boot. It provides a message listener to correlate incoming messages with waiting process instances, and [glue code to send outgoing messages onto the message broker](https://github.com/berndruecker/ticket-booking-camunda-cloud/blob/master/booking-service-java/src/main/java/io/berndruecker/ticketbooking/adapter/RetrievePaymentAdapter.java). - -![Messaging example](connecting-the-workflow-engine-with-your-world-assets/messaging-example.png) - -[Service integration patterns](../service-integration-patterns/) goes into detail on whether you want to use a send and a receive task here, or simply prefer one service task (spoiler alert: send and receive tasks are used here because the payment service might be long-running; think about expired credit cards that need to be updated or wire transfers that need to happen). - -The same concept applies to other programming languages. For example, you could use the [Node.js client for RabbitMQ](https://www.rabbitmq.com/tutorials/tutorial-one-javascript.html) and the [Node.js client for Zeebe](https://github.com/camunda-community-hub/zeebe-client-node-js) to create the same type of glue code as shown above. - -### Apache Kafka - -You can do the same trick with Kafka topics. The [Flowing Retail example](https://github.com/berndruecker/flowing-retail) shows this using Java, Spring Boot, and Spring Cloud Streams.
There is [code to subscribe to a Kafka topic and start new process instances for new records](https://github.com/berndruecker/flowing-retail/blob/master/kafka/java/order-zeebe/src/main/java/io/flowing/retail/kafka/order/messages/MessageListener.java#L39), and there is some glue code to create new records when a process instance executes a service task. Of course, you could also use other frameworks to achieve the same result. - -![Kafka Example](connecting-the-workflow-engine-with-your-world-assets/kafka-example.png) - -## Designing process solutions containing all glue code - -Typical applications will include multiple pieces of glue code in one codebase. - -![Architecture with glue code](connecting-the-workflow-engine-with-your-world-assets/architecture.png) - -For example, the onboarding microservice shown in the figure above includes: - -- A REST endpoint that starts a process instance (1). -- The process definition itself (2), probably auto-deployed to the workflow engine during the startup of the application. -- Glue code subscribing to the two service tasks that call a remote REST API (3) and (4). - -A job worker will be started automatically as part of the application to handle the subscriptions. In this example, the application is written in Java, but again, it could be [any supported programming language](/apis-tools/working-with-apis-tools.md). - -As discussed in [writing good workers](../writing-good-workers/), you will typically bundle all workers within one process solution, but there are exceptions where it makes sense to have single workers as separate applications. - -## Connectors - -As you can see, the glue code is relatively simple, but you need to write code. Sometimes you might prefer using an out-of-the-box component that connects Zeebe with the technology you need just by configuration. This component is called a **Connector**. - -A Connector can be unidirectional or bidirectional and is typically one dedicated application that implements the connection and translates in one or both directions of communication. Such a Connector can also be helpful when integrations are no longer that simple. - -![Connectors](connecting-the-workflow-engine-with-your-world-assets/connector.png) - -For example, the [HTTP Connector](https://github.com/camunda-community-hub/zeebe-http-worker) is a one-way Connector that contains a job worker that can process service tasks by making HTTP calls, as visualized in the example in the following figure: - -![REST Connectors](connecting-the-workflow-engine-with-your-world-assets/rest-connector.png) - -Another example is the [Kafka Connector](https://github.com/camunda-community-hub/kafka-connect-zeebe), as illustrated below. - -![Kafka Connector](connecting-the-workflow-engine-with-your-world-assets/kafka-connector.png) - -This is a bidirectional Connector which contains a Kafka listener for forwarding Kafka records to Zeebe and also a job worker which creates Kafka records every time a service task is executed. This is illustrated by the following example: - -![Kafka Connector Details](connecting-the-workflow-engine-with-your-world-assets/kafka-connector-details.png) - -### Out-of-the-box Connectors - -Most Connectors are currently community extensions, which basically means that they are not officially supported by Camunda, but by community members (who sometimes are Camunda employees). While this sounds like a restriction, it can also mean there is more flexibility to make progress.
A list of community-maintained Connectors can be found [here](https://github.com/camunda-community-hub/awesome-camunda-cloud#connectors-and-bridges). - -Camunda itself is also working on improving the Connector infrastructure to be able to provide more Connectors more easily in the future. - -### Using Connectors in SaaS - -Currently, Connectors are not operated as part of the Camunda 8 SaaS offering, which means you need to operate them yourself in your environment, which might be a private or public cloud. - -![Connectors in SaaS](connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png) - -### Reusing your own integration logic by extracting Connectors - -If you need to integrate with certain infrastructure regularly, for example your CRM system, you might also want to create your own CRM Connector, run it centrally, and reuse it in various applications. - -In general, we recommend not starting such Connectors too early. Don’t forget that such a Connector becomes hard to adjust once it is in production and reused across multiple applications. Also, it is often much harder to extract all configuration parameters correctly and fill them from within the process than it would be to have bespoke glue code in the programming language of your choice. - -Therefore, you should only extract a full-blown Connector if you understand exactly what you need. - -Don’t forget about the possibility of extracting common glue code into a simple library that is then used in different places. - -:::note -Updating a library that is used in various other applications can be harder than updating one central Connector. In this case, the best approach depends on your scenario. -::: - -Whenever you have such glue code running and really understand the implications of making it a Connector, as well as the value it will bring, extracting a Connector can make a lot of sense. - -## Recommendation - -As a general rule of thumb, prefer custom glue code whenever you don’t have a good reason to go with an existing Connector (like the reasons mentioned above). - -A good reason to use Connectors is if you need to solve complex integrations where little customization is needed, such as the [Camunda RPA bridge](https://docs.camunda.org/manual/latest/user-guide/camunda-bpm-rpa-bridge/) to connect RPA bots (soon to be available for Camunda 8). - -Connectors are also a good fit for scenarios where you don’t need custom glue code. For example, when orchestrating serverless functions on AWS with the [AWS Lambda Connector](https://github.com/camunda-community-hub/zeebe-lambda-worker). This Connector can be operated once and used in different processes. - -Some use cases also allow you to create a **reusable generic adapter**; for example, to send status events to your business intelligence system. - -But there are also common downsides with Connectors. First, the possibilities are limited to what the creator of the Connector has foreseen. In reality, you might have slightly different requirements and soon hit a limitation of a Connector. - -Second, you need to operate the Connector in addition to your own application. The complexity associated with this depends on your environment. - -Third, testing your glue code gets harder, as you can’t easily hook mocks into such a Connector as you could in your own glue code.
diff --git a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png b/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png deleted file mode 100644 index 94ffeda1942..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/image-src.pptx b/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/image-src.pptx deleted file mode 100644 index 606bbebdbc3..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/image-src.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png b/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png deleted file mode 100644 index f99c1eeb681..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/typical-call-chain.png b/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/typical-call-chain.png deleted file mode 100644 index 7ff9a94a315..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/typical-call-chain.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/worker-concept.png b/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/worker-concept.png deleted file mode 100644 index 9a4503ec673..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions-assets/worker-concept.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions.md b/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions.md deleted file mode 100644 index 566e3d1afaf..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/dealing-with-problems-and-exceptions.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Dealing with problems and exceptions -tags: - - Transaction - - ACID Transaction - - Compensation - - Exception Handling - - BPMN Error Event - - Incident - - Save Point -description: "Take a closer look at understanding workers, handling exceptions on a technical level, leveraging retries, using incidents, and more." 
---- - -## Understanding workers - -:::caution Camunda 8 only -The description of workers targets Camunda 8, even though [external tasks in Camunda 7](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) are conceptually similar. If you are looking for Camunda 7, visit [understanding Camunda 7 transaction handling](../understanding-transaction-handling-c7/). -::: - -First, let's briefly examine how a worker operates. - -Whenever a process instance arrives at a service task, a new job is created and pushed to an internal persistent queue within Camunda 8. A client application can subscribe to these jobs with the workflow engine by the task type name (which is comparable to a queue name). - -If there is no worker subscribed when a job is created, the job simply remains in the queue. If multiple workers are subscribed, they are competing consumers, and jobs are distributed among them. - -![Worker concept](dealing-with-problems-and-exceptions-assets/worker-concept.png) - -Whenever the worker has finished whatever it needs to do (like invoking the REST endpoint), it sends another call to the workflow engine, which [can be one of these three](/docs/components/concepts/job-workers/#completing-or-failing-jobs): - -- [`CompleteJob`](../../../apis-tools/grpc.md#completejob-rpc): The service task went well; the process instance can move on. -- [`FailJob`](../../../apis-tools/grpc.md#failjob-rpc): The service task failed, and the workflow engine should handle this failure. There are two possibilities: - - `remaining retries > 0`: The job is retried. - - `remaining retries <= 0`: An incident is raised and the job is not retried until the incident is resolved. -- [`ThrowError`](../../../apis-tools/grpc.md#throwerror-rpc): A BPMN error is reported, which is typically handled on the BPMN level. - -As the glue code in the worker is external to the workflow engine, there is **no technical transaction spanning both components**. Technical transactions refer to ACID (atomic, consistent, isolated, durable) properties, mostly known from relational databases. - -If, for example, your application leverages those capabilities, your business logic is either successfully committed as a whole, or rolled back completely in case of any error. However, those ACID transactions cannot be applied to distributed systems (the talk [lost in transaction](https://berndruecker.io/lost-in-transaction/) elaborates on this). In other words, things can get out of sync if either the job handler or the workflow engine fails. - -A typical example scenario is the following, where a worker calls a REST endpoint to invoke business logic: - -![Typical call chain](dealing-with-problems-and-exceptions-assets/typical-call-chain.png) - -Technical ACID transactions are only applied within the business application. The job worker mostly needs to handle exceptions on a technical level, e.g. to control retry behavior, or pass them on to the process level, where you might need to implement business transactions. - -## Handling exceptions on a technical level - -:::caution Camunda 8 only -The description of handling exceptions targets Camunda 8. If you are looking for Camunda 7, visit our documentation on [operating Camunda 7](../operations/operating-camunda-c7.md). -::: - -### Leveraging retries - -Using the [`FailJob`](../../../apis-tools/grpc.md#failjob-rpc) API is a handy way to leverage the built-in retry mechanism of Zeebe.
The initial number of retries is set in the BPMN process model: - -```xml -<bpmn:serviceTask id="TaskRetrieveMoney" name="Retrieve money"> -  <bpmn:extensionElements> -    <!-- illustrative snippet: the retries attribute sets the initial retry count --> -    <zeebe:taskDefinition type="retrieveMoney" retries="3" /> -  </bpmn:extensionElements> -</bpmn:serviceTask> -``` - -This number is typically decremented with every attempt to execute the service task. Note that you need to do that in your worker code. Example in Java: - -```java - @JobWorker(type = "retrieveMoney", autoComplete = false) - public void retrieveMoney(final JobClient client, final ActivatedJob job) { - try { - // your code - } catch (Exception ex) { - client.newFailCommand(job) - .retries(job.getRetries()-1) // <1>: Decrement retries - .errorMessage("Could not retrieve money due to: " + ex.getMessage()) // <2> - .send() - .exceptionally(t -> {throw new RuntimeException("Could not fail job: " + t.getMessage(), t);}); - } - } -``` - -1 - -Decrement the retries by one. - -2 - -Provide a meaningful error message, as this will be displayed to a human operator once an incident is created in Operate. - -Example in Node.js: - -```js -zbc.createWorker("retrieveMoney", (job) => { - try { - // ... - } catch (e) { - job.fail("Could not retrieve money due to: " + e.message, job.retries - 1); - } -}); -``` - -### Using incidents - -Whenever a job fails with a retry count of `0`, an incident is raised. An incident requires human intervention, typically using Operate. See [incidents in the Operate docs](/docs/components/operate/userguide/resolve-incidents-update-variables/). - -### Writing idempotent workers - -Zeebe uses the **at-least-once strategy** for job handlers, which is a typical choice in distributed systems. This means that the process instance only advances in the happy case (the job was completed, the workflow engine received the complete job request and committed it). A typical failure case occurs when the worker that polled the job crashes and cannot complete the job anymore. [In this case, the workflow engine gives the job to another worker after a configured timeout](/docs/components/concepts/job-workers#timeouts). This ensures that the job handler is executed at least once. - -But this can also mean that the handler is executed more than once! You need to consider this in your handler code. The [technical term describing this is idempotency](https://en.wikipedia.org/wiki/Idempotence). - -For example, typical strategies are described in [3 common pitfalls in microservice integration — and how to avoid them](https://blog.bernd-ruecker.com/3-common-pitfalls-in-microservice-integration-and-how-to-avoid-them-3f27a442cd07). One possibility is to ask the service provider if it has already seen the same request. A more common approach is to implement the service provider in a way that allows for duplicate calls. There are two ways of mastering this: - -- **Natural idempotency**. Some methods can be executed as often as you want because they just flip some state. Example: `confirmCustomer()`. -- **Business idempotency**. Sometimes you have business identifiers that allow you to detect duplicate calls (e.g. by keeping a database of records that you can check). Example: `createCustomer(email)`. - -If these approaches do not work, you will need to add **custom idempotency handling** by using unique IDs or hashes. For example, you can generate a unique identifier and add it to the call. This way, a duplicate call can be easily spotted if you store that ID on the service provider side. If you leverage a workflow engine, you can probably let it do the heavy lifting. Example: `charge(transactionId, amount)`.
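- -As an illustration, the following minimal Java sketch shows custom idempotency handling in a job worker. The in-memory `processedIds` set and the `transactionId` process variable are illustrative stand-ins for whatever durable store and business identifier you use in practice: - -```java -import io.camunda.zeebe.client.api.response.ActivatedJob; -import io.camunda.zeebe.client.api.worker.JobClient; -import io.camunda.zeebe.client.api.worker.JobHandler; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -class IdempotentChargeHandler implements JobHandler { - -  // Illustrative only: in production this would be a durable store, e.g. a database table -  private final Set<String> processedIds = ConcurrentHashMap.newKeySet(); - -  public void handle(final JobClient client, final ActivatedJob job) { -    // A unique identifier generated earlier and passed along as a process variable -    String transactionId = (String) job.getVariablesAsMap().get("transactionId"); - -    if (processedIds.add(transactionId)) { -      // First time we see this ID: execute the business logic, e.g. charge(transactionId, amount) -    } -    // Whether we just processed it or had seen it before, completing the job is safe -    client.newCompleteCommand(job.getKey()).send() -        .exceptionally(t -> { throw new RuntimeException("Could not complete job " + job, t); }); -  } -} -```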
Whatever strategy you use, make sure you have considered idempotency consciously.

## Handling errors on the process level

You will often encounter deviations from the "happy path" (the default scenario with a positive outcome) that should be modeled in the process.

### Using BPMN error events

A common way to handle these deviations is a BPMN error event, which allows a process model to react to errors within a task. For example:
1

We decide that we want to deal with an exception in the process: in case the invoice cannot be sent automatically...

2

...we assign a task to a human user, who is now in charge of delivering the invoice.

Learn more about the usage of [error events](/docs/components/modeler/bpmn/error-events/) in the user guide.

### Throwing and handling BPMN errors

In BPMN process definitions, we can explicitly model an end event as an error.
1

In case the item is not available, we finish the process with an **error end event**.

:::note
You can mimic a BPMN error in your glue code by using the [`ThrowError`](../../../apis-tools/grpc.md#throwerror-rpc) API. The consequences for the process are the same as if it were an explicit error end event. So, in case your 'purchase' activity is not a subprocess but a service task, it could throw a BPMN error informing the process that the good is unavailable.
:::

Example in Java:

```java
jobClient.newThrowErrorCommand(job)
    .errorCode("GOOD_UNAVAILABLE")
    .errorMessage("The good is unavailable")
    .send()
    .exceptionally(t -> {throw new RuntimeException("Could not throw BPMN error: " + t.getMessage(), t);});
```

### Thinking about unhandled BPMN exceptions

It is crucial to understand that, according to the BPMN specification, a BPMN error is either handled in the process or it **terminates the process instance**. It does not lead to an incident being raised. Therefore, you should normally always handle the BPMN error. You can, of course, also handle it in a parent process scope, as in the example below:
1

The boundary error event deals with the case that the item is unavailable.

### Distinguishing between exceptions and results

As an alternative to throwing an exception or BPMN error, you can also write a problematic result into a process variable and model an XOR gateway later in the process flow to take a different path if that problem occurs.

From a business perspective, the underlying problem then looks less like an error and more like a result of the activity. As a rule of thumb, we deal with _expected results_ of activities by means of gateways, and model exceptional errors, which _hinder us from reaching the expected result_, as boundary error events.
    - -1 - -The task is to "check the customer's credit-worthiness", so we can reason that we _expect as a result_ to know whether the customer is credit-worthy or not. - -2 - -We can therefore model an _exclusive gateway_ working on that result and decide via the subsequent process flow what to do with a customer who is not credit-worthy. Here, we just consider the order to be declined. - -3 - -However, it could be that we _cannot reach a result_, because while we are trying to obtain knowledge about the customer's creditworthiness, we discover that the ID we have is not associated with any known real person. We can't obtain the expected result and therefore model a _boundary error event_. In the example, the consequence is just the same and we consider the order to be declined. - -### Business vs. technical errors - -Note that you have two different ways of dealing with problems at your disposal now: - -- **Retrying**. You don't want to model the retrying, as you would have to add it to each and every service task. This will bloat the visual model and confuse business personnel. Instead, either retry or fall back to incidents as described above. This is hidden in the visual. -- Branch out **separate paths**, as described with the error event. - -In this context, we found the terms **business error** and **technical error** can be confusing, as they emphasize the source of the error too much. This can lead to long discussions about whether a certain problem is technical or not, and if you are allowed to see technical errors in a business process model. - -It's much more important to look at how you react to certain errors. Even a technical problem can qualify for a business reaction. In the above example, upon technical problems with the invoice service you can decide to manually send the invoice (business reaction) or to retry until the invoice service becomes available again (technical reaction). - -Or, for example, you could decide to continue a process in the event that a scoring service is not available, and simply give every customer a good rating instead of blocking progress. The error is clearly technical, but the reaction is a business decision. - -In general, we recommend talking about business reactions, which are modeled in your process, and technical reactions, which are handled generically using retries or incidents. - -## Embracing business transactions and eventual consistency - -### Technical vs business transactions - -Applications using databases can often leverage ACID (atomic, consistent, isolated, durable) capabilities of that database. This means that some business logic is either successfully committed as a whole, or rolled back completely in case of any error. It is normally referred to as "transactions". - -Those ACID transactions cannot be applied to distributed systems (the talk [lost in transaction](https://berndruecker.io/lost-in-transaction/) elaborates on this), so if you call out to multiple services from a process, you end up with separate ACID transactions at play. The following illustrations are taken from the O'Reilly book [Practical Process Automation](https://processautomationbook.com/): - -![Multiple ACID transactions](dealing-with-problems-and-exceptions-assets/multiple-acid-transactions.png) - -In the above example, the CRM system and the billing system have their local ACID transactions. The workflow engine itself also runs transactional. 
However, there cannot be one joint technical transaction spanning them all. This requires a new way of dealing with consistency on the business level, which is referred to as a **business transaction**:

![Business vs technical transaction](dealing-with-problems-and-exceptions-assets/business-vs-technical-transaction.png)

A **business transaction** marks a section in a process for which 'all or nothing' semantics (similar to a technical transaction) should apply, but from a business perspective. You might encounter inconsistent states in between (for example, a new customer being present in the CRM system, but not yet in the billing system).

### Eventual consistency

It is important to be aware that these temporary inconsistencies are possible. You also have to understand the failure scenarios they can cause. In the above example, you could have created a marketing campaign at a moment when a customer was already in the CRM system, but not yet in billing, so they got included in that list. Then, even if their order gets rejected and they never end up as an active customer, they might still receive an upgrade advertisement.

You need to understand the effects of this happening. Furthermore, you have to think about a strategy to resolve inconsistencies. The term **eventual consistency** suggests that you need to take measures to get back to a consistent state eventually. In the onboarding example, this could mean you need to deactivate the customer in the CRM system if adding them to the billing system fails. This leads to the consistent state that the customer is not visible in any system anymore.

### Business strategies to handle inconsistency

There are three basic strategies if a consistency problem occurs:

- Ignore it. While it sounds strange to consider ignoring a consistency issue, it actually can be a valid strategy. It's a question of how much business impact the inconsistency may have.
- Apologize. This is an extension of the strategy to ignore. You don't try to prevent inconsistencies, but you do make sure that you apologize when their effects come to light.
- Resolve it. Tackle the problem head-on and actively resolve the inconsistency. This could be done by different means, such as the reconciliation jobs mentioned earlier, but this practice focuses on how BPMN can help by looking into the Saga pattern.

Selecting the right strategy is a clear business decision, as none of them are right or wrong, but simply more or less well suited to the situation at hand. You should always think about the cost/value ratio.

### The Saga pattern and BPMN compensation

The Saga pattern describes long-running transactions in distributed systems. The main idea is simple: when you can't roll back tasks, you undo them. (The name Saga refers back to a paper written in the 1980s about long-lived transactions in databases.)

Camunda supports this through BPMN compensation events, which can link tasks with their undo tasks.

:::caution Camunda 7 only
Compensation is [not yet supported in Camunda 8](/components/modeler/bpmn/bpmn-coverage.md) and only available in Camunda 7.
:::
    - -1 - -Assume the customer was already added to the CRM system... - -2 - -...when an error occurred... - -3 - -...the process triggers the compensation to happen. This will roll back the business transaction. - -4 - -All compensating activities of successfully completed tasks will be executed, in this case also this one. - -5 - -As a result, the customer will be deactivated, as the API of the CRM system might not allow to simply delete it. diff --git a/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.svg b/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.svg deleted file mode 100644 index 481b8154ac3..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.svg +++ /dev/null @@ -1 +0,0 @@ -TweettweetId :Longcontent :Stringauthor :Employeereviewer :Employee...Komplexe Klasse \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.uml.xml b/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.uml.xml deleted file mode 100644 index 3ea1c8af45e..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes-assets/hold-references-only.uml.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - -ComplexClass -150,137,30,30 -false - -#000000 - -{"items":[{"static":false,"visibility":"","name":"tweetId","derived":false,"property":"","multiplicity":"","type":"Long","readonly":false,"defaultvalue":""},{"static":false,"visibility":"","name":"content","derived":false,"property":"","multiplicity":"","type":"String","readonly":false,"defaultvalue":""},{"static":false,"visibility":"","name":"author","derived":false,"property":"","multiplicity":"","type":"Employee","readonly":false,"defaultvalue":""},{"static":false,"visibility":"","name":"reviewer","derived":false,"property":"","multiplicity":"","type":"Employee","readonly":false,"defaultvalue":""},{"visibility":"","name":"...","property":"","multiplicity":"","type":"","defaultvalue":""}],"totalCount":5} -#ffffff - -Tweet - - # - - - - -Diagram -1485,1050,0,0 -horizontal - - - - - diff --git a/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes.md b/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes.md deleted file mode 100644 index f225d4cf953..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/handling-data-in-processes.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: "Handling data in processes" -tags: - - BPMN Data Object - - Variable - - Serialization -description: "When using Camunda, you have access to a dynamic map of process variables, which lets you associate data to every single process instance." ---- - -When using Camunda, you have access to a dynamic map of process variables, which lets you associate data to every single process instance (and local scopes in case of user tasks or parallel flows). Ensure you use these mechanisms in a lightweight and meaningful manner, storing just the relevant data in the process instance. 
- -Depending on your programming language, consider accessing your process variables in a type safe way, centralizing (simple and complex) type conversion and using constants for process variable names. - -## Understanding data handling in Camunda - -When reading and interpreting a business process diagram, you quickly realize there is always data necessary for tasks, but also to drive the process through gateways to the correct next steps. - -Examine the following tweet approval process example: - -
    - -1 - -The process instance starts with a freshly written `tweet` we need to remember. - -2 - -We need to present this `tweet` so that the user can decide whether to `approve` it. - -3 - -The gateway needs to have access to this information: was the tweet `approved`? - -4 - -To publish the tweet, the service task again needs the `tweet` itself! - -Therefore, the tweet approval process needs two variables: - -| Variable name | Variable type | Sample value | -| ------------- | ------------- | ---------------- | -| `tweet` | String | "@Camunda rocks" | -| `approved` | Boolean | true | - -In Camunda 8, [values are stored as JSON](/docs/components/concepts/variables/#variable-values). - -:::caution Camunda 7 handles variables slightly differently -This best practice describes variable handling within Camunda 8. Process variables are handled slightly differently with Camunda 7. Consult the [Camunda 7 documentation](https://docs.camunda.org/manual/latest/user-guide/process-engine/variables/) for details. In essence, variable values are not handled as JSON and thus there are [different values](https://docs.camunda.org/manual/latest/user-guide/process-engine/variables/#supported-variable-values) supported. -::: - -You can dynamically create such variables by assigning an object of choice to a (string typed) variable name; for example, by passing a `Map` when [completing](../../../apis-tools/tasklist-api/mutations/complete-task.mdx) the "Review tweet" task via the API: - -``` -// TODO: Double check! -completeTask( - taskId: "547811" - variables: [ - { - name: "approved" - value: true - } - ] -) -``` - -In Camunda, you do _not_ declare process variables in the process model. This allows for a lot of flexibility. See recommendations below on how to overcome possible disadvantages of this approach. - -Consult the [docs about variables](/docs/components/concepts/variables/#variable-values) to learn more. - -Camunda does not treat BPMN **data objects** () as process variables. We recommend using them occasionally _for documentation_, but you need to [avoid excessive usage of data objects](../../modeling/creating-readable-process-models#avoiding-excessive-usage-of-data-objects). - -## Storing just the relevant data - -Do not excessively use process variables. As a rule of thumb, store _as few variables as possible_ within Camunda. - -Please note the [technical limitations of variables sizes](/docs/components/concepts/variables/#variable-size-limitation). - -### Storing references only - -If you have leading systems already storing the business relevant data... - -![Hold references only](handling-data-in-processes-assets/hold-references-only.svg) - -...then we suggest you store references only (e.g. ID's) to the objects stored there. So instead of holding the `tweet` and the `approved` variable, the process variables would now, for example, look more like the following: - -| Variable name | Variable type | Value | -| ------------- | ------------- | ----- | -| `tweetId` | Long | 8213 | - -### Use cases for storing payload - -Store _payload_ (actual business data) as process variables, if you.... - -- ...have data only of interest within the process itself (e.g. for gateway decisions). - -In case of the tweet approval process, even if you are using a tweet domain object, it might still be meaningful to hold the approved value explicitly as a process variable, because it serves the purpose to guide the gateway decision in the process. 
It might not be true if you want to keep track in the tweet domain objects regarding the approval. - -| Variable name | Variable type | Value | -| ------------- | ------------- | ----- | -| `tweetId` | Long | 8213 | -| `approved` | Boolean | true | - -- ...communicate in a _message oriented_ style. For example, retrieving data from one system and handing it over to another system via a process. - -When receiving external messages, consider storing just those parts of the payload relevant for you, and not the whole response. This not only serves the goal of having a lean process variables map, it also makes you more independent of changes in the service's message interface. - -- ...want to use the process engine as kind of _cache_. For example, you cannot query relevant customer data in every step for performance reasons. - -- ...need to _postpone data changes_ in the leading system to a later step in the process. For example, you only want to insert the Tweet in the Tweet Management Application if it is approved. - -- ...want to track the _historical development_ of the data going through your process. - -- ...don't have a leading system for this data. - -## Using constants and data accessors - -Avoid the copy/paste of string representations of your process variable names across your code base. Collect the variable names for a process definition in _constants_. For example, in Java: - -```java -public interface TwitterDemoProcessConstants { - String VAR_NAME_TWEET = "tweet"; - String VAR_NAME_APPROVED = "approved"; -} -``` - -This way, you have much more security against typos and can easily make use of refactoring mechanisms offered by your IDE. - -However, if you also want to solve necessary type conversions (casting) or probably even complex serialization logic, we recommend that you use a **Data Accessor** class. It comes in two flavors: - -- A **Process Data Accessor**: Knows the names and types of all process variables of a certain process definition. It serves as the central point to declare variables for that process. -- A **Process Variable Accessor**: Encapsulates the access to exactly one variable. This is useful if you reuse certain variables in different processes. - -Consider, for example, the BPMN "Publish on Twitter" task in the Tweet Approval Process: - -
    - -1 - -We use a **TweetPublicationDelegate** to implement the "Publish on Twitter" task: - -```java -public class PublishTweetJobHandler implements JobHandler { - public void handle(JobClient client, ActivatedJob job) throws Exception { - String tweet = job.getVariablesAsType(TwitterDemoProcessVariables.class).getTweet(); - // ... -``` - -As you can see, the `tweet` variable is accessed in a type safe way. - -This reusable **Process Data Accessor** class could, for example, be a simple object. The Java client API can automatically deserialize the process variables as JSON into this object, while all process variables that are not found in that class are ignored. - -```java -public class TwitterDemoProcessVariables { - - private String tweet; - private boolean approved; - - public String getTweet() { - return tweet; - } - - public void setTweet(String tweet) { - this.tweet = tweet; - } -} -``` - -The getters and setters could further take care of additional serialization and deserialization logic for complex objects. - -Your specific implementation approach might differ depending on the programming language and framework you are using. - -## Complex data as entities - -There are some use cases when it is clever to _introduce entities alongside the process_ to store complex data in a relational database. You can see this logically as _typed process context_ where you create custom tables for your custom process deployment. Then, you can even use **Data** **Accessor** classes to access these entities in a convenient way. - -You will only store a reference to the entity's primary key (typically an artificial UUID) as real process variable within Camunda. - -Some people refer to this as **externalized process context**. - -There are a couple of advantages of this approach: - -- You can do very _rich queries_ on structured process variables via normal SQL. -- You can apply custom _data migration strategies_ when deploying new versions of your process or services, which require data changes. -- Data can be designed and modeled properly, even graphically by, for example, leveraging UML. - -It requires additional complexity by adding the need for a relational database and code to handle this. diff --git a/versioned_docs/version-8.2/components/best-practices/development/invoking-services-from-the-process-assets/external-task-pattern.png b/versioned_docs/version-8.2/components/best-practices/development/invoking-services-from-the-process-assets/external-task-pattern.png deleted file mode 100644 index 88cb03a5f0a..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/invoking-services-from-the-process-assets/external-task-pattern.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/invoking-services-from-the-process-c7.md b/versioned_docs/version-8.2/components/best-practices/development/invoking-services-from-the-process-c7.md deleted file mode 100644 index 0502baf58e1..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/invoking-services-from-the-process-c7.md +++ /dev/null @@ -1,732 +0,0 @@ ---- -title: "Invoking services from a Camunda 7 process" -tags: - - Service - - Java Delegate - - Expression Language - - External Task - - REST - - SOAP - - JMS - - Camel - - ESB - - SQL - - SAP ---- - -:::caution Camunda 7 only -This best practice targets Camunda 7.x only! 
If you are using Camunda 8, visit [connecting the workflow engine with your world](../connecting-the-workflow-engine-with-your-world/). -::: - -Access business logic implemented for the Java VM and remote services by means of small pieces of glue code. This glue code maps process input/output to your business logic by means of best-of-breed libraries of your own choosing. - -In most cases, you should use a pull approach, where external worker threads query Camunda for **external tasks**. Sometimes, you might also attach **JavaDelegates** to your model, and in case you need to define totally self-contained BPMN process definitions, you may want to leverage scripts or expressions for small pieces of logic. - -## Understanding the possibilities - -### Push and pull - -There are two patterns available to glue your code to a process model: - -- **Push:** The process engine actively issues a **service call** (or executes a **script**) via the mechanisms described below. The workflow engine pushes the work. -- **Pull:** External worker threads query the process engine API for **external tasks**, and they pull the work. Then, they do the actual work and notify the process engine of works completion. - -### External tasks - -An **external task** is a task that waits to be completed by some external service worker without explicitly calling that service. It's configured by declaring a **topic** (which characterizes the type of the service). The Camunda API must be polled to retrieve open external tasks for a certain service's topic and must be informed about the completion of a task: - -![External task pattern](invoking-services-from-the-process-assets/external-task-pattern.png) - -The interaction with the external task API can be done in two different ways: - -- Use [Camunda's external task client libraries](https://docs.camunda.org/manual/latest/user-guide/ext-client/) for [Java](https://github.com/camunda/camunda-external-task-client-java) or [Node.js](https://github.com/camunda/camunda-external-task-client-js). These libraries make it very easy to implement your external task worker. - -- Create your own client for Camunda's REST API based on the [Camunda OpenAPI specification](https://docs.camunda.org/manual/latest/reference/rest/openapi/), probably via code generation. This approach allows you to generate code for every programming language and also covers the full REST API, not only external tasks. - -Using external tasks comes with the following advantages: - -- **Temporal decoupling**: The pattern can replace a message queue between the service task (the "consumer") and the service implementation (the "provider"). It can eliminate the need for operating a dedicated message bus while keeping the decoupling that messaging would provide. - -- **Polyglot architectures**: The pattern can be used to integrate .NET based services, for example, when it might not be that easy to write Java delegates to call them. Service implementations are possible in any language that can be used to interact with a REST API. - -- **Better scaling**: The pattern allows you to start and stop workers as you like, and run as many of them as you need. By doing so, you can scale each service task (or to be precise, each "topic") individually. - -- **Connect cloud and on-premises**: The pattern supports you in running Camunda somewhere in the cloud (as our customers often do), because you can still have services on-premises, as they can now query their work via REST over SSL, which is also quite firewall-friendly. 
- -- **Avoid timeouts**: The pattern allows you to asynchronously call long-running services, which eventually block for hours (and would therefore cause transaction and connection timeouts when being called synchronously). - -- **Run services on specialized hardware**: Each worker can run in the environment that is best suited for the specific task of that worker; for example, CPU-optimized cloud instances for complex image processing and memory-optimized instances for other tasks. - -Learn more about external tasks in the [use guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) as well as the [reference](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/service-task/#external-tasks) and explore the video processing example shown above in greater detail by reading the [blog post](https://blog.camunda.org/post/2015/11/external-tasks/) about it. - -:::note -Camunda 8 focuses on the external task pattern, there are no Java delegates available as explained in [this blog post](https://blog.bernd-ruecker.com/how-to-write-glue-code-without-java-delegates-in-camunda-cloud-9ec0495d2ba5). -::: - -### Java delegates - -A Java delegate is a simple Java class that implements the Camunda `JavaDelegate` interface. It allows you to use **dependency injection** as long as it is constructed as a Spring or CDI bean and connected to your BPMN `serviceTask` via the `camunda:delegateExpression` attribute: - -```xml - - -``` - -Leverage dependency injection to get access to your _business service_ beans from the delegate. Consider a delegate to be a semantical part of the process definition in a wider sense: it is taking care of the nuts and bolts needed to wire the business logic to your process. Typically, it does the following: - -1. Data Input Mapping -2. Calling a method on the business service -3. Data Output Mapping - -:::note -Avoid programming business logic into Java delegates. Separate this logic by calling one of your own classes as a business service, as shown below. -::: - -```java -@Named -public class TweetPublicationDelegate implements JavaDelegate { - - @Inject - private TweetPublicationService tweetPublicationService; - - public void execute(DelegateExecution execution) throws Exception { - String tweet = new TwitterDemoProcessVariables(execution).getTweet(); // <1> - // ... - try { - tweetPublicationService.tweet(tweet); // <2> - } catch (DuplicateTweetException e) { - throw new BpmnError("duplicateMessage"); // <3> - } - } - //... -``` - -1 - -Retrieving the value of this process variable belongs to what we call the **input mapping** of the delegate code, and is therefore considered to be part of the wider process definition. - -2 - -This method executes process engine-independent **business logic**. It is therefore not part of the wider process definition anymore and placed in a separate business service bean. - -3 - -This exception is process engine-specific and therefore typically not produced by your business service method. It's part of the **output mapping** that we need to translate the business exception to the exception needed to drive the process - again code being part of the "wider" process definition and to be implemented in the Java delegate. 
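For illustration, the `TweetPublicationService` referenced above is then an ordinary bean that knows nothing about Camunda. A minimal sketch (the `callTwitterApi` helper stands in for whatever Twitter/HTTP client you actually use, and the exception's constructor argument is an assumption; only the exception type is taken from the delegate example above):

```java
@Named
public class TweetPublicationService {

  // Plain business logic: no Camunda classes, no process variables, no BpmnError in here
  public void tweet(String tweet) {
    boolean duplicate = callTwitterApi(tweet); // assumption: returns true if the tweet was already published
    if (duplicate) {
      throw new DuplicateTweetException(tweet); // business exception, translated to a BpmnError by the delegate
    }
  }

  private boolean callTwitterApi(String tweet) {
    // call the real service here
    return false;
  }
}
```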
In case you want to create Java delegates that are **reusable** across process definitions, leverage [field injection](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/#field-injection) to pass configuration from the BPMN process definition to your Java delegate.

One advantage of using Java delegates is that, if you develop in Java, this is a very simple way to write code and connect it with your process model, especially in embedded engine scenarios.

## Selecting the implementation approach

### General recommendation

In general, we recommend using _external tasks_, as this establishes an architecture and mindset that makes it easier to [leverage Camunda 8](/guides/migrating-from-camunda-7/migration-readiness.md#prepare-for-smooth-migrations) later. This typically outweighs the following downsides of external tasks:

- A slightly increased complexity for Java projects, because they have to handle separate Java clients.
- A slightly increased overhead compared to Java delegates, as all communication with the engine is remote, even if it runs in the same Java VM.

Only if the increased latency does not work for your use case, for example, because you need to execute a 30-task process synchronously to generate a REST response within a handful of milliseconds, should you consider Java delegates (or also consider switching to Camunda 8).

### Detailed comparison
| | Java Delegate | Expression | Connector | External Task | Script Task |
| --- | --- | --- | --- | --- | --- |
| | Call a named bean or Java class implementing the `JavaDelegate` interface. | Evaluate an expression using JUEL. | Use a configurable Connector (REST or SOAP services provided out-of-the-box). | Pull a service task into an external worker thread and inform the process engine of completion. | Execute a script inside the engine. |
| Use with BPMN elements | Service task, send task, message intermediate throw event | Service task, send task, message intermediate throw event | Service task, send task, message intermediate throw event | Service task, send task, message intermediate throw event | Script task |
| Communication Direction | Push work item by issuing a service call. | Push work item by issuing a service call. | Push work item by issuing a service call. | Pull task from a worker thread. | Push work item by executing a script. |
| Technology | Use your preferred framework, e.g. a JAX-WS client to call SOAP web services. | Use your preferred framework, e.g. a JAX-WS client to call SOAP web services. | Use the REST/SOAP Connector and a message template. | Use the Camunda External Task Client or the REST API to query for work. | Use a JSR-223 compliant scripting engine. |
| Implement via | Java (in the same JVM) | Expression language (can reference Java code) | BPMN configuration | BPMN configuration and external pull logic | E.g. Groovy, JavaScript, JRuby or Jython |
| Code Completion and Refactoring | ✔ | Maybe | – | ✔ | Depends on language / IDE |
| Compiler Checks | ✔ | – | – | ✔ | Depends on language / IDE |
| Dependency Injection | ✔ for named beans (when using Spring, CDI, ...); not for `camunda:class` | ✔ (when using Spring, CDI, ...) | – | – | – |
| Forces on Testing | Register mocks instead of original beans (named bean), or mock business logic inside the JavaDelegate (Java class). | Register mocks instead of original beans. | Difficult because of lack of dependency injection. | Easy, as the service is not actively called. | Consider external script resources. |
| Configure via | BPMN attribute `camunda:delegateExpression` (named bean) or `camunda:class` (Java class) on the `serviceTask` | BPMN attribute `camunda:expression` on the `serviceTask` | BPMN extension element `camunda:connector` on the `serviceTask` | BPMN attributes `camunda:type="external"` and `camunda:topic` on the `serviceTask` | BPMN element `script` or BPMN attribute `camunda:resource` on the `scriptTask` |
| Fault Tolerance and Retrying | Handled by Camunda retry strategies and incident management. | Handled by Camunda retry strategies and incident management. | Handled by Camunda retry strategies and incident management. | Lock tasks for a defined time. Use Camunda's retry and incident management. | Handled by Camunda retry strategies and incident management. |
| Scaling (having multiple Worker Threads) | Via load balancer in front of the service | Via load balancer in front of the service | Via load balancer in front of the service | Multiple worker threads can be started. | Via job executor configuration |
| Throttling (e.g. one request at a time) | Not possible out-of-the-box, requires own throttling logic. | Not possible out-of-the-box, requires own throttling logic. | Not possible out-of-the-box, requires own throttling logic. | Start or stop exactly as many worker threads as you need. | Not possible out-of-the-box. |
| Reusable Tasks | Use field injection | Use method parameters. | Build your own Connector | Reuse external task topics and configure the service via variables. | – |
| Use when | External tasks do not work for your use case | Defining small pieces of logic directly in BPMN | Defining a self-contained BPMN process without Java code | Always, if there is no reason against it | Defining BPMN processes without Java code |
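To make the external task column concrete, a minimal worker based on Camunda's Java external task client could look like the following sketch (the engine URL, topic name, variable names, and retry values are illustrative assumptions):

```java
ExternalTaskClient client = ExternalTaskClient.create()
    .baseUrl("http://localhost:8080/engine-rest") // assumption: local engine with the default REST path
    .asyncResponseTimeout(10000)                  // long polling
    .build();

client.subscribe("tweetPublication")              // assumption: topic configured on the service task
    .lockDuration(20000)
    .handler((externalTask, externalTaskService) -> {
      String tweet = externalTask.getVariable("tweet");
      try {
        // call your business service with the tweet here
        externalTaskService.complete(externalTask);
      } catch (Exception e) {
        // decrement retries so that an incident is eventually raised
        int retries = externalTask.getRetries() == null ? 3 : externalTask.getRetries() - 1;
        externalTaskService.handleFailure(externalTask, e.getMessage(), null, retries, 30000L);
      }
    })
    .open();
```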
    - -## Dealing with problems and exceptions - -When invoking services, you can experience faults and exceptions. See our separate best practices about: - -- [Understanding Camunda 7 transaction handling](../understanding-transaction-handling-c7/) -- [Dealing with problems and exceptions](../dealing-with-problems-and-exceptions/). - -## Example technology solutions - -### Calling SOAP web services - -When you need to call a SOAP web service, you will typically be given access to a machine-readable, WSDL-based description of the service. You can then use [JAX-WS](http://docs.oracle.com/javaee/6/tutorial/doc/bnayl.html) and (for example) Apache CXF's [JAX-WS client generation](http://cxf.apache.org/docs/maven-cxf-codegen-plugin-wsdl-to-java.html) to generate a Java Web Service Client by making use of a Maven plugin. That client can be called from within your JavaDelegate. - -Find a full example that uses JAX-WS client generation in the [Camunda examples repository](https://github.com/camunda/camunda-bpm-examples/tree/master/servicetask/soap-cxf-service). - -We typically prefer the client code generation over using the [Camunda SOAP Connector](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/), because of the better IDE support to do the data mapping by using code completion. You also can leverage standard testing approaches and changes in the WSDL will re-trigger code-generation and your compiler will check for any problems that arise from a changed interface. However, if you need a self-contained BPMN XML without any additional Java code, the connector could be the way to go. See [SOAP Connector example](https://github.com/camunda/camunda-bpm-examples/tree/master/servicetask/soap-service). - -### Calling REST web services - -If you need to call a REST web service, you will typically be given access to a human-readable documentation of the service. You can use standard Java REST client libraries like [RestEasy](http://resteasy.jboss.org) or [JAX-RS](http://docs.oracle.com/javaee/6/tutorial/doc/giepu.html) to write a Java REST service client that can be called from within a JavaDelegate. - -We typically prefer writing Java clients over the [Camunda REST Connector](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/), because of the better IDE support to do the data mapping by using code completion. This way, you also can leverage standard testing approaches. However, if you need a self-contained BPMN XML without any additional Java code, the Connector could be the way to go. See [REST Connector example](https://github.com/camunda/camunda-bpm-examples/tree/master/servicetask/rest-service). 
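As a rough sketch of such a hand-written client (the endpoint, variable names, and response handling are assumptions, not a prescribed Camunda API), a delegate using the JDK's built-in HTTP client could look like this:

```java
@Named("restInvoker")
public class InvokeRestServiceDelegate implements JavaDelegate {

  private final HttpClient httpClient = HttpClient.newHttpClient();

  @Override
  public void execute(DelegateExecution execution) throws Exception {
    // input mapping: read what the call needs from the process instance
    String invoiceId = (String) execution.getVariable("invoiceId");

    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("https://example.com/api/invoices/" + invoiceId)) // endpoint is an assumption
        .GET()
        .build();
    HttpResponse<String> response = httpClient.send(request, HttpResponse.BodyHandlers.ofString());

    // output mapping: store only the parts of the response the process really needs
    execution.setVariable("invoiceStatus", response.statusCode());
  }
}
```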
### Sending JMS messages

When you need to send a JMS message, use a plain Java client and invoke it from a service task in your process; for example, by using a Camunda Java delegate:

```java
@Named("jmsSender")
public class SendJmsMessageDelegate implements JavaDelegate {

  @Resource(mappedName = "java:/queue/order")
  private Queue queue;

  @Resource(mappedName = "java:/JmsXA")
  private QueueConnectionFactory connectionFactory;

  public void execute(DelegateExecution execution) throws Exception {
    String correlationId = UUID.randomUUID().toString(); // <1>
    execution.setVariable("jmsCorrelationId", correlationId);

    Connection connection = connectionFactory.createConnection(); // <2>
    Session session = connection.createSession(true, Session.AUTO_ACKNOWLEDGE);
    MessageProducer producer = session.createProducer(queue);

    TextMessage message = session.createTextMessage( // <3>
        "someOwnContent, e.g. Tweet Object Data, plus " + correlationId); // <4>
    producer.send(message);

    producer.close();
    session.close();
    connection.close();
  }

}
```

1

Consider what information you can use to correlate back an asynchronous response to your process instance. We typically prefer a generated, artificial UUID for communication, which the waiting process will also need to remember.

2

You will need to open and close JMS connections, sessions, and producers. Note that this example just serves to get you started. In real life, you will need to decide which connections you need to open, and of course, properly close.

3

You will need to create and send your specific message.

4

Add relevant business data to your message together with correlation information.

:::danger
This example just serves to get you started. In real life, consider whether you need to encapsulate the JMS client in a separate class and just wire it from the Java delegate. Also decide which connections you need to open and close properly, and at which points.
:::

On GitHub, you can find a more complete example for [asynchronous messaging with JMS](https://github.com/camunda/camunda-consulting/tree/master/snippets/asynchronous-messaging-jms).

### Using SQL to access the database

Use plain JDBC if you have simple requirements. Invoke your SQL statement from a service task in your process; for example, by using a Camunda Java delegate:

```java
@Named("simpleSqlDelegate")
public class SimpleSqlDelegate implements JavaDelegate {

  @Resource(name="customerDB")
  private javax.sql.DataSource customerDB;

  public void execute(DelegateExecution execution) throws Exception {
    PreparedStatement statement = null;
    Connection connection = null;

    try {
      connection = customerDB.getConnection();
      String query = "SELECT name " +                                       // <1>
                     "FROM customer " +
                     "WHERE id = ?";
      statement = connection.prepareStatement(query);
      statement.setString(1, execution.getProcessBusinessKey());            // <2>
      ResultSet resultSet = statement.executeQuery();
      if (resultSet.next()) {
        execution.setVariable("customerName", resultSet.getString("name")); // <3>
      }
    } finally {
      if (statement != null) statement.close();
      if (connection != null) connection.close();
    }
  }

}
```

1

You will need to define your SQL statement. The example uses a prepared statement, which is required for parameter binding and also useful if you execute the same statement many times.

2

You will typically need to feed parameters into your SQL query that are already known during execution of the process instance...
3

...and deliver back a potential result that may be needed later in the process.

:::danger
This example just serves to get you started. In real life, consider whether you need to encapsulate the JDBC code in a separate class and just wire it from the Java delegate. Also decide which connections you need to open and close properly, and at which point.
:::

Note that the Camunda process engine will have opened a database transaction for its own persistence purposes when calling the Java delegate shown above. You will need to make a conscious decision about whether you want to join that transaction (and set up your TX management accordingly).

Instead of invoking SQL directly, consider using [JPA](http://www.oracle.com/technetwork/java/javaee/tech/persistence-jsp-140049.html) if you have more complex requirements. Its object/relational mapping techniques will allow you to bind database tables to Java objects and abstract from specific database vendors and their specific SQL dialects.

### Calling SAP systems

To call an **SAP** system, you have the following options:

- Use REST or SOAP client calls, connecting Camunda to **SAP Netweaver Gateway** or **SAP Enterprise Services**.

- Use **SAP's Java Connectors (JCo)**. Consider using frameworks that make this easier, like the open source framework [Hibersap](https://github.com/hibersap).

### Executing a Groovy script

A script task is defined by specifying the script and the `scriptFormat`:

```xml
<scriptTask id="theScriptTask" scriptFormat="groovy">
  <script>println "hello world"</script>
</scriptTask>
```

For more extensive code (which should also be tested separately), consider using scripts external to your BPMN file and reference them with a `camunda:resource` attribute on the `scriptTask`.

Learn more about the many ways scripts can be used with Camunda from our [user guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/scripting/).

diff --git a/versioned_docs/version-8.2/components/best-practices/development/routing-events-to-processes.md b/versioned_docs/version-8.2/components/best-practices/development/routing-events-to-processes.md
deleted file mode 100644
index d8a22877a57..00000000000
--- a/versioned_docs/version-8.2/components/best-practices/development/routing-events-to-processes.md
+++ /dev/null
@@ -1,533 +0,0 @@
---
title: Routing events to processes
tags:
  - Event Handling
  - Process Instantiation
  - Message Handling
  - Correlation
  - SOAP
  - JMS
  - REST
  - Camel
  - ESB
  - API
  - BPMN Message Event
  - BPMN Signal Event
  - BPMN Timer Event
description: "To start a new process instance or to route a message to a running instance, choose the appropriate technology option to do so, like using the existing API."
---

To start a new process instance or to route a message to an already running instance, you have to choose the appropriate technology option to do so, such as using the existing API or customized possibilities including SOAP, AMQP, or Kafka. Leverage the ecosystem of your runtime (like Java or Node.js) and the frameworks of your choice to support the technologies or protocols you need.

## Choosing the right BPMN event

### Start events

Several BPMN start events can be used to start a new process instance.
- -| | None Event | Message Event | Timer Event | Signal Event | Conditional Event | -| ----------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------- | ---------------------------------------------------------------------- | --------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| | ![none start](/img/bpmn-elements/none-start.svg) | ![message start](/img/bpmn-elements/message-start.svg) | ![timer start](/img/bpmn-elements/timer-start.svg) | ![signal start](/img/bpmn-elements/signal-start.svg) | ![conditional start](/img/bpmn-elements/conditional-start.svg) | -| Use when | You have only **one start event** or a start event which is clearly standard. | You have to differentiate **several start events**. | You want to automatically start process instances **time controlled**. | You need to start **several process instances** at once. Rarely used. | When a specific **condition** is met, a process instance is created. | -| Supported for Execution | ✔ | ✔ | ✔ | Not yet supported in Camunda 8 | Determine occurrence of condition externally yourself and use the message event. | -| | [Learn more](/docs/components/modeler/bpmn/none-events/) | [Learn more](/docs/components/modeler/bpmn/message-events/) | [Learn more](/docs/components/modeler/bpmn/timer-events/) | | | - -
    - -1 - -This none start event indicates the typical starting point. Note that only _one_ such start event can exist in one process definition. - -2 - -This message start event is defined to react to a specific message type... - -3 - -...hence you can have _multiple_ message start events in a process definition. In this example, both message start events seems to be exceptional cases - for equivalent cases we recommend to just use message instead of none start events. - -### Intermediate events - -Several BPMN intermediate events (and the receive task) can be used to make a process instance _wait_ for and _react_ to certain triggers. - -| | Message Event | Receive Task | Timer Event | Signal Event | Conditional Event | -| ----------------------- | ---------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| | ![message intermediate](/img/bpmn-elements/message-intermediate.svg) | ![task receive](/img/bpmn-elements/task-receive.svg) | ![timer intermediate](/img/bpmn-elements/timer-intermediate.svg) | ![signal intermediate](/img/bpmn-elements/signal-intermediate.svg) | ![conditional intermediate](/img/bpmn-elements/conditional-intermediate.svg) | -| Use when | You route an incoming **message** to a specific and unique process instance. | As alternative to message events (to leverage BPMN boundary events, e.g. for timeouts). | You want to make your process instance wait for a certain (point in) **time**. | You route an incoming **signal** to all process instances waiting for it. | When a specific **condition** is met, the waiting process instance moves on. | -| Supported for Execution | ✔ | ✔ | ✔ | Not yet supported in Camunda 8 | Not yet supported in Camunda 8 | -| | [Learn more](/docs/components/modeler/bpmn/message-events/) | [Learn more](/docs/components/modeler/bpmn/receive-tasks/) | [Learn more](/docs/components/modeler/bpmn/timer-events/) | | - -Consider this example: - -
    - -1 - -This intermediate message event causes the process instance to wait unconditionally for a _specific_ event... - -2 - -...whereas the intermediate message event attached to the boundary of an activity waits for an _optional_ event, potentially arriving while we are occupied with the activity. - -## Reacting to process-internal events - -Events relevant for the process execution can occur from within the workflow engine itself. - -Consider the following loan application process - or at least the initial part with which the applicant's income is confirmed either via the employer or via the last income tax statement. - -
    - -1 - -In case the employer does not confirm the income within three business days, a **timer event** triggers and a human clerk now tries to contact the employer and investigate the situation. - -2 - -This could end with a successful income confirmation. However, it could also end with new findings regarding the applicant's employment status. We learn that the applicant is actually unemployed. - -3 - -In this case, a **conditional event** watching this data (e.g. a process variable changed by the human task) triggers and causes the process to reconsider the consequences of the new findings. - -:::caution Camunda 8 does not yet support conditional events -Camunda 8 does not yet [support the conditional event](/docs/components/modeler/bpmn/bpmn-coverage/). -::: - -A conditional event's condition expression is evaluated at it's "scope" creation time, too, and not just when variable data changes. For our example of a boundary conditional event, that means that the activity it is attached to could principally be left immediately via the boundary event. However, our process example evaluates the data via the exclusive gateway - therefore such a scenario is semantically impossible. - -## Routing events from the outside to the workflow engine - -Most events actually occur somewhere external to the workflow engine and need to be routed to it. The core workflow engine is by design not concerned with the technical part of receiving external messages, but you can receive messages and route them to the workflow engine by the following ways: - -- Using API: Receive the message by means of your platform-specific activities such as connecting to a AMQP queue or processing a REST request and then route it to the process. -- Using Connectors: Configure a Connector to receive messages such as Kafka records and rote it to the process. Note that this possibility works for Camunda 8 only. - -### Camunda 8 - -Camunda 8 only - -#### Starting process instance by BPMN process id - -If you have only one starting point (none start event) in your process definition, you reference the process definition by the ID in the BPMN XML file. - -:::note -This is the most common case and requires using the [`CreateProcessInstance`](../../../apis-tools/grpc.md#createprocessinstance-rpc) API. -::: - -Example in Java: - -```java -processInstance = zeebeClient.newCreateInstanceCommand() - .bpmnProcessId("invoice").latestVersion() - .send() - .exceptionally( throwable -> { throw new RuntimeException("Could not create new process instance", throwable); }); -``` - -Example in Node.js: - -```js -zbc.createWorkflowInstance({ - bpmnProcessId: "invoice", -}); -``` - -This starts a new process instance in the latest version of the process definition. You can also start a specific version of a process definition: - -```java -processInstance = zeebeClient.newCreateInstanceCommand() - .bpmnProcessId("invoice").version(5) - //... -``` - -or - -```js -zbc.createWorkflowInstance({ - bpmnProcessId: "invoice", - version: 6, -}); -``` - -You can also use [`CreateProcessInstanceWithResult`](../../../apis-tools/grpc.md#createprocessinstancewithresult-rpc) instead, if you want to block the execution until the process instance has completed. - -#### Starting process instance by message - -As soon as you have multiple possible starting points, you have to use named messages to start process instances. 
The API method is [`PublishMessage`](../../../apis-tools/grpc.md#publishmessage-rpc):

```java
client.newPublishMessageCommand()
    .messageName("message_invoiceReceived") // <1>
    .correlationKey(invoiceId) // <2>
    .variables( // <3>
        //...
    ).send()
    .exceptionally(throwable -> { throw new RuntimeException("Could not publish message", throwable); });
```

1

Message name as defined in the BPMN.

2

A correlation key has to be provided, even if a start event does not require correlation.

3

_Payload_ delivered with the message.

On one hand, you now do not have to know the key of the BPMN process. On the other hand, you cannot influence the version of the process definition used when starting a process instance by message.

The message name for start events should be unique for the whole workflow engine - otherwise you might experience side effects you did not intend (like starting other processes too).

### Camunda 7

Camunda 7 only

#### Starting process instances by key

If you have only one starting point, you reference the process definition by the ID in the BPMN XML file. This is the most common case.

```java
processEngine.getRuntimeService().startProcessInstanceByKey("invoice"); // <1>
```

1

Process _ID_ defined in the BPMN. The API calls this ID the "key" of the process.

See the [Process Engine API](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-api/) for more details.

#### Starting process instances by message

As soon as you have multiple possible starting points, you have to use named messages to start process instances.

```java
processEngine.getRuntimeService()
    .createMessageCorrelation("message_invoiceReceived") // <1>
    .setVariable("invoiceId", "123456") // <2>
    .correlate();
```

1

Message _name_ defined in the BPMN.

2

_Payload_ delivered with the message.

On one hand, you now do not have to know the key of the BPMN process. On the other hand, you cannot influence the version of the process definition used when starting a process instance by message.

The message name for start events has to be _unique_ for the whole workflow engine - otherwise the engine will not know which process to start.

#### Starting specific versions of process instances by ID

See [versioning process definitions](../../operations/versioning-process-definitions/) for details on versioning of process definitions.

By default, the workflow engine always starts the newest version of a process definition. You can start a specific version of a process definition by referencing the _ID_ (primary key) of that definition in the engine's database.

```java
ProcessDefinition processDefinition = processEngine().getRepositoryService()
    .createProcessDefinitionQuery()
    .processDefinitionKey("invoice")
    .processDefinitionVersion(17)
    .singleResult();
processEngine().getRuntimeService()
    .startProcessInstanceById(processDefinition.getId());
```

"By ID" does _NOT_ relate to the ID in the BPMN XML file (which is known as the "key" in the process engine). Instead, the ID relates to the _primary key_ in the Camunda database. You have no influence on this ID - it is created at deployment time.
- -#### Correlating messages to running process instances - -In case you want to route an event to a process instance already started, you will need to _correlate_ the message to the specific process instance waiting for it by matching some properties of the incoming message to some properties of your process instance: - -```java -runtimeService - .createMessageCorrelation("myMessage") // <1> - .processInstanceBusinessKey(myMessage.getOrderId().toString()) // <2> - .processInstanceVariableEquals("customerId", myMessage.getCustomerId()) // <3> - .correlate(); -``` - -1 - -A process instance matches if it is waiting for a message _named_ myMessage... - -2 - -...if it carries the orderId of the message as its _business key_... - -3 - -...and if a _process variable_ "customerId" also matches the expectations. - -As a best practice, correlate incoming messages based on _one_ unique artificial attribute (e.g. `correlationIdMyMessage`) created specifically for this communication: - -```java -runtimeService - .createMessageCorrelation("myMessage") - .processInstanceVariableEquals("correlationIdMyMessage", myMessage.getCustomCorrelationId()) - .correlate(); -``` - -Alternatively, you also have the option to select the process instance targeted by a message based on a query involving complex criteria, and then as a second step explicitly correlate the message to the selected process instance. - -The [API docs](https://docs.camunda.org/manual/latest/reference/bpmn20/events/message-events/#explicitly-triggering-a-message) show more details about the possibilities to trigger message events. - -#### Routings signals to process instances - -In the case of a [BPMN signal](https://docs.camunda.org/manual/latest/reference/bpmn20/events/signal-events/), a correlation to a specific process instance is neither necessary nor possible, as the mechanism is meant to inform _all_ process instances "subscribing" to a specific signal event: - -```java -runtimeService - .createSignalEvent("mySignal") // <1> - .setVariables(variables) // pass variables (optional) - .send(); -``` - -1 - -A process instance matches if it is waiting for or started by a signal _named_ `mySignal`. - -#### Starting process instances at arbitrary nodes - -There are use cases when you want to start a process instance at some point -other than the modeled start event: - -- **Testing**: It's always best to test a process instances in chunks, so you don't always need to start at the beginning. - -- **Migration**: When migrating to Camunda, you might have existing process - instances you want to migrate to a new Camunda process instances **in a defined state**. - -In these cases, you can start a process instance in arbitrary activities using the API. - -
    - -1 - -This example starts the Twitter process directly before the "Publish on Twitter" service task, meaning the service task will be executed: - -```java -processEngine.getRuntimeService().createProcessInstanceByKey("twitter") - .startBeforeActivity("service_task_publish_on_twitter") - .setVariable("content", "Know how to circumvent the review!") - .execute(); -``` - -See [User Guide: Starting a Process Instance at Any Set of Activities](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-concepts/#start-a-process-instance-at-any-set-of-activities). - -## Technology examples for messages sent by external systems - -In this section, we give examples for _technical messages_, which are received from -other systems, typically by leveraging technologies like e.g. SOAP, REST, JMS or -other. - -
    - -1 - -You will need a mechanism receiving that message and routing it to the workflow engine. That could be a direct API call to Camunda. It could also be an AMQP or Kafka consumer or a SOAP endpoint using the Camunda API internally. It could even be a hotfolder polled by some framework like Apache Camel. - -### Camunda 8 - -Camunda 8 only - -API examples for REST, AMQP, and Kafka are shown in [connecting the workflow engine with your world](../connecting-the-workflow-engine-with-your-world/). - -### Camunda 7 - -Camunda 7 only - -#### SOAP - -To start a process instance via a SOAP web service, write some Java code, e.g. by leveraging the @WebService annotation. - -```java -@WebService(name = "InvoiceService") <1> -public class InvoiceService { - - @Inject - private RuntimeService runtimeService; <2> - - public void startInvoice(String invoiceId) { <3> - Map<String, Object> variables = new HashMap<>(); - variables.put("invoiceId", invoiceId); - runtimeService.startProcessInstanceByKey("invoice", variables); - } - -} -``` - -1 - -The @WebService annotation is sufficient to provide the SOAP web service. - -2 - -You can inject the process engine or the process engine services when using -a proper dependency injection container like Spring or CDI. - -3 - -Decide if you prefer to use a business interface (as shown here) or a generic one like `startProcessInstance`. - -#### Messages - -To start a process instance by AMQP messages, write some Java code, e.g. using Spring to connect to RabbitMQ: - -```java -@RabbitListener(queues="invoice") -public void messageReceived(String invoiceId) { - Map<String, Object> variables = new HashMap<>(); - variables.put("invoiceId", invoiceId); - runtimeService.startProcessInstanceByKey("invoice", variables); -} -``` - -Or to start a process instance by a JMS message, you could use a message-driven bean in a Java EE container: - -```java -@MessageDriven(name = "InvoiceMDB", activationConfig = { - @ActivationConfigProperty(propertyName = "destinationType", - propertyValue = "javax.jms.Queue"), - @ActivationConfigProperty(propertyName = "destination", - propertyValue = "queue/invoice") - } -) -public class InvoiceMDB implements MessageListener { - - @Inject - private RuntimeService runtimeService; - - @Override - public void onMessage(Message message) { - try { - String invoiceId = ((TextMessage) message).getText(); - Map<String, Object> variables = new HashMap<>(); - variables.put("invoiceId", invoiceId); - runtimeService.startProcessInstanceByKey("invoice", variables); - } catch (Exception ex) { - throw new RuntimeException("Could not process JMS message", ex); - } - } -} -``` - -#### REST - -The provided REST API can be directly used to communicate with the workflow engine remotely. - -``` -POST /process-definition/key/invoice/start - -Request body: -{ - "variables": { - "invoiceId" : {"value" : "123456", "type": "String"} - } -} -``` - -More information can be found in the [Camunda 7 REST API Reference](https://docs.camunda.org/manual/latest/reference/rest/process-definition/post-start-process-instance/). - -#### Apache Camel (e.g. files in a drop folder) - -Use [Apache Camel](http://camel.apache.org/) if you want to use one of the existing [Camel Components](http://camel.apache.org/components.html) (a huge list). Consider leveraging the -[Camunda 7 Camel Community Extension](https://github.com/camunda-community-hub/camunda-bpm-camel). - -Starting a process instance can be done by a Camel route, e.g. 
when a file was placed into a drop folder: - -```java -from("file://c:/tmp") // some drop folder - .routeId("file") - .convertBodyTo(String.class) // convert content of file into String - .to("log:org.camunda.demo.camel?level=INFO&showAll=true&multiline=true") // optional logging - .to("camunda-bpm:start?processDefinitionKey=invoice"); // and start new process instance -``` - -In this case, the message transported within the Camel route is handed over to the process instance as a variable named `camelBody` by default, see [documentation](https://github.com/camunda-community-hub/camunda-bpm-camel#camunda-bpmstart-start-a-process-instance). - -#### Messages sent via an Enterprise Service Bus (ESB) - -If you have an ESB in your architecture, you may want to start process instances from your ESB. The best approach to do this depends on the concrete product you use. There are two basic possibilities how you do this: - -- **Java**: You call the engine inside the VM via the Java API, like it is done in - the Camel community extension mentioned above. -- **Remote**: You call the remote API (e.g. Camunda REST) to communicate with the - engine. You might also build your own endpoint (e.g. JMS or SOAP) as described - above. - -## Using the Camunda BPMN framework - -If you use the **Camunda BPMN Framework** as described in the book ["Real Life BPMN"](https://www.amazon.de/dp/B07XC6R17R/) you will typically have message start events (even if you only have a single start event) to connect the surrounding human flows to the technical flow via messages: - -
    - -1 - -This is a message start event, which allows you to show the collaboration between the human and the technical flows. However, it is only the starting point of the technical pool and could be a none start event in terms of execution. - -If there is _exactly one message start event_ for the whole process definition, it can also be treated as if it were a none start event when starting a process instance. - -## Sending messages to other processes - -If messages are exchanged between different processes deployed in the workflow engine, you have to implement the communication yourself by writing some code that starts a new process instance. - -
    - -1 - -Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN id in Java: - -```java -@JobWorker(type="routeInput") -public void routeInput(@ZeebeVariable String invoiceId) { - Map<String, Object> variables = new HashMap<>(); - variables.put("invoiceId", invoiceId); - zeebeClient.newCreateInstanceCommand() - .bpmnProcessId("invoice").latestVersion() - .variables(variables) - .send() - .exceptionally( throwable -> { throw new RuntimeException("Could not create new process instance", throwable); }); -} -``` - -2 - -Use some simple code on the sending side to correlate the message to a running process instance, for example in Java: - -```java -@JobWorker(type="notifyOrder") -public void notifyOrder(@ZeebeVariable String orderId, @ZeebeVariable String paymentInformation) { - Map<String, Object> variables = new HashMap<>(); - variables.put("paymentInformation", paymentInformation); - - zeebeClient.newPublishMessageCommand() - .messageName("MsgPaymentReceived") - .correlationKey(orderId) - .variables(variables) - .send() - .exceptionally( throwable -> { throw new RuntimeException("Could not publish message", throwable); }); -} -``` - -## Handling messages sent by a user - -Sometimes explicit "user tasks" are not an appropriate choice to involve a human user in a process: the user does not want to see a task in Tasklist, but rather wants the possibility to actively trigger some action right at the time when it becomes necessary from a business perspective. The difference is which event gives the _active trigger_. - -
    - -1 - -We did not model a user task in this process, as the user will not immediately be triggered. The user cannot do anything at the moment when the process enters this event. Instead, we made it wait for a "message" which is later triggered by a human user. - -2 - -The accountant actually receives the "external trigger" by actively looking at new payments in the bank account. - -3 - -Every new payment now has to be correlated to the right waiting process instance manually. In this situation it is often the better choice not to model a user task, but let the process wait for a "message" generated from a user. - -These scenarios are not directly supported by Camunda Tasklist. A custom search screen built for the accountant might allow you to see and find orders waiting for a payment. By interacting with such a screen, the accountant communicates with those process instances all at once. When hitting a 'Paid' button, a piece of custom code using the API must now correlate the user's message to the affected process instance(s). diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/boundary-event.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/boundary-event.png deleted file mode 100644 index e4c385eb6ba..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/boundary-event.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/events-vs-tasks.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/events-vs-tasks.png deleted file mode 100644 index 44b704a336b..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/events-vs-tasks.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png deleted file mode 100644 index 743f1351b7d..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/img-src.pptx b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/img-src.pptx deleted file mode 100644 index dbdc671b2ca..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/img-src.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/receive-task.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/receive-task.png deleted file mode 100644 index 102c8cf5d5f..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/receive-task.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-boundary-message-events.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-boundary-message-events.png deleted file mode 100644 index 8e738a1921c..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-boundary-message-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-event-based-gateway.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-event-based-gateway.png deleted file mode 100644 index c34a0ef05e8..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-event-based-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-event-subprocess.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-event-subprocess.png deleted file mode 100644 index 602976e1dc7..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-event-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-gateway.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-gateway.png deleted file mode 100644 index be43f884ca0..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/response-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/send-and-receive-task.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/send-and-receive-task.png deleted file mode 100644 index b566551a3aa..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/send-and-receive-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/send-task.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/send-task.png deleted file mode 100644 index c403e9978f9..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/send-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/service-task.png b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/service-task.png deleted file mode 100644 index 3b3340e9da3..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/service-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/synchronous-ack.png 
b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/synchronous-ack.png deleted file mode 100644 index ed6227643cc..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns-assets/synchronous-ack.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns.md b/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns.md deleted file mode 100644 index bded871f4b5..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/service-integration-patterns.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: "Service integration patterns with BPMN" -description: "When integrating systems and services, you can choose between various modeling possibilities in BPMN." ---- - -When integrating systems and services, you can choose between various modeling possibilities in BPMN. This practice will give you an overview and advice on how to decide between alternatives. - -You will see that service tasks in general are a good choice, but there are also situations where you might want to switch to send and receive tasks or events. - -## Understanding communication patterns - -Let's briefly examine the three typical communication patterns to integrate systems: - -- **Request/response using synchronous communication styles**: You use a synchronous protocol, like HTTP, and block for the result. -- **Request/response using asynchronous communication styles**: You use asynchronous communication, for example, by sending messages via a message broker, but wait for a response message right after. Technically, these are two independent asynchronous messages, but the sender blocks until the response is received, hence logically making it a request/response. -- **Asynchronous messages or events:** If a peer service needs a long time to process a request, the response is much later than the request, say hours instead of milliseconds. In this case, the response is typically handled as a separate message. Additionally, some of your services might also wait for messages or events that are not connected to a concrete request, especially in event-driven architectures. - -The following table gives a summary of the three options: - -| | Synchronous request/response | Asynchronous request/response | Asynchronous messages or events | -| --------------------------------- | :--------------------------- | :---------------------------- | :------------------------------ | -| **Business level** | Synchronous | Synchronous | Asynchronous | -| **Technical communication style** | Synchronous | Asynchronous | Asynchronous | -| **Example** | HTTP | AMQP, JMS | AMQP, Apache Kafka | - -You can dive more into communication styles in the webinar [Communication Between Loosely Coupled Microservices](https://page.camunda.com/wb-communication-between-microservices) ([slides](https://www.slideshare.net/BerndRuecker/webinar-communication-between-loosely-coupled-microservices), [recording](https://page.camunda.com/wb-communication-between-microservices) and [FAQ](https://blog.bernd-ruecker.com/communication-between-loosely-coupled-microservices-webinar-faq-a02708b3c8b5)). - -## Integrating services with BPMN tasks - -Let’s look at using BPMN tasks to handle these communication patterns before diving into BPMN events later. 
- -### Service task - -The [service task](/docs/components/modeler/bpmn/service-tasks) is the typical element to implement synchronous request/response calls, such as REST, gRPC or SOAP. You should **always use service tasks for synchronous request/response**. - -![Service task](service-integration-patterns-assets/service-task.png) - -### Send task - -Technically, **send tasks behave exactly like service tasks**. However, the alternative symbol makes the meaning of sending a message easier to understand for some stakeholders. - -You **should use send tasks for sending asynchronous messages**, like AMQP messages or Kafka records. - -![Send task](service-integration-patterns-assets/send-task.png) - -There is some gray area whenever you call a synchronous service that then sends an asynchronous message. A good example is email. Assume your process does a synchronous request/response call to a service that then sends an email to inform the customer. The call itself is synchronous because it gives you a confirmation (acknowledgement, or ACK for short) that the email has been sent. Now is the "inform customer" task in your process a service, or a send task? - -![Asynchronous ACK](service-integration-patterns-assets/synchronous-ack.png) - -This question is not easy to answer and **depends on what your stakeholders understand more intuitively**. The more technical people are, the more you might tend towards a service task, as this is technically correct. The more you move towards the business side, the more you might tend to use a send task, as business people will consider sending an email an asynchronous message. - -In general, we tend to **let the business win** as it is vital that business stakeholders understand business processes. - -However, if you follow a microservice (or service-oriented architecture) mindset, you might argue that you don’t need to know exactly how customers are informed within the process. Hiding the information if the notification is synchronous or asynchronous is good to keep your process model independent of such choices, making it more robust whenever the implementation of the notification service changes. This is a very valid concern too, and might motivate for a service task. - -:::note -In case you can’t easily reach a conclusion, save discussion time and just use a service task. -::: - -You could also argue to use send tasks to invoke synchronous request/response calls when you are not interested in the response. However, this is typically confusing, and we do not recommend this. - -### Receive task - -A [receive task](/docs/components/modeler/bpmn/receive-tasks/) waits for an asynchronous message. Receive tasks **should be used for incoming asynchronous messages or events**, like AMQP messages or Kafka records. - -![Receive task](service-integration-patterns-assets/receive-task.png) - -Receive tasks can be used to receive the response in asynchronous request/response scenarios, which is discussed next. - -### Service task vs. send/receive task combo - -For asynchronous request/response calls, you can use a send task for the request, and a following receive task to wait for the response: - -![Send and receive task](service-integration-patterns-assets/send-and-receive-task.png) - -You can also use a service task, which is sometimes unknown even to advanced users. A service task can technically wait for a response that happens at any time, a process instance will wait in the service task, as it would in the receive task. 
- -![Service task](service-integration-patterns-assets/service-task.png) - -Deciding between these options is not completely straightforward. You can find a table listing the decision criteria below. - -As a general rule-of-thumb, we recommend using **the service task as the default option for synchronous _and_ asynchronous request/response** calls. The beauty of service tasks is that you remove visual clutter from the diagram, which makes it easier to read for most stakeholders. - -This is ideal if the business problem requires a logically synchronous service invocation. It allows you to ignore the technical details about the protocol on the process model level. - -The typical counter-argument is that asynchronous technical protocols might lead to different failure scenarios that you have to care about. For example, when using a separate receive task, readers of the diagram almost immediately start to think about what happens if the response will not be received. But this also has the drawback that now business people might start discussing technical concerns, which is not necessarily good. - -Furthermore, this is a questionable argument, as synchronous REST service calls could also time out. This is exactly the same situation, just hidden deeper in network abstraction layers, as every form of remote communication uses asynchronous messaging somewhere down in the network stack. On a technical level, you should always think about these failure scenarios. The talk [3 common pitfalls in microservice integration and how to avoid them](https://berndruecker.io/3-pitfalls-in-microservice-integration/) goes into more detail on this. - -On a business level, you should be aware of the business implications of technical failures, but not discuss or model all the nuts and bolts around it. - -However, there are also technical implications of this design choice that need to be considered. - -**Technical implications of using service tasks** - -You can keep a service task open and just complete it later when the response arrives, but **to complete the service task, you need the _job instance key_** from Zeebe. This is an internal ID from the workflow engine. You can either: - -- Pass it around to the third party service which sends it back as part of the response message. -- Build some kind of lookup table, where you map your own correlation information to the right job key. - -:::note -Later versions of Zeebe might provide query possibilities for this job key based on user controlled data, which might open up more possibilities. -::: - -Using workflow engine internal IDs can lead to problems. For example, you might cancel and restart a process instance because of operational failures, which can lead to a new ID. Outstanding responses cannot be correlated anymore in such instances. - -Or, you might run multiple workflow engines which can lead to internal IDs only being unique within one workflow engine. All of this might not happen, but the nature of an internal ID is that it is internal and you have no control over it — which bears some risk. - -In practice, however, using the internal job instance key is not a big problem if you get responses in very short time frames (milliseconds). Whenever you have more long-running interactions, you should consider using send and receive tasks, or build your own lookup table that can also address the problems mentioned above. - -This is also balanced by the fact that service tasks are simply very handy. 
The concept is by far the easiest way to implement asynchronous request/response communication. The job instance key is generated for you and unique for every message interchange. You don’t have to think about race conditions or idempotency constraints yourself. [Timeout handling and retry logic](/docs/components/concepts/job-workers#timeouts) is built into the service task implementation of Zeebe. There is also [a clear API to let the workflow engine know of technical or business errors](/docs/components/concepts/job-workers#completing-or-failing-jobs). - -**Technical implications of using send and receive tasks** - -Using send and receive tasks means using [the message concept built into Zeebe](/docs/components/concepts/messages). This is a powerful concept to solve a lot of problems around cardinalities of subscriptions, correlation of the message to the right process instances, and verification of uniqueness of the message (idempotency). - -When using messages, you need to provide the correlation id yourself. This means that the correlation id is fully under your control, but it also means that you need to generate it yourself and make sure it is unique. You will most likely end up with generated UUIDs. - -You can leverage [message buffering](/docs/components/concepts/messages#message-buffering) capabilities, which means that the process does not yet need to be ready to receive the message. You could, for example, do other things in between, but this also means that you will not get an exception right away if a message cannot be correlated, as it is simply buffered. This leaves you in charge of dealing with messages that can never be delivered. - -Retries are not built in, so you need to model a loop if you want to retry the initial service call when no response is received. And (at least in the current Zeebe version), there is no possibility to trigger error events for a receive task, which means you need to model error messages as response payload or separate message types — both are discussed later in this practice. - -A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls can improve your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/contact) to discuss such a scenario in more depth. - -**Summary and recommendations** - -The following table summarizes the possibilities and recommendations. 
- -| Case | Synchronous request/response | Synchronous request/response | Asynchronous request/response | Asynchronous request/response | -| :--------------------- | :-------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| BPMN element | Service task | Send task | Service task | Send + receive task | -| | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send task](/img/bpmn-elements/task-send.svg) | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send and receive task](/img/bpmn-elements/send-and-receive-task.png) | -| Technical implications | | Behaves like a service task | A unique correlation ID is generated for you. You don’t have to think about race conditions or idempotency. Timeout handling and retry logic are built-in. API to flag business or technical errors. | Correlation ID needs to be generated yourself, but is fully under control. Message buffering is possible but also necessary. Timeouts and retries need to be modeled. BPMN errors cannot be used. | -| Assessment | Very intuitive. | Might be more intuitive for fire and forget semantics, but can also lead to discussions. | Removes visual noise which helps stakeholders to concentrate on core business logic, but requires use of internal job instance keys. | More visual clutter, but also more powerful options around correlation and modeling patterns. | -| Recommendation | Default option, use unless it is confusing for business stakeholders (e.g. because of fire and forget semantics of a task). | Use for fire and forget semantics, unless it leads to unnecessary discussions, in this case use service task instead. | Use when response is within milliseconds and you can pass the Zeebe-internal job instance key around. | Use when the response will take time (> some seconds), or you need a correlation id you can control. | - -## Integrating services with BPMN events - -Instead of using send or receive **tasks**, you can also use send or receive **events** in BPMN. - -![Events vs tasks](service-integration-patterns-assets/events-vs-tasks.png) - -Let's first explore when you want to do that, and afterwards look into some more advanced patterns that become possible with events. - -### Tasks vs. events - -The **execution semantics of send and receive events is identical with send and receive tasks**, so you can express the very same thing with tasks or events. - -However, there is one small difference that might be relevant: **only tasks can have boundary events**, which allows to easily model when you want to cancel waiting for a message: - -![Boundary events](service-integration-patterns-assets/boundary-event.png) - -Despite this, the whole visual representation is of course different. In general, tasks are easier understood by most stakeholders, as they are used very often in BPMN models. - -However, in certain contexts, such as event-driven architectures, events might be better suited as the concept of events is very common. 
Especially, if you apply domain-driven design (DDD) and discuss domain events all day long, it might be intuitive that events are clearly visible in your BPMN models. - -Another situation better suited for events is if you send events to your internal reporting system besides doing “the real” business logic. Our experience shows that the smaller event symbols are often unconsciously treated as less important by readers of the model, leading to models that are easier to understand. - -| | Send task | Receive task | Send event | Receive event | -| :------------- | :----------------------- | :----------------------- | :---------------------------------------------------------------------------------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------- | -| Recommendation | Prefer tasks over events | Prefer tasks over events | Use only if you consistently use events over tasks and have a good reason for doing so (e.g. event-driven architecture) | Use only if you consistently use events over tasks and have a good reason for doing so (e.g. event-driven architecture) | - -:::note -The choice about events vs. commands also [needs to be reflected in the naming of the element](../../modeling/naming-bpmn-elements), as a task emphasizes the action (e.g. "wait for response") and the event reflects what happened (e.g. "response received"). -::: - -### Handling different response messages - -Very often the response payload of the message will be examined to determine how to move on in the process. - -![Gateway handling response](service-integration-patterns-assets/response-gateway.png) - -In this case, you receive exactly one type of message for the response. As an alternative, you could also use different message types, to which the process can react differently. For example, you might wait for the validation message, but also accept a cancellation or rejection message instead: - -![Boundary message event to capture different response messages](service-integration-patterns-assets/response-boundary-message-events.png) - -This modeling has the advantage that it is much easier to see the expected flow of the process (also called the happy path), with exceptions deviating from it. On the other hand, this pattern mixes receive tasks and events in one model, which can confuse readers. Keep in mind that it only works for a limited number of non-happy messages. - -To avoid the task/event mixture you could use a so-called event-based gateway instead, this gateway waits for one of a list of possible message types to be received: - -![Event based gateway to capture different response messages](service-integration-patterns-assets/response-event-based-gateway.png) - -We typically try to avoid the event-based gateway, as it is hard to understand for non-BPMN professionals. At the same time, it shares the downside of the first pattern with the decision gateway after the receive task: the happy path cannot be easily spotted. - -As a fourth possibility, you can add event subprocesses, which get activated whenever some event is received while the process is still active in some other area. In the above example, you could model the happy path and model all deviations as event subprocesses. - -![Event subprocess to capture different response messages](service-integration-patterns-assets/response-event-subprocess.png) - -This pattern is pretty handy, but also needs some explanation to people new to BPMN. 
It has one downside you need to know: once your process instance moves to the subprocess, you can’t easily go back to the normal flow. To some extent this problem can be solved by advanced modeling patterns like shown in the [allow for order cancellation anytime](../../modeling/building-flexibility-into-bpmn-models/#allow-for-order-cancellation-any-time) example. - -At the same time, the event subprocess has a superpower worth mentioning: you can now wait for cancellation messages in whole chunks of your process — it could arrive anytime. - -| | Receive task with boundary events | Payload and XOR-gateway | Event-based gateway | Event subprocess | -| ----------------- | ------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| | ![Boundary Events](service-integration-patterns-assets/response-boundary-message-events.png) | ![XOR Gateway](service-integration-patterns-assets/response-gateway.png) | ![Event-based Gateway](service-integration-patterns-assets/response-event-based-gateway.png) | ![Event Subprocess](service-integration-patterns-assets/response-event-subprocess.png) | -| Understandability | Easy | Very easy | Hard | Medium | -| Assessment | Limitation on how many message types are possible | Happy path not easily visible | | Might need some explanation for readers of the model | -| Recommendation | Use when it is important to see message types in the visual, limit to two boundary message events | Use when there are more response types or if the response type can be treated as a result | Try to avoid | Use if you need bigger scopes where you can react to events | - -### Message type on the wire != BPMN message type - -There is one important detail worth mentioning in the context of message response patterns: The message type used in BPMN models does not have to be exactly the message type you get on the wire. When you correlate technical messages, e.g. from AMQP, you typically write a piece of glue code that receives the message and calls the workflow engine API. This is described in [connecting the workflow engine with your world](../connecting-the-workflow-engine-with-your-world/), including a code example. In this glue code you can do various transformations, for example: - -- Messages on different message queues could lead to the same BPMN message type, probably having some additional parameter in the payload indicating the origin. -- Some message header or payload attributes could be used to select between different BPMN message types being used. - -It is probably not best practice to be as inconsistent as possible between technical message types and BPMN message types. Still, the flexibility of a custom mapping might be beneficial in some cases. - -## Hiding technical complexity behind call activities - -Whenever technical details of one service integration become complicated, you can think of creating a separate process model for the technicalities of the call and use a [call activity](/docs/components/modeler/bpmn/call-activities/) in the main process. 
- -An example is given in chapter 7 of [Practical Process Automation](https://processautomationbook.com/): - -![Hiding technical details behind call activity](service-integration-patterns-assets/hiding-technical-details-behind-call-activity.png) - -In the customer scenario, a document storage service was long-running, but could not do a real callback or response message for technical reasons (in short, firewall limitations). As a result, the document storage service needed to be regularly polled for the response. In the customer scenario, this was done by a "document storage adapter" process that leveraged workflow engine features to implement the polling every minute, and especially the persistent waiting in between. In the main business process, this technical adapter process was simply invoked via a call activity, meaning no technicalities bloated that diagram. diff --git a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/coverage.png b/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/coverage.png deleted file mode 100644 index ff6fa6582c5..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/coverage.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/img-src.pptx b/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/img-src.pptx deleted file mode 100644 index 3a2c617cd60..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/img-src.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/process-test-scope-example.png b/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/process-test-scope-example.png deleted file mode 100644 index 1c02046c8d8..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/process-test-scope-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/scopes.png b/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/scopes.png deleted file mode 100644 index b4c9210d5b9..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions-assets/scopes.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions.md b/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions.md deleted file mode 100644 index f3f411ae8fe..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/testing-process-definitions.md +++ /dev/null @@ -1,573 +0,0 @@ ---- -title: "Testing process definitions" -tags: - - Test / Unit Test - - Test / Integration Test - - Mock - - Exception - - Java Delegate - - JUnit -description: "Test your executable BPMN processes as they are software. If possible, do automated unit tests with a fast in-memory workflow engine." ---- - -Test your executable BPMN processes as they are software. If possible, do automated unit tests with a fast in-memory workflow engine. 
Before releasing, verify with integration tests close to your real-life environment, which might include human-driven, exploratory integration tests. - -This best practice uses the following process example: - -
    - -1 - -New tweets need to be reviewed before publication. - -2 - -The tweeting employee is notified about rejected tweets. - -3 - -Approved tweets get published. - -4 - -Duplicate tweets are rejected by Twitter and dealt with by the original author (e.g. rephrased) just to be reviewed again. - -## Testing scopes - -There are basically three typical test scopes used when building process solutions: - -1. **Unit tests**: Testing glue code or programming code you developed for your process solution. How to unit test your software itself is not discussed here, as this is a common practice for software development. - -2. **Process tests**: Testing the expected behavior of the process model, including glue code and specifically the data flowing through the process model. Those tests should run frequently, so they should behave like unit tests (quick turnaround, no need for external resources, etc.) - -3. **Integration tests**: Testing the system in a close-to-real-life-environment to make sure it is really working. This is typically done before releasing a new version of your system. Those tests include _human-driven_, _exploratory_ tests. - -![Scopes](testing-process-definitions-assets/scopes.png) - -## Writing process tests in Java - -:::caution Camunda 8 only -This section targets Camunda 8. Refer to the specific Camunda 7 section below if you are looking for Camunda 7.x. -::: - -This section describes how to write process tests as unit tests in Java. We are working on more information on how to write tests in other languages, like Node.Js or C#. - -When using Java, most customers use Spring Boot. While this is a common setup for customers, it is not the only one. Find some more examples of Java process tests in the README.md of the [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test) project. - -### Technical setup using Spring - -:::caution JUnit 5 -You need to use JUnit 5. Ensure you use JUnit 5 in every test class: the `@Test` annotation you import needs to be `org.junit.jupiter.api.Test`. -::: - -1. Use [_JUnit 5_](http://junit.org) as unit test framework. -2. Use [spring-zeebe](https://github.com/camunda-community-hub/spring-zeebe). -3. Use `@ZeebeSpringTest` to ramp up an in-memory process engine. -4. Use annotations from [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test/) to check whether your expectations about the state of the process are met. -5. Use mocking of your choice, e.g. [Mockito](http://mockito.org) to mock service methods and verify that services are called as expected. - -A test can now look like the following example. 
The complete source code is available on [GitHub](https://github.com/camunda-community-hub/camunda-cloud-examples/blob/main/twitter-review-java-springboot/src/test/java/org/camunda/community/examples/twitter/TestTwitterProcess.java): - -```java -@ZeebeSpringTest -class TestTwitterProcess { - - @Autowired - private ZeebeClient zeebe; - - @MockBean - private TwitterService twitterService; - - - @Test - void testTweetApproved() throws Exception { - // Prepare data input - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - // start a process instance - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - // And then retrieve the UserTask and complete it with 'approved = true' - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - // Now the process should run to the end - waitForProcessInstanceCompleted(processInstance); - - // Let's assert that it passed certain BPMN elements (more to show off features here) - assertThat(processInstance) - .hasPassedElement("end_event_tweet_published") - .hasNotPassedElement("end_event_tweet_rejected") - .isCompleted(); - - // And verify it caused the right side effects by calling the business methods - Mockito.verify(twitterService).tweet("Hello world"); - Mockito.verifyNoMoreInteractions(twitterService); - } -} -``` - -### Test scope and mocking - -In such a test case, you want to test the executable BPMN process definition, plus all the glue code which logically belongs to the process definition in a wider sense. Typical examples of glue code you want to include in a process test are: - -- Worker code, typically connected to a service task -- Expressions (FEEL) used in your process model for gateway decisions or input/output mappings -- Other glue code, for example, a REST API that does data mapping and delegates to the workflow engine - -In the example above, this is the worker code and the REST API: - -![Process test scope example](testing-process-definitions-assets/process-test-scope-example.png) - -Workflow engine-independent business code should _not_ be included in the tests. In the Twitter example, the `TwitterService` will be mocked, and the `TwitterWorker` will still read process variables and call this mock. This way, you can test the process model, the glue code, and the data flow in your process test. - -The following code examples highlight the important aspects around mocking. - -The `PublishTweetWorker` is executed as part of the test. It does input data mapping **(1)** and also translates a specific business exception into a BPMN error **(2)**: - -```java -@Autowired -private TwitterService twitterService; - -@JobWorker( type = "publish-tweet") -public void handleTweet(@VariablesAsType TwitterProcessVariables variables) throws Exception { - try { - twitterService.tweet( - variables.getTweet() // 1 - ); - } catch (DuplicateTweetException ex) { // 2 - throw new ZeebeBpmnError("duplicateMessage", "Could not post tweet, it is a duplicate."); - } -} -``` - -The `TwitterService` is considered a business service (it could, for example, wrap the twitter4j API) and shall _not_ be executed during the test. This is why this interface is mocked: - -```java -@MockBean -private TwitterService tweetPublicationService; - -@Test -void testTweetApproved() throws Exception { - // ... 
- -// Using Mockito you can make sure a business method was called with the expected parameter - Mockito.verify(tweetPublicationService).tweet("Hello world"); -} - -@Test -void testDuplicate() throws Exception { - // Using Mockito you can define what should happen if a method is called, in this case an exception is thrown to simulate a business error - Mockito.doThrow(new DuplicateTweetException("DUPLICATE")).when(tweetPublicationService).tweet(anyString()); - //... -``` - -### Drive the process and assert the state - -For tests, you drive the process from waitstate to waitstate and assert that you see the expected process and variable states. For example, you might implement a test for the scenario that a tweet gets approved: - -```java -@Test -void testTweetApproved() throws Exception { - // Prepare data input - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - // start a process instance <1> - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - // And then retrieve the UserTask and complete it with 'approved = true' <2> - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - // Now the process should run to the end - waitForProcessInstanceCompleted(processInstance); - - // Let's assert that it passed certain BPMN elements (more to show off features here) <3> - assertThat(processInstance) - .hasPassedElement("end_event_tweet_published") - .hasNotPassedElement("end_event_tweet_rejected") - .isCompleted(); - - // And verify it caused the right side effects by calling the business methods <4> - Mockito.verify(twitterService).tweet("Hello world"); - Mockito.verifyNoMoreInteractions(twitterService); -} -``` - -1. Create a new process instance. You may want to use some glue code to start your process (e.g. the REST API facade), or also create helper methods within your test class. - -2. Drive the process to its next waitstate, e.g. by completing a waiting user task. You may extract boilerplate code into helper methods as shown below. - -3. Assert that your process is in the expected state. - -4. Verify with your mocking library that your business service methods were called as expected. - -This is the helper method used to verify the workflow engine arrived in a specific user task, and to complete that task, passing on some variables. 
As you can see, [a user task behaves like a service task with the type `io.camunda.zeebe:userTask`](/docs/components/modeler/bpmn/user-tasks/): - -```java -public void waitForUserTaskAndComplete(String userTaskId, Map variables) { - // Let the workflow engine do whatever it needs to do - inMemoryEngine.waitForIdleState(); - - // Now get all user tasks - List jobs = zeebe.newActivateJobsCommand().jobType(USER_TASK_JOB_TYPE).maxJobsToActivate(1).send().join().getJobs(); - - // Should be only one - assertTrue(jobs.size()>0, "Job for user task '" + userTaskId + "' does not exist"); - ActivatedJob userTaskJob = jobs.get(0); - // Make sure it is the right one - if (userTaskId!=null) { - assertEquals(userTaskId, userTaskJob.getElementId()); - } - - // And complete it passing the variables - if (variables!=null) { - zeebe.newCompleteCommand(userTaskJob.getKey()).variables(variables).send().join(); - } else { - zeebe.newCompleteCommand(userTaskJob.getKey()).send().join(); - } -} -``` - -Be careful not to "overspecify" your test method by asserting too much. Your process definition will likely evolve in the future and such changes should break as little test code as possible, but just as much as necessary! - -As a rule of thumb _always_ assert that the expected _external effects_ of your process really took place (e.g. that business services were called as expected). Additionally, carefully choose which aspects of _internal process state_ are important enough so that you want your test method to warn about any related change later on. - -### Testing your process in chunks - -Divide and conquer by _testing your process in chunks_. Consider the important chunks and paths the Tweet Approval Process consists of: - -
    - -1 - -The _happy path_: The tweet just gets published. - -2 - -The tweet gets rejected. - -3 - -A duplicated tweet gets rejected by Twitter. - -#### Testing the happy path - -The happy path is kind of the default scenario with a positive outcome, in which no exceptions, errors, or deviations are experienced. - -Fully test the happy path in one (big) test method. This makes sure you have one consistent data flow in your process. Additionally, it is easy to read and to understand, making it a great starting point for new developers to understand your process and process test case. - -You were already exposed to the happy path in our example, which is the scenario in which the tweet gets approved: - -```java -@Test -void testTweetApproved() throws Exception { - // Prepare data input - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - // start a process instance <1> - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - // And then retrieve the UserTask and complete it with 'approved = true' <2> - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - // Now the process should run to the end - waitForProcessInstanceCompleted(processInstance); - - // Let's assert that it passed certain BPMN elements (more to show off features here) <3> - assertThat(processInstance) - .hasPassedElement("end_event_tweet_published") - .hasNotPassedElement("end_event_tweet_rejected") - .isCompleted(); - - // And verify it caused the right side effects by calling the business methods <4> - Mockito.verify(twitterService).tweet("Hello world"); - Mockito.verifyNoMoreInteractions(twitterService); -} -``` - -#### Testing detours - -Test _forks/detours_ from the happy path as well as _errors/exceptional_ paths as chunks in separate test methods. This allows you to unit test in meaningful units. 
- -The tests for the exceptional paths are basically very similar to the happy path in our example: - -```java -@Test -void testRejectionPath() throws Exception { - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setBoss("Zeebot"); - - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", false)); - - waitForProcessInstanceCompleted(processInstance); - waitForProcessInstanceHasPassedElement(processInstance, "end_event_tweet_rejected"); - Mockito.verify(twitterService, never()).tweet(anyString()); -} -``` - -and: - -```java -@Test -void testDuplicateTweet() throws Exception { - // throw exception simulating duplicateM - Mockito.doThrow(new DuplicateTweetException("DUPLICATE")).when(twitterService).tweet(anyString()); - - TwitterProcessVariables variables = new TwitterProcessVariables() - .setTweet("Hello world") - .setAuthor("bernd") - .setBoss("Zeebot"); - - ProcessInstanceEvent processInstance = zeebe.newCreateInstanceCommand() // - .bpmnProcessId("TwitterDemoProcess").latestVersion() // - .variables(variables) // - .send().join(); - - waitForUserTaskAndComplete("user_task_review_tweet", Collections.singletonMap("approved", true)); - - waitForProcessInstanceHasPassedElement(processInstance, "boundary_event_tweet_duplicated"); - // TODO: Add human task to test case - waitForUserTaskAndComplete("user_task_handle_duplicate", new HashMap<>()); -} -``` - - - -## Integration tests - -Test the process in a close-to-real-life environment. This verifies that it really works before releasing a new version of your process definition, which includes _human-driven_, _exploratory_ tests. - -Clearly _define your goals_ for integration tests! Goals could be: - -- End user & acceptance tests -- Complete end-to-end tests -- Performance & load tests, etc. - -Carefully consider _automating_ tests on scope 3. You need to look at the overall effort spent on writing test automation code and maintaining it when compared with executing human-driven tests for your software project's lifespan. The best choice depends very much on the frequency of regression test runs. - -Most effort is typically invested in setting up proper test data in surrounding systems. - -Configure your tests to be dedicated integration tests, and separate them from unit or process tests. - -You can use typical industry standard tools for integration testing together with Camunda. - -## Technical setup and example using Camunda 7 - -:::caution Camunda 7 only -This section targets Camunda 7.x only. Refer to the previous sections if you are using Camunda 8. -::: - -Camunda 7 also has support for writing tests in Java. This section gives you an example, the basic ideas of test scopes and testing in chunks are also valid with Camunda 7. - -The technical setup for Camunda 7: - -1. Use [_JUnit_](http://junit.org) as unit test framework. -2. Use Camunda's [JUnit Extension](https://github.com/camunda/camunda-bpm-platform/tree/7.17.0/test-utils/junit5-extension) to ramp up an in-memory process engine where the [JobExecutor](https://docs.camunda.org/javadoc/camunda-bpm-platform/7.17/org/camunda/bpm/engine/test/Deployment.html) is turned off. -3. 
Use Camunda's [@Deployment](https://docs.camunda.org/javadoc/camunda-bpm-platform/7.17/org/camunda/bpm/engine/test/Deployment.html) annotation to deploy and un-deploy one or more process definitions under test for a single test method. -4. Use [camunda-bpm-assert](http://github.com/camunda/camunda-bpm-assert) to easily check whether your expectations about the state of the process are met. -5. Use mocking of your choice, e.g. [Mockito](http://mockito.org) to mock service methods and verify that services are called as expected. -6. Use Camunda's [MockExpressionManager](https://docs.camunda.org/javadoc/camunda-bpm-platform/7.17/org/camunda/bpm/engine/test/mock/MockExpressionManager.html) to resolve bean names used in your process definition without the need to ramp up the dependency injection framework (like CDI or Spring). -7. Use an [In-Memory H2 database](http://www.h2database.com/html/features.html#in_memory_databases) as default database to test processes on developer machines. If required, you can run the same tests on _multiple databases_, e.g. Oracle, DB2, or MS-SQL on a CI-Server. To achieve that, you can make use of (e.g. Maven) profiles and Java properties files for database configuration. - -Let's use the same example as above. - -A typical test case will look like this: - -```java -// ... -import static org.camunda.bpm.engine.test.assertions.ProcessEngineTests.*; // <4> -import static org.mockito.Mockito.*; // <5> - -@ExtendWith({ProcessEngineExtension.class, MockitoExtension.class}) // <1> <5> -class TwitterTest { - - @Mock // Mockito mock instantiated by MockitoExtension <5> - private TweetPublicationService tweetPublicationService; - - @BeforeEach - void setup() { - // ... - TweetPublicationDelegate tweetPublicationDelegate = new TweetPublicationDelegate(tweetPublicationService); - Mocks.register("tweetPublicationDelegate", tweetPublicationDelegate); // <6> - } - - @Test // <1> - @Deployment(resources = "twitter/TwitterDemoProcess.bpmn") // <3> - void testTweetApproved() { - // ... - } -// ... -} -``` - -The service task **Publish on Twitter** delegates to Java code: - -```xml - - -``` - -And this _Java delegate_ itself calls a business method: - -```java -@Named -public class TweetPublicationDelegate implements JavaDelegate { - - private final TweetPublicationService tweetPublicationService; - - @Inject - public TweetPublicationDelegate(TweetPublicationService tweetPublicationService) { - this.tweetPublicationService = tweetPublicationService; - } - - public void execute(DelegateExecution execution) throws Exception { - String tweet = new TwitterDemoProcessVariables(execution).getTweet(); // <1> - // ... - try { - tweetPublicationService.tweet(tweet); // <2> - } catch (DuplicateTweetException e) { - throw new BpmnError("duplicateMessage"); // <3> - } - } -// ... 
-``` - -The TweetPublicationService is mocked: - -```java -@Mock // 1 -private TweetPublicationService tweetPublicationService; - -@BeforeEach -void setup() { - // set up java delegate to use the mocked tweet service - TweetPublicationDelegate tweetPublicationDelegate = new TweetPublicationDelegate(tweetPublicationService); // <2> - // register a bean name with mock expression manager - Mocks.register("tweetPublicationDelegate", tweetPublicationDelegate); // <3> -} - -@AfterEach -void teardown() { - Mocks.reset(); // <3> -} -``` - -Now you can test the happy path to a published tweet: - -```java -@Test -@Deployment(resources = "twitter/TwitterDemoProcess.bpmn") -void testTweetApproved() { - // given - ProcessInstance processInstance = runtimeService().startProcessInstanceByKey( - "TwitterDemoProcess", - withVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET, TWEET)); // <1> - assertThat(processInstance).isStarted(); - // when - complete(task(), withVariables(TwitterDemoProcessConstants.VAR_NAME_APPROVED, true)); // <2> - // then - assertThat(processInstance) // <3> - .hasPassed("end_event_tweet_published") - .hasNotPassed("end_event_tweet_rejected") - .isEnded(); - verify(tweetPublicationService).tweet(TWEET); // <4> - verifyNoMoreInteractions(tweetPublicationService); -} -``` - -As a next step, you might want to test the path where a tweet gets rejected. You don't have to start at the start event, but can start anywhere in your process: - -```java -@Test -@Deployment(resources = "twitter/TwitterDemoProcess.bpmn") -void testTweetRejected() { - - // create a process instance directly at the point at which a tweet was rejected - ProcessInstance processInstance = runtimeService() - .createProcessInstanceByKey("TwitterDemoProcess") - .startBeforeActivity("service_task_publish_on_twitter") - .setVariables(variables) - .execute(); - assertThat(processInstance) - .isStarted() - .hasPassed("service_task_publish_on_twitter") - .hasVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET); - - // when - complete(task(), withVariables(TwitterDemoProcessConstants.VAR_NAME_APPROVED, false)); // <2> - - // then - assertThat(processInstance) - .hasPassed("end_event_tweet_rejected") - .hasNotPassed("end_event_tweet_published") - .isEnded(); - verifyZeroInteractions(tweetPublicationService); -} -``` - -You could also implement another `testTweetDuplicated()` to verify the logic in case a tweet turns out to be a duplicate and is rejected by Twitter. For this case, we attached an error event to the service task **Publish on Twitter**. In the BPMN XML we see an error event defined with an errorCode `duplicateMessage`. - -```xml - - - - -``` - -Above, we already saw the Java delegate code throwing the BPMN error exception with that code `duplicateMessage`. 
Here is the method testing for the case a tweet is duplicated: - -```java -@Test -@Deployment(resources = "twitter/TwitterDemoProcess.bpmn") -void testTweetDuplicated() { - // given - doThrow(new DuplicateTweetException()) // <1> - .when(tweetPublicationService).tweet(anyString()); - // when - ProcessInstance processInstance = rejectedTweet(withVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET, TWEET)); // <2> - // then - assertThat(processInstance) // <3> - .hasPassed("boundary_event_tweet_duplicated") - .hasNotPassed("end_event_tweet_rejected").hasNotPassed("end_event_tweet_published") - .isWaitingAt("user_task_handle_duplicate"); - verify(tweetPublicationService).tweet(TWEET); // <4> - verifyNoMoreInteractions(tweetPublicationService); - // when - complete(task()); // <5> - // then - assertThat(processInstance) // <6> - .isWaitingAt("user_task_review_tweet") - .hasVariables(TwitterDemoProcessConstants.VAR_NAME_TWEET) - .task().isAssignedTo("demo"); -} -``` diff --git a/versioned_docs/version-8.2/components/best-practices/development/understanding-transaction-handling-c7-assets/rollback.png b/versioned_docs/version-8.2/components/best-practices/development/understanding-transaction-handling-c7-assets/rollback.png deleted file mode 100644 index 5c9d44d0cca..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/understanding-transaction-handling-c7-assets/rollback.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/understanding-transaction-handling-c7.md b/versioned_docs/version-8.2/components/best-practices/development/understanding-transaction-handling-c7.md deleted file mode 100644 index b05d462ebfe..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/understanding-transaction-handling-c7.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Understanding Camunda 7 transaction handling -tags: - - Transaction - - ACID Transaction - - Incident - - Save Point -description: "Try to carefully study and fully understand the concepts of wait states (save points) acting as transaction boundaries for technical (ACID) transactions." ---- - -:::caution Camunda 7 only -This best practice targets Camunda 7.x only! Zeebe, the workflow engine used in Camunda 8, has a very different transactional behavior; please visit [dealing with problems and exceptions](../dealing-with-problems-and-exceptions/). -::: - -Try to carefully study and fully understand the concepts of wait states (save points) acting as _transaction boundaries_ for technical (ACID) transactions. In case of technical failures, they are by default rolled back and need to be retried either by the user or the background job executor. - -## Understanding technical (ACID) transactions in Camunda 7 - -Every time we use the Camunda 7 API to ask the workflow engine to do something (e.g. starting a process, completing a task, or signaling an execution), the engine will advance in the process until it reaches _wait states_ on each active path of execution, which can be: - -
- -1 - -_User tasks_ and _receive tasks_ - -2 - -All _intermediate catching events_ - -3 - -The _event based gateway_, which offers the possibility of reacting to one of multiple intermediate catching events - -4 - -Several further task types (_service_, _send_, _business rule_ tasks) - -5 - -[External Tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) are wait states, too. In this case, the _throwing message events_ might be implemented as external tasks. - -At a wait state, any further process execution must wait for some trigger. Wait states will therefore always be persisted to the database. The workflow engine is designed so that, within a _single database transaction_, the process engine covers the distance from one persisted wait state to the next. However, you have fine-grained control over these transaction boundaries by introducing additional _save points_ using the [`async before` and `async after` attributes](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#configure-asynchronous-continuations). A background job executor will then make sure that the process _continues asynchronously_. - -Learn more about [transactions in processes](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/) in general and [asynchronous continuations](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#why-asynchronous-continuations) in the user guide. - -:::note Technical vs. business transactions -Sometimes when we refer to "transactions" in processes, we refer to a very different concept, which must be clearly distinguished from technical database transactions. A _business transaction_ marks a section in a process for which 'all or nothing' semantics apply, but from a pure business perspective. This is described in [dealing with problems and exceptions](../dealing-with-problems-and-exceptions/). -::: - -## Controlling transaction boundaries - -### Using additional save points - -You have fine-grained control over transaction boundaries by introducing _save points_ in addition to [wait states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states), which are always save points. Use the `asyncBefore='true'` and `asyncAfter='true'` attributes in your process definition BPMN XML. The process state will then be persisted at these points and a background job executor will make sure that it is continued asynchronously. - 
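For illustration, a minimal sketch of how these attributes might look in the BPMN XML (element ids, names, and the delegate expression are hypothetical; the `camunda` namespace is assumed to be declared on the `definitions` element):

```xml
<!-- illustrative snippet only; ids, names, and the delegate bean are hypothetical -->
<userTask id="user_task_review_request" name="Review request"
          camunda:asyncAfter="true" />

<serviceTask id="service_task_charge_credit_card" name="Charge credit card"
             camunda:asyncBefore="true"
             camunda:delegateExpression="#{chargeCreditCardDelegate}" />
```

The save point introduced by `asyncAfter` lets the user complete the task without waiting for the subsequent service call, while `asyncBefore` ensures the service call runs in its own transaction handled by the job executor.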
- -1 - -A user task is an _obligatory wait state_ for the process engine. After the creation of the user task, the process state will be persisted and committed to the database. The engine will wait for user interaction. - -2 - -This service task is executed _"synchronously"_ (by default), in other words within the same thread and the same database transaction with which a user attempts to complete the "Write tweet" user task. Assume that this service fails when the language used is deemed too explicit: the database transaction rolls back and the user task will therefore remain uncompleted. The user must try again, e.g. by correcting the tweet. - -3 - -This service task is executed _"asynchronously"_. By setting the `asyncBefore='true'` attribute we introduce an additional save point at which the process state will be persisted and committed to the database. A separate job executor thread will continue the process asynchronously by using a separate database transaction. In case this transaction fails, the service task will be retried and eventually marked as failed - in order to be dealt with by a human operator. - -Pay special attention to the consequence of these save points with regard to retrying. A retry for a job may be required if there are _any failures_ during the transaction which follows the save point represented by the job. Depending on your subsequent transaction boundaries this may very well be much more than just the service task which you configured to be `asyncBefore='true'`! The process instance will always roll back to its last known save point, as discussed later. - -### Marking every service task as asynchronous - -A typical _rule of thumb_, especially when doing a lot of service orchestration, is to _mark every service task_ as _asynchronous_. - 
- -The downside is that the jobs slightly increase the overall resource consumption. But this is often worth it, as it has a couple of advantages for operations: - -- The process stops at the service task causing the specific error. -- You can configure a meaningful retry strategy for every service task. -- You can leverage the suspension features for service tasks. - -While it is not directly configurable to change Camunda 7's _default_ "async" behavior for all service tasks at once, you can achieve that by implementing a custom [ProcessEnginePlugin](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-plugins/) introducing a [BpmnParseListener](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/impl/bpmn/parser/BpmnParseListener.html) which adds async flags on-the-fly (possibly combined with custom [BPMN extension attributes](https://docs.camunda.org/manual/latest/user-guide/model-api/bpmn-model-api/extension-elements/) to control this behavior). You can find a [code example](https://github.com/camunda/camunda-bpm-examples/tree/master/process-engine-plugin/bpmn-parse-listener) for a similar scenario on GitHub. - -### Knowing typical do's and don'ts for save points - -Aside from a general strategy to mark service tasks as save points, you will often want to _configure typical save points_. - -**Do** configure a save point **after** - -- _User tasks_: This save point allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the wait state before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task wait state. In that case you might want to introduce a save point right after the validation step. - -- Service tasks (or other steps) causing _non-idempotent side effects_ (service task, script task, send task, message intermediate event, message end event): This save point makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a save point well before the affected step. End events should be included if the process can be called from other processes. - -- Service tasks (or other steps) executing _expensive computations_ (service task, script task, send task, message intermediate event, message end event): This save point makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a save point well before the affected step. End events should be included if the process can be called from other processes. - -- Receive tasks (or other steps) catching _external events_, possibly with payload (receive task, message intermediate event, signal intermediate event): This save point makes sure that an external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a save point well before the affected step. This also applies to external service tasks. - -**Do** configure a save point **before** - -- _Start events_ (none start event, message start event, signal start event, timer start event): This save point allows the engine to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. 
- -- Service tasks (or other steps) invoking _remote systems_ (service task, script task, send task, message intermediate event, message end event): This save point makes sure that you always transactionally separate the remote calls, which are more likely to fail, from anything that happens before such a step. If a service call fails you will see the process instance waiting in the corresponding service task in Cockpit. - -- _Parallel joins_ (parallel join, inclusive join, multi-instance task): Parallel joins synchronize separate process paths, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retried later on. Therefore such a save point makes sure that the path synchronization will be taken care of by Camunda's internal job executor. Note that for multi-instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. - -The Camunda JobExecutor works (by default) with [exclusive jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs), meaning that just one exclusive job per process instance may be executed at once. Hence, job executor threads will by default not cause optimistic locking exceptions at parallel joins "just by themselves", but other threads using the Camunda API might cause them - either for themselves or also for the job executor. - -**Don't** configure save points **before** - -- User tasks and other _wait states_ (user task, receive task, message intermediate event, signal intermediate event, timer intermediate event, event-based gateway), including steps configured as _external tasks_ (service task, script task, send task, message intermediate event, message end event): Such save points just introduce overhead, as [wait states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) themselves finish the transaction and wait for external intervention anyway. - -- _All forking_ and _exclusively joining gateways_ (exclusive gateway, parallel join, inclusive join): There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. - -### Adding save points automatically to every model - -If you agree that certain save points are important in all your process definitions, you can _add the required BPMN XML attributes automatically_ via a [Process Engine Plugin](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-plugins/) during deployment. Then you don't have to add this configuration to each and every process definition yourself. - -As a weaker alternative, the plugin could check for the existence of correct configuration and _log warnings or errors if save points are missing_. - -Take a look at [this example](https://github.com/camunda/camunda-consulting/tree/master/snippets/engine-plugin-add-save-points) for details. - -## Thinking about operations during modeling - -Make sure you also understand how to [operate Camunda 7](../../operations/operating-camunda-c7) - in particular by understanding _retry behavior_ and _incident management_ for service tasks. 
- -## Rolling back a transaction on unhandled errors - -It is important to understand that every _non-handled, propagated exception_ happening during process execution rolls back the current technical transaction. Therefore, the process instance will return to its last known _wait state_ (or save point). The following image visualizes that default behavior. - -![Rollback](understanding-transaction-handling-c7-assets/rollback.png) - -1 - -When we ask the Camunda engine to complete a task ... - -2 - -... it tries to advance the process within the borders of a technical transaction until it reaches wait states (or save points) again. - -3 - -However, in cases where a non-handled exception occurs on the way, this transaction is rolled back and the user task we tried to complete remains uncompleted. - -From the perspective of a user trying to complete the task, it appears _impossible_ to complete the task, because a subsequent service throws an exception. This can be unfortunate, so you may very well want to introduce an additional save point, e.g. here before the send task. - -```xml - -``` - -But preventing the user from completing the user task can also be just what you want. Consider, for example, _validating task form input_ via a subsequent service: - 
- -1 - -A user needs to provide data with a _user task_ form. When trying to complete the form ... - -2 - -... the subsequent synchronously executed _service task_ finds a validation problem and throws an exception which rolls back the transaction and leaves the user task uncompleted. - -Learn more about [rollback on exceptions](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#rollback-on-exception) and the reasoning for this design in the User Guide. - -## Handling exceptions via the process - -As an alternative to rolling back transactions, you can also handle those exceptions within the process; see [dealing with problems and exceptions](./dealing-with-problems-and-exceptions.md#handling-errors-on-the-process-level) for details. - -Just be aware of the following technical constraint: in case your transaction manager marks the current transaction _for rollback_ (as possible in Java transaction managers), handling the exception by the process is not possible as the workflow engine cannot commit its work in this transaction. diff --git a/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers-assets/order-fulfillment-process.png b/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers-assets/order-fulfillment-process.png deleted file mode 100644 index 53577a2431f..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers-assets/order-fulfillment-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers-assets/process-solution.png b/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers-assets/process-solution.png deleted file mode 100644 index f35b1784820..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers-assets/process-solution.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers.md b/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers.md deleted file mode 100644 index 5254dbb54bc..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/development/writing-good-workers.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -title: "Writing good workers" -description: "Service tasks within Camunda 8 require you to set a task type and implement job workers who perform whatever needs to be performed." ---- - -[Service tasks](/docs/components/modeler/bpmn/service-tasks/) within Camunda 8 require you to set a task type and implement [job workers](/docs/components/concepts/job-workers) who perform whatever needs to be performed. This best practice describes that you might want to: - -1. Write all glue code in one application, separating different classes or functions for the different task types. -2. Think about idempotency and read or write as little data as possible from/to the process. -3. Write non-blocking (reactive, async) code for your workers if you need to parallelize work. Use blocking code only for use cases where all work can be executed in a serialized manner. Don’t think about configuring thread pools yourself. 
- -## Organizing glue code and workers in process solutions - -Assume the following order fulfillment process, which needs to invoke three synchronous REST calls to the responsible systems (payment, inventory, and shipping) via custom glue code: - -![order fulfillment example](writing-good-workers-assets/order-fulfillment-process.png) - -Should you create three different applications with a worker for one task type each, or would it be better to process all task types within one application? - -As a rule of thumb, we recommend implementing **all glue code in one application**, which then is the so-called **process solution** (as described in [Practical Process Automation](https://processautomationbook.com/)). This process solution might also include the BPMN process model itself, deployed during startup. Thus, you create a self-contained application that is easy to version, test, integrate, and deploy. - -![Process solution](writing-good-workers-assets/process-solution.png) -Figure taken from [Practical Process Automation](https://processautomationbook.com/) - -Thinking of Java, the three REST invocations might live in three classes within the same package (showing only two for brevity): - -```java -public class RetrieveMoneyWorker { - @JobWorker(type = "retrieveMoney", autoComplete = false) - public void retrieveMoney(final JobClient client, final ActivatedJob job) { - // ... code - } -} -``` - -```java -public class FetchGoodsWorker { - @JobWorker(type = "fetchGoods", autoComplete = false) - public void fetchGoods(final JobClient client, final ActivatedJob job) { - // ... code - } -} -``` - -You can also pull the glue code for all task types into one class. Technically, it does not make any difference, and some people find that structure in their code easier. If in doubt, the default is to create one class per task type. - -There are exceptions when you might not want to have all glue code within one application: - -1. You need to specifically control the load for one task type, like _scaling it out_ or _throttling it_. For example, if one service task is doing PDF generation, which is compute-intensive, you might need to scale it much more than all other glue code. On the other hand, it could also mean limiting the number of parallel generation jobs due to licensing limitations of your third-party PDF generation library. -2. You want to write glue code in different programming languages, for example, because writing specific logic in a specific language is much easier (like using Python for certain AI calculations or Java for certain mainframe integrations). - -In these cases, you would spread your workers across different applications. Most often, you might still have a main process solution that will also still deploy the process model. Only specific workers are carved out. - -## Thinking about transactions, exceptions and idempotency of workers - -Make sure to visit [Dealing With Problems and Exceptions](../dealing-with-problems-and-exceptions/) to gain a better understanding of how workers deal with transactions and exceptions to the happy path. - -## Data minimization in workers - -If performance or efficiency matters in your scenario, there are two rules about data in your workers you should be aware of: - -1. Minimize what data you read for your job. In your job client, you can define which process variables you will need in your worker, and only these will be read and transferred, saving resources on the broker as well as network bandwidth. -2. 
Minimize what data you write on job completion. You should explicitly not transmit the input variables of a job upon completion, which might happen easily if you simply reuse the map of variables you received as input for submitting the result. - -Not transmitting all variables saves resources and bandwidth, but serves another purpose as well: upon job completion, these variables are written to the process and might overwrite existing variables. If you have parallel paths in your process (e.g. [parallel gateway](/docs/components/modeler/bpmn/parallel-gateways/), [multi-instance](/docs/components/modeler/bpmn/multi-instance/)) this can lead to race conditions that you need to think about. The less data you write, the smaller the problem. - -## Scaling workers - -If you need to process a lot of jobs, you need to think about optimizing your workers. - -Workers can control the number of jobs retrieved at once. In a busy system it makes sense to not only request one job, but probably 20 or even up to 50 jobs in one remote request to the workflow engine, and then start working on them locally. In a less utilized system, long polling is used to avoid delays when a job comes in. Long polling means the client’s request to fetch jobs is blocked until a job is received (or some timeout hits). Therefore, the client does not constantly need to ask. - -You will have jobs in your local application that need to be processed. The worst case in terms of scalability is that you process the jobs sequentially one after the other. While this sounds bad, it is still a valid approach for many use cases, as most projects do not need any parallel processing in the worker code because they simply do not care whether a job is executed a second earlier or later. Think of a business process that is executed only some hundred times per day and includes mostly human tasks — a sequential worker is totally sufficient. In this case, you can skip this section. - -However, you might need to do better and process jobs in parallel and utilize the full power of your worker’s CPUs. In such a case, you should read on and understand the difference between writing blocking and non-blocking code. - -### Blocking / synchronous code and thread pools - -With blocking code a thread needs to wait (is blocked) until something finishes before it can move on. In the above example, making a REST call requires the client to wait for IO — the response. The CPU could compute something else during this time period; the blocked thread, however, cannot do anything else. - -Assume that your worker needs to invoke 20 REST requests, each taking around 100ms; this will take 2s in total to process. Your throughput can’t go beyond 10 jobs per second with one thread. - -A common approach to scaling throughput beyond this limit is to leverage a thread pool. This works as blocked threads are not actively consuming CPU cores, so you can run more threads than CPU cores — since they are only waiting for I/O most of the time. In the above example with 100ms latency of REST calls, having a thread pool of 10 threads increases throughput to 100 jobs/second. - -The downside of using thread pools is that you need to have a good understanding of your code, thread pools in general, and the concrete libraries being used. Typically, we do not recommend configuring thread pools yourself. If you need to scale beyond the linear execution of jobs, leverage reactive programming. 
- -### Non-blocking / reactive code - -Reactive programming uses a different approach to achieve parallel work: extract the waiting part from your code. - -With a reactive HTTP client you will write code to issue the REST request, but then not block for the response. Instead, you define a callback as to what happens if the request returns. Most of you know this from JavaScript programming. Thus, the runtime can optimize the utilization of threads itself, without you the developer even knowing. - -### Recommendation - -In general, using reactive programming is favorable in most situations where parallel processing is important. However, we sometimes see a lack of understanding and adoption in developer communities, which might hinder adoption in your environment. - -## Client library examples - -Let’s go through a few code examples using Java, Node.js, and C#, using the corresponding client libraries. All [code is available on GitHub](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution) and a [walk through recording is available on YouTube](https://youtu.be/ZHKz9l5yG3Q). - -### Java - -Using the [Java Client](https://github.com/camunda/camunda-platform-get-started/tree/master/java) you can write worker code like this: - -```java -client.newWorker().jobType("retrieveMoney") - .handler((jobClient, job) -> { - //... - }).open(); -``` - -The [Spring integration](https://github.com/zeebe-io/spring-zeebe/) provides a more elegant way of writing this, but also [uses a normal worker from the Java client](https://github.com/zeebe-io/spring-zeebe/blob/master/client/spring-zeebe/src/main/java/io/camunda/zeebe/spring/client/config/processor/ZeebeWorkerPostProcessor.java#L56) underneath. In this case, your code might look like this: - -```java -@JobWorker(type = "retrieveMoney", autoComplete = false) -public void retrieveMoney(final JobClient client, final ActivatedJob job) { - //... -} -``` - -In the background, a worker starts a polling component and [a thread pool](https://github.com/camunda-cloud/zeebe/blob/d24b31493b8e22ad3405ee183adfd5a546b7742e/clients/java/src/main/java/io/camunda/zeebe/client/impl/ZeebeClientImpl.java#L179-L183) to [handle the polled jobs](https://github.com/camunda/camunda/blob/stable/8.2/clients/java/src/main/java/io/camunda/zeebe/client/impl/worker/JobPoller.java#L109-L111). The [**default thread pool size is one**](https://github.com/camunda-cloud/zeebe/blob/760074f59bc1bcfb483fab4645501430f362a475/clients/java/src/main/java/io/camunda/zeebe/client/impl/ZeebeClientBuilderImpl.java#L49). If you need more, you can enable a thread pool: - -```java -ZeebeClient client = ZeebeClient.newClientBuilder() - .numJobWorkerExecutionThreads(5) - .build(); -``` - -Or, in Spring Zeebe: - -```properties -zeebe.client.worker.threads=5 -``` - -Now, you can **leverage blocking code** for your REST call, for example, the `RestTemplate` inside Spring: - -```java -@JobWorker(type = "rest", autoComplete = false) -public void blockingRestCall(final JobClient client, final ActivatedJob job) { - LOGGER.info("Invoke REST call..."); - String response = restTemplate.getForObject( // <-- blocking call - PAYMENT_URL, String.class); - LOGGER.info("...finished. Complete Job..."); - client.newCompleteCommand(job.getKey()).send() - .join(); // <-- this blocks to wait for the response - LOGGER.info(counter.inc()); -} -``` - -Doing so **limits** the degree of parallelism to the number of threads you have configured. 
You can [observe in the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/java-blocking-thread-1.log) that jobs are executed sequentially when running with one thread ([the code is available on GitHub](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/java-worker/src/main/java/io/berndruecker/experiments/cloudclient/java/RestInvocationWorker.java)): - -``` -10:57:00.258 [pool-4-thread-1] Invoke REST call… -10:57:00.258 [ault-executor-0] Activated 32 jobs for worker default and job type rest -10:57:00.398 [pool-4-thread-1] …finished. Complete Job… -10:57:00.446 [pool-4-thread-1] …completed (1). Current throughput (jobs/s ): 1 -10:57:00.446 [pool-4-thread-1] Invoke REST call… -10:57:00.562 [pool-4-thread-1] …finished. Complete Job… -10:57:00.648 [pool-4-thread-1] …completed (2). Current throughput (jobs/s ): 2 -10:57:00.648 [pool-4-thread-1] Invoke REST call… -10:57:00.764 [pool-4-thread-1] …finished. Complete Job… -10:57:00.805 [pool-4-thread-1] …completed (3). Current throughput (jobs/s ): 3 -``` - -If you experience a large number of jobs, and these jobs are waiting for IO the whole time — as REST calls do — you should think about using **reactive programming**. For the REST call, this means for example the Spring WebClient: - -```java -@JobWorker(type = "rest", autoComplete = false) -public void nonBlockingRestCall(final JobClient client, final ActivatedJob job) { - LOGGER.info("Invoke REST call..."); - Flux<String> paymentResponseFlux = WebClient.create() - .get().uri(PAYMENT_URL).retrieve() - .bodyToFlux(String.class); - - // non-blocking, so we register the callbacks (for happy and exceptional case) - paymentResponseFlux.subscribe( - response -> { - LOGGER.info("...finished. Complete Job..."); - client.newCompleteCommand(job.getKey()).send() - // non-blocking, so we register the callbacks (for happy and exceptional case) - .thenApply(jobResponse -> { LOGGER.info(counter.inc()); return jobResponse;}) - .exceptionally(t -> {throw new RuntimeException("Could not complete job: " + t.getMessage(), t);}); - }, - exception -> { - LOGGER.info("...REST invocation problem: " + exception.getMessage()); - client.newFailCommand(job.getKey()) - .retries(1) - .errorMessage("Could not invoke REST API: " + exception.getMessage()).send() - .exceptionally(t -> {throw new RuntimeException("Could not fail job: " + t.getMessage(), t);}); - } - ); -} -``` - -This code uses the reactive approach to use the Zeebe API: - -``` -client.newCompleteCommand(job.getKey()).send() - .thenApply(jobResponse -> { - counter.inc(); - return jobResponse; - }) - .exceptionally(t -> { - throw new RuntimeException("Could not complete job: " + t.getMessage(), t); - }); -``` - -With this reactive glue code, you don’t need to worry about thread pools in the workers anymore, as this is handled under the hood by the frameworks or the Java runtime. [You can see in the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/java-nonblocking.log) that many jobs are now executed in parallel — and even by the same thread in a loop within milliseconds. - -``` -10:54:07.105 [pool-4-thread-1] Invoke REST call… -[…] 30–40 times! -10:54:07.421 [pool-4-thread-1] Invoke REST call… -10:54:07.451 [ctor-http-nio-3] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-7] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-2] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-5] …finished. 
Complete Job… -10:54:07.451 [ctor-http-nio-1] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-6] …finished. Complete Job… -10:54:07.451 [ctor-http-nio-4] …finished. Complete Job… -[…] -10:54:08.090 [pool-4-thread-1] Invoke REST call… -10:54:08.091 [pool-4-thread-1] Invoke REST call… -[…] -10:54:08.167 [ault-executor-2] …completed (56). Current throughput (jobs/s ): 56, Max: 56 -10:54:08.167 [ault-executor-1] …completed (54). Current throughput (jobs/s ): 54, Max: 54 -10:54:08.167 [ault-executor-0] …completed (55). Current throughput (jobs/s ): 55, Max: 55 -``` - -These observations yield the following recommendations: - -| | Blocking Code | Reactive Code | -| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------- | -| Parallelism | Some parallelism is possible via a thread pool, which is used by the client library. The default thread pool size is one, which needs to be adjusted in the config in order to scale. | A processing loop combined with an internal thread pool; both are details of the framework and runtime platform. | -| **Use when** | You don't have requirements to process jobs in parallel | You need to scale and have IO-intensive glue code (e.g. remote service calls like REST) | -| | Your developers are not familiar with reactive programming | This should be the **default** if your developers are familiar with reactive programming. | - -### Node.js client - -Using the [Node.js client](https://github.com/camunda/camunda-platform-get-started/tree/master/nodejs), your worker code will look like this, assuming that you use Axios to do REST calls (but of course any other library is fine as well): - -```js -zbc.createWorker({ - taskType: "rest", - taskHandler: (job, _, worker) => { - console.log("Invoke REST call..."); - axios - .get(PAYMENT_URL) - .then((response) => { - console.log("...finished. Complete Job..."); - job.complete().then((result) => { - incCounter(); - }); - }) - .catch((error) => { - job.fail("Could not invoke REST API: " + error.message); - }); - }, -}); -``` - -This is **reactive code**. A really interesting observation is that reactive programming is so deeply built into the JavaScript language that it is impossible to write blocking code; even code that looks blocking is still [executed in a non-blocking fashion](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/nodejs-blocking.log). - -Node.js code scales pretty well and there is no specific thread pool defined or necessary. The Camunda 8 Node.js client library also [uses reactive programming internally](https://github.com/camunda-community-hub/zeebe-client-node-js/blob/master/src/zb/ZBWorker.ts#L28). - -This makes the recommendation very straightforward: - -| | Reactive code | -| ------------ | ------------------------------ | -| Parallelism | Event loop provided by Node.js | -| **Use when** | Always | - -### C# - -Using the [C# client](https://github.com/camunda/camunda-platform-get-started/tree/master/csharp), you can write worker code like this: - -```csharp -zeebeClient.NewWorker() - .JobType("payment") - .Handler(JobHandler) - .HandlerThreads(3) - .Name("MyPaymentWorker") - .Open() -``` - -You can see that you can set a number of handler threads. Interestingly, this is a naming legacy. 
The C# client uses the [Dataflow Task Parallel Library (TPL)](https://docs.microsoft.com/en-us/dotnet/standard/parallel-programming/dataflow-task-parallel-library) to implement parallelism, so in reality the thread count configures the degree of parallelism allowed to TPL. Internally, this is implemented as a mixture of event loop and threading, which is an implementation detail of TPL. This is a great foundation to scale the worker. - -You need to provide a handler. For this handler, you have to make sure to write non-blocking code; the following example shows this for a REST call using the [HttpClient](https://docs.microsoft.com/en-us/dotnet/api/system.net.http.httpclient?view=net-5.0) library: - -```csharp -private static async void NonBlockingJobHandler(IJobClient jobClient, IJob activatedJob) -{ - Log.LogInformation("Invoke REST call..."); - var response = await httpClient.GetAsync("/"); - Log.LogInformation("...finished. Complete Job..."); - var result = await jobClient.NewCompleteJobCommand(activatedJob).Send(); - counter.inc(); -} -``` - -The code is executed in parallel, [as you can see in the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/dotnet-nonblocking.log). Interestingly, the following code runs even faster for me, but [that’s a topic for another discussion](https://stackoverflow.com/questions/21403023/performance-of-task-continuewith-in-non-async-method-vs-using-async-await): - -```csharp -private static void NonBlockingJobHandler(IJobClient jobClient, IJob activatedJob) -{ - Log.LogInformation("Invoke REST call..."); - var response = httpClient.GetAsync("/").ContinueWith( response => { - Log.LogInformation("...finished. Complete Job..."); - jobClient.NewCompleteJobCommand(activatedJob).Send().ContinueWith( result => { - if (result.Exception==null) { - counter.inc(); - } else { - Log.LogInformation("...could not do REST call because of: " + result.Exception); - } - }); - }); -} -``` - -In contrast to Node.js, you can also write **blocking code** in C# if you want to (or, more likely, it happens by accident): - -```csharp -private static async void BlockingJobHandler(IJobClient jobClient, IJob activatedJob) -{ - Log.LogInformation("Invoke REST call..."); - var response = httpClient.GetAsync("/").Result; - Log.LogInformation("...finished. Complete Job..."); - await jobClient.NewCompleteJobCommand(activatedJob).Send(); - counter.inc(); -} -``` - -The degree of parallelism is down to one again, [according to the logs](https://github.com/berndruecker/camunda-cloud-clients-parallel-job-execution/blob/main/results/dotnet-blocking-thread-1.log). So C# is comparable to Java, except that the typically used C# libraries are reactive by default, whereas Java still has many blocking libraries. The recommendations for C#: - -| | Blocking code | Reactive code | -| ------------ | -------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | -| Parallelism | Some parallelism is possible via a thread pool, which is used by the client library. | A processing loop combined with an internal thread pool; both are details of the framework and runtime platform. | -| **Use when** | **Rarely**, and only if you don't have requirements to process jobs in parallel or might even want to reduce the level of parallelism. 
| This should be the **default** | -| | Your developers are not familiar with reactive programming | You need to scale and have IO-intensive glue code (e.g. remote service calls like REST) | diff --git a/versioned_docs/version-8.2/components/best-practices/management/doing-a-proper-poc.md b/versioned_docs/version-8.2/components/best-practices/management/doing-a-proper-poc.md deleted file mode 100644 index 6c9eaa98abb..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/management/doing-a-proper-poc.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Doing a proper POC -tags: - - POC -description: "When evaluating your process automation approach, a proof of concept helps check if the process automation method and Camunda technology suit your needs." ---- - -When evaluating your process automation approach, a **proof of concept (POC)** is often a good step to check if the process automation methodology, the standards of BPMN and DMN, as well as the Camunda technology suit your needs. It is vital for a POC to make up your mind about your goals, to select a suitable process, and to prepare it and carry it out properly. - -## Understanding POC - -With a POC, you create a prototype application within no more than _three to five days_. The result of a POC is intended to be thrown away after having served its purpose: to try and show that your project will "fly" - including all aspects relevant for your specific situation. Such aspects might be: - -- Does Camunda _fit into your own architecture_? -- Does the _development approach_ fit into your own organization's approaches? -- How can you _model_ a specific business domain problem? -- Which kind of _know how_ is needed for the business and development teams? -- Which _effort_ will typically be needed for these kinds of projects? -- What are the impacts of process applications for _operations_? - -Often, it does make sense to implement such a POC together with Camunda, our partners, or specialized consultants to get quick results and focused feedback with respect to your specific challenges. However, you should always at least _co-develop_ the POC yourself to really understand what is going on. A team size of two to four people has proven to be quite optimal. - -## Defining and focusing on specific goals - -Before planning and carrying out a POC, you should consciously clarify the specific goals you want the POC to achieve. Typical goals might be: - -- To _verify_ the approach or the tool works under specific circumstances. -- To _show_ a case that _convinces_ internal stakeholders that the approach makes sense. -- To work through a complete _example_ and get specific _questions_ sorted out. -- To _learn_ about Camunda and _understand_ how it works. - -:::note -When selecting your goal, keep in mind the needs of all relevant stakeholders. -::: - -Do not just "collect" goals here, but try to make up your mind as to what really matters. Often, it is better to make a clear choice. For example, whether to show off a nice user interface at the end of the week or to have time to clarify all questions and to understand Camunda in depth, maybe even only using unit tests. - -## Defining a scope relevant to your business - -Select a _useful_ and _suitable_ process, case, or decision given your goals. - -Typically, it should... - -- Be _relevant_ to your _core business_ stakeholders. -- Make your organization's _return on investment_ on BPM more transparent. -- Be _feasible_ within the POC time box. 
- -Avoid political mine fields when selecting the process for your POC. - -## Planning the POC - -### Involving the right people - -It does make sense to implement a POC together with the software vendor and/or specialized consultants to get _quick results_ and _focused feedback_ with respect to your specific challenges. However, you should always at least _co-develop_ the POC to really understand what is going on. - -When planning for your team, consider that successful process modeling requires not just knowledge about the business and the targeted technical solution, but experience with BPMN modeling and methodology as well as analytical and moderation skills. We therefore typically bring together _business people_ with _IT staff_ and internal _business analysts_, _train them properly_ and let them continue to _learn on the job_ by carrying out the POC together with an _experienced consultant_. A team size of up to a maximum of _four people_ has proven to be quite optimal. - -In case you want to access _system interfaces_ during your POC, also determine who will be a technically knowledgeable and available _contact person_ for that system. To integrate into existing _user interfaces_, you might need help from colleagues within your organization. - -Define a _moderator_ to avoid too many detours and keep your POC on track. - -### Planning the technical environment - -:::caution Camunda 8 -This best practice targets Camunda 8. If you want to run a POC with Camunda 7, visit [deciding about your Camunda 7 stack](../../architecture/deciding-about-your-stack-c7/). -::: - -Make the necessary technological choices. Typically, POCs _run on Camunda 8 SaaS_ unless your goal is to validate that Camunda 8 runs in your Kubernetes environment in a self-managed fashion. A simple test account is often sufficient, unless your goal is to do load or performance tests, for which you need bigger clusters. Reach out to us in such cases. - -To access _third party systems_ during your POC, set up proper test systems for those and verify that they are usable. - -Prepare a location in a _version control system_ where you can develop your POC. Having a shared repository with history does make sense also (or especially) in a 2-day POC! Collaboration is simplified if the Camunda consultant can also access that repository. It may be worth just creating a repository with weaker access limitations for the POC. - -If your organization cannot easily set up a repository for the POC, or access for externals is impossible, you can create a cloud repository. We typically recommend [GitHub](https://github.com/); a free account is sufficient. It gives you a Git repository and you can invite all necessary people for the POC. Afterwards, you can delete that repository. - -### Selecting the time frame - -As already mentioned above, we typically plan no more than _a focused week_ for the POC workshop itself. Sometimes it also works well to split up the POC into two weeks of 2-3 days each, which allows everybody to reflect on the POC over the weekend. - -- Plan _1-3 days_ for _modeling_ the process with Camunda Modeler. -- Plan _2-3 days_ for _implementing_ the process solution. - -When selecting the exact time frame, consider all the people involved, as well as any technical preparation you need to do up front. You also might want to plan for further steps, like a few more things you implement yourself internally in a second follow up week. 
- -## Presenting the results - -Before presenting the results of your POC to a wider audience of stakeholders, select a _speaker_ who is comfortable with presenting, prepare a set of focused _slides_ illustrating your progress and the lessons learned, and _test_ your solution and presentation at least once up front. - -The speaker might also be your Camunda Consultant - they are used to presenting to a wide audience! - -## Checklists - -### Technical - -- _Cloud Access_: Make sure you have an account for Camunda 8 with an active subscription or trial account. - -- _Installations_: Make sure your _developer systems_, as well as any _target systems_ for the POC test and production you wish to use are set up. In particular install: - - - Camunda _Modeler_ (https://camunda.org/download/modeler/) - - Java, Maven, and your favorite IDE (e.g. Eclipse) - - Make sure _Maven_ runs and builds and it can access all necessary dependencies. [Download and build this project](https://github.com/camunda/camunda-platform-tutorials/tree/main/quick-start/microservice%20orchestration/worker-java) to verify that your build runs. - -- _Developer Computers_: For maximum productivity, all participating developers should use the computer with which they work every day. Avoid using computers from a training room or shared laptops unless they allow a remote connection to the developer's personal computer. If the developer's computers are neither portable nor remotely accessible consider conducting the POC in the regular office space of the developers. If your company network is restricting access to Maven and Git repositories on the internet, consider using laptops that are not connected to the company network. Similarly, you should not force the external consultants to work on one of your computers. They will be twice as productive on their laptops and not lose time with software setup, configuration, and access restrictions. Obviously, you do not have to connect the consultant's laptop to your company network. Internet access and a shared code repository are enough to collaborate. - -- _Files_ or _Version Control System_: Make sure we can easily exchange files and code during the POC, preferably via your own version control system (e.g. Git or SVN) or at least via shared folders, USB sticks, or email attachments. - -- _Interfaces_: Clarify which technical systems' interfaces you want to access during your POC, make any _documentation_ for those available to the whole POC team, and make sure there is a technically knowledgeable _contact person_ for the interface available to the team during the POC. Set up a _test system_ and verify that it is usable. Verify with Camunda that everything is clear to the team, in particular from a technological perspective. - -### Organizational - -Inform all POC team members and other relevant stakeholders about the following: - -- _Goals_ and the selected _scope_ for the POC -- _Start_ and _end times_, as well as any additional preparation/meet-up times -- _Names and roles_ of all involved _people_ - -- For onsite POCs: - - - Exact _location/address_ at which the POC is taking place as well as instructions about how to find together when arriving (for onsite POCs) - - _Projector_, white-board, and flip-chart availability - - _Internet_ availability for team members and external consultants - -- For remote POCs: - - Exact meeting setup. For example, links to the meeting room, passwords, etc. 
In case you can't easily host meetings for external participants, your Camunda consultant can setup a Zoom or Microsoft Teams call. - - Ideally, some chat capability (e.g. a temporary Slack account) - -Ideally, prepare a few _organizational_ and/or _project_ info slides to get everybody up to speed on day one of the workshop. diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-l.png b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-l.png deleted file mode 100644 index 2abd7ff0132..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-l.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-m.png b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-m.png deleted file mode 100644 index 28d4347856e..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-m.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-s.png b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-s.png deleted file mode 100644 index 9f29a0409bf..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-s.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xl.png b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xl.png deleted file mode 100644 index 4e2440c2e72..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xl.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xxl.png b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xxl.png deleted file mode 100644 index 45361b53ce5..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirt-xxl.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.png b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.png deleted file mode 100644 index b0f326a6594..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.pptx b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.pptx deleted file mode 100644 index d837939af1f..00000000000 Binary files 
a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path-assets/t-shirts.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path.md b/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path.md deleted file mode 100644 index 555e49a3edc..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/management/following-the-customer-success-path.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: "Following the customer success path" -tags: - - Project Management -description: "Following certain steps when evaluating and introducing process automation helps make it a success. Review the appropriate best practices at the right time." ---- - -Following certain steps when evaluating and introducing process automation helps to make it a success. Ensure you review the appropriate best practices at the right time. - -## Understanding the customer success path - -When introducing Camunda as a new process automation platform inside your company, the following process has shown to work best: - -
    - -1 - -_Evaluation_: Take the philosophy of the evaluated products into greater consideration than working solely with feature matrices. Practical experience can be invaluable. You might be interested in our [Whitepaper: "Camunda compared to alternatives"](https://page.camunda.com/wp-camunda-compared-to-alternatives). - -2 - -_Process selection_: It is very important to select a suitable pilot process. Use a relevant process where you can show the benefits of BPM, including a Return on Investment (ROI) calculation. However, avoid too big or too "political" processes to minimize the risk of failure due to avoidable reasons. Note that you can use this process in the proof of concept (PoC) or select a different process for the first PoC, depending on the goals you have. - -3 - -_Proof of Concept_ (PoC): Model the process to a high standard. It should be clear, understandable, and precise, as it will have high visibility. Include necessary technical proofs, like calling real services in your environment. Include human tasks in your process where appropriate. We suggest using Camunda Tasklist as a first step to save effort in developing your own tasklist, unless a tasklist is important for your overall proof. Include "eye candy" like reporting to make non-technical stakeholders happy. Concentrate on the aspects that are important to the proof, and prepare to throw away the code afterwards and start fresh for the pilot, as it is perfectly valid for early POCs to be "hacky" in order to keep the focus on the end goals. - -4 - -_Development_: Model the process with the same standard described for a PoC. It should be clear, understandable, and precise. Again, the reason for this is that it will be the most visible part of the project. Develop the process application in an iterative manner to learn fast. Do proper testing to achieve high quality. - -5 - -_Operations_: Prepare for real operations, which includes setting up the real hardware as well as securing and monitoring the platform. - -6 - -_Pilot review_ and _Pilot improvements_: Review the project after it has finished and gone live. Take some time to clean up, as the project normally serves as a "lighthouse" and "copy and paste" template for subsequent projects, so it is worth the effort. It's better to plan time for this phase than to try to make things perfect during early development, as you will have learned a lot once the pilot has run on the live system for a while. - -7 - -_Next processes_: Try to avoid doing too many projects in parallel in the beginning to allow new learning to influence your future work. If you have parallel pilots, organize knowledge sharing between the teams. Ideally, let the team of the first pilot directly implement a subsequent process. - -8 - -_Custom BPM Platform_: In bigger organizations, you will typically try to set up your custom BPM platform, meaning a common infrastructure for all upcoming Camunda projects. Try to do as little of this as possible during the first pilot and start building the platform afterwards, taking all learnings into account. At the same time, do what is necessary for the pilot project itself or for other stakeholders to feel comfortable (e.g. Enterprise Architecture). - -9 - -_Process architecture_: BPM initiatives often start by drafting a process landscape and capturing all relevant processes of the company. Try to avoid this, and do as little of it as possible during your first pilot project. Maybe do a quick process survey to capture relevant processes (by name) to identify a good candidate for the pilot. 
In particular, do not model all processes in your company in depth before you have experienced an "end-to-end" project, including automation with Camunda, yourself. Then, you will have gained a deeper understanding of the methodology and value around BPMN and DMN. - -## Estimating effort - -When starting your BPM project, it is often necessary to roughly estimate the expected effort. A process model can serve as a central artifact for estimation purposes. Avoid too fine-grained estimations, as they typically are not worth the effort. - -However, on a management level one often must have some estimations to secure budgets, get projects started, allocate needed resources, and communicate expected time frames. The success factor is to do estimations _on a very rough level_ and avoid spending too much time on details. More often than not, the details develop differently than expected anyway. - -We often see customers successfully estimate using _T-Shirt size categories (S, M, L, XL and XXL)_. Such an approach is sufficient to make roughly informed decisions about priority and return on investment. - -![T-Shirts](following-the-customer-success-path-assets/t-shirts.png) - -Having said that, your organization may demand that you _map_ such rough sizes to some measuring system already used; for example, _story points_ or _person days_. To preserve the rough character, consider mapping the sizes by using a series of sharply increasing numbers: - -| S | M | L | XL | XXL | -| --- | --- | --- | --- | --- | -| 2 | 5 | 13 | 50 | 200 | - -Much more important than concrete numbers is an educated gut feeling. Therefore, try to understand the influencing factors that determine most of the effort by implementing your lighthouse process. - -### Using the process model for estimation - -A process model can be seen as a central artifact for estimation purposes, as it indicates and visually maintains a lot of the influencing factors mentioned above. - -
    - -Here are the figures you could estimate: - -1. Setting up the development environment: **S** -2. Modeling and understanding requirements: **L** -3. Implementing the process solution: - -- Implement the UI for PDF upload: **S** 1 -- Implement forms: **S** 2 3 4 5 -- Implement the integration with the PDF archive: **S** 6 - -4. Going live: **M** - -Using the process model, you can also foresee potential effort drivers, for example: - -- The legacy archive is really hard to integrate. -- The tasks need to be integrated into an existing legacy task list, which might not be straightforward to do. -- The metadata from the PDF shall be extracted, and a specialized form be shown to the user. diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/building-flexibility-into-bpmn-models.md b/versioned_docs/version-8.2/components/best-practices/modeling/building-flexibility-into-bpmn-models.md deleted file mode 100644 index 9b46116af15..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/modeling/building-flexibility-into-bpmn-models.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Building flexibility into BPMN models -tags: - - BPMN -description: "Sometimes we need ways to build flexibility into process models to deal with operational problems or to allow for humans to intervene." ---- - -BPMN modeling primarily targets structured processes, often with the goal to automate as many steps as possible, increase efficiency, and decrease process execution costs. But sometimes we need ways to build flexibility into such process models to deal with expected or unexpected operational problems or to allow for humans to intervene. - -## Understanding the required symbols - -To build flexibility into BPMN process models, one must understand BPMN symbols and modeling techniques. After introducing the main symbols, we can demonstrate more concrete examples. - -### Use events as triggers - -BPMN events allow us to react to all kinds of information. We can use them to trigger flexible activities. In particular, BPMN events **catching** **messages**, **conditions**, and **timeouts** are useful in that context. - -
    - -:::caution Camunda 7 Only -Condition events are [not yet supported in Camunda 8](/components/modeler/bpmn/bpmn-coverage.md). -::: - -### Boundary events to add activities on triggers - -BPMN allows us to attach events to the boundary of activities to trigger some follow-up action. By modeling such an event as either **interrupting** or **non-interrupting**, we can decide to do the activities either _instead of_ the activity we attach the event to, or _in addition to_ it. - -
    - -### Subprocesses with boundary events - -By attaching boundary events not just to individual activities, but also to subprocesses, we can flexibly define the area or scope for which we want to trigger some flexible activities. - -
    - -1 - -While we are occupied with carrying out some area of activities, in a scope of our process... - -2 - -...an event might occur, which causes us... - -3 - -...to carry out this activity in addition to continuing with ordinary work. - -### Event subprocesses - -Sometimes we need to build in flexible activities which are carried out at any point in time. In such cases, we can leverage BPMN's event-based subprocesses. - -
    - -### Escalation events - -Sometimes we need highly flexible means to cancel scopes or trigger additional activities from within a scope. The BPMN escalation events can be particularly useful to implement such requirements. - -
    - -1 - -As soon as we are finished with the first activity inside the scope... - -2 - -...we inform the surrounding scope about that and trigger an additional, essential activity... - -3 - -...but also continue with our second activity to complete the subprocess. - -4 - -We can then already continue with the follow-up work regardless of whether that additional activity is already finished. - -### Terminate end events - -To build flexibility into process models, it is also useful to remember that the terminate end event just terminates the scope within which it is defined and therefore _not_ always the whole process instance. With that technique, it becomes possible to cancel some activities inside a subprocess while completing it successfully and leaving it via the typical outgoing path. - -
    - -1 - -As soon as one of our two activities achieves the result, we can cancel the other one... - -2 - -...and successfully complete the subprocess and normally continue with our follow-up work. - -## Examples - -### Allow proactive order status communication - -Assume that for an order to be validated, the customer must determine the delivery date before we can confirm the order. If the order is not acceptable—due to consistency issues or customer related issues—it is declined. - -Some of our orders might be so important that we want to ensure we keep customers happy, even if not everything runs smoothly on our side. - -
    - -1 - -Order managers can request proactive customer communication on demand. Assume they can communicate the reasons via a form, whereas the communication as such is carried out by the call center. - -2 - -On a regular basis, we check based on some rules, whether the order is so important that we proactively communicate why the order is not yet confirmed. Again, the communication is carried out by the call center. - -### Allow for order cancellation any time - -The customer might be allowed to request a cancellation until the order is confirmed. This request would have to be reviewed to determine whether we must accept the cancellation. - -
    - -1 - -Whenever the customer requests a cancellation until the order is confirmed, we review that request and decide whether we have to accept the cancellation or not. - -2 - -If we accept the cancellation, we must terminate the entire process. To do so, we need to use one trick: throw an error event that will end the current event subprocess, but not yet the order process. - -3 - -This leads to another subprocess to be triggered, and this one is interrupting. Now, the process instance is really cancelled. - -### Allow for order details to change, but repeat order validation - -:::caution Camunda 7 Only -Condition events are [not yet supported in Camunda 8](/components/modeler/bpmn/bpmn-coverage.md) -::: - -If the customer changes the order details, the order must be validated again. - -
    diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/choosing-the-dmn-hit-policy.md b/versioned_docs/version-8.2/components/best-practices/modeling/choosing-the-dmn-hit-policy.md deleted file mode 100644 index 1daea7852a6..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/modeling/choosing-the-dmn-hit-policy.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Choosing the DMN hit policy -tags: - - DMN -description: "Hit policies describe different ways (standardized by DMN) to evaluate the rules contained in a decision table." ---- - -**Hit policies** describe different ways (standardized by DMN) to evaluate the rules contained in a decision table. Different hit policies do not only lead to different results, but typically also require different modes of thinking and reason about the meaning of the entire table. Therefore, it's crucial to not just know the different DMN hit policies, but also to understand the motivations for their existence and the most typical cases for using them. - -## Knowing the DMN hit policy basics - -A decision table consists of several **rules**, typically represented as rows. When reading such a row, we look at certain **input values** and deduct a certain result represented by **output values**. When using the simplest hit policy **"unique"** (**U**), such rules do **not overlap**: only a single rule must match. - -
    - -1 - -We define an "input" value **season** here. For every single season ... - -2 - -... there is a **jacket** defined that we want to use - the "output" of the rules here. - -3 - -The hit policy "**Unique**" (indicated by the character **U**) enforces that rules do **not overlap**: only a single rule must match. - -Now consider that we build a decision table with **overlapping rules**. In other words, that means more than one rule may match a given set of input values. We then need one of the **alternative hit policy** indicators to unambiguously understand the decision logic according to which such rules are interpreted. - -The hit policy **indicator** is a single character shown in the decision table's top left cell, right beneath the decision's name. The character is the initial letter of one of the defined seven hit policies `U`**nique**, `A`**ny**, `P`**riority**, `F`**irst**, `C`**ollect**, `O`**utput order** and `R`**ule order**. Furthermore, the hit policy 'Collect' may also be used with one of four aggregation operators, actually giving us four more hit policies `C+` (**Sum**), `C<` (**Minimum**), `C>` (**Maximum**) and `C#` (**Number**). - -Eight of those eleven hit policies evaluate a decision table to a **single result**. Three hit policies evaluate a decision table to **multiple results**. - -### Single result decision tables - -Such tables either return the output of only one rule or aggregate the output of many rules into one result. The hit policies to be considered are: - -- `U`**nique**: Rules do not overlap. Only a single rule can match. - -- `F`**irst**: Rules are evaluated from top to bottom. Rules may overlap, but only the first match counts. - -- `P`**riority**: Rule outputs are prioritized. Rules may overlap, but only the match with the highest output priority counts. - -:::note -Camunda does not yet support the hit policy **priority**. In essence, priorities are specified as an ordered list of output values in decreasing order of priority. Such priorities are therefore independent from rule sequence! Though not yet supported, you can mimic that behavior using hit policy "(**C**)ollect" and determining a priority yourself; for example, by means of an execution listener attached to the end of your business rule task. -::: - -- `A`**ny**: Multiple matching rules must not make a difference: all matching rules must lead to the same output. - -**Collect** and **aggregate**: The output of all matching rules is aggregated by means of an operator: - -- `C+`**Sum**: Add up all the matching rules' distinct outputs. -- `C<`**Minimum**: Take the smallest value of all the matching rules' outputs. -- `C>`**Maximum**: Take the largest value of all the matching rules' outputs. -- `C#`**Number**: Return the number of all the matching rules' distinct outputs. - -### Multiple result decision tables - -**Multiple result** tables may return the output of multiple rules. The hit policies for such tables are: - -- `C`**ollect**: All matching rules result in an arbitrarily ordered list of all the output entries. - -- `R`**ule order**: All matching rules result in a list of outputs ordered by the sequence of those rules in the decision table. - -- `O`**utput order**: All matching rules result in a list of outputs ordered by their (decreasing) output priority. - -:::note -Camunda does not yet support the hit policy **output order**. In essence, output orders are specified as an ordered list of output values in decreasing order of priority. 
Such priorities are therefore independent from rule sequence! Though not yet supported, you can mimic that behavior using hit policy "(**C**)ollect" and determining an output order yourself; for example, by means of an execution listener attached to the end of your business rule task. -::: - -## Understanding DMN hit policy use cases - -Most situations can be addressed using different hit policies. In that case, the hit policy will have an effect on the readability and maintainability of the table. Often it is worth trying different varieties until you have a feel for what will work best. In practice, we often use the free [online simulator](https://consulting.camunda.com/dmn-simulator/) to experiment with various alternatives. - -### Unique: granting categories of customers a specified discount - -Hit policy "**Unique**" will typically make it easy to build a decision table, which ensures your rules are "complete" - in the sense that the rules do not just not overlap but cover all possible input values - so that you do not "forget" anything. - -
    - -1 - -The _input_ area of each row specifies a certain **segment** of possible input values. - -2 - -This row, for example, expresses that _long-time silver customers receive a 9% discount_. - -Such a use case fits the hit policy "**Unique**". For such use cases, it is an advantage that this hit policy makes your decision logic invalid in case you violate its requirement that your table rules never "overlap": after all, you must not produce ambiguous results. - -### First: accepting a customer based on hard criteria - -Having said that, the hit policy "**First**" can sometimes make it easier for an organization to reason about decision logic dealing with some criteria that are "harder" (more "clearcut") than others. Furthermore, it can help to make a decision table layout more compact and therefore easier to interpret. - -
    - -1 - -Assume that everybody in the organization knows that first rule: "Once on the blocklist, never again accepted." The layout and the hit policy of the decision table therefore support the organization's way of doing business: once we know that single fact about a customer, we don't need to think further. - -2 - -The following rules from rows 2-4 are expressed in an "Accept" manner and might change more often over time. The organization's way of thinking is literally "from top to bottom". Once we find an acceptance rule, we can deal with the customer. - -3 - -For execution in a decision engine, don't forget to add a rule not accepting any other customers as the last row. - -In scenarios dealing with **hard** **exclusion** and **inclusion** criteria, we often don't care that much if the rules overlap, but prefer to argue about very clearcut cases first and about more sophisticated ones later on. Furthermore, the organization's way of thinking and doing business might be better supported by a decision table using the hit policy **First**. - -Our experience so far tends to show that it can be more tricky and error-prone to argue about a **First** hit policy decision table than it might seem at first sight. Therefore, be especially careful and always test your logic in case you are dealing with sensitive business! - -### Collect: deciding which groups of people may review an order - -With hit policy **collect**, you do not care about the order or any interdependencies between your rules at all. Instead, you just "collect" independent rules and care about which rules are applicable to your specific case. - -Consider, for example, the question of "who is allowed" to carry out some action, such as reviewing and deciding about incoming orders: - -
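To make the collect semantics tangible, the following plain-Java sketch - purely illustrative, not engine code, and with made-up rule contents - mimics how such a table is evaluated: every matching rule contributes its output to the result list.

```java
import java.util.List;
import java.util.function.Predicate;

// Illustrative only: mimics how a "Collect" (C) table gathers the outputs
// of ALL matching rules into one list. The rules below are invented examples.
public class CollectHitPolicySketch {

  record Order(double sum, boolean newCustomer) {}

  record Rule(Predicate<Order> matches, String reviewerGroup) {}

  static final List<Rule> RULES = List.of(
      new Rule(o -> o.sum() > 1_000, "Sales"),       // larger orders
      new Rule(o -> o.newCustomer(), "Sales"),        // unknown customers
      new Rule(o -> o.sum() > 10_000, "Management")   // very large orders
  );

  // Hit policy "Collect": every matching rule contributes its output entry.
  static List<String> allowedReviewers(Order order) {
    return RULES.stream()
        .filter(rule -> rule.matches().test(order))
        .map(Rule::reviewerGroup)
        .toList();
  }

  public static void main(String[] args) {
    System.out.println(allowedReviewers(new Order(15_000, false))); // [Sales, Management]
    System.out.println(allowedReviewers(new Order(500, true)));     // [Sales]
  }
}
```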
    - -As a result of this decision table, we will either get `["Sales"]` or `["Management"]` or a list of both groups `["Sales", "Management"]`. - -We could use this information to route the order into the applicable group's task lists or control access rights of a configurable software solution, etc. Of course, you could at any time introduce more rules and, over time, also differentiate between more groups without changing your software solution. - -### Sum: accepting a customer based on soft criteria - -Hit policy "collect" may be combined with operators such as **Sum (C+)**, leading to very different use cases. A very typical one is the requirement to evaluate a case based on many factors influencing the overall result. - -Assume, for example, that we want to deal with customers we know nothing about. They receive a score of 0. But in case we know something about them, we also weigh in our knowledge: - -
    - -1 - -The overall creditworthiness is deduced from many factors. - -2 - -Here, for example, we give credit in case we have had good experiences with the customer in the past. - -3 - -A very low current income does not matter as long as the customer is not a stranger to us! - -4 - -On the other hand, as soon as a customer has proof of a good income, they receive five points for "reasonable" income as well as 10 points extra for good income. - -Even if we had a bad experience with a customer (which means they start from -15), we end up with an overall score of 0 in case the customer has a good income now, and start to accept the customer again. - -In scenarios dealing with **soft exclusion** and **inclusion** criteria, we need a mechanism to associate a weight with different scenarios. This is ideally supported by hit policy **Sum (C+)**. diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/creating-readable-process-models.md b/versioned_docs/version-8.2/components/best-practices/modeling/creating-readable-process-models.md deleted file mode 100644 index b762e9907da..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/modeling/creating-readable-process-models.md +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: Creating readable process models -tags: - - BPMN -description: "We create visual process models to better understand, discuss, and remember processes. Hence, it is crucial that models are easy to read and understand." ---- - -We create visual process models to better understand, discuss, and remember processes. Hence, it is crucial that models are easy to read and understand. The single most important thing is to use well-chosen labels. - -## Essential practices - -### Labeling BPMN elements - -Use [conventions for naming BPMN elements](naming-bpmn-elements.md); this will consistently inform the reader of the business semantics. The clarity and meaning of a process is often only as good as its labels. - -
    - -1 - -_Start event_ labels inform the reader of how the process is _triggered_. - -2 - -An _activity_ - labeled as "activity" - informs the reader of the piece of _work_ to be _carried out_. - -3 - -_Gateway_ labels clarify based on which condition(s) and along _which sequence flow_ the process proceeds. - -4 - -Labeled _boundary events_ clearly express in which cases a process execution might follow an _exceptional path_. - -5 - -Labeled _end events_ characterize end _results_ of the process from a business perspective. - -## Recommended practices - -### Modeling symmetrically - -Try to model symmetrically. Identify related splitting and joining gateways and form easily recognizable _visual_, possibly _nested_, _blocks_ with those gateways. - -
    - -1 - -The inclusive gateway splits the process flow into two paths which are ... - -2 - -... joined again with an inclusive gateway. Inside that block ... - -3 - -another exclusive gateway splits the process flow into two more paths which are ... - -4 - -... joined again with an exclusive gateway. - -By explicitly showing _pairs of gateways_ "opening" and "closing" parts of the process diagram, and by positioning such gateway pairs _as symmetrically as possible_, the readability of the process model is improved. The reader can easily recognize logical parts of the diagram and quickly jump to the parts they are currently interested in. - -### Modeling from left to right - -Model process diagrams _from left to right_. By carefully positioning symbols from left to right, according to the typical point in time at which they occur, one can improve the readability of process models significantly: - -
    - -Modeling from left to right supports the reading direction (for western audience) and supports the human field of vision - which prefers wide screens. - -### Creating readable sequence flows - -Consciously decide whether _overlapping sequence flows_ make your model more or less readable. On one hand, avoid overlapping sequence flows where the reader will not be able to follow the flow directions anymore. Use overlapping sequence flows where it is less confusing for the reader to see just one line representing several sequence flows leading to the same target. - -Avoid sequence flows _violating the reading direction_, meaning no outgoing flows on the left or incoming flows on the right of a symbol. - -
    - -1 - -The author could have made the five (!) sequence flows leading into the end event visible by separating them. However, by consciously choosing to partly overlap those flows, this model becomes less cluttered, therefore less confusing and easier to read. - -2 - -The author could have attached the sequence flow, leaving this task on its left. However, this would have decreased readability, because the flow connection violates the reading direction. The same applies to incoming flows on the right of a symbol. - -_Avoid flows crossing each other_ and _flows crossing many pools or lanes_, wherever possible. Rearrange the order of lanes and paths to make your sequence flows more readable. Oftentimes, removing lanes can improve readability! Rearrange the order of pools in a collaboration diagram to avoid message flows crossing pools as much as possible. Often, you will find a "natural" order of pools reflecting the order of first involvement of parties in the end-to-end process. This order will often also lead to a minimum of crossing lines. - -_Avoid very long (multi page) sequence flows_, especially when flowing against the reading direction. The reader will lose any sense of what such lines actually mean. Instead, use link events to connect points which are not on the same page or screen anymore. - -
    - -1 - -You see a throwing link event here, which... - -2 - -...directly links to a catching link event just as if the sequence flow had been connected directly. - -Avoid excessive use of link events. The example above serves to show the possible usage, but at the same time, it is too small to justify the use of link events in a real-world scenario! - -### Modeling explicitly - -Make your models easier to understand by modeling _explicitly_, which most often means to either completely avoid certain more "implicit" BPMN constructs, or at least to use them cautiously. Always consider the central _goal of increased readability_ and understandability of the model when deciding whether to model explicitly or implicitly. When in doubt, it's best to favor an explicit style. - -#### Using gateways instead of conditional flows - -Model splitting the process flow by always using _gateway symbols_ like instead of conditional flows . - -
    - -1 - -For example, you could've left out this inclusive gateway by drawing two outgoing sequence flows directly out of the preceding task **Choose menu** and attaching conditions to those sequence flows (becoming conditional sequence flows ). However, experience shows that readers understand the flow semantics of gateways better, which is why we do not make use of this possibility. - -#### Modeling start and end events - -Model the trigger and the end status of processes by always explicitly showing the _start_ and _end event symbols_. - -
    - -:::caution -Process models without start and end event cannot be executed on the Camunda workflow engine -::: - -1 - -According to the BPMN standard, you could have left out the start event... - -2 - -...as long as you also leave out the end events of a process. However, you would have lost important information in your model, which is why we do not make use of this syntactical possibility. - -Be specific about the _state_ you reached with your event from a _business perspective_. Quite typically, you will reach "success" and "failure" like events from a business perspective: - -
    - -1 - -'Invoice paid' better qualifies the "successful" business state than e.g. 'Invoice processed' would... - -2 - -...because in principle, you can call the failed state 'Invoice processed', too, but the reader of the diagram is much better informed by calling it 'Invoice rejected'. - -#### Separating splitting and joining gateways - -In general, avoid mixing up the split and join semantics of gateways by explicitly showing _two separate symbols_: - -
    - -1 - -You could have modeled this join implicitly by leaving out the explicitly joining XOR gateway and directly connecting two incoming sequence flows to... - -2 - -...the subsequent splitting XOR gateway. Of course, BPMN would allow this for other gateway types, too. However, experience shows that readers will often overlook the join semantics of such gateways serving two purposes at the same time. - -The fact that readers will often overlook the join semantics of gateways serving to join as well as split the process flow at the same time, combined with the preference for [modeling symmetrically](#modeling-symmetrically), leads us to prefer _splitting and joining gateways modeled with separate symbols_. - -However, there are cases in which the readability of models can be improved with _implicit modeling_. Consider the following example: - -
    - -1 - -The two incoming sequence flows to the task "Review tweet" could be merged with an XOR gateway, following explicit modeling. We argue that a merging XOR gateway directly behind the start event decreases the readability. A merging XOR gateway is a passive element and the reader expects the process to continue with an active element after the start event. - -#### Using XOR gateway markers - -Model the XOR gateway by explicitly showing the **X** symbol, even if some tools allow to draw a blank gateway. - -
    - -1 - -You could have shown the splitting gateway... - -2 - -...as well as the joining gateway without the **X** symbol indicating that it is an exclusive gateway. - -The **X** marker makes a clearer difference to the other gateway types (inclusive, parallel, event-based, complex) which leads us to prefer _explicit XOR gateway markers_ in general. - -#### Splitting sequence flows with parallel gateways - -Always model splitting the process flow by explicitly showing the _gateway symbol_: - -
    - -1 - -You could have modeled this parallel split implicitly by leaving out the gateway and drawing two outgoing sequence flows out of the preceding task **Choose menu**. However, the reader needs deeper BPMN knowledge in order to understand this model. Additionally, for joining the parallel flows... - -2 - -...you will always need the explicit symbol. - -The fact that readers of models using parallelization will likely need to understand the semantics of a parallel join combined with the preference for modeling symmetrically leads us to prefer _explicit parallel gateways_, too. - -#### Joining sequence flows with XOR gateways - -Model joining the process flow by explicitly showing the _XOR gateway symbol_ so the reader does not have to know BPMN details to understand how two incoming or outgoing sequence flows in a task behave. Additionally, this often supports the [symmetry of the model](#modeling-symmetrically) by explicitly showing a "relationship" of the splitting and joining _gateways forming a visual "block"_. - -
    - -1 - -You could have modeled this join implicitly by leaving out the gateway and directly connecting the two incoming sequence flows to the subsequent task **Have lunch**. However, explicitly modeling the join better visualizes a block, the joining gateway semantically "belongs" to... - -2 - -...the earlier split. In case the reader is not interested in the details of dinner preparation but just in having dinner, it's easy to "jump" to the gateway, "closing" that logical part of the model. - -This is particularly helpful for models bigger than that example with many such (eventually nested) blocks. Consider the following model, showing two _nested blocks_ of gateways: - -
    - -1 - -Now, you couldn't have modeled this join implicitly, because it's directly followed by an inclusive gateway with very different join semantics. _Consistency_ of joining techniques is another reason why we prefer explicitly joining sequence flows in general. - -There are always exceptions to the rule! There are cases in which the readability of models can be _improved_ with _implicit modeling_. So don't be dogmatic about explicit modeling; always aim for the most readable model. The following example shows a case of a model in which splitting and joining points do not form natural "blocks" anyway. In such cases, it can be preferable to make use of _implicit joining_ to improve the overall readability! - -### Avoiding lanes - -Consider _avoiding lanes_ for most of your models all together. They tend to conflict with several of the best practices presented here, like [Modeling _Symmetrically_](#modeling-symmetrically), [Emphasizing the _Happy Path_](#emphasizing-the-happy-path) and [Creating Readable _Sequence Flows_](#creating-readable-sequence-flows). Apart from readability concerns, our experience also shows that lanes make it more difficult to change the resulting process models and therefore cause considerably _more effort in maintenance_. - -When modeling on an _operational level_, where showing the responsibility of roles matters most, we recommend to [use _collaboration diagrams_](#using-collaboration-diagrams) with several _separate pools_ for the process participants instead of lanes. - -However, the usage of lanes might be meaningful for: - -- _Strategic_ level models (see [BPMN Tutorial](https://camunda.com/bpmn/) and [Real-Life BPMN](https://www.amazon.com/Real-Life-BPMN-4th-introduction-DMN/dp/1086302095/) on details for modeling levels) - especially when they have a focus on _responsibilities and their borders_. - -- _Technical/executable_ models with a focus on _human work-flow_ and its ongoing "ping pong" between several participants. - -For these cases, also consider alternative methods to maintain and show roles: - -- As a _visible part_ of the _task name_, e.g. in between squared brackets []: _"Review tweet [Boss]"_. - -:::caution Camunda 7 Only -During execution you can remove this part of the task name if you like by using simple mechanisms like shown in the [Task Name Beautifier](https://github.com/camunda/camunda-consulting/tree/master/snippets/task-name-beautifier) so it does not clutter your tasklist. -::: - -- As a _text annotation_ or a _custom artifact_ - -:::note -Roles are part of your executable BPMN process model as _technical attributes_ anyway - even if hidden in the BPMN diagram. For example, they can be used during execution for assignment at runtime. -::: - -## Helpful practices - -### Emphasizing the happy path - -You may want to emphasize the _"happy path"_ leading to the delivery of a successful process result by placing the tasks, events, and gateways belonging to the happy path on a straight sequence flow in the center of your diagram - at least as often as possible. - -
    - -The _five_ BPMN symbols belonging to the happy path are put on a straight sequence flow in the center of the diagram. - -### Avoid modeling retry behavior - -A common idea is to model retry behavior into your process models. This _should be avoided_ in general. The following process model shows a typical example of this anti-pattern: - -
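Instead of modeling such a loop, a job worker can simply report the failure and leave retrying to the engine and to operations tooling. Below is a minimal sketch using the Zeebe Java client; the gateway address, job type, and error handling shown here are illustrative assumptions, not part of any specific project.

```java
import io.camunda.zeebe.client.ZeebeClient;

// Illustrative sketch: instead of a retry loop in BPMN, report the failure
// and let the engine redeliver the job (or let operators resolve an incident).
public class RetryFreeWorker {

  public static void main(String[] args) {
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500") // assumption: local gateway
        .usePlaintext()
        .build()) {

      client.newWorker()
          .jobType("charge-credit-card") // made-up job type
          .handler((jobClient, job) -> {
            try {
              // ... call the external system here ...
              jobClient.newCompleteCommand(job.getKey()).send().join();
            } catch (Exception e) {
              // Decrement retries; once they reach 0, an incident is raised
              // and can be resolved in Operate - no extra BPMN elements needed.
              jobClient.newFailCommand(job.getKey())
                  .retries(job.getRetries() - 1)
                  .errorMessage(e.getMessage())
                  .send()
                  .join();
            }
          })
          .open();

      Thread.sleep(60_000); // keep the worker running (simplified)
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}
```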
    - -All operations use cases put into the model can be handled via Camunda tooling, e.g. by [retrying](/docs/components/concepts/job-workers/#completing-or-failing-jobs) or [Camunda Operate](/docs/components/operate/operate-introduction/). - -### Using collaboration diagrams - -If you model on an operational level (see [BPMN Tutorial](https://camunda.com/bpmn/) and [Real-Life BPMN](https://www.amazon.com/Real-Life-BPMN-4th-introduction-DMN/dp/1086302095/) on details for modeling levels) use _collaboration diagrams_ with several _separate pools_ for the process participants [instead of lanes](#avoiding-lanes) as operational models using lanes make it very hard for the individual process participant to identify the details of their process involvement. - -Furthermore, model just _one coherent process per pool_ (apart from event subprocesses, of course), even though BPMN in principle allows several processes per pool. This improves readability by constituting a clear visual border around every process and by providing a natural space for labeling that part of the end-to-end process in the pool's header. - -
    - -1 - -The Team Assistance is responsible for initial "Invoice Collection" as well as "Invoice Clarification" - if applicable. Those two processes are modeled by using two separate pools for the team assistance, just as... - -2 - -...the approver can see the "Invoice Approval" process in a separate pool and... - -3 - -...the managing director can see the "Invoice Payment" process in a separate pool while the collaboration diagram as a whole shows the business analyst that the overall end-to-end process works. - -Using _collaboration diagrams_ with _separate pools_ for the process participants allows to explicitly show interaction and communication between them by means of message flow and further improves readability by transparently showing the participants their own involvement in the end-to-end-process. As a consequence, they do not need to fully read and understand the end-to-end process in order to read, understand, and agree to their own involvement by looking at their own pools. - -### Showing interaction with systems - -Consciously decide how you want to model systems the process participants are interacting with. Use _data stores_ to show systems which primarily serve as a means to store and retrieve data. Use - depending on your needs _collapsed_ or _expanded_ - _pools_ for systems which are carrying out crucial activities in the process going way beyond storing and retrieving data. - -
    - -1 - -A _collapsed pool_ is used to represent a system which supports the process and/or carries out process tasks on its own. The pool could be expanded later to model the internal system details, maybe even with the goal to execute a technical process flow directly with a BPMN capable process engine. - -2 - -A _data store_ is used to represent a technical container meant to archive PDFs and store them for later retrieval. - -3 - -Another _data store_ is used to represent a container which could be a physical storage place for paper invoices to be paid at the moment but could become a representation for business objects in a database with the object state "to be paid" in the future. - -When _choosing_ between those _two options_ for modeling systems (data stores, collapsed pools) keep in mind that only pools represent processes and therefore have the capability to be expanded and modeled in all their internal details later on. - -### Avoiding excessive usage of data objects - -Avoid excessive use of _data objects_, but use them cautiously to show the _most important data related aspects_ of your process. - -Experience shows that many data objects and especially many data associations quickly clutter your process model and that visual noise reduces readability - especially for less experienced readers. - -You might find three practices helpful to find your own "right" amount of data visualization: - -
    - -1 - -Cautiously use data objects and associations to show the _most important data related aspects_ of your process. We could have modeled that all the tasks in the "Payments Creation" process either read, update, or delete the "new payment", however we decided that we just want to point out that the process works on a new payment object. - -2 - -Use data stores for _coupling processes via data_. We could have modeled a lot of other tasks in the process that either read or update the "payments", however, we decided to just point out the most important aspect for the process diagram, which is that the "Payments Creation" process of delivery service is loosely coupled with the "Payments Processing" via commonly shared data. - -3 - -Here we decided that it's helpful to know that this message does not only inform an adjustment possibility was checked, but that it also delivers all the necessary details of the adjustment. - -### Avoiding changes to symbol size and color - -Leave the _size of symbols as it is_ by default. For example, different sizes of tasks or events suggest that the bigger symbol is more important than the smaller one - an often unwarranted assumption. Instead of writing long labels, use short and consistent labels in line with your [naming conventions](naming-bpmn-elements.md) and move all additional information into BPMN annotations associated to your specific BPMN element. - -Furthermore, avoid _excessive use of colors_. Experience shows that colors are visually very strong instruments and psychologically very suggestive, but will typically suggest different things to different readers. Additionally, a colorful model often looks less professional. - -However, there are valid exceptions. For example, you could mark the _happy path_ through a process with a visually weak coloring: - -
    - -Another case for useful coloring might be to make a visual difference between _human_ and _technical flows_ within a bigger collaboration diagram by coloring the header bar on the left side of the pools. diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/modeling-beyond-the-happy-path.md b/versioned_docs/version-8.2/components/best-practices/modeling/modeling-beyond-the-happy-path.md deleted file mode 100644 index 9d2a5cfc5f4..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/modeling/modeling-beyond-the-happy-path.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: Modeling beyond the happy path -tags: - - BPMN - - BPMN Error Event - - BPMN Message Event - - BPMN Timer Event - - Happy Path -description: "Model the happy path to the desired end result before collecting problems and exceptions, prioritizing them, and introducing them incrementally." ---- - -First, model the happy path to the desired end result before collecting problems and exceptions, prioritizing them, and introducing them incrementally. Secondly, focus on one selected issue at a time, and choose the right techniques for modeling beyond the happy path. - -## The happy path and beyond - -The happy path is kind of the default scenario with a positive outcome, so no exceptions, errors, or deviations are experienced. Typically, you want to model the happy path first, and therefore you should define the desired _end result_, find a suitable _start event_, and collect the _activities_ and external _dependencies_ which _always_ need to be considered to reach the result. - -When we have that, the diagram shows the _happy path_ of a business process (or of the selectively chosen part of the end-to-end business process): - -
    - -1 - -_End Event_: It's often the easiest first step to agree upon the desired ("happy") end _result_ of a process. - -2 - -_Start Event_: As a second step, one might agree upon a _trigger_ for the work leading to the end result. - -3 - -_Activities_: After that, you can brainstorm and collect activities which _always_ need to be carried out to reach the result. - -4 - -_Intermediate Events_: Optionally, you can brainstorm and collect _milestones_ (modeled as blank events) and important external _dependencies_ (e.g. modeled as message events). - -### Modeling beyond the happy path by error scenarios - -As soon as you have this happy path, start modeling beyond the happy path. Focus on _one_ particular, selected problem at a time. - -1. Try to _understand_ the worries for _the business_ in the light of the desired end result. - -1. Identify the _undesired end result_ the process will reach in case the problem cannot be mitigated. This informs you about the _end event_ you will eventually reach because of the problem. - -1. Identify the affected areas in the happy path. Can the problem occur at a _particular point_, _during_ (one or several) _activities_, or basically _all the time_? This will inform you about the most promising modeling technique for the problem: whether either _gateways_, _boundary events_, or _event-based subprocesses_ can serve you to fork off your "problem path". - -This best practice will guide you through practices that help you model beyond the happy path. - -## Forking off at a particular point - -With BPMN gateways, we can deal with problems arising at a _particular point_ in our process. - -### Dealing with results - -By using data-based gateways, we _actively decide_ "now and here" on the basis of our own _process data_ which path our process must move along. For example, we can therefore use an XOR gateway to fork off a "problem path," dealing with a problematic result of _our own activities_: - -
    - -1 - -The _exclusive gateway_ deals with the potentially problematic result of incomplete order data. Note that we deal here with the procedural consequences of work which already took place in the preceding task, where we actually checked the order for completeness. - -2 - -Again, the preceding task already dealt with the actual work of checking the customer's creditworthiness. The _result_ of the task is a "yes" or "no" (true or false). We can deal with data by means of a data-based gateway, which immediately redirects to the path our process must move along. - -3 - -The _end event_ characterizes the undesired end result "order declined," which we now reach because of having modeled two problems. In the example, both of them lead to one and the same business outcome. - -### Dealing with events - -By using event-based gateways, we _passively wait_ for _future events_ that decide which path our process will have to move along. For example, we can therefore use it to fork off a "problem path" dealing with an undesired event _outside of our own control_: - -
    - -1 - -After having requested a delivery date (e.g. from wholesale), we use an _event-based gateway_ to passively wait for what happens next. We can not know "now and here", because it's outside of our own control. - -2 - -The _intermediate message event_ allows us to deal with the undesired event that the ordered good is not deliverable. - -### Dealing with missing results via timeouts - -By using event-based gateways, we can also deal with the situation that _nothing relevant_ for our process _happens_. We do this by defining a time period, after which we decide that we do not want to wait any longer: - -
    - -1 - -The _intermediate timer event_ allows us to deal with the situation that nothing relevant for our process happened for a defined time period. In case we do not get an answer from wholesale, we inform the customer that the order is not deliverable at the moment. - -## Forking off during (one or several) activities - -With BPMN boundary events, we can deal with problems arising _while we are actively occupied_ to carry out work in our process. - -### Dealing with errors - -A typical case is that it turns out to be _impossible to achieve the result_ of an activity while working on it. We can then choose to interrupt our work and fork off a "problem path" to deal with the issue: - -
    - -1 - -The _interrupting boundary error event_ allows us to deal with the fact that the order is not readable. As this prevents us from properly judging the completeness of the order, we cannot reach one of the expected results of our activity ("complete" or "not complete"), but instead deal with the problem by interrupting the activity and assuming the order to be declined. - -When modeling for business process automation, "dealing with errors" might be a highly technical concern. As a rule of thumb, we just want to show the _"business related" problems_ in a process model: those problems and errors which cause our business process to move along a different path, because different work must be carried out as a reaction. - -An example of a typical technical concern would be that we currently cannot reach a system, which is why, for example, we want to retry it later. We do not show such purely technical problems in a business process diagram, not even in an executable one: (1) It would clutter the diagram, and (2) There are more suitable ways to deal with technical issues potentially occurring almost anywhere. Read our Best Practice about [dealing-with-problems-and-exceptions](../../development/dealing-with-problems-and-exceptions) from a more technical point of view to learn more about the border between business-related concerns shown in a process diagram and purely technical concerns not shown in a process diagram. - -### Dealing with work on top of usual work - -Another typical use case for reacting to situations while we are actively occupied is that it sometimes turns out we need to do work _in addition to what we already do_: - -
    - -1 - -We encapsulate part of our process into a subprocess to enable us to express that while we are occupied with that part of the process, additional work might pop up. - -2 - -The _non-interrupting boundary timer event_ allows us to speed up order preparation in case it takes longer than two days; for example, by informing a responsible manager. - -## Being able to react all the time - -A bit similar to boundary events, with BPMN event subprocesses we can deal with problems arising while we are actively occupied to carry out work. The main advantage when being compared with boundary events is that some issues can _occur almost anywhere_ on our way through the happy path. - -### Dealing with issues occurring almost anywhere - -Some issues can occur almost anywhere on the way through our process. The event subprocess allows us to fork off a _problem path_ modeled separately from our main process to deal with such issues: - -
    - -1 - -The _non-interrupting start message event_ of the event subprocess allows us to express that wherever we currently are on our way through order confirmation, it can happen that the customer requests information about the status of that process. - -2 - -We should then provide the requested information without interferring with the order confirmation process itself. - -### Dealing with canceling the process - -Another typical use case for event-based subprocesses is a cancellation requested by the customer: - -
    - -1 - -The _interrupting start message event_ of the event subprocess allows us to express that wherever we currently are on our way through order confirmation, it can happen that the customer requests cancellation. - -2 - -We should then interrupt the main process (which is already expressed by the nature of the start event) and inform an involved dealer. - -## Boundary events as alternative for event based gateways - -### Using receive tasks with boundary events - -The examples above leverage the _event based gateway_. BPMN also allows to model _receive tasks_ that wait for responses. This has the advantage that you now can leverage boundary events to deal with _missing results_ or other _events occuring while you are waiting_ for the response. This is an _alternative_ to the event-based gateways shown in the above models. - -
    - -1 - -Instead of modeling an event for receiving a delivery date, we model a _task_ here. - -2 - -The fact that we do not receive such an answer at all can now be modeled as an _interrupting boundary timer event_. We inform the customer about the status, but as the timer is interrupting, do not wait any longer for the delivery date. - -3 - -It might turn out that the ordered good is not deliverable. This can be modeled as _boundary message event_. Upon that message we cancel any further waiting but inform the customer about the status instead. - -### Modeling a multi phase escalation path - -Boundary events are particularly useful when you consider that you might want to remind your dealer that the answer is overdue and give them another chance for transmitting the delivery date before you give up waiting. First, consider how this could be achieved by using event-based gateways: - -
    - -1 - -After having realized that the dealer's answer is late, we decide whether we want to remind the dealer and continue to wait - or not. We modeled here that we want to remind the dealer just once. - -2 - -However, note that while we are reminding the dealer, we are strictly speaking not in a state "ready-to-receive" the dealer's answer! According to BPMN execution semantics, the dealer's message might get lost until we are back at the event-based gateway. While you might want to choose to ignore that when modeling for communication purposes only, you will need to get it right for executable models. - -To get the BPMN execution semantics above fully right, we would now need to attach the two possible answers of the dealer ("Delivery data fixed", "Ordered good not available") as boundary events to the task "Remind dealer", too! Quite a modeling construct, just to properly wait for the dealer's response, right? Therefore, consider the following alternative to this modeling issue using boundary events only: - -
    - -1 - -Modeling a _non-interrupting boundary timer event_ directly at a task which waits for the response has the advantage that we never leave the "ready-to-receive" state and therefore avoid troubles with the strict interpretation of BPMN execution semantics. - -The second alternative is _very compact_ and avoids issues with _not being ready-to-receive_, but typically needs a _deeper understanding_ of BPMN symbols and their consequences for the token flow. Therefore, we sometimes also prefer event-based gateways for showing human flows, and ignore sophisticated token flow issues as discussed here. diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/modeling-with-situation-patterns.md b/versioned_docs/version-8.2/components/best-practices/modeling/modeling-with-situation-patterns.md deleted file mode 100644 index 836c9ade1f3..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/modeling/modeling-with-situation-patterns.md +++ /dev/null @@ -1,416 +0,0 @@ ---- -title: Modeling with situation patterns -tags: - - BPMN -description: "When modeling, you will sometimes realize some situations share common characteristics. Document such patterns and find a satisfying solution for modeling them." ---- - -When modeling, you will sometimes realize that some situations share common characteristics. To save work for yourself and spread such knowledge within your organization, collect and document such patterns as soon as you understand their nature and have found a satisfying solution for modeling them. For a start, we collected some typical patterns for you, which we see quite often in our modeling practice. You do not need to reinvent the wheel over and over again. - -## Escalating a situation step by step - -You need something and hope that it happens. Such a hope for a result may materialize, but it does not have to! After some time, you will typically become impatient and try to do something to make it happen. But if it then still does not happen, there comes a point at which you will have to decide that you must accept a failure. - -We sometimes also call that very common pattern a **multi-step escalation**. - -**Example:** "A month ago, I ordered a pair of shoes with that new online shop! After two weeks of waiting: nothing. I contacted them to determine what's up. The clerk promised me that the shoes will leave the warehouse today! But again, nothing, so after another week I just canceled that order. Since then I did not hear a word." - -In this scenario, the shop clearly did not implement the escalation of the delay properly. They should have applied one of the following patterns in the order delivery process: - -### Option 1: Using event-based gateways - -
- -1 - -After ordering the goods, the process passively waits for the success case by means of an event-based gateway: the goods should be delivered. However, in case this does not happen within a reasonable time, we make a first step of escalation: remind the dealer. - -2 - -We still stay optimistic. Therefore, the process again passively waits for the success case by means of another event-based gateway: the goods should still be delivered. However, in case this does not happen again within a reasonable time, we make a second step of escalation: cancel the deal. - -**Evaluation:** - -- :thumbsup: This solution explicitly shows how the two steps of this escalation are performed. Timers are modeled separately, followed by their corresponding escalation activities. - -- :thumbsdown: The usage of separate event-based gateways leads to _duplication_ (for example, of the receiving message events) and makes the model _larger_, even more so in case multiple steps of escalation need to be modeled. - -- :thumbsdown: During the time we need to remind the dealer, we are strictly speaking not in a position to receive the goods! According to the BPMN specification, a process can handle a message event only if it is ready to receive at exactly the moment it occurs. Fortunately, Camunda 8 introduced [message buffering](/docs/components/concepts/messages/#message-buffering), allowing this model to be executed properly without losing messages. Using Camunda 7, the message might get lost until we are at the second event-based gateway. - -:::note -You might want to use that pattern when modeling _simple two-phase escalations_. You should not execute it on Camunda 7. -::: - -### Option 2: Using gateways forming a loop - -
- -1 - -After having ordered the goods, the process passively waits for the success case by means of an event-based gateway: the goods should be delivered. However, in case this does not happen within a reasonable time... - -2 - -We choose by means of an exclusive gateway to make a _first step of escalation_: remind the dealer. We still stay optimistic. Therefore, the process returns to the event-based gateway and again passively waits for the success case: the goods should still be delivered. However, in case this does not happen again within a reasonable time, we choose a _second step of escalation_: cancel the deal. - -**Evaluation:** - -- :thumbsup: This model is a more _compact_ and more _generic_ modeling solution to the situation. If it comes to multiple steps of escalation, you will need such an approach to avoid huge diagrams. - -- :thumbsdown: The solution is _less explicit_. We could not choose to label the timer with explicit durations, as a single timer is used for both durations. The solution is _less readable_ for a less experienced reading public. For a fast understanding of the two-step escalation, this method of modeling is less suitable. - -- :thumbsdown: During the time we need to remind the dealer, we are strictly speaking not in a position to receive the goods! According to the BPMN specification, a process can handle a message event only if it is ready to receive at exactly the moment it occurs. Fortunately, Camunda 8 introduced [message buffering](/docs/components/concepts/messages/#message-buffering), allowing this model to be executed properly without losing messages. Using Camunda 7, the message might get lost until we are back at the event-based gateway. - -:::note -You might want to use that pattern when modeling _escalations with multiple steps_. You should not execute it on Camunda 7. -::: - -### Option 3: Using boundary events - -
    - -1 - -After having ordered the goods, the process passively waits for the success case by means of a receive task: the goods should be delivered. However, in case this does not happen within a reasonable time... - -2 - -a non-interrupting boundary timer event triggers a _first step of escalation_: remind the dealer. We still stay optimistic. Therefore, we did not interrupt the receive task, but continued to wait for the success case: the goods should still be delivered. - -3 - -However, in case this does not happen within a reasonable time, we trigger a _second step of escalation_ by means of an interrupting boundary timer event: interrupt the waiting for delivery and cancel the deal. - -**Evaluation:** - -- :thumbsup: This model is even more _compact_ and a very _generic_ modeling solution to the situation. If it comes to multiple steps of escalation, the non-interrupting boundary timer event could even trigger multiple times. - -- :thumbsup: The model complies with BPMN execution semantics. Since we never leave the wait state, the process is always ready to receive incoming messages. - -- :thumbsdown: The solution is _less readable_ and _less intuitive_ for a less experienced reading public, because the way the interrupting and non-interrupting timers collaborate requires a profound understanding of boundary events and the consequences for token flow semantics. For communication purposes, this method of modeling is therefore typically less suitable. - -:::note -You might want to use that pattern when modeling _escalations with two steps_ as well as _escalations with multiple steps_ for _executable models._ -::: - -## Requiring a second set of eyes - -For a certain task - typically a critical one in terms of your business - you need the opinion, review, or approval of two different people. - -We sometimes also call that pattern the **four eyes principle**. - -**Example:** The manager of a small sized bank's lending department has a problem: "Over the last quarter, we lost €100,000 in unrecoverable medium-sized loans. Controlling now tells me that could probably have been easily avoided by more responsible decisions of our lending department staff! I want that every such decision is signed off by two people from now on." - -Modeling a process dealing with that requirement can be achieved easily, but the better solution also depends on whether you prefer overall speed over total effort. - -All of the following modeling patterns assume that the two or more tasks needed to ultimately approve the loan must not be completed by one and the same person. When executing such patterns, you must enforce that with the workflow engine. - -### Option 1: Using separate tasks - -
    - -1 - -A first approver looks at the loan and decides whether they approve. If they decide not to approve, we are done, but if the loan is approved... - -2 - -...a second approver looks at the loan. If they also decide to approve, the loan is ultimately approved. - -**Evaluation:** - -- :thumbsup: This solution _explicitly_ shows how the two steps of this approval are performed. Tasks are modeled separately, followed by gateways visualizing the decision making process. - -- Note that the approvers work in a _strictly sequential_ mode, which might be exactly what we need in case we want _minimization of effort_ and, for example, display the reasonings of the first approver for the second one. However, we also might prefer _maximization of speed_. If this is the case, see solution [option 3 (multi-instance)](#option-3-using-a-multi-instance-task) further below. - -- :thumbsdown: The usage of separate tasks leads to _duplication_ and makes the model _larger_, even more so in case multiple steps of approvals need to be modeled. - -You might want to use that pattern when modeling the need for a _second set_ of eyes needed in _sequential_ order, therefore _minimizing effort_ needed by the participating approvers. - -While it is theoretically possible to model separate, explicit approval tasks in parallel, we do not recommend such patterns due to readability concerns. - -
    - -As a better alternative when looking for _maximization of speed_, see [option 3 (multi-instance)](#option-3-using-a-multi-instance-task) below. - -### Option 2: Using a loop - -
    - -1 - -A first approver looks at the loan and decides if they approve. If they decide not to approve, we are done, but... - -2 - -...if the loan is approved, we turn to a second approver to look at the loan. If they also decide to approve, the loan is ultimately approved. - -**Evaluation:** - -- :thumbsup: This model is a more _compact_ modeling solution to the situation. If it comes to multiple sets of eyes needed, you will probably prefer such an approach to avoid huge diagrams. - -- Note that the approvers work in a _strictly sequential_ mode, which might be exactly what we need if we want _minimization of effort_ and, for example, display the reasonings of the first approver for the second one. However, we also might prefer _maximization of speed_. If this is the case, see [option 3 (multi-instance)](#option-3-using-a-multi-instance-task) below. - -- :thumbsdown: The solution is _less explicit_. We could not choose to label the tasks with explicit references to a first and a second step of approval, as a single task is used for both approvals. The solution is _less readable_ for a less experienced reading public. For a fast understanding of the two steps needed for ultimate approval, this method of modeling is less suitable. - -You might want to use that pattern when modeling the need for _multiple sets_ of eyes needed in _sequential_ order, therefore _minimizing effort_ needed by the participating approvers. - -### Option 3: Using a multi-instance task - -
    - -1 - -All the necessary approvers are immediately asked to look at the loan and decide by means of a multi-instance task. The tasks are completed with a positive approval. Once all positive approvals for all necessary approvers are made, the loan is ultimately approved. - -2 - -If the loan is not approved by one of the approvers, a boundary message event is triggered, interrupting the multi-instance task and therefore removing all the tasks of all approvers who did not yet decide. The loan is then not approved. - -**Evaluation:** - -- :thumbsup: This model is a very _compact_ modeling solution to the situation. It can also easily deal with multiple sets of eyes needed. - -- Note that the approvers work in a _parallel_ mode, which might be exactly what we need in case we want _maximization of speed_ and want the approvers to do their work independent from each other and uninfluenced by each other. However, we also might prefer _minimization of effort_. If this is the case, see [option 1 (separate tasks)](#option-1-using-separate-tasks) or [option 2 (loop)](#option-2-using-a-loop) above. - -- :thumbsdown: The solution is much _less explicit_ and _less readable_ for a less experienced reading public, because the way the boundary event interacts with a multi-instance task requires a profound understanding of BPMN. For communication purposes, this method of modeling is therefore typically less suitable. - -You might want to use that pattern when modeling the need for _two_ or _multiple sets_ of eyes needed in _parallel_ order, therefore _maximising speed_ for the overall approval process. - -## Measuring key performance indicators (KPIs) - -You want to measure specific aspects of your process execution performance along some indicators. - -**Example:** A software developer involved in introducing Camunda gets curious about the business: "How many applications do we accept or decline per month, and how many do we need to review manually? How many are later accepted and declined? How much time do we spend for those manual work cases, and how long does the customer have to wait for an answer? I mean...do we focus on the meaningful cases...?" - -When modeling a process, we should actually always add some information about important key performance indicators (KPIs) implicitly. For example, specifically [naming start and end events](../naming-bpmn-elements/#naming-events) with the process state reached from a business perspective. Additionally, we might explicitly add additional business milestones or phases. - -While the following section concentrates on the aspects of modeling KPIs, you might want to learn more about using them for [reporting about processes](../../operations/reporting-about-processes/) from a more technical perspective. For example, when being faced with the task to actually retrieve and present Camunda's historical data collected on the way of execution. - -### Option 1: Showing milestones - -
- -1 - -First, we assess the application risk based on a set of automatically evaluable rules. - -2 - -We can then determine whether the automated rules already came to a (positive or negative) conclusion or not. If the rules led to an unsure result, a human must assess the application risk. - -3 - -We use explicit intermediate events to make perfectly clear that we are interested in the applications which never see a human... - -4 - -...and be able to compare that to the applications which needed to be assessed manually, because the automatic assessment failed to determine a clear result. - -5 - -We also use end events, which are meaningful from a business perspective. We must know whether an application was either accepted... - -6 - -...or rejected. - -By means of that process model, we can now let Camunda count the applications which were accepted and declined. We know how many and which instances we needed to review manually, and can therefore also narrow down our _accepted/declined statistics_ to those manual cases. - -Furthermore, we will be able to measure the _handling time_ needed for the user task; for example, by measuring the time needed from claiming the task to completing it. The customer will need to wait a _cycle time_ from start to end events, and these statistics, for example, could be limited to the manually assessed applications and will then also include any idle periods in the process. - -By comparing the economic _value_ of manually assessed insurance policies to the _effort_ (handling time) we invest into them, we will also be able to learn whether we focus our manual work on the meaningful cases and eventually improve upon the automatically evaluated assessment rules. - -### Option 2: Emphasizing process phases - -As an alternative or supplement to using events, you might also use subprocesses to emphasize certain phases in your process. - -
    - -1 - -By introducing a separate embedded subprocess, we emphasize the _phase_ of manual application assessment, which is the critical one from an economic perspective. - -Note that this makes even more sense if multiple tasks are contained within one phase. - -## Evaluating decisions in processes - -You need to come to a decision relevant for your next process steps. Your actual decision depends on a number of different factors and rules. - -We sometimes also call that pattern **business rules** in BPMN. - -**Example:** The freshly hired business analyst is always as busy as a bee: "Let's see... Category A customers always get their credit card applications approved, whereas Category D gets rejected by default. For B and C it's more complicated. Right, in between 2500 and 5000 Euros, we want a B customer, below 2500 a C customer is OK, too. Mmh. Should be no problem with a couple of gateways!" - -### Showing decision logic in the diagram? - -
- -When modeling business processes, we focus on the flow of work and just use gateways to show that the following tasks or results fundamentally differ from each other. However, in the example above, the business analyst used gateways to model the logic underlying a decision, which is clearly an anti-pattern! - -It does not make sense to model the rules determining a decision inside the BPMN model. The decision tree of rules grows exponentially with every additional criterion. Furthermore, we will typically want to change such rules much more often than the process (in the sense of tasks needed to be carried out). - -### Using a single task for a decision - -
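If you later make such a decision task executable on Camunda 7, a common option is to wire it to a DMN decision table and evaluate it from a delegate. The following is only a rough sketch; the decision key, variable names, and single-output result handling are assumptions for illustration and are not taken from the example:

```java
import org.camunda.bpm.dmn.engine.DmnDecisionTableResult;
import org.camunda.bpm.engine.delegate.DelegateExecution;
import org.camunda.bpm.engine.delegate.JavaDelegate;
import org.camunda.bpm.engine.variable.Variables;

// Hypothetical delegate behind the single decision task
public class DetermineCardEligibilityDelegate implements JavaDelegate {

  @Override
  public void execute(DelegateExecution execution) {
    // Evaluate a DMN decision table deployed under an assumed key
    DmnDecisionTableResult result = execution.getProcessEngineServices()
        .getDecisionService()
        .evaluateDecisionTableByKey("creditCardEligibility",
            Variables.createVariables()
                .putValue("customerCategory", execution.getVariable("customerCategory"))
                .putValue("amount", execution.getVariable("amount")));

    // Store the single output entry so a subsequent data-based gateway can route on it
    execution.setVariable("approved", result.getSingleEntry());
  }
}
```

Keeping the decision logic in the DMN table means the process model stays stable while the rules can change independently.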
- -1 - -Instead of modeling the rules determining a decision inside the BPMN model, we just show a single task representing the decision. Of course, when preparing to execute such a model in Camunda, we can wire such a task with a DMN decision table or some other programmed piece of decision logic. - -2 - -While it would be possible to hide the evaluation of decision logic behind the exclusive gateway, we recommend always showing an explicit node with which the data is retrieved, which then might be used by subsequent data-based gateways. - -## Distinguishing undesired results from fatal problems - -You model a certain step in a process and wonder about undesired outcomes and other problems hindering you from achieving the result of the step. - -**Example:** What today is a problem for the business might become part of the happy path in a less successful future: "Before we can issue a credit card, we must ensure that a customer is credit-worthy. Unfortunately, sometimes it might also turn out that we cannot even get any information about the customer. Then we typically also reject at the moment. Luckily, we do have enough business with safe customers anyway." - -### Option 1: Using gateways to check for undesired results - -
    - -1 - -Showing the check for the applicant's creditworthiness as a gateway also informs about the result of the preceding task: the applicant might be creditworthy - or not. Both outcomes are _valid results_ of the task, even though one of the outcomes here might be _undesired_ from a business perspective. - -### Option 2: Using boundary error events to check for fatal problems - -
- -1 - -Knowing nothing about the creditworthiness (because we cannot even retrieve information about the applicant) is not considered to be a valid result of the step, but a _fatal problem_ hindering us from achieving any valid result. We therefore model it as a boundary error event. - -The fact that both problems (an unknown applicant number or an applicant who turns out not to be credit-worthy) lead us at the moment to the same reaction in the process (we reject the credit card application) does not change the fact that we need to model them differently. The decision in favor of a gateway or an error boundary event solely depends on the exact definition of the result of a process step. See the next section. - -### Understanding the definition of the result - -What we want to consider to be a valid result for a process step depends on assumptions and definitions. We might have chosen to model the process above with slightly different execution semantics, while achieving the same business semantics: - -
- -1 - -The only valid result for the step "Ensure credit-worthiness" is knowing that the customer is in fact credit-worthy. Therefore, any other condition must be modeled with an error boundary event. - -To advance clarity by means of process models, it is absolutely crucial for modelers to have a clear mental definition of the _result_ a specific step produces, and as a consequence, to be able to distinguish _undesired results_ from _fatal problems_ hindering us from achieving any result for the step. - -While there is not necessarily a right way to decide what to consider as a valid result for your step, the business reader will typically have a mental preference to see certain business issues, either more as undesired outcomes or more as fatal problems. However, for the executable pools, your discretion to decide about a step's result might also be limited when using, for example, service contracts which are already pre-defined. - -## Asking multiple recipients for a single reply - -You offer something to or request something from multiple communication partners, but you actually just need the first reply. - -We sometimes also call that pattern **first come, first serve**. - -**Example:** A well-known personal transportation startup works with a system of relatively independent drivers. "Of course, when the customer requests a tour, speed is everything. Therefore, we need to limit a tour to those of our drivers who are close by. Of course, there might be several drivers within a similar distance. We then just offer the tour to all of them!" - -### Using a multi-instance task - -
    - -1 - -After determining all drivers currently close enough to serve the customer, we push the information about the tour to all of those drivers. - -2 - -We then wait for the reply of a single driver. Once we have it, the process won't wait any longer, proceeds to the end event, and informs the customer about the approaching driver. - -According to the process model, it is possible that another driver accepts the tour as well. However, as the process in the tour offering system is not waiting for the message anymore, it will get lost. As our process proceeded to the end event after the first reply, all subsequent messages are intentionally ignored in this process design. - -## Processing a batch of objects - -You need to process many objects at once, which were already created before one by one, or which were updated one by one to reach a certain status. - -We sometimes also call that pattern simply the **1-to-n problem**. - -**Example:** A lawyer explains to a new client the way he intends to bill him: "Of course, if you need advice, you can call me whenever you want! We will agree about any work that needs to be done and my assistant will track those services which are subject to a charge. Once a month mostly you will receive a neatly-structured invoice providing you with all the details!" - -### Using data stores and multi instance activities - -
- -1 - -The client asks for advice whenever they need it. Note that we create one process instance per request for advice. - -2 - -The lawyer makes sure to record the billable hours needed for the client. - -3 - -As the lawyer does not directly inform anybody by doing this, but rather collects data, we show this with a data store representing the time sheet and a data association pointing in its direction - representing the write operation. - -4 - -The assistant starts their invoicing process on a monthly basis. In other words, we create one process instance per monthly billing cycle. - -5 - -As a first step, the assistant determines all the billable clients. These are the clients for which time sheet entries exist in the respective month. Note that we have _many_ legal advice instances that have a relationship to _one_ billing instance and that the connection is implicitly shown by the read operation on the current status of data in the time sheet. - -6 - -Now that the assistant knows the billable clients, they can iterate through them and invoice all of them. We use a sequential multi-instance subprocess to illustrate that we need to do this for every billable client. - -7 - -On the way, the assistant is also in charge of checking and correcting time sheet entries, illustrated with a parallel multi-instance task. Note that these time sheet entries (and hence task instances) relate here 1:1 to the instances of the lawyer's "legal consulting" process. In real life, the lawyer might have created several time sheet entries per legal advice process, but this does not change the logic of the assistant's process. - -8 - -Once the client is invoiced, the assistant starts a "payment processing" instance per invoice, the details of which are not shown in this diagram. We can imagine that the assistant needs to be prepared to follow up with reminders until the client eventually pays the bill. - -## Concurring dependent instances - -You need to process a request, but need to make sure that you don't process several similar requests at the same time. - -**Example:** A bank worries about the increasing costs for creditworthiness background checks: "Such a request costs real money, and we often have packages of related business being processed at the same time. So we should at least make sure that if one credit check for a customer is already running, no second credit check for the same customer is performed at the same time." - -### Using message events - -
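When it comes to executing this pattern, the step that informs the waiting instances usually boils down to a message correlation call against the engine. The following Java sketch only illustrates the idea; the message name, the use of the customer id as business key, and the variable are assumptions, not taken from the model:

```java
import org.camunda.bpm.engine.ProcessEngine;

public class CreditCheckNotifier {

  private final ProcessEngine processEngine;

  public CreditCheckNotifier(ProcessEngine processEngine) {
    this.processEngine = processEngine;
  }

  // Called by the active instance once the creditworthiness is determined
  public void informWaitingInstances(String customerId, boolean creditworthy) {
    processEngine.getRuntimeService()
        .createMessageCorrelation("Msg_CreditworthinessDetermined")
        .processInstanceBusinessKey(customerId)
        .setVariable("creditworthy", creditworthy)
        // correlateAll() delivers the message to every waiting instance, not just the first one
        .correlateAll();
  }
}
```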
- -1 - -Once an instance passes this event and moves on to the subsequent actual determination of the creditworthiness... - -2 - -...other instances will determine that there already exists an active instance and wait to be informed by this instance. - -3 - -When the active instance has determined the creditworthiness, it will move on to inform the waiting instances... - -4 - -...which will receive a message with a creditworthiness payload and be finished themselves with the needed information. - -The model explicitly shows separate steps (_determine_ and _inform_ waiting instances) which you might want to implement more efficiently as one single step that performs both semantic steps at once by means of a small piece of programming code. - -### Using a timer event - -While using timer events can be a feasible approach in case you want to avoid communication between instances, we do not recommend it. For example, one downside is that such solutions cause delays and overhead due to the periodic queries and the loop. - -
    - -1 - -Once an instance passes this event and moves on to the subsequent actual determination of the creditworthiness... - -2 - -...all other instances will go into a wait state for some time, but check periodically, if the active instance is finished. - -3 - -When the active instance has determined the creditworthiness and finishes... - -4 - -...all other instances will also finish after some time. diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/naming-bpmn-elements.md b/versioned_docs/version-8.2/components/best-practices/modeling/naming-bpmn-elements.md deleted file mode 100644 index 4114b5b9142..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/modeling/naming-bpmn-elements.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: "Naming BPMN elements" -description: "Best Practices for naming BPMN elements" -tags: - - BPMN - - Naming Convention ---- - -Name all elements in your BPMN diagrams by focusing on the business perspective. For activities, use a verb to describe what to do. For events, describe in which (business) state the process or domain object is currently in. For (data-based) gateways, pose a question and describe the conditions under which the process moves on along the outgoing flows. - -## Essential practices - -### Naming activities - -Name a _task_ using an object and a verb in the infinitive. By doing this, you consistently describe _what you do with an object_. - -
    - -Name a _subprocess_ (or _call activity_) by using an object and a (by convention _nominalized_) verb. Similar to tasks, you should always describe _what you do with an object_. - -
    - -:::note -Avoid very broad and general verbs like "Handle invoice" or "Process order." Try to be more specific about what you do in your activity from a business perspective. -::: - -### Naming events - -Wherever possible, name an _event_ using an object and a verb reflecting a state. Always try to describe _which state an object is in_ when the process is about to leave the event. - -
    - -This naming approach does not always work perfectly. In those cases, precisely describe the business semantics when the process is about to leave the event. The following names are also valid: - -
    - -Be specific about the state you reached with your event from a business perspective. Often, you will reach "success" and "failure" like events from a business perspective: - -
    - -1 - -"Invoice paid" better qualifies the "successful" business state than "Invoice processed" would... - -2 - -...because in principle, you can call the failed state "Invoice processed", too, but the reader of the diagram is much better informed by calling it "Invoice rejected". - -:::note -Avoid very broad and general verbs like "Invoice processed" or "Order handled"! -::: - -### Naming gateways - -Label a data-based _exclusive gateway_ with a question. Label the outgoing sequence flows with the conditions they are executed under. Formulate the conditions as answers to the question posed at the gateway. - -
    - -This naming approach does not always work for _inclusive gateways_, because the outgoing flows' conditions can be completely independent from each other. Still, use a question whenever possible. - -
    - -If this is not possible, leave out the question completely but describe the conditions under which the outgoing paths are executed. - -
    - -_Avoid naming event-based gateways_, but ensure you name their subsequent events. Also, avoid naming _parallel gateways_ and all forms of _joining gateways_. You don't need to specify anything about those gateways, as the flow semantics are always the same. - -### Naming processes - -A _pool_ should be given the same name as the process the pool contains using an object and a nominalized verb. Optionally, add the organizational role responsible for the process shown in the pool as a whole. - -
    - -If you have more than one lane in a pool, name each _lane_ using the organizational role or technical system responsible for carrying out the activities shown in the lane. - -
- -Name a _diagram_ (file) with the same name as the process shown in the diagram. In case of a collaboration diagram, use a name reflecting the end-to-end perspective shown in that diagram. - -## Recommended practices - -### Using sentence case - -Use [sentence case](https://en.wiktionary.org/wiki/sentence_case) when naming BPMN symbols. This is standard capitalization of an English sentence, with the first letter uppercase and subsequent letters lowercase, with exceptions such as proper nouns or acronyms. - -
    - -### Avoiding technical terms - -Avoid using purely _technical terms_ when naming activities or other BPMN symbols, for example. These are not always clear to every reader. Completely avoid using names of coding artifacts like classes, methods, technical services, or purely technical systems. - -## Helpful practices - -### Avoiding abbreviations - -Avoid using _abbreviations_ as they are not always clear to every reader. This is especially true for abbreviations which are specific to companies or departments. Try to avoid them completely. - -If you want to use an abbreviation in your model (to save space or sometimes even to improve understandability) make sure you explain the abbreviation in the model in brackets, by text annotations, or use an accessible glossary. diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png b/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png deleted file mode 100644 index ee573aaec7e..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png b/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png deleted file mode 100644 index 67d65ae73e6..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids.md b/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids.md deleted file mode 100644 index 7e522e95642..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/modeling/naming-technically-relevant-ids.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Naming technically relevant IDs" -tags: - - BPMN - - Naming Convention ---- - -For executable flows, properly name all relevant technical element IDs in your BPMN diagrams. - -Focus on process, activity, message, and error IDs, but also consider events as well as gateways and their sequence flows that carry conditional expressions. Those elements can show up regularly (e.g. in your logs) and it makes things easier if you can interpret their meaning. - -## Using naming conventions for BPMN IDs - -Define developer-friendly and business-relevant IDs for the process itself, as well as all activities, messages, and errors. Also consider events, gateways, and the sequence flows that carry conditional expressions. Even though IDs are just identifiers, keep in mind that they will show up regularly on the technical level. Meaningful IDs will help a lot. - -Examine the IDs shown in the following example: - -
    - -The following table provides you with a guideline that we would use in a context where developers are comfortable with _Java_ and _PascalCase_ naming style. You may adapt these suggestions to typical naming conventions used in your programming context. - -| | | XML Attribute | Prefix or Suffix | Resulting ID | -| ----- | ----------------- | -------------------- | ---------------- | ----------------------------- | -| **1** | Tweet Approval | process/@id | Process | TweetApprovalProcess | -| **2** | New tweet written | startEvent/@id | StartEvent\_ | StartEvent_NewTweetWritten | -| | | message/@id | Message\_ | Message_NewTweetWritten | -| | | message/@name | Msg\_ | Msg_NewTweetWritten | -| **3** | Review tweet | userTask/@id | Task\_ | Task_ReviewTweet | -| **4** | Tweet approved? | exclusiveGateway/@id | Gateway\_ | Gateway_TweetApproved | -| **5** | No | sequenceFlow/@id | SequenceFlow\_ | SequenceFlow_TweetApprovedNo | -| **6** | Tweet duplicated | boundaryEvent/@id | BoundaryEvent\_ | BoundaryEvent_TweetDuplicated | -| | | error/@id | Error\_ | Error_TweetDuplicated | -| | | error/@errorCode | Err\_ | Err_TweetDuplicated | -| **7** | Tweet published | EndEvent\_/@id | EndEvent\_ | EndEvent_TweetPublished | - -### Editing IDs with Camunda Modeler - -We recommend using Camunda Modeler's properties panel to edit technical identifiers and change them according to your naming conventions, like it is shown here for the process id: - -![Properties Panel](naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png) - -We especially do not recommend editing identifiers in the XML directly, as it might accidently corrupt your BPMN file. You have to keep the identifiers in the section about the graphical layout (so called "DI" for diagram interchange) further down in sync with the execution semantics at the top of the XML. - -However, we include an XML example of all those identifiers mentioned for illustration: - -```xml - - - - - - - - - - - - - - - - - - -... - - - - - -``` - -8 - -Elements in the diagram interchange section (DI) reference identifiers from above; you have to adjust them accordingly! Camunda Modeler takes care of this automatically. - -Changing IDs can potentially break your tests or even process logic if done at a late stage of development. Therefore, consider using meaningful IDs right from the beginning and perform the renaming as part of the modeling. - -### Aligning the BPMN file name with the process id - -It is a good practice to _align_ the _file name_ of your BPMN models with the _process id_ of the executable process that is inside the file. - -![BPMN file name](naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png) - -## Generating id constants classes - -If you have lots of process, case, and decision definitions with lots of IDs, consider generating constant classes (e.g. via XSLT) directly from your BPMN or DMN XML files. For example, this can be used for testing. - -## Using a Camunda Modeler plugin to generate meaningful ids - -You can use [this modeler plugin community extension](https://github.com/camunda-community-hub/camunda-modeler-plugin-rename-technical-ids) to automatically convert your ids to comply with our best practices. Of course, you could also use this as a basis to create your own modeler plugin to generate ids that follow your custom naming conventions. Or, you could implement a similar plugin to implement checks if all relavant ids follow your naming conventions. 
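As a sketch of what such a generated constants class (see the section on generating id constants classes above) could look like for the tweet approval example, assuming the generator simply maps each ID to a `String` constant:

```java
// Hypothetical generated class; the values mirror the IDs from the table above
public final class TweetApprovalProcessConstants {

  public static final String PROCESS_ID = "TweetApprovalProcess";
  public static final String START_EVENT_NEW_TWEET_WRITTEN = "StartEvent_NewTweetWritten";
  public static final String TASK_REVIEW_TWEET = "Task_ReviewTweet";
  public static final String GATEWAY_TWEET_APPROVED = "Gateway_TweetApproved";
  public static final String BOUNDARY_EVENT_TWEET_DUPLICATED = "BoundaryEvent_TweetDuplicated";
  public static final String END_EVENT_TWEET_PUBLISHED = "EndEvent_TweetPublished";

  private TweetApprovalProcessConstants() {
    // constants only, no instances
  }
}
```

Referencing such constants in your process tests instead of repeating string literals means a renamed ID breaks the build instead of silently breaking a test.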
diff --git a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/document-request-failed.png b/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/document-request-failed.png deleted file mode 100644 index d966c69132d..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/document-request-failed.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-detail.png b/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-detail.png deleted file mode 100644 index e2019f6ed79..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-detail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed-with-detail.png b/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed-with-detail.png deleted file mode 100644 index 1b4074a8d4b..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed-with-detail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed.png b/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed.png deleted file mode 100644 index 91358014a03..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7-assets/insurance-application-failed.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7.md b/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7.md deleted file mode 100644 index 0c72e1c8b8a..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/operations/operating-camunda-c7.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: "Operating Camunda 7" -tags: - - Save Point - - Retry - - Incident - - Monitoring - - Alarming - - Backup -description: "To successfully operate Camunda 7.x, you need to take into account operation requirements when modeling business processes." ---- - -To successfully operate Camunda 7.x, you need to take into account operation requirements when modeling business processes. Use your existing tools and infrastructure for technical monitoring and alarming. When appropriate, use Camunda Cockpit and consider extending it with plugins instead of writing your own tooling. - -:::caution Camunda 7 only -This best practice targets Camunda 7.x only! The Camunda 8 stacks differs and operating it is discussed in [Camunda 8 Self-Managed](/docs/self-managed/about-self-managed/). -::: - -## Installing Camunda 7.x - -For a quick start, especially during development, follow [our greenfield recommendation for Camunda 7](../../architecture/deciding-about-your-stack-c7). - -For _production_ usage we recommend setting up the container of your choice yourself, as we do not make sure we always ship the latest stable patched container version in our distribution. 
Additionally, we cannot ship some containers for licensing reasons. Install Camunda into this container following the [installation guide](https://docs.camunda.org/manual/latest/installation/). Add required JDBC drivers for the database of your choice and configure data sources accordingly. Make sure to [secure Camunda](../securing-camunda-c7/) if required. - -We recommend to _script_ the installation process, to allow for an _automated installation_. Typical steps include: - -1. Set up (or extract) the container and install Camunda into it. As an alternative, you might use the Camunda distribution and remove the example application. -2. Add JDBC drivers and configure the data source for Camunda. -3. Configure identity management (e.g. to use LDAP) or add required users and groups to the database-based identity management. -4. Set up Maven build for Camunda webapp in case you want to add your own plugins or customizations. -5. Install the Camunda license. - -To script the installation, you can retrieve all required artifacts also from our Maven repositories. This way, it is easy to switch to new Camunda versions. Integrate all pieces by leveraging a scripted configuration management and server automation tool such as [Docker](http://www.docker.com/), [Puppet](https://puppet.com/), [Chef](https://www.chef.io/), or [Ansible](http://www.ansible.com/). - -## Setting up monitoring and alarming - -Certain situations have to be recognized quickly in order to take appropriate action during the runtime of the system. Therefore, consider monitoring and alarming up front when planning for production operations. - -Distinguish between process execution-related monitoring and basic systems monitoring. Do systems monitoring via normal Java or Container Tools - nothing Camunda specific is needed in that area. - -### Recognizing and managing incidents - -In case a service call initiated by Camunda fails, a _retry_ strategy will be used. By default, a service task is retried three times. Learn more about [retrying failed transactions](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#failed-jobs) with your custom retry strategy. - -In case the problem persists after those retries, an _incident_ is created and Camunda will not recover without intervention from a human operator. Therefore, make sure somebody is notified whenever there are any (new) incidents. - -**You can build an _active_ solution**, where Camunda actively notifies somebody when there is a new incident. For example, you could send an email or create a user task in Camunda. To achieve this, you can hook in your own [incident handler](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/impl/incident/IncidentHandler.html) as shown in [this example](https://github.com/camunda/camunda-consulting/tree/master/snippets/incidents-as-tasks). The upside is that sending emails like this is very easy, the downside is that you have to implement Camunda specific classes. - -However, if a crucial system goes down you might end up spamming people with thousands of process instances running into the same incident. - -This is why typically **a passive solution is preferred**, which queries for (new) incidents from the outside, leveraging the Camunda (Java or REST) API and taking the desired action. The most common way is to query the number of incidents by the tool of your choice using the REST API: `GET incident/count`. 
More information can be found in the [REST API](https://docs.camunda.org/manual/latest/reference/rest/incident/get-query-count/). We prefer the REST API over more low level technologies (like JMX or PMI), as this typically works best in any environment. - -Now you can easily batch multiple incidents into one email or delegate alarming to existing tools like Nagios or Icinga. An additional advantage is that you eventually already have proper alarming groups defined in such a tool. - -### Monitoring performance indicators - -Monitor the following typical performance indicators _over all process definitions_ at once: - -- Number of _open executable jobs_: `GET /job/count?executable=true` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/job/get-query-count/)), as these are jobs that should be executed, but are not yet. -- Number of _open incidents_: `GET /incident/count` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/incident/get-query-count)), as somebody has to manually clear incidents and increasing numbers point to problems. -- Number of _running process instances_: `GET /process-instance/count` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/process-instance/get-query-count/)). Increasing numbers might be a trigger to check the reasons, even if it can be perfectly fine (e.g. increased business). - -:::note -If you want to monitor _process definition-specific_ performance indicators, you can either iterate over the process definitions - e.g. by using `GET /process-definition/{id}/statistics` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/process-definition/get-activity-statistics/)), or leverage `GET /process-definition/statistics` ([REST API](https://docs.camunda.org/manual/latest/reference/rest/process-definition/get-statistics/)), which groups overall performance indicators by process definitions. Beware that you eventually need to take into account older versions of process definitions, too. -::: - -### Organizing dedicated teams for monitoring - -In general, the performance indicators mentioned above can and should be _monitored generically_ and independent of specific process applications. However, you may want to set up _dedicated alarming_ for different operating teams with more knowledge about specific process application characteristics. For example, one of those teams might already know what the typical number of open user tasks for a certain process definition is during normal runtime. There are two approaches to achieve this: - -**The recommended approach is to configure dedicated alarming directly in your monitoring tool** by creating separate monitoring jobs querying the performance indicators for specific process definitions. This approach does not need any operation centric adjustments in Camunda and is easy to set up and handle. - -An alternative approach is to define team-specific bundles of process definitions in Camunda by leveraging the process definition "category" or even your own BPMN extension elements. However, this information cannot be directly used in the queries mentioned above. Hence, you have to implement additional logic to do so. We typically advise that you do not do so unless you have very good reasons to invest the effort. - -### Creating your own alarming mechanism - -In case you do not have a monitoring and alarming tool or cannot create new jobs there, build an easy alarming scheduler yourself. 
This could be a Java component called every couple of minutes to query the current performance indicators via the Java API, generating custom emails afterwards. - -```java -public void scheduledCheck() { - // Query for all open incidents - List<Incident> incidents = processEngine.getRuntimeService() - .createIncidentQuery().list(); - // Prepare mailing text, one line per incident - String emailContent = "There are " + incidents.size() + " incidents:\n"; - for (Incident incident : incidents) { - emailContent += "- " - + incident.getIncidentMessage() + "\n
    "; - } - emailContent += "Please have a look into Camunda Cockpit for details."; - // Send mailing, e.g. via SimpleMail - sendEmail(emailContent); -} -``` - -### Defining custom service level agreements - -Apart from generic monitoring, you might want to define _business oriented service level agreements (SLAs)_ for very specific aspects of your processes, like for instance, overdue tasks, missed deadlines or similar. You can achieve that by: - -1. Adding custom extension attributes in your BPMN process definition, e.g. for specific tasks, message events, etc., which serve to define your specific business performance indicators. -2. Reading deployed process definitions and their _custom extension attributes_, e.g. by means of Camunda's [BPMN Model API](https://docs.camunda.org/manual/latest/user-guide/model-api/bpmn-model-api/) and _interpreting_ their meaning for your _business performance indicators_, e.g. by calculating deadlines for tasks. -3. _Querying_ for (e.g. task or other) instances within/without the borders of your service level agreement. - -This is normally implemented similar to the Java Scheduler we described above. - -## Intervening with human operator actions - -### Handling incidents - -Incidents are ultimately [failed jobs](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/failed-jobs/), for which no automatic recovery can take place anymore. Hence, a human operator has to deal with incidents. Check for incidents within Camunda Cockpit and take action there. You might, for example, want to: - -- [Edit process variables](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/process-instance-view/#edit-variables). -- [Modify the process instance ("move" the tokens)](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/process-instance-modification/). -- [Trigger additional retries](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/failed-jobs/#retry-a-failed-job). - -Camunda Enterprise Edition offers a [bulk retry](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/failed-jobs/#bulk-retry) feature allowing you to retry jobs which failed for a common reason (e.g. a remote system being down for a longer time) with a single human operator action. - -:::note -If you have a failing _call activity_ in your process, you _retry "bottom-up"_ (in the failing subprocess instance), but you _cancel "top-down"_ (the parent process instance to be canceled). Consider the following example incident visualized in Camunda Cockpit. -::: - -![Cockpit call activity](operating-camunda-c7-assets/insurance-application-failed-with-detail.png) - -You eventually see the incident first on the parent process call activity **Request documents**, but it is actually caused by the failing activity **Request documents** in the subprocess. For better comprehensibility, this is directly visualized in the picture above. In Cockpit, you can navigate to the call activity in the **called process instance** pane to the bottom of the screen. There you could now _retry_ the failing step of the _subprocess_ instance: - -![Cockpit failed task](operating-camunda-c7-assets/document-request-failed.png) - -1 - -By clicking on this button, you can _retry_ the failing step of the _subprocess_ instance. Note that a successful retry will also resolve the incident you see on the parent process instance. 
- -On the other hand, you might also want to _cancel_ the failing _parent process_ instance: - -![Cockpit cancel](operating-camunda-c7-assets/insurance-application-failed.png) - -1 - -By clicking on this button, you can _cancel_ the failing _parent process_ instance. The cancellation will also cancel the subprocess instances running in the scope of the parent process instance. - -### Turning on/off all job execution - -Sometimes you might want to _prevent jobs being executed at all_. When starting up a cluster, for example, you might want to turn off the job executor and start it up later manually when everything is up and running. - -1. Configure the [jobExecutorActivate](https://docs.camunda.org/manual/latest/reference/deployment-descriptors/tags/process-engine/#configuration-properties) property to `false`. -2. Start the job executor manually by writing a piece of Java code and making it accessible, e.g. via a REST API: - -```java -@POST -public void startJobExecutor() { - ((ProcessEngineConfigurationImpl) processEngine - .getProcessEngineConfiguration()) - .getJobExecutor() - .start(); -} -``` - -A similar piece of code can be implemented to allow to stop the job executor. - -### Suspending specific service calls - -When you want to _avoid certain services to be called_ because they are down or faulty, you can suspend the corresponding job definitions, either using [Cockpit](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/suspension/#job-definition-suspension) or using an API ([Java](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/ManagementService.html) or [REST](https://docs.camunda.org/manual/latest/reference/rest/job/put-activate-suspend-by-job-def-id/)). - -By using the API, you can even _automate suspension_, e.g. by monitoring and recognizing when a target system goes down. By using naming conventions and accordingly customized job definition queries, you can then find all job definitions for that target system (e.g. "SAP") and suspend them until the target system goes up again. - -### Suspending whole processes - -Sometimes, you may want an _emergency stop_ for a specific process instance or all process instances of a specific process definition, because something behaves strange. Suspend it using [Cockpit](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/suspension/#process-definition-suspension) or using an API ([Java](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/engine/RuntimeService.html) or [REST](https://docs.camunda.org/manual/latest/reference/rest/process-definition/put-activate-suspend-by-id/)) until you have clarified what's going on. - -## Create backups - -1. Camunda stores all state information in its _database_. Therefore, backup your database by means of your database vendors tools or your favorite tools. -2. The Camunda _container installation_, as well as the _process application deployments_, are fully static from the point of view of Camunda. Instead of backing up this data, we recommend doing a script-based, automated installation of containers, as well as process applications in order to recover easily in case anything goes wrong. - -## Updating Camunda - -For updating Camunda to a new version, follow the guide for [patch level updates](https://docs.camunda.org/manual/latest/update/patch-level/) or one of the dedicated [minor version update guides](https://docs.camunda.org/manual/latest/update/minor/) provided for each minor version release. 
- -A [rolling update](https://docs.camunda.org/manual/latest/update/rolling-update) feature has been introduced in version 7.6. This allows users to update Camunda _without having to stop the system_. Outdated engine versions are able to continue to access an already updated database, allowing updates to clustered application servers one by one, without any downtime. - -### Preparation - -1. Before touching the servers, all unit tests should be executed with the desired Camunda version. -2. Check running processes in Cockpit - -- Handle open incidents -- Cancel undesired process instances if any - -3. Make a backup (see above) - -### Rollout - -- Shut down all application server(s) (unless performing a rolling update in which only one cluster node is taken down at a time after the database has been updated). - -- Update database using SQL scripts provided in the distro (all distros contain the same scripts) - - Ensure you also execute all patch level scripts - - Run all update scripts - - To check which version is in the database, check for missing tables, indexes, or columns from the update scripts - -```SQL -SELECT TABLE_NAME, INDEX_NAME FROM SYS.USER_INDEXES WHERE INDEX_NAME like 'ACT_IDX_%' ORDER BY TABLE_NAME, INDEX_NAME; -SELECT TABLE_NAME FROM SYS.USER_TABLES WHERE TABLE_NAME LIKE 'ACT_%' ORDER BY TABLE_NAME; -``` - -- Update applications and application server(s) or container(s) -- Start application server(s) or container(s) -- Check logfile for exceptions -- Check Cockpit for incidents -- Test application using UI or API -- Repeat in all stages diff --git a/versioned_docs/version-8.2/components/best-practices/operations/performance-tuning-camunda-c7.md b/versioned_docs/version-8.2/components/best-practices/operations/performance-tuning-camunda-c7.md deleted file mode 100644 index 4a97ade0588..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/operations/performance-tuning-camunda-c7.md +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: "Performance tuning Camunda 7" -tags: - - Performance -description: "Understand influencing aspects on performance and apply tuning strategies. For example, by configuring the job executor or applying external tasks." ---- - -Understand influencing aspects on performance and apply tuning strategies appropriately, for example, by configuring the job executor or applying external tasks. When facing concrete challenges, look at scenarios like the proper handling of huge batches. - -:::caution Camunda 7 only -This best practice targets Camunda 7.x only! The Camunda 8 stacks differ in regards to performance and scalabilities and requires different strategies we currently work on providing as best practice. -::: - -## Performance basics - -Note that this document assumes some understanding of fundamentals of underlying technologies such as the following: - -- Database fundamentals -- Monitoring, observability, and benchmark tools -- JVM fundamentals - -### Setting up monitoring - -It's important to **set up proper monitoring** as described in our [Monitoring Best Practice](../operating-camunda-c7/). Writing the value of certain performance indicators over time can help to judge the urgency of certain bottlenecks or to warn you before an overload will happen. - -### Runtime database - -The database i/o for **writing** state changes of process instances to your **runtime tables** depend on your use case. The following are the fundamental factors: - -- The complexity of process models - measured by the **number of save points**. 
-- The **number of started process instances** - measured per time unit. -- The **data attached** to process instances (aka process variables) - measured in bytes. -- The average **duration** of process instances, as the longer they need to complete (and hence wait in a persistent state) the less database traffic their total number of save points cause per time unit, but the more data you have stored in the runtime database. - -The performance for **querying and reading** from the runtime tables is most influenced by the process variables/business data you use. For every process variable used in a query, a join is needed on SQL level, which influences performance. This can hit you, especially when doing message correlation or tasklist queries. You can tune performance **by using indices** as described below. - -Further database tuning may be required depending on the specific use case and performance requirements. In combination with other configurations mentioned above, specific performance goals can be reached for querying and reading from runtime by using **indices**. - -### History database - -Camunda uses a **relational database as history backend** by default. The i/o for **writing** process instance information to the **history** primarily depends on the [History Level](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#choose-a-history-level) chosen. This is one of the biggest tuning opportunities when it comes to database i/o and the simplest method to reduce load on the database is to reduce the history level. - -It is possible to hook-in a [Custom History Backend](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#provide-a-custom-history-backend), e.g. to leverage alternative data stores (like NoSQL, for example using the Elastic stack). A [complete example](https://github.com/camunda/camunda-bpm-examples/tree/master/process-engine-plugin/custom-history-level) is available. - -By default, the history database tables **(denoted by HI)** and the runtime database tables **(denoted by RU)** share the same schema. - -Separating your runtime database from the historical database is theoretically possible by implementing a custom history backend. This custom backend could then store the data in a different database instance. But note that many of Camunda Cockpit's capabilities depend on both data sets. Writing history to another database instance would cause Cockpit to function incorrectly. - -A valid strategy is to write the data to a custom backend (like NoSQL) for long time retrieval, but also to the normal Camunda tables for operations. Then, you can delete the history from the Camunda database after short intervals using [history cleanup](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/#history-cleanup). - -A simpler and easier to manage strategy is to filter data using a Camunda custom history mechanism. For this, use history level **CUSTOM** and filter the data yourself. Just extend the built-in **HistoryEventHandler** and hook that into your process engine configuration: - -```Java -public class CamundaFilterHistoryEventHandler extends DbHistoryEventHandler { - - @Override - public void handleEvent(HistoryEvent historyEvent) { - if (historyEvent instanceof HistoricVariableUpdateEventEntity) { - if (...) 
{
-        // ignore some variable update events
-        HistoricVariableUpdateEventEntity variableUpdateEvent = (HistoricVariableUpdateEventEntity) historyEvent;
-        log.info("Ignore event for variable " + variableUpdateEvent.getVariableName() + ".");
-        return;
-      }
-    }
-    // handle all other events
-    super.handleEvent(historyEvent);
-  }
-}
-```
-
-Typical use cases are:
-
-- Filtering high-volume but unnecessary events from the history in order to improve performance
-- Filtering sensitive data which should not be written to history (e.g. individual-related data)
-
-### Thread handling and the job executor
-
-Make sure you understand [save points and threading behavior](../../development/understanding-transaction-handling-c7/).
-
-Save points are the tool to change the threading and scaling behavior of a process instance. The more you use them, the more work will be done by the [job executor](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/), which is the key component to look at when you want to improve your system's performance.
-
-The **default configuration** of the job executor is typically **not sufficient** and **must be tuned**; there are no generally sensible defaults. Strategies are described below.
-
-[Job prioritization](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#job-prioritization) and the configured [retry strategy](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#failed-jobs) can **influence the execution order of jobs**, which is also useful when you hit exceptions (e.g. a network connection is down). The default strategy retries three times without a delay, which normally should be changed to something more meaningful.
-
-You have to set the retry strategy for every save point. Be aware that retries increase the load on the system: every re-execution of a job needs a new transaction, database connection, and thread, plus any additional processing required by your business logic.
-
-[Exclusive Jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs) are the default in Camunda, which means that _for one process instance_ there is always **only one job executed** in parallel. This is a safety net to avoid optimistic lock exceptions, as multiple parallel paths might conflict by writing to the same database row.
-
-You can **change this configuration** to run jobs of one process instance in parallel if you make sure not to create optimistic lock exceptions by a **fitting process design**. Additionally, handle optimistic lock exceptions properly by doing **retries**.
-
-Keep in mind that parallel processing of jobs combined with many optimistic lock exceptions causes overhead and might slow down your system. Using these parallel processing features is not recommended for most use cases, as it adds complexity; test carefully wherever you attempt it.
-
-### Considering external tasks
-
-An important **alternative to job handling by the job executor**, where Camunda does the thread handling, is [external tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/), where an external worker or application does the work.
-
-This makes it easy to throttle execution using a thread pool or a cluster of worker applications: you might allow only one worker process or thread in parallel, or scale up workers and threads as required.
-
-With external tasks, complex logic or expensive, blocking calls to external systems run outside of Camunda, so blocked threads within Camunda are typically no longer a problem.
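-
-To make this concrete, here is a minimal sketch of such an external worker using the `camunda-external-task-client-java` library (the polling model it relies on is described in the next paragraphs). The REST endpoint, topic name, and timeouts are illustrative assumptions, not values prescribed by this guide:
-
-```java
-import org.camunda.bpm.client.ExternalTaskClient;
-
-public class SapIntegrationWorker {
-
-  public static void main(String[] args) {
-    ExternalTaskClient client = ExternalTaskClient.create()
-        .baseUrl("http://localhost:8080/engine-rest") // engine REST endpoint (assumption)
-        .asyncResponseTimeout(20000)                  // long polling reduces idle requests
-        .maxTasks(10)                                 // fetch at most 10 tasks per polling cycle
-        .build();
-
-    client.subscribe("invoice-sap-integration")       // hypothetical topic name
-        .lockDuration(60000)                          // lock fetched tasks for 60 seconds
-        .handler((externalTask, externalTaskService) -> {
-          // call the remote system / execute the business logic here
-          externalTaskService.complete(externalTask); // report completion back to Camunda
-        })
-        .open();
-  }
-}
-```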
-
-When using [external tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/), Camunda does not actively call your business logic, but stops in a wait state and persists the state in the database. You are responsible for querying or polling these tasks using the Camunda API and executing your logic. **You fully control threading behavior** yourself and can influence scaling.
-
-It's important to understand the external task API and its behavior: external tasks are not free of cost, as they still incur Camunda transaction overhead, and you additionally have to operate and manage the external workers. Understanding the life cycle of an external task is crucial to designing and managing external workers.
-
-### Avoid JVM tuning
-
-It is normally **not required** to tune the Java Virtual Machine (JVM). It's better to concentrate on the strategies described in this article.
-
-If you have hints that you have memory issues, GC problems, or thread locking, employ a JVM profiling tool as suggested in the tools section.
-
-### Common pitfalls with process variables
-
-Below are some common scenarios that could potentially cause issues. Be sure to read the section on [handling data in processes](../../development/handling-data-in-processes/) to understand the best options for dealing with potential data-flow and business data in Camunda.
-
-#### Performance degrades after storing large files as variables
-
-Problem:
-
-- BLOB selection leads to huge RAM allocation
-- Operations become more costly (e.g. VACUUM)
-- Replication becomes slower
-
-Solution:
-
-- Store large files in a dedicated third-party CMS
-- Only store a file reference as a variable in Camunda
-
-#### In production, variable values are reported as too long
-
-Problem:
-
-- When storing variable values of type _string_, the character limit is 2000 for Oracle.
-
-Solution:
-
-- Reduce the length of the value.
-- Store the string as an object variable instead.
-
-#### Optimistic locking exceptions occur when updating variables using the external tasks API
-
-Problem:
-
-- The same variables are updated by multiple workers; consequently, the same row in the database is updated.
-
-Solution:
-
-- Use the local API when updating variables. You must combine this with input/output mappings to have access to the variables in subsequent activities.
-
-#### Using Camunda as a source of truth for task data
-
-Problem:
-
-- Storing a large number of variables leads to a very large ACT_RU_VARINST table and slow queries on several APIs.
-
-Solution:
-
-- Store variables in an external data store or in separate tables in the Camunda schema. Learn more about [handling data in processes](../../development/handling-data-in-processes/).
-
-## Scaling basics
-
-In general, process engine performance is highly dependent on your usage scenarios. There isn't a one-size-fits-all answer, but as our most senior consultant keeps saying, "So far we have found a solution for every high-performance scenario we encountered with customers."
-
-### Basic scaling and failover
-
-Basic scaling of Camunda is very simple: connect multiple engines to the same database to form a [cluster](https://docs.camunda.org/manual/latest/introduction/architecture/#clustering-model). All data is synchronized through the database, so clustering requires no special configuration for Camunda. You can implement auto-scaling with container orchestration systems like Kubernetes or OpenShift.
- -Camunda requires [READ COMMITTED transaction isolation](https://docs.camunda.org/manual/latest/user-guide/process-engine/database/#isolation-level-configuration) and [synchronous replication](https://docs.camunda.org/manual/latest/introduction/supported-environments/#database-clustering-replication) to all active cluster nodes. - -### Understanding cluster and load balancing options - -Load balancing has two layers: - -- Load balancing on the inbound channel is out-of-scope for Camunda, instead use standard third-party software like an HTTP load balancer or messaging. - -- Job execution (also known as asynchronous processing or `_jobs_`) in Camunda can be used to do load balancing, using multiple threads and multiple cluster nodes. This is described in more detail in the following sections. - -## Running load tests - -When you are in doubt if a certain load requirement can be tackled by Camunda, you should run a load test. This normally involves the following phases: - -- Prepare an _environment_ which is as close to production as possible, otherwise results might be biased. -- Prepare concrete _scenarios_ you want to run, which includes e.g. BPMN workflows that are realistic for you. If you typically run synchronous service tasks do so in the scenarios. If you have big payloads use them. If you leverage multiple instance tasks make sure your scenario also contains them. -- Define _clear goals_ for the load tests, e.g. you might need to run at least **1000 workflow instances/second**, or you might need to keep **latency below 50 ms for the 95th percentile**. -- Prepare _load generation_, which is not always easy as you have to stress your system in a way, that you cannot do by one simple client. -- Prepare _monitoring_ to analyze the situation if you run into problems. Typical measures are (see below for a more complete list): - -Java memory consumption, especially garbage collection and potential memory leaks, often occur due to issues in surrounding components. - -These problems can be spotted by checking which objects occupy a lot of memory using a JVM observability tool. - -Monitor load on the database to avoid overloading the database. It's sometimes better to reduce the number of connections in your connection pool. - -Typical monitoring and profiling tools our customer use: - -- Basic tools available with the Java installation - - [VisualVM](https://docs.oracle.com/javase/8/docs/technotes/guides/visualvm/profiler.html) - - JConsole - - JVM Thread Dumps -- Commercial offerings - - App Dynamics - - Dynatrace - - YourKit - -Typical load generation tools our customer use: - -- JMeter -- Postman -- SOAP-UI - -## Resolving overload - -This section applies if the system is experiencing acute problems due to load or poor configuration. - -:::caution Camunda 8 is built with scalability top of mind -Note that Camunda 8 and its workflow engine Zeebe were engineered for performance and scalability. If you hit problems you cannot easily resolve with Camunda 7.x, it might be worth having a look at Camunda 8 instead. -::: - -### Collecting information for root causing - -Initially, we need to have a strategy to deal with problems. Take a minute to think about what principles you will apply to solve acute and generic performance problems. Below are some questions to ask to analyze the root cause: - -- What makes you think there is a performance problem? -- Has this system ever performed well? -- What has changed recently? (Software? Hardware? Load?) 
-- Can the performance degradation be expressed in terms of latency or run time? -- Does the problem affect other people or applications (or is it just you)? -- What is the environment? - - What software and hardware is used? - - Versions? - - Configuration? - -When we suspect (or experience) problems, we typically have a deeper look at: - -- Detailed information about **jobs**, typically retrieved from the database via **SQL queries** (see also [unsupported sample queries](https://github.com/camunda-consulting/code/tree/master/snippets/db-queries-for-monitoring)): - - **# of executed jobs**: How many jobs are currently acquired/locked, which means they are executed at the moment? - - **Cluster distribution**: How are the executed jobs distributed over the cluster? Therefore, look at the lock owner, which is written to the database. - - **# of not yet executed jobs**: How many jobs are currently due, which means the due date is reached or no due date was set, but are not acquired? These are the jobs that should be executed but are not yet. This number should be normally close to zero. Capture the number over time, if it stays above a certain threshold, you have a bottleneck. In this situation, you might even suffer from job starvation, as Camunda does not enforce a FIFO principle for job execution. This situation needs to be resolved. A typical pattern is to experience this overload only on peak times of the day and resolve in quiet times. - -So far, we've never experienced running out of CPU capacity. If that happens, clustering is a very natural choice to solve the problem. But in most cases, applications built on Camunda will more often than not be waiting for i/o (database, remote service calls, etc.) To solve overload problems correctly, you have to analyze the root cause: - -- Basic system metrics for your Camunda application (container, application server or Java process) and database. Plot them over time! -- CPU utilization -- Memory utilization -- I/O -- Response times - -Often, we cannot get metrics from the database due to security restrictions. In this case, we try to measure response times from the database as an indicator of its health. This works very well with dedicated frameworks like App Dynamics. - -- Database information -- Slow query log -- Other utilization information, depending on the concrete database product. Best approach your DBA. - -Collecting this information normally gives a good indication which component is really busy and causes the bottleneck. - -### Using benchmarks and a systematic approach for tuning - -Having an idea about the bottleneck leads you to the proper tuning strategy. However, system behaviors are very complex and experience shows that you need multiple tries to improve the situation. This is typical and not a problem, but makes it important to follow a systematic approach to be able to resolve overload problems. A good background read is [this blog post on scaling Camunda in a cluster](https://blog.camunda.org/post/2015/09/scaling-camunda-bpm-in-cluster-job/). - -The basic strategy is simple: - -- Set up tests and conduct measurements, which give you a **baseline** you can compare against. -- **Change** something, but best only **one thing at a time**. -- Measure again and **compare against your benchmark** so you get an idea how much the change improved the situation. - -For resources like the job executor thread pool, start with small numbers and increase them. 
If you start too big, you always have to check in two dimensions: increasing and decreasing.
-
-:::note
-**Guessing can lead to wrong conclusions**. Hence, we recommend setting up a load testing environment and generating load to get all resources busy. This allows optimizing your system corresponding to your specific load scenario. But we also know that this is hard, especially because you normally have to mock service calls while still simulating realistic response times.
-:::
-
-A good compromise often is:
-
-- Monitor the load on your production systems (as indicated above, e.g. using database queries).
-- Change settings and inspect the impact over time.
-
-:::note
-This is not a scientific but rather a hands-on approach. Production load might vary very much, so plan enough time to allow regression towards the mean, and keep an eye on other performance indicators (like the number of process instances started) to judge the results realistically.
-:::
-
-### Tuning the job executor
-
-There is no generally sensible configuration of the job executor. The configuration options and their defaults are (element and property names as used in `bpm-platform.xml`):
-
-```xml
-<job-executor>
-  <properties>
-    <property name="corePoolSize">3</property> <!-- (1) -->
-    <property name="maxPoolSize">5</property>
-    <property name="queueSize">10</property> <!-- (2) -->
-  </properties>
-  <job-acquisition name="default">
-    <properties>
-      <property name="maxJobsPerAcquisition">3</property> <!-- (3) -->
-      <property name="lockTimeInMillis">300000</property> <!-- (4) -->
-      <property name="waitTimeInMillis">5000</property>   <!-- (5) -->
-    </properties>
-  </job-acquisition>
-</job-executor>
-```
-
-1
-
-Number of threads that execute jobs.
-
-2
-
-Number of jobs that can be queued in an in-memory queue, waiting for an execution thread to become available.
-
-3
-
-Number of jobs acquired at once (in the database).
-
-4
-
-Time the job will be locked for a specific job executor.
-
-5
-
-Idle time for acquisition if no executable job was found.
-
-A meaningful configuration has to balance these values according to the given situation. In order to give hints, you need to understand some basics:
-
-- It does not make sense to have more **active threads** than the CPU cores can directly handle. Otherwise, you will just swap threads in and out and hinder efficient computation.
-- Whenever a **thread blocks because of i/o**, e.g. it waits for some database operation to finish, it is not active and the CPU will not be bothered with it.
-
-When you want to figure out **how many threads you can assign to the job executor** thread pool **(1)**, you need to know how many threads are available in total and **how many threads are already in use** by other thread pools (web server and servlets, scheduling frameworks, EJB, JMS, etc.). The more components you run on your machine, the harder it gets to predict the free CPU capacity. This is also true for virtualized environments where resources are shared.
-
-You also have to think about the **nature of your processes**: Do you run **CPU intensive computations** in Camunda job executor threads, or do you **wait most of the time** for remote service calls? Typical processes spend their time waiting for i/o. In this case, you can safely increase the number of threads. Keep in mind that scaling up Camunda puts more load on downstream services and systems, so you might need to throttle it to avoid "denial of service attacks".
-
-When increasing the number of threads, make sure that you also **increase the internal queue size** **(2)**, otherwise it might run empty and your threads don't get new jobs to execute. On the other hand, the queue should not be made too big. In a cluster, **too big a queue size** can lead to one node taking all jobs into its queue, **leaving other cluster nodes idle**. If you queue up **more jobs than you can finish within the lock timeout** **(4)**, jobs time out and will be executed twice (with one execution running into an optimistic lock exception).
- -A typical approach to tune performance is: - -- Start with the number of threads = CPU cores \* 1.5 -- Increase queue size stepwise until there is no gain in throughput anymore because all threads are "busy" waiting for i/o. -- Now increase worker threads and afterward queue size and always check that this improves throughput. -- Whenever you reach a limit, you found your upper configuration limit, which is typically optimal for production. - -As already indicated, when you dive deep into job executor tuning because of high volume operations, it might be worth to take one step back and think about using [external tasks](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) as an alternative. This often scales better, as a worker can, for example, collect a huge amount of tasks and just report completion back, how this is executed and scaled can be completely decided by you. - -### Tuning the database connection pool - -A resource that the process engine and the job executor heavily depend on are database connections. They are provided by a JDBC data source which has a pool of connections. - -First, you should find out which connection pool implementation is used based on your project's dependencies: - -- For Spring Boot 2.x, [HikariCP](https://github.com/brettwooldridge/HikariCP) is the default connection pool. Camunda Run also uses this. -- If you are not sure, here take a look at [this code example to detect data source implementation](https://www.mkyong.com/spring-boot/spring-boot-how-to-know-which-connection-pool-is-used/). - -Preferably, use [HikariCP](https://github.com/brettwooldridge/HikariCP) and configure its [settings](https://github.com/brettwooldridge/HikariCP#gear-configuration-knobs-baby) using `spring.datasource.hikari.*` properties. HikariCP's default pool size is 10. Their website provides an [article about connection pool sizing](https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing). - -### Resolving database overload - -Having tuned the job execution the database might become a bottleneck when handling high-load scenarios. A very simple approach is then to **tune the database or assign more resources to it**. It is also possible to **tune some database queries** as described below. - -If both are not possible or sufficient, check if the database load can be reduced by **changes in your application**. Therefore, you need to analyze the root cause of the load. It is a good idea to partition your database in a way that you see load data for runtime, history, and specifically the table containing byte arrays. Two typical findings are: - -- A lot of data is written into **history**, for example, because you run through a lot of tasks and update a lot of variables. In this case, a good strategy is to reconfigure history to reduce the amount of data or use a custom history backend, as already described. - -- Big chunks of data are written to the byte array table, mostly because you save **too much data as process variable** like big XML or JSON structures. Camunda always needs to update one process variable as a whole, even if you only change some attributes or add lines to a list being part of the data structure. Additionally, the whole chunk is also written to history to keep a history of variable values. In this scenario, it is much more efficient to store the business data as a separate structured entity or into a better fitting storage (like a document database). 
Then Camunda only stores a reference and is freed of a lot of load towards the database. - -Camunda batches SQL statements of the current call and runs them at once at the end of the transaction. Depending on the nature of the process model and the work done in this transaction, this batch might become big. - -### Tuning database queries - -Use cases of Camunda customers differ very much, so we cannot fine-tune our database schema for all use cases out-of-the-box. We strive for an optimal balance between too less and too many indices. As you know your use case in detail you can **improve database performance by adjusting indices** of Camunda tables. Typically, additional indices are added that lead to reduced runtimes and less database load for certain queries. However, this typically affects write performance and has to be balanced depending on the concrete situation at hand. - -In order to find candidates for optimization, **check the slow query log** of your database or discuss with your DBA. - -Examples: - -- Creating an index on process instance end time (`create index PROC_DEF_ID_END_TIME ON ACT_HI_PROCINST (PROC_DEF_ID_,END_TIME_`) in case you query for that very often. -- [Job acquisition](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#the-job-order-of-job-acquisition) contains hints on indices depending on the job executor configuration. - -### Applying sharding - -If none of the above strategies are sufficient, you need to reduce the load put on the Camunda engine as a whole. This can be done by a mechanism called **[sharding]()**. - -Therefore, you distribute the overall load to multiple logical engines (called shards), which itself can be a cluster on its own. Every shard runs its own database. A sharding algorithm and distribution must be implemented. One example was described [by Zalando in this blog post](https://blog.camunda.org/post/2015/03/camunda-meets-cassandra-zalando/). - -The Camunda platform supports multiple engine configurations pointing to different databases on a single application server. When you run Camunda in [container-managed aka infrastructure mode](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-bootstrapping/#shared-container-managed-process-engine), multiple engines work out-of-the-box with no additional code in Camunda's [configuration](https://docs.camunda.org/manual/latest/user-guide/process-engine/multi-tenancy/#one-process-engine-per-tenant) and [APIs](https://docs.camunda.org/manual/latest/reference/rest/overview/#engine-usage). - -The distribution to the different engines (shards) is usually domain-specific and must be implemented as part of your project. When using inversion-of-control (IoC) containers like Spring or CDI, one strategy is to centralize the engine selection in a request-scoped producer for the **ProcessEngine** object. With dependency injection, the rest of the code can then be written as if there is only one **ProcessEngine** instance. - -## Some real-life stories - -In this bonus section, we share some anecdotes which might inspire you when trying to resolve issues in your environment. - -### Session context memory consumption - -In one customer scenario, the REST API was used heavily with basic authentication enabled. The client did not reuse the REST connection and opened a new one for every request, including the authentication information. 
- -On the server side, there was no special configuration given, which means that for every authentication there was a SessionContext created with a certain timeout. This SessionContext was never reused and the default timeout was relatively high (30 minutes in Tomcat). As a result, all this SessionContexts plugged up the memory which ultimately lead to garbage collection cycles being so long, that the whole system was basically just doing garbage collection most of the time. - -This could be resolved by setting a very low `session-timeout`. - -### Spring Boot data collector - -One project had a relatively little heap memory (500 MB) and using [Micrometer Metrics provided by Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-metrics.html) to collect metrics. With around 200 requests/second, the memory required for metrics data consumed around half of the heap and lead into fatal full garbage collection cycles. - -Removing the metrics collections was a quick fix to resolve the problem. - -### Processing high numbers of parallel activities (aka batch processing) - -One concrete scenario is worth looking at, as customers stumble upon it regularly: doing some kind of batch processing via BPMN, where you have a high number of parallel activities in one process instance. - -
    - -The important characteristics are - -- It is modeled using parallel [Multiple Instance](https://docs.camunda.org/manual/latest/reference/bpmn20/tasks/task-markers/#multiple-instance) (MI) -- You have high numbers of elements for the MI (> 1000) -- You are using wait states or save points within the parallel branch - -This scenario is supported by Camunda, but you can run into serious problems. - -:::caution Solved in Camunda 8 -This problem is only a problem with Camunda 7.x! Zeebe, the workflow engine used in Camunda 8, can run high number of parallel activities. -::: - -The basic problem is the [execution tree](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-concepts/#executions) getting really big in this scenario. In most situations, the engine has to load the whole tree in order to do anything, even if that happens only in one parallel path. This not only influences performance, but also adds load to the database. - -Turning off execution pre-fetching (available as internal process engine configuration property) is not recommended, as it may cause other trouble. Cockpit also suffers from huge data chunks, making it slow. - -If you add additional scopes, like the BPMN subprocess **(2)**, this leads to an additional execution being created. Every embedded subprocess doubles the size of the execution tree, so avoid subprocesses in this situation. - -The described problems only arise if you have wait state or save points in your process model, as only then the engine needs to persist the process instance to the database. If you run through the multiple instances in one transaction, the internal optimization removes almost all runtime database update statements, so almost nothing needs to be done (except for the history). - -There is one very specific scenario you need to avoid. When a parallel activity is finished and you want to collect the result in a list, you might use a process variable storing that list **(4)**. With running a lot of instances in parallel, they might finish at the same time and try to change that process variable simultaneously, leading to optimistic lock exceptions. - -This typically leads to retries. Even if this situation can heal itself, it increases the load on the database. Assume that you serialize that list as reasonable big XML (growing to several megabytes) in the process variables. That means Camunda sends this chunk of data to the database in every transaction, but might even lose the commit because of the optimistic lock. Now that situation fuels itself, as commit times increase by having big chunks of data, leading to more parallel activities finishing within that time frame, leading to more optimistic lock exceptions. - -In this situation, the best approach is not to collect any results, at least not in Camunda itself. You might still leverage a simple database table, where every instance can insert a new line for its result. This would remove the lock problems and is very simple to set up. - -In any case, the situation improves if you don't wait for the parallel processing to finish. This avoids a lot of the problem described here. You can also use workarounds like polling for all subprocesses to finish. Obviously, this is not only harder to understand from a business perspective, but also requires more effort to develop, so it should only be used if you run into serious performance trouble. - -
    diff --git a/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes-assets/history-architecture.png b/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes-assets/history-architecture.png deleted file mode 100644 index 55388680652..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes-assets/history-architecture.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes-assets/slides.pptx b/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes-assets/slides.pptx deleted file mode 100644 index dbe2fed7d92..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes-assets/slides.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes.md b/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes.md deleted file mode 100644 index cefca6999f2..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/operations/reporting-about-processes.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Reporting about processes -tags: - - Reporting - - History - - DWH - - BI - - KPI - - SQL - - MIS (Management Information System) -description: "The Camunda engine automatically collects audit information about historical process or instances for users to leverage and generate relevant reports." ---- - -The Camunda engine automatically collects audit information about historical process or decision instances. Leverage this data by generating and displaying business relevant reports. Add business relevant phases and milestones to your process models serving as a basis for key performance indicators (KPIs). - -## Modeling key performance indicators (KPIs) - -When modeling a process, you always add information about important key performance indicators implicitly; for example, by introducing **start and end events**. - -Additionally, you can explicitly add the following: - -- Meaningful additional business **milestones** by modeling **intermediate events**, for example. This might not have any execution semantics other than leaving a trace in the history of the workflow engine. The milestone is met as soon as the process has passed the event. Its status can therefore be **passed** or **not passed**. - -- Meaningful business **phases** by modeling things like (embedded) **subprocesses**. In contrast to a milestone, a phase's state can be **not entered**, currently **active**, or **passed**. - -Consider the following example - a "Tweet Approval Process" shows start and end events as well as **milestones**: - -
    " - -3 - -After one business day, the reviewer is reminded to speed up - and such reviews are internally _marked_ by passing the end event 'Review done slowly'. - -4 - -**Approved tweets** will pass the additional **intermediate event**. The **cycle time** up until that point is automatically captured too. - -5 - -Furthermore, when tweets are successfully published, we are interested in the **ratio** of those tweets... - -6 - -...when compared to tweets that do not get published. Therefore, we model _two different end events_ representing those two business end states of the process. - -:::note -Duplicate tweets will _not be published_ even though they have been _approved_ before. The more precisely we describe and _name_ the business semantics of events, the better our KPI's will reflect the reality we want to measure! -::: - -When you do not (only) want to concentrate on milestones, but _phases_ in your process, model the phases as subprocesses: - -
    - -1 - -The phase _Review_—modeled with a subprocess—will be active, while the human reviewer will need to find time to complete the task... - -2 - -...whereas the phase _Publication_ will be completed automatically - hence process instances "remaining" there for longer than a few seconds will probably indicate ongoing problems with the uptime and reachability of the used services. - -## History architecture - -It is useful to understand the architecture around history data in Camunda 8. - -:::caution Camunda 7 -Note that the history architecture is very different in Camunda 7.x, see [Camunda 7 User Guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/history/). -::: - -![History architecture](reporting-about-processes-assets/history-architecture.png) - -Camunda saves historical data not just when a process instance finishes, but on the go, while a process instance is active. By doing this, Camunda separates runtime data from history data. A growing history will not influence the runtime behavior, and you should never need to access runtime data for reporting purposes. - -Historical data can be leveraged via three possible mechanisms: - -- **Camunda tools**: Leverage Camunda Operate or Camunda Optimize. This is a very simple approach that works out-of-the-box and should satisfy many requirements already. Camunda Operate focuses on operational use cases ("Where is my process? Why did this fail?") whereas Camunda Optimize provides business intelligence about your processes. Optimize allows you to build reports and dashboards including setting alerts for thresholds. - -- **Query API**: Using the public API (currently under development), this has the advantage that you can make use of the history data within your own applications. - -- Pushing **events**: Pushing Camunda events by using [exporters](/docs/components/zeebe/technical-concepts/architecture/#exporters). Note that you can only add own exporters in a Self-Managed setting, not in Camunda 8 SaaS. Exporters have the advantage that you can push the data into any infrastructure you have, and possibly even filter or enrich the data in that step. - -## Connecting custom business intelligence systems (BI), data warehouses (DWH), or monitoring solutions - -You might move data from the Camunda History to a decoupled system like a Business Intelligence (BI) solution, a Data Warehouse (DWH), some Data Lake, or an own monitoring solution, for example based on Prometheus. - -Leveraging typical BI system's **ETL** (extract, transform, and load) features allows you to optimize data structure for your reporting purposes (to _speed up_ report generation) or to combine generic process engine data with business entities (to allow for _more in-depth analysis_). - -To get the data into the BI system, leverage one of the mechanisms described above. Our recommendation generally is: - -- In SaaS, leverage the history API to regularly pull data, as custom exporters are not supported there. 
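-
-For Self-Managed installations, the push-based exporter option mentioned above is available. As a rough sketch of what such an exporter looks like (class name and target system are illustrative assumptions; the interface shown is the Zeebe `Exporter` API):
-
-```java
-import io.camunda.zeebe.exporter.api.Exporter;
-import io.camunda.zeebe.exporter.api.context.Context;
-import io.camunda.zeebe.exporter.api.context.Controller;
-import io.camunda.zeebe.protocol.record.Record;
-
-public class ReportingStoreExporter implements Exporter {
-
-  private Controller controller;
-
-  @Override
-  public void configure(Context context) {
-    // read connection settings for the target reporting store from context.getConfiguration()
-  }
-
-  @Override
-  public void open(Controller controller) {
-    this.controller = controller;
-  }
-
-  @Override
-  public void export(Record<?> record) {
-    // push the record to the external BI/DWH system here, possibly filtered or enriched
-    controller.updateLastExportedRecordPosition(record.getPosition());
-  }
-}
-```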
diff --git a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/process-definition-authorization.png b/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/process-definition-authorization.png deleted file mode 100644 index f75705f57f6..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/process-definition-authorization.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/security-architecture.png b/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/security-architecture.png deleted file mode 100644 index fee73e7fa58..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/security-architecture.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/security-architecture.pptx b/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/security-architecture.pptx deleted file mode 100644 index e70a5cd481e..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/security-architecture.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/sso.jpg b/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/sso.jpg deleted file mode 100644 index 4ce3403d138..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-assets/sso.jpg and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-c7.md b/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-c7.md deleted file mode 100644 index 078eb3808cc..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/operations/securing-camunda-c7.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: "Securing Camunda 7" -tags: - - Security - - SSO - - Authentication - - Authorization -description: "Disallow unauthorized access by securing the Camunda 7.x before going live with your process applications." ---- - -Disallow unauthorized access by securing the Camunda 7.x before going live with your process applications. Understand Camunda user management essentials, enforce authorization for the REST API, define access rights for Camunda specific resources such as process definitions, and consider integrating with your Single-Sign-On (SSO). - -:::caution Camunda 7 only -This best practice targets Camunda 7.x only! For Camunda 8, visit [Zeebe Security](/docs/self-managed/zeebe-deployment/security/). -::: - -## Understanding user management essentials - -We suggest taking a look at the [security](https://docs.camunda.org/manual/latest/user-guide/security/) section of the documentation. - -### Understanding users, groups and tenants - -A **user** refers to a human individual, and a **group** is any custom defined "bundle" of users sharing some usage relevant attributes (like e.g. working on specific business functions). Set up **groups** corresponding to your workflow roles or create new logical roles for that purpose. 
- -Both **groups** and **users** can be added to one or more **tenants** to ensure a certain degree of data isolation between different logical entities (for more information, see [multi-tenancy](https://docs.camunda.org/manual/latest/user-guide/security/)). - -The core of the Camunda engine treats **users**, **groups**, and **tenants** as simple **text strings**. Therefore, you can do things like assign a user task to a group of people by directly referencing the group in your BPMN file, for example: - -```xml - -``` - -Or, claim that user task for a specific user via the Java API by referencing the user with a text string-based user id: - -```java -taskService.claim(taskId, "fozzie"); -``` - -No further concepts exist like logical workflow roles or special mappings. - -Camunda ships with an [IdentityService](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/) which allows you to either manage real users and groups _directly within the Camunda database_, or access the users and group information managed in a directory service database which supports **LDAP** (Lightweight Directory Access Protocol), like Microsoft's "Active Directory" and many others. One can also provide a custom **IdentityService** implementation to satisfy each and every requirement apart from the default identity service options shipped with Camunda. This is particularly helpful if you plan to integrate with a third party identity management system. Using the **IdentityService** is not mandatory - it is _possible_ to reference users and groups within Camunda that are not known by the engine's **IdentityService** at all. This could be useful for testing purposes or when integrating with third party identity management solutions. - -The Camunda LDAP Identity Service doesn’t support tenants. That means tenant-related access restrictions do not work by default when using the LDAP plugin. - -To illustrate, Camunda needs access to (text string based) _users and groups_ in order to: - -- Allow _logging into_ the web applications shipping with it (Camunda Tasklist, Cockpit, etc.) -- Allow Tasklist to, for example, present _open tasks_ available for the groups of the logged in user -- Allow Cockpit to, for example, present just the process definitions related to the _tenant(s)_ the logged in user is associated with. - -Keep in mind that your custom directory service is decoupled from Camunda. While it is possible to delete users and groups or change memberships in your directory service without harming Camunda's runtime, the text strings already known to Camunda won't change without manual intervention. - -### Understanding memberships - -Camunda's [IdentityService](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/) allows you to add users to groups; we call this a **group membership**. Accordingly, adding a user or group to a tenant creates a **tenant membership**. - -### Understanding authentication - -The procedure of _authentication_ makes sure that the user is known to the Camunda engine. When directly using Camunda's Java API, this must be done _for each thread_ by issuing, for example: - -```java -identityService.setAuthenticatedUserId("fozzie"); -``` - -If you use the Java API and do not set the authenticated user, Camunda will not check any authorizations. This is because the engine simply does not know who is logged in. When using the REST API, whether an authentication is set or not depends on the configuration as described below. 
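-
-A minimal sketch of doing this for the Java API, including the cleanup that should always happen afterwards (user id, group name, and task id are illustrative; `Arrays` is `java.util.Arrays`):
-
-```java
-try {
-  // user and group ids are plain text strings from the engine's point of view
-  identityService.setAuthentication("fozzie", Arrays.asList("accounting"));
-  // all engine calls on this thread are now executed as "fozzie"
-  taskService.claim(taskId, "fozzie");
-} finally {
-  // always clean up the thread-local authentication
-  identityService.clearAuthentication();
-}
-```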
- -### Understanding authorizations - -Permissions and restrictions for specific **users** or **groups** to access **resources** within Camunda (e.g. process definitions, tenants, process instances) are called **authorizations**. Because they relate users and groups to Camunda-specific resources, they must always be managed in a Camunda-specific way and be contained in the Camunda database. - -Camunda comes with an [AuthorizationService](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/) API (Java or REST), allowing to manage such authorizations and also ships with a dedicated [admin application](https://docs.camunda.org/manual/latest/webapps/admin/authorization-management) to manage them through a web interface. For example, you might want to give the group "accounting" all rights to access a specific process definition called "invoice": - -![Authorization](securing-camunda-assets/process-definition-authorization.png) - -While some permissions are requirement specific and should be created during deployment, others are created automatically (e.g. for assigned tasks). - -## Securing Camunda with authentication and authorizations - -To better understand the consequences and needs when being faced with the task to secure a Camunda installation, it is good to understand the big picture. - -![Security Architecture](securing-camunda-assets/security-architecture.png) - -1 - -A _request_ is either asking for a REST API endpoint or one of the web applications functionalities. - -2 - -The `ProcessEngineAuthenticationFilter` (for REST) or the `AuthenticationFilter` (for the web applications) check the user's authentication credentials via the _IdentityService_. The filters retrieve groups and tenant memberships and set the authenticated user for the current thread in the engine. - -3 - -The request is _allowed_. - -4 - -The request might also be _denied_, in case the authentication fails (e.g. because the username is unknown or the password does not match). For the web applications, a denied request is redirected to the login page. - -5 - -All applications use Camunda's Java API internally. - -6 - -Under the hood, the engine enforces authorizations by instrumenting SQL queries. That means you can never get any data from a query the current user is not authorized for. - -7 - -As a consequence, only allowed and _accessible data_ will be presented to the user. - -### Securing the Camunda core engine - -You can enable or disable authorization checks for the engine itself. Authorizations will only be checked if you [enable authorization checks](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/#enable-authorization-checks) and _tell the engine who is logged in_ with the _current thread_: - -```java -identityService.setAuthenticatedUserId("fozzie"); -``` - -If you directly use the API and do not tell the process engine who is logged in with the current thread, it will provide full access to all data. - -Authorization is enabled per default in the Camunda distributions, but if you configure and run your own engine (e.g. via Spring), it is disabled by default. - -For the authorization checks (to access specific resources), the engine does not question whether the authenticated user is known to the used IdentityService. As mentioned above, the engine treats users, groups and tenants as strings and grants access if those strings match with the defined authorization rules. 
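-
-As a sketch, such an authorization rule can also be created programmatically, mirroring the "accounting"/"invoice" example shown in the screenshot above (constants and types come from `org.camunda.bpm.engine.authorization`; the chosen permissions are illustrative):
-
-```java
-AuthorizationService authorizationService = processEngine.getAuthorizationService();
-
-Authorization auth = authorizationService.createNewAuthorization(Authorization.AUTH_TYPE_GRANT);
-auth.setGroupId("accounting");                    // the group is just a text string
-auth.setResource(Resources.PROCESS_DEFINITION);
-auth.setResourceId("invoice");                    // key of the process definition
-auth.addPermission(Permissions.READ);
-auth.addPermission(Permissions.CREATE_INSTANCE);  // allow starting instances (illustrative)
-authorizationService.saveAuthorization(auth);
-```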
- -In case you _do not require authorizations_, make sure that [authorization checks are disabled](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/#enable-authorization-checks), since they do have a performance impact. You might not need authorizations if you build your own custom web application handling authentication and authorization itself that just uses Camunda in the background, for example. - -If you have authorization checks enabled, you might or might not want to perform these checks when you execute Java code as part of your workflow. One example could be loading the number of running process instances to be used for some decision. For this reason, you can [enable or disable authorization checks for custom user code](https://docs.camunda.org/manual/latest/user-guide/process-engine/authorization-service/#enable-authorization-checks-for-user-code) separately. - -### Securing Camunda's REST API - -Internally, the REST API is just another client for the Java API which needs to inform the engine about the authenticated user. This only works if you turn on authentication for the REST API. Otherwise, no user is logged in and you have _unrestricted access_. - -Authentication and hence authorization checks are by default disabled for the REST API to allow for a quick getting started experience. - -For real life usage, enable at least **Basic Authentication** for the **REST API** by adjusting the `web.xml` as described in the [User Guide](https://docs.camunda.org/manual/latest/reference/rest/overview/authentication/). The REST API's default `ProcessEngineAuthenticationFilter` authenticates the user with HTTP Basic Auth. It makes use of the `IdentityService` to check the user's password and to load **group** and **tenant** memberships for that user. If that was successful, it sets the user as authenticated for the current thread via the Java API. - -If you require an authentication mechanism other than HTTP Basic Auth, you need to implement your own `AuthenticationFilter`. For more details, see the SSO section below. - -If you do not need the REST API in production, consider undeploying the REST API web application. - -### Securing Camunda's web applications - -The Camunda web applications (Tasklist, Cockpit, Admin) have by default a form based _authentication turned on_. There is no further need for changing any configuration when going into production, apart from the more general consideration to enable a custom identity service provider (see below). -However, ensure that you do not deploy artifacts like the _h2 console_ and the _example applications_ in your production environments. They are solely shipped for development purposes and a smooth experience when getting started with Camunda. - -Internally, Camunda Web Apps use an `AuthenticationFilter` very similar to the REST API `ProcessEngineAuthenticationFilter` described above; it just redirects an unknown user to the login page. - -### Configuring the identity service - -By default, Camunda will manage users and groups directly within the Camunda database. As an alternative to that, you can also enable read-only access to an LDAP-based user/group repository. The [LDAP identity service](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/#the-ldap-identity-service) is implemented as a Process Engine Plugin and can be added to the process engine configuration in order to replace the default database identity service. 
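-
-For example, with the Camunda Spring Boot starter (which picks up `ProcessEnginePlugin` beans), registering the LDAP plugin could look like the following sketch; all connection settings are illustrative assumptions for a fictitious directory:
-
-```java
-@Bean
-public LdapIdentityProviderPlugin ldapIdentityProviderPlugin() {
-  LdapIdentityProviderPlugin ldap = new LdapIdentityProviderPlugin();
-  ldap.setServerUrl("ldap://ldap.example.com:389"); // directory connection (assumption)
-  ldap.setManagerDn("cn=admin,dc=example,dc=com");
-  ldap.setManagerPassword("secret");
-  ldap.setBaseDn("dc=example,dc=com");
-  ldap.setUserSearchBase("ou=people");
-  ldap.setGroupSearchBase("ou=groups");
-  return ldap;
-}
-```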
- -As an alternative to those two possibilities, [implement a custom IdentityProvider](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/) to satisfy each and every other requirement. You can provide _read-only_ or even _writable_ access to your user repository. - -Note that as the LDAP Identity Service doesn’t support tenants (multi-tenancy). For multi-tenancy configured via LDAP, you would therefore need a custom identity service allowing you to retrieve tenant IDs from your LDAP. - -The identity service ships with a [security feature that throttles log in attempts](https://docs.camunda.org/manual/latest/user-guide/process-engine/identity-service/#throttle-login-attempts). That means that a user will not be able to log in for a specific amount of time after an unsuccessful login attempt. Carefully review the default values for this feature's configuration and change them to your requirements if needed. - -### Securing custom code - -The process engine offers numerous extension points for customization of process behavior by using Java code, expression language, scripts, and templates. While these extension points allow for great flexibility in process implementation, they open up the possibility to perform malicious actions when in the wrong hands. It is therefore advisable to restrict access to APIs that allows custom code submission to trusted parties only. You can find more information on that topic in the [User Guide](https://docs.camunda.org/manual/latest/user-guide/process-engine/securing-custom-code/). - -### Securing Camunda Run - -The Camunda Run distributions aim for easy configuration and thus provides a very easy way for common security problems, see this [blog post](https://camunda.com/blog/2021/05/what-you-should-know-about-using-camunda-platform-run-in-production/). - -### Securing your web application container - -Make sure to secure your web application container (e.g. Wildfly or Tomcat) by checking and securing default settings, e.g. by removing any default predefined users allowed to access your container's administration console. - -### Supporting single sign-on (SSO) - -The difficulty with **Single sign-on** (SSO) is that it always works a bit differently depending on your exact environment and SSO framework used. - -Therefore, Camunda only provides the hooks for plugging in the exact mechanism that fits your needs. The key part that you need to touch concerns the authentication filters of Camunda's web applications and the REST API (see illustration above). The idea is to exchange or modify the [AuthenticationFilter](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/webapp/impl/security/auth/AuthenticationFilter.html) defined inside the `WEB-INF/web.xml` file of Camunda's web applications (or the REST API) and hook in your custom SSO provider. That means that your filter implementation circumvents the redirect to Camunda's login page by telling Camunda directly who is currently logged in. - -From Camunda 7.9 on, it is much easier to implement SSO by making use of the [ContainerBasedAuthenticationFilter](https://docs.camunda.org/manual/latest/reference/javadoc/?org/camunda/bpm/webapp/impl/security/auth/ContainerBasedAuthenticationFilter.html). This means that you do not need to replace the existing **AuthenticationFilter** by a custom one anymore, but you only need to add the **ContainerBasedAuthenticationFilter** that ships with the product and implement a custom **AuthenticationProvider** if required. 
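-
-A sketch of such a custom `AuthenticationProvider` that trusts a user id forwarded by an SSO proxy in an HTTP header (the header name is an illustrative assumption):
-
-```java
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.camunda.bpm.engine.ProcessEngine;
-import org.camunda.bpm.engine.rest.security.auth.AuthenticationProvider;
-import org.camunda.bpm.engine.rest.security.auth.AuthenticationResult;
-
-public class SsoHeaderAuthenticationProvider implements AuthenticationProvider {
-
-  @Override
-  public AuthenticationResult extractAuthenticatedUser(HttpServletRequest request, ProcessEngine engine) {
-    String userId = request.getHeader("X-Forwarded-User"); // header set by the SSO proxy (assumption)
-    if (userId == null || userId.isEmpty()) {
-      return AuthenticationResult.unsuccessful();
-    }
-    return AuthenticationResult.successful(userId);
-  }
-
-  @Override
-  public void augmentResponseByAuthenticationChallenge(HttpServletResponse response, ProcessEngine engine) {
-    // nothing to do here: the challenge is issued by the upstream SSO infrastructure
-  }
-}
-```
-
-Such a provider is typically wired in via the `authentication-provider` init parameter of the filter in `web.xml`.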
- -You can get started by looking at some examples showing how this can be achieved for different authentication frameworks: - -- [Very basic authentication filter](https://github.com/camunda-consulting/camunda-webapp-plugins/tree/master/camunda-webapp-plugin-sso-autologin) for the Camunda web apps that reads the user from a provided URL parameter. -- Many _application servers_ support single sign-on out of the box (or through plugins) and can provide the user id to the application. Have a look at the [Single Sign-On Community Extension](https://github.com/camunda/camunda-sso-jboss/). -- It is quite easy to [integrate Camunda with Spring Security](https://github.com/camunda-consulting/code/tree/master/snippets/springboot-security-sso) so that the framework handles authentication and passes the authenticated user on to Camunda. diff --git a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/database-versions.png b/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/database-versions.png deleted file mode 100644 index 0501f099453..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/database-versions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/process-solution-example.png b/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/process-solution-example.png deleted file mode 100644 index 987603773f5..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/process-solution-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/process-solution-v2.png b/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/process-solution-v2.png deleted file mode 100644 index b28b3af8ba1..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/process-solution-v2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/slides.pptx b/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/slides.pptx deleted file mode 100644 index 27f041dc94d..00000000000 Binary files a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions-assets/slides.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions.md b/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions.md deleted file mode 100644 index a80687c2859..00000000000 --- a/versioned_docs/version-8.2/components/best-practices/operations/versioning-process-definitions.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Versioning process definitions -tags: - - Versioning - - Version Migration - - Long Running Processes -description: "For real-life applications, it's crucial to understand how Camunda deals with evolving process definitions by means of versioning." 
---
-
-For real-life applications, it's crucial to understand how Camunda deals with evolving process definitions by means of versioning. As a rule of thumb, we recommend versioning just the process and decision models, but not other process solution artifacts (such as code classes or scripts). Often you might not even want to run multiple model versions at the same time; in that case, you have to think about migrating running process instances to new versions. When modeling very long-running processes (> 6 months), consider cutting them into reasonable pieces to ease managing your versioning requirements.
-
-## Understanding versioning
-
-By default, deploying a process or decision definition means that the workflow engine will check whether the definition has changed. If it has, it registers that deployment as a new version of the definition. By default, running instances continue to run on the basis of the version they started with, while new instances are created based on the latest version of that definition.
-
-![Versions](versioning-process-definitions-assets/database-versions.png)
-
-## Selecting the best versioning approach
-
-### Running versions in parallel
-
-You can run several versions of a model in parallel.
-
-The big _advantage_ of that default behavior is that you can deploy changed process definitions without caring about running process instances. The process engine is able to manage running instances based on different process definitions in parallel.
-
-The _disadvantage_ is that one needs to deal with the operational complexity of different versions of the process running in parallel, as well as the additional complexity in case those processes call subprocesses which have different versions of their own.
-
-Run versions _in parallel_ for:
-
-- _Development_ or _test systems_ for which you do not care about old instances.
-- _Phasing out_ existing instances, as the existing instances need to finish based on the model they were created with, often for _legal reasons_.
-- Situations in which _migration is not advisable_, because it is too complex and too much effort when weighed against its upsides.
-
-### Migrating process instances to a new version
-
-:::caution Camunda 8
-Camunda 8 does not yet support process instance migrations as described here. This feature is currently in development and will be available soon.
-:::
-
-_Migrate_ running instances to the newest definition when:
-
-- Deploying _patches or bug fixes_ of a process model.
-- _Avoiding operational complexity_ due to different versions running in production is a priority.
-
-Migrating process instances can be achieved either programmatically or by using the operations tooling. _Programmatically_, you need to _create a migration plan_ that describes how process instances are to be migrated from one process definition to another. Learn more about [process instance migration in Camunda 7](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-instance-migration/) in the user guide.
-
-```java
-// Sample code from Camunda 7.x, this feature is not yet available in Camunda 8:
-MigrationPlan migrationPlan = processEngine.getRuntimeService()
-  .createMigrationPlan("exampleProcess:1", "exampleProcess:2")
-  .mapActivities("assessCreditWorthiness", "assessCreditWorthiness")
-  .mapActivities("validateAddress", "validatePostalAddress")
-  .mapActivities("archiveApplication", "archiveApplication")
-  .build();
-```
-
-You can then apply such a plan to a set of process instances selected by you, for example as sketched below.
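For illustration only, here is a rough sketch (continuing the Camunda 7 sample above) of how such a plan could be applied programmatically. The process definition id used in the query is the same placeholder as in the plan, and selecting instances by definition id is an assumption; your own selection logic may differ.

```java
// Sketch only (Camunda 7.x): apply the migration plan built above to a set of
// process instances, selected here by their process definition (placeholder id).
RuntimeService runtimeService = processEngine.getRuntimeService();

ProcessInstanceQuery instancesToMigrate = runtimeService
    .createProcessInstanceQuery()
    .processDefinitionId("exampleProcess:1");

runtimeService.newMigration(migrationPlan)
    .processInstanceQuery(instancesToMigrate)
    .executeAsync(); // or execute() for a synchronous migration
```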
-You can also learn [how to use Camunda 7's Cockpit](https://docs.camunda.org/manual/latest/webapps/cockpit/bpmn/process-instance-migration/) to migrate process instances through its UI. An interesting option is that you can export the migration plan you configured in Cockpit as a JSON string. This migration plan can be applied later [via the REST API](https://docs.camunda.org/manual/latest/reference/rest/migration/), making it possible to _fully automate_ migration even if you do not want to program a migration plan in Java.
-
-It's important to understand that process instance migration _maintains the full 'identity' of the migrated process instances_, including their unique IDs and their full history audit trail. However, as the process definition might also change fundamentally between versions, this can have effects on the history log of a process instance that might be unexpected from an end user's or operator's perspective.
-
-### Things to consider before migration
-
-When planning your migration, here are some factors to consider:
-
-- _Do I have a good reason to migrate?_ Technically, you do not have to migrate process instances when using Camunda. Previous process definition instances will simply continue to run as intended (with some important caveats, see other things to consider below). Here are some examples of good reasons to migrate:
-  - Your supporting implementation resources have changed.
-  - Your latest process definition represents a substantial change in your business process.
-  - Your latest process definition fixes a bug.
-  - Your latest process definition enforces some time-sensitive legal obligations or rules.
-- _How big of a difference is there between process definition versions?_ Consider not only the definition itself, but also the data required to be present at any given time in your instance.
-- _Did supporting implementation resources change from the previous deployment?_ If a service implementation changes in the new deployment and the reference to the implementation did not change from the previous deployment, then older process instances that are in flight will utilize the newer implementation by default upon deployment of the new resources. If that breaks older instances, then you must migrate.
-- _Do I have a proper infrastructure to support "real data" testing of my migration plan?_ This might be the most important aspect. An ideal way to test your process instance migration would be to have prod-like data in some kind of staging environment that represents not only the type and quality of existing production data, but also volume, scale, and size. You run your migration plan there so that you know what to expect when it comes time to migrate in production. You also need the ability to quickly reset this data via some kind of snapshot, so that you can test over and over again. You can expect many iterations of your migration plan before you move forward with a solid plan.
-
-For Camunda 7 users, there is more information available in [these migration examples](https://github.com/camunda-consulting/migration-examples/blob/master/README.md).
-
-## Avoid versioning of dependent artifacts
-
-When versioning process or decision definitions, you need to be aware that the process of course communicates with the outside world, e.g. by _calling services_ or by _using forms_ to collect data input from human users. All the additional artifacts needed for that might _depend_ on the details of each other in a subtle way.
-
-Whenever possible, we recommend that you _avoid versioning other artifacts_ beyond the process and/or decision definitions; in other words, just version '.bpmn' and '.dmn' files by using the default mechanism of the process engine. Embed all other artifacts (e.g. classes, templates, scripts) into your normal application (for example a Java or Node.js application) and don't version them.
-
-Of course, this approach requires that you _manage the subtle differences_ needed by running process instances of old versions. There are various options to do that. And even if some of the options discussed below might not sound 'ideal' from a theoretical point of view, they have proved to be _good enough_ for real-life purposes and _much easier to understand_ than complex approaches. As understandability by every team member is a very important argument, we recommend going for the approach that is as simple as possible.
-
-The following options use a Java example of a process solution, containing not only the process model, but also some Java code and an HTML form:
-
-![Sample Process Application](versioning-process-definitions-assets/process-solution-example.png)
-
-### Option 1: Keep the artifacts backwards compatible
-
-_Extend_ the functionality of e.g. a method in `MyClass.java` in a way that can still deal with "old" process instances.
-
-```java
-public class MyClass {
-  public void doSomething(Long customerId) {
-    if(customerId != null) { // <1>
-      // new code introduced
-    }
-  }
-}
-```
-
-1
-
-Assume you introduced a customerId in the new version of the process. Your code can still deal with old cases not aware of a customerId.
-
-### Option 2: Introduce a new artifact for different versions
-
-_Change_ the artifact and add a new version of it to the application. Now you can reference this new artifact from your new version of the process definition, while the old version will continue to use the first version of it.
-
-For example:
-
-- Change the file name for the form from `task-form.html` to `task-form-v2.html`
-- Change the `task type` of a service task from `doSomething` to `doSomethingV2`
-
-![Sample Process Application](versioning-process-definitions-assets/process-solution-v2.png)
-
-Sometimes it is preferable to manage different versions by means of folders/packages. Just make sure to have a clear and straightforward convention to keep track of the versions.
-
-## Dealing with long running processes
-
-In general, _do not be concerned with deploying long-running processes_ which might run days, weeks or even months. After all, this is exactly what Camunda was built to properly deal with.
-
-Having said that, also review the possibilities the workflow engine provides with respect to _cutting process definitions_ (e.g. via _message exchange_ or via _call activities_) and _migrating running process instances_. But even though it's possible to migrate running process instances to a new version (see below), it typically takes a bit of _effort_. Therefore, the information presented in the following sections is meant to enable a conscious decision about where it might make sense for you to avoid the need for migration by cutting processes, and which aspects of versioning behavior you can control by doing that.
-
-### Cutting very long running processes into pieces
-
-The longer the lifespans of process instances are, the bigger the _risk_ that you might want to exchange important software components, such as the workflow engine itself.
Typically, _very long-running, end-to-end processes_ (running longer than _six months_) have periods without activity (e.g. waiting for a certain date in the future). Cut the process into several independent process definitions at these points. - -
    - -1 - -After the mobile phone was shipped, we finish the first process instance and just keep a reminder for the renewal in 24 months. - -2 - -We periodically check due renewals and start new process instances whenever necessary. - -We typically don't model such processes in one diagram it's shown here as a way to show the message flow. Typically, we would rather use a separate diagram per executable process and either leave out the other process completely or show it as a collapsed pool. - -Also try to avoid modeling the complete life-cycle of very long living objects, like a life insurance contract. Only capture the active phases as separate processes (e.g. "Policy Issuing", "Address Change", "Cancellation" or "Death"). - -Having said this, we want to emphasize that the engine is perfectly fine with handling lots of process instances for a long time. So if you want to have process instances waiting for months or years, you can still do so. Just make sure you think about all resulting implications. - -### Using call activities to influence versioning behavior of pieces - -:::caution Camunda 8 -With Camunda 8 you cannot yet influence the version of the started process instance via the call activity. This feature is on the roadmap. At the moment, [a new process instance of the latest process definition version is started](/docs/components/modeler/bpmn/call-activities/). -::: - -When calling separately modeled subprocesses (i.e. _Call Activities_), the default behavior of the process engine is to call the _latest_ deployed version of that subprocess. You can change this default 'binding' behavior to call a _specific_ version or the version which was _deployed_ together with the parent process. - -Keeping in mind pros and cons of versioning as discussed above, we can therefore _encapsulate parts of a process_, for which we want to be able to change the runtime behavior more often into such call activities. This is an especially useful consideration for _long-running processes_. - -
    - -1 - -We could decide that we always want to follow the _latest_ shipping process changes, even if the rules for shipping changed while we are in the order acceptance phase. We for example reason that this acceptance phase could sometimes take a long time, because the procurement for goods currently not shelved happens within that phase. - -2 - -Contrary to that, we could decide that the order billing always happens according to the rules valid at the moment we received the order and instantiated the parent process (_deployment_). We for example reason here that it is critical that the billing follows the rules communicated to the customer together with the offer. diff --git a/versioned_docs/version-8.2/components/components-overview.md b/versioned_docs/version-8.2/components/components-overview.md deleted file mode 100644 index e62f4ca085f..00000000000 --- a/versioned_docs/version-8.2/components/components-overview.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: components-overview -title: Introduction to Components -sidebar_label: Introduction to Components -slug: /components/ -description: "This section contains product manual content for each component in Camunda 8, including conceptual content." -keywords: ["process automation tools"] ---- - -This section contains product manual content for each component in Camunda 8, including conceptual content. Together, these components comprise the Camunda 8 SaaS experience. - -- [Concepts](concepts/what-is-camunda-8.md) - Learn more about a variety of Camunda 8 topics with this conceptual documentation on clusters, processes, job workers, workflow patterns, and more. -- [Console](console/introduction-to-console.md) - With this management application for the included products, create and delete clusters, manage API clients and alerts, and more. -- [Modeler](modeler/about-modeler.md) - Design and implement diagrams using Camunda's modeling tools, including Web Modeler and Desktop Modeler. -- [Connectors](connectors/introduction.md) - Integrate with external systems using these reusable building blocks. -- [Zeebe](zeebe/zeebe-overview.md) - Define processes graphically in BPMN 2.0, choose any gRPC-supported programming language to implement your workers, build processes that react to events from Apache Kafka, and more using this process automation engine powering Camunda 8. -- [Operate](operate/operate-introduction.md) - Monitor and troubleshoot process instances running in Zeebe, and carry out key operations such as resolving incidents and updating process instance variables with Operate. -- [Tasklist](tasklist/introduction-to-tasklist.md) - Implement business processes alongside user tasks in Zeebe, and orchestrate human workflows critical to your business to reduce time-to-value for your process orchestration projects with this interface for manual work. -- [Optimize]($optimize$/components/what-is-optimize) - Geared toward business stakeholders, Optimize offers business intelligence tooling for Camunda enterprise customers. By leveraging data collected during process execution, users can collaboratively examine areas in business processes for improvement. - -:::note -Additionally, Camunda hosts a section of [Best Practices](./best-practices/best-practices-overview.md). A mix of conceptual and practical implementation information, this section hosts our condensed experience using BPMN and DMN on the Camunda tool stack collected by consulting engagement with our customers and feedback from the community. 
-::: - -![Architecture diagram for Camunda including all the components for SaaS](./img/ComponentsAndArchitecture_SaaS.png) - -:::note Looking for deployment guides? - -Deployment guides for Camunda 8 components are available in the [Self-Managed section](/self-managed/about-self-managed.md). - -::: diff --git a/versioned_docs/version-8.2/components/concepts/assets/create-process-with-result.png b/versioned_docs/version-8.2/components/concepts/assets/create-process-with-result.png deleted file mode 100644 index 773b4230a44..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/create-process-with-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/create-process.png b/versioned_docs/version-8.2/components/concepts/assets/create-process.png deleted file mode 100644 index f4761d5b0a8..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/create-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/message-aggregator.png b/versioned_docs/version-8.2/components/concepts/assets/message-aggregator.png deleted file mode 100644 index fabe84a4054..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/message-aggregator.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/message-correlation.png b/versioned_docs/version-8.2/components/concepts/assets/message-correlation.png deleted file mode 100644 index 64a675ee0c7..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/message-correlation.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/message-single-instance.png b/versioned_docs/version-8.2/components/concepts/assets/message-single-instance.png deleted file mode 100644 index 59e3ded0cab..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/message-single-instance.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/order-process.png b/versioned_docs/version-8.2/components/concepts/assets/order-process.png deleted file mode 100644 index 07e87b95d02..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-conditions.png b/versioned_docs/version-8.2/components/concepts/assets/process-conditions.png deleted file mode 100644 index 6b3483e9519..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-conditions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-data-flow.png b/versioned_docs/version-8.2/components/concepts/assets/process-data-flow.png deleted file mode 100644 index 29b0470dd9a..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-data-flow.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-events.png b/versioned_docs/version-8.2/components/concepts/assets/process-events.png deleted file mode 100644 index 499e5562651..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-an-element.png 
b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-an-element.png deleted file mode 100644 index ad6d1fda122..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-an-element.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-interrupting-event-subprocess.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-interrupting-event-subprocess.png deleted file mode 100644 index 1a470837649..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-interrupting-event-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-nested-element.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-nested-element.png deleted file mode 100644 index 277d50363e1..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-activate-nested-element.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-1.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-1.png deleted file mode 100644 index aa80656ac54..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-2.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-2.png deleted file mode 100644 index 9b9f7723a3a..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-3.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-3.png deleted file mode 100644 index 2c95cf51c6f..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-example-3.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-set-variables.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-set-variables.png deleted file mode 100644 index ac5b588afc6..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-set-variables.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-terminate-element-instance.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-terminate-element-instance.png deleted file mode 100644 index 240a3259397..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-terminate-element-instance.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-use-at-your-own-risk.png b/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-use-at-your-own-risk.png deleted file mode 100644 index 1b5b391d177..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-instance-modification/process-instance-modification-use-at-your-own-risk.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-parallel-gw.png b/versioned_docs/version-8.2/components/concepts/assets/process-parallel-gw.png deleted file mode 100644 index b9208f0ec82..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-parallel-gw.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-parallel-mi.png b/versioned_docs/version-8.2/components/concepts/assets/process-parallel-mi.png deleted file mode 100644 index 2ff63f00b68..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-parallel-mi.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/process-sequence.png b/versioned_docs/version-8.2/components/concepts/assets/process-sequence.png deleted file mode 100644 index 55cebecee05..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/process-sequence.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/processes-data-based-conditions.png b/versioned_docs/version-8.2/components/concepts/assets/processes-data-based-conditions.png deleted file mode 100644 index 63126a12e57..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/processes-data-based-conditions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/processes-parallel-gateway.png b/versioned_docs/version-8.2/components/concepts/assets/processes-parallel-gateway.png deleted file mode 100644 index e32ce06f1b9..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/processes-parallel-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/variable-mappings.png b/versioned_docs/version-8.2/components/concepts/assets/variable-mappings.png deleted file mode 100644 index 7391083ff70..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/variable-mappings.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/variable-propagation.png b/versioned_docs/version-8.2/components/concepts/assets/variable-propagation.png deleted file mode 100644 index 377a5b3c300..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/variable-propagation.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/concepts/assets/variable-scopes.png b/versioned_docs/version-8.2/components/concepts/assets/variable-scopes.png deleted file mode 100644 index 1c4d9cefa32..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/variable-scopes.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/assets/zeebe-job-workers-graphic.png b/versioned_docs/version-8.2/components/concepts/assets/zeebe-job-workers-graphic.png deleted file mode 100644 index 3c424a8c2e0..00000000000 Binary files a/versioned_docs/version-8.2/components/concepts/assets/zeebe-job-workers-graphic.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/concepts/backups.md b/versioned_docs/version-8.2/components/concepts/backups.md deleted file mode 100644 index 0aaf2335456..00000000000 --- a/versioned_docs/version-8.2/components/concepts/backups.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -id: backups -title: "Backups" -description: "Learn more about Backups in Camunda 8 SaaS." ---- - -Camunda Enterprise - -You can use the backup feature of Camunda 8 SaaS to regularly back up the state of all of its components (Zeebe, Operate, Tasklist, and Optimize) with _zero downtime_. In case of failures that lead to data loss, you can request to restore the backup. - -A Camunda 8 SaaS backup consists of a data backup of Zeebe, Operate, Tasklist, Optimize, and the backup of exported Zeebe records in Elasticsearch. Since the data of these applications depend on each other, the backup must be consistent across all components. Therefore, the backup of a Camunda 8 cluster is taken as a whole. - -With backups, you can capture snapshots of your data and applications while they are actively in use, resulting in zero downtime or disruption to your operations. Backups are designed specifically for disaster recovery purposes, and should not be used for archival of process data. - -:::caution -Backups are created and managed on a per-cluster basis. It is important to be aware that deleting a cluster will also delete all associated backups. - -Exercise caution when deleting clusters to avoid unintended loss of backups. -::: - -> Your cluster generation needs to be greater or equal to `8.2.4` to support backups. - -## Backup location - -When you create a cluster in Camunda 8 SaaS, you must specify a region for that cluster. - -You also need to specify where the backups for that cluster will be located: - -- By default, the backups will be located in the same region as the cluster. -- For disaster recovery reasons, you can select a "dual-region" backup location. Backups will be automatically replicated in the secondary region to give you better protection in case the primary region experiences disruption. Dual-region backup is offered at no additional cost. - -## Manual backup - -Manual backups refer to the user-initiated process of creating a consistent snapshot of the state of all system components, including Zeebe, Operate, Tasklist, and Optimize. These backups are managed on a per-cluster basis and are primarily designed for disaster recovery purposes. - -### Retention and rate limits - -To ensure system stability, backup operations are subject to rate limits. Specifically, you can perform a backup operation every hour. -However, users can delete an existing backup to create a new one before the rate limit period ends. - -The system retains the three most recent completed backups per cluster. 
Failed backup attempts do not count towards the retention count. When a new backup is successful and the retention count is reached, the oldest backup is automatically deleted. - -## Scheduled backups - -Scheduled backups are created periodically (e.g daily, weekly). They are configured to run automatically on the scheduled time. - -### Retention - -A backup schedule retains the last three successful and failed backups. Failed backups are retained to allow further root-causing why the backup failed. If a backup fails, it is not retried immediately as the failure can lead to instability. - -:::note -If you require more retained backups or more frequent backups, contact your Customer Success Manager to discuss your specific needs. -::: - -## Programmatic access - -The backup operations can be performed programmatically using the Administration API. -This provides the flexibility to seamlessly integrate backup-related tasks with your existing systems and automation workflows. -For detailed information on using the API, refer to the [Administration API reference](/docs/apis-tools/administration-api/administration-api-reference.md). - -## Restore - -To restore your Camunda 8 cluster from a backup (and for any further assistance in general), [contact Camunda support](https://camunda.com/services/support/) to request a restore for your backup. Our support team will assist you with the restoration process and guide you through the necessary steps to recover your cluster from the backup. diff --git a/versioned_docs/version-8.2/components/concepts/clusters.md b/versioned_docs/version-8.2/components/concepts/clusters.md deleted file mode 100644 index 5f5cf566f83..00000000000 --- a/versioned_docs/version-8.2/components/concepts/clusters.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: clusters -title: "Clusters" -description: "Learn more about the clusters available in your Camunda 8 plan." ---- - -A cluster is a provided group of nodes that run Camunda 8. By default, Camunda 8 clusters are production-ready. - -Enterprise plan customers can create as many production or development clusters as they want. Starter plan customers are limited based on the [fair usage limits of the plan](https://camunda.com/legal/fair-usage-limits-for-starter-plan/). - -Production clusters come in three sizes: small (S), medium (M), and large (L). To learn more about the size of cluster best suited for your use case, see our [Best Practices](/components/best-practices/best-practices-overview.md) for more information on [sizing your runtime environment](/components/best-practices/architecture/sizing-your-environment.md#sizing-your-runtime-environment). - -The following table shows each plan and available type or size of cluster: - -| | Development | Production - S | Production - M | Production - L | -| ------------ | ----------- | -------------- | -------------- | -------------- | -| Free Trial | \- | X | \- | \- | -| Free | \- | \- | \- | \- | -| Professional | X | X | \- | \- | -| Enterprise | X | X | X | X | - -When you deploy and execute your [BPMN](/components/modeler/bpmn/bpmn.md) or [DMN](/components/modeler/dmn/dmn.md) models on a production cluster, this might impact your monthly (Professional) or annual (Enterprise) total fee, meaning the more you execute your models, the higher your total fee may be. - -## Free Trial cluster - -Free Trial clusters have the same functionality as a production cluster, but are size small and only available during your trial period. 
You cannot convert a Free Trial cluster to a different kind of cluster. - -Once you sign up for a Free Trial, you are able to create one production cluster for the time of your trial. - -When your Free Trial plan expires, you are automatically transferred to the Free plan. This plan allows you to model BPMN and DMN collaboratively, but does not support execution of your models. Any cluster created during your free trial is deleted, and you cannot create new clusters. - -### Auto-pause - -Free Trial `dev` (or untagged) clusters are automatically paused eight hours after a cluster is created or resumed from a paused state. Auto-pause occurs regardless of cluster usage. - -You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. - -- Clusters tagged as `test`, `stage`, or `prod` do not auto-pause. -- Paused clusters are automatically deleted after 30 consecutive paused days. You can change the tag to avoid cluster deletion. -- No data is lost while a cluster is paused. All execution and configuration is saved, but cluster components such as Zeebe and Operate are temporarily disabled until you resume the cluster. - -:::tip - -To prevent auto-pause, you can: - -- Tag the cluster as `test`, `stage`, or `prod` instead of `dev`. -- [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter, Professional, or Enterprise plan. - -::: - -## Development clusters - -Development clusters are recommended for development, testing, proof of concepts, and demos. - -Professional plan users have the option to create **development clusters**, offering free execution for development. Deployment and execution of models (process instances, decision instances, and task users) is provided at no cost. - -Additionally, the following applies to **development clusters**: - -- **Cluster is not high-available & less hardware**: Reduced hardware resources and availability compared to production cluster (for example, one Zeebe node only). -- **Shorter history of processes and decisions**: Data retention in Operate, Optimize, and Tasklist is reduced to one day. For example, pending or historical process instances are deleted after one day. diff --git a/versioned_docs/version-8.2/components/concepts/data-retention.md b/versioned_docs/version-8.2/components/concepts/data-retention.md deleted file mode 100644 index 6bbecefbe04..00000000000 --- a/versioned_docs/version-8.2/components/concepts/data-retention.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: data-retention -title: "Data retention" -description: "In Camunda 8 SaaS, the following data retention strategies are implemented. This is necessary as the amount of data can grow significantly over time." ---- - -In Camunda 8 SaaS, the following data retention strategies are implemented. This is necessary as the amount of data can grow significantly overtime. These settings are a balance between performance and usability. - -## Default retention time of each application - -The following time-to-live settings are configured in SaaS for each application. These are the defaults for our Starter and Enterprise plans. - -- **Operate**: 30 days -- **Optimize**: 180 days -- **Tasklist**: 30 days -- **Zeebe**: 7 days - -If there are specific requirements for your use-case, [reach out to us](/contact/) to discuss your data retention needs under an Enterprise plan. 
- -## Further information - -The following resources in our [Self-Managed documentation](../../self-managed/about-self-managed.md) describe these data retention concepts in more detail: - -- [Operate Data Retention](/self-managed/operate-deployment/data-retention.md) -- [Optimize History Cleanup]($optimize$/self-managed/optimize-deployment/advanced-features/engine-data-deletion) diff --git a/versioned_docs/version-8.2/components/concepts/encryption-at-rest.md b/versioned_docs/version-8.2/components/concepts/encryption-at-rest.md deleted file mode 100644 index 22f38e0ea53..00000000000 --- a/versioned_docs/version-8.2/components/concepts/encryption-at-rest.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -id: encryption-at-rest -title: "Encryption at rest" -description: "Camunda 8 SaaS cluster data at rest is protected using Google Cloud Platform (GCP) encryption with a provider-managed encryption key." -keywords: - [ - "encryption", - "encryption at rest", - "encryption key", - "provider key", - "software key", - "hardware key", - ] ---- - -Camunda 8 SaaS only - -Camunda 8 SaaS cluster data is encrypted at rest to provide security and protection for your data. - -## Overview - -By default, Camunda 8 SaaS cluster data at rest is protected with a provider-managed encryption key using [Google Cloud Platform (GCP) encryption](https://cloud.google.com/docs/security/encryption/default-encryption). The encryption key is owned and managed by GCP. - -Enterprise customers requiring a higher level of protection can select a dedicated Camunda-managed software or hardware (HSM) encryption key when creating a new cluster. The encryption key is managed by Camunda using Google Cloud Key Management Service (KMS). - -- You can only select the encryption type when [creating a cluster](/components/console/manage-clusters/create-cluster.md). You cannot change the encryption type after cluster creation. -- You can configure encryption keys on a per-cluster basis so that each cluster has a dedicated encryption key. Encryption keys can be configured for all cluster versions. -- You can view cluster encryption key details in **Cluster Details** on the **Console Overview** tab. - -:::note -Backups use the default provider GCP encryption. -::: - -### Encryption types - -The following table summarizes the available types of cluster encryption at rest. - -| Encryption type | Managed by | Protection level | -| :-------------------------------- | :--------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Provider encryption key (default) | Google | Google uses a [FIPS 140-2](https://cloud.google.com/security/compliance/fips-140-2-validated) validated encryption module (certificate 4407) in our production environment. | -| Software encryption key | Camunda |

Google KMS [software](https://cloud.google.com/docs/security/key-management-deep-dive#software_backend_software_protection_level) protection level. Cryptographic operations are performed in software. Compliant with [FIPS 140-2 Level 1](https://cloud.google.com/docs/security/key-management-deep-dive#fips_140-2_validation). |
-| Hardware encryption key | Camunda | Google KMS [hardware](https://cloud.google.com/docs/security/key-management-deep-dive#backend_hardware_protection_level) protection level. Cryptographic operations are performed in a hardware security module (HSM). Compliant with [FIPS 140-2 Level 3](https://cloud.google.com/docs/security/key-management-deep-dive#fips_140-2_validation).
    | - -## Provider encryption key (default) - -By default, Camunda 8 SaaS cluster data at rest is protected using GCP encryption. - -- Provider encryption keys are owned and managed by GCP. -- Google uses a [FIPS 140-2](https://cloud.google.com/security/compliance/fips-140-2-validated) validated encryption module. - -:::info -Learn more about [Google default encryption at rest](https://cloud.google.com/docs/security/encryption/default-encryption) and default provider encryption settings. -::: - -## Camunda-managed software encryption key - -Camunda-managed software encryption keys use the Google KMS [software](https://cloud.google.com/docs/security/key-management-deep-dive#software_backend_software_protection_level) protection level to provide a higher level of protection than default provider encryption. - -- Requires an enterprise plan. -- Software encryption keys are managed by Camunda. -- Software encryption keys are compliant with [FIPS 140-2 Level 1](https://cloud.google.com/docs/security/key-management-deep-dive#fips_140-2_validation). -- Cryptographic operations are performed in software. -- Rotated with zero downtime for security and compliance. - -## Camunda-managed hardware encryption key - -Camunda-managed hardware encryption keys use the Google KMS [hardware](https://cloud.google.com/docs/security/key-management-deep-dive#backend_hardware_protection_level) protection level to provide a higher level of protection than both default provider encryption and Camunda-managed software encryption keys. - -- Requires an enterprise plan. -- Hardware encryption keys are managed by Camunda. -- Hardware encryption keys are compliant with [FIPS 140-2 Level 3](https://cloud.google.com/docs/security/key-management-deep-dive#fips_140-2_validation). -- Rotated with zero downtime for security and compliance. diff --git a/versioned_docs/version-8.2/components/concepts/expressions.md b/versioned_docs/version-8.2/components/concepts/expressions.md deleted file mode 100644 index c016254a409..00000000000 --- a/versioned_docs/version-8.2/components/concepts/expressions.md +++ /dev/null @@ -1,354 +0,0 @@ ---- -id: expressions -title: "Expressions" -description: "Expressions can be used to access variables and calculate values dynamically." ---- - -Expressions can be used to access variables and calculate values dynamically. - -This is particularly useful when [automating a process using BPMN](../../guides/automating-a-process-using-bpmn.md) and [orchestrating human tasks](../../guides/getting-started-orchestrate-human-tasks.md). 
- -The following attributes of BPMN elements _require_ an expression: - -- Sequence flow on an exclusive gateway: [condition](/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md#conditions) -- Message catch event/receive task: [correlation key](/components/modeler/bpmn/message-events/message-events.md#messages) -- Multi-instance activity: [input collection](/components/modeler/bpmn/multi-instance/multi-instance.md#defining-the-collection-to-iterate-over), [output element](/components/modeler/bpmn/multi-instance/multi-instance.md#collecting-the-output) -- Input/output variable mappings: [source](variables.md#inputoutput-variable-mappings) - -Additionally, the following attributes of BPMN elements can define an expression _optionally_, instead of a static value: - -- Timer catch event: [timer definition](/components/modeler/bpmn/timer-events/timer-events.md#timers) -- Message catch event/receive task: [message name](/components/modeler/bpmn/message-events/message-events.md#messages) -- Service task/business rule task/script task/send task: [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition), [job retries](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) -- User task: [assignee](/components/modeler/bpmn/user-tasks/user-tasks.md#assignments), [candidateGroups](/components/modeler/bpmn/user-tasks/user-tasks.md#assignments) -- Call activity: [process id](/components/modeler/bpmn/call-activities/call-activities.md#defining-the-called-process) - -## Expressions vs. static values - -Some attributes of BPMN elements—like the timer definition of a timer catch event—can be defined in one of two ways: - -- As an expression (e.g. `= remainingTime`) -- As a static value (e.g. `PT2H`) - -Expressions always start with an **equals sign** (**=**). For example, `= order.amount > 100`. The text following the equal sign is the actual expression. For example, `order.amount > 100` checks if the amount of the order is greater than 100. - -If the element does not start with the prefix, it is used as a static value. A static value is used either as a string (e.g. job type) or as a number (e.g. job retries). A string value must not be enclosed in quotes. - -:::note -An expression can also define a static value by using literals (e.g. `= "foo"`, `= 21`, `= true`, `= [1,2,3]`, `= {x: 22}`, etc.) -::: - -## The expression language - -An expression is written in **FEEL** (**Friendly Enough Expression Language**). FEEL is part of the OMG's **DMN** (**Decision Model and Notation**) specification. It is designed to have the following properties: - -- Free of side effects -- Simple data model with JSON-like object types: numbers, dates, strings, lists, and contexts -- Simple syntax designed for business professionals and developers -- Three-valued logic (true, false, null) - -Camunda 8 integrates the [FEEL Scala](https://github.com/camunda/feel-scala) engine to evaluate FEEL expressions. The following sections cover common use cases in Zeebe. A complete list of supported expressions can be found in [FEEL expressions](/components/modeler/feel/what-is-feel.md). - -### Access variables - -A variable can be accessed by its name: - -```feel -owner -// "Paul" - -totalPrice -// 21.2 - -items -// ["item-1", "item-2", "item-3"] -``` - -If a variable is a JSON document/object, it is handled as a FEEL context. A property of the context (e.g. 
nested variable property) can be accessed by a period (`.`) and the property name:
-
-```feel
-order.id
-// "order-123"
-
-order.customer.name
-// "Paul"
-```
-
-### Boolean expressions
-
-Values can be compared using the following operators:
-
-| Operator | Description | Example |
-| -------- | ----------- | ------- |
-| `=` (only one equals sign) | equal to | `owner = "Paul"` |
-| `!=` | not equal to | `owner != "Paul"` |
-| `<` | less than | `totalPrice < 25` |
-| `<=` | less than or equal to | `totalPrice <= 25` |
-| `>` | greater than | `totalPrice > 25` |
-| `>=` | greater than or equal to | `totalPrice >= 25` |
-| `between [X] and [Y]` | same as `(v >= [X] and v <= [Y])` | `totalPrice between 10 and 25` |
    - -Multiple boolean values can be combined as disjunction (`and`) or conjunction (`or`): - -```feel -orderCount >= 5 and orderCount < 15 - -orderCount > 15 or totalPrice > 50 -``` - -### Null checks - -If a variable or nested property can be `null`, it can be compared to the `null` value. Comparing `null` to a value different from `null` results in `false`. - -```feel -order = null -// true - if "order" is null or doesn't exist - -order.id = null -// true - if "order" is null, "order" doesn't exist, -// "id" is null, or "order" has no property "id" -``` - -In addition to the comparison with `null`, the built-in function `is defined()` can be used to differentiate between a value that is `null` and a value that doesn’t exist. - -```feel -is defined(order) -// true - if "order" has any value or is null - -is defined(order.id) -// false - if "order" doesn't exist or it has no property "id" -``` - -### String expressions - -A string value must be enclosed in double quotes. Multiple string values can be concatenated using the `+` operator. - -```feel -"foo" + "bar" -// "foobar" -``` - -Any value can be transformed into a string value using the `string()` function. - -```feel -"order-" + string(orderId) -// "order-123" -``` - -More functions for string values are available as [built-in string functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md) (e.g. contains, matches, etc.) - -### Temporal expressions - -The current date and date-time can be accessed using the built-in functions `today()` and `now()`. To store the current date or date-time in a variable, convert it to a string using the built-in function `string()`. - -```feel -now() -// date and time("2020-04-06T15:30:00@UTC") - -today() -// date("2020-04-06") - -string(today()) -// "2020-04-06" -``` - -The following operators can be applied on temporal values: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Temporal Type | Examples | Operators |
-| ------------- | -------- | --------- |
-| date | `date("2020-04-06")` | date + duration, date - date, date - duration |
-| time | `time("15:30:00")`, `time("15:30:00+02:00")`, `time("15:30:00@Europe/Berlin")` | time + duration, time - time, time - duration |
-| date-time | `date and time("2020-04-06T15:30:00")`, `date and time("2020-04-06T15:30:00+02:00")`, `date and time("2020-04-06T15:30:00@UTC")` | date-time + duration, date-time - date-time, date-time - duration |
-| duration | `duration("PT12H")`, `duration("P4Y")` | duration + duration, duration + date, duration + time, duration + date-time, duration - duration, date - duration, time - duration, date-time - duration, duration * number, duration / duration, duration / number |
-| cycle | `cycle(3, duration("PT1H"))`, `cycle(duration("P7D"))` | |
    - -A temporal value can be compared in a boolean expression with another temporal value of the same type. - -The `cycle` type is different from the other temporal types because it is not supported in the FEEL type system. - -Instead, the `cycle` type is defined as a function that returns the definition of the cycle as a string in the ISO 8601 format of a recurring time interval. - -The function expects two arguments: the number of repetitions, and the recurring interval as duration. If the first argument is `null` or not passed in, the interval is unbounded (i.e. infinitely repeated). - -```feel -cycle(3, duration("PT1H")) -// "R3/PT1H" - -cycle(duration("P7D")) -// "R/P7D" -``` - -### List expressions - -An element of a list can be accessed by its index. The index starts at `1` with the first element (_not_ at `0`). - -A negative index starts at the end by `-1`. If the index is out of the range of the list,`null` is returned instead: - -```feel -["a","b","c"][1] -// "a" - -["a","b","c"][2] -// "b" - -["a","b","c"][-1] -// "c" -``` - -A list value can be filtered using a boolean expression; the result is a list of elements that fulfill the condition. - -The current element in the condition is assigned to the variable `item`: - -```feel -[1,2,3,4][item > 2] -// [3,4] -``` - -The operators `every` and `some` can be used to test if all elements or at least one element of a list fulfill a given condition: - -```feel -every x in [1,2,3] satisfies x >= 2 -// false - -some x in [1,2,3] satisfies x > 2 -// true -``` - -### Invoke functions - -A function can be invoked by its name followed by the arguments. The arguments can be assigned to the function parameters either by their position or by defining the parameter names: - -```feel -floor(1.5) -// 1 - -count(["a","b","c"]) -// 3 - -append(["a","b"], "c") -// ["a","b","c"] - -contains(string: "foobar", match: "foo") -// true -``` - -FEEL defines several built-in functions: - -- [Conversion functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-conversion.md) -- [Boolean functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-boolean.md) -- [String functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md) -- [Numeric functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-numeric.md) -- [List functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md) -- [Context functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-context.md) -- [Temporal functions](/components/modeler/feel/builtin-functions/feel-built-in-functions-temporal.md) - -## Next steps - -- [FEEL](/components/modeler/feel/what-is-feel.md) -- [FEEL data types](/components/modeler/feel/language-guide/feel-data-types.md) -- [FEEL expressions](/components/modeler/feel/language-guide/feel-expressions-introduction.md) -- [DMN specification](https://www.omg.org/spec/DMN/About-DMN/) diff --git a/versioned_docs/version-8.2/components/concepts/incidents.md b/versioned_docs/version-8.2/components/concepts/incidents.md deleted file mode 100644 index 0b4cd4e269f..00000000000 --- a/versioned_docs/version-8.2/components/concepts/incidents.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -id: incidents -title: "Incidents" -description: "A process instance is stuck at a particular point, and requires user interaction to resolve the problem." ---- - -In Camunda 8, an incident represents a problem in process execution. 
This means a process instance is stuck at a particular point, and requires user interaction to resolve the problem. - -Incidents are created in different situations, including the following: - -- A job is failed and it has no retries left. -- An input or output variable mapping can't be applied. -- A condition can't be evaluated. -- A decision can't be evaluated. - -:::note -Note that not all errors will necessarily lead to incidents. For example, unexpected errors in Zeebe do not always result in incidents. -::: - -## Resolving - -To resolve an incident, complete the following steps: - -1. Identify and resolve the problem. -2. Mark the incident as resolved, triggering retry process execution. -3. If the problem still exists, a new incident is created. - -### Resolving a job-related incident - -If a job fails and has no retries remaining, an incident is created. There are many different reasons why the job may have failed. For example, the variables may not be in the expected format, or a service is not available (e.g. a database). - -If the variables are causing the incident, complete the following steps: - -1. Update the variables of the process instance. -2. Increase the remaining retries of the job. -3. Mark the incident as resolved. - -:::note -It's recommended you complete these operations in [Operate](/components/operate/operate-introduction.md). -::: - -It is also possible to complete these steps via the client API. Using the Java client, this could look like the following: - -```java -client.newSetVariablesCommand(incident.getElementInstanceKey()) - .variables(NEW_PAYLOAD) - .send() - .join(); - -client.newUpdateRetriesCommand(incident.getJobKey()) - .retries(3) - .send() - .join(); - -client.newResolveIncidentCommand(incident.getKey()) - .send() - .join(); -``` - -When the incident is resolved, the job can be activated by a worker again. - -### Resolving a process instance-related incident - -If an incident is created during process execution and it's not related to a job, the incident is usually related to the variables of the process instance. For example, an input or output variable mapping can't be applied. - -To resolve the incident, update the variables and mark the incident as resolved. - -:::note -It's recommended you complete these operations in [Operate](/components/operate/operate-introduction.md). -::: - -Using the Java client, this could look like the following: - -```java -client.newSetVariablesCommand(incident.getElementInstanceKey()) - .variables(NEW_VARIABLES) - .send() - .join(); - -client.newResolveIncidentCommand(incident.getKey()) - .send() - .join(); -``` - -When the incident is resolved, the process instance continues. - -- [Operate](/components/operate/operate-introduction.md) -- [APIs and Clients](/apis-tools/working-with-apis-tools.md) diff --git a/versioned_docs/version-8.2/components/concepts/job-workers.md b/versioned_docs/version-8.2/components/concepts/job-workers.md deleted file mode 100644 index 549097ddb81..00000000000 --- a/versioned_docs/version-8.2/components/concepts/job-workers.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -id: job-workers -title: "Job workers" -description: "A job worker is a service capable of performing a particular task in a process." ---- - -A job worker is a service capable of performing a particular task in a process. - -Each time such a task needs to be performed, this is represented by a job. - -A job has the following properties: - -- **Type**: Describes the work item and is defined in each task in the process. 
The type is referenced by workers to request the jobs they are able to perform.
-- **Custom headers**: Additional static metadata that is defined in the process. Custom headers are used to configure reusable job workers (e.g. a `notify Slack` worker might read out the Slack channel from its header).
-- **Key**: Unique key to identify a job. The key is used to hand in the results of a job execution, or to report failures during job execution.
-- **Variables**: The contextual/business data of the process instance required by the worker to do its work.
-
-## Requesting jobs
-
-Job workers request jobs of a certain type on a regular interval (i.e. polling). This interval and the number of jobs requested are configurable in the Zeebe client.
-
-If one or more jobs of the requested type are available, Zeebe (the workflow engine inside Camunda 8) will stream activated jobs to the worker. Upon receiving jobs, a worker performs them and sends back a `complete` or `fail` command for each job, depending on whether the job could be completed successfully.
-
-For example, the following process might generate three different types of jobs: `process-payment`, `fetch-items`, and `ship-parcel`:
-
-![order-process-model](assets/order-process.png)
-
-Three different job workers, one for each job type, could request jobs from Zeebe:
-
-![zeebe-job-workers-requesting-jobs](assets/zeebe-job-workers-graphic.png)
-
-Many workers can request the same job type to scale up processing. In this scenario, Zeebe ensures each job is sent to only one of the workers.
-
-Such a job is considered activated until the job is completed, failed, or the job activation times out.
-
-On requesting jobs, the following properties can be set:
-
-- **Worker**: The identifier of the worker. Used for auditing purposes.
-- **Timeout**: The time a job is assigned to the worker. If a job is not completed within this time, it can be reassigned by Zeebe to another worker.
-- **MaxJobsToActivate**: The maximum number of jobs which should be activated by this request.
-- **FetchVariables**: A list of required variable names. If the list is empty, all variables of the process instance are requested.
-
-### Long polling
-
-Ordinarily, a request for jobs can be completed immediately when no jobs are available.
-
-To find a job to work on, the worker must poll again for available jobs. This leads to workers repeatedly sending requests until a job is available.
-
-This is expensive in terms of resource usage, because both the worker and the server are performing a lot of unproductive work. Zeebe supports **long polling** for available jobs to better utilize resources.
-
-With **long polling**, a request will be kept open while no jobs are available. The request is completed when at least one job becomes available.
-
-**Long polling** is set during [job activation with the parameter `request-timeout`](../../apis-tools/grpc.md#activatejobs-rpc).
-
-### Job queueing
-
-Zeebe decouples creation of jobs from performing the work on them. It is always possible to create jobs at the highest possible rate, regardless of whether there is a job worker available to work on them. This is possible because Zeebe queues jobs until workers request them.
-
-This increases the resilience of the overall system. Camunda 8 is highly available, so job workers don't have to be highly available. Zeebe queues all jobs during any job worker outages, and progress will resume as soon as workers come back online.
-
-This also insulates job workers against sudden bursts in traffic. Because workers request jobs, they have full control over the rate at which they take on new jobs.
-
-## Completing or failing jobs
-
-After working on an activated job, a job worker informs Camunda 8 that the job has either `completed` or `failed`.
-
-- When the job worker completes its work, it sends a `complete job` command along with any variables, which in turn are merged into the process instance. This is how the job worker exposes the results of its work.
-- If the job worker cannot successfully complete its work, it sends a `fail job` command. Fail job commands include the number of remaining retries, which is set by the job worker.
-  - If `remaining retries` is greater than zero, the job is retried and reassigned.
-  - If `remaining retries` is zero or negative, an incident is raised and the job is not retried until the incident is resolved.
-
-When failing a job, it is possible to specify a `retry back off`: the time to wait before the job is retried.
-This is useful when a job worker communicates with an external system. If the external system is down, retrying the job immediately will not work and eventually results in an incident once the retries run out.
-A `retry back off` delays the retry and gives the external system some time to recover. If no `retry back off` is set, the job is retried immediately.
-
-When completing or failing jobs with [variables](components/concepts/variables.md), the variables are merged into the process at the job's associated task.
-
-- When completing a job, the variables are propagated from the scope of the task to its higher scopes.
-- When failing a job, the variables are only created in the local scope of the task.
-
-:::tip Failing a job with variables
-
-There are several advantages when failing a job with variables. Consider the following use cases:
-
-- You can fail a job and raise an incident by setting the job `retries` to zero. In this case, it would be useful to provide some additional details through a variable when the incident is analyzed.
-- If your job worker can split the job into smaller pieces and finish some but not all of these, it can fail the job with variables indicating which parts of the job were successfully finished and which weren't. Such a job should be failed with a positive number of retries so another job worker can pick it up again and continue where the other job worker left off. The job can be completed when all parts are finished by a job worker successfully.
-
-:::
-
-## Timeouts
-
-If the job is not completed or failed within the configured job activation timeout, Zeebe reassigns the job to another job worker. This does not affect the number of `remaining retries`.
-
-A timeout may lead to two different workers working on the same job, possibly at the same time. If this occurs, only one worker successfully completes the job. The other `complete job` command is rejected with a `NOT FOUND` error.
-
-The fact that jobs may be worked on more than once means that Zeebe is an "at least once" system with respect to job delivery and that worker code must be idempotent. In other words, workers **must** deal with jobs in a way that allows the code to be executed more than once for the same job, all while preserving the expected application state.
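-To see how these pieces fit together, the following is a minimal sketch of a job worker built with the Zeebe Java client. It assumes a configured `ZeebeClient` named `client`; the `process-payment` job type and the `charge` helper are illustrative only.
-
-```java
-// Minimal worker sketch; job type, values, and the charge(...) helper are illustrative.
-final JobWorker worker =
-    client
-        .newWorker()
-        .jobType("process-payment")
-        .handler(
-            (jobClient, job) -> {
-              try {
-                // Perform the actual work; charge(...) is a hypothetical, idempotent operation.
-                final Map<String, Object> result = charge(job.getVariablesAsMap());
-
-                // Completing the job merges the result variables into the process instance.
-                jobClient.newCompleteCommand(job.getKey()).variables(result).send().join();
-              } catch (Exception e) {
-                // Failing the job decrements the retries and delays the next attempt.
-                jobClient
-                    .newFailCommand(job.getKey())
-                    .retries(job.getRetries() - 1)
-                    .retryBackoff(Duration.ofSeconds(30))
-                    .errorMessage(e.getMessage())
-                    .send()
-                    .join();
-              }
-            })
-        .timeout(Duration.ofSeconds(60))
-        .maxJobsToActivate(32)
-        .open();
-```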
- -## Next steps - -- [Zeebe overview](components/zeebe/zeebe-overview.md) diff --git a/versioned_docs/version-8.2/components/concepts/messages.md b/versioned_docs/version-8.2/components/concepts/messages.md deleted file mode 100644 index 980f6bbad57..00000000000 --- a/versioned_docs/version-8.2/components/concepts/messages.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -id: messages -title: "Messages" ---- - -Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called **message correlation**. - -## Message subscriptions - -A message is not sent to a process instance directly. Instead, the message correlation is based on subscriptions that contain the `message name` and the `correlation key` (also known as the correlation value). - -![Message Correlation](assets/message-correlation.png) - -A subscription is opened when a process instance awaits a message; for example, when entering a message catch event. The message name is defined either statically in the process (e.g. `Money collected`) or dynamically as an expression. The correlation key is defined dynamically as an expression (e.g. `= orderId`). The expressions are evaluated on activating the message catch event. The results of the evaluations are used as message name and as correlation key of the subscription (e.g. `"order-123"`). - -When a message is published and the message name and correlation key match to a subscription, the message is correlated to the corresponding process instance. If no proper subscription is opened, the message is discarded. - -A subscription is closed when the corresponding element (e.g. the message catch event), or its scope is left. After a subscription is opened, it is not updated (for example, when the referenced process instance variable is changed.) - -
-**Publish message via zbctl:**
-
-```
-zbctl publish message "Money collected" --correlationKey "order-123"
-```
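-
-The same publish operation is available through the client API. A minimal sketch with the Java client, assuming a configured `ZeebeClient` named `client` (the variables shown are illustrative):
-
-```java
-client
-    .newPublishMessageCommand()
-    .messageName("Money collected")
-    .correlationKey("order-123")
-    .variables(Map.of("amount", 99.95)) // optional payload merged into the process instance on correlation
-    .send()
-    .join();
-```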
    - -## Message buffering - -Messages can be buffered for a given time. Buffering can be useful in a situation when it's not guaranteed the subscription is opened before the message is published. - -A message has a **time-to-live** (**TTL**) which specifies for how long it's buffered. Within this time, the message can be correlated to a process instance. - -When a subscription is opened, it polls the buffer for a proper message. If a proper message exists, it is correlated to the corresponding process instance. In case multiple messages match to the subscription, the first published message is correlated (like a FIFO queue). - -The buffering of a message is disabled when its TTL is set to zero. If no proper subscription is open, the message is discarded. - -
-**Publish message with TTL via zbctl:**
-
-```
-zbctl publish message "Money collected" --correlationKey "order-123" --ttl 1h
-```
    - -## Message cardinality - -A message is correlated only _once_ to a process (based on the BPMN process id), across all versions of this process. If multiple subscriptions for the same process are opened (by multiple process instances or within one instance,) the message is correlated only to one of the subscriptions. - -When subscriptions are opened for different processes, the message is correlated to _all_ of the subscriptions. - -A message is _not_ correlated to a message start event subscription if an instance of the process is active and was created by a message with the same correlation key. If the message is buffered, it can be correlated after the active instance is ended. Otherwise, it is discarded. - -## Message uniqueness - -A message can have an optional message id — a unique id to ensure the message is published and processed only once (i.e. idempotency). The id can be any string; for example, a request id, a tracking number, or the offset/position in a message queue. - -A message is rejected and not correlated if a message with the same name, the same correlation key, and the same id is already buffered. After the message is discarded from the buffer, a message with the same name, correlation key, and id can be published again. - -The uniqueness check is disabled when no message id is set. - -
-**Publish message with id via zbctl:**
-
-```
-zbctl publish message "Money collected" --correlationKey "order-123" --messageId "tracking-12345"
-```
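-
-In the Java client, the time-to-live and the message id described above map to options on the same publish command (a sketch, again assuming a configured `ZeebeClient` named `client`):
-
-```java
-client
-    .newPublishMessageCommand()
-    .messageName("Money collected")
-    .correlationKey("order-123")
-    .timeToLive(Duration.ofHours(1)) // buffer the message for up to one hour
-    .messageId("tracking-12345")     // enables the uniqueness check
-    .send()
-    .join();
-```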
    - -## Message correlation overview - -By combining the principles of message correlation, message uniqueness, and message buffering, very different behaviors can be achieved. Please note that a message name is mandatory, so it is omitted from the table. - -| Correlation key | Message Id | Time to live | Receiver type | Behavior | -| --------------- | ---------- | ------------ | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| set | not set | set to 0 | Start event | A new instance is started if no instance with the correlation key set at start is active, see [single instance](./#single-instance). | -| set | not set | set to 0 | Intermediate event | The message is correlated if a matching subscription is active. | -| set | not set | set > 0 | Start event | A new instance is started if no instance with the correlation key set at start is active during the lifetime of the message; new [equal messages](#message-uniqueness) are buffered. | -| set | not set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription is active; new [equal messages](#message-uniqueness) are buffered. | -| set | set | set to 0 | Start event | A new instance is started if no instance with the correlation key set at start is active and there is no [equal message](#message-uniqueness) in the buffer. | -| set | set | set to 0 | Intermediate event | The message is correlated if a matching subscription is active and there is no [equal message](#message-uniqueness) in the buffer. | -| set | set | set > 0 | Start event | A new instance is started if no instance with the correlation key set at start is active during the lifetime of the message and there is no [equal message](#message-uniqueness) in the buffer. | -| set | set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription is active and there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | not set | set to 0 | Start event | A new instance is started. | -| empty string | not set | set to 0 | Intermediate event | The message is correlated if a matching subscription to the empty string is active. | -| empty string | not set | set > 0 | Start event | A new instance is started. | -| empty string | not set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription to the empty string is active; new [equal messages](#message-uniqueness) are buffered. | -| empty string | set | set to 0 | Start event | A new instance is started if there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | set | set to 0 | Intermediate event | The message is correlated if a matching subscription to the empty string is active and there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | set | set > 0 | Start event | A new instance is started if there is no [equal message](#message-uniqueness) in the buffer. | -| empty string | set | set > 0 | Intermediate event | The message is correlated during the lifetime of the message if a matching subscription to the empty string is active and there is no [equal message](#message-uniqueness) in the buffer. 
| - -## Message patterns - -The following patterns describe solutions for common problems that can be solved using message correlation. - -### Message aggregator - -**Problem**: Aggregate/collect multiple messages, map-reduce, batching - -**Solution**: - -![Message Aggregator](assets/message-aggregator.png) - -The messages are published with a `TTL > 0` and a correlation key that groups the messages per entity. - -The first message creates a new process instance. The following messages are correlated to the same process instance if they have the same correlation key. - -When the instance ends and messages with the same correlation key are not correlated yet, a new process instance is created. - -### Single instance - -**Problem**: Create exactly one instance of a process - -**Solution**: - -![Message Single Instance](assets/message-single-instance.png) - -The message is published with a `TTL = 0` and a correlation key that identifies the entity. - -The first message creates a new process instance. The following messages are discarded and do not create a new instance if they have the same correlation key and the created process instance is still active. diff --git a/versioned_docs/version-8.2/components/concepts/process-instance-creation.md b/versioned_docs/version-8.2/components/concepts/process-instance-creation.md deleted file mode 100644 index 85fba033d9a..00000000000 --- a/versioned_docs/version-8.2/components/concepts/process-instance-creation.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -id: process-instance-creation -title: "Process instance creation" -description: "Depending on the process definition, an instance of it can be created in several ways." ---- - -Depending on the process definition, an instance of it can be created in several ways. - -Camunda 8 supports the following ways to create a process instance: - -- [`CreateProcessInstance` commands](#commands) -- [Message event](#message-event) -- [Timer event](#timer-event) - -## Commands - -A process instance is created by sending a command specifying the BPMN process id, or the unique key of the process. - -There are two commands to create a process instance, outlined in the sections below. - -### Create and execute asynchronously - -A process that has a [none start event](/components/modeler/bpmn/none-events/none-events.md#none-start-events) is started explicitly using **[CreateProcessInstance](/apis-tools/grpc.md#createprocessinstance-rpc)**. - -This command creates a new process instance and immediately responds with the process instance id. The execution of the process occurs after the response is sent. - -![create-process](assets/create-process.png) - -
-**Code example**
-
-Create a process instance:
-
-```
-zbctl create instance "order-process"
-```
-
-Response:
-
-```
-{
-  "processKey": 2251799813685249,
-  "bpmnProcessId": "order-process",
-  "version": 1,
-  "processInstanceKey": 2251799813686019
-}
-```
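-
-The Java client offers the same command. A minimal sketch, assuming a configured `ZeebeClient` named `client` and a deployed `order-process`:
-
-```java
-final ProcessInstanceEvent instance =
-    client
-        .newCreateInstanceCommand()
-        .bpmnProcessId("order-process")
-        .latestVersion()
-        .send()
-        .join();
-
-// The response contains the same identifiers as shown above.
-System.out.println(instance.getProcessInstanceKey());
-```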
    - -### Create and await results - -Typically, process creation and execution are decoupled. However, there are use cases that need to collect the results of a process when its execution is complete. - -**[CreateProcessInstanceWithResult](/apis-tools/grpc.md#createprocessinstancewithresult-rpc)** allows you to “synchronously” execute processes and receive the results via a set of variables. The response is sent when the process execution is complete. - -![create-process](assets/create-process-with-result.png) - -This command is typically useful for short-running processes and processes that collect information. - -If the process mutates system state, or further operations rely on the process outcome response to the client, consider designing your system for failure states and retries. - -:::note -When the client resends the command, it creates a new process instance. -::: - -
-**Code example**
-
-Create a process instance and await results:
-
-```
-zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}'
-```
-
-Response: (Note that the variables in the response depend on the process.)
-
-```
-{
-  "processKey": 2251799813685249,
-  "bpmnProcessId": "order-process",
-  "version": 1,
-  "processInstanceKey": 2251799813686045,
-  "variables": "{\"orderId\":\"1234\"}"
-}
-```
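-
-With the Java client, the awaiting variant uses `withResult()`. A minimal sketch, assuming a configured `ZeebeClient` named `client` and a request timeout that covers the expected process duration:
-
-```java
-final ProcessInstanceResult result =
-    client
-        .newCreateInstanceCommand()
-        .bpmnProcessId("order-process")
-        .latestVersion()
-        .variables(Map.of("orderId", "1234"))
-        .withResult()
-        .requestTimeout(Duration.ofSeconds(60))
-        .send()
-        .join();
-
-// The result exposes the variables collected when the instance completed.
-System.out.println(result.getVariablesAsMap());
-```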
    - -Failure scenarios applicable to other commands are applicable to this command as well. Clients may not get a response in the following cases even if the process execution is completed successfully: - -- **Connection timeout**: If the gRPC deadlines are not configured for long request timeout, the connection may be closed before the process is completed. -- **Network connection loss**: This can occur at several steps in the communication chain. -- **Failover**: When the node processing this process crashes, another node continues the processing. The other node does not send the response because the request is registered on the first one. -- **Gateway failure**: If the gateway the client is connected to fails, nodes inside the cluster cannot send the response to the client. - -### Create and start at a user-defined element - -The [`create and execute asynchronously`](#create-and-execute-asynchronously) and [`create and await results`](#create-and-await-results) commands both start the process instance at their default initial element: the single [none start event](/components/modeler/bpmn/none-events/none-events.md#none-start-events). Camunda 8 also provides a way to create a process instance starting at user-defined element(s). - -:::info -This is an advanced feature. Camunda recommends to only use this functionality for testing purposes. The none start event is the defined beginning of your process. Most likely the process is modeled with the intent to start all instances from the beginning. -::: - -To start the process instance at a user-defined element, you need to provide start instructions along with the command. Each instruction describes how and where to start a single element. - -By default, the instruction starts before the given element. This means input mappings of that element are applied as usual. - -Multiple instructions can be provided to start the process instance at more than one element. -You can activate the same element multiple times inside the created process instance by referring to the same element id in more than one instruction. - -:::note -Start instructions have the same [limitations as process instance modification](/components/concepts/process-instance-modification.md#limitations), e.g., it is not possible to start at a sequence flow. -::: - -Start instructions are supported for both `CreateProcessInstance` commands. - -
-**Code example**
-
-Create a process instance starting before the 'ship_parcel' element:
-
-```java
-client.newCreateInstanceCommand()
-    .bpmnProcessId("order-process")
-    .latestVersion()
-    .variables(Map.of("orderId", "1234"))
-    .startBeforeElement("ship_parcel")
-    .send()
-    .join();
-```
    - -## Events - -Process instances are also created implicitly via various start events. Camunda 8 supports message start events and timer start events. - -### Message event - -A process with a [message start event](/components/modeler/bpmn/message-events/message-events.md#message-start-events) can be started by publishing a message with the name that matches the message name of the start event. - -For each new message a new instance is created. - -### Timer event - -A process can also have one or more [timer start events](/components/modeler/bpmn/timer-events/timer-events.md#timer-start-events). An instance of the process is created when the associated timer is triggered. Timers can also trigger periodically. - -## Next steps - -- [About Modeler](/components/modeler/about-modeler.md) -- [Automating a process using BPMN](/guides/automating-a-process-using-bpmn.md) diff --git a/versioned_docs/version-8.2/components/concepts/process-instance-modification.md b/versioned_docs/version-8.2/components/concepts/process-instance-modification.md deleted file mode 100644 index b4352c29b94..00000000000 --- a/versioned_docs/version-8.2/components/concepts/process-instance-modification.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -id: process-instance-modification -title: "Process instance modification" -description: "Use process instance modification to repair a running process instance." ---- - -Process instance modification is a powerful feature to repair a running process instance. The process instance may be -stuck on an element, waiting for an event, or taking an unintended path because an external system is not -available or doesn't respond as expected, for example. - -Use the [modification command](/apis-tools/grpc.md#modifyprocessinstance-rpc) to skip or repeat a step in the -process. Consider the following example: - -![The process instance is stuck in the message catch event.](assets/process-instance-modification/process-instance-modification-example-1.png) - -The process contains two service tasks and a message catch event in between. The process instance completed the first -task `A` and waits on the message catch event `B`. An external system should publish the message, but the external -system is not available and can't continue the process. The process instance is stuck. - -![We use the modification to skip the event and continue on the next task.](assets/process-instance-modification/process-instance-modification-example-2.png) - -We use the modification to repair the process instance. We "move the token" from the catch event `B` to the next -task `C`. This operation is presented by two instructions in the modification command: - -- Terminate the instance of the catch event `B`. -- Activate the element `C`. - -![After the modification is applied, the message catch event is terminated and the next task is active.](assets/process-instance-modification/process-instance-modification-example-3.png) - -As a result of the command, the process instances terminated the instance of catch event `B` and activated the task `C`. -Now, the process instance is not stuck anymore and can continue in the process. - -Generally, the process instance modification command can contain multiple instructions: - -- To activate an element of the process. -- To terminate an active instance of an element. - -Read more about the behavior of the instructions in the following sections. - -:::note -Use the process instance modification only in exceptional cases to repair the process instance. 
It is not -recommended using it as a part of the regular flow of the process; find additional details [here](#use-at-your-own-risk). -Instead, model all possible cases -explicitly in your process. -::: - -## Activate an element - -We can use the modification command to activate an element of the process. Consider the following example: - -![The process instance waits on task. We use the modification command to activate a task from a parallel flow.](assets/process-instance-modification/process-instance-modification-activate-an-element.png) - -The process instance completed the first task `A` and waits on task `B`. Task `C` is connected to task `A` by a -non-interrupting message catch event. An external system should publish the message, but it is not available. - -To correct the state of the process instance, we modify it and activate task `C`. As a result, task `C` is active, and a -job worker can pick it up. - -The process instance activates the element in the same way as the regular flow; for example, if the incoming sequence -flow of the element would be taken. The activation of the element can include the following steps: - -- Apply the input variable mappings. -- Create the event subscriptions; for example, of boundary events. -- Apply additional logic depending on the element; for example, create a job for a service task. - -## Activate a nested element - -We can use the modification command to activate an element of the process nested inside an embedded or an event -subprocess. This is a special case of [activating an element](#activate-an-element). Consider the following example: - -![The process instance waits on a task inside a subprocess. We use the modification command to activate a task from a parallel flow in the same subprocess.](assets/process-instance-modification/process-instance-modification-activate-nested-element.png) - -The process instance completed the first task `A` in the embedded subprocess. It passed the inclusive gateway and waited -on task `B`. Task `C` is also connected to the inclusive gateway, but the condition didn't match. The condition should -match, but the job worker for task `A` provided unexpected variables. - -To correct the state of the process instance, we modify it and activate task `C`. As a result, task `C` is active in the -same instance of the subprocess as the other task `B`. - -The process instance activates the element always in an existing instance of its subprocess. - -If the subprocess doesn't have an active instance, the process instance creates a new instance of the subprocess -first. The creation of the subprocess can include the creation of the event subscriptions; for example, of boundary -events. In contrast to a regular activation of the subprocess, the process instance doesn't activate the start event -of the subprocess or apply any input variable mappings. - -If the subprocess itself is nested in another subprocess, the same procedure is applied to this subprocess. - -:::note -The process instance can't activate the element if the subprocess has more than one active instance. It can't decide in -which instance of the subprocess to activate the element. As a result, the process instance doesn't apply the activation -instruction and rejects the command. -::: - -## Activate an interrupting event subprocess - -We can use the modification command to activate an interrupting event subprocess of the process. Consider the following -example: - -![The process instance waits on task. 
We use the modification command to activate an interrupting event subprocess in the same scope.](assets/process-instance-modification/process-instance-modification-activate-interrupting-event-subprocess.png) - -The process instance completed the first task `A` and waits on task `B`. Task `C` is embedded in an interrupting message -event subprocess. An external system should publish the message and interrupt the process, but it is not available. - -To correct the state of the process instance, we modify it and activate the interrupting event subprocess. As a result, -the event subprocess is active and enters the start event. But the activation doesn't interrupt the process instance -and terminate task `B`. So, both tasks `B` and `C` are active. - -If we want to simulate the interrupting behavior of the event subprocess, we need to add a modification instruction -to [terminate the instance](#terminate-an-element-instance) of the task `B`. - -If the start event of the event subprocess has output variable mappings, we may need to -[set the variables](#set-variables) with the activation instruction. Otherwise, the process instance may create an -incident when applying the output variable mappings. - -## Set variables - -We can use the modification command to set one or more variables together by activating an element of the process. -Consider the following example: - -![The process instance waits on task. We use the modification command to activate a task inside a non-interrupting message event subprocess.](assets/process-instance-modification/process-instance-modification-set-variables.png) - -The process instance completed the first task `A` and waited on task `B`. Task `C` is embedded in a non-interrupting -message event subprocess. An external system should publish the message, but it is not available. - -To correct the state of the process instance, we modify it and activate the task `C` inside the non-interrupting message -event subprocess. Additionally, we add variable instructions to the modification to set variables that should be -provided by the message. - -The process instance sets the variables before activating the given element. As a result, the variables are available -when applying the input variable mappings and creating the event subscriptions of the element. - -A variable instruction can define the [scope](variables.md#variable-scopes) of the variables. If a scope is defined, the process instance sets the variables as **local** variables in the given scope. For example, set the message variables -as local variables of the event subprocess. The scope must be a flow scope of the activating element. - -If no scope is defined, the process instance sets the variables **globally** in the root scope of the process instance. - -## Terminate an element instance - -We can use the modification command to terminate an active element instance of the process instance. Consider the -following example: - -![The process instance waits on a task inside a nested interrupting event subprocess. We use the modification command to terminate the event subprocess.](assets/process-instance-modification/process-instance-modification-terminate-element-instance.png) - -The process instance completed the first task `A` in the embedded subprocess. It triggered the interrupting timer event -subprocess and terminated task `B` inside the embedded subprocess. An external system published a message and triggered -the non-interrupting message boundary event on the subprocess. 
The process instance waits on task `C` inside the event -subprocess and on task `D` connected to the boundary event. - -The job worker for task `C` failed to complete the job successfully. To skip the task and continue the process instance, -we modify it and terminate the element instance of the event subprocess. - -As a result, the process instance terminates the event subprocess and the element instance of the task `C` that is -inside the event subprocess. Additionally, the process instance terminates the embedded subprocess because it doesn't -contain active element instances anymore. - -Generally, the modification applies the following rules: - -- If the terminating element instance is a subprocess, it terminates all active instances in the subprocess. -- If the terminating element instance is a call activity, it terminates the child process instance. -- If the terminating element instance was the last active instance inside a subprocess, it terminates the subprocess. -- If the terminating element instance was the last active instance of the process instance, it terminates the - process instance. - -If a terminating element instance is not active, the process instance doesn't apply the termination instruction -and rejects the command. - -:::note -The process instance can't terminate the last active element instance of a child process instance. As a result, the -process instance doesn't apply the termination instruction and rejects the command. - -Instead, we can terminate the call activity that created the child process instance. -::: - -## Execute the modification instructions - -A modification command can contain multiple activation and termination instructions. The process instance applies these -instructions in a specific order: - -1. Apply all activation instructions. -2. Apply all termination instructions. - -The order of the instructions matters if the modification terminates the last active instances of the process instance -or inside a subprocess, and activates an element in the process instance or the subprocess. Since the process instance -applies the activation instructions first, the process instance or the subprocess still has an active instance and is -not terminated. - -If the process instance can't apply one of the modification instructions, it rejects the modification command. For -example, if one of the terminating element instances is not active. As a result, the process instance is not modified -and is in the same state as before. It applies the instructions of a modification command in a **transactional** way (i.e. -apply all or nothing). - -## Limitations - -Currently, we can't modify the process instance in all possible ways. In the following cases, the process instance can't -apply the modification instructions and rejects the modification command. - -- If the activating element is a BPMN element of the type: - - A start event of a process or a subprocess - - A boundary event - - An event that belongs to an event-based gateway - - An element inside a multi-instance subprocess - - A sequence flow -- If the activating element is [a nested element](#activate-a-nested-element) and the subprocess has more than one - active instance. -- If the modification terminates all active instances of a child process instance. - -## Use at your own risk - -Process instance modification is a powerful tool to repair a process instance. However, use it with care. You -can modify the process instance to create situations that are not reachable by the regular execution. 
Consider the -following example: - -![The process instance waits on a task after a parallel joining gateway.](assets/process-instance-modification/process-instance-modification-use-at-your-own-risk.png) - -The process instance completed the first tasks `A` and `B` and waits on task `C`. - -We could apply the following modifications, but the process instance may end up in an unintended situation: - -- If we activate task `A` again, the process instance is stuck on the parallel gateway. -- If we activate task `D` and don't set all variables that would be provided by the message, the task `D` could be - processed with the wrong input. -- If we activate task `E` inside the interrupting event subprocess, the process instance doesn't interrupt task `C` - and the processing of the tasks could override variables. - -The process instance doesn't detect these situations. It is up to you to apply suitable modifications. diff --git a/versioned_docs/version-8.2/components/concepts/processes.md b/versioned_docs/version-8.2/components/concepts/processes.md deleted file mode 100644 index dcc626867c8..00000000000 --- a/versioned_docs/version-8.2/components/concepts/processes.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -id: processes -title: "Processes" -description: "Processes are flowchart-like blueprints that define the orchestration of tasks." ---- - -Processes are flowchart-like blueprints that define the orchestration of **tasks**. - -For example, with Camunda you can [orchestrate human tasks](../../guides/getting-started-orchestrate-human-tasks.md). - -Every task represents a piece of business logic so the ordered execution produces a meaningful result. - -A **job worker** implements the business logic required to complete a task. A job worker must be able to communicate with Camunda 8, but otherwise, there are no restrictions on its implementation. You can choose to write a worker as a microservice, but also as part of a classical 3-tier application, as a \(lambda\) function, via command line tools, etc. - -Running a process requires three steps: - -1. Deploy a process to Camunda 8. -2. Implement and register job workers for tasks in the workflows. -3. Create new instances of said process. - -Let's not get ahead of ourselves; the very first step is to design the process. - -## BPMN 2.0 - -Zeebe uses [BPMN 2.0](http://www.bpmn.org/) to represent processes. BPMN is an industry standard widely supported by different vendors and implementations. Using BPMN ensures processes can be interchanged between Zeebe and other process systems. - -## BPMN modeler - -Zeebe provides a free and open source BPMN modeling tool to create BPMN diagrams and configure their technical properties. The modeler is a desktop application based on the [bpmn.io](https://bpmn.io) open source project. - -Desktop Modeler can be [downloaded from GitHub](https://camunda.com/download/modeler/). - -:::note -New to modeling a process using BPMN? Visit our step-by-step introductory guide to [automating a process using BPMN](../../guides/automating-a-process-using-bpmn.md). -::: - -## Sequences - -The simplest kind of process is an ordered sequence of tasks. Whenever process execution reaches a task, Zeebe (the workflow engine inside Camunda 8) creates a job that can be requested and completed by a job worker. - -![process-sequence](assets/order-process.png) - -You can think of Zeebe's process orchestration as a state machine, taking the following steps: - -1. 
A process instance reaches a task, and Zeebe creates a job that can be requested by a worker. -2. Zeebe waits for the worker to request a job and complete the work. -3. Once the work is complete, the flow continues to the next step. -4. If the worker fails to complete the work, the process remains at the current step, and the job could be retried until it's successfully completed. - -## Data flow - -As Zeebe progresses from one task to the next in a process, it can move custom data in the form of variables. Variables are key-value pairs and part of the process instance. - -![data-flow](assets/process-data-flow.png) - -Any job worker can read the variables and modify them when completing a job so data can be shared between different tasks in a process. - -## Data-based conditions - -Some processes don't always execute the same tasks, and instead need to choose different tasks based on variables and conditions: - -![data-conditions](assets/processes-data-based-conditions.png) - -The diamond shape with the **X** in the middle is an element indicating the process can take one of several paths. - -## Events - -Events represent things that happen. A process can react to events (catching event) and can emit events (throwing event). - -![process](assets/process-events.png) - -There are different types of events, such as a message, timer, or error. - -## Parallel execution - -In many cases, it's also useful to perform multiple tasks in parallel. This can be achieved with a parallel gateway: - -![data-conditions](assets/processes-parallel-gateway.png) - -The diamond shape with the **+** marker means all outgoing paths are activated. The tasks on those paths can run in parallel. The order is only fulfilled after both tasks have completed. - -## Next steps - -- [About Modeler](/components/modeler/about-modeler.md) -- [Automating a process using BPMN](/guides/automating-a-process-using-bpmn.md) diff --git a/versioned_docs/version-8.2/components/concepts/signals.md b/versioned_docs/version-8.2/components/concepts/signals.md deleted file mode 100644 index 47294d1e98f..00000000000 --- a/versioned_docs/version-8.2/components/concepts/signals.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: signals -title: "Signals" -description: "Learn about broadcasting signals, which can trigger all matching signal events with a single broadcast." ---- - -Signals are a similar concept to [messages](messages.md). However, messages are correlated to a specific -process instance, whereas signals can trigger _all_ the matching signal events with a single broadcast. -Depending on the type of [signal catch events](../modeler/bpmn/signal-events/signal-events.md) the process instance will -respond accordingly. - -## Signal subscriptions - -Signals work using subscriptions. When a process encounters a signal catch event it creates a new signal subscription. -This process instance waits until a signal with a matching name is broadcasted. You can define the signal name in the -process definition. -Deploying a process with a signal start event also creates a new signal subscription. In this case the subscription will -be used to start a new process instance. - -## Signal cardinality - -A broadcasted signal iterates over _all_ available subscriptions. As a result, a single broadcast triggers _all_ the -signal catch events that match the signal name. 
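-
-Broadcasting is done through the client API. As a sketch, assuming a configured `ZeebeClient` named `client` and a client version that exposes the broadcast signal command, it may look like the following; the signal name and variables are illustrative:
-
-```java
-client
-    .newBroadcastSignalCommand()
-    .signalName("order_cancelled")
-    .variables(Map.of("reason", "out of stock")) // optional signal variables
-    .send()
-    .join();
-```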
diff --git a/versioned_docs/version-8.2/components/concepts/variables.md b/versioned_docs/version-8.2/components/concepts/variables.md deleted file mode 100644 index f06a13f9d3f..00000000000 --- a/versioned_docs/version-8.2/components/concepts/variables.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -id: variables -title: "Variables" -description: "Variables are part of a process instance and represent the data of the instance." ---- - -Variables are part of a process instance and represent the data of the instance. - -A variable has a name and a JSON value. The visibility of a variable is defined by its variable scope. - -When [automating a process using BPMN](../../guides/automating-a-process-using-bpmn.md) or [orchestrating human tasks](../../guides/getting-started-orchestrate-human-tasks.md), you can leverage the scope of these variables and customize how variables are merged into the process instance. - -## Variable names - -The name of a variable can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to use the `camelCase` or the `snake_case` format. The `kebab-case` format is not allowed because it contains the operator `-`. - -When accessing a variable in an expression, keep in mind the variable name is case-sensitive. - -Restrictions of a variable name: - -- It may not start with a **number** (e.g. `1stChoice` is not allowed; you can use `firstChoice`instead). -- It may not contain **whitespaces** (e.g. `order number` is not allowed; you can use `orderNumber` instead). -- It may not contain an **operator** (e.g. `+`, `-`, `*`, `/`, `=`, `>`, `?`, `.`). -- It may not be a **literal** (e.g. `null`, `true`, `false`) or a **keyword** (e.g. `function`, `if`, `then`, `else`, `for`, `between`, `instance`, `of`, `not`). - -## Variable values - -The value of a variable is stored as a JSON value. It can have one of the following types: - -- String (e.g. `"John Doe"`) -- Number (e.g. `123`, `0.23`) -- Boolean (e.g. `true` or `false`) -- Array (e.g. `["item1" , "item2", "item3"]`) -- Object (e.g. `{ "orderNumber": "A12BH98", "date": "2020-10-15", "amount": 185.34}`) -- Null (`null`) - -## Variable size limitation - -Generally, there is a limit of 4 MB for the payload of a process instance. This 4 MB includes the variables and the workflow engine internal data, which means there is slightly less memory available for variables. The exact limitation depends on a few factors, but you can consider 3 MB as being safe. If in doubt, run a quick test case. - -:::note -Regardless, we don't recommend storing much data in your process context. See our [best practice on handling data in processes](/docs/components/best-practices/development/handling-data-in-processes/). -::: - -## Variable scopes - -Variable scopes define the _visibility_ of variables. The root scope is the process instance itself. Variables in this scope are visible everywhere in the process. - -When the process instance enters a subprocess or an activity, a new scope is created. Activities in this scope can see all variables of this and of higher scopes (i.e. parent scopes). However, activities outside of this scope can not see the variables which are defined in this scope. - -If a variable has the same name as a variable from a higher scope, it covers this variable. Activities in this scope see only the value of this variable and not the one from the higher scope. - -The scope of a variable is defined when the variable is created. By default, variables are created in the root scope. 
- -![variable-scopes](assets/variable-scopes.png) - -This process instance has the following variables: - -- `a` and `b` are defined on the root scope and can be seen by **Task A**, **Task B**, and **Task C**. -- `c` is defined in the subprocess scope and can be seen by **Task A** and **Task B**. -- `b` is defined again on the activity scope of **Task A** and can be seen only by **Task A**. It covers the variable `b` from the root scope. - -### Variable propagation - -When variables are merged into a process instance (e.g. on job completion, on message correlation, etc.) each variable is propagated from the scope of the activity to its higher scopes. - -The propagation ends when a scope contains a variable with the same name. In this case, the variable value is updated. - -If no scope contains this variable, it's created as a new variable in the root scope. - -![variable-propagation](assets/variable-propagation.png) - -The job of **Task B** is completed with the variables `b`, `c`, and `d`. The variables `b` and `c` are already defined in higher scopes and are updated with the new values. Variable `d` doesn't exist before and is created in the root scope. - -### Local variables - -In some cases, variables should be set in a given scope, even if they don't exist in this scope before. - -To deactivate the variable propagation, the variables are set as **local variables**. This means the variables are created or updated in the given scope, regardless if they existed in this scope before. - -## Input/output variable mappings - -Input/output variable mappings can be used to create new variables or customize how variables are merged into the process instance. - -Variable mappings are defined in the process as extension elements under `ioMapping`. Every variable mapping has a `source` and a `target` expression. - -The `source` expression defines the **value** of the mapping. Usually, it [accesses a variable](/components/modeler/feel/language-guide/feel-variables.md#access-variable) of the process instance that holds the value. If the variable or the nested property doesn't exist, an [incident](incidents.md) is created. - -The `target` expression defines **where** the value of the `source` expression is stored. It can reference a variable by its name or a nested property of a variable. If the variable or the nested property doesn't exist, it's created. - -Variable mappings are evaluated in the defined order. Therefore, a `source` expression can access the target variable of a previous mapping. - -![variable-mappings](assets/variable-mappings.png) - -**Input mappings** - -| Source | Target | -| --------------- | ----------- | -| `customer.name` | `sender` | -| `customer.iban` | `iban` | -| `totalPrice` | `price` | -| `orderId` | `reference` | - -**Output mapping** - -| Source | Target | -| -------- | --------------- | -| `status` | `paymentStatus` | - -### Input mappings - -Input mappings can be used to create new variables. They can be defined on service tasks and subprocesses. - -When an input mapping is applied, it creates a new **local variable** in the scope where the mapping is defined. - -Examples: - -| Process instance variables | Input mappings | New variables | -| -------------------------------------- | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------- | -| `orderId: "order-123"` | **source:** `=orderId`
    **target:** `reference` | `reference: "order-123"` | -| `customer:{"name": "John"}` | **source:** `=customer.name`
    **target:** `sender` | `sender: "John"` | -| `customer: "John"`
    `iban: "DE456"` | **source:** `=customer`
    **target:** `sender.name`
    **source:** `=iban`
    **target:** `sender.iban` | `sender: {"name": "John", "iban": "DE456"}` | - -### Output mappings - -Output mappings can be used for several purposes: - -- To customize how variables are merged into the process instance. -- They can be defined on service tasks, receive tasks, message catch events, and subprocesses. -- They can be used in script and user tasks. - -If **one or more** output mappings are defined, the results variables are set as **local variables** in the scope where the mapping is defined. Then, the output mappings are applied to the variables and create new variables in this scope. The new variables are merged into the parent scope. If there is no mapping for a job/message variable, the variable is not merged. - -If **no** output mappings are defined, all results variables are merged into the process instance. - -In the case of a subprocess, the behavior is different. There are no results variables to be merged. However, output mappings can be used to propagate **local variables** of the subprocess to higher scopes. By default, all **local variables** are removed when the scope is left. - -Examples: - -| Results variables | Output mappings | Process instance variables | -| ---------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------- | -| `status: "Ok"` | **source:** `=status`
    **target:** `paymentStatus` | `paymentStatus: "OK"` | -| `result: {"status": "Ok", "transactionId": "t-789"}` | **source:** `=result.status`
    **target:** `paymentStatus`
    **source:** `=result.transactionId`
    **target:** `transactionId` | `paymentStatus: "Ok"`
    `transactionId: "t-789"` | -| `status: "Ok"`
    `transactionId: "t-789"` | **source:** `=transactionId`
    **target:** `order.transactionId` | `order: {"transactionId": "t-789"}` | - -## Next steps - -- [Access variables](/components/modeler/feel/language-guide/feel-variables.md#access-variable) -- [Incidents](incidents.md) diff --git a/versioned_docs/version-8.2/components/concepts/what-is-camunda-8.md b/versioned_docs/version-8.2/components/concepts/what-is-camunda-8.md deleted file mode 100644 index f57a980033a..00000000000 --- a/versioned_docs/version-8.2/components/concepts/what-is-camunda-8.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -id: what-is-camunda-8 -title: "What is Camunda 8?" -description: "Camunda 8 orchestrates complex business processes that span people, systems, and devices." -keywords: - [ - "workflow process", - "workflow engine", - "process management software", - "bpm business process management", - "business process automation", - "camunda software", - "camunda cloud", - "process automation platform", - "process automation software", - "process orchestration", - ] ---- - -[Camunda 8](https://camunda.io) orchestrates complex business processes that span people, systems, and devices. With Camunda, business users collaborate with developers to model and [automate end-to-end processes using BPMN-powered flowcharts](/guides/automating-a-process-using-bpmn.md), alongside DMN decision tables that promote speed, scale, and decision logic. - -## What use cases does Camunda 8 have? - -### Orchestrate, observe, and analyze microservices & human tasks - -An end-to-end, automated business process typically requires multiple microservices to achieve an outcome. Software developers and architects often struggle to effectively communicate across multiple microservices, monitor their performance, and identify and resolve problems when they occur. - -Camunda enables organizations to overcome these issues without compromising autonomy and the coupling of microservices. Camunda offers speed, scale, and security when paired with [microservices](/guides/getting-started-orchestrate-microservices.md), without the overhead of building and maintaining a daunting infrastructure. - -In addition to microservices, many organizations have mission-critical processes that require people to perform tasks manually. An end-to-end business process often requires the combination of manual work with automated steps in a unified workflow. - -It’s important that workflows are properly orchestrated to achieve a desired outcome. For example, if a customer onboarding process is delayed because an employee doesn’t know they need to complete a task, the customer will have a poor experience. Camunda provides a lightweight, developer-friendly, easy-to-integrate solution with the [human task orchestration](/guides/getting-started-orchestrate-human-tasks.md) feature of Camunda 8 to help individuals and groups fix slow, inefficient, or broken human workflows. - -For a closer look at other use cases, see the [solutions page](https://camunda.com/solutions/) which outlines the following: - -- Modernize legacy IT systems -- Orchestrate, monitor, and analyze RPA bots -- Replace homegrown workflow automation software -- Modernize legacy business process management systems (BPMS) -- Build a centralized process automation platform - -## What are the core quality attributes of Camunda 8? - -Camunda 8 is designed to operate on a very large scale. 
To achieve this, it provides: - -- **Horizontal scalability** and no dependence on an external database; [Zeebe](/components/zeebe/zeebe-overview.md) (the workflow engine inside Camunda 8) writes data directly to the file system on the same servers where it is deployed. Zeebe enables distribution processing across a cluster of machines to deliver high throughput. -- **High availability and fault tolerance** via a pre-configured replication mechanism, ensuring Camunda 8 can recover from machine or software failure with no data loss and minimal downtime. This ensures the system as a whole remains available without requiring manual action. -- **Audit trail** as all process-relevant events are written to an append-only log, providing an audit trail and a history of the state of a process. -- **Reactive publish-subscribe interaction model** which enables microservices that connect to Camunda 8 to maintain a high degree of control and autonomy, including control over processing rates. These properties make Camunda 8 resilient, scalable, and reactive. -- **Visual processes modeled in ISO-standard BPMN 2.0** so technical and non-technical stakeholders can collaborate on process design in a widely-used modeling language. -- **Language-agnostic client model** makes it possible to build a client in nearly any programming language an organization uses to build microservices. -- **Operational ease-of-use** as a SaaS provider we take care of all operational details. - -## What are the Camunda 8 components? - -### Modeler - -Model and deploy business process diagrams with BPMN and DMN. By using industry-standard BPMN flowcharts to model and automate end-to-end processes, both developers and business stakeholders can collaborate and work on process diagrams and decision tables simultaneously, and use collaborative features such as comments to discuss. Available via [web and desktop app](/components/modeler/about-modeler.md). - -#### Connectors - -Connectors help you communicate with systems and technology, reducing the time required to automate and orchestrate business processes that span multiple systems. Connectors are inserted into BPMN diagrams directly from within the Camunda Modeler interface. Once added to your diagram, they are configured via an intuitive properties panel. - -#### Forms - -[Create and implement custom forms](/guides/utilizing-forms.md) that power workflows requiring human interaction. - -### Workflow engine & decision engine - -Powered by Zeebe, Camunda’s cloud-native workflow engine provides organizations with speed, scale, and security without the overhead of building and maintaining a complex infrastructure. Zeebe can scale throughput linearly by adding cluster nodes, allowing the processing of an unlimited amount of transactions at consistently low latencies. Zeebe also comes with a new fail-over architecture that also supports geo-replication across data centers to provide enterprise grade availability. - -### Tasklist - -With [Tasklist](/components/tasklist/introduction-to-tasklist.md), process owners can achieve end-to-end process automation by [orchestrating human tasks](/guides/getting-started-orchestrate-human-tasks.md). When a user needs to work on a task, they’ll see it appear in Tasklist. - -### Operate - -[Operate](/components/operate/operate-introduction.md) provides transparency and real-time visibility to monitor, analyze, and resolve problems with processes running in Camunda 8. 
- -### Optimize - -[Optimize]($optimize$/components/what-is-optimize) leverages process execution data to continuously [provide actionable insights](/guides/improve-processes-with-optimize.md). Optimize specializes in BPMN-based analysis and can show users exactly what their process model needs for successful execution. - -### Console - -With [Console](/components/console/introduction-to-console.md), teams can create, configure, manage, and monitor clusters for all environments from development to production. Additionally, Console offers control over organizational settings such as user management, roles, and insights into usage metrics. - -## How does Camunda 8 compare to other solutions? - -### End-to-end orchestration - -Design, automate, and improve all components of the business process across different technologies, systems, infrastructures, people, and devices. - -### Open architecture - -Fit into diverse and complex enterprise environments and technology stacks with Camunda's open and scalable architecture. It provides a highly scalable based on open components that can be easily integrated with most common technical architectures and frameworks. - -### Standards-based business & IT collaboration - -Use BPMN and DMN standards as a common language for developers and business stakeholders alike throughout the entire process automation lifecycle. - -### Developer-friendly approach - -The platform and tools are usable in your environment right away, with full public access to all of Camunda's documentation, [open APIs for integration](/apis-tools/working-with-apis-tools.md), and a [community](https://camunda.com/developers/) comprised of around 100,000 developers. - -## Next steps - -- To request information about Camunda 8 performance and benchmarking, see our [Contact](/contact/) page. -- [Introduction to Camunda 8](/guides/introduction-to-camunda-8.md) -- [Create a Camunda 8 account](/guides/create-account.md) -- [Migrate from Camunda 7 to Camunda 8](/guides/migrating-from-camunda-7/index.md) -- [Automate a process using BPMN](/guides/automating-a-process-using-bpmn.md) diff --git a/versioned_docs/version-8.2/components/concepts/workflow-patterns.md b/versioned_docs/version-8.2/components/concepts/workflow-patterns.md deleted file mode 100644 index af4c0f580ae..00000000000 --- a/versioned_docs/version-8.2/components/concepts/workflow-patterns.md +++ /dev/null @@ -1,297 +0,0 @@ ---- -id: workflow-patterns -title: Workflow patterns -sidebar_label: Workflow patterns -slug: /components/concepts/workflow-patterns/ -description: "For end-to-end process orchestration, you must accurately express the things happening in your business processes, requiring workflow patterns." ---- - -For true end-to-end process orchestration, you must be able to accurately express all the things happening in your business processes which will require simple and advanced workflow patterns. This page describes typical patterns and how you can implement them using Camunda and BPMN. - -## The power of BPMN - -Let's discuss the ISO standard [Business Process Model and Notation (BPMN)](https://camunda.com/bpmn/) first, as this is really a great workflow language to express workflow patterns. BPMN was developed as a collaboration of different vendors rooted in real-life industry experience. It happened during a time when the scientific background of workflow patterns was already well researched, for example by the [Workflow Patterns Initiative](http://www.workflowpatterns.com/). 
- -In other words, scientists already wrote down all the patterns that are important to express any problem you might get in a workflow, and BPMN used this knowledge to design a language that implemented all the relevant patterns (see this [evaluation](http://www.workflowpatterns.com/evaluations/standard/bpmn.php), for example). Essentially, BPMN is feature complete and will always be able to express what you need to orchestrate your processes. - -Additionally, BPMN has expressed all real-life problems rather easily when reflecting on our more than 15 years of hands-on experience with the language. - -If you now try to rely on workflow languages that promise to be simpler than BPMN, what it really means is that they lack important workflow patterns. You might want to look in the blog post on [why process orchestration needs advanced workflow patterns](https://camunda.com/blog/2022/07/why-process-orchestration-needs-advanced-workflow-patterns/), showing exemplary workarounds that are necessary if the language cannot express certain patterns. - -Typically, this involves emulating advanced patterns with basic constructs plus programming code so that your development takes longer, your solution becomes more brittle, and the resulting process model can't serve as a communication vehicle for business and IT as the model will be contaminated with technical details. - -## Routing - -The most basic workflow patterns are (excuse the play on words) around the basic flow of work. - -### Sequence - -See [Workflow Pattern 1: Sequence](http://www.workflowpatterns.com/patterns/control/basic/wcp1.php): "A task in a process is enabled after the completion of a preceding task in the same process." - -This is implemented by a [sequence flow](/docs/components/modeler/bpmn/bpmn-primer/#sequence-flow-controlling-the-flow-of-execution) connecting two activities: - -
    - -The first sequence flow (1) connects the start event with **Task A**. Then, **Task B** is connected using another sequence flow, meaning that it can only happen if **Task A** was completed. If **Task B** completes, the sequence flow routes to the end event so that the process instance can complete. - -You can read more about it in [our BPMN primer: sequence flows - controlling the flow of execution](/docs/components/modeler/bpmn/bpmn-primer/#sequence-flow-controlling-the-flow-of-execution). - -### Conditions (if/then) - -See [Workflow Pattern 4: Exclusive Choice](http://www.workflowpatterns.com/patterns/control/basic/wcp4.php): "The thread of control is immediately passed to precisely one of the outgoing branches." - -This is implemented by an [exclusive gateway (XOR)](/docs/components/modeler/bpmn/exclusive-gateways/): - -
- -All outgoing sequence flows of the XOR gateway (1) have a [condition](/docs/components/concepts/expressions/#boolean-expressions) configured, which decides if the process continues in **Task B** (2 if `x>42`) or **Task C** (3 if `not(x>42)`). - -You can read more about it in [our BPMN primer: gateways - steering flow](/docs/components/modeler/bpmn/bpmn-primer/#gateways-steering-flow). - -### Invoke subworkflows - -You need to invoke another process as part of your own process. - -This is implemented by a [call activity](/docs/components/modeler/bpmn/call-activities/): - -
    - -1 - -When the call activity is entered, a new process instance of the referenced process is created. Only when the created process instance is completed is the call activity left and the outgoing sequence flow taken. - -You can reference any other BPMN process, for example: - -
    - -### Loop - -See [Workflow Pattern 21: Structured Loop](http://www.workflowpatterns.com/patterns/control/basic/wcp21.php): "The ability to execute a task or subprocess repeatedly. The loop has either a pre-test or post-test condition associated with it." - -In BPMN, you can simply model a loop: - -
    - -1 - -This exclusive gateway contains the expression to decide if to continue or exit the loop. The gateway can be before or after the loop. - -There is also a specific loop task marker in BPMN: - -
    - -:::note -The loop task marker event is supported in Camunda 7, but not yet in Camunda 8. It is on the roadmap and will eventually be available in Camunda 8. -::: - -### Static parallel branches - -Imagine you want some tasks known during design time to be carried out in parallel. See [Workflow Pattern 2: Parallel Split](http://www.workflowpatterns.com/patterns/control/new/wcp2.php) and [Workflow Pattern 33: Generalized AND-Join](http://www.workflowpatterns.com/patterns/control/new/wcp33.php): "The divergence of a branch into two or more parallel branches each of which execute concurrently" plus "the convergence of two or more branches into a single subsequent branch." - -In BPMN, this is implemented using [parallel gateways (AND)](/docs/components/modeler/bpmn/parallel-gateways/): - -
    - -1 - -This AND-gateway splits the flow into concurrent paths so that Task A, B, and C are executed in parallel. - -2 - -This AND-gateway waits for Task A, B, and C to complete before the flow can move on. - -You can read more about it in [our BPMN primer: gateways - steering flow](/docs/components/modeler/bpmn/bpmn-primer/#gateways-steering-flow). - -### Dynamic parallel branches - -You might want to execute some tasks for every element of a list, like the `for each` construct in programming languages. See [Workflow Pattern 14: Multiple Instances with a priori Run-Time Knowledge](http://www.workflowpatterns.com/patterns/control/new/wcp14.php): "Multiple instances of a task can be created. The required number of instances may depend on a number of runtime factors, but is known before the task instances must be created. Once initiated, these instances are independent of each other and run concurrently. It is necessary to synchronize the instances at completion before any subsequent tasks can be triggered." - -In BPMN, this is implemented using [multiple instance activities](/docs/components/modeler/bpmn/multi-instance/): - -
    - -1 - -The parallel multiple instance marker defines that this subprocess is executed multiple times - once for each element of a given collection (like a `for each` loop in a programming language). - -### Wait - -A typical situation is that a process needs to wait for some event to happen, e.g. some time to pass or some external message to arrive. This is related to [Workflow Pattern 23: Transient Trigger](http://www.workflowpatterns.com/patterns/control/new/wcp23.php). - -In BPMN, this is implemented using [events](/docs/components/modeler/bpmn/events/) (or [receive tasks](/docs/components/modeler/bpmn/receive-tasks/)): - -
    - -1 - -The timer event causes the process to wait, in this case until a specific point in time is due or some duration has elapsed. See [timer events](/docs/components/modeler/bpmn/timer-events/) for more details. - -2 - -The process will wait for a message to arrive. The message is an external trigger provided by API and can technically be anything, from a callback (e.g. via REST), over real messaging (like AMQP), or to notifications within your system. See [message events](/docs/components/modeler/bpmn/message-events/) for more details. - -You can read more about events in [our BPMN primer: events - waiting for something to happen](/docs/components/modeler/bpmn/bpmn-primer/#events-waiting-for-something-to-happen). - -## Reacting to events - -The waiting mentioned above is a special case where you react to events while not doing anything else. Oftentimes, you want to react to events even if the process is doing something else at the moment. This is described in this section. - -Typical examples are customer cancelation requests coming in for running order fulfillment processes, or timeouts if parts of the process take too long. - -### Time based - -You want to react if a certain point in time is due or a specific time duration has passed. This is related to [Workflow Pattern 23: Transient Trigger](http://www.workflowpatterns.com/patterns/control/new/wcp23.php). - -In BPMN, you can leverage [boundary events](/docs/components/modeler/bpmn/events/#boundary-events) or [event subprocesses](/docs/components/modeler/bpmn/event-subprocesses/). - -Those events can be interrupting or non-interrupting, meaning you will either interrupt the current activity, or start something in parallel. - -
    - -1 - -This timer is non-interrupting (dashed line), so the **Escalate request approval** task is started in parallel, additionally to the **Approve request** task. The idea is that the escalation task might make a manager double-checking the original task does not slip. Non-interrupting events can also be recurring, so you could also escalate "every two hours". - -2 - -This timer is interrupting (solid line). Once it fires, the **Approve request** task is canceled and the process continues on the alternative path, in this case to automatically reject the request. Note that both timers so far can only happen if the task **Approve request** is active. - -3 - -This is an event subprocess (dotted line). This can be activated from everywhere in the current scope. In this example, the scope is the whole process. - -4 - -So if the process is not completed within the defined SLA, the timer fires and the event subprocess is started. As the timer is non-interrupting (dashed line again), it does not intervene with the typical flow of operations, but starts something additionally in parallel. - -:::note -The above process is not necessarily modeled following all of our [modeling best practices](/docs/components/best-practices/modeling/creating-readable-process-models/), but intentionally shows different ways to use BPMN to implement certain workflow patterns. -::: - -### External messages/events - -You might also want to react to certain incoming messages or events in an existing process. A good example is a customer canceling the current order fulfillment process. This might be possible only in a certain process phase and lead to different actions. This is related to [Workflow Pattern 23: Transient Trigger](http://www.workflowpatterns.com/patterns/control/new/wcp23.php) and [Workflow Pattern 24: Persistent Trigger](http://www.workflowpatterns.com/patterns/control/new/wcp24.php). - -As with timers, you can leverage [boundary events](/docs/components/modeler/bpmn/events/#boundary-events) or [event subprocesses](/docs/components/modeler/bpmn/event-subprocesses/). - -
- -Assume that an order cancelation message comes in for the current process instance using [message correlation](/docs/components/concepts/messages/). - -1 - -Subprocesses can be easily used to define phases of a process, as the cancelation is treated differently depending on the current process phase. - -2 - -For example, a cancelation during the clearing phase has no consequences and can simply be executed. - -3 - -But when the process is already in the preparation phase, it might need to clean up certain things properly. - -4 - -During delivery, the process does not allow cancelations anymore. This is also why this event is non-interrupting (dashed line), so we keep doing **Delivery**. - -### Correlation mechanisms - -Mapping external messages to an existing process instance is called [message correlation](/docs/components/concepts/messages/). This is crucial functionality to ensure you can communicate with process instances from the outside. - -There are two main problems to solve: - -1. How to find the right process instance? In Camunda, this is solved by a `message name` and a `correlation key` (e.g. `orderCanceled` and `order-42`). - -2. How to persist messages if a process instance is not yet ready to receive them? In Camunda, this is solved by having an internal message store and a `time to live` attached to messages. This is related to [Workflow Pattern 24: Persistent Trigger](http://www.workflowpatterns.com/patterns/control/new/wcp24.php). - -You can find more information in [our documentation about messages](/docs/components/concepts/messages/). - -### Events from subprocesses - -Sometimes, a subprocess needs to communicate with its parent process without ending the subprocess yet. BPMN allows this via an [escalation event](/docs/components/modeler/bpmn/bpmn-coverage/). - -:::note -The escalation event is supported in Camunda 7, but not yet in Camunda 8. It is on the roadmap and will eventually be available in Camunda 8. -::: - -
    - -1 - -An escalation event can be thrown from any of the called subprocesses and is picked up by its parent to start something in parallel, as this is a non-interrupting event (dashed line). - -The subprocess can raise the escalation any time: - -
- -### Broadcasts and engine-wide events - -While messages are always targeted at one specific process instance, you might also want to inform many processes about an event at once. For example, you might regularly adjust certain customer scoring rules that should always be taken into account immediately. This can be implemented using the [signal event](/docs/components/modeler/bpmn/bpmn-coverage/). - -:::note -The signal event is supported in Camunda 7, but not yet in Camunda 8. It is on the roadmap and will eventually be available in Camunda 8. -::: - -
    - -1 - -The signal event is caught and in this case interrupts the onboarding to go back to score the customer again. - -## Handling errors - -Handling exceptions well is one of the most important capabilities of a workflow engine, and it needs built-in support from the modeling language. - -You might also want to look into our [best practice: modeling beyond the happy path](/docs/components/best-practices/modeling/modeling-beyond-the-happy-path/) to understand possibilities. - -### Error scopes - -The reaction to errors might need to be different depending on the current state of the process. This can be achieved by using [subprocesses](/docs/components/modeler/bpmn/embedded-subprocesses/) in combination with either [boundary events](/docs/components/modeler/bpmn/events/#boundary-events) or [event subprocesses](/docs/components/modeler/bpmn/event-subprocesses/). - -
    - -1 - -This boundary error event is attached to the subprocess "clearing" and only catches errors within that subprocess. The idea here would be that in case of any clearing service not being available, the order is assumed cleared. Note that this example is mainly built for illustration, and does not necessarily mean this is the best way to solve this business requirement. - -2 - -Alternatively, this error event subprocess is triggered whenever there is a fraud detected, independent of whether the error occurs in any of the subprocesses or the main process. - -### Catch errors per type - -You might need to react to different event types differently, which is possible by using the [error type](/docs/components/modeler/bpmn/error-events/#defining-the-error) known to BPMN: - -
    - -Now there is a different reaction if fraud was detected (1) or the address was found to contain an error (2). - -## Business transactions - -Modern systems are highly distributed across the network. In such systems, you cannot rely on technical ACID transactions for consistency, but need to elevate decisions around consistency or regaining consistency to the business level. See [Achieving consistency without transaction managers](https://blog.bernd-ruecker.com/achieving-consistency-without-transaction-managers-7cb480bd08c) for additional background on this. - -### Compensation - -An important problem to solve is how to roll back a business transaction in case of problems. In other words, how to restore business consistency. One strategy is to leverage compensating activities to undo the original actions whenever the problem occurs. This is also known as the [Saga Pattern](https://blog.bernd-ruecker.com/saga-how-to-implement-complex-business-transactions-without-two-phase-commit-e00aa41a1b1b). - -In BPMN, you can use [compensation events](/docs/components/modeler/bpmn/bpmn-coverage/) to easily implement compensations in your processes. - -:::note -The compensation event is supported in Camunda 7, but not yet in Camunda 8. It is on the roadmap and will eventually be available in Camunda 8. -::: - -
    - -1 - -For every task in a process model, you can define a compensation task. This can be any valid BPMN task, like a service task, a human task (2), or a subprocess, for example. - -3 - -This compensation task is connected to the original task by a dedicated compensation event. - -4 - -Within your process model, you can define when it is time to compensate. Whenever you trigger the compensation event, all tasks of the current scope that were executed are automatically compensated. This means that their configured compensation task is executed. - -The big advantage is that you don't have to remodel the routing logic to compensate correctly, like checking again if the customer balance was used. The workflow engine will take care automatically, also in more complicated situations like multiple instance activities. diff --git a/versioned_docs/version-8.2/components/connectors/connector-types.md b/versioned_docs/version-8.2/components/connectors/connector-types.md deleted file mode 100644 index e241dffcf09..00000000000 --- a/versioned_docs/version-8.2/components/connectors/connector-types.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: connector-types -title: Types of Connectors -description: "Connectors come in type and subtypes that describe their functionality." ---- - -Connectors are categorized by the direction data flows into or out of Camunda 8. - -:::note -Looking for pre-built, [Out-of-the-box Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md)? -::: - -## Outbound Connectors - -Outbound Connectors allow workflows to trigger external systems or services, making it possible to integrate workflows with other parts of a business process or system architecture. - -The Java code to connect to the external system is executed when the workflow reaches the service task. - -![Outbound Connectors](img/outbound-connectors.png) - -Use outbound Connectors if something needs to happen in the third-party system if a process reaches a service task. For example, calling a REST endpoint or publishing a message to Slack. - -## Inbound Connectors - -Inbound Connectors enable workflows to receive data or messages from external systems or services, making it possible to integrate workflows into a wider business process or system architecture. -Inbound Connectors can be used to create a new process instance, or to send a message to a running process instance. - -The Java code of the inbound Connector has a lifecycle suitable for long-running operations, such as listening for messages on a queue or waiting for a webhook to be called. -The Connector code is **activated** as soon as the Connector Runtime detects an element in a process definition that references an inbound connector. It gets `deactivated` in case of an updated or deleted process definition. - -Inbound Connector instances are linked to process definitions and not to specific process instances. If a process definition contains an element referencing an inbound Connector, the Connector code will be first executed when the process definition is deployed and the deployment has been detected by the Connector Runtime. -The Connector object created during deployment will be kept active as long as the process is deployed, and it is reused to serve all instances of the process. -When the process definition is deleted or replaced with a newer version, the Connector object will be removed or updated as well. 
- -:::note -Inbound Connectors currently rely on [Operate](../../operate/operate-introduction) API to retrieve the information about deployed process definitions. - -If your Camunda 8 installation doesn't include Operate, you can only use outbound Connectors. -::: - -![Inbound Connectors](img/inbound-connectors.png) - -Use inbound Connectors if something needs to happen within the workflow engine because of an external event in the third-party system. For example, because a Slack message was published, or a REST endpoint is called. - -There are three types of inbound Connectors: - -1. **Webhook Connector**: An inbound connector which creates a webhook for a Camunda workflow. -2. **Subscription Connector**: An inbound Connector that subscribes to a message queue. -3. **Polling Connector**: An inbound Connector that periodically polls an external system or service for new data using HTTP polling. - -## Protocol Connectors - -Protocol Connectors can serve as either inbound or outbound Connectors, supporting a variety of technical protocols. These connectors are highly generic, designed to provide a flexible and customizable means of integrating with external systems and services. - -Protocol Connectors can be customized to meet the needs of specific use cases using configurable [Connector Templates](manage-connector-templates.md), with no additional coding or deployment required. Examples of protocol Connectors include HTTP REST, GraphQL, as well as message queue connectors. - -## Next steps - -Review the current list of [available Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md). diff --git a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/connector-sdk.md b/versioned_docs/version-8.2/components/connectors/custom-built-connectors/connector-sdk.md deleted file mode 100644 index c7196349ff4..00000000000 --- a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/connector-sdk.md +++ /dev/null @@ -1,1060 +0,0 @@ ---- -id: connector-sdk -title: Connector SDK -description: The Connector SDK allows you to develop custom Connectors using Java code. Focus on the logic of the Connector, test it locally, and reuse its runtime logic. ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -:::note -The **Connector SDK** is in developer preview and subject to breaking changes. Use at your own risk. -::: - -The **Connector SDK** allows you to [develop custom Connectors](#creating-a-custom-connector) -using Java code. - -You can focus on the logic of the Connector, test it locally, and -reuse its [runtime logic](#runtime-environment) in multiple [runtime environments](#runtime-environments). The SDK achieves this by abstracting from -Camunda 8 internals that usually come with -[job workers](/components/concepts/job-workers.md). - -You can find the latest **Connector SDK** version source code [here](https://github.com/camunda/connector-sdk). - -The SDK provides APIs for common Connector operations, such as: - -- Fetching and deserializing input data -- Validating input data -- Replacing secrets in input data - -Additionally, the SDK allows for convenient [testing](#testing) of your Connector behavior and -[executing it in the environments](#runtime-environments) that suit your use cases best. - -## Creating a custom Connector - -Using the Connector SDK, you can create environment-agnostic and reusable Connector runtime behavior. 
-This section outlines how to set up a Connector project, test it, and run it locally. - -### Setup - -When developing a new **Connector**, we recommend using one of our custom Connector -templates for custom [outbound](https://github.com/camunda/connector-template-outbound) and -[inbound](https://github.com/camunda/connector-template-inbound) connectors. -These templates are [Maven](https://maven.apache.org/)-based Java projects, and can be used in various -ways such as: - -- _Create your own GitHub repository_: Click **Use this template** and follow the prompted steps. - You can manage code changes in your new repository afterward. -- _Experiment locally_: Check out the source code to your local machine using [Git](https://git-scm.com/). - You won't be able to check in code changes to the repository due to restricted write access. -- _Fetch the source_: Download the source code as a ZIP archive using **Code** > **Download ZIP**. - You can adjust and manage the code the way you like afterward, using your chosen source code - management tools. - -To manually set up your Connector project, include the following dependency to use the SDK. -Ensure you adhere to the project outline detailed in the next section. - - - - - -```xml - - io.camunda.connector - connector-core - 0.11.2 - -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-core:0.11.2' -``` - - - - -### Outbound Connector project outline - -There are multiple parts of a Connector that enables it for reuse, as a -reusable building block, for modeling, and for the runtime behavior. -For example, the following parts make up an outbound Connector: - -``` -my-connector -├── element-templates/ -│ └── template-connector.json (1) -├── src/main -│ ├── java/io/camunda/connector (2) -│ │ ├── MyConnectorFunction.java (3) -│ │ ├── MyConnectorRequest.java (4) -│ │ └── MyConnectorResult.java (5) -│ └── resources/META-INF/services -│ └── io.camunda.connector.api.outbound.OutboundConnectorFunction (6) -└── pom.xml (7) -``` - -For the modeling building blocks, the Connector provides -[Connector templates](/components/connectors/custom-built-connectors/connector-templates.md) with **(1)**. - -You provide the runtime logic as Java source code under a directory like **(2)**. -Typically, a Connector runtime logic consists of the following: - -- Exactly one implementation of a `OutboundConnectorFunction` with **(3)**. -- At least one input data object like **(4)**. -- At least one result object like **(5)**. - -For a detectable Connector function, you are required to expose your function class name in the -[`OutboundConnectorFunction` SPI implementation](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/ServiceLoader.html) -with **(6)**. - -A configuration file like **(7)** manages the project setup, including dependencies. -In this example, we include a Maven project's `POM` file. Other build tools like -[Gradle](https://gradle.org/) can also be used. - -### Outbound Connector element template - -To create reusable building blocks for modeling, you are required to provide a -domain-specific [Connector template](/components/connectors/custom-built-connectors/connector-templates.md). 
- -A Connector template defines the binding to your Connector runtime behavior via the following object: - -```json -{ - "type": "Hidden", - "value": "io.camunda:template:1", - "binding": { - "type": "zeebe:taskDefinition:type" - } -} -``` - -This type definition `io.camunda:template:1` is the connection configuring which version of your Connector runtime behavior to use. -In technical terms, this defines the **Type** of jobs created for tasks in your process model that use this template. -Consult the [job worker](/components/concepts/job-workers.md) guide to learn more. - -Besides the type binding, Connector templates also define the input variables of your Connector as `zeebe:input` objects. -For example, you can create the input variable `message` of your Connector in the element template as follows: - -```json -{ - "label": "Message", - "type": "Text", - "feel": "optional", - "binding": { - "type": "zeebe:input", - "name": "message" - } -} -``` - -You can also define nested data structures to reflect domain objects that group attributes. -For example, you can create the domain object `authentication` that contains the properties -`user` and `token` as follows: - -```json -{ - "label": "Username", - "description": "The username for authentication.", - "type": "String", - "binding": { - "type": "zeebe:input", - "name": "authentication.user" - } -}, -{ - "label": "Token", - "description": "The token for authentication.", - "type": "String", - "binding": { - "type": "zeebe:input", - "name": "authentication.token" - } -} -``` - -You can deserialize these authentication properties into a domain object using the SDK. -Visit the [input data](#outbound-connector-input-data) section for further details. - -Connectors that offer any kind of result from their invocation should allow users to configure -how to map the result into their processes. Therefore, Connector templates can reuse the two -recommended objects, **Result Variable** and **Result Expression**: - -```json -{ - "label": "Result Variable", - "description": "Name of variable to store the response in", - "type": "String", - "binding": { - "type": "zeebe:taskHeader", - "key": "resultVariable" - } -}, -{ - "label": "Result Expression", - "description": "Expression to map the response into process variables", - "type": "Text", - "feel": "required", - "binding": { - "type": "zeebe:taskHeader", - "key": "resultExpression" - } -} -``` - -These objects create custom headers for the jobs created for the tasks that use this template. -The Connector runtime environments pick up those two custom headers and translate them into process variables accordingly. -You can see an example of how to use this in the [out-of-the-box REST Connector](/components/connectors/protocol/rest.md#response). - -All Connectors are recommended to offer exception handling to allow users to configure how to map results and technical errors into -BPMN errors. To provide this, Connector templates can reuse the recommended object **Result Expression**: - -```json -{ - "label": "Error Expression", - "description": "Expression to define BPMN Errors to throw", - "group": "errors", - "type": "Text", - "feel": "required", - "binding": { - "type": "zeebe:taskHeader", - "key": "errorExpression" - } -} -``` - -This object creates custom headers for the jobs created for the tasks that use this template. -The Connector runtime environments pick up this custom header and translate it into BPMN errors accordingly. 
-You can see an example of how to use this in the [BPMN errors in Connectors guide](/components/connectors/use-connectors/index.md#bpmn-errors). - -### Outbound Connector runtime logic - -To create a reusable runtime behavior for your Connector, you are required to implement -and expose an implementation of the `OutboundConnectorFunction` interface of the SDK. The Connector runtime -environments will call this function; it handles input data, executes the Connector's -business logic, and optionally returns a result. Exception handling is optional since the -Connector runtime environments handle this as a fallback. - -The `OutboundConnectorFunction` interface consists of exactly one `execute` method. A minimal recommended -outline of a Connector function implementation looks as follows: - -```java -package io.camunda.connector; - -import io.camunda.connector.api.annotation.OutboundConnector; -import io.camunda.connector.api.error.ConnectorException; -import io.camunda.connector.api.outbound.OutboundConnectorContext; -import io.camunda.connector.api.outbound.OutboundConnectorFunction; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@OutboundConnector( - name = "MYCONNECTOR", - inputVariables = {"myProperty", "authentication"}, - type = "io.camunda:template:1" -) -public class MyConnectorFunction implements OutboundConnectorFunction { - - private static final Logger LOGGER = LoggerFactory.getLogger(MyConnectorFunction.class); - - @Override - public Object execute(OutboundConnectorContext context) throws Exception { - // (1) - var connectorRequest = context.bindVariables(MyConnectorRequest.class); - // (2) - return executeConnector(connectorRequest); - } - - private MyConnectorResult executeConnector(final MyConnectorRequest connectorRequest) { - String message = connectorRequest.getMessage(); - // (3) - if (message != null && message.toLowerCase().startsWith("fail")) { - throw new ConnectorException("FAIL", "My property started with 'fail', was: " + message); - } - var result = new MyConnectorResult(); - - // (4) - result.setMyProperty("Message received: " + message); - return result; - } -} -``` - -The `execute` method receives all necessary environment data via the `OutboundConnectorContext` object. -The Connector runtime environment initializes the context and allows the following to occur: - -- Fetch and deserialize the input data as shown in **(1)**. See the [input data](#outbound-connector-input-data) section for details. -- Execute the Connector's business logic as shown in **(2)**. - -If the Connector handles exceptional cases, it can use any exception to express technical errors. If a technical -error should be associated with a specific error code, the Connector can throw a `ConnectorException` and define -a `code` as shown in **(3)**. -We recommend documenting the list of error codes as part of the Connector's API. Users can build on those codes -by creating [BPMN errors](/components/connectors/use-connectors/index.md#bpmn-errors) in their Connector configurations. - -If the Connector has a result to return, it can create a new result data object and set -its properties as shown in **(4)**. - -For best interoperability, Connector functions provide default meta-data via the `@OutboundConnector` annotation. -Connector runtime environments can use this data to auto-discover provided Connector runtime behavior. - -Using this outline, you start the business logic of your Connector in the `executeConnector` method -and expand from there. 
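The result object itself is not shown in the snippet above. As a point of reference, a minimal sketch of what `MyConnectorResult` (item **(5)** in the project outline) could look like follows from the `setMyProperty` call in the example — a plain POJO holding the value returned to the process:

```java
package io.camunda.connector;

public class MyConnectorResult {

  // Holds the value returned to the process, e.g. "Message received: ...".
  private String myProperty;

  public String getMyProperty() {
    return myProperty;
  }

  public void setMyProperty(String myProperty) {
    this.myProperty = myProperty;
  }
}
```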
- -#### Outbound Connector input data - -The input data of a Connector is provided by the process instance that executes the Connector. -You can either fetch this data as a raw JSON string using the context's `getVariables` method, -or deserialize the data into your own request object directly with the `bindVariables` -method shown in **(1)**. - -Using `bindVariables` will attempt to replace Connector secrets, deserialize the JSON string -containing the input data into Java objects, and perform the input validation. -The JSON deserialization depends on the Connector runtime environment your Connector function runs in. - -Thus, use this deserialization approach with caution. -While it works reliably for many input data types like string, boolean, integer, and nested -objects, you might want to consider deserializing your Connector's input data in a custom fashion -using `getVariables` and a library like [Jackson](https://github.com/FasterXML/jackson) or -[Gson](https://github.com/google/gson). - -The `bindVariables` method and tools like Jackson or Gson can properly reflect nested data -objects. You can define nested structures by referencing other Java classes as attributes. -Looking at the `authentication` data input example described in the [Connector template](#outbound-connector-element-template), -you can create the following input data objects to reflect the structure properly: - -```java -package io.camunda.connector; - -public class MyConnectorRequest { - - private String message; - private Authentication authentication; -} -``` - -```java -package io.camunda.connector; - -public class Authentication { - - private String user; - private String token; -} -``` - -### Inbound Connector project outline - -There are multiple parts of a Connector that enable its reuse as a -building block, for modeling, and for the runtime behavior. -For example, the following parts make up an inbound Connector: - -``` -my-connector -├── element-templates -│ └── inbound-template-connector.json (1) -├── pom.xml -├── src -│ ├── main -│ │ ├── java/io/camunda/connector -│ │ │ └── inbound -│ │ │ ├── MyConnectorExecutable.java (2) -│ │ │ ├── MyConnectorEvent.java (3) -│ │ │ ├── MyConnectorProperties.java (4) -│ │ │ └── subscription -│ │ │ ├── MockSubscription.java -│ │ │ └── MockSubscriptionEvent.java -│ │ └── resources/META-INF/services -│ │ └── io.camunda.connector.api.inbound.InboundConnectorExecutable (5) -``` - -For the modeling building blocks, the Connector provides -[Connector element templates](./connector-templates.md) with **(1)**. - -You provide the runtime logic as Java source code. -Typically, the Connector runtime logic consists of exactly one implementation of -an `InboundConnectorExecutable` with **(2)**, at least one input object like **(3)**, and the Connector's -properties like **(4)**. - -For a detectable Connector function, you are required to expose your function class name in the -[`InboundConnectorExecutable` SPI implementation](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/ServiceLoader.html) -with **(5)**. - -A configuration file (the `pom.xml` in the outline above) manages the project setup, including dependencies. -In this example, we include a Maven project's `POM` file. Other build tools like -[Gradle](https://gradle.org/) can also be used. - -### Inbound Connector element template - -To create reusable building blocks for modeling, you are required to provide a -domain-specific [Connector element template](./connector-templates.md). 
- -A Connector template defines the binding to your Connector runtime behavior via the following object: - -```json -{ - "type": "Hidden", - "value": "io.camunda:mytestinbound:1", - "binding": { - "type": "zeebe:property", - "name": "inbound.type" - } -} -``` - -This type definition `io.camunda:mytestinbound:1` is the connection configuring which version of your Connector runtime -behavior to use. In technical terms, this defines the **Type** of jobs created for tasks in your process model that use -this template. Consult the [job worker](../../concepts/job-workers.md) guide to learn more. - -Besides the type binding, Connector templates also define the properties of your Connector as `zeebe:property` objects. -For example, you can create the input variable `sender` of your Connector in the element template as follows: - -```json -{ - "type": "String", - "label": "Sender", - "description": "Message sender name", - "value": "Alice", - "binding": { - "type": "zeebe:property", - "name": "sender" - } -} -``` - -### Inbound Connector runtime logic - -To create a reusable runtime behavior for your Connector, you are required to implement -and expose an implementation of the `InboundConnectorExecutable` interface of the SDK. The Connector runtime -environments will call this function; it handles input data, executes the Connector's -business logic. Exception handling is optional since the Connector runtime environments handle this as a fallback. - -The `InboundConnectorExecutable` interface consists of two methods: `activate` and `deactivate`. -A minimal recommended outline of a Connector function implementation looks as follows: - -```java -package io.camunda.connector.inbound; - -import io.camunda.connector.api.annotation.InboundConnector; -import io.camunda.connector.api.inbound.InboundConnectorContext; -import io.camunda.connector.api.inbound.InboundConnectorExecutable; -import io.camunda.connector.inbound.subscription.MockSubscription; -import io.camunda.connector.inbound.subscription.MockSubscriptionEvent; - -@InboundConnector(name = "MYINBOUNDCONNECTOR", type = "io.camunda:mytestinbound:1") -public class MyConnectorExecutable implements InboundConnectorExecutable { - - private MockSubscription subscription; - private InboundConnectorContext connectorContext; - - @Override - public void activate(InboundConnectorContext connectorContext) { - MyConnectorProperties props = connectorContext.bindProperties(MyConnectorProperties.class); - - this.connectorContext = connectorContext; - - subscription = new MockSubscription( - props.getSender(), props.getMessagesPerMinute(), this::onEvent); - } - - @Override - public void deactivate() { - subscription.stop(); - } - - private void onEvent(MockSubscriptionEvent rawEvent) { - MyConnectorEvent connectorEvent = new MyConnectorEvent(rawEvent); - connectorContext.correlate(connectorEvent); - } -} -``` - -The `activate` method is a trigger function to start listening to inbound events. The implementation of this method -has to be asynchronous. Once activated, the inbound Connector execution is considered active and running. -From this point, it should use the respective methods of `InboundConnectorContext` to communicate with the Connector -runtime (e.g. to correlate the inbound event or signal the interrupt). - -The `deactivate` method is just a graceful shutdown hook for inbound connectors. -The implementation must release all resources used by the subscription. - -#### Validation - -Validating input data is a common task in a Connector function. 
The SDK provides -an out-of-the-box solution for input validation. -A default implementation of the SDK's core validation API is provided in a separate, -optional artifact `connector-validation`. If you want to use validation in your -Connector, add the following dependency to your project: - - - - - -```xml - - io.camunda.connector - connector-validation - 0.11.2 - -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-validation:0.11.2' -``` - - - - -Validation is performed automatically if you use the `bindVariables` / `bindProperties` methods. - -This instructs the context to prepare a validator that is provided by an implementation -of the `ValidationProvider` interface. The `connector-validation` artifact brings along -such an implementation. It uses the [Jakarta Bean Validation API](https://beanvalidation.org/) -together with [Hibernate Validator](https://hibernate.org/validator/). - -For your input object `connectorRequest` to be validated, you need to annotate the input's -attributes to define your requirements: - -```java -package io.camunda.connector; - -import javax.validation.Valid; -import javax.validation.constraints.NotEmpty; -import javax.validation.constraints.NotNull; - -public class MyConnectorRequest { - - @NotEmpty private String message; - @NotNull @Valid private Authentication authentication; -} -``` - -The Jakarta Bean Validation API comes with a long list of -[supported constraints](https://jakarta.ee/specifications/bean-validation/2.0/bean-validation_2.0.html#builtinconstraints). -It also allows to -[validate entire object graphs](https://jakarta.ee/specifications/bean-validation/2.0/bean-validation_2.0.html#constraintdeclarationvalidationprocess-validationroutine-graphvalidation) -using the `@Valid` annotation. Thus, the `authentication` object will also be validated. - -```java -package io.camunda.connector; - - -import javax.validation.constraints.NotEmpty; - -public class Authentication { - - @NotEmpty private String user; - - @NotEmpty @Pattern(regexp = "^xobx") private String token; -} -``` - -Using this approach, you can validate your whole input data structure with one initial call from -the central Connector function. - -Beyond that, the Jakarta Bean Validation API supports more advanced constructs like -[groups](https://jakarta.ee/specifications/bean-validation/2.0/bean-validation_2.0.html#constraintdeclarationvalidationprocess-groupsequence) -for conditional validation and constraints on different types, i.e., attributes, methods, and classes, -to enable [cross-parameter validation](https://www.baeldung.com/javax-validation-method-constraints). -You can use the built-in constraints and create custom ones to define requirements exactly as -you need them. - -If the validation approach that comes with `connector-validation` doesn't fit your needs, you -can provide your own SPI implementing the SDK's `ValidationProvider` interface. Have a look at -the [connector validation code](https://github.com/camunda/connector-sdk/tree/main/validation) -for a default implementation. - -##### Conditional validation - -Validating Connector input data can require to check different constraints, depending on the -specific input data itself. As an example, the following `authentication` input object requires -that `oauthToken` is only necessary when the `type` is `oauth`. If the type is `basic`, the -attribute `password` is required instead. 
- -```java -public class Authentication { - - private String type; - private String user; - private String password; - private String oauthToken; -} -``` - -Using the `connector-validation` module, there are three common options to achieve this conditional validation: - -1. Write a [custom constraint](#custom-constraint) that allows you to validate one attribute in relation to another attribute. - This approach yields a reusable constraint that you can use in other classes as well. It also comes with the highest - implementation effort. -1. Write [manual, imperative validation logic](#manual-validation-method) in a method with a boolean return value and annotate - it with `@AssertTrue`. This approach requires less code, but the result is specific to the respective class. You - cannot reuse the logic in other classes as is. This approach also comes without further constraint annotation support. You have - to write all validation logic manually in the method. -1. Define [validation groups dynamically](#dynamic-validation-groups) with Hibernate Validator's `@DefaultGroupSequenceProvider`. - This approach allows you to reuse existing constraint annotations and to only apply them for specific use cases. It has a - higher complexity than an imperative validation method but avoids writing manual - validation logic. - -Each option has its own benefits and drawbacks, depending on what you need in your Connector. The following sections -cover each of the options in more detail. - -###### Custom constraint - -The [Bean Validation guide](https://jakarta.ee/specifications/bean-validation/2.0/bean-validation_2.0.html#constraintsdefinitionimplementation) -covers defining **custom constraints** extensively. For the use case described above, you could -write a custom constraint like the following: - -```java -@Target({TYPE, ANNOTATION_TYPE}) -@Retention(RUNTIME) -@Repeatable(NotNullIfAnotherFieldHasValue.List.class) -@Constraint(validatedBy = NotNullIfAnotherFieldHasValueValidator.class) -@Documented -public @interface NotNullIfAnotherFieldHasValue { - - String fieldName(); - String fieldValue(); - String dependFieldName(); - - String message() default "{NotNullIfAnotherFieldHasValue.message}"; - Class<?>[] groups() default {}; - Class<? extends Payload>[] payload() default {}; - - @Target({TYPE, ANNOTATION_TYPE}) - @Retention(RUNTIME) - @Documented - @interface List { - NotNullIfAnotherFieldHasValue[] value(); - } - -} -``` - -You can use this constraint on the Connector input object as follows: - -```java -@NotNullIfAnotherFieldHasValue( - fieldName = "type", - fieldValue = "oauth", - dependFieldName = "oauthToken") -@NotNullIfAnotherFieldHasValue( - fieldName = "type", - fieldValue = "basic", - dependFieldName = "password") -public class Authentication { - - @NotEmpty - private String type; - @NotEmpty - private String user; - private String password; - private String oauthToken; -} -``` - -You can find more details and the `NotNullIfAnotherFieldHasValueValidator` implementation in -[this StackOverflow thread](https://stackoverflow.com/questions/9284450/jsr-303-validation-if-one-field-equals-something-then-these-other-fields-sho/9287796#9287796). - -This approach is the most flexible and reusable one for writing conditional constraints. It is -independent of the parameters and classes involved. However, for simple use cases, one of the -following approaches might lead to more maintainable results that require less code. 
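Before moving on to those alternatives, here is a minimal sketch of what the `NotNullIfAnotherFieldHasValueValidator` referenced above could look like. This is an illustration only: the linked StackOverflow answer uses Spring's `BeanWrapperImpl`, while this sketch assumes Apache Commons BeanUtils for reading the two properties by name.

```java
import javax.validation.ConstraintValidator;
import javax.validation.ConstraintValidatorContext;
import org.apache.commons.beanutils.BeanUtils;

public class NotNullIfAnotherFieldHasValueValidator
    implements ConstraintValidator<NotNullIfAnotherFieldHasValue, Object> {

  private String fieldName;
  private String expectedFieldValue;
  private String dependFieldName;

  @Override
  public void initialize(NotNullIfAnotherFieldHasValue annotation) {
    fieldName = annotation.fieldName();
    expectedFieldValue = annotation.fieldValue();
    dependFieldName = annotation.dependFieldName();
  }

  @Override
  public boolean isValid(Object value, ConstraintValidatorContext context) {
    try {
      // Read both attributes of the annotated object by property name.
      String fieldValue = BeanUtils.getProperty(value, fieldName);
      String dependFieldValue = BeanUtils.getProperty(value, dependFieldName);

      // The dependent field is only required when the trigger field has the configured value.
      if (expectedFieldValue.equals(fieldValue) && dependFieldValue == null) {
        context.disableDefaultConstraintViolation();
        context
            .buildConstraintViolationWithTemplate(context.getDefaultConstraintMessageTemplate())
            .addPropertyNode(dependFieldName)
            .addConstraintViolation();
        return false;
      }
      return true;
    } catch (ReflectiveOperationException e) {
      throw new IllegalArgumentException("Cannot read properties for validation", e);
    }
  }
}
```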
- -###### Manual validation method - -The Jakarta Bean Validation API comes with an -[AssertTrue](https://jakarta.ee/specifications/bean-validation/2.0/bean-validation_2.0.html#builtinconstraints-asserttrue) -constraint that you can use to ensure that boolean attributes are `true`. - -The Bean Validation API also allows you to use this annotation on methods. These are usually -the getter methods for boolean attributes. However, there doesn't have to be a related boolean -attribute in an object in order to validate a method constraint. Thus, you can use this constraint -to write manual validation logic in a method that returns a boolean value and whose name starts with `is`. - -For the example use case, you can write a method that verifies the requirements as follows: - -```java -public class Authentication { - - @NotEmpty private String type; - @NotEmpty private String user; - private String password; - private String oauthToken; - - @AssertTrue(message = "Authentication must contain 'oauthToken' for type 'oauth' and 'password' for type 'basic'") - public boolean isAuthValid() { - return ("basic".equals(type) && password != null) || - ("oauth".equals(type) && oauthToken != null); - } -} -``` - -This approach allows for concise conditional validation when the constraint logic is simple -and does not justify creating more complex, reusable interfaces and validators. - -###### Dynamic validation groups - -The Jakarta Bean Validation API allows you to statically define validation -[groups](https://jakarta.ee/specifications/bean-validation/2.0/bean-validation_2.0.html#constraintdeclarationvalidationprocess-groupsequence) -for conditional constraint evaluation. However, to use those groups, you have to define the -group to validate statically when starting the validation. To dynamically define the groups to -validate, you can use Hibernate Validator's -[DefaultGroupSequenceProvider](https://docs.jboss.org/hibernate/validator/6.2/reference/en-US/html_single/#_code_groupsequenceprovider_code). - -Given the following validation groups: - -```java -public interface BasicAuthValidation {} -public interface OAuthValidation {} -``` - -You can annotate the input object as follows: - -```java -@GroupSequenceProvider(AuthenticationSequenceProvider.class) -public class Authentication { - - @NotEmpty private String type; - @NotEmpty private String user; - @NotEmpty(groups = BasicAuthValidation.class) - private String password; - @NotEmpty(groups = OAuthValidation.class) - private String oauthToken; -} -``` - -The `AuthenticationSequenceProvider` needs to implement the `DefaultGroupSequenceProvider` interface to -dynamically add the validation groups you need: - -```java -public class AuthenticationSequenceProvider implements DefaultGroupSequenceProvider<Authentication> { - - @Override - public List<Class<?>> getValidationGroups(Authentication authentication) { - - List<Class<?>> sequence = new ArrayList<>(); - - // Apply all validation rules from the Default group, e.g. ensuring type is not empty - sequence.add(Authentication.class); - - if ("basic".equals(authentication.getType())) { - sequence.add(BasicAuthValidation.class); - } else if ("oauth".equals(authentication.getType())) { - sequence.add(OAuthValidation.class); - } - - return sequence; - } -} -``` - -Using this approach, you can reuse existing constraint annotations in your input objects. -However, the sequence provider is bound to your specific input class and is therefore less reusable -than writing custom constraints. 
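To see the dynamic groups in action, the following sketch validates an incomplete OAuth configuration. It assumes standard getters and setters on `Authentication` (not shown above) and uses the plain `javax.validation` bootstrap API; it is an illustration, not part of the SDK.

```java
import java.util.Set;
import javax.validation.ConstraintViolation;
import javax.validation.Validation;
import javax.validation.Validator;

public class AuthenticationValidationExample {

  public static void main(String[] args) {
    Validator validator = Validation.buildDefaultValidatorFactory().getValidator();

    // Hypothetical setters; the snippets above only show the fields of Authentication.
    Authentication auth = new Authentication();
    auth.setType("oauth");
    auth.setUser("testuser");
    // oauthToken is intentionally left empty.

    // Because the sequence provider adds OAuthValidation for type "oauth",
    // the @NotEmpty constraint on oauthToken is evaluated and reported as a violation.
    Set<ConstraintViolation<Authentication>> violations = validator.validate(auth);
    violations.forEach(v -> System.out.println(v.getPropertyPath() + ": " + v.getMessage()));
  }
}
```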
- -#### Secrets - -Connectors that require confidential information to connect to external systems need to be able -to manage those securely. As described in the -[guide for creating secrets](/components/console/manage-clusters/manage-secrets.md), secrets can be -controlled in a secure location and referenced in a Connector's properties using a placeholder -pattern `{{secrets.*}}`. To make this mechanism as robust as possible, secret handling comes with -the Connector SDK out of the box. That way, all Connectors can use the same standard way of -handling secrets in input data. - -The SDK allows replacing secrets in input data as late as possible to avoid passing them around -in the environments that handle Connector invocation. We do not pass secrets into the -Connector function in clear text but only as placeholders that you can replace from -within the Connector function. - -Secrets are replaced automatically in the Connector input when you use the variable access methods -of the `OutboundConnectorContext` or properties access methods of the `InboundConnectorContext`. -You will always receive inputs with secrets replaced. - -The Runtime automatically replaces secrets in String fields or in container types. Using the -placeholder pattern `{{secrets.*}}` in a String field will replace the placeholder with the secret -value. Using the placeholder pattern in a container type will replace the placeholder in all -String fields of the container type. - -```java -package io.camunda.connector; - -public class MyConnectorRequest { - - private String message; - private Authentication authentication; -} -``` - -```java -package io.camunda.connector; - -import io.camunda.connector.api.annotation.Secret; - -public class Authentication { - - private String user; - private String token; -} -``` - -In the input model above, the Runtime will attempt to find and replace secrets in all String fields -of the `Authentication` and `MyConnectorRequest` classes. - -## Testing - -Ensuring your Connector's business logic works as expected is vital to develop the Connector. -The SDK aims to make testing of Connectors convenient without imposing strict -requirements on your test development flow. The SDK is not enforcing any testing libraries. - -By abstracting from Camunda 8 internals, the SDK provides a good starting -ground for scoped testing. There is no need to test Camunda engine internals or provide related mocks. -You can focus on testing the business logic of your Connector and the associated objects. - -We recommend testing at least the following parts of your Connector project: - -- All data validation works as expected. -- All expected attributes support secret replacement. -- The core logic of your Connector works as expected until calling the external API or service. - -The SDK provides a `OutboundConnectorContextBuilder` for test cases that lets you create a `OutboundConnectorContext`. -You can conveniently use that test context to test the secret replacement and validation routines. - -Writing secret replacement tests can look similar to the following test case. 
-
-## Testing
-
-Ensuring your Connector's business logic works as expected is a vital part of developing it.
-The SDK aims to make testing Connectors convenient without imposing strict requirements
-on your test development flow, and it does not enforce any particular testing library.
-
-By abstracting from Camunda 8 internals, the SDK provides a good starting
-ground for scoped testing. There is no need to test Camunda engine internals or provide related mocks.
-You can focus on testing the business logic of your Connector and the associated objects.
-
-We recommend testing at least the following parts of your Connector project:
-
-- All data validation works as expected.
-- All expected attributes support secret replacement.
-- The core logic of your Connector works as expected until calling the external API or service.
-
-The SDK provides an `OutboundConnectorContextBuilder` for test cases that lets you create an `OutboundConnectorContext`.
-You can conveniently use that test context to test the secret replacement and validation routines.
-
-Writing secret replacement tests can look similar to the following test case. You can write one test
-case for each attribute that supports secret replacement:
-
-```java
-@Test
-void shouldReplaceTokenSecretWhenReplaceSecrets() {
-  // given
-  var input = new MyConnectorRequest();
-  var auth = new Authentication();
-  input.setMessage("Hello World!");
-  input.setAuthentication(auth);
-  auth.setToken("{{secrets.MY_TOKEN}}");
-  auth.setUser("testuser");
-  var context = OutboundConnectorContextBuilder.create()
-      .secret("MY_TOKEN", "token value")
-      .variables(input)
-      .build();
-  // when
-  var variables = context.bindVariables(MyConnectorRequest.class);
-  // then
-  assertThat(variables)
-      .extracting("authentication")
-      .extracting("token")
-      .isEqualTo("token value");
-}
-```
-
-Tests that ensure the validation routines work as expected can be written similarly for every
-required attribute:
-
-```java
-@Test
-void shouldFailWhenValidate_NoAuthentication() {
-  // given
-  var input = new MyConnectorRequest();
-  input.setMessage("Hello World!");
-  var context = OutboundConnectorContextBuilder.create().build();
-  // when
-  assertThatThrownBy(() -> context.validate(input))
-      // then
-      .isInstanceOf(IllegalArgumentException.class)
-      .hasMessageContaining("authentication");
-}
-```
-
-Testing custom validations works in the same way:
-
-```java
-@Test
-void shouldFailWhenValidate_TokenWrongPattern() {
-  // given
-  var input = new MyConnectorRequest();
-  var auth = new Authentication();
-  input.setMessage("foo");
-  input.setAuthentication(auth);
-  auth.setUser("testuser");
-  auth.setToken("test");
-  var context = OutboundConnectorContextBuilder.create().build();
-  // when
-  assertThatThrownBy(() -> context.validate(input))
-      // then
-      .isInstanceOf(IllegalArgumentException.class)
-      .hasMessageContaining("Token must start with \"xobx\"");
-}
-```
-
-Testing the business logic of your Connector can vary widely depending on the
-functionality it provides. For our example logic, the following test would be a good start:
-
-```java
-@Test
-void shouldReturnReceivedMessageWhenExecute() throws Exception {
-  // given
-  var input = new MyConnectorRequest();
-  var auth = new Authentication();
-  input.setMessage("Hello World!");
-  input.setAuthentication(auth);
-  auth.setToken("xobx-test");
-  auth.setUser("testuser");
-  var function = new MyConnectorFunction();
-  var context = OutboundConnectorContextBuilder.create()
-      .variables(input)
-      .build();
-  // when
-  var result = function.execute(context);
-  // then
-  assertThat(result)
-      .isInstanceOf(MyConnectorResult.class)
-      .extracting("myProperty")
-      .isEqualTo("Message received: Hello World!");
-}
-```
-
-## Runtime environments
-
-To integrate Connectors with your business use case, you need a runtime environment that acts as
-the intermediary between your Camunda 8 cluster and your Connectors.
-
-The Connector SDK enables you to write environment-agnostic runtime behavior for Connectors.
-This makes the Connector logic reusable in different setups without modifying your Connector
-code. To invoke this logic, you need a runtime environment that knows the Connector function
-and how to call it.
-
-In Camunda 8 SaaS, every cluster runs a component that knows the
-[available out-of-the-box connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md)
-and how to invoke them. This component is the runtime environment specific to Camunda's SaaS use case.
-
-In Self-Managed environments, you are responsible for providing a runtime environment that
-can invoke the Connectors.
-
-There are several runtime options provided by Camunda:
-
-- [Spring Boot Starter runtime](#spring-boot-starter-runtime)
-- [Docker runtime image](#docker-runtime-image)
-- [Custom runtime environment](#custom-runtime-environment)
-
-### Spring Boot Starter runtime
-
-This option is applicable for Spring Boot users. All you need to do is include the respective
-starter, together with your Connector, as dependencies:
-
-```xml
-<dependency>
-  <groupId>io.camunda.connector</groupId>
-  <artifactId>spring-boot-starter-camunda-connectors</artifactId>
-  <version>${version.connectors}</version>
-</dependency>
-
-<dependency>
-  <groupId>org.myorg</groupId>
-  <artifactId>connector-my-awesome</artifactId>
-  <version>${version.connector-my-awesome}</version>
-</dependency>
-```
-
-Upon starting your Spring Boot application, you will have a job worker connected to Zeebe, waiting to
-receive jobs for your Connectors.
-
-### Docker runtime image
-
-This option is applicable for users who prefer Docker.
-
-The Docker image can be found on [Docker Hub](https://hub.docker.com/r/camunda/connectors) or alternatively
-built [from source](https://github.com/camunda/connector-runtime-docker).
-
-To build it, run `docker build -t camunda/connectors:X.Y.Z .`.
-
-Once you have built the Docker image and packaged your custom Connector as a JAR, you can start the runtime with:
-
-```shell
-docker run --rm --name=connectors -d \
-  -v $PWD/connector.jar:/opt/app/connector.jar \ # Add a connector jar to the classpath
-  --network=your-zeebe-network \ # Optional: attach to network if Zeebe is isolated with Docker network
-  -e ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=ip.address.of.zeebe:26500 \ # Specify Zeebe address
-  -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=true \ # Optional: provide security configs to connect to Zeebe
-  -e CAMUNDA_OPERATE_CLIENT_URL=http://ip.address.of.operate:8080 \ # Specify Operate URL for inbound Connectors
-  -e CAMUNDA_OPERATE_CLIENT_USERNAME=demo \ # Optional: provide Operate credentials
-  -e CAMUNDA_OPERATE_CLIENT_PASSWORD=demo \
-  -e MY_SECRET=secret \ # Optional: set a secret with value
-  -e SECRET_FROM_SHELL \ # Optional: set a secret from the environment
-  --env-file secrets.txt \ # Optional: set secrets from a file
-  camunda/connectors:X.Y.Z
-```
-
-If you would like to disable inbound Connectors, you can do so by setting `CAMUNDA_CONNECTOR_POLLING_ENABLED=false`.
-
-### Custom runtime environment
-
-A custom runtime environment may be required if your organizational and infrastructural needs are not met
-by the existing pre-packaged runtime environments. Such use cases may include (but are not limited to) running on custom serverless services or software platforms.
-
-If the pre-packaged runtime environments that come with the SDK do not fit your use case,
-you can create a custom runtime environment. There are two options that come with the SDK:
-
-- Wrap Connector functions as job workers using the `ConnectorJobHandler`.
-- Implement your own Connector function wrapper.
-
-#### Connector job handler
-
-To wrap Connector functions as job workers, the SDK provides the wrapper class `ConnectorJobHandler`.
-
-The job handler wrapper provides the following benefits:
-
-- Provides an `OutboundConnectorContext` that handles the Camunda-internal job worker API regarding variables.
-- Handles secret management by defaulting to an environment variables-based secret store and
-  allowing you to provide a custom secret provider via an SPI for `io.camunda.connector.api.secret.SecretProvider`.
-- Handles Connector result mapping for **Result Variable** and **Result Expression** as described
-  in the [Connector element template](#outbound-connector-element-template) section.
-- Provides flexible BPMN error handling via **Error Expression** as described in the
-  [Connector template](#outbound-connector-element-template) section.
-
-Using the wrapper class, you can register a Connector function as a job worker with a custom [Zeebe client](/apis-tools/working-with-apis-tools.md).
-For example, you can spin up such a worker with the
-[Zeebe Java client](/apis-tools/java-client/index.md) as follows:
-
-```java
-import io.camunda.connector.MyConnectorFunction;
-import io.camunda.connector.runtime.jobworker.outbound.ConnectorJobHandler;
-import io.camunda.zeebe.client.ZeebeClient;
-
-public class Main {
-
-  public static void main(String[] args) {
-
-    var zeebeClient = ZeebeClient.newClientBuilder().build();
-
-    zeebeClient.newWorker()
-        .jobType("io.camunda:template:1")
-        .handler(new ConnectorJobHandler(new MyConnectorFunction()))
-        .name("MESSAGE")
-        .fetchVariables("authentication", "message")
-        .open();
-  }
-}
-```
-
-#### Custom function wrapper
-
-If the provided job handler wrapper does not fit your needs, you can extend or replace
-it with your own job handler implementation that handles invoking the Connector functions.
-
-Your custom job handler needs to create an `OutboundConnectorContext` that the Connector
-function can use to handle variables, secrets, and Connector results. You can extend the
-provided `io.camunda.connector.impl.outbound.AbstractConnectorContext` to quickly gain access
-to most of the common context operations.
diff --git a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/connector-templates.md b/versioned_docs/version-8.2/components/connectors/custom-built-connectors/connector-templates.md
deleted file mode 100644
index 08f97594e3b..00000000000
--- a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/connector-templates.md
+++ /dev/null
@@ -1,623 +0,0 @@
----
-id: connector-templates
-title: Connector templates
-description: Learn how to modify BPMN elements with Connector templates to create custom modeling experiences.
----
-
-import Tabs from "@theme/Tabs";
-import TabItem from "@theme/TabItem";
-
-**Connector templates** are JSON configuration files that customize how a BPMN element is shown
-and how it can be configured by process developers. Connector templates are a specific kind of [element template](/components/modeler/desktop-modeler/element-templates/about-templates.md).
-
-Before developing one, you'll need to decide what you would like to achieve with your Connector.
-
-Currently, the options are:
-
-- Start a BPMN process triggered by an external service - use an [inbound start event Connector template](#inbound-start-event-connector-templates)
-- Continue a process with an intermediate catch event emitted by an external service call - use an [inbound intermediate catch event Connector template](#inbound-intermediate-catch-event-connector-templates)
-- Trigger an external service - use an [outbound Connector template](#outbound-connector-templates)
-
-:::note
-Do not confuse **Connector templates** with the **[Connector template](https://github.com/camunda/connector-template-outbound)**,
-which is used to supply boilerplate code and configuration when developing a new custom Connector.
-::: - -## Inbound start event Connector templates - -You can, for example, allow the user to model and configure the following **HTTP Webhook Connector** by providing -a simple JSON configuration: - - - - - -![Webhook Inbound Connector Example](./img/custom-connector-template-inbound-start.png) - - - - - -```json -{ - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "Webhook Connector", - "id": "io.camunda.connectors.webhook.WebhookConnector.v1", - "version": 1, - "description": "Configure webhook to receive callbacks", - "documentationRef": "https://docs.camunda.io/docs/components/connectors/out-of-the-box-connectors/http-webhook/", - "category": { - "id": "connectors", - "name": "Connectors" - }, - "appliesTo": ["bpmn:StartEvent"], - "elementType": { - "value": "bpmn:StartEvent" - }, - "groups": [ - { - "id": "endpoint", - "label": "Webhook Configuration" - }, - { - "id": "activation", - "label": "Activation" - }, - { - "id": "variable-mapping", - "label": "Variable Mapping" - } - ], - "properties": [ - { - "type": "Hidden", - "value": "io.camunda:webhook:1", - "binding": { - "type": "zeebe:property", - "name": "inbound.type" - } - }, - { - "type": "Hidden", - "value": "ConfigurableInboundWebhook", - "binding": { - "type": "zeebe:property", - "name": "inbound.subtype" - } - }, - { - "label": "Webhook ID", - "type": "String", - "group": "endpoint", - "binding": { - "type": "zeebe:property", - "name": "inbound.context" - }, - "description": "The webhook ID is a part of the URL" - }, - { - "id": "shouldValidateHmac", - "label": "HMAC authentication", - "group": "endpoint", - "description": "Choose whether HMAC verification is enabled. See documentation and example that explains how to use HMAC-related fields", - "value": "disabled", - "type": "Dropdown", - "choices": [ - { - "name": "Enabled", - "value": "enabled" - }, - { - "name": "Disabled", - "value": "disabled" - } - ], - "binding": { - "type": "zeebe:property", - "name": "inbound.shouldValidateHmac" - } - }, - { - "label": "HMAC secret key", - "description": "Shared secret key", - "type": "String", - "group": "endpoint", - "optional": true, - "binding": { - "type": "zeebe:property", - "name": "inbound.hmacSecret" - }, - "condition": { - "property": "shouldValidateHmac", - "equals": "enabled" - } - }, - { - "label": "HMAC header", - "description": "Name of header attribute that will contain the HMAC value", - "type": "String", - "group": "endpoint", - "optional": true, - "binding": { - "type": "zeebe:property", - "name": "inbound.hmacHeader" - }, - "condition": { - "property": "shouldValidateHmac", - "equals": "enabled" - } - }, - { - "label": "HMAC algorithm", - "group": "endpoint", - "description": "Choose HMAC algorithm", - "value": "sha_256", - "type": "Dropdown", - "choices": [ - { - "name": "SHA-1", - "value": "sha_1" - }, - { - "name": "SHA-256", - "value": "sha_256" - }, - { - "name": "SHA-512", - "value": "sha_512" - } - ], - "binding": { - "type": "zeebe:property", - "name": "inbound.hmacAlgorithm" - }, - "condition": { - "property": "shouldValidateHmac", - "equals": "enabled" - } - }, - { - "label": "Condition", - "type": "String", - "group": "activation", - "feel": "required", - "optional": true, - "binding": { - "type": "zeebe:property", - "name": "inbound.activationCondition" - }, - "description": "Condition under which the connector triggers. Leave empty to catch all events. 
See documentation" - }, - { - "label": "Variables", - "type": "String", - "group": "variable-mapping", - "feel": "required", - "binding": { - "type": "zeebe:property", - "name": "inbound.variableMapping" - }, - "description": "Map variables from the webhook payload (request) to start the process with. When blank, entire payload is copied over. See documentation" - } - ], - "icon": { - "contents": "data:image/svg+xml,%3Csvg id='icon' xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 32 32'%3E%3Cdefs%3E%3Cstyle%3E .cls-1 %7B fill: none; %7D %3C/style%3E%3C/defs%3E%3Cpath d='M24,26a3,3,0,1,0-2.8164-4H13v1a5,5,0,1,1-5-5V16a7,7,0,1,0,6.9287,8h6.2549A2.9914,2.9914,0,0,0,24,26Z'/%3E%3Cpath d='M24,16a7.024,7.024,0,0,0-2.57.4873l-3.1656-5.5395a3.0469,3.0469,0,1,0-1.7326.9985l4.1189,7.2085.8686-.4976a5.0006,5.0006,0,1,1-1.851,6.8418L17.937,26.501A7.0005,7.0005,0,1,0,24,16Z'/%3E%3Cpath d='M8.532,20.0537a3.03,3.03,0,1,0,1.7326.9985C11.74,18.47,13.86,14.7607,13.89,14.708l.4976-.8682-.8677-.497a5,5,0,1,1,6.812-1.8438l1.7315,1.002a7.0008,7.0008,0,1,0-10.3462,2.0356c-.457.7427-1.1021,1.8716-2.0737,3.5728Z'/%3E%3Crect id='_Transparent_Rectangle_' data-name='<Transparent Rectangle>' class='cls-1' width='32' height='32'/%3E%3C/svg%3E" - } -} -``` - - - - -## Inbound intermediate catch event Connector templates - -You can, for example, allow the user to model and configure the following **HTTP Webhook Connector** by providing -a simple JSON configuration: - - - - - -![Webhook Inbound intermediate Connector Example](./img/custom-connector-template-inbound-intermediate.png) - - - - - -```json -{ - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "Webhook Connector", - "id": "io.camunda.connectors.webhook.WebhookConnectorIntermediate.v1", - "version": 1, - "description": "Configure webhook to receive callbacks", - "documentationRef": "https://docs.camunda.io/docs/components/connectors/out-of-the-box-connectors/http-webhook/", - "category": { - "id": "connectors", - "name": "Connectors" - }, - "appliesTo": ["bpmn:IntermediateCatchEvent", "bpmn:IntermediateThrowEvent"], - "elementType": { - "value": "bpmn:IntermediateCatchEvent", - "eventDefinition": "bpmn:MessageEventDefinition" - }, - "groups": [ - { - "id": "endpoint", - "label": "Webhook Configuration" - }, - { - "id": "activation", - "label": "Activation" - }, - { - "id": "variable-mapping", - "label": "Variable Mapping" - } - ], - "properties": [ - { - "type": "Hidden", - "value": "io.camunda:webhook:1", - "binding": { - "type": "zeebe:property", - "name": "inbound.type" - } - }, - { - "type": "Hidden", - "generatedValue": { - "type": "uuid" - }, - "binding": { - "type": "bpmn:Message#property", - "name": "name" - } - }, - { - "type": "Hidden", - "value": "ConfigurableInboundWebhook", - "binding": { - "type": "zeebe:property", - "name": "inbound.subtype" - } - }, - { - "label": "Webhook ID", - "type": "String", - "group": "endpoint", - "binding": { - "type": "zeebe:property", - "name": "inbound.context" - }, - "description": "The webhook ID is a part of the URL" - }, - { - "id": "shouldValidateHmac", - "label": "HMAC authentication", - "group": "endpoint", - "description": "Choose whether HMAC verification is enabled. 
See documentation and example that explains how to use HMAC-related fields", - "value": "disabled", - "type": "Dropdown", - "choices": [ - { - "name": "Enabled", - "value": "enabled" - }, - { - "name": "Disabled", - "value": "disabled" - } - ], - "binding": { - "type": "zeebe:property", - "name": "inbound.shouldValidateHmac" - } - }, - { - "label": "HMAC secret key", - "description": "Shared secret key", - "type": "String", - "group": "endpoint", - "optional": true, - "binding": { - "type": "zeebe:property", - "name": "inbound.hmacSecret" - }, - "condition": { - "property": "shouldValidateHmac", - "equals": "enabled" - } - }, - { - "label": "HMAC header", - "description": "Name of header attribute that will contain the HMAC value", - "type": "String", - "group": "endpoint", - "optional": true, - "binding": { - "type": "zeebe:property", - "name": "inbound.hmacHeader" - }, - "condition": { - "property": "shouldValidateHmac", - "equals": "enabled" - } - }, - { - "label": "HMAC algorithm", - "group": "endpoint", - "description": "Choose HMAC algorithm", - "value": "sha_256", - "type": "Dropdown", - "choices": [ - { - "name": "SHA-1", - "value": "sha_1" - }, - { - "name": "SHA-256", - "value": "sha_256" - }, - { - "name": "SHA-512", - "value": "sha_512" - } - ], - "binding": { - "type": "zeebe:property", - "name": "inbound.hmacAlgorithm" - }, - "condition": { - "property": "shouldValidateHmac", - "equals": "enabled" - } - }, - { - "label": "Correlation key (process)", - "type": "String", - "group": "activation", - "feel": "required", - "description": "Sets up the correlation key from process variables", - "binding": { - "type": "bpmn:Message#zeebe:subscription#property", - "name": "correlationKey" - }, - "constraints": { - "notEmpty": true - } - }, - { - "label": "Correlation key (payload)", - "type": "String", - "group": "activation", - "feel": "required", - "binding": { - "type": "zeebe:property", - "name": "correlationKeyExpression" - }, - "description": "Extracts the correlation key from the incoming message payload", - "constraints": { - "notEmpty": true - } - }, - { - "label": "Condition", - "type": "String", - "group": "activation", - "feel": "required", - "optional": true, - "binding": { - "type": "zeebe:property", - "name": "inbound.activationCondition" - }, - "description": "Condition under which the connector triggers. Leave empty to catch all events. See documentation" - }, - { - "label": "Variables", - "type": "String", - "group": "variable-mapping", - "feel": "required", - "binding": { - "type": "zeebe:property", - "name": "inbound.variableMapping" - }, - "description": "Map variables from the webhook payload (request) to start the process with. When blank, entire payload is copied over. 
See documentation" - } - ], - "icon": { - "contents": "data:image/svg+xml,%3Csvg id='icon' xmlns='http://www.w3.org/2000/svg' width='18' height='18' viewBox='0 0 32 32'%3E%3Cdefs%3E%3Cstyle%3E .cls-1 %7B fill: none; %7D %3C/style%3E%3C/defs%3E%3Cpath d='M24,26a3,3,0,1,0-2.8164-4H13v1a5,5,0,1,1-5-5V16a7,7,0,1,0,6.9287,8h6.2549A2.9914,2.9914,0,0,0,24,26Z'/%3E%3Cpath d='M24,16a7.024,7.024,0,0,0-2.57.4873l-3.1656-5.5395a3.0469,3.0469,0,1,0-1.7326.9985l4.1189,7.2085.8686-.4976a5.0006,5.0006,0,1,1-1.851,6.8418L17.937,26.501A7.0005,7.0005,0,1,0,24,16Z'/%3E%3Cpath d='M8.532,20.0537a3.03,3.03,0,1,0,1.7326.9985C11.74,18.47,13.86,14.7607,13.89,14.708l.4976-.8682-.8677-.497a5,5,0,1,1,6.812-1.8438l1.7315,1.002a7.0008,7.0008,0,1,0-10.3462,2.0356c-.457.7427-1.1021,1.8716-2.0737,3.5728Z'/%3E%3Crect id='_Transparent_Rectangle_' data-name='<Transparent Rectangle>' class='cls-1' width='32' height='32'/%3E%3C/svg%3E" - } -} -``` - - - - -## Outbound Connector templates - -You can, for example, allow the user to model and configure the following **REST Connector** by providing a JSON configuration for a service task: - - - - - -![REST Outbound Connector Example](./img/custom-connector-template.png) - - - - - -```json -{ - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "(Conditional) REST Connector", - "id": "io.camunda.examples.ConditionalRestConnector", - "description": "A REST API invocation task.", - "appliesTo": ["bpmn:ServiceTask"], - "icon": { - "contents": "data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='22' height='22' viewBox='0 0 22 22' fill='none'%3E%3Ccircle cx='11' cy='11' r='9' fill='black'/%3E%3Ctext x='6.9' y='14.9' fill='white' style='font-family: Arial; font-size: 10px;'%3EM%3C/text%3E%3C/svg%3E" - }, - "properties": [ - { - "type": "Hidden", - "value": "http", - "binding": { - "type": "zeebe:taskDefinition:type" - } - }, - { - "label": "REST Endpoint URL", - "description": "Specify the url of the REST API to talk to.", - "type": "String", - "binding": { - "type": "zeebe:taskHeader", - "key": "url" - }, - "constraints": { - "notEmpty": true, - "pattern": { - "value": "^https?://.*", - "message": "Must be http(s) URL." 
- } - } - }, - { - "id": "httpMethod", - "label": "REST Method", - "description": "Specify the HTTP method to use.", - "type": "Dropdown", - "value": "get", - "choices": [ - { "name": "GET", "value": "get" }, - { "name": "POST", "value": "post" }, - { "name": "PATCH", "value": "patch" }, - { "name": "DELETE", "value": "delete" } - ], - "binding": { - "type": "zeebe:taskHeader", - "key": "method" - } - }, - { - "label": "Request Body", - "description": "Data to send to the endpoint.", - "value": "", - "type": "String", - "optional": true, - "binding": { - "type": "zeebe:input", - "name": "body" - }, - "condition": { - "property": "httpMethod", - "oneOf": ["patch", "post", "delete"] - } - }, - { - "id": "authenticationType", - "label": "Authentication Type", - "description": "Specify the authentication type to use.", - "type": "Dropdown", - "value": "", - "optional": true, - "choices": [ - { - "name": "None", - "value": "" - }, - { - "name": "Basic", - "value": "basic" - }, - { - "name": "Bearer", - "value": "bearer" - } - ], - "binding": { - "type": "zeebe:input", - "name": "authentication.type" - } - }, - { - "label": "Username", - "type": "String", - "feel": "optional", - "binding": { - "type": "zeebe:input", - "name": "authentication.username" - }, - "constraints": { - "notEmpty": true - }, - "condition": { - "property": "authenticationType", - "equals": "basic" - } - }, - { - "label": "Password", - "type": "String", - "feel": "optional", - "binding": { - "type": "zeebe:input", - "name": "authentication.password" - }, - "constraints": { - "notEmpty": true - }, - "condition": { - "property": "authenticationType", - "equals": "basic" - } - }, - { - "label": "Bearer Token", - "type": "String", - "feel": "optional", - "binding": { - "type": "zeebe:input", - "name": "authentication.token" - }, - "constraints": { - "notEmpty": true - }, - "condition": { - "property": "authenticationType", - "equals": "bearer" - } - } - ] -} -``` - - - - -## Develop Connector templates - -You can develop Connector templates using the [`element template` feature](/components/modeler/desktop-modeler/element-templates/defining-templates.md). You can also look at existing [examples](https://github.com/camunda/camunda-modeler/blob/master/resources/element-templates/cloud-samples.json). - -## Providing and using Connector templates - -When using [Web Modeler](/components/modeler/web-modeler/launch-web-modeler.md), you can create **Connector templates** [directly within the application](/components/connectors/manage-connector-templates.md) and share them with your respective organization. - -When using [Desktop Modeler](/components/modeler/desktop-modeler/index.md), you must place the **Connector templates** [within the file system](/components/modeler/desktop-modeler/element-templates/configuring-templates.md) so the modeler will pick them up. - -Once available, process developers can directly [use the **Connector templates** from within the modeling canvas](/components/connectors/use-connectors/index.md). 
diff --git a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-intermediate.png b/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-intermediate.png deleted file mode 100644 index 1ceba523f6c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-intermediate.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-start.png b/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-start.png deleted file mode 100644 index 5e13c3abb3e..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-start.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template.png b/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template.png deleted file mode 100644 index dca1fd5dab9..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/custom-built-connectors/img/custom-connector-template.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-add-work-item-to-the-queue.png b/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-add-work-item-to-the-queue.png deleted file mode 100644 index b01fbb96770..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-add-work-item-to-the-queue.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-example.png b/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-example.png deleted file mode 100644 index 18ffb9caeb9..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-get-work-item-result.png b/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-get-work-item-result.png deleted file mode 100644 index 1dafb926d54..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-get-work-item-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-red-properties.png deleted file mode 100644 index ee96adf3d6b..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connector-automation-anywhere-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connector-concept.png b/versioned_docs/version-8.2/components/connectors/img/archive/connector-concept.png deleted file mode 100644 index 464bd8d344f..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connector-concept.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-lambda-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-lambda-filled.png deleted file mode 100644 index b44e1a9850c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-lambda-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-filled.png deleted file mode 100644 index a9341802eb3..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-inbound-intermediate-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-inbound-intermediate-filled.png deleted file mode 100644 index b2f997e65bf..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-inbound-intermediate-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-inbound-start-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-inbound-start-filled.png deleted file mode 100644 index 646b5f0a48f..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sns-inbound-start-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-catch-event-choose-connector.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-catch-event-choose-connector.png deleted file mode 100644 index e227440b6be..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-catch-event-choose-connector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-filled.png deleted file mode 100644 index 29715207e46..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-message-attributes.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-message-attributes.png deleted file mode 100644 index 139171aed53..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-message-attributes.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-start-event-activation.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-start-event-activation.png deleted file mode 100644 index c0cd23d19f9..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-start-event-activation.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-start-event-choose-connector.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-start-event-choose-connector.png deleted file mode 
100644 index 9a63b671d11..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-aws-sqs-start-event-choose-connector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blue-prism-get-item-by-id.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blue-prism-get-item-by-id.png deleted file mode 100644 index f43c8f7467a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blue-prism-get-item-by-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blueprism-add-item.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blueprism-add-item.png deleted file mode 100644 index dfa55c3aebe..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blueprism-add-item.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blueprism-example.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blueprism-example.png deleted file mode 100644 index 4bac0d69f82..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-blueprism-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-decision-tree.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-decision-tree.png deleted file mode 100644 index 4c8fb1ab4b5..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-decision-tree.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-add-empty-column-or-row.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-add-empty-column-or-row.png deleted file mode 100644 index e870fa2bc5f..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-add-empty-column-or-row.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-add-values.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-add-values.png deleted file mode 100644 index e951a714bf5..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-add-values.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-row.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-row.png deleted file mode 100644 index 10116b6cdec..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-row.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-spreadsheet.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-spreadsheet.png deleted file mode 100644 index dddf5c2ed29..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-spreadsheet.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-worksheet.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-worksheet.png deleted file mode 100644 index e4b4125fc6a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-create-worksheet.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-delete-column.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-delete-column.png deleted file mode 100644 index 039c7772541..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-delete-column.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-delete-worksheet.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-delete-worksheet.png deleted file mode 100644 index 18ed12feb99..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-delete-worksheet.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-row-by-index.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-row-by-index.png deleted file mode 100644 index 1df022281b0..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-row-by-index.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-spreadsheet-details.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-spreadsheet-details.png deleted file mode 100644 index b9a6c0bf488..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-spreadsheet-details.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-spreadsheet-id.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-spreadsheet-id.png deleted file mode 100644 index fd2ca648094..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-spreadsheet-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-worksheet-data.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-worksheet-data.png deleted file mode 100644 index 59cad7b363a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-worksheet-data.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-worksheet-id.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-worksheet-id.png deleted file mode 100644 index 3dbc4dd5dfc..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-get-worksheet-id.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-letter-column-index.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-letter-column-index.png deleted file mode 100644 index e645238fc8a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-letter-column-index.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-row-index.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-row-index.png deleted file mode 100644 index b19071a0651..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-google-sheets-row-index.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-get-parent-folder-id.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-get-parent-folder-id.png deleted file mode 100644 index 20836d81095..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-get-parent-folder-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-get-template-id.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-get-template-id.png deleted file mode 100644 index 24c25040643..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-get-template-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-jwt-bearer.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-jwt-bearer.png deleted file mode 100644 index 206da5a851c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-jwt-bearer.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-jwt-refresh.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-jwt-refresh.png deleted file mode 100644 index 3a2d04f5b41..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-jwt-refresh.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-new-file-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-new-file-filled.png deleted file mode 100644 index aa8d05ef17f..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-new-file-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-new-folder-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-new-folder-filled.png deleted file mode 100644 index 7fc2d78b218..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-new-folder-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-processed-template-example.png 
b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-processed-template-example.png deleted file mode 100644 index b80d58d6ba3..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-processed-template-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-template-example.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-template-example.png deleted file mode 100644 index 5d46413bf42..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-googledrive-template-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-basic.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-basic.png deleted file mode 100644 index 49a9706cd05..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-basic.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-bearer-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-bearer-token.png deleted file mode 100644 index 58e6bfb327b..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-bearer-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-http-method-url.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-http-method-url.png deleted file mode 100644 index ee8bcd7f299..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-http-method-url.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-oauth-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-oauth-token.png deleted file mode 100644 index 31b839be6a5..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-oauth-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-query.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-query.png deleted file mode 100644 index 3af00563a21..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-query.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-red-properties.png deleted file mode 100644 index aa8fffcc37f..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-response-mapping.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-response-mapping.png deleted file mode 100644 index 2408f1a41eb..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-response-mapping.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-timeout.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-timeout.png deleted file mode 100644 index 325da6f9d27..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-timeout.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-variables.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-variables.png deleted file mode 100644 index 3103d0a8aa8..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-graphql-variables.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-filled.png deleted file mode 100644 index cb8b11f068b..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-inbound-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-inbound-filled.png deleted file mode 100644 index 9cf105466b7..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-inbound-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-inbound-intermediate-catch-event-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-inbound-intermediate-catch-event-filled.png deleted file mode 100644 index 1e275d3ae57..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-kafka-inbound-intermediate-catch-event-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-bearer-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-bearer-token.png deleted file mode 100644 index f80c3eafcd9..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-bearer-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-choose-method.gif b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-choose-method.gif deleted file mode 100644 index b753ba363bf..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-choose-method.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-client-credentials.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-client-credentials.png deleted file mode 100644 index b7164bb6b55..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-client-credentials.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-copy-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-copy-token.png deleted file mode 100644 index 
a8ae5a8ed2d..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-copy-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-members.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-members.png deleted file mode 100644 index 3a120efd160..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-members.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-red-properties.png deleted file mode 100644 index 2089ac301e3..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-refresh-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-refresh-token.png deleted file mode 100644 index 78ddf0e0a7e..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-microsoft-teams-refresh-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-openai-prompt-engineering.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-openai-prompt-engineering.png deleted file mode 100644 index a2b83a8c9ea..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-openai-prompt-engineering.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-openai-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-openai-red-properties.png deleted file mode 100644 index 80dc1508f69..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-openai-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-auth.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-auth.png deleted file mode 100644 index b3c6f801725..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-auth.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-cluster-saas.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-cluster-saas.png deleted file mode 100644 index ae4c7e729ca..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-cluster-saas.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-cluster-sm.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-cluster-sm.png deleted file mode 100644 index 473e72fa22a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-operate-cluster-sm.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-allow-implicit-flow.png 
b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-allow-implicit-flow.png deleted file mode 100644 index d224229f5f2..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-allow-implicit-flow.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-bearer-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-bearer-token.png deleted file mode 100644 index fa37fec5a38..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-bearer-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-cluster-api-credentials.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-cluster-api-credentials.png deleted file mode 100644 index 27460384218..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-cluster-api-credentials.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-connection-name.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-connection-name.png deleted file mode 100644 index 8128cbd4e39..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-connection-name.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-create-new-connection.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-create-new-connection.png deleted file mode 100644 index 891e67503b5..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-create-new-connection.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-oauth-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-oauth-token.png deleted file mode 100644 index ea6ff62a71a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-oauth-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-operations.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-operations.png deleted file mode 100644 index db8b87ae7b9..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-operations.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-trigger-a-flow-run.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-trigger-a-flow-run.png deleted file mode 100644 index 184a30f9b91..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-power-automate-trigger-a-flow-run.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-append-connector.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-append-connector.png deleted 
file mode 100644 index d38296fce00..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-append-connector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-credentials-fields.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-credentials-fields.png deleted file mode 100644 index f5067a0925e..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-credentials-fields.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-intermediate.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-intermediate.png deleted file mode 100644 index dc7eeba6447..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-intermediate.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-properties-red.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-properties-red.png deleted file mode 100644 index 9b7b3ecb439..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-properties-red.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-routing.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-routing.png deleted file mode 100644 index bc98887325a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-inbound-routing.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-message-with-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-message-with-properties.png deleted file mode 100644 index 6c2759c968a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-message-with-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-red-properties.png deleted file mode 100644 index a076d85612d..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-routing-cred.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-routing-cred.png deleted file mode 100644 index 0f1a32ef1ce..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-routing-cred.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-routing-uri.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-routing-uri.png deleted file mode 100644 index b4776c1fa06..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-routing-uri.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-uri-fields.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-uri-fields.png deleted file mode 100644 index 6d3cd46fc2f..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-uri-fields.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-wrench-shaped.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-wrench-shaped.png deleted file mode 100644 index a84d83cb83e..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rabbitmq-wrench-shaped.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-response-mapping-result.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-response-mapping-result.png deleted file mode 100644 index d84c7d10ca8..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-response-mapping-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-response-mapping.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-response-mapping.png deleted file mode 100644 index 2098b79d166..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-response-mapping.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-basic.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-basic.png deleted file mode 100644 index ef165abfbdf..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-basic.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-bearer-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-bearer-token.png deleted file mode 100644 index 785aa569f63..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-bearer-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-connection-timeout.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-connection-timeout.png deleted file mode 100644 index 325da6f9d27..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-connection-timeout.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-headers.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-headers.png deleted file mode 100644 index 38eb7f3682c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-headers.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-method-url.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-method-url.png deleted file mode 100644 index d99c3a99fc0..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-method-url.png and /dev/null differ diff 
--git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-request-body.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-request-body.png deleted file mode 100644 index 6e56a988325..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-request-body.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-response-mapping.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-response-mapping.png deleted file mode 100644 index 7d7d05855e9..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-http-response-mapping.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-oauth-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-oauth-token.png deleted file mode 100644 index 1560b4b4de9..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-oauth-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-query-param.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-query-param.png deleted file mode 100644 index e65eaa6e669..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-query-param.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-red-properties.png deleted file mode 100644 index cfe82abe730..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-rest-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-account-info.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-account-info.png deleted file mode 100644 index ca424df20c6..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-account-info.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-account.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-account.png deleted file mode 100644 index 9150e99c3d1..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-account.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-api-key.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-api-key.png deleted file mode 100644 index ec3cabb0c6e..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-api-key.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-single-sender.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-single-sender.png deleted file mode 100644 index faa292d0511..00000000000 Binary files 
a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-create-single-sender.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-complete-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-complete-properties.png deleted file mode 100644 index d7a5d1eef21..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-complete-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-red-properties.png deleted file mode 100644 index 8c267630ee6..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-complete-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-complete-properties.png deleted file mode 100644 index 7efe9753f0b..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-complete-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-id.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-id.png deleted file mode 100644 index 986105ca6d4..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-red-properties.png deleted file mode 100644 index aeb54260dad..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-test-data.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-test-data.png deleted file mode 100644 index 609201592ad..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-email-template-test-data.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-verify-single-sender.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-verify-single-sender.png deleted file mode 100644 index 39fc5b98a17..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-sendgrid-verify-single-sender.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-create-channel.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-create-channel.png deleted file mode 100644 index 18c56e0e950..00000000000 Binary files 
a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-create-channel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-create-task-wrench.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-create-task-wrench.png deleted file mode 100644 index 9abe58303f1..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-create-task-wrench.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-inbound-intermediate-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-inbound-intermediate-filled.png deleted file mode 100644 index 6836ec22652..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-inbound-intermediate-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-inbound-start-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-inbound-start-filled.png deleted file mode 100644 index d17b46f416d..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-inbound-start-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-invite-to-channel.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-invite-to-channel.png deleted file mode 100644 index 03385b4a619..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-invite-to-channel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-post-message.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-post-message.png deleted file mode 100644 index 0adc8c5ab77..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-post-message.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-red-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-red-properties.png deleted file mode 100644 index 3bdb955d7ee..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-response-mapping.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-response-mapping.png deleted file mode 100644 index 62a3fc5bdfd..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-slack-response-mapping.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-twilio-webhook-correlate-activate.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-twilio-webhook-correlate-activate.png deleted file mode 100644 index 179f406fa46..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-twilio-webhook-correlate-activate.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-twilio-webhook-variable-mapping.png 
b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-twilio-webhook-variable-mapping.png deleted file mode 100644 index 58d5522d053..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-twilio-webhook-variable-mapping.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-add-queue-item.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-add-queue-item.png deleted file mode 100644 index e44ce95639c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-add-queue-item.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-bearer-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-bearer-token.png deleted file mode 100644 index 4ccf3a151d3..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-bearer-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-configuration.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-configuration.png deleted file mode 100644 index a287c227c0c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-configuration.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-get-queue-item-result-by-id.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-get-queue-item-result-by-id.png deleted file mode 100644 index 89eb862bba0..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-get-queue-item-result-by-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-long-polling-pattern.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-long-polling-pattern.png deleted file mode 100644 index 747024b9d0b..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-long-polling-pattern.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-oauth-token.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-oauth-token.png deleted file mode 100644 index ffe94be69c7..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-oauth-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-operations.png b/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-operations.png deleted file mode 100644 index 9c74ba42ba5..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/connectors-uipath-operations.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/feel-icon.png b/versioned_docs/version-8.2/components/connectors/img/archive/feel-icon.png deleted file mode 100644 index e75ae7cd815..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/feel-icon.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/connectors/img/archive/replace-via-upload.png b/versioned_docs/version-8.2/components/connectors/img/archive/replace-via-upload.png deleted file mode 100644 index ddb49ea79fc..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/replace-via-upload.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/upload-files.png b/versioned_docs/version-8.2/components/connectors/img/archive/upload-files.png deleted file mode 100644 index cd92cb7255c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/upload-files.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-append.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-append.png deleted file mode 100644 index 81f87230f11..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-append.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-context-menu.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-context-menu.png deleted file mode 100644 index b5c5c475517..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-context-menu.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-feel.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-feel.png deleted file mode 100644 index 3a305da1436..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-feel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-properties.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-properties.png deleted file mode 100644 index 79ccf3c201e..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-connectors-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-github-connector-template.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-github-connector-template.png deleted file mode 100644 index 89c72ed05ff..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-github-connector-template.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template-filled-gh.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template-filled-gh.png deleted file mode 100644 index 6d242b7f998..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template-filled-gh.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template-filled.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template-filled.png deleted file mode 100644 index 4f49ffaa5f8..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template-filled.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template.png 
b/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template.png deleted file mode 100644 index ef38cb5615b..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-template.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-webhooks-tab.png b/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-webhooks-tab.png deleted file mode 100644 index 6d7bf1d000a..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/archive/use-inbound-connector-webhooks-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-1.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-1.png deleted file mode 100644 index 9ea40b01675..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-2.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-2.png deleted file mode 100644 index 1aad91ed9ec..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-3.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-3.png deleted file mode 100644 index 6e9cefb4615..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-3.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-4.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-4.png deleted file mode 100644 index 3186b31c87c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/create-connector-template-4.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-1.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-1.png deleted file mode 100644 index 47953c0fbce..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-2.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-2.png deleted file mode 100644 index 3e9dc947e79..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-3.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-3.png deleted file mode 100644 index cfeff0cd250..00000000000 Binary files 
a/versioned_docs/version-8.2/components/connectors/img/connector-templates/edit-connector-template-3.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/fix-connector-template-problems-2.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/fix-connector-template-problems-2.png deleted file mode 100644 index 92158564df1..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/fix-connector-template-problems-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/fix-connector-template-problems.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/fix-connector-template-problems.png deleted file mode 100644 index 341de0b0184..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/fix-connector-template-problems.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/replace-via-upload.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/replace-via-upload.png deleted file mode 100644 index ddb49ea79fc..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/replace-via-upload.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/connector-templates/upload-files.png b/versioned_docs/version-8.2/components/connectors/img/connector-templates/upload-files.png deleted file mode 100644 index cd92cb7255c..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/connector-templates/upload-files.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/inbound-connectors.png b/versioned_docs/version-8.2/components/connectors/img/inbound-connectors.png deleted file mode 100644 index b22861ec658..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/inbound-connectors.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/multilayer-coding-experience.png b/versioned_docs/version-8.2/components/connectors/img/multilayer-coding-experience.png deleted file mode 100644 index b95c7bad233..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/multilayer-coding-experience.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/outbound-connectors.png b/versioned_docs/version-8.2/components/connectors/img/outbound-connectors.png deleted file mode 100644 index bce05ac17b2..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/outbound-connectors.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/img/use-connectors-error-general.png b/versioned_docs/version-8.2/components/connectors/img/use-connectors-error-general.png deleted file mode 100644 index 11e33cfc2b6..00000000000 Binary files a/versioned_docs/version-8.2/components/connectors/img/use-connectors-error-general.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/connectors/introduction.md b/versioned_docs/version-8.2/components/connectors/introduction.md deleted file mode 100644 index 369cff53ea0..00000000000 --- a/versioned_docs/version-8.2/components/connectors/introduction.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: introduction-to-connectors -title: Introduction -description: "A 
Connector is a reusable building block that performs the integration with an external system and works out of the box." -keywords: [bridge, reusable component, low code] ---- - -A **Connector** is a reusable building block that performs the integration with an external system. - -Most processes require interaction with systems outside of Camunda to orchestrate and complete work. - -- In **human task orchestration**, actors are notified of pending or completed processes through communication tools such as [Slack](/components/connectors/out-of-the-box-connectors/slack.md) or [Microsoft Teams](/components/connectors/out-of-the-box-connectors/microsoft-teams.md), for example, in an onboarding flow. -- In **business processes**, records are created or updated in enterprise business systems. -- In **microservices orchestration**, processes communicate through events pushed to a common system through infrastructure such as [Kafka](/components/connectors/out-of-the-box-connectors/kafka.md). - -Connectors are often represented as tasks in a [BPMN process](/components/concepts/processes.md), which can be configured with parameters specific to the external system. As such, they can remove the need to write custom programming code for integration. - -Connectors offer a multilayer coding experience, enabling users with a variety of technical abilities to be successful when integrating with external systems. - -![Multilayer Coding Experience](img/multilayer-coding-experience.png) - -A Connector consists of two parts: - -1. The programming code in Java that connects to the external system (for example, refer to the [Connector function](./custom-built-connectors/connector-sdk.md#outbound-connector-runtime-logic) for outbound Connectors). -2. The user interface to be used during modeling, which is provided using [Connector Templates](manage-connector-templates.md). - -Users interacting with Connectors may only need to understand the configuration options exposed by the Connector Template in the properties panel. - -Connectors are available [out-of-the-box (OOTB)](./out-of-the-box-connectors/available-connectors-overview.md) and come with [Connector Templates](manage-connector-templates.md), which customize how a BPMN element is shown -and how it can be configured by process developers. Connector templates are a specific kind of [element template](/components/modeler/desktop-modeler/element-templates/about-templates.md), which can also be used when creating custom Connectors via the [Connector SDK](./custom-built-connectors/connector-sdk.md). - -## Next steps - -- [Learn about types of Connectors](./connector-types.md) -- [Use Connectors in your BPMN process](./use-connectors/index.md) -- [Learn about available out-of-the-box Connectors](./out-of-the-box-connectors/available-connectors-overview.md) -- [Configure Connector Templates](manage-connector-templates.md) -- [Install Connectors in Self-Managed](/self-managed/connectors-deployment/install-and-start.md) diff --git a/versioned_docs/version-8.2/components/connectors/manage-connector-templates.md b/versioned_docs/version-8.2/components/connectors/manage-connector-templates.md deleted file mode 100644 index 2bfd0f36367..00000000000 --- a/versioned_docs/version-8.2/components/connectors/manage-connector-templates.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -id: manage-connector-templates -title: Manage Connector templates -description: Manage your Connector templates in Web Modeler.
---- - -export const UploadIcon = () => ; - -Camunda 8 only - -You can create and manage [Connector templates](/components/connectors/custom-built-connectors/connector-templates.md) just as any other asset in a Web Modeler project. - -## Create Connector templates - -Take the following steps to create a new Connector template: - -1. Navigate to the project of your choice and click **New**. - -2. Click **Connector Template**. - - ![Creating a new template](img/connector-templates/create-connector-template-1.png) - -3. You will be taken to the **Connector template editor** screen. On this screen, you can define the Connector template by writing the template JSON. The template editor supports you in writing the template by providing autocompletion, error highlighting, and a live preview. - - ![Template editor anatomy](img/connector-templates/create-connector-template-2.png) - - The components of the editor interface are as follows: - - - In the **breadcrumbs bar** at the top of the screen, you can rename your template by clicking the chevron next to the template name. Note that you cannot change the name of your template in the template JSON, but only with this action. - - On the left, you see the **template JSON editor**. Here, you define the actual template descriptor (a minimal example is shown after these steps). The descriptor follows the [most recent element template schema](https://github.com/camunda/element-templates-json-schema). The editor is divided into two sections: a read-only section containing the schema reference, the template name, and the template ID, and an editable section where you can [define your template descriptor](/components/modeler/desktop-modeler/element-templates/defining-templates.md). - - On the right, you see the live **Visual Preview**. The live preview shows how the properties panel will look when you apply the template to an element. It automatically updates on every valid change and reflects the latest valid state of the template. The preview allows you to interactively check your template before publishing. - - In the upper left, you can **Upload an icon** for your template. You can upload any image file you want; however, we recommend using square SVG graphics. Icons are rendered at 18x18 pixels in the element on the modeling canvas and at 32x32 pixels in the properties panel. - - On every valid change, the template is saved automatically. If there are errors in the JSON file, the template will not be saved. Ensure all [errors are resolved](#fixing-template-problems) for the template to save successfully. - -4. Once you've written your template, publish it by clicking **Publish**. You will be prompted to optionally enter a version name. This name appears in your milestone list. - - ![Publishing a template](img/connector-templates/create-connector-template-3.png) - - When using the Camunda 8 SaaS offering, as an [organization owner or admin](/components/console/manage-organization/manage-users.md#users) you can publish a template version to the organization, making it available to all projects in the organization. - To do so, either click **Publish > Publish to organization** on the Connector template editor screen or promote a template via the [Version History](#versioning-connector-templates). - - ![Promoting a template](img/connector-templates/create-connector-template-4.png) - -5. After publishing, a template version can be applied to elements of all models in the same project or organization, depending on its publication status. - You can check the publication status of template versions in the [Version History](#versioning-connector-templates).
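To illustrate the shape of a template descriptor, here is a minimal sketch of a Connector template JSON. The name, ID, task type, and property shown are invented for this example, and the exact set of supported attributes is defined by the element template schema linked above:

```json
{
  "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json",
  "name": "My example Connector",
  "id": "io.example.connector.MyExampleConnector",
  "appliesTo": ["bpmn:Task"],
  "properties": [
    {
      "type": "Hidden",
      "value": "io.example:my-example-connector:1",
      "binding": { "type": "zeebe:taskDefinition:type" }
    },
    {
      "label": "URL",
      "type": "String",
      "feel": "optional",
      "binding": { "type": "zeebe:input", "name": "url" },
      "constraints": { "notEmpty": true }
    }
  ]
}
```

Applied to a task, a descriptor like this binds the task to the made-up job type `io.example:my-example-connector:1` and exposes a required **URL** field in the properties panel, mapped to the `url` input variable.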
### JSON editor features - -The JSON editor is based on the [Monaco Editor](https://microsoft.github.io/monaco-editor/), the same editor that powers VS Code. As a result, the template editor supports many familiar features, such as auto-formatting, indentation support, code completion, and error highlighting. - -With code completion, you can add a complete property object when you press `Ctrl+Space` at a location for a new property. - -![Adding a property with Ctrl+Space](img/connector-templates/edit-connector-template-1.png) - -When you press `Ctrl+Space` to create a new attribute, you get proposals for all available attributes. - -![Adding an attribute with Ctrl+Space](img/connector-templates/edit-connector-template-2.png) - -When the set of allowed values for an attribute is defined, you can select one by pressing `Ctrl+Space` inside the value. - -![Editing the value of the `type` attribute with Ctrl+Space](img/connector-templates/edit-connector-template-3.png) - -Read the [Visual Studio Code editor docs](https://code.visualstudio.com/docs/editor/editingevolved) for a full overview of features. - -### Versioning Connector templates - -You can version your Connector templates using [milestones](/components/modeler/web-modeler/milestones.md), similar to diagrams. - -If you publish a new version of a Connector template and an older version is already being used in diagrams, the user can either [update the diagram elements](/components/modeler/desktop-modeler/element-templates/using-templates.md#updating-templates) to use the most recent version of the Connector template, or continue using the older version in their diagrams. - -## Fixing problems in your templates {#fixing-template-problems} - -While you work on a template, it will pass through invalid intermediate states. For instance, when you add a new property, it must contain various mandatory attributes. - -Unless all mandatory attributes are defined, the template is not saved and the preview is not updated. This ensures that you can never publish an invalid or broken template. - -The editor toolbar indicates whether the template is currently in a valid state. The JSON editor provides error highlighting, allowing you to add mandatory values and resolve problems without missing anything. - -![Indicating problems in connector templates](img/connector-templates/fix-connector-template-problems.png) - -If there are problems at the root level of the JSON (such as a missing or misspelled mandatory attribute), the error is highlighted in the first line of the editor. Click the error marker at the curly bracket to expand the error message. - -![Some connector template problems highlighted in the first line](img/connector-templates/fix-connector-template-problems-2.png) - - - -## Importing existing Connector templates - -If you have created templates for Desktop Modeler and want to reuse them in Web Modeler, you need to make some adjustments to the template files: - -1. **Split the files**. Web Modeler maintains a 1:1 relation between Connector templates and files. Since Desktop Modeler allows you to keep multiple template definitions in a single file, you must split the file into one file per template before uploading. -2. **Remove the brackets**. Remove the list brackets from the Connector template file before uploading (see the sketch after this list). Even if a template file for Desktop Modeler contains only a single template, it is always wrapped in a list. -3. **Be aware that the ID and name of the template from the original file will be ignored.** A new ID is auto-assigned to ensure consistency and uniqueness in Web Modeler.
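As an illustration of step 2, a made-up template file prepared for Desktop Modeler wraps the descriptor in a JSON list:

```json
[
  {
    "name": "My example Connector",
    "id": "io.example.connector.MyExampleConnector",
    "appliesTo": ["bpmn:Task"],
    "properties": []
  }
]
```

Before uploading it to Web Modeler, remove the outer `[` and `]` so the file contains only the single template object; as noted above, the original `id` and `name` are ignored and a new ID is assigned on upload.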
Once your file follows the requirements, you can upload it. There are two ways to do so: - -1. Upload it as a _new Connector template_ via the **Upload files** action in the projects view. This will auto-generate a new ID for the template. - - ![Uploading a new template via file upload](img/connector-templates/upload-files.png) - -2. Update an existing template via the **Replace via upload** action in the breadcrumbs of the editor view. This preserves the name and ID of the existing template. - - ![Updating a template via file upload](img/connector-templates/replace-via-upload.png) - -:::info Desktop Modeler support -The Connector template editor is currently only available in Web Modeler. See the [Desktop Modeler documentation](/components/modeler/desktop-modeler/element-templates/about-templates.md) for instructions on configuring Connector templates in Desktop Modeler. -::: diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md deleted file mode 100644 index c1d48ba11ff..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md +++ /dev/null @@ -1,330 +0,0 @@ ---- -id: amazon-dynamodb -sidebar_label: AWS DynamoDB Connector -title: Amazon DynamoDB Connector -description: Use the Amazon DynamoDB Connector to connect your BPMN service with Amazon Web Services' DynamoDB Service, and work with tables and items using this service. ---- - -The **Amazon DynamoDB Connector** allows you to connect your BPMN service with Amazon Web Services' [DynamoDB Service](https://aws.amazon.com/dynamodb/). This can be useful for performing CRUD operations on Amazon DynamoDB tables from within a BPMN process. - -## Prerequisites - -To use the **Amazon DynamoDB Connector**, you need an AWS account with an access key and secret key to access DynamoDB, as well as a region where your DynamoDB instance is located. You can create an account and obtain the access and secret keys from the [AWS Console](https://aws.amazon.com/console/). - -:::note -Use Camunda secrets to store credentials so you don't expose sensitive information directly in the process. Refer to [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create an Amazon DynamoDB Connector task - -To use the **Amazon DynamoDB Connector** in your process, either change the type of an existing task by clicking on it and using the **wrench-shaped** change type context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Amazon DynamoDB Connector executable - -To work with the **Amazon DynamoDB Connector**, choose the required operation type in the **Operation** section and complete the mandatory fields highlighted in red in the Connector properties panel. - -:::note -All mandatory and non-mandatory fields, which depend on the authentication selection you choose, are covered in the upcoming sections.
-::: - -## Operation - -Choose an operation type of either **Table** or **Item** in the **Operation** section: - -- **Table**: Perform operations on a DynamoDB table. -- **Item**: Perform operations on a specific item in a DynamoDB table. - -### Method - -Choose one of the following methods: - -#### [Table](#table-operations) - -- [Create table](#create-table): Creates a new DynamoDB table. -- [Delete table](#delete-table): Deletes an existing DynamoDB table. -- [Describe table](#describe-table): Returns information about a DynamoDB table. -- [Scan table](#scan-table): Returns one or more items and their attributes by accessing every item in a table. You can use filter expressions to selectively scan for items that meet certain criteria. - -#### [Item](#item-operations) - -- [Add item](#add-item): Creates a new item or replaces an existing item with a new item. -- [Delete item](#delete-item): Deletes a single item in a table by primary key. -- [Get item](#get-item): Returns a set of attributes for the item with the given primary key. -- [Update item](#update-item): Modifies an existing item's attributes or adds a new item to the table if it does not already exist. - -## Authentication - -Choose an applicable authentication type from the **Authentication** dropdown. Learn more about authentication types in the related [appendix entry](#aws-authentication-types). - -If you select **credentials** to access the **Amazon DynamoDB service**, the Connector requires the appropriate credentials. The following authentication options are available: - -- **Access key**: Provide an access key of a user with permissions to the Amazon DynamoDB service. -- **Secret key**: Provide the secret key of the user with the access key provided above. - -The Access Key and Secret Key are required properties and must be provided to use the Connector. If these properties are not set, the Connector will not be able to authenticate with the [DynamoDB Service](https://aws.amazon.com/dynamodb/). - -For more information on authentication and security in Amazon DynamoDB, refer to the [AWS documentation](https://docs.aws.amazon.com/dynamodb/index.html). - -## Configuration - -The **Region** property in the **Configuration** section specifies the AWS region in which the DynamoDB table exists or will be created. This property is required and must be set to use the Connector. - -For more information on AWS regions, refer to the [AWS documentation](https://docs.aws.amazon.com/general/latest/gr/rande.html). - -## Input - -The **Input** section of the **Amazon DynamoDB Connector** specifies the input data for the [selected operation](#operation). The input data varies depending on the [operation type](#operation) and [method](#method) selected. - -### Table operations - -For the **Table** operation type, the following input data is required: - -#### Create table - -**Request** - -| Property name | Data type | Required | Description | -| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------: | :------: | :------------------------------------------------------------------------------------: | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-TableName) | string | Yes | The name of the DynamoDB table to be created. 
| -| [Partition key](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-KeySchema) | string | Yes | The attribute name of the partition key for the table. | -| [Partition key role](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-KeySchema) | dropdown | Yes | The role of the partition key. Can be set to "HASH" or "RANGE". | -| [Partition key attribute data type](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeDefinition.html#DDB-Type-AttributeDefinition-AttributeType) | dropdown | Yes | The data type of the partition key attribute. | -| [Sort key](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-KeySchema) | string | No | The attribute name of the sort key for the table (if applicable). | -| [Sort key role](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-KeySchema) | dropdown | No | The role of the sort key. Can be set to "HASH" or "RANGE". | -| [Sort key attribute data type](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeDefinition.html#DDB-Type-AttributeDefinition-AttributeType) | dropdown | No | The data type of the sort key attribute. | -| [Read capacity units](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-ProvisionedThroughput) | number | Yes | The maximum number of strongly consistent reads per second that the table can support. | -| [Write capacity units](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-ProvisionedThroughput) | number | Yes | The maximum number of writes per second that the table can support. | -| [Billing mode](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-BillingMode) | dropdown | No | The billing mode of the table. Can be set to "PROVISIONED" or "PAY_PER_REQUEST". | -| [Deletion protection](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#DDB-CreateTable-request-GlobalSecondaryIndexUpdates) | dropdown | No | Indicates whether to enable or disable deletion protection for the table. | - -**Response** - -| Property | Data type | Description | -| :-----------------------------------------------------------------------------------------------------------: | :-------: | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Table description](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TableDescription.html) | Object | Information about the created table, including the table name, attribute definitions, primary key schema, provisioned throughput settings, and more. | - -#### Delete table - -**Request** - -| Property name | Data type | Required | Description | -| :------------------------------------------------------------------------------------------------ | :-------: | :------: | :-------------------------------------------: | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteTable.html) | string | Yes | The name of the DynamoDB table to be deleted. 
| - -**Response** - -| Property | Data type | Description | -| :------- | :-------: | :-----------------------------------------------------------------------------------------------------------------------------------: | -| action | string | The action performed. In this case, it will always be "delete Table [tableName]", where `tableName` is the name of the deleted table. | -| status | string | The status of the operation. In this case, it will always be "OK" to indicate that the table was successfully deleted. | - -#### Describe table - -**Request** - -| Property name | Data type | Required | Description | -| :-------------------------------------------------------------------------------------------------- | :-------: | :------: | :---------------------------------------------: | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html) | string | Yes | The name of the DynamoDB table to be described. | - -**Response** - -| Property | Data type | Description | -| :-----------------------------------------------------------------------------------------------------------: | :-------: | -------------------------------------------------------------------------------------------------------------------------------------------- | -| [Table description](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TableDescription.html) | Object | Information about the table, including the table name, attribute definitions, primary key schema, provisioned throughput settings, and more. | - -#### Scan table - -**Request** - -| Property name | Data type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html#DDB-Scan-request-TableName) | string | Yes | The name of the DynamoDB table to be scanned. | -| [Filter expression](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilterExpression) | string | No | The filter expression to apply to the scan results. For more information, refer to the [Expression Attribute Names and Values](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html) section of the Amazon DynamoDB Developer Guide. | -| [Projection expression](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#ProjectionExpression) | string | No | A string that identifies one or more attributes to retrieve from the specified table. | -| [Expression attribute names](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html#ExpressionAttributeNames) | map | No | A map of attribute names to their replacements in the filter expression or projection expression. For more information, refer to the [Expression Attribute Names and Values](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html) section of the Amazon DynamoDB Developer Guide. 
| -| [Expression attribute values](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html#ExpressionAttributeValues) | map | No | A map of attribute values to their replacements in the filter expression or projection expression. For more information, refer to the [Expression Attribute Names and Values](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeValues.html) section of the Amazon DynamoDB Developer Guide. | - -**Response** - -| Property | Data type | Description | -| :------- | :-------: | :---------------------------------------------------------------------------------------------------------------------------------------: | -| action | string | The action performed. In this case, it will always be `scanTable`. | -| status | string | The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. | -| items | list | The list of items returned by the scan operation, in case the operation was successful. If there are no items, this field will be `null`. | - -### Item operations - -:::note -The **Amazon DynamoDB Connector** does not currently support binary data types. If binary data is input during the creation or update of items, it will be saved as a string. - -When updating items, if an attribute of type SET is updated, it will be overwritten and saved as a list type. Consider these limitations to prevent unintended data structure modifications in your DynamoDB tables. -::: - -#### Add item - -**Request** - -| Property name | Data type | Required | Description | -| --------------------------------------------------------------------------------------------------------------------------- | --------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-TableName) | string | Yes | The name of the DynamoDB table to add the item to. | -| [Item](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-Item) | object | Yes | The item to add to the table is represented in JSON format. For example:
    `{"Name": "Example Item", "ID": "123", "Description": "This is an example item"}`.
    This JSON object represents the item's attributes as plain key-value pairs, without explicitly specifying data types. | - -**Response** - -| Property | Data type | Description | -| --- | --- | --- | -| [Result](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html#API_PutItem_ResponseElements) | object | The result of adding the item to the table, as returned by DynamoDB. | - -#### Delete item - -**Request** - -| Property name | Data type | Required | Description | -| --- | --- | --- | --- | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html#DDB-DeleteItem-request-TableName) | string | Yes | The name of the DynamoDB table to delete the item from. | -| [Primary key components](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey) | object | Yes | The primary key components that identify the item to delete. | - -**Response** - -| Property | Data type | Description | -| --- | --- | --- | -| [Deleted item](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html#API_DeleteItem_ResponseElements) | object | The item that was deleted. This field will be null if the item was not found. | - -#### Get item - -**Request** - -| Property name | Data type | Required | Description | -| --- | --- | --- | --- | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_GetItem.html#DDB-GetItem-request-TableName) | string | Yes | The name of the table containing the requested item. | -| [Primary key components](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_GetItem.html#DDB-GetItem-request-Key) | object | Yes | A map of attribute names to `AttributeValue` objects, representing the primary key of the item to retrieve. For the primary key, you must provide all the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
| - -**Response** - -| Property | Data type | Description | -| ----------------------------------------------------------------------------------------------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Attributes](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_GetItem.html#DDB-GetItem-response-Item) | object | A map of attribute names to `AttributeValue` objects, representing the item retrieved. If there is no matching item, the response will contain only the consumed capacity, and a null attributes field. The keys of the attributes map correspond to the column names of the table | - -#### Update item - -**Request** - -| Property name | Data type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -------- | ------------------------------------------------------------------------------------------------------------------------- | -| [Table name](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-TableName) | string | Yes | The name of the table to update the item in. | -| [Primary key components](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-Key) | map | Yes | A map of attribute names to `AttributeValue` objects, representing the primary key of the item to update. | -| [Key attributes](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdates) | map | Yes | A map of attribute names to `AttributeValue` objects, representing the attributes to update. | -| [Attribute action](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdates) | dropdown | No | Dropdown option for each attribute to be updated, allowing selection between "PUT" (add or replace) and "DELETE" (remove) | - -**Response** - -| Property | Data type | Description | -| --------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [UpdateItemOutcome](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBMapper.Methods.html#DynamoDBMapper.Methods.updateItem) | object | An object representing the outcome of the `UpdateItem` operation. The `UpdateItemOutcome` object contains the updated attributes of the item, as well as other metadata about the operation, such as the consumed capacity. | - -## Request example - -| Section | Field | Description | Example value | -| -------------- | ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------- | -| Operation | Category | Choose the category of the operation to be performed. 
| Item | -| | Action | Select the specific action to update an item in the DynamoDB table. | Update item | -| Authentication | Authentication type | The method of AWS authentication; credentials are used here. | Credentials | -| | Access key | An example of an AWS access key. | `AKIAU3GOTH...JBYX` | -| | Secret key | An example of an AWS secret key. | `bZ/LPpqaw...0igikS` | -| | Region | The AWS region where the DynamoDB table is located. | `us-east-1` | -| Input | Table name | The name of the DynamoDB table to be updated. | `test` | -| | Primary key components | The primary key component(s) of the item to be updated. | `{"id": "5"}` | -| | Key attributes | JSON object representing the new values for the item attributes. | `{ "stringValue": "StringValue", "numberValue": 42, "booleanValue": true }` | -| | Attribute action | The action to be performed on the attributes. Here it's set to PUT, which means the specified attributes will be added or updated. | PUT | -| Output mapping | Result variable | The name of the variable that will store the response from DynamoDB. | `result` | -| | Result expression | The FEEL expression used to map the DynamoDB response to process variables. Not provided in the screenshots. | - | - -## Response Mapping - -When using the DynamoDB Connector, the response from the DynamoDB Connector will be available in a temporary local `response` variable. This variable can be mapped to the process by specifying the **Result Variable**. - -For example, if you use the **Update Item** method in the DynamoDB Connector, the response may look like this: - -```json -{ - "action": "updateItem", - "status": "OK", - "response": { - "Attributes": { - "ID": { - "N": "3" - }, - "price": { - "N": "10" - } - } - } -} -``` - -In this example, the `response` variable contains an `Attributes` object with the updated values for the specified item. - -The following fields are available in the `response` variable: - -- `action`: The action that was performed by the DynamoDB Connector. -- `status`: The status of the response, which will be "OK" if the operation was successful. -- `response`: The response from the DynamoDB service, which will contain the updated attributes of the specified item. - -You can choose to unpack the content of your `response` into multiple process variables using the **Result Expression**, which is a [FEEL Context Expression](/components/modeler/feel/language-guide/feel-context-expressions.md). - -The **Result Expression** allows you to access specific attributes from the response and assign them to process variables that can be used in subsequent steps of your process. - -```feel -= { - id: response.response.Attributes.ID.N, - price: response.response.Attributes.price.N -} -``` - -In this example, we are using the **Result Expression** to extract the **ID** and **price** attributes from the response variable and assign them to the id and price process variables, respectively. You can then use these variables in subsequent steps of your process. - -:::note -The syntax for accessing attributes in the **Result Expression** may vary depending on the structure of your response object. You can refer to the [FEEL Context Expression](/components/modeler/feel/language-guide/feel-context-expressions.md) documentation for more information on how to use the **Result Expression**. -::: - -## Error handling - -The **Amazon DynamoDB Connector** may throw the following exceptions: - -- AwsDynamoDbConnectionException: Thrown if there is an error connecting to DynamoDB. 
-- AwsDynamoDbExecutionException: Thrown if there is an error executing a DynamoDB operation. -- AwsDynamoDbConfigurationException: Thrown if the Connector is not properly configured. - -All of these checked exceptions are wrapped in a `RuntimeException`, so be prepared to handle this type of exception as well. - -## Troubleshooting - -If you are having issues with the **Amazon DynamoDB Connector**, try the following: - -- Ensure your AWS credentials are correct. -- Ensure your DynamoDB table exists and is located in the specified region. -- Ensure your configuration properties are set correctly. -- Check the logs for any error messages. -- Contact (Camunda support)[https://camunda.com/services/support/] if you need further assistance. - -For more information on Amazon DynamoDB, visit the [official documentation](https://docs.aws.amazon.com/dynamodb/). - -## Using DynamoDB Connector best practice - -When using the DynamoDB Connector in a BPMN process, it is important to keep in mind that there is no guarantee that a requested item will be retrieved or updated immediately. In this case, it is recommended to build your BPMN diagram to periodically retry polling until the item is available. - -:::note -To avoid performance issues, it is recommended to limit the number of retries. -::: - -To learn more about implementing retry logic in your BPMN diagram, you can refer to the [Camunda BPMN examples](https://camunda.com/bpmn/examples/) page, which includes examples of BPMN diagrams with timer and loop configurations. - -## Appendix - -### AWS authentication types - -There are two options to authenticate the Connector with AWS: - -- Choose **Credentials** in the **Authentication** dropdown if you have a valid pair of access and secret keys provided by your AWS account administrator. This option is applicable for both SaaS and Self-Managed users. -- Choose **Default Credentials Chain (Hybrid/Self-Managed only)** in the **Authentication** dropdown if your system is configured as an implicit authentication mechanism, such as role-based authentication, credentials supplied via environment variables, or files on target host. This option is applicable only for Self-Managed or hybrid distribution. This approach uses the [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html) to resolve required credentials. diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md deleted file mode 100644 index ec5b67dc33d..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md +++ /dev/null @@ -1,305 +0,0 @@ ---- -id: amazon-eventbridge -sidebar_label: AWS EventBridge Connector -title: Amazon EventBridge Connector -description: Send events to Amazon EventBridge from your BPMN process. ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -The **Amazon EventBridge Connector** integrates your BPMN service with [Amazon EventBridge](https://aws.amazon.com/eventbridge/), enabling the sending of events from your workflows for further processing or routing to other AWS services. It provides seamless event-driven integration within your business processes. - -For more information, refer to the [Amazon EventBridge documentation](https://docs.aws.amazon.com/eventbridge/index.html). 
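-
-For example, a single event published through the Connector consists of a target event bus, a source, a detail type, and a JSON detail payload. The following is a minimal sketch using the field names of the EventBridge `PutEvents` entry structure; all values are placeholders for illustration only, and the detail payload is shown inline as a JSON object for readability:
-
-```json
-{
-  "EventBusName": "my-event-bus",
-  "Source": "com.example.order-service",
-  "DetailType": "OrderCreated",
-  "Detail": {
-    "orderId": "12345",
-    "status": "created"
-  }
-}
-```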
- -## Prerequisites - -Before using the **Amazon EventBridge Connector**, ensure you have the necessary permissions in your AWS account to send events to EventBridge. You will need an access key and secret key of a user with the appropriate permissions. Refer to the [AWS documentation](https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html) for more information. - -:::note -Use Camunda secrets to avoid exposing your AWS IAM credentials as plain text. Refer to our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create an Amazon EventBridge Connector task - -To use the **Amazon EventBridge Connector** in your process, you can either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Refer to our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Configure the Amazon EventBridge Connector - -Follow these steps to configure the Amazon EventBridge Connector: - -1. Choose an applicable authentication type from the **Authentication** dropdown. Learn more about authentication types in the related [appendix entry](#aws-authentication-types). -2. In the **Authentication** section, enter the relevant IAM key and secret pair of the user with permissions to send events to [Amazon EventBridge](https://aws.amazon.com/eventbridge). -3. In the **Configuration** section, specify the AWS region where your EventBridge resides. -4. In the **Event Details** section, provide the following information: - - **Event bus name**: Enter the name of the destination event bus. Refer to the [Amazon EventBridge documentation](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-create-event-bus.html) for more details on event buses. - - **Source**: Enter the value that identifies the service that generated the event. - - **Detail type**: Enter the type of event being sent. Refer to the [Amazon documentation](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-events-structure.html) for more information on these properties. -5. In the **Event Payload** section, enter a JSON object that contains information about the event. -6. (Optional) In the **Output Mapping** section, you can set a **Result variable** or **Result expression**. Refer to the [response mapping documentation](/docs/components/connectors/use-connectors/index.md#response-mapping) to learn more. -7. (Optional) In the **Error Handling** section, define the **Error expression** to handle errors that may occur during the event sending process. Refer to the [response mapping documentation](/docs/components/connectors/use-connectors/index.md#bpmn-errors) to learn more. - -## Amazon EventBridge Connector response - -The **Amazon EventBridge Connector** returns the [original response](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_PutEvents.html) from the Amazon EventBridge service, including the **sdkResponseMetadata** and **sdkHttpMetadata**. 
Here is an example of the response: - -```json -{ - "sdkResponseMetadata": { - "requestId": "766647a2-835a-418b-9161-94245d0c93a3" - }, - "sdkHttpMetadata": { - "httpHeaders": { - "Content-Length": "85", - "Content-Type": "application/x-amz-json-1.1", - "Date": "Fri, 23 Jun 2023 08:39:22 GMT", - "x-amzn-RequestId": "766647a2-835a-418b-9161-94245d0c93a3" - }, - "httpStatusCode": 200, - "allHttpHeaders": { - "x-amzn-RequestId": ["766647a2-835a-418b-9161-94245d0c93a3"], - "Content-Length": ["85"], - "Date": ["Fri, 23 Jun 2023 08:39:22 GMT"], - "Content-Type": ["application/x-amz-json-1.1"] - } - }, - "failedEntryCount": 0, - "entries": [ - { - "eventId": "bb86b1af-9abb-0f8e-28c2-c69c24c35e05", - "errorCode": null, - "errorMessage": null - } - ] -} -``` - -## Appendix - -### AWS authentication types - -There are two options to authenticate the Connector with AWS: - -- Choose **Credentials** in the **Authentication** dropdown if you have a valid pair of access and secret keys provided by your AWS account administrator. This option is applicable for both SaaS and Self-Managed users. -- Choose **Default Credentials Chain (Hybrid/Self-Managed only)** in the **Authentication** dropdown if your system is configured as an implicit authentication mechanism, such as role-based authentication, credentials supplied via environment variables, or files on target host. This option is applicable only for Self-Managed or hybrid distribution. This approach uses the [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html) to resolve required credentials. - -## Next steps - -- [Amazon EventBridge documentation](https://docs.aws.amazon.com/eventbridge/) -- Learn about [other Connectors available](./available-connectors-overview.md) in Camunda to integrate with different systems and services. -- Learn more about using Connectors [here](../use-connectors/index.md). - - - - - -The **Amazon EventBridge Webhook Connector** is an inbound Connector enabling you to start a BPMN process instance triggered by an event from [Amazon EventBridge](https://aws.amazon.com/eventbridge/). - -:::note -If you have used the **Amazon EventBridge Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/guides/update-guide/connectors/060-to-070.md) for more details. -::: - -## Create an Amazon EventBridge Webhook Connector task - -1. Start building your BPMN diagram. You can use the **Amazon EventBridge Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. -2. Select the applicable element and change its template to an **Amazon EventBridge Connector**. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the event trigger. - -## Configure the Amazon EventBridge Webhook Connector - -### Fill properties in the Webhook Configuration section - -1. Choose one of the required methods in the **Webhook method** property. For example, if you know the webhook will be triggered by the **POST** method, choose **POST**. Alternatively, if it is not essential to specify a specific method for the webhook trigger, select **ANY**. -2. Configure the **Webhook ID**. By default, the **Webhook ID** is pre-filled with a random value. This value will be part of the Webhook URL. 
For more details about Webhook URLs, refer to the section below on [activating the Amazon EventBridge Webhook Connector by deploying your diagram](#activate-the-amazon-eventbridge-connector-by-deploying-your-diagram). -3. (Optional) Fill in the **Event Bus Name** property if you want to specify a specific event bus to subscribe to. If left empty, the default event bus will be used. - -### Fill properties in the Authorization section - -The Amazon EventBridge Webhook Connector supports four types of authorization: - -- **None (without authorization)**: No authentication is required for the webhook. Anyone can trigger the webhook without any credentials. - -- **JWT (JSON Web Token)**: This authorization type requires the following properties to be filled: - - - **JWK URL**: A link to the JSON Web Key (JWK) Set containing the public keys used to verify the JWT signature. [Learn more about JWK](https://datatracker.ietf.org/doc/html/rfc7517). - - **JWT Role Property Expression** (optional): An expression to extract the roles from the JWT token. These roles will be used to check against the **Required Roles** property. For example, the expression could be: - - ``` - =if admin = true then ["admin"] else roles - ``` - - - **Required Roles** (optional): A list of roles to test JWT roles against. If provided, the webhook will only be triggered if the JWT token contains at least one of the required roles. For example, if the required role is "admin", the property could be: - - ``` - ["admin"] - ``` - -- **Basic**: This authorization type requires the following properties to be filled: - - - **Username**: The username to authenticate the webhook. - - **Password**: The password associated with the provided username. - -- **API Key**: This authorization type requires the following properties to be filled: - - **API Key**: The API key that needs to be provided in the request to authenticate the webhook. - - **API Key Locator**: A FEEL expression that extracts the API key from the request. This expression is evaluated in the Connector Runtime to retrieve the API key from the incoming request. For example, the API Key Locator could be: - ``` - =split(request.headers.authorization, " ")[2] - ``` - or - ``` - request.headers.mycustomapikey - ``` - -Select the appropriate authorization type based on your security requirements and fill in the corresponding properties accordingly. - -### Fill properties in the **Activation** section - -1. (Optional) Configure the **Activation Condition**. This condition will be used to filter the events from the specified event source. For example, if an incoming Amazon EventBridge event has the following body: - -``` - { - "version": "0", - "id": "6d3d35b7-5bf2-43ec-9e55-5cfb27ad31b4", - "detail-type": "MyEvent", - "source": "custom.application", - "account": "123456789012", - "time": "2023-07-25T12:34:56Z", - "region": "us-west-2", - "resources": [], - "detail": { - "shipment": "123456789", - "status": "received" - } - } -``` - -the Activation Condition value might look like this: - -``` -=(get value(request.body, "detail-type")="MyEvent" and request.body.detail.status="received") -``` - -This condition will trigger the Amazon EventBridge Webhook Connector only when the detail-type is "MyEvent" and the status is "received". - -2. When using the **Amazon EventBridge Webhook Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. 
- -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. - -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime, and the result is used to correlate the message. - -For example, if your correlation key is defined with a process variable named `myCorrelationKey`, and you want to correlate by the `shipment` property in the request detail, which contains: - -```json -{ - "version": "0", - "id": "6d3d35b7-5bf2-43ec-9e55-5cfb27ad31b4", - "detail-type": "MyEvent", - "source": "custom.application", - "account": "123456789012", - "time": "2023-07-25T12:34:56Z", - "region": "us-west-2", - "resources": [], - "detail": { - "shipment": "123456789", - "status": "received" - } -} -``` - -your correlation key settings will look like this: - -- **Correlation key (process)**: `=myCorrelationKey` -- **Correlation key (payload)**: `=request.body.detail.shipment` - -## Activate the Amazon EventBridge Connector by deploying your diagram - -Once you click **Deploy**, your Amazon EventBridge Webhook Connector will be activated and ready to receive events. - -The URLs of the exposed Amazon EventBridge Webhooks adhere to the following pattern: - -`http(s):///webhooks/` - -- `` is the URL of the Connectors component deployment. When using the Camunda 8 SaaS offering, this will typically contain your cluster region and cluster ID. -- `` is the ID (path) you configured in the properties of your Amazon EventBridge Webhook Connector. - -:::note -If you make changes to your Amazon EventBridge Webhook Connector configuration, redeploy the BPMN diagram for the changes to take effect. -::: - -When you click on the event with the Amazon EventBridge Webhook Connector applied to it, a new **Webhooks** tab will appear in the properties panel. -This tab displays the URL of the Amazon EventBridge Webhook Connector for every cluster where you have deployed your BPMN diagram. - -:::note -The **Webhooks** tab is only supported in Web Modeler as part of the Camunda 8 SaaS offering. -You can still use the Amazon EventBridge Webhook Connector in Desktop Modeler or with Camunda 8 Self-Managed. -In that case, Amazon EventBridge Webhook Connector deployments and URLs will not be displayed in Modeler. -::: - -## Variable mapping - -The **Variable mapping** section allows you to configure the mapping of the event payload to the process variables. - -- Use the **Result variable** to store the event data in a process variable. For example, `myEventPayload`. -- Use the **Result expression** to map specific fields from the event payload into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). 
For example, given the Amazon EventBridge Connector is triggered with an event payload like: - -``` -{ - "id": "6d3d35b7-5bf2-43ec-9e55-5cfb27ad31b4", - "detail-type": "MyEvent", - "source": "custom.application", - "region": "us-west-2", - "resources": [], - "detail": { - "event": "order_created", - "customer_id": "12345", - "order_total": 100.50 - } -} -``` - -and you would like to extract the `customer_id` and `order_total` as process variables `customerId` and `orderTotal`, the **Result Expression** might look like this: - -``` -= { -customerId: request.body.detail.customer_id, -orderTotal: request.body.detail.order_total -} -``` - -## Example of configuring Amazon EventBridge - -To configure Amazon EventBridge, follow the steps below: - -1. Go to the [AWS Management Console](https://aws.amazon.com/console/). -2. Set the required permissions for EventBridge by navigating to: https://aws.permissions.cloud/iam/events. -3. Access Amazon EventBridge service by going to [Amazon EventBridge](https://aws.amazon.com/eventbridge/). -4. Click **Integration > API Destination**. -5. Switch to the **Connection** tab. -6. Create a new connection with the required authorization type (basic, API key, OAuth). -7. Now, create a new API destination with the following information: - - Select the previously created **connection**. - - Choose the appropriate **HTTP method**. - - Specify the **API destination endpoint**, which should be the webhook URL generated after deploying the BPMN diagram. -8. Create a new event bus by following the documentation [here](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-create-event-bus.html). -9. Lastly, create a rule using the **API destination** that you already created. Refer to the [documentation](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-get-started.html) for guidance. - -## Next steps - -- Learn more about [Amazon EventBridge](https://aws.amazon.com/eventbridge/) and its capabilities. -- Explore other [Connectors available](./available-connectors-overview.md) in Camunda to integrate with different systems and services. -- Learn more about using Connectors [here](../use-connectors/index.md). -- Learn more about inbound Connectors [here](../use-connectors/inbound.md). - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-sns.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-sns.md deleted file mode 100644 index ff88fe40f72..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-sns.md +++ /dev/null @@ -1,246 +0,0 @@ ---- -id: amazon-sns -title: Amazon Simple Notification Service Connector -sidebar_label: AWS SNS Connector -description: Publish messages to Amazon Simple Notification Service (SNS) from your BPMN process. ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -The **Amazon Simple Notification Service (SNS)** Connector is an outbound Connector that allows you to connect your BPMN service with [Amazon Simple Notification Service (SNS)](https://aws.amazon.com/sns/) to send messages. - -## Prerequisites - -To use the **Amazon SNS Connector**, you need to have an SNS Topic, IAM key, and secret pair with the `sns:Publish` policy relative to your SNS. - -:::note -Use Camunda secrets to avoid exposing your AWS IAM credentials as plain text. 
Refer to an [appendix entry](#how-do-i-store-aws-iam-secrets-for-my-sns-connector) and the [SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/sns-using-identity-based-policies.html#sns-policy-keys) to learn more. -::: - -## Create an Amazon SNS Connector task - -To use the **Amazon SNS Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Amazon SNS Connector for sending messages executable - -To make your Amazon SNS Connector for sending messages executable, take the following steps: - -1. Choose an applicable authentication type from the **Authentication** dropdown. Learn more about authentication types in the related [appendix entry](#aws-authentication-types). -2. Set the relevant IAM key and secret pair in the **Authentication** section. For example, `{{secrets.MY_AWS_ACCESS_KEY}}`. The value can be plain text, but this is not recommended due to security concerns. -3. In the **Topic Properties** section, set the topic ARN of your SNS topic as well as its region. -4. In the **Input message data** section, fill out the field **Message** with the data you would like to publish to the topic. The field requires FEEL input. -5. (Optional) In the **Input message data** section, fill out the field **Message attributes** to set optional message metadata. This field requires FEEL input. Refer to the relevant [appendix](#what-are-the-message-attributes-and-how-can-i-set-them) section to find out more about this field. -6. (Optional) In the **Input message data** section, fill out the field **Subject** to set optional message subject. FEEL input of the field is optional. Length must be less than 100 characters. -7. (FIFO only) For a FIFO type topic in Amazon SNS, a **Message Group ID** is required. This ID ensures that messages within the same group are delivered in sequence. The [Amazon SNS documentation on FIFO topics](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) provides more details on Message Group ID usage. Additionally, an optional **Message Deduplication ID** can be provided. This is useful for message deduplication in FIFO topics and its necessity depends on the [deduplication settings of your SNS FIFO topic](https://docs.aws.amazon.com/sns/latest/dg/sns-message-deduplication.html). The Message Deduplication ID helps ensure Amazon SNS does not resend the same message within the deduplication interval. - -## Amazon SNS Connector response - -The **Amazon SNS Connector** returns the SNS message identifier of a newly created message. -The response contains a `messageId` variable. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. For example: - -``` -= { - "createdMessageID": response.messageId -} -``` - -## Appendix & FAQ - -### What are the message attributes and how can I set them? - -Amazon SNS lets you include structured metadata (such as timestamps, geospatial data, signatures, and identifiers) with messages using message attributes. -The **Amazon SNS Connector** allows you to include non-binary message attributes in the **Input message data** section. 
The message attribute value must be composed to be compliant with Amazon SNS [message attribute data format](https://docs.aws.amazon.com/sns/latest/dg/sns-message-attributes.html). - -Example of a valid message attribute as a FEEL value: - -``` -= { - "timestamp":{ - "StringValue":today(), - "DataType":"String" - }, - "messageSubmittedBy":{ - "StringValue":"user12345", - "DataType":"String" - } -} -``` - -### How do I store AWS IAM secrets for my SNS Connector? - -Use Camunda secrets to avoid exposing your AWS IAM credentials. Follow our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. - -### AWS authentication types - -There are two options to authenticate the Connector with AWS: - -- Choose **Credentials** in the **Authentication** dropdown if you have a valid pair of access and secret keys provided by your AWS account administrator. This option is applicable for both SaaS and Self-Managed users. -- Choose **Default Credentials Chain (Hybrid/Self-Managed only)** in the **Authentication** dropdown if your system is configured as an implicit authentication mechanism, such as role-based authentication, credentials supplied via environment variables, or files on target host. This option is applicable only for Self-Managed or hybrid distribution. This approach uses the [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html) to resolve required credentials. - - - - - -The **Amazon Simple Notification Service (SNS) inbound Connector** is a Connector that allows you to start or continue -a BPMN process triggered by an [Amazon SNS](https://console.aws.amazon.com/sns/home) notification. - -## Create an Amazon SNS inbound Connector task - -1. Start building your BPMN diagram. You can use the **Amazon SNS inbound Connector** with either a **Start Event** or **Intermediate Catch Event**. -2. Select the applicable element and change its template to an **Amazon SNS connector**. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the webhook. -6. Navigate to the **Webhooks** tab in the properties panel to observe the webhook URL. - -## Make your Amazon SNS inbound Connector for receiving notifications executable - -1. In the **Subscription Configuration** section, configure the **Subscription ID**. By default, **Subscription ID** is pre-filled with a random value. This value will be a part of the topic subscription URL. -2. Set the **Allow to receive messages from topic(s)** value to **any** if the process may be triggered by any topic, or **Specific topic(s)** if you wish to allow-list only certain topics to start a new BPMN process. -3. If you have chosen the **Specific topic(s)**, you have to list comma-separated topics in the field **Topic ARN(s)** as well. In that case, the **Amazon SNS inbound Connector** will auto-approve each qualified subscription request. -4. In the section **Activation**, configure **Condition** when the Amazon SNS topic can trigger a new BPMN process. The following example will trigger a new BPMN process for every notification with a subject _Start BPMN_: `=(request.body.Subject = "Start BPMN")`. -5. In the section **Variable mapping** fill the field **Result variable** to store the response in a process variable. For example, `myResultVariable`. -6. 
In the section **Variable expression** fill the field to map specific fields from the response into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). - The following example will extract both message and subject from Amazon SNS message: `={message: request.body.Message, subject: request.body.Subject}`. - -When using the **Amazon SNS inbound Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message. - -For example, given that your correlation key is defined with `myCorrelationKey` process variable, and the request body contains `"MessageAttributes": {"attrName1" : {"Type":"String","Value":"attrVal"}}`, your correlation key settings will look like this: - -- **Correlation key (process)**: `=myCorrelationKey` -- **Correlation key (payload)**: `=request.body.MessageAttributes.attrName1.Value` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -## Activate the Amazon SNS inbound Connector by deploying your diagram - -Once you click the **Deploy** button, your **Amazon SNS inbound Connector** will be activated and publicly available. - -URLs of the exposed **Amazon SNS Inbound Connector** adhere to the following pattern: - -`https:///inbound/` - -- `` is the URL of Connectors component deployment. When using the Camunda 8 SaaS offering, this will typically contain your cluster region and cluster ID. -- `` is the ID (path) you configured in the properties of your **Amazon SNS inbound Connector**. - -If you make changes to your **Amazon SNS inbound Connector** configuration, you need to redeploy the BPMN diagram for the changes to take effect. - -When you click on the event with **Amazon SNS inbound Connector** applied to it, a new **Webhooks** tab will appear in the properties panel. -This tab displays the URL of the **Amazon SNS inbound Connector** for every cluster where you have deployed your BPMN diagram. - -:::note -The **Webhooks** tab is only supported in Web Modeler as part of the Camunda 8 SaaS offering. -You can still use Amazon SNS inbound Connectors in Desktop Modeler, or with Camunda 8 Self-Managed. -In that case, Amazon SNS inbound Connector deployments and URLs will not be displayed in Modeler. -::: - -## Wiring with Amazon SNS - -1. Sign in to the [Amazon SNS console](https://console.aws.amazon.com/sns/home). -2. On the navigation panel, choose **Topics**. -3. Choose the **Create** subscription. -4. In the **Protocol** drop-down list, select **HTTPS**. -5. In the **Endpoint** box, paste in the URL of the subscription found in at the **Webhooks** tab of your BPMN - diagram that you want the topic to send messages. Then, choose **Create subscription**. -6. The confirmation message is displayed. Choose **Close**. Your new subscription's **Subscription ID** - displays **PendingConfirmation**. Shortly after it will be confirmed by the BPMN process assuming **Allow to receive messages from topic(s)** contains the SNS topic ARN. 
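-
-If you prefer to script this step instead of using the console, the subscription can also be created with the AWS CLI. The following is a minimal sketch; the topic ARN reuses the example ARN from this page, and the endpoint placeholders stand for the webhook URL shown in the **Webhooks** tab of your deployed diagram:
-
-```
-aws sns subscribe \
-  --topic-arn arn:aws:sns:eu-central-1:1234567890:SNSWebhook \
-  --protocol https \
-  --notification-endpoint https://YOUR_CONNECTORS_URL/inbound/YOUR_WEBHOOK_ID
-```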
- -## Security considerations - -### Access control - -The field **Allow to receive messages from topic(s)** and related **Topic ARN(s)** allows you to control which Amazon SNS topics can trigger a BPMN process. -You can also achieve the same outcome by specifying **Condition** in the **Activation** section. For example, given **Topic ARN(s)** equals `arn:aws:sns:eu-central-1:1234567890:SNSWebhook`, -is the same as **Condition** equals `=(request.body.TopicArn = "arn:aws:sns:eu-central-1:1234567890:SNSWebhook")`. - -### Integrity - -Each Amazon SNS message is digitally signed with an AWS private key. The body of a message contains a digital signature of -the entire content. The **Amazon Simple Notification Service (SNS) Inbound Connector** verifies every message against -the Amazon SNS public certificate to ensure the message is of known origin and has not been tampered with. - -## Appendix - -### Amazon SNS Subscription message example - -``` -POST https:///inbound/ - -connection: close -accept-encoding: gzip,deflate -user-agent: Amazon Simple Notification Service Agent -host: -content-length: 9999 -content-type: text/plain; charset=UTF-8 -x-amz-sns-topic-arn: arn:aws:sns:eu-central-1:1234567890:SNSWebhook -x-amz-sns-message-id: b9b4574f-b4ab-4c03-ac14-a3145896747f -x-amz-sns-message-type: SubscriptionConfirmation - -{ - "Type": "SubscriptionConfirmation", - "MessageId": "b9b4574f-b4ab-4c03-ac14-a3145896747f", - "Token": "233...18b", - "TopicArn": "arn:aws:sns:eu-central-1:1234567890:SNSWebhook", - "Message": "You have chosen to subscribe to the topic arn:aws:sns:eu-central-1:1234567890:SNSWebhook.\nTo confirm the subscription, visit the SubscribeURL included in this message.", - "SubscribeURL": "https://sns.eu-central-1.amazonaws.com/?Action=ConfirmSubscription&TopicArn=arn:aws:sns:eu-central-1:1234567890:SNSWebhook&Token=233...18b", - "Timestamp": "2023-04-26T15:04:47.883Z", - "SignatureVersion": "1", - "Signature": "u+0i/F/+qew...zw==", - "SigningCertURL": "https://sns.eu-central-1.amazonaws.com/SimpleNotificationService-56e67fcb41f6fec09b0196692625d385.pem" -} -``` - -### Amazon SNS Notification message example - -``` -POST https:///inbound/ - -connection: close -accept-encoding: gzip,deflate -user-agent: Amazon Simple Notification Service Agent -host: webhook.site -content-length: 1046 -x-amzn-trace-id: Root=1-64493ecd-dcfadf2f053429acb884eee3;Sampled=1 -content-type: text/plain; charset=UTF-8 -x-amz-sns-subscription-arn: arn:aws:sns:eu-central-1:1234567890:SNSWebhook:4aa14ec3-a492-4a8e-8247-ea658d1aad96 -x-amz-sns-topic-arn: arn:aws:sns:eu-central-1:1234567890:SNSWebhook -x-amz-sns-message-id: 2e062e6b-a527-5e68-b69b-72a8e42add60 -x-amz-sns-message-type: Notification - -{ - "Type" : "Notification", - "MessageId" : "2e062e6b-a527-5e68-b69b-72a8e42add60", - "TopicArn" : "arn:aws:sns:eu-central-1:1234567890:SNSWebhook", - "Subject" : "Subject - test", - "Message" : "Hello, world", - "Timestamp" : "2023-04-26T15:10:05.479Z", - "SignatureVersion" : "1", - "Signature" : "a2w...A==", - "SigningCertURL" : "https://sns.eu-central-1.amazonaws.com/SimpleNotificationService-56e67fcb41f6fec09b0196692625d385.pem", - "UnsubscribeURL" : "https://sns.eu-central-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:eu-central-1:1234567890:SNSWebhook:4aa14ec3-a492-4a8e-8247-ea658d1aad96", - "MessageAttributes" : { - "attrName1" : {"Type":"String","Value":"attrVal"} - } -} -``` - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-sqs.md 
b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-sqs.md deleted file mode 100644 index e8a420d305b..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/amazon-sqs.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -id: amazon-sqs -title: Amazon Simple Queue Service Connector -sidebar_label: AWS SQS Connector -description: Send messages to Amazon Simple Queue Service (SQS) from your BPMN process. ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -The **Amazon SQS Connector** is an outbound Connector that allows you to connect your BPMN service with [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) to send messages. - -## Prerequisites - -To use the **Amazon SQS Connector**, you need to have an SQS Queue, IAM key, and secret pair with the `sqs:SendMessage` policy relative to your SQS. - -Use Camunda secrets to avoid exposing your AWS IAM credentials as plain text. Refer to an [appendix entry](#how-do-i-store-aws-iam-secrets-for-my-sqs-connector) and the [SQS Developer Guide](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-basic-examples-of-iam-policies.html) to learn more. - -## Create an Amazon SQS Connector task - -To use the **Amazon SQS Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Amazon SQS Connector for sending messages executable - -To make your Amazon SQS Connector for sending messages executable, take the following steps: - -1. Choose an applicable authentication type from the **Authentication** dropdown. Learn more about authentication types in the related [appendix entry](#aws-authentication-types). -2. Set the relevant IAM key and secret pair in the **Authentication** section. For example, `{{secrets.MY_AWS_ACCESS_KEY}}`. The value can be plain text, but this is not recommended due to security concerns. -3. In the **Queue Properties** section, set the URL of your SQS queue, its region, and its type. -4. In the **Input message data** section, fill the **Message body** with the data you would like to submit to the queue. The field requires FEEL input. -5. (Optional) In the **Input message data** section, fill out the field **Message attributes** to set optional message metadata. This field requires FEEL input. Refer to the relevant [appendix](#what-are-the-message-attributes-and-how-can-i-set-them) section to find out more about this field. -6. (FIFO only) If you are using a queue of type **FIFO**, a [**Message Group ID** must be provided](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html). An optional **Message Deduplication ID** can be provided as well, depending on how you [configured](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) the message deduplication of the queue. - -## Amazon SQS Connector response - -The **Amazon SQS Connector** returns the SQS message identifier of a newly created message. -The response contains a `messageId` variable. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. 
-2. Use **Result Expression** to map fields from the response into process variables. For example: - -``` -= { - "createdMessageID": response.messageId -} -``` - -## Appendix & FAQ - -### What are the message attributes and how can I set them? - -Amazon SQS lets you include structured metadata (such as timestamps, geospatial data, signatures, and identifiers) with messages using message attributes. -The **Amazon SQS Connector** allows you to include non-binary message attributes in the section **Input message data**. The message attribute value must be composed to be compliant with Amazon SQS [message attribute data format](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes). - -Example of a valid message attribute as a FEEL value: - -``` -= { - "timestamp":{ - "StringValue":today(), - "DataType":"String" - }, - "messageSubmittedBy":{ - "StringValue":"user12345", - "DataType":"String" - } -} -``` - -### How do I store AWS IAM Secrets for my SQS Connector? - -Use Camunda secrets to avoid exposing your AWS IAM credentials. Follow our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. - -### AWS authentication types - -There are two options to authenticate the Connector with AWS: - -- Choose **Credentials** in the **Authentication** dropdown if you have a valid pair of access and secret keys provided by your AWS account administrator. This option is applicable for both SaaS and Self-Managed users. -- Choose **Default Credentials Chain (Hybrid/Self-Managed only)** in the **Authentication** dropdown if your system is configured as an implicit authentication mechanism, such as role-based authentication, credentials supplied via environment variables, or files on target host. This option is applicable only for Self-Managed or hybrid distribution. This approach uses the [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html) to resolve required credentials. - - - - - -:::note -To maintain stable behavior from the Amazon SQS Connector, do not subscribe multiple Amazon SQS Connectors to the same queue. - -Successfully consumed messages are removed from the queue, even if they are not correlated. -::: - -The **Amazon SQS Inbound Connector** is an inbound Connector that allows you to start or continue -a BPMN process triggered by [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/). - -## Prerequisites - -Before using the Amazon SQS inbound Connector, ensure you have the following: - -1. An active SQS Queue in your AWS account. -2. IAM credentials with the necessary permissions to receive messages from the SQS Queue. Use Camunda secrets to store your AWS IAM credentials securely. Refer to the [Camunda secrets documentation](/components/console/manage-clusters/manage-secrets.md) for more details. - -## Create an SQS inbound Connector task - -To receive messages from Amazon SQS in your process, follow these steps: - -1. Start building your BPMN diagram. You can use the **Amazon SNS Inbound Connector** with either **Start Event** or **Intermediate Catch Event** building blocks. -2. Select the appropriate element and change its template to an SQS inbound Connector. -3. Fill in all the required properties for the Connector, such as the AWS region, SQS Queue URL, and the visibility timeout. -4. Complete your BPMN diagram by adding other necessary elements and connectors. -5. 
Deploy the diagram to activate the SQS Inbound Connector. - -## Configure the SQS inbound Connector - -To configure the SQS inbound Connector and receive messages from your SQS Queue, follow these steps: - -1. Choose an applicable authentication type from the **Authentication** dropdown. Learn more about authentication types in the related [appendix entry](#aws-authentication-types-1). -2. Set the relevant IAM key and secret pair in the **Authentication** section. For example, `{{secrets.MY_AWS_ACCESS_KEY}}`. The value can be plain text, but this is not recommended due to security concerns. -3. In the **Queue Properties** section, set the URL of your SQS Queue and its region. -4. In the **Message polling properties** section, set the polling wait time. This is the duration (in seconds) for which the call waits for a message to arrive in the queue before returning. Refer to the [Amazon documentation](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-short-and-long-polling.html) for more details. -5. (Optional) In the **Use next attribute names for activation condition** section, set an array of **Attribute names** or **Message attribute name** (e.g., `["attributeName1", "attributeName2"]`) to receive messages from the queue with specific metadata. Alternatively, you can leave it empty to get results with all available attributes. Learn more about message metadata [here](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html). -6. (Optional) Configure the **Activation Condition**. For example, if an external message has the body `{"messageId": 1, "body": "Hi team", "messageAttributes":{"key":{"stringValue":"value"}}...}`, the **Activation Condition** value might look like `=(messageAttributes.key.stringValue="value")`. Leave this field empty to receive all messages every time. -7. Set **Variable mapping**. For example, to get only the message body, you can set `{resultBody: body}` in the **Result expression**. Learn more about **Variable mapping** [here](../use-connectors/index.md). - -When using the **Amazon SQS inbound Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message. 
- -Example for correlation and activation condition properties (correlation by ID in the body and activation condition by message attribute): - -SQS message: - -```json -{ - "messageId": "12345", - "receiptHandle": "ABCDE", - "mD5OfBody": "1c6bb59997376e5182a88a6f582cd92a", - "body": { - "id": 4567, - "value": "Hello world" - }, - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1703062074171", - "SenderId": "333293239507", - "ApproximateFirstReceiveTimestamp": "1703062074185" - }, - "messageAttributes": { - "messageName": { - "stringValue": "myProcess", - "binaryValue": null, - "stringListValues": [], - "binaryListValues": [], - "dataType": "String" - } - }, - "md5OfMessageAttributes": "9de691a346c79e4fda4af06248aa9dfc" -} -``` - -- **Correlation key (process)**: `=4567` -- **Correlation key (payload)**: `=body.id` -- **Activation condition**: `=messageAttributes.key.stringValue="myProcess"` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -## Activate the SQS inbound Connector - -Once you click the **Deploy** button, your SQS inbound Connector will be activated and publicly available. Whenever the SQS inbound Connector receives a new message, a new BPMN process will be created. - -## Amazon SQS Connector response - -The **Amazon SQS Connector** provides the SQS message as a response. Utilize output mapping to align this response with process variables: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. This approach stores the entire SQS message as a process variable named `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. This approach allows for more granularity. Instead of storing the entire response in one variable, you can extract specific fields from the SQS message and assign them to different process variables. This is particularly useful when you are only interested in certain parts of the message, or when different parts of the message need to be used separately in your process. - Example: - -SQS message : - -```json -{ - "messageId": "12345", - "receiptHandle": "ABCDE", - "mD5OfBody": "1c6bb59997376e5182a88a6f582cd92a", - "body": { - "id": 4567, - "value": "Hello world" - }, - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1703062074171", - "SenderId": "33333333333", - "ApproximateFirstReceiveTimestamp": "1703062074185" - }, - "messageAttributes": { - "messageName": { - "stringValue": "myProcess", - "binaryValue": null, - "stringListValues": [], - "binaryListValues": [], - "dataType": "String" - } - }, - "md5OfMessageAttributes": "9de691a346c79e4fda4af06248aa9dfc" -} -``` - -To store the entire body in a process variable `resultBody`, ID from body to `bodyId`, and messageId to `messageId`, use: - -``` -= `{resultBody:body, bodyId:body.id, messageId: messageId}` -``` - -Learn more about **Variable mapping** [here](../use-connectors/index.md). - -## Appendix - -### AWS authentication types - -There are two options to authenticate the Connector with AWS: - -- Choose **Credentials** in the **Authentication** dropdown if you have a valid pair of access and secret keys provided by your AWS account administrator. This option is applicable for both SaaS and Self-Managed users. 
-- Choose **Default Credentials Chain (Hybrid/Self-Managed only)** in the **Authentication** dropdown if your system is configured as an implicit authentication mechanism, such as role-based authentication, credentials supplied via environment variables, or files on target host. This option is applicable only for Self-Managed or hybrid distribution. This approach uses the [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html) to resolve required credentials. - -## Next Steps - -- Explore more about [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) and its capabilities. -- Learn about [other Connectors available](./available-connectors-overview.md) in Camunda to integrate with different systems and services. -- Learn more about [using Connectors](../use-connectors/index.md). -- Learn more about [inbound Connectors](../use-connectors/inbound.md). - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/asana.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/asana.md deleted file mode 100644 index 470edb9f84b..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/asana.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -id: asana -title: Asana Connector -sidebar_label: Asana Connector -description: Manage Asana projects and tasks from your BPMN process. Learn how to create an Asana Connector task, and get started. ---- - -The Asana Connector is an outbound protocol Connector that allows you to connect your BPMN service with [Asana](https://asana.com/) to get and create Asana tasks and projects. - -## Prerequisites - -To use the **Asana Connector**, you must have an Asana [personal access token](https://developers.asana.com/docs/personal-access-token). - -:::note -Use Camunda secrets to avoid exposing your Asana personal access token as plain text. Learn more in our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md). -::: - -## Create an Asana Connector task - -To use the **Asana Connector** in your process, either change the type of existing task by clicking on it and using -the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. -Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Authentication - -In the **Authentication** section, provide a **Personal access token**. [Read more on how to obtain it](https://developers.asana.com/docs/personal-access-token). - -## Select operation to execute - -### Tasks - -#### Get tasks from a project - -- **Asana API:** [Get tasks from a project](https://developers.asana.com/reference/gettasksforproject). -- **Project ID:** Globally unique identifier for the project. - -#### Get a task by ID - -- **Asana API:** [Get a task](https://developers.asana.com/reference/gettask). -- **Task ID**: The task to operate on. - -#### Create a task - -- **Asana API:** [Create a task](https://developers.asana.com/reference/createtask). -- **Task name:** The name of the task. -- **Project ID:** Globally unique identifier for the project. -- **Parent task ID:** Globally unique identifier for the parent task. -- **Notes:** Free-form textual information associated with the task (i.e. its description). - -#### Delete a task - -- **Asana API:** [Delete a task](https://developers.asana.com/reference/deletetask). 
-- **Task ID**: The task to operate on. - -### Projects - -#### Get projects - -- **Asana API:** [Get multiple projects](https://developers.asana.com/reference/getprojects). -- **Workspace ID:** The workspace or organization to filter projects on. -- **Team ID:** The team to filter projects on. - -#### Get a project by ID - -- **Asana API:** [Get a project](https://developers.asana.com/reference/getproject). -- **Project ID:** Globally unique identifier for the project. - -#### Create a project in a workspace - -- **Asana API:** [Create a project in a workspace](https://developers.asana.com/reference/createprojectforworkspace). -- **Workspace ID:** Globally unique identifier for the workspace or organization. -- **Project name:** Name of the project. This is generally a short sentence fragment that fits on a line in the UI for maximum readability. However, it can be longer. -- **Project note:** Free-form textual information associated with the project (ie., its description). - -#### Delete a project - -- **Asana API:** [Delete a project](https://developers.asana.com/reference/deleteproject). -- **Project ID:** Globally unique identifier for the project. - -## Handle Connector response - -The **Asana Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**. Therefore, -handling response is still applicable [as described](/components/connectors/protocol/rest.md#response). diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/automation-anywhere.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/automation-anywhere.md deleted file mode 100644 index 9589f6bde47..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/automation-anywhere.md +++ /dev/null @@ -1,201 +0,0 @@ ---- -id: automation-anywhere -title: Automation Anywhere Connector -description: Orchestrate your Automation Anywhere queue items from your BPMN process. ---- - -The **Automation Anywhere Connector** allows you to orchestrate an Automation Anywhere queue from your BPMN process with [Automation Anywhere RPA](https://www.automationanywhere.com/) to add work items to the queue and obtain work item results. - -To start using the Connector, you need an instance with a [license](https://docs.automationanywhere.com/bundle/enterprise-v2019/page/enterprise-cloud/topics/control-room/dashboards/cloud-administration-licenses.html) we configured via an API service. Refer to the [official documentation page](https://docs.automationanywhere.com/bundle/enterprise-v2019/page/enterprise-cloud/topics/aae-client/bot-creator/using-the-workbench/cloud-install.html) to learn more about installing and configuring the Automation Anywhere API service. - -You also need a user account with the `AAE_Queue Admin` role to query and manage workload queues and work items in a Control Room. Read more about roles in the [official documentation](https://docs.automationanywhere.com/bundle/enterprise-v2019/page/enterprise-cloud/topics/control-room/administration/roles/cloud-system-created-roles.html). - -## Create an Automation Anywhere Connector task - -To use an **Automation Anywhere Connector** in your process, either change the type of existing task using the wrench-shaped **Change type** context menu or create a new Connector task by using the **Append Connector** context menu. Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. 
-
-## Make your Automation Anywhere Connector executable
-
-To work with Automation Anywhere, choose the required operation type in the **Operation** section and the authentication type in the **Authentication** section, then complete the mandatory fields highlighted in red in the Connector properties panel.
-
-:::note
-All mandatory and optional fields, which depend on the authentication and operation you select, are covered in the upcoming sections.
-:::
-
-## Authentication
-
-You can choose among the available **Automation Anywhere Connector** authentication types according to your authentication requirements.
-
-:::note
-We advise you to keep your credentials and secrets safe and avoid exposing them in the BPMN XML file by creating a secret:
-
-1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md).
-2. Name your secret (for example, `AUTOMATION_ANYWHERE_PASSWORD`) so you can reference it later in the Connector.
-
-:::
-
-### _Authenticate (username and password)_ authentication
-
-Select the **Automation Anywhere Connector** and fill out the following properties under the **Authentication** section:
-
-1. Select **Authenticate (username and password)** in the **Authentication** section.
-2. Set **Password** to the secret you created (for example, `{{secrets.AUTOMATION_ANYWHERE_PASSWORD}}`).
-3. Set **Username** to the secret you created (for example, `{{secrets.AUTOMATION_ANYWHERE_USERNAME}}`).
-4. Select the required **Multiple login** type. If this value is set to `true`, multiple API sessions are allowed. For more information on multi-login, see [multi-login user](https://docs.automationanywhere.com/bundle/enterprise-v2019/page/enterprise-cloud/topics/control-room/administration/users/cloud-multi-login-user.html).
-
-### _Authenticate (username and API key)_ authentication
-
-Select the **Automation Anywhere Connector** and fill out the following properties under the **Authentication** section:
-
-1. Select **Authenticate (username and API key)** in the **Authentication** section.
-2. Set **Username** to the secret you created (for example, `{{secrets.AUTOMATION_ANYWHERE_USERNAME}}`).
-3. Set **API key** to the secret you created (for example, `{{secrets.AUTOMATION_ANYWHERE_API_KEY}}`). The API key is a 40-character string generated in the Control Room. See the [create and assign API key generation role documentation](https://docs.automationanywhere.com/bundle/enterprise-v2019/page/enterprise-cloud/topics/control-room/administration/roles/cloud-control-room-apikey-role.html) to learn more.
-
-### _Authentication (refresh) token_ authentication
-
-Select the **Automation Anywhere Connector** and fill out the following properties under the **Authentication** section:
-
-1. Select **Authentication (refresh) token** in the **Authentication** section.
-2. Set **Token** to the secret you created (for example, `{{secrets.AUTOMATION_ANYWHERE_TOKEN}}`). It can be an authentication or refresh token. See the [authentication API documentation](https://docs.automationanywhere.com/bundle/enterprise-v11.3/page/enterprise/topics/control-room/control-room-api/api-authentication.html) to learn how to generate an authentication token, or see the [refresh token API documentation](https://docs.automationanywhere.com/bundle/enterprise-v11.3/page/enterprise/topics/control-room/control-room-api/refresh-authentication-token.html) to learn how to generate a refresh token.
-
-## Configuration
-
-### Control Room URL
-
-1. 
Set **Control Room URL** to `Control Room URL`. The Control Room URL is the URL you use to access the Automation Anywhere Control Room. The Control Room URL is typically provided by the Automation Anywhere administrator and is specific to the organization's instance of the platform (i.e. `https://domainname.myautomationanywhere.digital`). - -## Operation types - -The **Automation Anywhere Connector** currently supports two operation types in the **Operation type** dropdown list: _Add work item to the queue_ and _Get work item result from queue by ID_. - -### Add work item to the queue - -This operation provides the ability to add a work queue item in the specified queue. -It corresponds directly to the respective Automation Anywhere API - [`Add Work Items to the queue API`](https://docs.automationanywhere.com/bundle/enterprise-v11.3/page/enterprise/topics/control-room/control-room-api/add-work-item-data-to-queue-api.html). - -#### Usage - -1. Select **Add work item to the queue** from the **Operation type** dropdown in the **Operation** section. -2. Populate **Authentication section** as described in the [respective section](#authentication). -3. In the **Configuration** section, set the **Control Room URL** field as described in the [respective section](#control-room-url). -4. In the **Input** section, set **Work queue ID**. This is the identifier of a queue, where an item will be fetched from. -5. In the **Input** section, set **Work Item json Data** that you want to pass together with the item. The **Data** has to comply with the Automation Anywhere API, and should contain the following semantics: - -```json -{ - "coll_name": "your value", - "last_name": "Doe", - "email": "jane.doe@example.com" -} -``` - -#### Add work item to the queue response - -The operation **Add work item to the queue** returns information about the newly created item in the queue. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. It comes with a pre-filled value of `={itemId:response.body.list[1].id}`. To use operation _Get work item result from queue by ID_, you need an `itemId`. This expression will add it to the context for you. Learn more in [get work item result from queue by ID](#get-work-item-result-from-queue-by-id). - -Response example: - -```json -{ - "list": [ - { - "id": "40957", - "createdBy": "25", - "createdOn": "2021-11-24T01:53:10.175335900Z", - "updatedBy": "25", - "updatedOn": "2021-11-24T01:53:10.175335900Z", - "version": "0", - "json": { - "TRN_ID": "A11", - "DATA": "mydata" - }, - "result": "", - "deviceId": "0", - "status": "NEW", - "col1": "A11", - "col2": "", - "deviceUserId": "0", - "queueId": "0", - "comment": "", - "automationId": "0", - "totalPausedTime": "0", - "error": "", - "col6": "", - "jobExecutionId": "" - } - ] -} -``` - -### Get work item result from queue by ID - -This operation provides the ability to return the details of the specified work item from the work queue. -It corresponds directly to the respective Automation Anywhere API - [`List Work Items in queue with filter by work item ID`](https://docs.automationanywhere.com/bundle/enterprise-v2019/page/enterprise-cloud/topics/control-room/control-room-api/cloud-api-list-wlm-workitems.html). - -#### Usage - -1. Select **Get work item result from queue by ID** from the **Operation type** dropdown in the **Operation** section. -2. 
Populate **Authentication section** as described in the [respective section](#authentication). -3. In the **Configuration** section, set the **Control Room URL** field as described in the [respective section](#control-room-url). -4. In the **Input** section, set **Work queue ID**. This is the identifier of a queue, where an item will be fetched from. -5. In the **Input** section, set **Work item ID**. This is the identifier of the item to be fetched. - -#### Get work item result from queue by ID response - -Given you have a queue work item ID previously added to a queue, the operation **Get work item result from queue by ID** returns information about a certain work item. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. It comes with a pre-filled value of `={itemState:response.body.list[1].status}`. You will see the `itemState` in the process variables. Its value will let you know if the item was processed or not. - -Response example: - -```json -{ - "page": { - "offset": 0, - "total": 5, - "totalFilter": 1 - }, - "list": [ - { - "id": "11804", - "createdBy": "24", - "createdOn": "2020-05-26T10:19:34.786711300Z", - "updatedBy": "24", - "updatedOn": "2020-05-26T10:19:34.786711300Z", - "version": "1", - "json": {}, - "result": "", - "deviceId": "0", - "status": "NEW", - "col1": "Brian", - "col2": "Matthews", - "col3": "bmatthews0@example.com", - "deviceUserId": "0", - "queueId": "20", - "comment": "", - "automationId": "0", - "totalPausedTime": "0", - "error": "", - "col6": "", - "col10": "" - } - ] -} -``` - -## Using Automation Anywhere Connector best practice - -There is no guarantee a queue item will be processed right away. In that case, we suggest building your BPMN diagram to periodically retry polling. -To learn more, see an entry _Solution with Timer and Loop_ on the [Camunda BPMN examples](https://camunda.com/bpmn/examples/) page. - -:::note -To avoid performance issues, it is recommended to limit the number of loop retries. -::: diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/available-connectors-overview.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/available-connectors-overview.md deleted file mode 100644 index 94c90b72b3f..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/available-connectors-overview.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -id: available-connectors-overview -title: Overview -description: Take a closer look at the Connectors available in Camunda 8. ---- - -Out-of-the-box (OOTB) Connectors accelerate solution implementation by providing pre-built, ready-to-use connectors to popular external systems. - -Each Connector has a dedicated page with relevant configuration. - -All Connectors are available for Camunda 8 SaaS and [Self-Managed](../../../self-managed/connectors-deployment/install-and-start.md). - -Beginners to Connectors may want to get familiar with Connectors using a [guide](/guides/configuring-out-of-the-box-connector.md). 
-
-import Tabs from "@theme/Tabs";
-import TabItem from "@theme/TabItem";
-
-
-
-
-## Outbound Connectors
-
-- [Amazon DynamoDB Connector](/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md) - Interact with [Amazon DynamoDB NoSQL database service](https://aws.amazon.com/dynamodb/) within your BPMN process, enabling you to store and retrieve data from tables, as well as perform queries and scans.
-- [Amazon EventBridge Service Connector](/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md) - Send events using [Amazon EventBridge service](https://aws.amazon.com/eventbridge/) within your BPMN process.
-- [Amazon SNS Outbound Connector](/components/connectors/out-of-the-box-connectors/amazon-sns.md) - Send messages to [Amazon Simple Notification Service](https://aws.amazon.com/sns/) from your BPMN process.
-- [Amazon SQS Connector](/components/connectors/out-of-the-box-connectors/amazon-sqs.md) - Send messages to [Amazon Simple Queue Service](https://aws.amazon.com/sqs/) from your BPMN process.
-- [Asana Connector](/components/connectors/out-of-the-box-connectors/asana.md) - Manage [Asana](https://asana.com/) projects and tasks from your BPMN process.
-- [Automation Anywhere Connector](/components/connectors/out-of-the-box-connectors/automation-anywhere.md) - Orchestrate your [Automation Anywhere](https://www.automationanywhere.com/) queue from your BPMN process.
-- [AWS Lambda Connector](/components/connectors/out-of-the-box-connectors/aws-lambda.md) - Invoke [AWS Lambda Functions](https://aws.amazon.com/lambda/) from your BPMN process.
-- [Azure OpenAI](/components/connectors/out-of-the-box-connectors/azure-open-ai.md) - Interact with [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) from your BPMN process.
-- [Blue Prism](/components/connectors/out-of-the-box-connectors/blueprism.md) - Orchestrate your [Blue Prism](https://www.blueprism.com/) queue items from your BPMN process.
-- [Camunda Operate Connector](/components/connectors/out-of-the-box-connectors/operate.md) - Fetch process execution data from [Camunda Operate](https://camunda.com/platform/operate/).
-- [EasyPost Connector](/components/connectors/out-of-the-box-connectors/easy-post.md) - Create addresses, parcels, and shipments, as well as purchase and verify shipments with [EasyPost](https://www.easypost.com/) from your BPMN process.
-- [GitHub Connector](/components/connectors/out-of-the-box-connectors/github.md) - Manage [GitHub](https://github.com/) issues and releases from your BPMN process.
-- [GitLab Connector](/components/connectors/out-of-the-box-connectors/gitlab.md) - Manage [GitLab](https://about.gitlab.com/) issues and releases from your BPMN process.
-- [Google Drive Connector](/components/connectors/out-of-the-box-connectors/googledrive.md) - Create folders or files from a [Google Drive](https://www.google.com/drive/) template from your BPMN process.
-- [Google Maps Platform Connector](/components/connectors/out-of-the-box-connectors/google-maps-platform.md) - Validate addresses, retrieve postal addresses, and calculate distances with [Google Maps Platform Service](https://mapsplatform.google.com/) from your BPMN process.
-- [Google Sheets Connector](/components/connectors/out-of-the-box-connectors/google-sheets.md) - Allows you to work with an existing or new empty spreadsheet on [Google Drive](https://drive.google.com/) from your BPMN process.
-- [Hugging Face Connector](/components/connectors/out-of-the-box-connectors/hugging-face.md) - Interact with [Hugging Face](https://huggingface.co/) models from your BPMN process. -- [Kafka Producer Connector](/components/connectors/out-of-the-box-connectors/kafka.md) - Produce messages to [Kafka](https://kafka.apache.org/) from your BPMN process. -- [Microsoft Teams Connector](/components/connectors/out-of-the-box-connectors/microsoft-teams.md) - Interactions with [Microsoft Teams](https://www.microsoft.com/microsoft-teams/) from your BPMN process. -- [Microsoft 365 Connector](/components/connectors/out-of-the-box-connectors/microsoft-o365-mail.md) - Interactions with [Microsoft 365](https://outlook.office.com/mail/) mail from your BPMN process. -- [OpenAI Connector](/components/connectors/out-of-the-box-connectors/openai.md) - Interact with [ChatGPT](https://chat.openai.com/) and [OpenAI Moderation API](https://platform.openai.com/docs/guides/moderation/overview). -- [RabbitMQ Connector](/components/connectors/out-of-the-box-connectors/rabbitmq-outbound.md) - Send messages to [RabbitMQ](https://www.rabbitmq.com/) from your BPMN process. -- [Salesforce Connector](/components/connectors/out-of-the-box-connectors/salesforce.md) - Manage your Salesforce instance from your BPMN process. -- [SendGrid Connector](/components/connectors/out-of-the-box-connectors/sendgrid.md) - Quickly send emails from your BPMN processes. -- [Slack outbound Connector](/components/connectors/out-of-the-box-connectors/slack.md) - Send messages to channels or users in your [Slack](https://slack.com) workspace from your BPMN process. -- [Twilio Connector](/components/connectors/out-of-the-box-connectors/twilio.md) - Send and get SMS messages with [Twilio](https://www.twilio.com) service from your BPMN process. -- [UiPath Connector](/components/connectors/out-of-the-box-connectors/uipath.md) - Orchestrate your [UiPath](https://cloud.uipath.com) Bots with Camunda. -- [WhatsApp Connector](/components/connectors/out-of-the-box-connectors/whatsapp.md) - Send messages with [WhatsApp Business](https://business.whatsapp.com/) from your BPMN process. - - - - - -## Inbound Connectors - -- [Amazon EventBridge Webhook Connector](/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md) - Start a BPMN process instance triggered by an [Amazon EventBridge service event](https://aws.amazon.com/eventbridge/). -- [Amazon SNS inbound Connector](/components/connectors/out-of-the-box-connectors/amazon-sns.md) - Trigger your BPMN process with an [Amazon Simple Notification Service](https://aws.amazon.com/sns/) notification via HTTPS. -- [Amazon SQS Connector](/components/connectors/out-of-the-box-connectors/amazon-sqs.md) - Receive messages from [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) in your BPMN process. -- [GitHub Webhook Connector](/components/connectors/out-of-the-box-connectors/github.md) - Start a process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). -- [Kafka Consumer Connector](/components/connectors/out-of-the-box-connectors/kafka.md) - Consume messages from [Kafka](https://kafka.apache.org/) from your BPMN process. -- [RabbitMQ Consumer Connector](/components/connectors/out-of-the-box-connectors/rabbitmq-outbound.md) - Receive messages from [RabbitMQ](https://www.rabbitmq.com/) in your BPMN process. 
-- [Slack inbound Connector](/components/connectors/out-of-the-box-connectors/slack.md) - Trigger a [Slack](https://slack.com) bot to start a BPMN process with an event or a slash command -- [Twilio Webhook Connector](/components/connectors/out-of-the-box-connectors/twilio.md) - Start a process instance triggered by a [Twilio webhook](https://www.twilio.com/docs/usage/webhooks). Can be used as an intermediate Connector in existing processes. - - - - - -## Protocol Connectors - -- [GraphQL Connector](/components/connectors/protocol/graphql.md) - Execute a [GraphQL](https://graphql.org/) query or mutation from your BPMN process. -- [HTTP Webhook Connector](/components/connectors/protocol/http-webhook.md) - Start a process instance with your custom webhook configuration. -- [Polling Connector](/components/connectors/protocol/polling.md) - The HTTP Polling Connector polls an endpoint at regular intervals, enabling periodic data fetching as an intermediate step in your BPMN processes. -- [REST Connector](/components/connectors/protocol/rest.md) - Make a request to a REST API and use the response in the next steps of your process. - - - - - -In addition to this section on Connectors, we recommend reviewing [Connector secrets](/components/console/manage-clusters/manage-secrets.md). - -If you want to build **custom Connectors**, head over to our [Connector SDK guide](/components/connectors/custom-built-connectors/connector-sdk.md). diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/aws-lambda.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/aws-lambda.md deleted file mode 100644 index de1c9997e3e..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/aws-lambda.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: aws-lambda -title: AWS Lambda Connector -description: Invoke AWS Lambda functions with an outbound Connector. ---- - -The **AWS Lambda Connector** is an outbound Connector that allows you to connect your BPMN service with Amazon Web Service's [AWS Lambda Service](https://aws.amazon.com/lambda/) to invoke [AWS Lambda functions](https://aws.amazon.com/lambda/). - -## Prerequisites - -To use an **AWS Lambda Connector**, you need to have an [AWS Lambda Function](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html), IAM key, and secret pair with permissions for execute function. Refer to the [AWS Lambda developer guide](https://docs.aws.amazon.com/lambda/latest/dg/lambda-permissions.html) to learn more. - -:::note -Use Camunda secrets to avoid exposing your AWS IAM credentials as plain text. Refer to [manage secrets](components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create an AWS Lambda Connector task - -To use an **AWS Lambda Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Invoking your AWS Lambda function - -To make the **AWS Lambda Connector** executable, fill out the mandatory fields highlighted in red in the properties panel: - -1. Choose an applicable authentication type from the **Authentication** dropdown. Learn more about authentication types in the related [appendix entry](#aws-authentication-types). -2. 
Set the relevant IAM key and secret pair in the **Authentication** section. For example, `{{secrets.MY_AWS_ACCESS_KEY}}`. The value can be plain text, but this is not recommended due to security concerns. -3. Set the relevant AWS region in the **Authentication** section. Refer to the [Regions and Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) to learn more. -4. In the **Select Operation** section, the default option is set to synchronous invocation; an asynchronous invocation option is currently not available. Refer to [event-driven invocation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-services.html#event-driven-invocation) to learn more. -5. In the **Operation Details** section, fill out the field **Function name**. This field can be a [function URL](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html?icmpid=docs_lambda_help), [function ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html), function name, or alias. -6. (Optional) The **Payload** field in the **Operation Details** section is optional. This field requires FEEL input. Payload must be in JSON format as this is the data that will be processed by your Lambda function. - -## AWS Lambda Connector response - -The **AWS Lambda Connector** returns the HTTP status code, executed version, and payload (the response from the function, or an error object). -The following fields are available in the response variable: - -- `statusCode` - HTTP status code. -- `executedVersion` - Executed version of the Lambda function. -- `payload` - The response from the function, or an error object. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. For example: - -``` -= { - "myNewReportStatusCode": response.statusCode, - "myNewReportExecutedVersion": response.executedVersion, - "myNewReportPayload": response.payload -} -``` - -## Appendix - -### AWS authentication types - -There are two options to authenticate the Connector with AWS: - -- Choose **Credentials** in the **Authentication** dropdown if you have a valid pair of access and secret keys provided by your AWS account administrator. This option is applicable for both SaaS and Self-Managed users. -- Choose **Default Credentials Chain (Hybrid/Self-Managed only)** in the **Authentication** dropdown if your system is configured as an implicit authentication mechanism, such as role-based authentication, credentials supplied via environment variables, or files on target host. This option is applicable only for Self-Managed or hybrid distribution. This approach uses the [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html) to resolve required credentials. diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/azure-open-ai.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/azure-open-ai.md deleted file mode 100644 index fd84f196716..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/azure-open-ai.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: azure-open-ai -title: Azure OpenAI Connector -sidebar_label: Azure OpenAI Connector -description: Interact with Azure OpenAI from your BPMN process. 
---- - -The **Azure OpenAI Connector** is an outbound Connector that allows you to interact with -[Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) models from your BPMN processes. - -The **Azure OpenAI Connector** currently supports only prompt operations: -[`completions`](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#completions), -[`chat completions`](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions), and -[`completions extensions`](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#completions-extensions). - -Refer the [official models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models) -to find out if a desired model supports the operations mentioned. - -## Prerequisites - -To begin using the **Azure OpenAI Connector**, ensure you have created and deployed an Azure OpenAI resource. -A valid Azure OpenAI API key is also required. - -Learn more at the [official Azure OpenAI portal entry](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource). - -## Create an Azure OpenAI Connector task - -To use the **Azure OpenAI Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Azure OpenAI Connector executable - -To work with the **Azure OpenAI Connector**, fill all mandatory fields. - -## Authentication - -Fill the **API key** field with a valid Azure OpenAI API key. -[Learn more](https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line%2Cpython-new&pivots=rest-api#retrieve-key-and-endpoint) about obtaining a key. - -### Create a new Connector secret - -Keep your **API key** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (for example, `AZURE_OAI_SECRET`) so you can reference it later in the Connector. - -## Operation - -Select the desired operation from the **Operation** dropdown. -Fill in the **Resource name**, the **Deployment ID**, and the **API version** related to your operation. Ensure the -deployed [model](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models) supports the selected operation. - -### Completion, chat completion, and completion extension - -- For **completion** details, refer to the related [Microsoft reference documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#completions). -- For **chat completion** details, refer to the related [Microsoft reference documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions). -- For **completion extension** details, refer to the related [Microsoft reference documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#completions-extensions). - -## Handle Connector response - -The **Azure OpenAI Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**. Therefore, -handling response is still applicable [as described](/components/connectors/protocol/rest.md#response). 
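-
-For example, a **Result Expression** like the following is a minimal sketch that extracts only the assistant's reply into a process variable. It assumes the chat completions response shape shown in the usage example below; the variable name `answer` is illustrative, and FEEL list indexing is 1-based, so `choices[1]` refers to the first choice:
-
-```
-= {
-  answer: response.body.choices[1].message.content
-}
-```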
- -## Usage example - -### Chat completions - -Assume you have deployed a `gpt-35-turbo` model with the following URL: -`https://myresource.openai.azure.com/openai/deployments/mydeployment/completions?api-version=2024-02-01`, and created a -Connector secret with the name `AZURE_OAI_SECRET`. - -Consider the following input: - -- **API key**: `{{secrets.AZURE_OAI_SECRET}}` -- **Operation**: `Chat completion` -- **Resource name**: `myresource` -- **Deployment ID**: `mydeployment` -- **API version**: `2024-02-01` -- **Message role**: `User` -- **Message content**: `What is the age of the Universe?` -- **Message context**: `=[{"role": "system", "content": "You are helpful assistant."}]` -- Leave the rest of the params blank or default -- **Result variable**: `myOpenAIResponse` - -In the `myOpenAIResponse` you will find the following result: - -```json -{ - "status": 200, - "headers": { - ... - }, - "body": { - "choices": [ - { - "content_filter_results": { - "hate": { - "filtered": false, - "severity": "safe" - }, - "self_harm": { - "filtered": false, - "severity": "safe" - }, - "sexual": { - "filtered": false, - "severity": "safe" - }, - "violence": { - "filtered": false, - "severity": "safe" - } - }, - "finish_reason": "stop", - "index": 0, - "message": { - "content": "The age of the universe is estimated to be around 13.8 billion years. This age is determined through various scientific methods, such as measuring the cosmic microwave background radiation and studying the expansion rate of the universe.", - "role": "assistant" - } - } - ], - "created": "...", - "id": "...", - "model": "gpt-35-turbo", - "object": "chat.completion", - "prompt_filter_results": [ - { - "prompt_index": 0, - "content_filter_results": { - "hate": { - "filtered": false, - "severity": "safe" - }, - "self_harm": { - "filtered": false, - "severity": "safe" - }, - "sexual": { - "filtered": false, - "severity": "safe" - }, - "violence": { - "filtered": false, - "severity": "safe" - } - } - } - ], - "usage": { - "completion_tokens": 43, - "prompt_tokens": 24, - "total_tokens": 67 - } - } -} -``` diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/blueprism.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/blueprism.md deleted file mode 100644 index c9cd5962a08..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/blueprism.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -id: blueprism -title: Blue Prism Connector -description: Orchestrate your Blue Prism queue items from your BPMN process. ---- - -The **Blue Prism Connector** allows you to orchestrate a Blue Prism queue from your BPMN process with [Blue Prism RPA](https://www.blueprism.com/). - -To start using the Connector, you must have a running Blue Prism instance configured API service. Refer to the [official documentation page](https://bpdocs.blueprism.com/bp-7-1/en-us/Guides/bp-api/api-introduction.htm) to learn more about how to install and configure Blue Prism API service. - -## Create a Blue Prism Connector task - -To use a **Blue Prism Connector** in your process, either change the type of existing task using the wrench-shaped **Change type** context menu, or create a new Connector task using the **Append Connector** context menu. Follow [our guide on using Connectors](../use-connectors/index.md) to learn more. - -## Authentication - -You can choose among the available **Blue Prism Connector** authentication types according to your authentication requirements. 
- -### Bearer token authentication - -#### Create a new Connector secret - -We advise you to keep your **Bearer Token** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](../../console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `BLUE_PRISM_BEARER_TOKEN`) so you can reference it later in the Connector. - -#### Configure the bearer token - -Select the **Blue Prism Connector** and fill out the following properties under the **Authentication** section: - -1. Click **Bearer Token** in the **Authentication** section. -2. Set **Bearer** to the secret you created (i.e. `{{secrets.BLUE_PRISM_BEARER_TOKEN}}`). - -### OAuth Client Credentials Flow - -#### Create a new Connector secret - -We advise you to keep your **Client ID** and **Client secret** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](../../console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `BLUE_PRISM_CLIENT_ID`) so you can reference it later in the Connector. - -#### Configure the OAuth Token - -Select the **Blue Prism Connector** and fill out the following properties under the **Authentication** section: - -1. Select **OAuth 2.0 client credentials** in the **Authentication** section. -2. Set **Identity token provider URL** to identity provider configured for your Blue Prism instance. -3. Set **Client ID** to the secret you created (i.e. `{{secrets.BLUE_PRISM_CLIENT_ID}}`). -4. Set **Client secret** to the secret you created (i.e. `{{secrets.BLUE_PRISM_CLIENT_SECRET}}`). - -Find more information about the OAuth client credentials flow in the [RFC reference](https://www.rfc-editor.org/rfc/rfc6749#section-4.4). - -## Operation types - -The **Blue Prism Connector** currently supports two operation types in the **Operation type** dropdown list: _Get item from a queue by ID_ and _Create work queue item_. - -### Get item from a queue by ID - -This operation allows you to return details of a specified item from a work queue. -It matches directly to respective Blue Prism API endpoint - [`Return details of a specified item from a work queue`](https://bpdocs.blueprism.com/bp-7-1/en-us/api-spec-7-1-2.html#tag/Work-Queues/paths/~1api~1v7~1workqueues~1%7BworkQueueId%7D~1items~1%7BworkQueueItemId%7D/get). - -#### Usage - -1. Select **Get item from a queue by ID** from the **Operation** dropdown. -2. Populate **Authentication section** as described in the [respective section](#authentication). -3. In the **Configuration** section, set **Blue Prism API base URL** field. E.g., `http://my.bp.host.com:9876`. -4. In the **Input** section, set **Work queue ID**. This is the identifier of a queue, where the item is fetched from. -5. In the **Input** section, set **Queue item ID**. This is the identifier of the item to be fetched. - -#### Get item from a queue by ID response - -Given you have a queue item ID previously added to a queue, the operation **Get item from a queue by ID response** returns information about a certain item. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. It comes with a pre-filled value of `={itemState:response.body.state}`. You will see the `itemState` in the process variables. Its value will let you know if the item was processed or not. 
- -Response example: - -```json -{ - "id": "01234567-89ab-cdef-0123-456789abcdef", - "priority": 3, - "ident": 123, - "state": "Completed", - "keyValue": "Example value", - "status": "Example status", - "tags": ["Example tag 1", "Example tag 2"], - "attemptNumber": 1, - "loadedDate": "2020-10-02T12:34:56+01:00", - "deferredDate": "2020-10-02T12:34:56+01:00", - "lockedDate": "0001-01-01T00:00:00Z", - "completedDate": "2020-10-02T13:00:00+01:00", - "exceptionedDate": "0001-01-01T00:00:00Z", - "exceptionReason": "Example reason", - "lastUpdated": "2020-10-02T13:00:00+01:00", - "workTimeInSeconds": 123, - "attemptWorkTimeInSeconds": 123, - "resource": "Example resource", - "data": { - "rows": [] - }, - "sla": 7200, - "sladatetime": "0001-01-01T00:00:00Z", - "processname": "Example process name", - "issuggested": false -} -``` - -### Create work queue item - -This operation allows you to create work queue items in the specified queue. -It matches directly to respective Blue Prism API endpoint - [`Create work queue items`](https://bpdocs.blueprism.com/bp-7-1/en-us/api-spec-7-1-2.html#tag/Work-Queues/paths/~1api~1v7~1workqueues~1%7BworkQueueId%7D~1items~1batch/post). - -#### Usage - -1. Select **Create work queue item** from the **Operation** dropdown. -2. Populate the **Authentication section** as described in the [respective section](#authentication). -3. In the **Configuration** section, set **Blue Prism API base URL** field. E.g., `http://my.bp.host.com:9876`. -4. In the **Input** section, set **Work queue ID**. This is the identifier of a queue, where item will be fetched from. -5. In the **Input** section, set **Item type** of the data entry you wish to submit to the queue. -6. In the **Input** section, set **Item value** of the data entry you wish to submit to the queue. -7. In the **Input** section, set **Defer date**. This field is the earliest time and date that this item is deferred until. -8. In the **Input** section, set **Priority**. This field is the priority value assigned to the item. -9. In the **Input** section, set **Status**. This is the user-supplied status value. _Note: Do not confuse this with queue item 'state' property._ - -#### Create work queue item response - -The operation **Create work queue item** returns information about the newly created item in the queue. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. It comes with a pre-filled value of `={itemId:response.body.ids[1]}`. To use operation _Get queue item result by ID_, you need an `itemId`. This expression will add it in the context for you. Learn more in [get queue item result by ID](#get-item-from-a-queue-by-id). - -Response example: - -```json -{ - "ids": ["497f6eca-6276-4993-bfeb-53cbbbba6f08"] -} -``` - -### Using Blue Prism Connector best practice - -There is no guarantee a queue item will be processed right away. In that case, we suggest building your BPMN diagram to periodically retry polling. -To learn more, see an entry _Solution with Timer and Loop_ at [Camunda BPMN examples](https://camunda.com/bpmn/examples/) page. - -:::note -To avoid performance issues, it is recommended to limit the number of loop retries. 
-::: diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/easy-post.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/easy-post.md deleted file mode 100644 index 9f9a44c7bee..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/easy-post.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -id: easy-post -title: EasyPost Connector -sidebar_label: EasyPost Connector -description: Allows you to create addresses, parcels, and shipments, as well as purchase and verify shipments. ---- - -The **EasyPost Connector** is an outbound Connector that allows you to create addresses, parcels, and shipments, as well as purchase and verify shipments with [EasyPost Service](https://www.easypost.com/) from your BPMN process. - -## Prerequisites - -To use the **EasyPost Connector**, sign up for an EasyPost account, enter your carrier-specific credentials on the [Carrier Account Dashboard](https://www.easypost.com/account/carriers), and get your [API key](https://www.easypost.com/account/api-keys). - -:::note -Use Camunda secrets to avoid exposing your EasyPost API key as plain text. See our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create an EasyPost Connector task - -To use the **EasyPost Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Authentication - -In the **Authentication** section, provide an **API key**, which you can find in the [API Key dashboard](https://www.easypost.com/account/api-keys). - -## Select operation to execute - -The **EasyPost Connector** currently supports the following operations: - -### Create address - -Allows you to create an address, save it, and get an address ID to use in follow-up operations. -Set address information in the **Input** section. -See [address object documentation](https://www.easypost.com/docs/api#addresses) to learn more about address object and to see response examples. - -### Verify a created address - -Allows you to verify an address by **Address ID** and return verified address object. - -### Create a parcel - -Allows you to create a parcel, save it, and get a parcel ID to use in follow-up operations. -Set required properties in the **Input** section. -See the [parcel object documentation](https://www.easypost.com/docs/api#parcels) to learn more about the parcel object and to see response examples. - -### Create a shipment - -Allows you to create a shipment, save it, and get the shipment ID for use in follow-up operations. -Set required properties in the **Input** section: ID of destination address, ID of origin address, and ID of parcel. -See the [shipment object documentation](https://www.easypost.com/docs/api#shipments) to learn more about the shipment object and to see response examples. -In the **Output** section, the pre-filled **Result Expression** returns the ID of the shipment and the ID of [rate](https://www.easypost.com/docs/api#rates). - -FEEL expression: - -``` -{priorityRate: response.body.rates[item.service = "Priority"], shipmentId: priorityRate[1].shipment_id, rateId: priorityRate[1].id} -``` - -Response: - -``` -{ - "shipment_id": "shp...", - "rateId": "rate...." 
- }
-```
-
-### Buy a shipment
-
-Allows you to buy a shipment. Set required properties in the **Input** section: IDs of rate and shipment.
-See the [shipment object documentation](https://www.easypost.com/docs/api#buy-a-shipment) to learn more about the shipment object and to see response examples.
-In the **Output** section, the pre-filled **Result Expression** returns the ID of a tracker, the tracking code, and the status of the shipment.
-
-FEEL expression:
-
-```
-{trackerId: response.body.tracker.id, trackingCode: response.body.tracking_code, shipmentstatus: response.body.status}
-```
-
-Response:
-
-```
-{
-  "shipmentstatus": "shp...",
-  "trackerId": "trk....",
-  "trackingCode": "track...."
- }
-```
-
-### Retrieve a tracker by ID
-
-Allows you to retrieve a tracker by ID and get information about the status of the tracker.
-Set the required property in the **Input** section: the tracker ID (fetched after buying a shipment).
-See the [tracker object documentation](https://www.easypost.com/docs/api/java#trackers) to learn more about the tracker object and to see response examples.
-In the **Output** section, the pre-filled **Result Expression** returns the status of the tracker.
-
-FEEL expression:
-
-```
-{trackerStatus: response.body.status}
-```
-
-## Handle Connector response
-
-The **EasyPost Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**. Therefore,
-handling response is still applicable [as described](/components/connectors/protocol/rest.md#response).
diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/github.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/github.md
deleted file mode 100644
index b11c2ccc64b..00000000000
--- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/github.md
+++ /dev/null
@@ -1,305 +0,0 @@
----
-id: github
-title: GitHub Connector
-sidebar_label: GitHub Connector
-description: Manage GitHub issues and releases from your BPMN process.
----
-
-import Tabs from "@theme/Tabs";
-import TabItem from "@theme/TabItem";
-
-
-
-
-The **GitHub Connector** is an outbound Connector that allows you to connect your BPMN service with [GitHub](https://github.com/) to manage GitHub issues and releases.
-
-## Prerequisites
-
-To use the **GitHub Connector**, you must have a GitHub instance and an [access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) of a user or a service account on whose behalf a BPMN process will be executed.
-
-:::note
-Use Camunda secrets to avoid exposing your GitHub access token credentials as plain text.
-See our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more.
-:::
-
-## Create a GitHub Connector task
-
-To use the **GitHub Connector** in your process, either change the type of an existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more.
-
-## Authentication
-
-In the **Authentication** section, provide a **GitHub access token**.
-
-## Select operation to execute
-
-The **GitHub Connector** currently supports the following operations.
- -### Issues - -#### Create an issue - -- **GitHub API:** [Create an issue](https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#create-an-issue). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Title:** The title of the issue. -- **Body:** The contents of the issue. -- **Assignees:** Logins for users to assign to this issue. Only users with push access can set assignees for new issues. Assignees are silently dropped otherwise. -- **Labels:** Labels to associate with this issue. Only users with push access can set labels for new issues. Labels are silently dropped otherwise. -- **Milestone:** The number of the milestone to associate this issue with or use null to remove the current milestone. Only users with push access can set the milestone for issues. Without push access to the repository, milestone changes are silently dropped. - -#### Get an issue - -- **GitHub API:** [Get an issue](https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#get-an-issue). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Issue number:** The number that identifies the issue. - -#### Update an issue - -- **GitHub API:** [Update an issue](https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#update-an-issue). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Issue number:** The number that identifies the issue. -- **Title:** The title of the issue. -- **Body:** The contents of the issue. -- **Assignees:** Logins for users to assign to this issue. Only users with push access can set assignees for new issues. Assignees are silently dropped otherwise. -- **Labels:** Labels to associate with this issue. Only users with push access can set labels for new issues. Labels are silently dropped otherwise. -- **Milestone:** The number of the milestone to associate this issue with or use null to remove the current milestone. Only users with push access can set the milestone for issues. Without push access to the repository, milestone changes are silently dropped. -- **Issue state:** The open or closed state of the issue. Can be open or closed. -- **State reason:** The reason for the state change. Ignored unless state is changed. Can be one of: completed, not_planned, reopened, null. - -#### Create an issue comment - -- **GitHub API:** [Create an issue comment](https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#create-an-issue-comment). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Issue number:** The number that identifies the issue. -- **Body:** The contents of the comment. - -#### Search issue - -- **GitHub API:** [Search issue](https://docs.github.com/en/rest/search?apiVersion=2022-11-28#search-issues-and-pull-requests). -- **Query:** The query contains one or more search keywords and qualifiers. Qualifiers allow you to limit your search to specific areas of GitHub. The REST API supports the same qualifiers as the web interface for GitHub. To learn more about the format of the query, see [constructing a search query](https://docs.github.com/en/rest/search?apiVersion=2022-11-28#constructing-a-search-query). 
See [searching issues and pull requests](https://docs.github.com/en/search-github/searching-on-github/searching-issues-and-pull-requests) for a detailed list of qualifiers. - -#### List commits - -- **GitHub API:** [List commits](https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28#list-commits). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. - -### Branches - -#### List branches - -- **GitHub API:** [List branches](https://docs.github.com/en/rest/branches/branches?apiVersion=2022-11-28#list-branches). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. - -#### Get a branch - -- **GitHub API:** [Get a branch](https://docs.github.com/en/rest/branches/branches?apiVersion=2022-11-28#get-a-branch). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Branch:** The name of the branch. Cannot contain wildcard characters. To use wildcard characters in branch names, use the GraphQL API. - -#### Merge a branch - -- **GitHub API:** [Merge a branch](https://docs.github.com/en/rest/branches/branches?apiVersion=2022-11-28#merge-a-branch). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Base:** The name of the base branch that the head will be merged into. -- **Head:** The head to merge. This can be a branch name or a commit SHA1. - -### Code scanning - -#### List code scanning alerts for an organization - -- **GitHub API:** [List code scanning alerts for an organization](https://docs.github.com/en/rest/code-scanning?apiVersion=2022-11-28#list-code-scanning-alerts-for-an-organization). -- **Organization name:** The organization name. The name is not case-sensitive. - -#### List code scanning alerts for a repository - -- **GitHub API:** [List code scanning alerts for a repository](https://docs.github.com/en/rest/code-scanning?apiVersion=2022-11-28#list-code-scanning-alerts-for-a-repository). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. - -### Organization - -#### Create an organization invitation - -- **GitHub API:** [Create an organization invitation](https://docs.github.com/en/rest/orgs/members?apiVersion=2022-11-28#create-an-organization-invitation). -- **Organization name:** The organization name. The name is not case-sensitive. - -### Release - -#### Create a release - -- **GitHub API:** [Create a release](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#create-a-release). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Body:** Text describing the contents of the tag. -- **Tag name:** The name of the tag. -- **Release name:** The name of the release. -- **Make latest:** Specifies whether this release should be set as the latest release for the repository. Drafts and pre-releases cannot be set as latest. Defaults to true for newly published releases. Legacy specifies that the latest release should be determined based on the release creation date and higher semantic version. Default: true. 
Can be one of: true, false, legacy. - -#### Update a release - -- **GitHub API:** [Update a release](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#update-a-release). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Release id:** The unique identifier of the release. -- **Body:** Text describing the contents of the tag. -- **Tag name:** The name of the tag. -- **Release name:** The name of the release -- **Make latest:** Specifies whether this release should be set as the latest release for the repository. Drafts and pre-releases cannot be set as latest. Defaults to true for newly published releases. Legacy specifies that the latest release should be determined based on the release creation date and higher semantic version. Default: true. Can be one of: true, false, legacy. - -#### Delete a release - -- **GitHub API:** [Delete a release](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#delete-a-release). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Release id:** The unique identifier of the release. - -#### List releases - -- **GitHub API:** [List releases](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#list-releases). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. - -### Repository - -#### List organization repositories - -- **GitHub API:** [List organization repositories](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#list-organization-repositories). -- **Organization name:** The organization name. The name is not case-sensitive. - -#### Create an organization repository - -- **GitHub API:** [Create an organization repository](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#create-an-organization-repository). -- **Organization name:** The organization name. The name is not case-sensitive. -- **Repository name:** The organization name. The name is not case-sensitive. -- **Description:** A short description of the repository. -- **Home page:** A URL with more information about the repository. -- **Visibility:** The visibility of the repository. Can be one of: public, private. - -#### Get a repository - -- **GitHub API:** [Get a repository](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#get-a-repository). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. - -#### Update a repository - -- **GitHub API:** [Update a repository](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#update-a-repository). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Repository name:** The organization name. The name is not case-sensitive. -- **Description:** A short description of the repository. -- **Home page:** A URL with more information about the repository. -- **Visibility:** The visibility of the repository. Can be one of: public, private. - -#### Delete a repository - -- **GitHub API:** [Delete a repository](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#delete-a-repository). 
-- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. - -#### List repository contributors - -- **GitHub API:** [List repository contributors](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#list-repository-contributors). -- **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. - -## Handle Connector response - -The **GitHub Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**, therefore -handling response is still applicable [as described](/components/connectors/protocol/rest.md#response). - - - - - -The **GitHub Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). - -## Create a GitHub Webhook Connector task - -1. Start building your BPMN diagram. You can use GitHub Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. -2. Select the applicable element and change its template to a GitHub Webhook. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the webhook. -6. Navigate to the **Webhooks** tab in the properties panel to observe the webhook URL. - -## Make your GitHub Webhook Connector for receiving messages executable - -1. In the **Webhook Configuration** section, configure the **Webhook ID**. By default, **Webhook ID** is pre-filled with a random value. This value will be part of the Webhook URL. You will find more details about GitHub Webhook URLs [below](#activate-the-github-webhook-connector-by-deploying-your-diagram). -2. Set the **GitHub secret**. This is a shared secret key that has to be defined in both your BPMN and GitHub webhook configuration page. The value is used to calculate HMAC authentication signature. -3. Configure **Activation Condition**. For example, given GitHub triggers a webhook endpoint with a new PR payload `{"action": "opened", "pull_request": ...}`, the **Activation Condition** value might look like as `=(request.body.action = "opened")`. Leave this field empty to trigger your webhook every time. -4. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -5. Use **Result Expression** to map specific fields from the response into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). - For example, given that the GitHub webhook is triggered with the body `{"pull_request": {"id": 123}}` and you would like to extract the pull request `id` as a process variable `pullRequestId`, the **Result Expression** might look like this: - -``` -= { - pullRequestId: request.body.pull_request.id -} -``` - -6. If you are using the GitHub Webhook Connector with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - - - **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. - - **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message. 
- - - For example, given that your correlation key is defined with `pullRequestId` process variable, and the request body contains `{"pull_request": {"id": 123}}`, your correlation key settings will look like this: - - **Correlation key (process)**: `=pullRequestId` - - **Correlation key (payload)**: `=request.body.pull_request.id` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -## Activate the GitHub Webhook Connector by deploying your diagram - -Once you click the **Deploy** button, your GitHub Webhook will be activated and publicly available. - -URLs of the exposed GitHub Webhooks adhere to the following pattern: - -`http(s)://<base URL>/inbound/<webhook ID>` - -- `<base URL>` is the URL of Connectors component deployment. When using the Camunda 8 SaaS offering, this will typically contain your cluster region and cluster ID. -- `<webhook ID>` is the ID (path) you configured in the properties of your GitHub Webhook Connector. - -If you make changes to your GitHub Webhook Connector configuration, you need to redeploy the BPMN diagram for the changes to take effect. - -When you click on the event with GitHub Webhook Connector applied to it, a new **Webhooks** tab will appear in the properties panel. -This tab displays the URL of the GitHub Webhook Connector for every cluster where you have deployed your BPMN diagram. - -:::note -The **Webhooks** tab is only supported in Web Modeler as part of the Camunda 8 SaaS offering. -You can still use GitHub Webhook Connector in Desktop Modeler, or with your Camunda 8 Self-Managed. -In that case, GitHub Webhook Connector deployments and URLs will not be displayed in Modeler. -::: - -## Configure GitHub - -1. Ensure you have administrator rights for the repository where you wish to enable a webhook. -2. Open a repository in your web browser and navigate to the **Settings** page. -3. Click **Webhooks > Add webhook**. -4. Fill out the required fields: - 1. **Payload URL** - URL of your webhook. - 2. **Content type** - Select `application/json`. - 3. **Secret** - Shared secret between GitHub and your BPMN diagram. -5. Confirm by clicking **Add webhook**. - -Refer to the [GitHub documentation](https://docs.github.com/en/rest/webhooks) for more details. - -## Next steps - -- Learn more about [GitHub webhooks](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/gitlab.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/gitlab.md deleted file mode 100644 index ec566be6952..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/gitlab.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: gitlab -title: GitLab Connector -sidebar_label: GitLab Connector -description: Manage GitLab issues and releases from your BPMN process. Learn about creating a GitLab Connector task and get started. ---- - -The **GitLab Connector** is an outbound Connector that allows you to connect your BPMN service with [GitLab](https://about.gitlab.com/) to manage GitLab issues and releases. - -## Prerequisites - -To use the **GitLab Connector**, you must have a GitLab instance and an [access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) of a user -or a service account on whose behalf a BPMN process will be executed. - -:::note -Use Camunda secrets to avoid exposing your GitLab access token credentials as plain text. 
-See our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create a GitLab Connector task - -To use the **GitLab Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Add endpoint and authentication - -In the **HTTP Endpoint** section, provide a **GitLab base URL**, i.e. `https://gitlab.mycorp.com`, and a **GitLab access token**. - -## Select operation to execute - -The **GitLab Connector** currently supports the following operations. - -### Issues - -#### Get an issue by ID - -- **GitLab API:** [Single project issue](https://docs.gitlab.com/ee/api/issues.html#single-project-issue). -- **Project ID:** The global ID or URL-encoded path of the project owned by the authenticated user. -- **Issue ID:** The internal ID of a project’s issue. - -#### Create an issue - -- **GitLab API:** [New issue](https://docs.gitlab.com/ee/api/issues.html#new-issue). -- **Project ID:** The global ID or URL-encoded path of the project owned by the authenticated user. -- **Title:** The title of an issue. -- **Description:** The description of an issue. - -#### Delete an issue - -- **GitLab API:** [Delete an issue](https://docs.gitlab.com/ee/api/issues.html#delete-an-issue). -- **Project ID:** The global ID or URL-encoded path of the project owned by the authenticated user. -- **Issue ID:** The internal ID of a project’s issue. - -#### Comment to an issue - -- **GitLab API:** [Create a new issue note](https://docs.gitlab.com/ee/api/notes.html#create-new-issue-note). -- **Project ID:** The global ID or URL-encoded path of the project owned by the authenticated user. -- **Issue ID:** The internal ID of a project’s issue. -- **Note text:** The content of a note. -- **Level of confidentiality:** Indicates if an issue has to be marked as **internal** or not. - -#### Search issues - -- **GitLab API:** [List issues](https://docs.gitlab.com/ee/api/issues.html#list-issues). -- **Scope:** Return issues for the given scope: **Created by me**, **Assigned to me**, or **all**. -- **State:** Return all issues or just those that are **opened** or **closed**. -- **Assignee ID:** Return issues assigned to the given user ID. Mutually exclusive with **Assignee username**. **None** returns unassigned issues. **Any** returns issues with an assignee. -- **Assignee username:** Return issues assigned to the given username. Similar to **Assignee ID** and mutually exclusive with **Assignee ID**. -- **Author ID:** Return issues created by the given user ID. -- **Contains text:** Search issues against their **Title** and **Description**. - -### Releases - -#### List all releases by a project ID - -- **GitLab API:** [List releases](https://docs.gitlab.com/ee/api/releases/#list-releases). -- **Project ID:** The global ID or URL-encoded path of the project owned by the authenticated user. - -#### Get release by a tag name - -- **GitLab API:** [Get a release by a tag name](https://docs.gitlab.com/ee/api/releases/#get-a-release-by-a-tag-name). -- **Project ID:** The global ID or URL-encoded path of the project owned by the authenticated user. -- **Tag name:** The Git tag the release is associated with. 
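Because the GitLab Connector is built on top of the HTTP REST Connector (see the **Handle Connector response** section below), you can map fields of the returned release into process variables with a **Result Expression**. The following FEEL sketch assumes the field names of the GitLab Releases API payload (`name`, `tag_name`, `released_at`) and uses illustrative process variable names; adjust it to the response you actually receive:

```
= {
  releaseName: response.body.name,
  releaseTag: response.body.tag_name,
  releasedAt: response.body.released_at
}
```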
- -#### Create a release - -- **GitLab API:** [Create a release](https://docs.gitlab.com/ee/api/releases/#create-a-release). -- **Project ID:** The global ID or URL-encoded path of the project owned by the authenticated user. -- **Tag name:** The tag where the release is created from. -- **Ref:** A commit SHA, another tag name, or a branch name. -- **Release name:** The release name. -- **Description:** The description of the release. - -## Handle Connector response - -The **GitLab Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**, therefore -handling response is still applicable [as described](/components/connectors/protocol/rest.md#response). diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/google-maps-platform.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/google-maps-platform.md deleted file mode 100644 index 6d7b3f2064a..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/google-maps-platform.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -id: google-maps-platform -title: Google Maps Platform Connector -sidebar_label: Google Maps Platform Connector -description: Learn how to validate addresses, retrieve postal addresses, and calculate distances with Google Maps Platform Connector. ---- - -The **Google Maps Platform Connector** is an outbound Connector that allows you to validate addresses, retrieve postal addresses, and calculate distances with [Google Maps Platform Service](https://mapsplatform.google.com/) in a BPMN process. - -## Create a Google Maps Platform Connector task - -To use a **Google Maps Platform Connector** in your process, either change the type of existing task using the wrench-shaped **Change type** context menu or create a new Connector task by using the **Append Connector** context menu. Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Google Maps Platform Connector executable - -To work with the Google Maps Platform Connector, choose the required operation type in the **Operation** section and enable the required Google Service API (which depends on the operation). Set the API key in the **Authentication** section and complete the mandatory fields highlighted in red in the Connector properties panel. - -:::note -All mandatory and non-mandatory fields, along with the required settings for each operation, are covered in the upcoming sections. -::: - -## Authentication - -In the **Authentication** section, set the relevant API key. See the [official documentation](https://cloud.google.com/docs/authentication/api-keys#create) for more information on creating an API key. - -:::note -We advise you to keep your authentication and secrets data safe and avoid exposing it in the BPMN XML file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (for example, `GOOGLE_MAPS_PLATFORM_API_KEY`) so you can reference it later in the Connector. - -::: - -## Operation types - -### Validate address - -This operation allows you to validate an address and its components, standardize the address for mailing, and determine the best known geocode for it. -To use this operation, enable the [Google Address Validation API](https://developers.google.com/maps/documentation/address-validation/overview). 
See the [official documentation](https://developers.google.com/maps/documentation/address-validation/cloud-setup) for more information on enabling the Google API. - -### Get place ID - -This operation allows you to get the Google Maps place ID by address. -To use this operation, enable the [Google Places API](https://developers.google.com/maps/documentation/places/web-service). See the [official documentation](https://developers.google.com/maps/documentation/places/web-service/get-api-key) for more information on enabling the Google API. - -### Calculate distance - -This operation allows you to calculate a distance between two place IDs. -To use this operation, enable the [Google Directions API](https://developers.google.com/maps/documentation/directions). See the [official documentation](https://developers.google.com/maps/documentation/directions/get-api-key) for more information on enabling the Google API. - -## Usage - -### Address validation, formatting, getting postal address - -1. Select **Validate Address** from the **Operation type** dropdown in the **Operation** section. -2. Populate the **Authentication** section as described in the [respective section](#authentication). -3. (Optional) In the **Input** section, set **Region Code** (for example, `US`). You can find supported region codes [here](https://developers.google.com/maps/documentation/address-validation/coverage). -4. (Optional) In the **Input** section, set **Locality**, an incorporated city or town political entity (for example, `Mountain View`). -5. In the **Input** section, set **Address**, the street address to validate (for example, `1600 Amphitheatre Pkwy`). -6. In the **Output** section, set **Result Variable** or **Result Expression**. See the [response mapping documentation](/docs/components/connectors/use-connectors/index.md#response-mapping) to learn more. -7. Find a full example of the **Google Maps Platform Connector** response [here](https://developers.google.com/maps/documentation/address-validation/requests-validate-address#address_validation_response). To get the postal address and formatted address, set the following FEEL expression as the **Result Expression**: - -``` -{ - formattedAddress: response.body.result.address.formattedAddress, - postalAddress: response.body.result.address.postalAddress -} -``` - -### Get place ID - -1. Select **Get Place ID** from the **Operation type** dropdown in the **Operation** section. -2. Populate the **Authentication** section as described in the [respective section](#authentication). -3. In the **Input** section, set **Address**. This address can be the `formattedAddress` value, which you can get using [this example](#address-validation-formatting-getting-postal-address). -4. In the **Output** section in the **Result Expression** property, the following expression is preset: - -``` -{ - placeId: response.body.candidates[1].place_id -} -``` - -In this way, the response of this method will contain a mapping from the variable `placeId` to the ID of the place: - -```json -{ - "placeId": "place....." -} -``` - -### Calculate distance - -1. Select **Calculate Distance** from the **Operation type** dropdown in the **Operation** section. -2. Populate the **Authentication** section as described in the [respective section](#authentication). -3. In the **Input** section, set **Destination**, the place ID value that you want to use as the destination for calculating distance. -4. In the **Input** section, set **Origin**, the place ID value that you want to use as the starting point for calculating distance. -5. 
Select the unit system to use when displaying results from the **Units** dropdown in the **Input** section. -6. Select the transportation mode to use when calculating distances and directions from the **Mode** dropdown in the **Input** section. -7. In the **Output** section, set **Result Variable** or **Result Expression**. See the [response mapping documentation](/docs/components/connectors/use-connectors/index.md#response-mapping) to learn more. -8. Find a full example of the **Google Maps Platform Connector** response [here](https://developers.google.com/maps/documentation/directions/start#getting-directions). To get a distance, set the following FEEL expression as the **Result Expression**: - -``` -{ - distance: response.body.routes[1].legs[1].distance.text -} -``` - -## Using Google Maps Platform Connector best practice - -There is no guarantee a queue item will be processed right away. In that case, we suggest building your BPMN diagram to periodically retry polling. -To learn more, see the entry titled _Solution with Timer and Loop_ on the [Camunda BPMN examples](https://camunda.com/bpmn/examples/) page. - -:::note -To avoid performance issues, it is recommended to limit the number of loop retries. -::: diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/google-sheets.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/google-sheets.md deleted file mode 100644 index 4e6a6076692..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/google-sheets.md +++ /dev/null @@ -1,355 +0,0 @@ ---- -id: google-sheets -title: Google Sheets Connector -sidebar_label: Google Sheets Connector -description: Use the Google Sheets Connector to connect your BPMN service with Google Sheets. ---- - -The **Google Sheets Connector** is an outbound Connector that allows you to work with an existing or new spreadsheet -on [Google Drive](https://drive.google.com/) from your BPMN process. - -## Prerequisites - -To start working with the **Google Sheets Connector**, a relevant OAuth token must be configured and stored as a secret -in your cluster. The token must have permission to read/write and create a file from a desired Google Drive instance. -Follow the steps from the [appendix](#appendix--faq) to find out more about creating an OAuth token and assigning relevant -permissions. - -## Create a Google Sheets Connector task - -Currently, the Google Sheets Connector supports the following operations: - -- [Add values to spreadsheet](#add-values-to-spreadsheet) -- [Create empty column or row](#create-empty-column-or-row) -- [Create row](#create-row) -- [Create spreadsheet](#create-spreadsheet) -- [Create worksheet](#create-worksheet) -- [Delete column](#delete-column) -- [Delete worksheet](#delete-worksheet) -- [Get row by index](#get-row-by-index) -- [Get spreadsheet details](#get-spreadsheet-details) -- [Get worksheet data](#get-worksheet-data) - -To use a **Google Sheets Connector** in your process, either change the type of existing task by clicking on it and -using the wrench-shaped **Change type** context menu icon or create a new Connector task by using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Google Sheets Connector executable - -To make the **Google Sheets Connector** executable, fill out the mandatory fields highlighted in red in the properties -panel. 
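Each operation described below returns a response that you can map into process variables. As a sketch, assuming the output-mapping approach used by the other outbound Connectors in this document (a **Result Expression** evaluated against the `response` variable) and using the `spreadsheetId` and `spreadsheetUrl` fields documented for the [Create spreadsheet](#create-spreadsheet) operation, the mapping could look like this; the variable names on the left are illustrative:

```
= {
  createdSpreadsheetId: response.spreadsheetId,
  createdSpreadsheetUrl: response.spreadsheetUrl
}
```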
- -### Add values to spreadsheet - -To add values to a spreadsheet, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Add values to spreadsheet**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, in which a new - value will be added. -4. _(optional)_ In the **Operation details** section, set the field **Worksheet name** to the desired worksheet, in - which a new value will be added. Keep in mind that if not specified, a new value will be added to the first available - worksheet in the desired spreadsheet. -5. In the **Operation details** section, set the field **Cell ID** to the desired cell, in which a new value - will be added. Use the format ColumnRow (for example, A1). -6. In the **Operation details** section, set the field **Value** to the desired value, which will be added in the - desired cell. - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Add values to Spreadsheet**. -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, there will be an error message. -- `response` - The response of the operation. In this case, it will always be **null**. - -### Create empty column or row - -To create an empty column or row, take the following steps: - -1. Set the required credentials in the **Authentication** section. See - the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Create empty column or row**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, in which new - columns/rows will be added. -4. In the **Operation details** section, set the field **Worksheet ID** to the desired worksheet, in which new - columns/rows will be added. -5. In the **Operation details** section, select the **Dimension** that will be added. -6. _(optional)_ In the **Operation details** section, set both the **Start index** and **End index** fields, in - which new columns/rows will be added. Keep in mind that **count starts from 0**. It's possible to leave these fields - empty. In this case, a new column/row will be added at the end of the desired worksheet. - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Create empty column or row**. -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. -- `response` - The response of the operation. In this case, it will always be **null**. - -### Create row - -To create a row, take the following steps: - -1. Set the required credentials in the **Authentication** section. See - the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Create row**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, in which a new row - will be added. -4. _(optional)_ In the **Operation details** section, set the field **Worksheet name** to the desired worksheet, in - which a new row will be added. 
Keep in mind that if not specified, a new row will be added to the first available - worksheet in the desired spreadsheet. -5. In the **Operation details** section, set the field **Row index** to the desired row index, where a new row will be - added. See the [relevant appendix entry](#what-is-a-row-index) to find out more. -6. In the **Operation details** section, set the field **Enter values** to the desired values, which will be added. This - property requires [FEEL input](../../../components/modeler/feel/language-guide/feel-expressions-introduction.md). - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Create row**. -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. -- `response` - The response of the operation. In this case, it will always be **null**. - -### Create spreadsheet - -To create a spreadsheet, take the following steps: - -1. Set the required credentials in the **Authentication** section. See - the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Create spreadsheet**. -3. _(optional)_ In the **Operation details** section, set the field **Parent folder ID** to the desired parent, inside - which a new spreadsheet will be created. Keep in mind that if not specified, a new spreadsheet will be created in the - Google Drive root folder of a user who owns the OAuth token. -4. In the **Operation details** section, set the field **Spreadsheet name** to the desired spreadsheet name. - -#### Operation response - -The following fields are available in the response variable: - -- `spreadsheetId` - ID of the newly created spreadsheet. -- `spreadsheetUrl` - Human-readable URL of the newly created spreadsheet. - -### Create worksheet - -To create a worksheet, take the following steps: - -1. Set the required credentials in the **Authentication** section. See - the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Create worksheet**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, in which a new - worksheet will be created. -4. In the **Operation details** section, set the field **Worksheet name** to the desired worksheet name. -5. _(optional)_ In the **Operation details** section, set the field **Worksheet index** to the desired index, in which a - new worksheet will be created. See the [relevant appendix entry](#what-is-a-worksheet-index) to find out more. - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Create worksheet**. -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. -- `response` - The response of the operation. In this case, it will always be **null**. - -### Delete column - -To delete a column, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Delete column**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, in which a column - will be deleted. -4. 
In the **Operation details** section, set the field **Worksheet ID** to the desired worksheet ID, in which a column - will be deleted. -5. In the **Operation details** section, select **Index format** of desired index of column to be deleted. See - the [relevant appendix entry](#how-can-i-define-which-column-will-be-deleted) to find out more. -6. In the **Operation details** section, set the **Column letter index** to the desired column index. - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Delete column**. -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. -- `response` - The response of the operation. In this case, it will always be **null**. - -### Delete worksheet - -To delete a worksheet, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Delete worksheet**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, in which a worksheet will be deleted. -4. In the **Operation details** section, set the field **Worksheet ID** to the desired worksheet ID, which will be deleted. - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Delete worksheet**. -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. -- `response` - The response of the operation. In this case, it will always be **null**. - -### Get row by index - -To get row by index, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Get row by index**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, from which a row - will be retrieved. -4. _(optional)_ In the **Operation details** section, set the field **Worksheet name** to the desired worksheet, from - which a row will be retrieved. Keep in mind that if not specified, a row will be retrieved from the first available - worksheet in the desired spreadsheet. -5. In the **Operation details** section, set the field **Row index** to the desired row index. See - the [relevant appendix entry](#what-is-a-row-index) to find out more. - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Get row by index**. -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. -- `response` - The response of the operation. If row is empty, the response is **null**, else **array**. - -### Get spreadsheet details - -To get spreadsheet details, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Get spreadsheet details**. -3. 
In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, which details will be returned. - -#### Operation response - -The response contains spreadsheet properties. For details, read the [official Google documentation](https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#spreadsheetproperties). - -### Get worksheet data - -To get worksheet data, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select operation** section, select **Get worksheet data**. -3. In the **Operation details** section, set the field **Spreadsheet ID** to the desired spreadsheet, from which data will be retrieved. -4. In the **Operation details** section, set the field **Worksheet name** to the desired worksheet, from which data will be retrieved. - -#### Operation response - -The following fields are available in the response variable: - -- `action` - The action performed. In this case, it will always be **Get worksheet data** -- `status` - The status of the operation. If successful, it will always be "OK". Otherwise, it will be an error message. -- `response` - The response of the operation. If the worksheet is empty, the response is **null**, else **array of rows (also array)**. - -## Appendix & FAQ - -### How can I authenticate my Connector? - -The **Google Sheets Connector** currently supports two methods for authentication and authorization: based on -short-lived JWT bearer token, and based on refresh token. - -Google supports multiple ways to obtain both. Refer to -the [official Google OAuth documentation](https://developers.google.com/identity/protocols/oauth2) to get up-to-date -instructions or see the examples below. - -You also enable _Google Sheets API_ and _Google Drive API_ for every client intended to use. You can do this from -the [Google Cloud API Library](https://console.cloud.google.com/apis/library). - -#### Example 1: Obtaining JWT bearer token with a service account - -:::warning -The following code snippet is for demonstration purposes only and must not be used for real production systems due to -security concerns. -For production usage, follow -the [official Google guidelines](https://developers.google.com/identity/protocols/oauth2/service-account). -::: - -Assuming you have created a service account and downloaded a JSON file with keys, run the following Python 3 snippet -that prints the JWT token in the terminal: - -```python -import google.auth -import google.auth.transport.requests -from google.oauth2 import service_account -# Scopes required to execute 'create' endpoind with Google Sheets API -SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.appdata'] -# File with keys -SERVICE_ACCOUNT_FILE = 'google-service-account-creds.json' -credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES) -auth_req = google.auth.transport.requests.Request() -credentials.refresh(auth_req) -# Print token -print(credentials.token) -``` - -#### Example 2: Obtaining bearer and refresh tokens with OAuth client - -:::warning -The following code snippet is for demonstration purposes only and must not be used for real production systems due to -security concerns. 
-For production usage, follow -the [official Google guidelines](https://developers.google.com/identity/protocols/oauth2/web-server). -::: - -Assuming you have created an OAuth client, you can download key files from the -Google [Console](https://console.cloud.google.com/apis/credentials). Run the following Python 3 snippet that prints the -refresh token in the terminal: - -```python -from google_auth_oauthlib.flow import InstalledAppFlow -import pprint - -SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/spreadsheets'] -OAUTH_KEYS = './oauth-keys.json' # path to your file with OAuth credentials - -def main(): - flow = InstalledAppFlow.from_client_secrets_file(OAUTH_KEYS, SCOPES) - creds = flow.run_local_server(port=54948) - pprint.pprint(vars(creds)) - -if __name__ == "__main__": - main() -``` - -### Where do I get a spreadsheet ID? - -The spreadsheet ID is located within the URL of the Google Sheets document. Here's how to find it: - -For example, if the URL looks like this: - -``` -https://docs.google.com/spreadsheets/d/1xhNL0a6WjZtYRHF2522FrCYUYxHve9ju-DHNkaTm9Sk/edit#gid=0 -``` - -The spreadsheet ID is the alphanumeric string between `d/` and `/edit`, which in this case is `1xhNL0a6WjZtYRHF2522FrCYUYxHve9ju-DHNkaTm9Sk`. - -### Where do I get a worksheet ID? - -The Worksheet ID (also known as the gid) can be found in the same URL. Here's how to find it: - -For example, if the URL looks like this: - -``` -https://docs.google.com/spreadsheets/d/1xhNL0a6WjZtYRHF2522FrCYUYxHve9ju-DHNkaTm9Sk/edit#gid=0 -``` - -The worksheet ID is the number after `#gid=`, which in this case is `0`. - -### What is a worksheet index? - -You can define the place where a new worksheet will be created. By default, the new worksheet will be created at the end -of the spreadsheet. Keep in mind, **count starts from 0**. For instance, to create a new worksheet in the second -position, the worksheet index should be set to **1**. - -### What is a row index? - -The row index is the unique identifier of each row in a worksheet, and is used for both reading and writing operations -on rows. This index is displayed to the left of the row. - -### How can I define which column will be deleted? - -There are two ways to define which column will be deleted: by letter index or by numeric index. The letter index can be found -at the top of the column. - -The other option is to use the numeric index. Keep in mind **count starts from 0**. To delete column A, the -numeric index should be 0, B -> 1, and so on. diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/googledrive.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/googledrive.md deleted file mode 100644 index 624fe37046c..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/googledrive.md +++ /dev/null @@ -1,285 +0,0 @@ ---- -id: googledrive -title: Google Drive Connector -description: Create folders or files from a Google Drive template from your BPMN process. ---- - -The **Google Drive Connector** is an outbound Connector that allows you to create empty folders or files on [Google Drive](https://drive.google.com/) from templates from your BPMN process. - -## Prerequisites - -To start working with the **Google Drive Connector**, a relevant OAuth token must be configured and stored as a secret in your cluster. The token must have permission to read and create a folder and/or files from a desired Google Drive instance. 
Follow the steps from the [appendix](#appendix--faq) to find out more about creating an OAuth token and giving relevant permissions. - -## Create a Google Drive Connector task - -Currently, the Google Drive Connector supports two types of operations: create a folder and create a file from a template. - -To use a **Google Drive Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon or create a new Connector task by using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Google Drive Connector executable - -To make the **Google Drive Connector** executable, fill out the mandatory fields highlighted in red in the properties panel. - -### Create a new folder - -To create a new folder, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select Operation** section, set the field value **Operation Type** as **Create Folder**. -3. In the **Operation Details** section, set the field **Folder name** as the desired name of a folder you wish to create. For example, `MyNewFolder`. Alternatively, you could use a FEEL expression. -4. _(optional)_ In the **Operation Details** section, set the field **Parent folder ID** to the desired parent, inside which a new folder will be created. Keep in mind that if not specified, a new folder will be created in the Google Drive root folder of a user who owns the OAuth token. -5. _(optional)_ In the **Operation Details** section, you can set the **Additional properties or metadata** field to Google Drive compatible properties. For example, _description_ of the folder. This property requires FEEL input. Check [the appendix](#what-are-the-limitations-of-the-additional-properties-or-metadata) for known values and limitations. - -### Create a new file from a template - -To create a new file from a template, take the following steps: - -1. Set the required credentials in the **Authentication** section. See the [relevant appendix entry](#how-can-i-authenticate-my-connector) to find out more. -2. In the **Select Operation** section, set the field value **Operation Type** as **Create File from template**. -3. In the **Operation Details** section, set the field **File name** as the desired name of a folder you wish to create. You can use FEEL expressions here. -4. In the **Operation Details** section, set the field **Template ID** of the desired template. -5. _(optional)_ In the **Operation Details** section, set the field **Parent folder ID** to the desired parent, inside which a new file will be created. Keep in mind that if not specified, a new folder will be created in the Google Drive root folder of a user who owns the OAuth token. -6. In the **Operation Details** section, set the field **Template variables** as desired variables that will be applied to the template. The template variables are compatible with the Google Docs [Requests API](https://developers.google.com/docs/api/reference/rest/v1/documents/request). This property requires FEEL input. -7. _(optional)_ In the **Operation Details** section, you can set the **Additional properties or metadata** field to Google Drive compatible properties. This property requires FEEL input. 
Check [the appendix](#what-are-the-limitations-of-the-additional-properties-or-metadata) for known values and limitations. - -## Google Drive Connector response - -The **Google Drive Connector** exposes Google Drive API response as a local variable called response. -The following fields are available in the response variable: - -- `googleDriveResourceId` - ID of the newly created resource. -- `googleDriveResourceUrl` - Human-readable URL of the newly created resource. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. For example: - -``` -= { - "myNewReportFolderId": response.googleDriveResourceId, - "myNewReportFolderUrl": response.googleDriveResourceUrl -} -``` - -## Appendix & FAQ - -### What Google API does the Google Drive Connector use to create a folder? - -The **Google Drive Connector** uses the Google Drive [`Files:Create`](https://developers.google.com/drive/api/v3/reference/files/create) API endpoint. - -### What Google API does the Google Drive Connector use to create a file from template? - -The **Google Drive Connector** uses the Google Drive [`Files:Copy`](https://developers.google.com/drive/api/v3/reference/files/copy) API endpoint to copy an original template. Afterwards, the **Google Drive Connector** utilizes Google Docs [Merge](https://developers.google.com/docs/api/how-tos/merge) approach via [`Documents:BatchUpdate`](https://developers.google.com/docs/api/reference/rest/v1/documents/batchUpdate) Google Docs API method. - -### How can I authenticate my Connector? - -The **Google Drive Connector** currently supports two methods for authentication and authorization: based on short-lived JWT bearer token, and based on refresh token. - -Google supports multiple ways to obtain both. Refer to the [official Google OAuth documentation](https://developers.google.com/identity/protocols/oauth2) to get up-to-date instructions or see the examples below. - -You also enable _Google Docs API_ and _Google Drive API_ for every client intended to use. You can do this at the [Google Cloud API Library](https://console.cloud.google.com/apis/library) page. - -#### Example 1: Obtaining JWT bearer token with a service account - -:::warning -The following code snippet is for demonstration purposes only and must not be used for real production systems due to security concerns. -For production usage, follow the [official Google guidelines](https://developers.google.com/identity/protocols/oauth2/service-account). 
-::: - -Assuming you have created a service account and downloaded a JSON file with keys, run the following Python 3 snippet that prints the JWT token in the terminal: - -```python -import google.auth -import google.auth.transport.requests -from google.oauth2 import service_account -# Scopes required to execute 'create' endpoind with Google Drive API -SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.appdata'] -# File with keys -SERVICE_ACCOUNT_FILE = 'google-service-account-creds.json' -credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES) -auth_req = google.auth.transport.requests.Request() -credentials.refresh(auth_req) -# Print token -print(credentials.token) -``` - -#### Example 2: Obtaining bearer and refresh tokens with OAuth client - -:::warning -The following code snippet is for demonstration purposes only and must not be used for real production systems due to security concerns. -For production usage, follow the [official Google guidelines](https://developers.google.com/identity/protocols/oauth2/web-server). -::: - -Assuming you have created an OAuth client, you can download key files from the Google [Console](https://console.cloud.google.com/apis/credentials). Run the following Python 3 snippet that prints the refresh token in the terminal: - -```python -from google_auth_oauthlib.flow import InstalledAppFlow -import pprint - -SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/documents'] -OAUTH_KEYS = './oauth-keys.json' # path to your file with OAuth credentials - -def main(): - flow = InstalledAppFlow.from_client_secrets_file(OAUTH_KEYS, SCOPES) - creds = flow.run_local_server(port=54948) - pprint.pprint(vars(creds)) - -if __name__ == "__main__": - main() -``` - -### Where do I get a parent folder ID? - -To find the Parent Folder ID for a Google Drive folder, follow these steps: - -1. Open the Google Drive folder in your web browser. -2. Look at the URL in the address bar, which should look something like this: - ``` - https://docs.google.com/drive/folder/1whNL0a6WjZtYRHF2522FrCYUYxHve9ju-DHNkaTm9Sk - ``` -3. The Parent Folder ID is the alphanumeric string after `/folders/`, which in this case is `1whNL0a6WjZtYRHF2522FrCYUYxHve9ju-DHNkaTm9Sk`. - -### How do I set additional properties or metadata? - -You can set any property from the Google Drive [Create API](https://developers.google.com/drive/api/v3/reference/files/create). - -For example: - -``` -= { - "description":"myDescription" -} -``` - -The unknown or mistyped properties will be ignored. - -### What are the limitations of the additional properties or metadata? - -Some properties are applicable only for the token owners, like `folderColorRgb` and `starred`. - -### Where do I get Template ID? - -To find the Template ID for a Google Docs template, follow these steps: - -1. Open the link to the Google Docs template in your web browser. The URL should look something like this: - ``` - https://docs.google.com/document/d/1whNL0a6WjZtYRHF2522FrCYUYxHve9ju-DHNkaTm9Sk - ``` -2. The Template ID is the alphanumeric string after `/d/`, which in this case is `1whNL0a6WjZtYRHF2522FrCYUYxHve9ju-DHNkaTm9Sk`. - -### Can you show me an example of a valid template? - -Certainly! Here is an example of a valid template: - -```text - {{CompanyName}} confidential. -{{DocumentDate}} - -{{RecipientFullName}} -{{RecipientAddress}} - - -Dear {{RecipientShortName}}! 
- -We are pleased to inform you that your application {{ApplicationNumber}} has been approved. - -Sincerely, -{{SigneeName}}, Executive Director - -``` - -Now, in the **Template variables** field we can apply the following FEEL JSON object which must be compatible with the Google Docs Requests API: - -``` -= { - "requests":[ - { - "replaceAllText":{ - "containsText":{ - "text":"{{DocumentDate}}", - "matchCase":"true" - }, - "replaceText":today() - } - }, - { - "replaceAllText":{ - "containsText":{ - "text":"{{RecipientFullName}}", - "matchCase":"true" - }, - "replaceText":"John W. Doe" - } - }, - { - "replaceAllText":{ - "containsText":{ - "text":"{{RecipientAddress}}", - "matchCase":"true" - }, - "replaceText":"Zweibrückenstraße 1845, 80000 Munich" - } - }, - { - "replaceAllText":{ - "containsText":{ - "text":"{{RecipientShortName}}", - "matchCase":"true" - }, - "replaceText":"Mr. Doe" - } - }, - { - "replaceAllText":{ - "containsText":{ - "text":"{{ApplicationNumber}}", - "matchCase":"true" - }, - "replaceText":"0123456789" - } - }, - { - "replaceAllText":{ - "containsText":{ - "text":"{{SigneeName}}", - "matchCase":"true" - }, - "replaceText":"Jane T. Doe" - } - }, - { - "replaceAllText":{ - "containsText":{ - "text":"{{CompanyName}}", - "matchCase":"true" - }, - "replaceText":"Good Company Inc." - } - } - ] -} -``` - -The result should be as follows: - -```text - Good Company inc. confidential. -2022-08-10 - -John W. Doe -Zweibrückenstraße 1845, 80000 Munich - - -Dear Mr. Doe! - -We are pleased to inform you that your application 0123456789 has been approved. - -Sincerely, -Jane T. Doe, Executive Director - -``` - -### What kind of templates are currently supported? - -The **Google Drive Connector** currently supports only Google Doc files (MIME type `application/vnd.google-apps.document`). diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/hugging-face.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/hugging-face.md deleted file mode 100644 index 8382248bcd2..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/hugging-face.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -id: hugging-face -title: Hugging Face Connector -sidebar_label: Hugging Face Connector -description: Interact with Hugging Face models from your BPMN process. ---- - -The **Hugging Face Connector** is an outbound Connector that allows you to interact with -[Hugging Face](https://huggingface.co/) models from your BPMN processes. - -## Prerequisites - -To begin using the **Hugging Face Connector**, you need to have a valid -[API key](https://huggingface.co/docs/api-inference/quicktour#get-your-api-token), -and a model deployed with [Inference API](https://huggingface.co/docs/api-inference/index). - -## Create a Hugging Face Connector task - -To use the **Hugging Face Connector** in your process, either change the type of existing task by clicking on it -and using the wrench-shaped **Change type** context menu icon, or create a new Connector task using the -**Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Hugging Face Connector executable - -To work with the **Hugging Face Connector**, fill all mandatory fields. - -## Authentication - -Fill the **Hugging Face API key** field with a valid Hugging Face API key. 
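Rather than pasting the raw key into the field, you can reference it as a Camunda secret, as described in the next subsection. For example, assuming you stored the key in a secret named `HUGGING_FACE_SECRET` (the same example name used later on this page), the field value would be:

```
{{secrets.HUGGING_FACE_SECRET}}
```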
- -### Create a new Connector secret - -Keep your **API key** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (for example, `HUGGING_FACE_SECRET`) so you can reference it later in the Connector. - -## Payload - -In the **Model** field, enter a model name you wish to use in your BPMN process, for example, `gpt2` if you wish to use -the [GPT2 model](https://huggingface.co/openai-community/gpt2). - -In the **Input** field, enter input parameters for your model, for example, `{"inputs":"What is the Capital of Germany?"}`. - -## Handle Connector response - -The **Hugging Face Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**. Therefore, -handling response is still applicable [as described](/components/connectors/protocol/rest.md#response). - -## Usage example - -Let's assume you want to use the [BART (large-sized model), fine-tuned on CNN Daily Mail](https://huggingface.co/facebook/bart-large-cnn) model, -and created the `HUGGING_FACE_SECRET` secret containing your Hugging Face API key. - -Consider the following input: - -- **Hugging Face API key**: `{{secrets.HUGGING_FACE_SECRET}}` -- **Model**: `facebook/bart-large-cnn` -- **Input**: - -```json -{ - "inputs": "Java is a high-level, class-based, object-oriented programming language that is designed to have as few implementation dependencies as possible. It is a general-purpose programming language intended to let programmers write once, run anywhere (WORA), meaning that compiled Java code can run on all platforms that support Java without the need to recompile. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of the underlying computer architecture. The syntax of Java is similar to C and C++, but has fewer low-level facilities than either of them. The Java runtime provides dynamic capabilities (such as reflection and runtime code modification) that are typically not available in traditional compiled languages. As of March 2024, Java 22 is the latest version. Java 8, 11, 17, and 21 are previous LTS versions still officially supported.", - "parameters": { "max_length": 75, "temperature": 10 }, - "options": { "use_cache": "false" } -} -``` - -- **Result variable**: `myHuggingFaceResponse`. - -In the `myHuggingFaceResponse` you will find the following result: - -```json -{ - "status":200, - "headers":{ -... - }, - "body":[ - { - "summary_text":" Java is a high-level, class-based, object-oriented programming language. It is intended to let programmers write once, run anywhere. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of the underlying computer architecture. As of March 2024, Java 22 is the latest version." - } - ] -} -``` diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/kafka.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/kafka.md deleted file mode 100644 index 0756cae99bf..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/kafka.md +++ /dev/null @@ -1,388 +0,0 @@ ---- -id: kafka -title: Kafka Connector -sidebar_label: Kafka Connector -description: The Kafka Producer Connector allows you to connect your BPMN service with Kafka. Learn how to create a Kafka Producer Connector and make it executable. 
---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -The **Kafka Producer Connector** is an outbound Connector that allows you to connect your BPMN service with [Kafka](https://kafka.apache.org/) to produce messages. - -## Prerequisites - -To use the **Kafka Producer Connector**, you need to have a Kafka instance with configured bootstrap server. -Use Camunda secrets to avoid exposing your sensitive data as plain text. Follow our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. - -## Create a Kafka Producer Connector task - -To use the **Kafka Producer Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Kafka Producer Connector for publishing messages executable - -To make your **Kafka Producer Connector** for publishing messages executable, take the following steps: - -1. (Optional) Set the relevant credentials in the **Authentication** section. For example, `{{secrets.MY_KAFKA_USERNAME}}`. See the relevant [appendix section](#what-mechanism-is-used-to-authenticate-against-kafka) to find more about Kafka secure authentication. -2. In the **Kafka** section, select the serialization type for your messages. Choose **Default (JSON)** for JSON serialization or **Avro (experimental)** for Avro serialization. [Read more about Kafka Avro serialization](#avro-serialization). -3. In the **Kafka** section, set the URL of bootstrap server(s); comma-separated if more than one server required. -4. In the **Kafka** section, set the topic name. -5. (Optional) In the **Kafka** section, fill out the field **Headers** to set producer configuration values. Only `UTF-8` strings are supported as header values. -6. (Optional) In the **Kafka** section, fill out the field **Additional properties** to set producer configuration values. See the list of supported configurations at the [official Kafka documentation page](https://kafka.apache.org/documentation/#producerconfigs). Also check preconfigured values for the **Kafka Producer Connector** in the relevant [appendix section](#what-are-default-kafka-producer-client-properties). -7. In the **Message** section, set the **Key** and the **Value** that will be sent to Kafka topic. -8. (Optional for **Avro (experimental)**) In the **Avro schema** field, input the schema that defines the message structure. Ensure this schema is in your Avro schema registry. - -## Avro serialization - -:::note -Use Avro serialization with caution, as this is an experimental feature. Functionality may not be comprehensive and could change. -::: - -The **Kafka Producer Connector** supports Avro serialization, which offers a compact, fast, and binary data exchange format for Kafka messages. Avro relies on schemas for serialization and deserialization. When using Avro, each message is serialized according to a specific schema written in JSON format. This schema defines the structure of the Kafka message, ensuring the data conforms to a predefined format and enabling schema evolution strategies. 
- -For more detailed information on Kafka Avro serialization, you may refer to the [official Kafka documentation](https://kafka.apache.org/documentation/#serialization) and [official Apache Avro documentation](https://avro.apache.org/docs/). - -### Example Avro schema and data - -#### Avro schema: - -```json -{ - "doc": "Sample schema to help you get started.", - "fields": [ - { - "name": "name", - "type": "string" - }, - { - "name": "age", - "type": "int" - }, - { - "name": "emails", - "type": { - "items": "string", - "type": "array" - } - } - ], - "name": "sampleRecord", - "namespace": "com.mycorp.mynamespace", - "type": "record" -} -``` - -#### Kafka message - -- **Key** : `employee1` -- **Value** : - -```json -{ - "name": "John Doe", - "age": 29, - "emails": ["johndoe@example.com"] -} -``` - -## Kafka Producer Connector response - -The **Kafka Producer Connector** returns metadata for a record that has been acknowledged by the Kafka instance. - -The following fields are available in the `response` variable: - -- `timestamp`: the timestamp of the message -- `offset`: message offset -- `partition`: message partition -- `topic`: topic name - -You can read about these fields at the [official Kafka documentation page](https://kafka.apache.org/documentation/#intro_concepts_and_terms). - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. For example: - -``` -= { - "messageAcknowledgedAt": response.timestamp -} -``` - -## Appendix & FAQ - -### What mechanism is used to authenticate against Kafka? - -If the fields **Username** and **Password** are not empty, by default the **Kafka Producer Connector** enables the credentials-based SASL SSL authentication and the following properties are set: - -``` -sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='' password=''; -security.protocol=SASL_SSL -sasl.mechanism=PLAIN -``` - -If any of the field is not populated, you have to configure your security method in respect to your Kafka configuration. You can do so via the field **Additional properties**. - -### What are default Kafka Producer client properties? - -- Authentication properties (only if both **Username** and **Password** are not empty): - -``` -sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='' password=''; -security.protocol=SASL_SSL -sasl.mechanism=PLAIN -``` - -- Bootstrap server property: - -``` -bootstrap.servers= -``` - -- Message properties: - -``` -key.serializer=org.apache.kafka.common.serialization.StringSerializer -value.serializer=org.apache.kafka.common.serialization.StringSerializer -``` - -- Miscellaneous properties: - -``` -session.timeout.ms=45000 -client.dns.lookup=use_all_dns_ips -acks=all -delivery.timeout.ms=45000 -``` - -### What is the precedence of client properties loading? - -Properties loading consists of three steps: - -1. Construct client properties from the BPMN diagram: authentication, bootstrap server, message properties. -2. Load miscellaneous properties. -3. Load and **override** properties from the field **Additional properties**. - -### How do I set or override additional client properties? 
- -The following example sets a new client property `client.id` and overrides SASL mechanism to SCRAM SHA-256 instead of plain text: - -``` -= { - "client.id":"MyDemoClientId", - "sasl.mechanism":"SCRAM-SHA-256" -} -``` - - - - - -The **Kafka Consumer Connector** allows you to consume messages by subscribing to [Kafka](https://kafka.apache.org/) topics and map them your BPMN processes as start or intermediate events. - -## Prerequisites - -To use the **Kafka Consumer Connector**, you need to have a Kafka instance with configured bootstrap server. -Use Camunda secrets to avoid exposing your sensitive data as plain text. Follow our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. - -## Create a Kafka Consumer Connector task - -1. Add a **Start Event** or an **Intermediate Event** to your BPMN diagram to get started. -2. Change its template to a Kafka Consumer. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the Kafka consumer. - -## Make your Kafka Consumer Connector executable - -To make your **Kafka Consumer Connector** executable, take the following steps: - -1. In the **Authentication** section, select the **Authentication type**. -2. (If you selected _Credentials_ as the **Authentication type**) In the **Authentication** section, set the relevant credentials. For example, `{{secrets.MY_KAFKA_USERNAME}}`. Refer to the relevant [appendix section](#what-mechanism-is-used-to-authenticate-against-kafka) to find more about Kafka secure authentication. -3. In the **Kafka** section, select the serialization type for your messages. Choose **Default (JSON)** for JSON serialization or **Avro (experimental)** for Avro serialization. [Read more about Kafka Avro serialization](#avro-serialization). -4. In the **Kafka** section, set the URL of bootstrap server(s); comma-separated if more than one server required. -5. In the **Kafka** section, set the topic name. -6. (Optional) In the **Kafka** section, fill out the field **Additional properties** to set consumer configuration values. See the list of supported configurations at the [official Kafka documentation page](https://kafka.apache.org/documentation/#consumerconfigs). Additionally, check preconfigured values for the **Kafka Consumer Connector** in the relevant [appendix section](#what-are-default-kafka-consumer-client-properties). -7. In the **Kafka** section, you can set the **Offsets** for the partition. The number of offsets specified should match the number of partitions on the current topic. -8. In the **Kafka** section, you can set the **Auto offset reset** which tells the Connector what strategy to use when there is no initial offset in Kafka or if the specified offsets do not exist on the server. -9. (For **Avro (experimental)**) In the **Message deserialization** section, input the schema that defines the message structure into the **Avro schema** field. -10. In the **Activation** section, you can set the **Activation Condition**. Based on this condition, we either start a process instance or do nothing if the condition is not met. For example, `=(value.itemId = "a4f6j2")`. Leave this field empty to trigger your webhook every time. - -When using the **Kafka Consumer Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. 
This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message. - -For example, given that your correlation key is defined with `myCorrelationKey` process variable, and the value contains `"value":{"correlationKey":"myValue"}`, your correlation key settings will look like this: - -- **Correlation key (process)**: `=myCorrelationKey` -- **Correlation key (payload)**: `=value.correlationKey` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -### Example Avro schema and data - -If the expected Kafka message looks like this: - -#### Kafka message - -- **Key** : `employee1` -- **Value** : - -```json -{ - "name": "John Doe", - "age": 29, - "emails": ["johndoe@example.com"] -} -``` - -Then the corresponding Avro schema to describe this message's structure would be: - -#### Avro schema: - -```json -{ - "doc": "Sample schema to help you get started.", - "fields": [ - { - "name": "name", - "type": "string" - }, - { - "name": "age", - "type": "int" - }, - { - "name": "emails", - "type": { - "items": "string", - "type": "array" - } - } - ], - "name": "sampleRecord", - "namespace": "com.mycorp.mynamespace", - "type": "record" -} -``` - -This schema defines a structure for a record that includes a name (string), an age (integer), and emails (an array of strings), aligning with the given Kafka message's value format. - -## Activate the Kafka Consumer Connector by deploying your diagram - -Once you click the **Deploy** button, your Kafka Consumer will be activated and starts consuming messages from the specified topic. - -## Kafka Consumer Connector response - -The **Kafka Consumer Connector** returns the consumed message. - -The following fields are available in the `response` variable: - -- `key`: The key of the message. -- `value`: The value of the message. -- `rawValue`: The value of the message as a JSON string. - -You can use an output mapping to map the response: - -1. Use **Result variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result expression** to map fields from the response into process variables. For example: - -``` -= { - "itemId": value.itemId -} -``` - -## Appendix & FAQ - -### What mechanism is used to authenticate against Kafka? - -If you selected _Credentials_ as **Authentication type** and the fields **Username** and **Password** are not empty, by default the **Kafka Consumer Connector** enables the credentials-based SASL SSL authentication and the following properties are set: - -``` -sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='' password=''; -security.protocol=SASL_SSL -sasl.mechanism=PLAIN -``` - -If any of the field is not populated, you must configure your security method in respect to your Kafka configuration. You can do so via the field **Additional properties**. - -### What are default Kafka Consumer client properties? 
- -- Authentication properties (only if both **Username** and **Password** are not empty): - -``` -sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='' password=''; -security.protocol=SASL_SSL -sasl.mechanism=PLAIN -``` - -- Bootstrap server property: - -``` -bootstrap.servers= -``` - -- Message properties: - -``` -key.deserializer=org.apache.kafka.common.serialization.StringDeserializer -value.deserializer=org.apache.kafka.common.serialization.StringDeserializer -``` - -- Miscellaneous properties: - -``` -session.timeout.ms=45000 -client.dns.lookup=use_all_dns_ips -acks=all -group.id=kafka-inbound-connector-{{bpmnProcessId}} -enable.auto.commit=false -``` - -### What is the precedence of client properties loading? - -Properties loading consists of three steps: - -1. Construct client properties from the BPMN diagram: authentication, bootstrap server, message properties. -2. Load miscellaneous properties. -3. Load and **override** properties from the field **Additional properties**. - -### How is the message payload deserialized? - -Kafka messages usually use JSON format, therefore we first try to deserialize it as a `JsonElement`. If this fails (e.g. because of wrong format) we use the `String` representation of the original raw value. For convenience, we always store the original raw value as `String` in a different attribute. - -The deserialized object structure: - -``` -{ - key: "String" - rawValue: "String" - value: {} -} -``` - -### When is the offset committed? What happens if the Connector execution fails? - -The following outcomes are possible: - -- If Connector execution is successful and **Activation condition** was met, the offset is committed. -- If **Activation condition** was not met, the offset is also committed to prevent consuming the same message twice. -- If Connector execution fails due to an unexpected error (e.g. Zeebe is unavailable), the offset is not committed. - -### What lifecycle does the Kafka Consumer Connector have? - -The Kafka Consumer Connector is a long-running Connector that is activated when the process is deployed and deactivated when the process is un-deployed or overwritten by a new version. - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/microsoft-o365-mail.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/microsoft-o365-mail.md deleted file mode 100644 index 5e63d0e2646..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/microsoft-o365-mail.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -id: microsoft-o365-mail -title: Microsoft 365 Connector -sidebar_label: Microsoft 365 Connector -description: Send and read Microsoft 365 emails from your BPMN process. ---- - -The **Microsoft 365 Connector** is an outbound Connector that allows you to connect your BPMN service with [Microsoft 365](https://outlook.office.com/mail/) mail to send, read e-mails, and manage folders. - -## Prerequisites - -- To use the **Microsoft 365 Connector**, you must have a [Microsoft 365](https://outlook.office.com/mail/) mail instance. -- You might also need to have sufficient access rights at [Microsoft Entra](https://entra.microsoft.com) to create a new app; - set [Microsoft Graph](https://developer.microsoft.com/en-us/graph) permissions and assign an app to u user. - -Learn more about [creating, configuring, and authorizing Microsoft App](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app). 
- -:::note -Use Camunda secrets to avoid exposing your Microsoft credentials as plain text. -Refer to our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create a Microsoft 365 Connector task - -To use the **Microsoft 365 Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Access control - -Each operation requires permissions to be assigned by a system administrator. Learn more about [Microsoft permissions](https://learn.microsoft.com/en-us/entra/identity-platform/permissions-consent-overview). - -### Bearer token authentication - -If you own a bearer token, in the **Authentication** section select a **Bearer token** in the **Type** field. -Enter a bearer token in the field **Bearer token**. Use [Camunda secrets](/components/console/manage-clusters/manage-secrets.md) to avoid exposing sensitive credentials. - -:::note -Default TTL for bearer tokens is 3600 seconds. Therefore, this approach might not work for long-living and/or repetitive processes. -::: - -### OAuth2 client credentials flow authentication - -:::note -In the client credential flow, an application gets access to all accounts associated with the organization. -For example, if an app has permissions `Mail.Read`, it will be able to read emails of all users. -::: - -To proceed with this step, you'll need the following data: - -- OAuth 2.0 token endpoint -- Client ID (Application ID) -- Client secret; can be created on your application page - -The app must be assigned to a user. - -If you own a bearer token, in the **Authentication** section select a **OAuth 2.0** in the **Type** field. -Enter the above data into the respective fields. - -Learn more about [creating, configuring, and authorizing Microsoft App](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app). - -## Select operation to execute - -Select the desired operation from the **Operations** section. - -### Get user folders - -Related Microsoft Graph API: [user: list mailFolders](https://learn.microsoft.com/en-us/graph/api/user-list-mailfolders) - -1. Enter user's email or system UUID to fetch all their folders in the **User ID** field. -2. You can also pass [OData parameters](https://learn.microsoft.com/en-us/graph/query-parameters?tabs=http) in the **Query parameters** field. - -For example, if you wish to pass an OData `$top` URL parameter, execute the following: - -```json -{ - "$top": 10 -} -``` - -### Create mail folder for a user - -Related Microsoft Graph API: [user: create mailFolder](https://learn.microsoft.com/en-us/graph/api/user-post-mailfolders) - -1. Enter user's email or system UUID to fetch all their folders in the **User ID** field. -2. In the **Request** section, enter a **Folder display name** string value. - -### Get user messages - -Related Microsoft Graph API: [user: list messages](https://learn.microsoft.com/en-us/graph/api/user-list-messages) - -1. Enter user's email or system UUID to fetch all their folders in the **User ID** field. -2. You can also pass [OData parameters](https://learn.microsoft.com/en-us/graph/query-parameters?tabs=http) in the **Query parameters** field. 
- -For example, if you wish to pass an OData `$top` URL parameter, execute the following: - -```json -{ - "$top": 10 -} -``` - -### Send mail on behalf of a user - -Related Microsoft Graph API: [user: sendMail](https://learn.microsoft.com/en-us/graph/api/user-sendmail) - -1. Enter user's email or system UUID to fetch all their folders in the **User ID** field. -2. In the **Request** section, enter a **Subject** string value. -3. Select **Body content type** from the dropdown. -4. Enter desired content in the field **Body content**. -5. Pass an array of emails into the **To recipients** field, for example `["myuser1@mycompany.com", "myuser2@mycompany.com"]`. -6. (Optional) Pass an array of emails into the **CC recipients** field, for example `["myuser3@mycompany.com", "myuser4@mycompany.com"]`. - -## Handle Connector response - -The **Microsoft 365 Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**, therefore -[handling response is still applicable](/components/connectors/protocol/rest.md#response). diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/microsoft-teams.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/microsoft-teams.md deleted file mode 100644 index 90b3fc9931a..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/microsoft-teams.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -id: microsoft-teams -title: Microsoft Teams Connector -sidebar_label: Microsoft Teams Connector -description: Work with Microsoft Teams from your BPMN process using the Microsoft Teams Connector. Learn about authentication, conversation type and method, and more. ---- - -The **Microsoft Teams Connector** is an outbound Connector that allows you to connect your BPMN process with [Microsoft Teams](https://www.microsoft.com/microsoft-teams/) to manage interactions. - -## Prerequisites - -To use the **Microsoft Teams Connector**, you need to have a [Microsoft Teams](https://www.microsoft.com/microsoft-teams/) account and -relevant [permissions](https://support.microsoft.com/en-us/office/manage-team-settings-92d238e6-0ae2-447e-af90-40b1052c4547) -or the registered application in the [Azure Active Directory](https://aad.portal.azure.com/) (visit [how to register the app](https://learn.microsoft.com/en-us/graph/auth-register-app-v2) for more information) alongside -the relevant [Microsoft Graph API permissions](https://learn.microsoft.com/en-us/graph/permissions-reference). - -Some methods can use [protected Microsoft Teams APIs](https://learn.microsoft.com/en-us/graph/teams-protected-apis). Read more on [how to request access to protected APIs](https://learn.microsoft.com/en-us/graph/teams-protected-apis#request-access-to-protected-apis). - -:::note -Use Camunda secrets to store credentials so you don't expose sensitive information directly from the process. See [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create a Microsoft Teams Connector task - -To use the **Microsoft Teams Connector** in your process, either change the type of existing task by clicking on it and -using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. 
- -## Make your Microsoft Teams Connector executable - -To work with Microsoft Teams, choose the required connection type in the **Authentication** section and complete the -mandatory fields highlighted in red in the connector properties panel: - -:::note -All the mandatory and non-mandatory fields depending on the authentication selection you choose are covered in the -upcoming sections. -::: - -## Authentication - -Choose among the available Microsoft Teams Connectors according to your authentication requirements. -The Microsoft Teams Connector uses the [Microsoft Graph API](https://learn.microsoft.com/en-us/graph/overview). Visit the [Microsoft Graph auth overview](https://learn.microsoft.com/en-us/graph/auth/) for more information on the Microsoft Graph API authentication. - -First, you must have a user account with Microsoft Teams with the necessary permissions. See more at -in [Microsoft Teams overview](https://learn.microsoft.com/microsoftteams/teams-overview). If you don't have administration roles and permissions, ask your Microsoft Teams administrator to add required permissions to work with the **Microsoft Teams Connector**. - -Next, you will choose the type of connection. - -### Bearer Token type authentication - -For a **Bearer Token** type authentication, take the following steps: - -1. Click the **Bearer Token** connection type in the **Authentication** section. -2. Set **Bearer Token** to `Bearer Token`. - -Visit [Microsoft Teams Access Token](https://learn.microsoft.com/azure/active-directory/develop/access-tokens) for more information. - -#### Options to obtain an access token - -- Via the Graph Explorer: - - 1. Visit [developer.microsoft.com/graph/graph-explorer](https://developer.microsoft.com/graph/graph-explorer). - 2. Log in with your Microsoft account. - 3. Click the **Access Token** tab and copy the bearer token. - -- Register your app with the Microsoft identity platform and send a POST request to the `/token` identity platform endpoint to acquire an access token. - - [How to register your app](https://learn.microsoft.com/en-us/graph/auth-register-app-v2) - - [How to get access on behalf of a user](https://learn.microsoft.com/en-us/graph/auth-v2-user) - - [How to get access without a user](https://learn.microsoft.com/en-us/graph/auth-v2-service) - -### Refresh Token type authentication - -For a **Refresh Token** type authentication, take the following steps: - -1. Click the **Refresh Token** connection type in the **Authentication** section. -2. Set **Refresh Token** to `Refresh Token`. Read more on [how to get a refresh token](https://learn.microsoft.com/en-us/graph/auth-v2-user). -3. Set **Tenant id** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. Read more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). -4. Set the **Client ID** field: the application ID that the [Azure app registration portal](https://go.microsoft.com/fwlink/?linkid=2083908) assigned to your app. -5. Set the **Secret ID** field: the client secret that you created in the app registration portal for your app. - -### Client credentials type authentication - -For a **Client credentials** type authentication, take the following steps: - -1. Click the **Client credentials** connection type in the **Authentication** section. -2. Set **Tenant id** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. 
See more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). -3. Set the **Client ID** field: the application ID that the [Azure app registration portal](https://go.microsoft.com/fwlink/?linkid=2083908) assigned to your app. -4. Set the **Secret ID** field: the client secret that you created in the app registration portal for your app. - -:::note -With **Client credentials** type authentication, some methods of the **Microsoft Teams Connector** may not be available. Find more details in the [chat methods table](#chat-methods) and [channel methods table](#channel-methods). -::: - -## Conversation type and method - -In the **Operation** section, choose a conversation type of either **Chat** or **Channel**. Then, choose one of the suggested methods. - -For example, if you want to send a message in a Microsoft Teams channel, choose the conversation type **Channel** and method **Send message in channel**. - -## Data section - -### Chat conversation type - -#### Properties - -| Property | Methods | Required | Type | Description | -| :-------------: | :---------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Chat ID | Get chat by id
    Send message in chat
    List messages in chat
    Get message in chat
    List chat members | Yes | string | Microsoft Teams chat id | -| Content | Send message in chat | Yes | text | Content that will be sent to chat | -| Content Type | Send message in chat | Yes | dropdown | Content type of body message | -| Chat type | Create a new chat | Yes | dropdown | Click **one on one** to create a one-on-one chat or **group** to create a group chat. | -| Topic | Create a new chat | No | string | Topic of chat | -| Members | Create a new chat | Yes | FEEL expression | See [members property](#members-property) to learn more. | -| Top | List messages in chat | No | numbers | Controls the number of items per response; maximum allowed top value is 50. | -| Order by | List messages in chat | Yes | dropdown | Can order by 'lastModifiedDateTime' and 'createdDateTime'. | -| Expand response | Get chat by id | Yes | dropdown | Choose | -| Filter | List messages in chat | No | string | Sets the date range filter for the lastModifiedDateTime and createdDateTime properties. [Learn more about filtering](https://learn.microsoft.com/en-us/graph/filter-query-parameter). | -| Message ID | Get message in chat | Yes | string | Microsoft Teams chat message id | - -##### Expand response - -For method **Get chat by ID**, you can get more information in the response by using the dropdown property **Expand response**. You can choose one of the following values: - -- select **With chat members**, to get information about chat members. -- select **With last message preview**, to get last message in chat. **Note:** This function doesn't work with [client credentials type authentication](#client-credentials-type-authentication), make sure that you use another authentication type. -- select **Without expand**, to get main information about chat. - -##### Members property - -The **members** property must contain a list of members: - -| Property | Type | Required | -| :---------------: | :----------: | -------------------------------------- | -| userId | string | Yes, if 'userPrincipalName' is not set | -| userPrincipalName | string | Yes, if 'userId' is not set | -| roles | string array | Yes | - -```json -[ - { - "userId": "abc01234-0c7f-012c-9876-&812dsfw2", - "roles": ["owner"] - }, - { - "principalName": "john.dou@mail.com", - "roles": ["owner"] - } -] -``` - -#### Chat methods - -| Method | Use [protected APIs](https://learn.microsoft.com/en-us/graph/teams-protected-apis) | Available for [client credentials type authentication](#client-credentials-type-authentication) | Link to method documentation with required permissions and return value | -| :-------------------: | :--------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Create a new chat | false | true | [https://learn.microsoft.com/en-us/graph/api/chat-post](https://learn.microsoft.com/en-us/graph/api/chat-post?view=graph-rest-1.0&tabs=http) | -| Get chat by ID | false | true | [https://learn.microsoft.com/en-us/graph/api/chat-get](https://learn.microsoft.com/en-us/graph/api/chat-get?view=graph-rest-1.0&tabs=http) | -| List chats | false | true | [https://learn.microsoft.com/en-us/graph/api/chat-list](https://learn.microsoft.com/en-us/graph/api/chat-list?view=graph-rest-1.0&tabs=http) | -| List chat members | false | false | 
[https://learn.microsoft.com/en-us/graph/api/chat-list-members](https://learn.microsoft.com/en-us/graph/api/chat-list-members?view=graph-rest-1.0&tabs=http) | -| Send message in chat | false | false | [https://learn.microsoft.com/en-us/graph/api/chat-post-messages](https://learn.microsoft.com/en-us/graph/api/chat-post-messages?view=graph-rest-1.0&tabs=http) | -| Get message in chat | false | true | [https://learn.microsoft.com/en-us/graph/api/chatmessage-get](https://learn.microsoft.com/en-us/graph/api/chatmessage-get?view=graph-rest-1.0&tabs=http) | -| List messages in chat | true | true | [https://learn.microsoft.com/en-us/graph/api/chat-list-messages](https://learn.microsoft.com/en-us/graph/api/chat-list-messages?view=graph-rest-1.0&tabs=http) | - -### Channel conversation type - -#### Properties - -| Property | Methods | Required | Type | Description | -| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :------: | :--------------------------------------------------------------------------------------------------------------------: | -| Group ID | Create channel
    Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams group id | -| Channel ID | Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams channel id | -| Display name | Create channel | No | string | Displayed name of new Microsoft Teams channel | -| Description | Create channel | No | text | Description of new Microsoft Teams channel | -| Channel membership type | Create channel | Yes | dropdown | See [teams-channels-overview](https://learn.microsoft.com/microsoftteams/teams-channels-overview) for more information | -| Owner | Create channel (if Channel membership type != STANDARD) | Yes | string | Channel owner; Microsoft Teams user id or Microsoft Teams principal name | -| Filter | List channels | No | string | The search filter. [Learn more about filtering](https://learn.microsoft.com/en-us/graph/filter-query-parameter) | -| Content | Send message to channel | Yes | text | Content that will be sent to chat | -| Content Type | Send message to channel | Yes | dropdown | Content type of body message | -| Message ID | Get channel message | Yes | string | Message id of Microsoft Teams in channel | -| Top | List channel messages | No | numbers | Controls the number of items per response | -| With replies | List channel messages | Yes | boolean | Choose **FALSE** for get messages without replies
    Choose **FALSE** for get messages without replies | -| Message ID | List message replies | Yes | string | Microsoft Teams channel message id | - -#### Channel methods - -| Method | Use [protected APIs](https://learn.microsoft.com/en-us/graph/teams-protected-apis) | Available for [client credentials type authentication](#client-credentials-type-authentication) | Link to method documentation with required permissions and return value | -| :---------------------: | :--------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Create channel | false | true | [https://learn.microsoft.com/en-us/graph/api/channel-post](https://learn.microsoft.com/en-us/graph/api/channel-post?view=graph-rest-1.0&tabs=http) | -| Get channel | false | true | [https://learn.microsoft.com/en-us/graph/api/channel-get](https://learn.microsoft.com/en-us/graph/api/channel-get?view=graph-rest-1.0&tabs=http) | -| List channels | false | true | [https://learn.microsoft.com/en-us/graph/api/channel-list](https://learn.microsoft.com/en-us/graph/api/channel-list?view=graph-rest-1.0&tabs=http) | -| Send message to channel | false | false | [https://learn.microsoft.com/en-us/graph/api/channel-post-messages](https://learn.microsoft.com/en-us/graph/api/channel-post-messages?view=graph-rest-1.0&tabs=http) | -| Get channel message | true | true | [https://learn.microsoft.com/en-us/graph/api/chatmessage-get](https://learn.microsoft.com/en-us/graph/api/chatmessage-get?view=graph-rest-1.0&tabs=http) | -| List channel messages | true | true | [https://learn.microsoft.com/en-us/graph/api/channel-list-messages](https://learn.microsoft.com/en-us/graph/api/channel-list-messages?view=graph-rest-1.0&tabs=http) | -| List message replies | true | true | [https://learn.microsoft.com/en-us/graph/api/chatmessage-list-replies](https://learn.microsoft.com/en-us/graph/api/chatmessage-list-replies?view=graph-rest-1.0&tabs=http) | -| List members | false | true | [https://learn.microsoft.com/en-us/graph/api/channel-list-members](https://learn.microsoft.com/en-us/graph/api/channel-list-members?view=graph-rest-1.0&tabs=http) | - -## Microsoft Teams Connector response - -The **Microsoft Teams Connector** returns the Microsoft Graph API response in `result` wrapper: - -```json -{ - "result": { - "chatType": "ONE_ON_ONE", - "createdDateTime": { - "dateTime": { - "date": { - "year": 2022, - "month": 11, - "day": 29 - }, - "time": { - "hour": 18, - "minute": 10, - "second": 33, - "nano": 361000000 - } - }, - "offset": { - "totalSeconds": 0 - } - }, - "lastUpdatedDateTime": { - "dateTime": { - "date": { - "year": 2022, - "month": 11, - "day": 29 - }, - "time": { - "hour": 18, - "minute": 10, - "second": 33, - "nano": 361000000 - } - }, - "offset": { - "totalSeconds": 0 - } - }, - "tenantId": "0000000-0000-0000-0000-000000000", - "webUrl": "https://teams.microsoft.com/l/chat/19%3Aefb08ac3-0000f-0000-0000-example-chat-id_fe35bf61-0000-0000-0000-ddc97d8903d4%40unq.gbl.spaces/0?tenantId=00000-0000-0000-0000-00000000", - "id": "19%3Aefb08ac3-0000f-0000-0000-example-chat-id_fe35bf61-0000-0000-0000-ddc97d8903d4%40unq.gbl.spaces" - } -} -``` - -See [channel resource type](https://learn.microsoft.com/graph/api/resources/channel?view=graph-rest-1.0) to find the response for 
the required method for a channel conversation type, or see [chat resource type](https://learn.microsoft.com/graph/api/resources/chat?view=graph-rest-1.0) to find the response for the required method for a chat conversation type. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. For example: - -``` -= { - "chatId": result.id, - "tenantId": result.tenantId -} -``` diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/openai.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/openai.md deleted file mode 100644 index 4aec79dfb3d..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/openai.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -id: openai -title: OpenAI Connector -sidebar_label: OpenAI Connector -description: Send messages to OpenAI from your BPMN process using the OpenAI Connector Connector. ---- - -The **OpenAI Connector** is an outbound Connector that allows you to use [ChatGPT](https://platform.openai.com/docs/guides/chat/chat-completions-beta) -or [Moderation API](https://platform.openai.com/docs/guides/moderation/moderation) in your BPMN process. - -## Prerequisites - -To use the **OpenAI Connector**, create an OpenAI account and create an API key. - -Refer to the [OpenAI Platform](https://platform.openai.com/docs/quickstart) documentation for a detailed setup guide. - -:::note -Use Camunda secrets to avoid exposing your sensitive data, such as your OpenAI API key, as plain text. Follow our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -:::note - -## Create an OpenAI Connector task - -To use the **OpenAI Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your OpenAI Connector executable - -To work with the **OpenAI Connector**, fill all mandatory fields. - -## Authentication - -To use the **OpenAI Connector**, obtain an API key from OpenAI. To create an OpenAI account and learn more about API keys, visit the [OpenAI Platform](https://platform.openai.com/) documentation. - -### Create a new Connector secret - -Keep your **API key** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `OPENAI_API_KEY`) so you can reference it later in the Connector. - -### Configure the API key - -Select the **OpenAI API key** field in the **Authentication** section and set it to the secret you created (e.g. `{{secrets.OPENAI_API_TOKEN}}`). - -## Operations - -The **OpenAI Connector** currently supports two operation types in the **Operation** dropdown list: **Chat** and **Moderation**. - -## Chat - -With the **Chat** operation, you can interact with OpenAI chat-based language models. - -### Model - -The **Model** dropdown list allows you to select the model. - -Refer to the [Models](https://platform.openai.com/docs/models/models) section of OpenAI documentation for detailed information about models. 
-
-#### Custom model version
-
-The **Model** dropdown list does not contain all available models.
-
-To use a model that is not listed, use the **Custom** option and provide the model name in the **Custom model version** field that appears.
-
-:::note
-Selection of models is user-specific and depends on your account privileges. For this reason, GPT-4
-may appear to be unavailable when you attempt to use it, although it is defined in the element template.
-:::
-
-### Temperature
-
-The **Temperature** field controls the randomness of the model's output. Lower temperatures make the output more deterministic, while higher temperatures make it more random.
-
-Accepted values are between `0` and `2` (add a leading `0` for values less than `1`), and you can use two digits after the decimal point.
-
-### System message
-
-The **System message** field allows you to provide initial instructions for the model and helps set the behavior of the assistant.
-
-For example, if you want ChatGPT to translate the prompt into a different language instead of interpreting the questions contained in the prompt, you can set the **System message** to
-`You are a translator bot. You provide literal translation of inputs from English into German. You do not interpret the input.`
-
-### Chat history
-
-The OpenAI API doesn't store message history for ChatGPT. Therefore, it is up to you as a process developer to decide if and how to retain the chat history.
-
-The **Chat history** input field may contain the history of previous messages or examples of the desired behavior.
-Following the translation example above, you can provide some translation examples to make the expectations clearer.
-
-Chat history consumed by this Connector follows the chat format described in the corresponding part of the [OpenAI documentation](https://platform.openai.com/docs/guides/chat/introduction).
-
-### Prompt
-
-While the **System message** and **Chat history** fields are optional and provide the model with additional context, the **Prompt** is the actual input.
-This is the query that triggers the model output.
-
-The example below illustrates how you can use **System message**, **Chat history**, and **Prompt** together.
-
-**System message**
-
-```text
-You are a helpful assistant.
-```
-
-**Chat history**
-
-```
-= [
-{"role": "user", "content": "Who won the world series in 2020?"},
-{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."}
-]
-```
-
-**Prompt**
-
-```text
-Where was it played?
-```
-
-In this example, the chat history provides the context of a user asking who won the World Series in 2020, and the assistant providing the correct answer that the Los Angeles Dodgers won. The prompt, "Where was it played?", is the follow-up question that seeks additional information about where the World Series took place in 2020.
-
-:::note
-Find more complex examples of prompt engineering and sample real-life use cases of ChatGPT on the OpenAI [examples](https://platform.openai.com/examples) page.
-:::
-
-### Choices to generate
-
-The numeric **Choices to generate** field determines how many alternative answers the model returns in the API response.
-
-### Sample chat output
-
-You can use an output mapping to map the response:
-
-1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`.
-2. 
Use **Result Expression** to map fields from the response into process variables. - -``` -{ - "status": 200, - "headers": { - # response headers - }, - "body": { - "id": "chatcmpl-6ws27w7nADFLWp7KD3dhjiUmP0kfu", - "object": "chat.completion", - "created": 1679488747, - "model": "gpt-3.5-turbo-0301", - "usage": { - "prompt_tokens": 16, - "completion_tokens": 79, - "total_tokens": 95 - }, - "choices": [ - { - "message": { - "role": "assistant", - "content": "ChatGPT has gained significant attention in recent years, especially with the development and advancement of Natural Language Processing (NLP) tools used in chatbots and virtual assistants." - }, - "finish_reason": "stop", - "index": 0 - } - ] - } -} -``` - -## Moderation - -It is recommended to use the Moderation API to sanitize inputs and outputs of the language model. You will be able to prevent violation of OpenAI policies and displaying the potentially unsafe content in your system. - -### Evaluation input - -### Sample moderation output - -Output contains the evaluation result broken down by violation categories. To learn more about Moderation output, visit the [OpenAI documentation](https://platform.openai.com/docs/guides/moderation/moderation). - -``` -{ - "status": 200, - "headers": { - # response headers - }, - "body": { - "id": "modr-6wtH8E1f2W533qdQAzq8dUpmKRVCV", - "model": "text-moderation-004", - "results": [ - { - "flagged": false, - "categories": { - "sexual": false, - "hate": false, - "violence": false, - "self-harm": false, - "sexual/minors": false, - "hate/threatening": false, - "violence/graphic": false - }, - "category_scores": { - "sexual": 1.0084246241603978E-5, - "hate": 5.5422344303224236E-5, - "violence": 8.184280159184709E-5, - "self-harm": 1.3117542607687938E-7, - "sexual/minors": 4.457491709075612E-9, - "hate/threatening": 9.144552337581047E-10, - "violence/graphic": 1.770446012017146E-8 - } - } - ] - } -} -``` diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/operate.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/operate.md deleted file mode 100644 index ac9fd92176e..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/operate.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -id: operate -title: Camunda Operate Connector -sidebar_label: Camunda Operate Connector -description: Fetch process execution data from Camunda Operate, a monitoring solution for Camunda 8. ---- - -The **Camunda Operate Connector** allows you to interact with [Camunda Operate](https://camunda.com/platform/operate/) in your BPMN process to fetch process execution data. - -## Prerequisites - -To use the **Camunda Operate Connector**, you need to have an active Camunda 8 cluster with Operate. -This Connector is compatible with both Camunda 8 SaaS and Camunda 8 Self-Managed. - -:::note -Password authentication with Operate is currently not supported. -If you are using Camunda 8 Self-Managed, you can only authenticate using [Identity](/self-managed/operate-deployment/operate-authentication.md#identity). -::: - -You also need to obtain the Operate API client credentials. Follow the links below to learn more about API client configuration. 
- -- [API client configuration in Camunda 8 SaaS](/components/console/manage-clusters/manage-api-clients.md) -- [Authentication with a Self-Managed Operate deployment](/self-managed/operate-deployment/operate-authentication.md#identity) - -:::note -Use Camunda secrets to store credentials so you don't expose sensitive information directly from the process. See [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Create an Operate Connector task - -To use the **Operate Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task using the **Append Connector** context menu. -Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Enter your cluster information - -Choose between **Camunda 8 SaaS** and **Camunda 8 Self-Managed** depending on your Camunda 8 installation type. The input fields will update accordingly. - -### SaaS clusters - -If you are using a SaaS cluster, you will be required to provide **region** and **clusterId**. You will see these values when you [create an API client](/guides/setup-client-connection-credentials.md) for your cluster. - -### Self-Managed clusters - -If you are using a Self-Managed cluster, you need to provide two URLs: - -- URL of your OAuth token endpoint -- Operate URL - -If you are testing this Connector on your local machine with the Camunda 8 Docker Compose setup, set the following URLs: - -- OAuth Token endpoint: `http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token` -- Operate URL: `http://localhost:8081` - -## Configure authentication - -For both SaaS and Self-Managed clusters, you need to provide **clientId** and **clientSecret**. -You will see these values when you [create an API client](/guides/setup-client-connection-credentials.md) for your cluster. - -For Self-Managed clusters, you can additionally specify the Operate **audience**. -If you are using the default Identity configuration, leave the default `operate-api` value. - -## Choose endpoint and operation - -In the **Endpoint** dropdown list, select the API entity. You can choose between **Process instances**, **Process definitions**, **Variables**, **Flownode instances**, and **Incidents**. - -In the **Operation** dropdown list, select one of the supported operations: **Get by key** or **Search**. - -Refer to the Operate [API documentation](/apis-tools/operate-api/overview.md) for more details on the specific operations. - -:::note Unsupported operations -The following API operations are currently not supported by the **Operate Connector**: - -- Delete process instance -- Get process definition XML - -::: - -## Configure operation parameters - -For **Get by key** operation, you must provide a single input, the entity **key**. - -For **Search** operation, the following search parameters can be configured: - -- **Filter**: Allows you to filter objects by fields, e.g. the following filter will return active process instance with key 235 if it contains incidents: - - `{ "processInstanceKey": 235, "state": "ACTIVE", "incidents": true }` - - If one of the fields doesn't match, an empty response will be returned for this request. Refer to the [Operate API documentation](/apis-tools/operate-api/overview.md#filter) for more detailed information. - -- **Sort**: Sorting properties in Operate format, e.g. `[{ "field": "name", "order": "DESC" }]`. 
You need to provide a list of sort objects in this field. -- **Results**: Number of results to return. -- **Pagination**: Identifier of an item from which the search should start. Copy this `sortValues` value from the previous Operate response here, or leave this field blank if you don't need pagination. See the [API reference](/apis-tools/operate-api/overview.md#pagination) for details. - -## Handle the API response - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. -2. Use **Result Expression** to map fields from the response into process variables. - -Response example: - -``` -{ - "status": 200, - "headers": { - # response headers - }, - "body": { - "items": [ - { - "key": 2251799814052469, - "processVersion": 1, - "bpmnProcessId": "Process_1ea5d26b-27dd-4335-bd2a-d1e39c5ce1e3", - "startDate": "2023-03-21T08:25:04.499+0000", - "endDate": "2023-03-21T08:25:12.093+0000", - "state": "COMPLETED", - "processDefinitionKey": 2251799814052467 - }, - { - "key": 2251799814052613, - "processVersion": 2, - "bpmnProcessId": "Process_1ea5d26b-27dd-4335-bd2a-d1e39c5ce1e3", - "startDate": "2023-03-21T08:27:49.784+0000", - "endDate": "2023-03-21T08:27:58.838+0000", - "state": "COMPLETED", - "processDefinitionKey": 2251799814052610 - } - ], - "sortValues": [ - 2251799814052613 - ], - "total": 55 - } -} -``` diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/rabbitmq-outbound.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/rabbitmq-outbound.md deleted file mode 100644 index d008e1e4723..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/rabbitmq-outbound.md +++ /dev/null @@ -1,293 +0,0 @@ ---- -id: rabbitmq -title: RabbitMQ Connector -sidebar_label: RabbitMQ Connector -description: Send messages to RabbitMQ from your BPMN process using the RabbitMQ Connector. ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -The **RabbitMQ Connector** is an outbound Connector that allows you to connect your BPMN process with [RabbitMQ](https://www.rabbitmq.com/) to send messages to RabbitMQ. - -## Prerequisites - -To use the **RabbitMQ Connector**, you need to have installed a RabbitMQ server and create the relevant [credentials](https://www.rabbitmq.com/passwords.html). -Use Camunda secrets to store credentials so you don't expose sensitive information directly from the process. See [this appendix entry](#how-do-i-store-secrets-for-my-connector) to learn more. - -:::note -Ensure you enter the correct exchange name and routing key, as the **RabbitMQ Connector** can't throw an exception if they are incorrect. -::: - -## Create a RabbitMQ Connector task - -To use the **RabbitMQ Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. - -Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Connecting to RabbitMQ and sending messages - -To connect to RabbitMQ, choose the required connection type in the **Authentication** section and complete the mandatory fields highlighted in red in the connector properties panel: - -:::note -All the mandatory and non-mandatory fields depending on the authentication selection you choose are covered in the upcoming sections. 
-::: - -## Authentication - -You can choose among the available RabbitMQ Connectors according to your authentication requirements. -First, you must have a user in your RabbitMQ instance with the necessary permissions. See more at the [RabbitMQ access control specification](https://www.rabbitmq.com/access-control.html). - -Next, we will choose the type of connection. - -### URI type connection - -For a URI connection, take the following steps: - -1. Click the **URI** connection type in the **Authentication** section -2. Set **URI** to `URI`. It must contain RabbitMQ username, password, host name, port number, and virtual host. For example, `amqp://userName:password@serverHost:port/virtualHost`; follow the [RabbitMQ URI specification](https://www.rabbitmq.com/uri-spec.html) to learn more. - -### Credentials type connection - -To connect with credentials, take the following steps: - -1. Click the **Username/Password** connection type in the **Authentication** section -2. Set the **Password** to `Password`. - -## Routing data - -In the **Routing** section, you must set the routing data attributes: - -- For a **URI** type connection, the required fields are `exchange` and `routingKey`. -- For a **Credentials** type connection, the required fields are `exchange`, `routingKey`, `virtualHost`, `hostName`, and `port`. - -Refer to the RabbitMQ documentation to learn about routing attributes: - -- [Exchanges, routing keys, and bindings](https://www.cloudamqp.com/blog/part4-rabbitmq-for-beginners-exchanges-routing-keys-bindings.html) -- [Virtual hosts](https://www.rabbitmq.com/vhosts.html) -- [Networking, host, and port configuration](https://www.rabbitmq.com/networking.html) - -## Message - -1. In the **Message** section, insert the message payload. The message can be Text or JSON format. -2. (Optional) In the **Properties** section, insert the message properties in JSON or as a [FEEL](/components/modeler/feel/what-is-feel.md) expression. Go to [RabbitMQ documentation](https://www.rabbitmq.com/publishers.html#message-properties) for learn more about RabbitMQ message properties. - example of message : - -``` -= {"myMessageKey":"Hello Camunda Team"} -``` - -example of properties: - -``` -= { - "contentEncoding":"UTF-8", - "contentType":"text/plain" -} -``` - -## RabbitMQ Connector response - -The **RabbitMQ Connector** returns the `Success` result. -The response contains a `messageId` variable. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map specific fields from the response into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). For example: - -``` -= { - "myResultVariable": response.statusResult -} -``` - -## Appendix & FAQ - -### How do I store secrets for my Connector? - -Use Camunda secrets to avoid exposing your credentials. Follow our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. - - - - - -:::note -To maintain stable behavior from the RabbitMQ Connector, do not subscribe multiple RabbitMQ Connectors to the same queue. - -Successfully consumed messages are removed from the queue, even if they are not correlated. -::: - -The **RabbitMQ Connector** is an inbound Connector that allows you to connect your BPMN process with [RabbitMQ](https://www.rabbitmq.com/) to receive messages from RabbitMQ. 
- -## Prerequisites - -To use the **RabbitMQ Connector**, you need to have installed a RabbitMQ server and create the relevant [credentials](https://www.rabbitmq.com/passwords.html). -Use Camunda secrets to store credentials so you do not expose sensitive information directly from the process. See [this appendix entry](#how-do-i-store-secrets-for-my-connector) to learn more. - -## Create a RabbitMQ Connector event - -To use the **RabbitMQ Consumer Connector** in your process, either change the type of existing event by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector event using the **Append Connector** context menu. Follow our [guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Create a RabbitMQ Consumer Connector task - -1. Add a **Start Event** or an **Intermediate Event** to your BPMN diagram to get started. -2. Change its template to a RabbitMQ Connector. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the RabbitMQ consumer. - -## Connecting to RabbitMQ and receiving messages - -### Authentication - -You can choose among the available RabbitMQ Connectors according to your authentication requirements. -First, you must have a user in your RabbitMQ instance with the necessary permissions. See more at the [RabbitMQ access control specification](https://www.rabbitmq.com/access-control.html). - -Next, we will choose the type of connection. - -#### URI type connection - -For a URI connection, take the following steps: - -1. Click the **URI** connection type in the **Authentication** section. -2. Set **URI** to `URI`. It must contain RabbitMQ username, password, host name, port number, and virtual host. For example, `amqp://userName:password@serverHost:port/virtualHost`; follow the [RabbitMQ URI specification](https://www.rabbitmq.com/uri-spec.html) to learn more. - -#### Credentials type connection - -To connect with credentials, take the following steps: - -1. Click the **Username/Password** connection type in the **Authentication** section -2. Set the **Password** to `Password`. - -### Routing data - -- For a **Credentials** type connection, you are required to fill in the `virtualHost`, `hostName`, and `port` fields. -- For a **URI** type connection, these values are already included in the URI, so you don't need to fill them in. - -### Subscription properties - -The **Subscription** section allows you to configure the subscription to the RabbitMQ queue. - -- **Queue name** is a mandatory field that specifies the name of the queue to subscribe to. -- **Consumer tag** is an optional field that specifies the consumer tag to use for the consumer. If not set, the server will generate one. -- **Exclusive consumer** is an optional dropdown field that specifies whether the consumer is exclusive. Exclusivity allows you to ensure only one consumer at a time consumes from the queue. -- **Arguments** is an optional FEEL expression field that specifies the arguments for the queue. The expression must be a [FEEL context expression](/components/modeler/feel/language-guide/feel-context-expressions.md). For example, `={x-message-ttl: 60000}`. See more at the [RabbitMQ queue arguments specification](https://www.rabbitmq.com/queues.html#optional-arguments). - -:::note -When configuring the **Arguments** field, remember that inbound Connectors are executed outside the BPMN process context and are not tied to a specific process instance. 
-Therefore, you cannot use process variables in the **Arguments** context expression.
-However, you can refer to Connector secrets using placeholder syntax. For example, `= {x-consumer-timeout: "{{secrets.CONSUMER_TIMEOUT}}"}`.
-:::
-
-### Activation
-
-The **Activation** section allows you to configure the custom activation conditions for the RabbitMQ Consumer Connector.
-
-#### Correlation key
-
-The correlation key fields are only applicable for the intermediate event **RabbitMQ Connector**.
-
-When using the **RabbitMQ Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**.
-
-- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**.
-- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message.
-
-For example, given that your correlation key is defined with the `myCorrelationKey` process variable, and the incoming message contains `message:{body:{correlationKey:myValue}}`, your correlation key settings will look like this:
-
-- **Correlation key (process)**: `=myCorrelationKey`
-- **Correlation key (payload)**: `=message.body.correlationKey`
-
-Learn more about correlation keys in the [messages guide](../../../concepts/messages).
-
-#### Activation condition
-
-**Activation condition** is an optional FEEL expression field that allows for fine-tuning of the Connector activation.
-For example, given that a RabbitMQ message contains the payload `{"role": "USER", "action": "LOGIN"}`, the **Activation Condition** value might look like `=(message.body.role="USER")`.
-This way, the Connector is triggered only if the message body contains the `role` field with the value `USER`. Leave this field empty to trigger your Connector for every incoming message.
-
-### Variable mapping
-
-The **Variable mapping** section allows you to configure the mapping of the RabbitMQ message to the process variables.
-
-- Use **Result variable** to store the response in a process variable. For example, `myResultVariable`.
-- Use **Result expression** to map specific fields from the response into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). For example, given that the RabbitMQ Connector is triggered with the message body `{"role": "USER", "action": "LOGIN"}` and you would like to extract the `role` field as a process variable `messageRole`, the **Result Expression** might look like this:
-
-```
-= {
-  "messageRole": message.body.role
-}
-```
-
-## Appendix & FAQ
-
-### How do I store secrets for my Connector?
-
-Use Camunda secrets to avoid exposing your credentials. Follow our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more.
-
-### What is the output format of the RabbitMQ Connector?
- -The RabbitMQ Connector returns the following output that can be used in the next steps of your process, including result expressions: - -``` -{ - "message": { - "consumerTag": "myConsumerTag", - "body": { - {{ the message body }} - }, - "properties": { - "contentType": "application/json", - "contentEncoding": "UTF-8", - "headers": { - "x-first": "1", - "x-second": "2" - }, - "deliveryMode": 2, - "priority": 0, - "correlationId": "myCorrelationId", - "replyTo": "myReplyTo", - "expiration": "myExpiration", - "messageId": "myMessageId", - "timestamp": "myTimestamp", - "type": "myType", - "userId": "myUserId", - "appId": "myAppId", - "clusterId": "myClusterId" - } - } -} -``` - -:::note -The output payload contains a top-level `message` object that contains `consumerTag`, `body`, and `properties` fields. -::: - -### How is message body deserialized? - -The RabbitMQ Consumer Connector always tries to deserialize the message body as JSON. If the deserialization fails, the Connector will return the message body as a string. -However, if the body only contains a primitive value, such as a string, a number, or a boolean, the Connector will return the primitive value itself. - -### When is the message acknowledged? What happens if the Connector execution fails? - -The following outcomes are possible: - -- If Connector execution is successful and **Activation condition** was met, the message is acknowledged and removed from the queue. -- If **Activation condition** was not met, the message is rejected and removed from the queue. -- If Connector execution fails due to an unexpected error (e.g. Zeebe is unavailable), the message is rejected and re-queued. - -### What lifecycle does the RabbitMQ Consumer Connector have? - -The RabbitMQ Subscription Connector is a long-running Connector that is activated when the process is deployed and deactivated when the process is un-deployed or overwritten by a new version. - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/salesforce.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/salesforce.md deleted file mode 100644 index 323328f540f..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/salesforce.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -id: salesforce -title: Salesforce Connector -description: Manage your Salesforce Instance from your BPMN process. Learn how to create a Salesforce Connector task, and get started. ---- - -The **Salesforce Connector** is an outbound protocol Connector that allows you to connect your BPMN service with [Salesforce](https://salesforce.com/) to interact with the [Salesforce APIs](https://developer.salesforce.com/docs/apis). - -## Prerequisites - -To use the **Salesforce Connector**, you must have a [Salesforce Connected App with OAuth 2.0 Client Credentials Flow](https://help.salesforce.com/s/articleView?id=sf.connected_app_client_credentials_setup.htm&type=5). - -:::note -Use Camunda secrets to avoid exposing your _Salesforce Connected App_ client ID and client secret as plain text. Learn more in our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md). -::: - -## Create a Salesforce Connector task - -To use the **Salesforce Connector** in your process, either change the type of existing task by clicking on it and using -the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. 
-Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Instance - -Each operation requires information about the **Salesforce Base URL**. - -Example: `https://MyDomainName.my.salesforce.com` - -The Salesforce API version should be the one you want to use. You can search for this information [in your Salesforce API](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_versions.htm). - -## Authentication - -In the **Authentication** section, select **Bearer Token** to provide a static access token or **OAuth 2.0** to configure client credentials. - -:::note -While the static access token is useful for getting started, we recommend providing the **OAuth 2.0** client credentials. -::: - -## Operation - -### Operation types - -Currently, this Connector supports two types of operation: - -- [SOQL Query](https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm) -- [sObject records](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_list.htm) - -### SOQL Query - -The **SOQL Query** only requires the query itself as input. A query is useful for receiving data based on a structured query language. Take a closer look at some available [examples](https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm). - -The response body looks like the following: - -```json -{ - "totalSize": 1, - "done": true, - "records": [ - { - "attributes": { - "type": "", - "url": "/services/data//sobjects//" - }, - "": "", - "...": "..." - } - ] -} -``` - -### sObject records - -**sObject records** support **Create record**, **Get record**, **Update record**, and **Delete record**. - -:::note -Every operation explanation contains a link to the Salesforce API docs which will explain the request and provide an example. -::: - -#### Create record - -- **Salesforce object:** The Salesforce object to create, e.g. _Account_. -- **Record fields:** Field values for the Salesforce object to create, e.g. `{ Name: "Express Logistics and Transport" }`. - -Review an example including the response body format in the [Salesforce documentation](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_sobject_create.htm). - -#### Get record - -- **Salesforce object:** The Salesforce object to create, e.g. _Account_. -- **Salesforce object ID:** Identifier of the Salesforce object, e.g. _001R0000005hDFYIA2_. -- **Relationship field name _(optional)_:** Name of the field that contains the relationship, e.g. _Opportunities_. -- **Query Parameters _(optional)_:** Additional query parameters that can be provided along with the request, e.g. `{ fields: "AccountNumber,BillingPostalCode" }`. - -When omitting the **Relationship field name**, a [get request for a record](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_retrieve_get.htm) is performed. Otherwise, a [get request for records using sObject relationships](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_relationships_get.htm) is performed. In the documentation linked above, you can find the possible use case for **Query parameters**; for example, [filtering fields](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_get_field_values.htm). 
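-
-For illustration, if you only need a couple of fields from the retrieved record, the optional **Query Parameters** property can be provided as a [FEEL](/components/modeler/feel/what-is-feel.md) context. This is only a sketch reusing the field names from the example above; adjust them to your own object:
-
-```
-= {
-  fields: "AccountNumber,BillingPostalCode"
-}
-```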
- -The response body will contain the requested object as the root object: - -```json -{ - "attributes": { - "type": "", - "url": "/services/data//sobjects//" - }, - "": "", - "...": "..." -} -``` - -Find another example [here](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_get_field_values.htm). - -#### Update object - -- **Salesforce object:** The Salesforce object to create, e.g. _Account_. -- **Salesforce object ID:** Identifier of the Salesforce object, e.g. _001R0000005hDFYIA2_. -- **Record fields:** Field values for the Salesforce object to update, e.g. `{ BillingCity : "San Francisco" }`. - -[These update the record](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_retrieve_patch.htm) using the given fields. - -As an update does not return a body, you will not be able to map any data from the response back to the process. - -#### Delete object - -- **Salesforce object:** The Salesforce object to create, e.g. _Account_. -- **Salesforce object ID:** Identifier of the Salesforce object, e.g. _001R0000005hDFYIA2_. - -[These delete the record](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_retrieve_delete.htm). - -As a delete does not return a body, you will not be able to map any data from the response back to the process. - -## Handle Connector response - -The **Salesforce Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**. Therefore, -handling response is still applicable [as described](/components/connectors/protocol/rest.md#response). diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/sendgrid.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/sendgrid.md deleted file mode 100644 index 00ad071a926..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/sendgrid.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -id: sendgrid -title: SendGrid Connector -description: Quickly send emails from your BPMN processes. ---- - -The **SendGrid Connector** is an outbound Connector that allows you to quickly send emails from your BPMN processes. - -## Prerequisites - -To use the SendGrid Connector, a SendGrid API key is needed. Follow [these steps](#appendix) if you do not have a SendGrid account or API key [secret configured](#create-a-new-connector-secret) in your cluster. - -## SendGrid Connector - -The SendGrid Connector comes with two options: - -1. **SendGrid Email Connector** allows sending simple emails (i.e. text/plain, text/html). -2. **SendGrid Email Template Connector** supports [SendGrid Dynamic Templates](https://sendgrid.com/solutions/email-api/dynamic-email-templates/). - -### SendGrid Email Connector - -#### Create a SendGrid Email Connector Task - -To use a **SendGrid Connector** in your process, either change the type of an existing task to **SendGrid: Send Email** using the wrench-shaped **Change type** context menu, or create a new **SendGrid: Send Email** Connector task using the **Append Connector** context menu. Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -#### Make your SendGrid Email Connector executable - -To make the **SendGrid Email Connector** executable, you need to fill out all the mandatory fields highlighted in red in the properties panel: - -1. Set **SendGrid API Key** to `{{secrets.SEND_GRID_API_KEY}}`. -2. 
Set **Sender Name** to `Jane Doe` (or the [sender identity](#create-a-sender-identity) you configured above). -3. Set **Sender Email** to `jane-doe@camunda.com` (or the [sender identity](#create-a-sender-identity) you configured above). -4. Set **Receiver Name** to `Your Name`. -5. Set **Receiver Email** to `Your email address`. -6. Set **Email Content Subject**. -7. Leave **Content Type** to **text/plain** (or alternatively to **text/html** if you intend to provide an HTML body to your email). -8. Provide a text (or HTML) **Body** for your email. - -### SendGrid Email Template Connector - -Send an email via SendGrid Dynamic Template and use the [Handlebars templating language](https://handlebarsjs.com/) to pass dynamic values to your Connector. - -#### Configure a Dynamic Template - -1. Open the [Dynamic Transactional Templates page](https://sendgrid.com/dynamic_templates) and click **Create Template**. -2. Add a unique template name and click **Save**. -3. To begin editing your new template, click **Add Version**. -4. Select an editor and click **Continue**. -5. Design your template. Find more information on using Handlebars [here](https://docs.sendgrid.com/for-developers/sending-email/using-handlebars). - -In our example template, we will use the following subject and body: - -```text -Subject: -Your Camunda Weather Report for {{location}} -``` - -```text -Body: -Hi {{name}}, - -Thanks for using Camunda Connectors to check your current weather report. -Your current weather in Berlin is {{weather}} with {{actual-temp}}°C and feels like {{feel-temp}}°C - -The Camunda Team -``` - -In our example template, we will use the following Handlebars: - -`{{name}}` - The name of the user requesting the weather report - -`{{location}}` - The location used for the weather report - -`{{weather}}` - The current weather condition - -`{{actual-temp}}` - The measured temperature - -`{{feel-temp}}` - How the temperature feels like in reality - -While you are editing your template, you can test how your email would look by switching to **Preview** mode, choosing **{} Show Test Data**, and then providing the necessary data. - -#### Create a SendGrid Email template Connector task - -To use this **SendGrid Connector** in your process, either change the type of an existing task to **SendGrid: Send Email via Template** using the wrench-shaped **Change type** context menu, or create a new **SendGrid: Send Email via Template** Connector task by using the **Append Connector** context menu. Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -#### Make your SendGrid Email Template Connector executable - -To make the **SendGrid Email Template Connector** executable, fill out all the mandatory fields highlighted in red in the properties panel: - -1. Set **SendGrid API Key** to `{{secrets.SEND_GRID_API_KEY}}`. -2. Set **Sender Name** to `Jane Doe` (or the [sender identity](#create-a-sender-identity) you configured above). -3. Set **Sender Email** to `jane-doe@camunda.com` (or the [sender identity](#create-a-sender-identity) you configured above). -4. Set **Receiver Name** to `Your Name`. -5. Set **Receiver Email** to `Your email address`. -6. Log in to your SendGrid account and navigate to [the Dynamic Template you created](#configure-a-dynamic-template). -7. Copy the id of the template and paste it in the **Template ID field**. -8. 
Provide the test data in the **Template Data** field as a [FEEL context expression](/components/modeler/feel/language-guide/feel-context-expressions.md): - -```text -= { - name: "Jane", - location: "Berlin", - weather: "Clear", - actual-temp: 30, - feel-temp: 3 -} -``` - -If you want to provide dynamic content in the email via process variables, you can set them in the **Template Data** field as well: - -```text -= { - name: nameVariable, - location: locationVariable, - weather: weatherVariable, - actual-temp: temerature, - feel-temp: windChill -} -``` - -## Appendix - -### Create a SendGrid account - -To use the **SendGrid Connector**, create a free account in SendGrid if you do not have one yet: - -1. Go to [https://signup.sendgrid.com/](https://signup.sendgrid.com/). -2. Set up the account with your email and choose a password. -3. Click **Create Account**. -4. Provide further information required by SendGrid. -5. Click **Get Started**. - -### Create a sender identity - -Before sending your first email, you'll need to create a sender identity and verify it. - -1. Click **Settings > Sender Authentication** or click [here](https://app.sendgrid.com/settings/sender_auth). -2. Choose **Verify a Single Sender** for demo purposes (or alternatively **Authenticate Your Domain** for a production setup.) -3. Provide the details requested by SendGrid in the form and click **Create**. -4. Go to your email inbox and open the email sent to you by SendGrid. -5. Click **Verify Single Sender**. - -### Create an API key - -To create an API key in SendGrid, take the following steps: - -1. Log in to your new account. -2. Go to **Settings**. -3. Click **API Keys > Create API Key**. -4. Give your key a name (i.e. `my-camunda-connector-key`). -5. Click **Create Key**. -6. Copy the **API Key** and move on to the next step for creating a Connector secret. - -### Create a new Connector secret - -We advise you to keep your API key safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret `SEND_GRID_API_KEY` so you can reference it later in the Connector. diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/slack.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/slack.md deleted file mode 100644 index 0796a140bcb..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/slack.md +++ /dev/null @@ -1,415 +0,0 @@ ---- -id: slack -title: Slack Connector -description: Send messages to channels or users in your Slack workspace from your BPMN process. ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -The **Slack Connector** is an outbound Connector that allows you to send messages to channels or users in your [Slack](https://slack.com) workspace from your BPMN process. - -## Prerequisites - -To use the Slack Connector, a Slack app must be registered with the Slack workspace you would like to send messages to. A respective OAuth token needs to be configured as a secret in your cluster. Follow [these steps in the appendix](#appendix) to learn how to set this up. - -## Create a Slack Connector task - -To use a **Slack Connector** in your process, either change the type of an existing task by clicking on it and using the wrench-shaped **Change type** context menu, or create a new Connector task by using the **Append Connector** context menu. 
Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your Slack Connector executable - -To make the **Slack Connector** executable, fill out the mandatory fields highlighted in red in the properties panel. - -### Authentication - -Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, `{{secrets.SLACK_OAUTH_TOKEN}}`. - -### Create channel - -:::info -This API uses the Slack [`conversations.create`](https://api.slack.com/methods/conversations.create) method. -You need to ensure that your Slack application has related permissions enabled. -::: - -To create a channel, take the following steps: - -1. Set **Method** to `Create Channel`. -2. Set the **New Channel Name**: - 1. Channel name can be up to 80 characters and may contain lowercase letters, digits, and symbols `-` and `_`. - 2. This can be provided as a FEEL expression. -3. Set channel **Visibility** as required: - 1. **Public** channels are visible to every workspace member. - 2. **Private** channels are visible to explicitly invited people only. - -### Invite user to channel - -:::info -This API uses the Slack [`conversations.invite`](https://api.slack.com/methods/conversations.invite) method. -You need to ensure that your Slack application has related permissions enabled. -::: - -To invite users to a channel, take the following steps: - -1. Set **Method** to `Invite to Channel`. -2. Set the **Channel Name**: - 1. Channel name can be up to 80 characters and may contain lowercase letters, digits, and symbols `-` and `_`. - 2. This can be provided as a FEEL expression. -3. Set the **Users** as required: - 1. One single user name or email or id (for example: `@myUser` or `my.user@company.com` or `ABCDEF12345`). - 2. A comma separated list of users (for example: `@myUser, my.user@company.com, ABCDEF12345`). - 3. FEEL expression. In this case you can provide a valid list of strings (for example: `["@myUser", "my.user@company.com", "ABCDEF12345"]`). - - Formats: - - If a username starts with an `@` symbol, it will be handled as user name. - - If a username is in an email format, it will be handled as an email. - - If a username doesn't start with an `@`, and isn't an email, it will be handled as a user id. - - If a null input or an input which is not a type of String or a Collection provided, you will get an Exception. - - If all username is provided as any other type than a String, you will get an Exception. - - If one of the usernames is provided as any other type than a String, it will be omitted. - - If you provide a channel name it will be omitted since it is not possible to invite a channel to another channel. - -### Post message - -:::info -This API uses the Slack [`chat.postMessage`](https://api.slack.com/methods/chat.postMessage) method. -You need to ensure that your Slack application has related permissions enabled. -::: - -To post a message, take the following steps: - -1. Set **Method** to `Post Message`. -2. Set **Channel/User Name** to either the **channel** or **user** you want to send the message to. - 1. A **channel** is specified by a unique identifier starting with a `#` (for example, `#myChannel`). - 2. A **user** is specified by a username starting with an `@` symbol (for example, `@myUser`). -3. Select a **Message type**. - 1. When **Plain text** is selected, set **Message** to the message string you would like to send (for example, `Hello World!`). - 2. 
When **Message block** is selected, set **Message block** to a formatted rich text block format. Learn more about rich text message block format in the [official Slack documentation](https://api.slack.com/reference/surfaces/formatting#stack_of_blocks). - -The **Channel/User Name** and **Message** can either be given [static values](/docs/components/concepts/expressions.md#expressions-vs-static-values), or FEEL expressions. FEEL expressions can be used to [access process variables or dynamically create values](/components/concepts/expressions.md). This can be handy if a process variable is used to store the relevant channel or if the message needs to be composed dynamically, for example: - -`Channel/User Name` property might look like: - -``` -#slack-connectors -``` - -`Message` property: - -``` -= "Order-" + orderId + " was dispatched" -``` - -In the above example, the Channel/User Name is set to the static value "#slack-connectors," which will post the message to the specified Slack channel. The **Message** property uses a FEEL expression to dynamically create the message content. It concatenates the string "Order-" with the value stored in the process variable orderId and adds "was dispatched" to complete the message. This way, the message will vary based on the specific orderId stored during the process execution. - -:::note -Slack's [guidance on formatting](https://api.slack.com/reference/surfaces/formatting#basics) can assist in formatting messages. -::: - -## Slack API response - -The **Slack Connector** exposes the Slack API response as a [local variable](/components/concepts/variables.md#variable-scopes) called `response`. -Response contents are method-specific. - -### Create channel - -The following fields are available in the `response` variable after executing **Create Channel** method: - -- **channel**: - - **id**: channel ID - - **name**: channel name - -Notice that the **name** field can be subsequently used as an argument of **Post Message** method. - -### Post message - -The following fields are available in the `response` variable after executing the **Post Message** method. -Notice that all fields describe state in the Slack workspace: - -- **ts**: timestamp ID -- **channel**: channel ID -- **message**: - - **type**: message type - - **team**: team ID - - **user**: user ID - - **text**: message text - - **ts**: timestamp ID - - **appID**: Slack App ID - - **botID**: Slack Bot ID - -### Output mapping - -You can use an Output Mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. For example: - -``` -= { - messageText: response.message.text -} -``` - -## Appendix - -To use the **Slack Connector**, the following prerequisites need to be set up. - -1. [Slack workspace](#use-a-well-known-slack-workspace) - The workspace the **Slack Connector** will communicate with. -2. [Slack basic app with bot token configured](#configure-a-basic-slack-app) - The **Slack Connector** will communicate through this Slack app with the workspace. You can consider the Slack app as _Slack bot representing the Camunda platform_. -3. [Slack bot token stored as secret](#store-slack-bot-token-as-secret) - The secret will store the Slack bot token and can be used to reference it from BPMN processes without revealing it in the BPMN `xml`. 
- -### Use a well-known Slack workspace - -A Slack workspace consists of channels in which workspace members can communicate and collaborate. A workspace is identified by a unique name, for example `https://myWorkspace.slack.com/`. In most cases you will know which workspace you want to connect with already. If you want to set up a new workspace, refer to the [official Slack documentation](https://slack.com/help/articles/115001344007-Create-a-workspace-on-Enterprise-Grid). - -### Configure a basic Slack app - -:::caution -You can only install a Slack app to a workspace in which you are a member or that you own. It is not possible if you have guest-only permissions. See the [guide to apps in Slack](https://slack.com/help/articles/360001537467-Guide-to-apps-in-Slack) for more details. -::: - -The **Slack Connector** communicates through a Slack app with a concrete Slack workspace. For example, when sending a Slack message, the message will be posted by the Slack app. For the **Slack Connector** to work, you need to perform the following steps: - -1. [Create a Slack app](https://api.slack.com/apps). -2. [Request required scopes](https://api.slack.com/scopes) - The scopes represent what your app can and cannot do (for example, posting messages). - 1. For the **Create Channel** method to work, you need to grant at least the [`channels:manage`](https://api.slack.com/scopes/channels:manage) scope. - 2. For the **Post Message** method to work, you need to grant at least the [`chat:write`](https://api.slack.com/scopes/chat:write) scope. -3. [Install the Slack app to your workspace](https://api.slack.com/authentication/basics#installing). -4. [Invite the Slack app to your workspace via /invite](https://slack.com/help/articles/201259356-Slash-commands-in-Slack#h_01EPZ2Z81EJ67RA2BGDKZ9M1AN). - -Once the app is set up, copy the [bot token](https://api.slack.com/authentication/token-types) of the app. It is represented as a string and begins with `xoxb-`. This is the OAuth Bearer token, which the **Slack Connector** will use to authenticate with the Slack API. - -### Store Slack bot token as secret - -The **Slack Connector** uses an OAuth bearer token (for example, the Slack app bot token) to authenticate with the Slack API. - -We advise you to keep your Slack bot token safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret `SLACK_OAUTH_TOKEN` so you can reference it later in the Connector. - - - - - -The **Slack inbound Connector** is a Connector that allows you to start or continue -a BPMN process triggered by a [Slack](https://slack.com/) message. - -## Create a Slack inbound Connector task - -1. Start building your BPMN diagram. You can use the **Slack inbound Connector** with either a **Start Event** or **Intermediate Catch Event**. -2. Select the applicable element and change its template to a **Slack Inbound Connector**. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the webhook. -6. Navigate to the **Webhooks** tab in the properties panel to see the webhook URL. - -## Make your Slack inbound Connector for receiving event notifications executable - -1. In the **Webhook Configuration** section, configure the **Webhook ID**. By default, **Webhook ID** is pre-filled with a random value. This value will be a part of the Slack event subscription or slash command URL. -2. 
In the **Webhook Configuration** section, configure the **Slack signing secret**. This value is unique to your Slack application and used to validate a Slack payload integrity. Read more about signing secrets in the [Slack documentation](https://api.slack.com/authentication/verifying-requests-from-slack). -3. In the **Activation** section, configure **Condition** when the Slack event or command can trigger a new BPMN process. The following example will trigger a new BPMN process for every `app_mention` Slack event type: `=(request.body.event.type = "app_mention")`. -4. In the **Variable mapping** section, fill the field **Result variable** to store the response in a process variable. For example, `myResultVariable`. -5. In the **Variable expression** section, fill the field to map specific fields from the response into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). - The following example will extract both Slack message sender ID and text from Slack `app_mention` event: `={senderId: request.body.event.user, text: request.body.event.text}`. - -When using the **Slack inbound Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message. - -For example, given that your correlation key is defined with `myCorrelationKey` process variable, and the request body contains `"event": {"text": "12345"}`, your correlation key settings will look like this: - -- **Correlation key (process)**: `=myCorrelationKey` -- **Correlation key (payload)**: `=request.body.event.text` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -## Make your Slack inbound Connector for receiving slash command notifications executable - -1. In the **Webhook Configuration** section, configure the **Webhook ID**. By default, **Webhook ID** is pre-filled with a random value. This value will be a part of the Slack event subscription or slash command URL. -2. In the **Webhook Configuration** section, configure the **Slack signing secret**. This value is unique to your Slack application and used to validate a Slack payload integrity. Read more about signing secrets in the [Slack documentation](https://api.slack.com/authentication/verifying-requests-from-slack). -3. In the **Activation** section, configure **Condition** when the Slack event or command can trigger a new BPMN process. The following example will trigger a new BPMN process for every `/test` Slack command type: `=(connectorData.command = "/test")`. -4. In the **Variable mapping** section, fill the field **Result variable** to store the response in a process variable. For example, `myResultVariable`. -5. In the **Variable expression** section, fill the field to map specific fields from the response into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). - The following example will extract both Slack message sender ID and text from Slack `/test hello` command: `={senderId: connectorData.user_id, text: connectorData.text}`. 
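-
-For instance, if the command is invoked as `/test hello world` and you also want the individual arguments as a list, the **Result expression** could additionally use the FEEL `split()` function. This is only a sketch; the variable names are illustrative:
-
-```
-= {
-  senderId: connectorData.user_id,
-  arguments: split(connectorData.text, " ")
-}
-```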
- -When using the **Slack inbound Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message. - -For example, given that your correlation key is defined with `myCorrelationKey` process variable, and the request body contains `text=hello}`, your correlation key settings will look like this: - -- **Correlation key (process)**: `=myCorrelationKey` -- **Correlation key (payload)**: `=connectorData.text` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -## Activate the Slack inbound Connector by deploying your diagram - -Once you click the **Deploy** button, your **Slack inbound Connector** will be activated and publicly available. - -URLs of the exposed **Slack inbound Connector** adhere to the following pattern: - -`https:///inbound/` - -- `` is the URL of Connectors component deployment. When using the Camunda 8 SaaS offering, this will typically contain your cluster region and cluster ID. -- `` is the ID (path) you configured in the properties of your **Slack inbound Connector**. - -If you make changes to your **Slack Inbound Connector** configuration, you need to redeploy the BPMN diagram for the changes to take effect. - -When you click on the event with **Slack inbound Connector** applied to it, a new **Webhooks** tab will appear in the properties panel. This tab displays the URL of the **Slack inbound Connector** for every cluster where you have deployed your BPMN diagram. - -:::note -The **Webhooks** tab is only supported in Web Modeler as part of the Camunda 8 SaaS offering. -You can still use Slack inbound Connectors in Desktop Modeler, or with your Camunda 8 Self-Managed. -In that case, Slack inbound Connector deployments and URLs will not be displayed in Modeler. -::: - -## Wiring with Slack - -### Events API - -This is a simplified guide. For full guide, refer to the [official Slack documentation](https://api.slack.com/apis/connections/events-api). - -1. Make sure you have sufficient permissions to modify your Slack application. -2. Open [Slack API portal](https://api.slack.com) and select your Slack application. -3. Navigate to the **Event Subscription** page. -4. Click **Enable Events**. -5. In the **Request URL** field, put the webhook URL. You can find it at the **Webhook** tab in the properties panel of you BPMN diagram. -6. Make sure that the **Request URL** indicates that endpoint is **Verified**. This process may take several seconds. -7. Click **Subscribe to bot events**. -8. Select all events you wish to receive. **Note:** some messages may produce several events. For example, a message `@YourBot test` will generate both `app-mention` and `message` events. -9. Click **Save** to apply new changes. -10. Install or re-install your app into your workspace. - -### Slash commands - -This is a simplified guide. For a full guide, refer to the [official Slack documentation](https://api.slack.com/interactivity/slash-commands). - -1. Make sure you have sufficient permissions to modify your Slack application. -2. 
Open [Slack API portal](https://api.slack.com) and select your Slack application. -3. Navigate to **Slash Commands**. -4. Click **Create New Command**. -5. Fill the fields **Command**, **Short Description**, and **Usage Hint** as you prefer. -6. In the **Request URL** field, put the webhook URL. You can find it at the **Webhook** tab in the properties panel of your BPMN diagram. -7. Click **Save** to apply new changes. - -## Security considerations - -### Integrity - -Each Slack message is signed with HMAC using a Slack signing key. The **Slack inbound Connector** verifies HMAC integrity -for every incoming request. Read more about signing secrets in the -[Slack documentation](https://api.slack.com/authentication/verifying-requests-from-slack). - -## Appendix - -### Slack `app_mention` event example - -``` -POST https:///inbound/ -connection: close -content-type: application/json -content-length: 429 -x-slack-request-timestamp: 1687791117 -x-slack-signature: v0=aaaaaaaabbbbbbbbcccccccddddddeeeeeeffffffff -accept: application/json,*/* -accept-encoding: gzip,deflate -user-agent: Slackbot 1.0 (+https://api.slack.com/robots) -host: -{ - "token": "XXXXXXXX", - "team_id": "XXXXXXXX", - "api_app_id": "XXXXXXXX", - "event": { - "client_msg_id": "ffb7ded2-6f55-468d-926f-cad3195c8056", - "type": "app_mention", - "text": "<@XXXXXXXX> say hello", - "user": "XXXXXXXX", - "ts": "11111111.2222222", - "blocks": [ - { - "type": "rich_text", - "block_id": "rarsi", - "elements": [ - { - "type": "rich_text_section", - "elements": [ - { - "type": "user", - "user_id": "XXXXXXXX" - }, - { - "type": "text", - "text": " say hello" - } - ] - } - ] - } - ], - "team": "XXXXXXXX", - "thread_ts": "1687864866.335329", - "parent_user_id": "XXXXXXXX", - "channel": "XXXXXXXX", - "event_ts": "1687866358.496959" - }, - "type": "event_callback", - "event_id": "XXXXXXXX", - "event_time": 1687866358, - "authorizations": [ - { - "enterprise_id": null, - "team_id": "XXXXXXXX", - "user_id": "XXXXXXXX", - "is_bot": true, - "is_enterprise_install": false - } - ], - "is_ext_shared_channel": false, - "event_context": "XXXXXXXX" -} -``` - -### Slack slash command example - -Given the following command is executed: `/test123 test`. - -``` -POST https:///inbound/ -connection: close -content-type: application/x-www-form-urlencoded -content-length: 429 -x-slack-request-timestamp: 1687792480 -x-slack-signature: v0=aaaaaaaabbbbbbbbcccccccddddddeeeeeeffffffff -accept: application/json,*/* -accept-encoding: gzip,deflate -user-agent: Slackbot 1.0 (+https://api.slack.com/robots) -host: -token=qQqQqQqQqQqQqQqQqQ -&team_id=T05ABCDEFG -&team_domain=yourdomain -&channel_id=C05QQQQQQ -&channel_name=channel1 -&user_id=U05AAAAAAA -&user_name=your.user -&command=%2Ftest123 -&text=test -&api_app_id=A05DDDDDDD -&is_enterprise_install=false -&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FXXXXXXXXX%2FYYYYYYYYYYY%2FZZZZZZZZ -&trigger_id=111111111.222222222.33333333 -``` - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/twilio.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/twilio.md deleted file mode 100644 index 75a7c8af987..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/twilio.md +++ /dev/null @@ -1,330 +0,0 @@ ---- -id: twilio -title: Twilio Connector -sidebar_label: Twilio Connector -description: Integrate your BPMN service with Twilio's messaging API to send SMS messages, get messages, and more. 
---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -The **Twilio Connector** allows you to integrate your BPMN service with Twilio's messaging API. With this Connector, you can send SMS messages, get messages, and more. This documentation will guide you through the process of setting up and using the **Twilio Connector**. - -## Prerequisites - -Before you can use the Twilio Connector, create a Twilio account and obtain an account SID and auth token from the [Twilio Console](https://www.twilio.com/console). You will also need to have a phone number to use as the sender for your SMS messages. - -:::note -Use Camunda secrets to store your account SID and auth token so you don't expose sensitive information directly from the process. See [managing secrets](https://docs.camunda.org/manual/latest/user-guide/process-engine/secrets/) to learn more. -::: - -## Create a Twilio Connector task - -To use the Twilio Connector in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. Follow our [guide to using Connectors](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/) to learn more. - -## Make your Twilio Connector executable - -To work with the Twilio Connector, choose the required operation type in the **Operation** section and complete the mandatory fields highlighted in red in the Connector properties panel. - -### Operation - -Choose an operation type of either sendSms, listMessages, or getMessage in the **Operation** section: - -- **Send SMS**: Send an SMS message to a specified phone number from your Twilio account. -- **List messages**: Retrieve a list of messages sent from your Twilio account within specified filters. -- **Get message**: Retrieve the details of a specific message sent from your Twilio account. - -## Authentication - -To access the Twilio API, the Connector needs the appropriate credentials. The following authentication options are available: - -- **Account SID**: Provide the Account SID for your Twilio account. -- **Auth Token**: Provide the Auth Token for your Twilio account. - -**OR** - -- **API Key**: Provide the API Key for your Twilio account. -- **API Secret**: Provide the API Secret for your Twilio account. - -The Account SID and Auth Token or API Key and API secret are required properties and must be provided to use the Connector. If these properties are not set, the Connector will not be able to authenticate with the Twilio API. - -For more information on authentication and security in Twilio, refer to the [Twilio documentation](https://www.twilio.com/docs/usage/security). - -## Required fields - -### Send SMS operation - -- `Body`: The content of the SMS message. -- `To number`: The phone number that you want to send the SMS message to. -- `From number`: The phone number to use as the sender of the SMS message. - -:::note -See the [Twilio documentation](https://www.twilio.com/docs/sms/send-messages) for more details. -::: - -### List messages operation - -- `Date sent after`: (Optional) The date and time to start retrieving messages from. Messages sent on or after this date and time will be included in the results. The date and time should be in ISO 8601 format, such as `2023-04-19T08:30:00Z`. -- `Date sent before`: (Optional) The date and time to stop retrieving messages at. 
Messages sent before this date and time will be included in the results. The date and time should be in ISO 8601 format, such as `2023-04-19T08:30:00Z`. -- `From`: (Optional) The phone number that the message was sent from. Only messages sent from this phone number will be included in the results. -- `To`: (Optional) The phone number that the message was sent to. Only messages sent to this phone number will be included in the results. -- `Page size`: (Optional) The maximum number of messages to retrieve per page. This value must be between 1 and 1000. - -:::note -See the Twilio documentation on [filtering by date sent](https://www.twilio.com/docs/sms/api/message-resource?code-sample=code-read-list-messages-filter-by-before-sent-date&code-language=curl&code-sdk-version=json) and [getting filters](https://www.twilio.com/docs/sms/api/message-resource?code-sample=code-read-list-messages-matching-filter-criteria&code-language=curl&code-sdk-version=json) for more information. -::: - -### getMessage operation - -- `Message SID`: The SID of the message you want to retrieve. See the [Twilio documentation](https://www.twilio.com/docs/sms/api/message-resource?code-sample=code-fetch-message&code-language=curl&code-sdk-version=json) for more details. - -## Handle Connector response - -The **Twilio Connector** is a protocol Connector built on top of the HTTP REST Connector. Therefore, handling the response is still applicable and can be done as described in the [HTTP REST Connector response documentation](/components/connectors/protocol/rest.md#response). - -When using the **Twilio connector**, the response from the Twilio API will be available in a temporary local response variable. This variable can be mapped to the process by specifying the Result Variable. - -For example, if you use the **Send SMS Message** method in the Twilio Connector, the response may look like this: - -```json -{ - "status": 201, - "headers": { - "content-type": "application/json" - }, - "response": { - "sid": "SM1234567890", - "date_created": "2023-04-18T15:30:00Z", - "date_updated": "2023-04-18T15:30:00Z", - "date_sent": null, - "account_sid": "AC1234567890", - "from": "+1234567890", - "to": "+0987654321", - "body": "Hello, World!", - "status": "queued", - "num_segments": "1", - "direction": "outbound-api", - "api_version": "2010-04-01", - "price": null, - "price_unit": "USD", - "error_code": null, - "error_message": null, - "uri": "/2010-04-01/Accounts/AC1234567890/Messages/SM1234567890.json", - "subresource_uris": { - "media": "/2010-04-01/Accounts/AC1234567890/Messages/SM1234567890/Media.json" - } - } -} -``` - -In this example, the response variable contains an SID attribute that uniquely identifies the message that was sent. - -You can choose to unpack the content of your response into multiple process variables using the **Result Expression**, which is a FEEL Context Expression. - -The Result Expression allows you to access specific attributes from the response and assign them to process variables that can be used in subsequent steps of your process. - -```feel -= { - sid: response.body.sid, - date_created: response.body.date_created, - from: response.body.from, - to: response.body.to, - body: response.body.body -} -``` - -In this example, we are using the Result Expression to extract the `sid`, `date_created`, `from`, `to`, and `body` attributes from the response variable and assign them to process variables with the same name. You can then use these variables in subsequent steps of your process. 
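-
-The same pattern applies to the other operations. For example, a hypothetical **Result Expression** for the **Get message** operation that keeps only the delivery status and error details might look like this (assuming the response has the same shape as the send example above; the variable names are illustrative):
-
-```feel
-= {
-  messageStatus: response.body.status,
-  errorCode: response.body.error_code,
-  errorMessage: response.body.error_message
-}
-```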
- -:::note -The syntax for accessing attributes in the Result Expression may vary depending on the structure of your response object. You can refer to the [FEEL Context Expression](/components/modeler/feel/language-guide/feel-context-expressions.md) documentation for more information on how to use the **Result Expression**. -::: - -## Troubleshooting - -If you are having issues with the Twilio Connector, try the following: - -- Ensure your Twilio credentials are correct. -- Ensure you have set up your Twilio account and have a valid phone number. -- Ensure your configuration properties are set correctly. -- Check the logs for any error messages. -- Contact [Camunda support](https://camunda.com/services/support/) if you need further assistance. - -For more information on using Twilio, visit the [official documentation](https://www.twilio.com/docs). - -## Using Twilio Connector Best Practices - -When using the Twilio Connector in a BPMN process, it is important to keep in mind that there may be delays in message delivery or processing, and that some messages may fail to be delivered due to various reasons such as invalid phone numbers, network issues, etc. To ensure that messages are sent and delivered reliably, it is recommended to build your BPMN diagram to handle retries and error scenarios. - -One way to achieve this is by using an intermediate timer event to trigger a retry after a certain amount of time has elapsed, or by using an error boundary event to catch and handle errors in the process. - -:::note -To avoid performance issues, it is recommended to limit the number of retries and to implement proper error handling mechanisms in your process. -::: - -To learn more about implementing retry and error handling logic in your BPMN diagram, you can refer to the [Camunda BPMN examples](https://camunda.com/bpmn/examples/) page, which includes examples of BPMN diagrams with timer and error configurations. - - - - - -The **Twilio Webhook Connector** is an inbound Connector that enables you to start a BPMN process instance triggered by a [Twilio event](https://www.twilio.com/docs/usage/webhooks). - -## Create a Twilio Webhook Connector task - -1. Start building your BPMN diagram. You can use the **Twilio Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. -2. Select the applicable element and change its template to a **Twilio Webhook Connector**. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the webhook. -6. Navigate to the **Webhooks** tab in the properties panel to see the webhook URL. -7. Run the process if you use the **Twilio Webhook Intermediate Catch Event Connector**, and only deploy the process if the diagram starts from the **Start Event**. - -## Make your Twilio Webhook Connector for receiving messages executable - -### Fill properties in the Webhook Configuration section - -1. Choose one of the required methods in the **Webhook method** property. For example, if you know the webhook will be triggered by the **POST** method, choose **POST**. Alternatively, if it is not essential to specify a specific method for the webhook trigger, select **ANY**. -2. Configure the **Webhook ID**. By default, the **Webhook ID** is pre-filled with a random value. This value will be part of the Webhook URL. 
For more details about Twilio Webhook URLs, refer to the section below on [activating the Twilio Webhook Connector by deploying your diagram](#activate-the-twilio-webhook-connector-by-deploying-your-diagram). -3. Select **Enabled** in **HMAC authentication** if you want to use HMAC authentication. After that, set the [Twilio Auth Token](https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them) as the shared secret key in the **HMAC secret key** field property. - -:::note -Use Camunda secrets to store your credentials securely. Refer to the [Camunda secrets documentation](/components/console/manage-clusters/manage-secrets.md) for more details. -::: - -### Fill properties in the **Activation** section - -1. (Optional) Configure the **Activation Condition**. For example, if an external message has the body: - - ``` - { - "body": { - "ApiVersion": "2010-04-01", - "FromCountry": "EU", - "Body": "Hello world", - "SmsStatus": "received" - ... - } - ... - } - ``` - - the **Activation Condition** value might look like this: - - ``` - =(request.body.SmsStatus="received") - ``` - - Leave this field empty to receive all messages every time. - -2. When using the **Twilio Webhook Connector** with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime, and the result is used to correlate the message. - -For example, if your correlation key is defined with a process variable named `myCorrelationKey`, and you want to correlate by the `Body` property in the request body, which contains: - -``` -{ - "body": { - "ApiVersion": "2010-04-01", - "FromCountry": "EU", - "Body": "Continue process", - "SmsStatus": "received" - ... - } - ... -} -``` - -your correlation key settings will look like this: - -- **Correlation key (process)**: `=myCorrelationKey` -- **Correlation key (payload)**: `=request.body.Body` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -## Activate the Twilio Webhook Connector by deploying your diagram - -Once you click the **Deploy** button, your Twilio Webhook will be activated and publicly available. - -The URLs of the exposed Twilio Webhooks adhere to the following pattern: - -`http(s):///inbound/` - -- `` is the URL of the Connectors component deployment. When using the Camunda 8 SaaS offering, this will typically contain your cluster region and cluster ID. -- `` is the ID (path) you configured in the properties of your Twilio Webhook Connector. - -:::note -If you make changes to your Twilio Webhook Connector configuration, you need to redeploy the BPMN diagram for the changes to take effect. -::: - -When you click on the event with the Twilio Webhook Connector applied to it, a new **Webhooks** tab will appear in the properties panel. -This tab displays the URL of the Twilio Webhook Connector for every cluster where you have deployed your BPMN diagram. - -:::note -The **Webhooks** tab is only supported in Web Modeler as part of the Camunda 8 SaaS offering. -You can still use the Twilio Webhook Connector in the Desktop Modeler or with Camunda 8 Self-Managed. 
-In that case, Twilio Webhook Connector deployments and URLs will not be displayed in the Modeler. -::: - -## Variable mapping - -The **Variable mapping** section allows you to configure the mapping of the webhook request to the process variables. - -- Use the **Result variable** to store the response in a process variable. For example, `myResultVariable`. -- Use the **Result expression** to map specific fields from the response into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). For example, given that the **Twilio Webhook Connector** is triggered with the webhook: - - ``` - { - "body": { - "ApiVersion": "2010-04-01", - "FromCountry": "EU", - "Body": "Hello world", - "SmsStatus": "received" - ... - } - ... - } - ``` - - and you would like to extract the `SmsStatus` as a process variable `mySmsStatus`, the **Result Expression** might look like this: - - ``` - = { - mySmsStatus: request.body.SmsStatus - } - ``` - -## Configure your Twilio account - -To set a webhook URL in Twilio for SMS, follow these steps: - -1. Log in to your Twilio account at [www.twilio.com/console](https://www.twilio.com/console). -2. Navigate to the **Phone Numbers** section, which you can find in the left-hand side menu. -3. Click on the phone number for which you want to set the webhook URL. -4. Scroll down to the **Messaging** section and locate the **A message comes in** field. -5. In the input box next to **A message comes in**, enter the URL where you want Twilio to send incoming SMS messages and choose the required method. -6. Save your changes. - -Once you have set the webhook URL, Twilio will send a `POST` or `GET` request to that URL whenever an incoming SMS message is received on the specified phone number. - -## Next steps - -- Learn more about [Twilio webhooks](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). -- Read the [Twilio webhooks FAQ](https://www.twilio.com/docs/usage/webhooks/webhooks-faq). -- Understand [Twilio webhooks security](https://www.twilio.com/docs/usage/webhooks/webhooks-security). -- Learn about [other Connectors available](./available-connectors-overview.md) in Camunda to integrate with different systems and services. -- Learn more about using Connectors [here](../use-connectors/index.md). -- Learn more about inbound Connectors [here](../use-connectors/inbound.md). - - - - diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/uipath.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/uipath.md deleted file mode 100644 index d62a9feba25..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/uipath.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -id: uipath -title: UiPath Connector -description: Orchestrate your UiPath Bots with Camunda to create new queue items and get the result from it. ---- - -The **UiPath Connector** allows you to orchestrate a UiPath bot from your BPMN process with [UiPath](https://cloud.uipath.com). - -## Prerequisites - -To use the **UiPath Connector**, you need to have a [UiPath](https://cloud.uipath.com) account and configure your organization settings. See the [automation cloud guide](https://docs.uipath.com/automation-cloud/docs/introduction) to learn more. 
- -## Create a UiPath Connector task - -To use a **UiPath Connector** in your process, either change the type of existing task using the wrench-shaped **Change type** context menu, or create a new Connector task by using the **Append Connector** context menu. Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Operation types - -The UiPath Connector currently supports two operation types in the **Operation type** dropdown list: _Add queue item_ and _Get queue item result by ID_. - -### Authentication - -You can choose among the available UiPath Connector authentication types according to your authentication requirements. - -### UiPath Connector (bearer token) - -#### Create a new Connector secret - -We advise you to keep your **Bearer Token** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `BEARER_TOKEN_UIPATH`) so you can reference it later in the Connector. - -#### Configure the bearer token - -Select the **UiPath Connector** and fill out the following properties under the **Authentication** section: - -1. Click **Bearer Token** in the **Authentication** section. -2. Set **Bearer** to the secret you created (i.e. `{{secrets.UIPATH_BEARER_TOKEN}}`). - -### UiPath Connector (OAuth token) - -#### Create a new Connector secret - -We advise you to keep your **Client ID** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `UIPATH_CLIENT_ID`) so you can reference it later in the Connector. - -#### Configure the OAuth Token - -Select the **UiPath Connector** and fill out the following properties under the **Authentication** section: - -1. Click **OAuth 2.0** in the **Authentication** section. -2. Set **Client ID** to the secret you created (i.e. `{{secrets.UIPATH_CLIENT_ID}}`). -3. Set **Client secret** to the secret you created (i.e. `{{secrets.UIPATH_CLIENT_SECRET}}`). -4. Choose **Client Authentication** from the dropdown menu (i.e. `Send client credentials in body`). - -Find more information about the OAuth client credentials flow in the [RFC reference](https://www.rfc-editor.org/rfc/rfc6749#section-4.4). - -### Add queue item - -This operation allows you to create a new item and add it to a queue from UiPath Orchestrator. To execute it, take the following steps: - -1. Select the operation **Add queue item** from the **Operation type** dropdown list. -2. Configure authentication as described in the [authentication](#authentication) section. -3. Fill out the input fields as described in the [configuration](#configuration) section. -4. Fill out the input fields as described in the [input](#input) section. -5. Fill out the response mapping as described in the [add queue item response](#add-queue-item-response) section. - -#### Configuration - -For this section, you must fill out the following fields: - -1. **Cloud URL**: Comes with a default value of `cloud.uipath.com`. You can always change it, if needed. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. -2. **Cloud organization**: The name of your organization. See [about organizations](https://docs.uipath.com/automation-cloud/docs/about-organizations) to learn more. To use a Connectors secret, use a double curly braces notation, e.g. 
`{{secrets.MY_SECRET_VALUE}}`. -3. **Cloud tenant**: The name of the tenant. See [about tenants](https://docs.uipath.com/automation-cloud/docs/about-tenants) to learn more. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. -4. **Organization Unit ID**: Click **Orchestrator** and you will find the id in the URL. For example, `https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/?tid=26929&fid=112233` where the **Organization Unit ID** is `112233`. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. - -#### Input - -For this section, fill out the following fields: - -1. **Queue Name**: The queue where the QueueItem object is to be added. Check [queues and transactions](https://docs.uipath.com/orchestrator/docs/about-queues-and-transactions) to learn more. -2. _(Optional)_ **Defer date**: The earliest date and time at which the item is available for processing. If empty, the item can be processed as soon as possible. Expected date format is `yyyy-MM-dd`. -3. _(Optional)_ **Due date**: The latest date and time at which the item should be processed. If empty, the item can be processed at any given time. Expected date format is `yyyy-MM-dd`. -4. _(Optional)_ **Priority**: Select a value from the dropdown list to represent the priority level of the queue item to be added. This property is a criterion for the prioritization of queue items, alongside **Deadline** and **Postpone**. -5. _(Optional)_ **Specific Content for UiPath Job**: Data that will be passed in to the job. This should be in JSON format. - -``` -= { - "Name":"testItemName", - "Value":"testItemValue" - } -``` - -6. _(Optional)_ **Reference**: A string reference for the queue item. - -#### Add queue item response - -The operation **Add Queue Item** returns information about the newly created item in the queue. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. It comes with a pre-filled value of `= {itemId: response.body.Id}`. To use operation _Get queue item result by ID_, you need an `itemId`. This expression will add it in the context for you. Learn more in [get queue item result by ID](#get-queue-item-result-by-id). 
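For instance, a slightly extended **Result Expression** sketch (assuming you also want to expose the item's `Status` and `Reference` fields, which are part of the response shown below) might look like this:

```
= {
  itemId: response.body.Id,
  itemStatus: response.body.Status,
  itemReference: response.body.Reference
}
```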
- -Response example: - -``` -= { - "status":201, - "headers":{ - "date":"Fri, 20 Jan 2023 10:13:20 GMT", - "content-length":878, - "server":"cloudflare", - "expires":"-1", - "cf-ray":"78c70973ce68153b-CDG", - "api-supported-versions":"15.0", - "x-frame-options":"Deny", - "x-download-options":"noopen", - "x-correlation-id":"7a211afe-53f1-4225-b77c-0fa477912685", - "cf-cache-status":"DYNAMIC", - "x-uipath-correlation-id":"undefined", - "pragma":"no-cache", - "strict-transport-security":"max-age=15724800; includeSubDomains", - "request-context":"appId=cid-v1:354c7cb9-ae5a-4d16-84a7-f13242bbac6d", - "content-security-policy":"default-src 'self';script-src 'self' https://orch-cdn.uipath.com https://use.typekit.net/ https://d2c7xlmseob604.cloudfront.net https://platform-cdn.uipath.com https://*.uipath.com https://*.pendo.io;style-src 'self' 'unsafe-inline' https://orch-cdn.uipath.com https://fonts.googleapis.com/css https://use.typekit.net https://p.typekit.net/ https://platform-cdn.uipath.com https://content.usage.uipath.com;img-src 'self' data: https://orch-cdn.uipath.com https://s.gravatar.com https://secure.gravatar.com https://*.wp.com https://*.googleusercontent.com https://i.ytimg.com https://platform-cdn.uipath.com https://*.pendo.io https://*.blob.core.windows.net https://*.amazonaws.com blob:;frame-src 'self' https://*.uipath.com https://*.pendo.io;font-src 'self' https://orch-cdn.uipath.com https://use.typekit.net/ https://fonts.gstatic.com https://platform-cdn.uipath.com data:;connect-src 'self' wss: https://orch-cdn.uipath.com https://primer.typekit.net https://use.typekit.net/ https://sentry.io https://studio-feedback.azure-api.net https://app.launchdarkly.com https://clientstream.launchdarkly.com https://events.launchdarkly.com https://api.smartling.com https://platform-cdn.uipath.com https://*.service.signalr.net https://*.uipath.com https://*.pendo.io https://cloud.uipath.com https://storage.googleapis.com https://*.blob.core.windows.net https://*.amazonaws.com dc.services.visualstudio.com;worker-src 'self' blob:", - "x-xss-protection":"1", - "x-content-type-options":"nosniff", - "x-robots-tag":"noindex,nofollow", - "content-type":"application/json; odata.metadata=minimal; odata.streaming=true", - "location":"https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/odata/QueueItems(436141352)", - "connection":"keep-alive", - "cache-control":"no-cache, no-store, must-revalidate", - "odata-version":"4.0" - }, - "body":{ - "@odata.context":"https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/odata/$metadata#QueueItems/$entity", - "QueueDefinitionId":165001, - "Encrypted":false, - "Status":"New", - "ReviewStatus":"None", - "Key":"2196eb07-c96a-4f47-a734-326dd5d58a9d", - "Reference":"test", - "Priority":"Low", - "DeferDate":"2023-01-12T00:00:00Z", - "SecondsInPreviousAttempts":0, - "RetryNumber":0, - "SpecificData":"{\"DynamicProperties\":{\"test\":\"test\"}}", - "CreationTime":"2023-01-20T10:13:20.6603953Z", - "RowVersion":"AAAAAE2f4GY=", - "OrganizationUnitId":112233, - "Id":436141352, - "SpecificContent":{ - "test":"test" - } - } -} -``` - -### Get queue item result by ID - -This operation allows you get an item from your UiPath Orchestrator. To execute it, take the following steps: - -1. Select the operation **Get Queue Item result by ID** from the dropdown list **Operation type**. -2. Configure authentication as described in the [authentication](#authentication) section. -3. Fill out the **Item ID** field. 
This field supports FEEL, so you're able to fetch an item ID from the process context; for example, if you exported it while [adding a new queue item](#add-queue-item). - -#### Get queue item result by ID response - -Given you have a queue item ID previously added to a queue, the operation **Get queue item result by ID** returns information about a certain item. - -You can use an output mapping to map the response: - -1. Use **Result Variable** to store the response in a process variable. For example, `myResultVariable`. -2. Use **Result Expression** to map fields from the response into process variables. It comes with a pre-filled value of `= {itemStatus: response.body.value[1].Status}`. You will see the `itemStatus` in the process variables. Its value will let you know if the item was processed or not. - -Response example: - -``` -{ - "status":200, - "headers":{ - "date":"Fri, 20 Jan 2023 10:13:21 GMT", - "server":"cloudflare", - "expires":"-1", - "transfer-encoding":"chunked", - "cf-ray":"78c709774a112a34-CDG", - "api-supported-versions":"15.0", - "x-frame-options":"Deny", - "x-download-options":"noopen", - "x-correlation-id":"8db50244-5f55-4598-82d3-1d6a00f806b0", - "cf-cache-status":"DYNAMIC", - "x-uipath-correlation-id":"undefined", - "pragma":"no-cache", - "strict-transport-security":"max-age=15724800; includeSubDomains", - "request-context":"appId=cid-v1:354c7cb9-ae5a-4d16-84a7-f13242bbac6d", - "content-security-policy":"default-src 'self';script-src 'self' https://orch-cdn.uipath.com https://use.typekit.net/ https://d2c7xlmseob604.cloudfront.net https://platform-cdn.uipath.com https://*.uipath.com https://*.pendo.io;style-src 'self' 'unsafe-inline' https://orch-cdn.uipath.com https://fonts.googleapis.com/css https://use.typekit.net https://p.typekit.net/ https://platform-cdn.uipath.com https://content.usage.uipath.com;img-src 'self' data: https://orch-cdn.uipath.com https://s.gravatar.com https://secure.gravatar.com https://*.wp.com https://*.googleusercontent.com https://i.ytimg.com https://platform-cdn.uipath.com https://*.pendo.io https://*.blob.core.windows.net https://*.amazonaws.com blob:;frame-src 'self' https://*.uipath.com https://*.pendo.io;font-src 'self' https://orch-cdn.uipath.com https://use.typekit.net/ https://fonts.gstatic.com https://platform-cdn.uipath.com data:;connect-src 'self' wss: https://orch-cdn.uipath.com https://primer.typekit.net https://use.typekit.net/ https://sentry.io https://studio-feedback.azure-api.net https://app.launchdarkly.com https://clientstream.launchdarkly.com https://events.launchdarkly.com https://api.smartling.com https://platform-cdn.uipath.com https://*.service.signalr.net https://*.uipath.com https://*.pendo.io https://cloud.uipath.com https://storage.googleapis.com https://*.blob.core.windows.net https://*.amazonaws.com dc.services.visualstudio.com;worker-src 'self' blob:", - "x-xss-protection":"1", - "x-content-type-options":"nosniff", - "x-robots-tag":"noindex,nofollow", - "content-type":"application/json; odata.metadata=minimal; odata.streaming=true", - "connection":"keep-alive", - "cache-control":"no-cache, no-store, must-revalidate", - "odata-version":"4.0" - }, - "body":{ - "@odata.context":"https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/odata/$metadata#QueueItems", - "@odata.count":1, - "value":[ - { - "QueueDefinitionId":165001, - "Encrypted":false, - "Status":"New", - "ReviewStatus":"None", - "Key":"2196eb07-c96a-4f47-a734-326dd5d58a9d", - "Reference":"test", - "Priority":"Low", - 
"DeferDate":"2023-01-12T00:00:00Z", - "SecondsInPreviousAttempts":0, - "RetryNumber":0, - "SpecificData":"{\"DynamicProperties\":{\"test\":\"test\"}}", - "CreationTime":"2023-01-20T10:13:20.66Z", - "RowVersion":"AAAAAE2f4GY=", - "OrganizationUnitId":1964413, - "OrganizationUnitFullyQualifiedName":"MyCorporateWorkspace", - "Id":436141352, - "SpecificContent":{ - "test":"test" - } - } - ] - } -} -``` - -## Appendix - -### Using UiPath Connector best practice - -There is no guarantee a queue item will be processed right away. In that case, we suggest building your BPMN diagram to periodically retry polling. -To learn more, see an entry _Solution with Timer and Loop_ at [Camunda BPMN examples](https://camunda.com/bpmn/examples/) page. - -:::note -To avoid performance issues, it is recommended to limit the number of loop retries. -::: diff --git a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/whatsapp.md b/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/whatsapp.md deleted file mode 100644 index ecab54ff850..00000000000 --- a/versioned_docs/version-8.2/components/connectors/out-of-the-box-connectors/whatsapp.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: whatsapp -title: WhatsApp Connector -sidebar_label: WhatsApp Connector -description: Send messages with [WhatsApp Business](https://business.whatsapp.com/) from your BPMN process. ---- - -The **WhatsApp Connector** is an outbound Connector that allows you to send messages to users from your BPMN process. - -## Prerequisites - -To start using the **WhatsApp Connector**, you must have an approved Meta WhatsApp application; follow the [official guide](https://developers.facebook.com/docs/whatsapp/cloud-api/get-started) to obtain one. - -:::note -WhatsApp webhooks are currently not supported by Camunda. -::: - -## Create a WhatsApp Connector task - -To use the **WhatsApp Connector** in your process, either change the type of existing task by clicking on it and using -the wrench-shaped **Change type** context menu icon, or create a new Connector task by using the **Append Connector** context menu. -Follow our [guide to using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Authentication - -The **WhatsApp Connector** supports authentication through Meta access tokens. Take a look at [this blog post](https://developers.facebook.com/blog/post/2022/12/05/auth-tokens/) to learn more on how to obtain one for yourself. - -Once the token is obtained, put it in the **Access token** field of the **Authentication** section. - -:::note -Use Camunda secrets to avoid exposing your WhatsApp access token credentials as plain text. -See our documentation on [managing secrets](/components/console/manage-clusters/manage-secrets.md) to learn more. -::: - -## Sender and recipient - -Your WhatsApp application can have multiple phone numbers registered. Set your phone number ID in the **Sender phone number ID** field -of the **Payload** section. You can find the phone number ID at the Meta developer portal. - -In the **Recipient phone number** field, enter a phone number you wish to send message to. - -## Select operation to execute - -You can select one of the following operations from the **Message type** dropdown. - -### Plain text - -When this option is selected, write any arbitrary text in the **Message text** field. This message will be sent to the target recipient. 
- -### Message template - -When this option is selected, it is implied that you already have an approved WhatsApp message template. -Read more bout message templates at the [official page](https://developers.facebook.com/docs/whatsapp/message-templates/guidelines/). - -1. In the field **Template name**, set the name of your WhatsApp template. For example, **my_delivery_scheduled_template**. -2. In the field **Template language code**, specify the language code of your template. For example, **en_US**. -3. In the field **Header variables**, set the values for your variables only if the header has any. For example, `{"type": "text","text": "My header param"}`. -4. In the field **Body variables**, set the values for your variables only if the body has any. For example, `{"type": "text","text": "My body param"}`. - -See the [official Meta guide](https://developers.facebook.com/docs/whatsapp/cloud-api/guides/send-message-templates/) for more information and examples. - -## Handle Connector response - -The **WhatsApp Connector** is a protocol Connector, meaning it is built on top of the **HTTP REST Connector**. Therefore, -handling response is still applicable [as described](/components/connectors/protocol/rest.md#response). diff --git a/versioned_docs/version-8.2/components/connectors/protocol/graphql.md b/versioned_docs/version-8.2/components/connectors/protocol/graphql.md deleted file mode 100644 index 94ae3d77b5b..00000000000 --- a/versioned_docs/version-8.2/components/connectors/protocol/graphql.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -id: graphql -title: GraphQL Connector -sidebar_label: GraphQL Connector -description: Execute a GraphQL query or mutation from your BPMN process. ---- - -The **GraphQL Connector** is an outbound protocol Connector that allows you to execute a GraphQL query or mutation from your BPMN process. - -## Prerequisites - -The GraphQL Connector allows you to connect to a GraphQL API endpoint. To use the GraphQL Connector, you need to know the GraphQL endpoint URL, authentication, and available API methods. - -## Create a GraphQL Connector task - -To use a **GraphQL Connector** in your process, either change the type of existing task by clicking on it and using the wrench-shaped **Change type** context menu, or create a new Connector task by using the **Append Connector** context menu. Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your GraphQL Connector executable - -To make the **GraphQL Connector** executable, fill out the mandatory fields highlighted in red in the properties panel: - -:::note -All the mandatory and non-mandatory fields are covered in the upcoming sections. Depending on the authentication selection you make, more fields might be required; this is covered in the next section. -::: - -### Authentication - -You can choose among the available authentication types according to your authentication requirements using the **Authentication** section. - -### None - -Click **None** in the **Authentication** section. No extra authentication configuration is required. - -### Basic - -#### Create a new Connector secret - -We advise you to keep your **Password** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `GRAPHQL_PASSWORD`) so you can reference it later in the Connector. 
- -### Configure Basic Authentication - -Select the **GraphQL Connector** and fill out the following properties under the **Authentication** section: - -1. Click **Basic** in the **Authentication** section. -2. Set **Username** (i.e. `{{secrets.GRAPHQL_USERNAME}}`). -3. Set **Password** to the secret you created (i.e. `{{secrets.GRAPHQL_PASSWORD}}`). - -### Bearer Token - -#### Create a new Connector secret - -We advise you to keep your **Bearer Token** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `GRAPHQL_BEARER_TOKEN`) so you can reference it later in the Connector. - -#### Configure the Bearer Token - -Select the **GraphQL Connector** and fill out the following properties under the **Authentication** section: - -1. Click **Bearer Token** in the **Authentication** section. -2. Set **Bearer** to the secret you created (i.e. `{{secrets.GRAPHQL_BEARER_TOKEN}}`). - -### OAuth token - -#### Create a new Connector secret - -We advise you to keep your **OAUTH_TOKEN_ENDPOINT** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `OAUTH_TOKEN_ENDPOINT`) so you can reference it later in the Connector. - -#### Configure the OAuth Token - -Select the **GraphQL Connector** and fill out the following properties under the **Authentication** section: - -1. Click **OAuth 2.0** in the **Authentication** section. -2. Set **OAuth Token Endpoint** to the secret you created (i.e. `{{secrets.OAUTH_TOKEN_ENDPOINT}}`). -3. Set **Client ID** to the secret you created (i.e. `{{secrets.CLIENT_ID}}`). -4. Set **Client secret** to the secret you created (i.e. `{{secrets.CLIENT_SECRET}}`). -5. (Optional) Set **Scopes** (i.e. `read:clients`). Depending on the OAuth provider you're using, this may or may not be required. -6. Set **Audience** to the secret you created (i.e. `{{secrets.AUDIENCE}}`). This is an optional field depending on the OAuth provider you're using. -7. Choose **Client Authentication** from the dropdown menu (i.e. `Send client credentials in body`). - -Find more information about the OAuth client credentials flow in the [RFC reference](https://www.rfc-editor.org/rfc/rfc6749#section-4.4). - -## HTTP endpoint - -Under the **HTTP Endpoint** section, fill in the **URL** with your desired endpoint and select the desired **Method**. - -## GraphQL query - -### Query/Mutation - -Insert your query or mutation you wish to execute here. This must be a syntactically valid instruction. For more details, see [the official documentation](https://graphql.org/learn/queries/). - -You can use [arguments](https://graphql.org/learn/queries/#arguments), [aliases](https://graphql.org/learn/queries/#aliases), [directives](https://graphql.org/learn/queries/#directives), and [fragments](https://graphql.org/learn/queries/#fragments) as well. -For example: - -```text -query Root($id: ID) { - person (id: $id) { - id - name - } -} -``` - -:::note -Secrets are currently not supported in the **Query/Mutation** of a GraphQL Connector. -::: - -:::note -You can test your queries on publicly available GraphQL API [here](https://studio.apollographql.com/public/star-wars-swapi/home?variant=current). 
-::: - -#### Example - -```text -query Query { - allFilms { - films { - title - director - releaseDate - speciesConnection { - species { - name - classification - } - } - } - } -} -``` - -### Variables - -You can specify [variables](https://graphql.org/learn/queries/#variables) to your queries/mutations. - -The **Variables** field can be configured using the [FEEL Map](/components/modeler/feel/language-guide/feel-data-types.md#context) data type. - -```text -= { - "id": "{{secrets.GRAPHQL_ENTITY_ID}}", - "includeDroids": false, -} -``` - -:::note -Secrets are not like regular variables and must be wrapped in double quotes (`"`) when used in an expression. -::: - -#### Example - -Query: - -```text -query Root($id: ID, $includeGender: Boolean!) { - person (id: $id) { - name, - height, - gender @include(if: $includeGender) - } -} -``` - -Variables: - -```text -{ - "id": "cGVvcGxlOjI=", - "includeGender": false -} -``` - -### Network communication timeouts - -- **Connection timeout in seconds** determines the time frame in which the client will try to establish a connection with the server. If you do not specify a value, the system uses the default of 20 seconds. For cases where you need to wait indefinitely, set this value to 0. - -- **Read timeout in seconds** is the amount of time the client will wait to read data from the server after the connection has been made. The default is also set to 20 seconds. To allow an unlimited wait time for slow responses, set this to 0. - -- **Write timeout in seconds** controls how long the client will wait to successfully send data to the server. The default setting for this is 0, indicating that there is no limit and the client will wait indefinitely for the operation to complete. - -## Response mapping - -The HTTP response will be available in a temporary local `response` variable. This variable can be mapped to the process by specifying the **Result Variable**. - -The following fields are available in the `response` variable: - -- **status**: Response status -- **body**: Response body of your request -- **headers**: Response headers - -Additionally, you can choose to unpack the content of your `response` into multiple process variables using the **Result Expression**, which is a [FEEL Context Expression](/components/modeler/feel/language-guide/feel-context-expressions.md). - -```text -= { - person: response.body.data.person -} -``` - -The next steps in your process will have access to the `graphqlQueryResponse` variable that contain the full response and the mapped variable from the result expression: `person`. diff --git a/versioned_docs/version-8.2/components/connectors/protocol/http-webhook.md b/versioned_docs/version-8.2/components/connectors/protocol/http-webhook.md deleted file mode 100644 index 7920728d470..00000000000 --- a/versioned_docs/version-8.2/components/connectors/protocol/http-webhook.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -id: http-webhook -title: HTTP Webhook Connector -sidebar_label: HTTP Webhook Connector -description: Start a process instance with your custom webhook configuration, triggered by an external HTTP call. ---- - -The **HTTP Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by external HTTP call. - -:::note -If you have used the HTTP Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. 
-Please refer to the [update guide](/guides/update-guide/connectors/060-to-070.md) for more details. -::: - -## Create an HTTP Webhook Connector event - -1. Start building your BPMN diagram. You can use HTTP Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. -2. Select the applicable element and change its template to an HTTP Webhook. -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the webhook. -6. Navigate to the **Webhooks** tab in the properties panel to see the webhook URL. - -## Make your HTTP Webhook Connector executable - -1. In the **Webhook Configuration** section, configure the **Webhook ID**. By default, **Webhook ID** is pre-filled with a random value. This value will be part of the Webhook URL. You will find more details about HTTP Webhook URLs [below](#activate-the-http-webhook-connector-by-deploying-your-diagram). -2. Configure [**HMAC authentication**](https://en.wikipedia.org/wiki/HMAC) if required in the **Authentication** section: - If you require HMAC authentication for your webhook, you can set up the HMAC shared secret key, HMAC header, HMAC hash algorithm, and an array of HMAC scopes. - - - **HMAC secret key**: Set the HMAC shared secret key that will be used to calculate the message hash. The value of this key should be provided by the webhook provider to ensure secure communication. - - **HMAC header**: Set the HMAC header whose value contains an encrypted hash message. The exact value of this header should be provided by the external caller when invoking your webhook. - - **HMAC hash algorithm**: Select the HMAC hash algorithm to be used in the HMAC signature calculation. The exact value of this algorithm should also be provided by the external caller when invoking your webhook. - - **HMAC Scopes** (optional): Here, you can define an array of HMAC scopes to specify which parts of the webhook request are included in the HMAC signature calculation. The available HMAC scopes are: - - - `BODY` (default value): Includes the body of the webhook request in the HMAC signature calculation. - - `URL`: Includes the URL of the webhook request in the HMAC signature calculation. - - `PARAMETERS`: Includes the query parameters of the URL in the HMAC signature calculation. - - Example: `=["URL","PARAMETERS"]` - - Based on the selected HMAC scopes and the HTTP method used in the webhook request, the system will automatically determine the appropriate HMAC strategy to be used. The supported HMAC strategies are: - - - **Body Encoding Strategy (default)**: This strategy is used when the HMAC scopes only include `BODY` or empty. The signature data for the HMAC signature is generated from the body of the webhook request. - - - **URL and Parameters Encoding Strategy**: This strategy is used when the HMAC scopes include `URL`, and `PARAMETERS`, and the HTTP method is `GET`. The signature data for the HMAC signature is generated by combining the URL and the parameters of the webhook request. - - - **URL and Body Encoding Strategy**: This strategy is used when the HMAC scopes include `URL`, `BODY`. When the HTTP method is `GET`, it uses the **URL and Parameters Encoding Strategy** instead. The signature data for the HMAC signature is generated by combining the URL and the body of the webhook request. 
- - **Example for URL and Parameters Encoding Strategy**: - Let's consider a sample webhook request: - - ``` - HTTP Method: GET - Webhook URL: "https://example.com/webhook?id=123456&name=John%20Doe" - ``` - - In this example, the HMAC strategy will combine the URL and the query parameters to generate the signature data for the HMAC signature. The URL-encoded query parameters will be sorted alphabetically, and then, they will be concatenated with the URL: Signature Data: `https://example.com/webhook?name=John%20Doe&id=123456` - The `Signature Data` will then be used to calculate the HMAC signature using the provided secret key and hash algorithm. - - **Example for URL and Body Encoding Strategy**: - Let's consider another sample webhook request: - - ``` - HTTP Method: POST - Webhook URL: `https://example.com/webhook` - Webhook Body: `{"id": 123456, "name": "John Doe", "age": 30}` - ``` - - In this example, the HMAC strategy will combine the URL and the body of the webhook request to generate the signature data for the HMAC signature: - Signature Data: `https://example.com/webhook{"id":123456,"name":"John Doe","age":30}` - The `Signature Data` will then be used to calculate the HMAC signature using the provided secret key and hash algorithm. - -3. Configure authorization if required in the **Authorization** section. The HTTP Webhook Connector supports the following authorization methods: - -- **Basic** - The incoming requests must contain an `Authorization` header that contains the word `Basic` followed by a space and a base64-encoded string username:password. - - - Set the **Username** and **Password** properties which will be used to validate the incoming requests. - - Provide the values in plain text, not base64-encoded. - -- **API Key** - The API key can be provided anywhere in the request, for example, in the `Authorization` header or in the request body. - - - Set the **API Key** property to the expected value of the API key. - - Set the **API Key locator** property that will be evaluated against the incoming request to extract the API key. [See the example](#how-to-configure-api-key-authorization). - -- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer {JWT_TOKEN}. - - - Set JWK URL which is used as a well-known public URL to fetch the [JWKs](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets). - - Set JWT role property expression which will be evaluated against the content of the JWT to extract the list of roles. See more details on extracting roles from JWT data [here](#how-to-extract-roles-from-jwt-data). - - Set the required roles which will be used to validate if the JWT contains all required roles. See more details on extracting roles from JWT data [here](#how-to-extract-roles-from-jwt-data). - -4. Configure **Activation Condition**. For example, given external caller triggers a webhook endpoint with the body `{"id": 1, "status": "OK"}`, the **Activation Condition** value might look like `=(request.body.status = "OK")`. Leave this field empty to trigger your webhook every time. -5. Use **Variable Mapping** to map specific fields from the request into process variables using [FEEL](/components/modeler/feel/what-is-feel.md). 
-   For example, given the external caller triggers a webhook endpoint with the body `{"id": 1, "status": "OK"}` and you would like to extract `id` as a process variable `myDocumentId`, the **Result Expression** might look like this: - -``` -= { -  myDocumentId: request.body.id -} -``` - -6. If you are using the HTTP Webhook Connector with an **Intermediate Catch Event**, fill in the **Correlation key (process)** and **Correlation key (payload)**. - -- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **Message Intermediate Catch Event**. -- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This expression is evaluated in the Connector Runtime and the result is used to correlate the message. - -For example, given that your correlation key is defined with the `orderId` process variable, and the request body contains `{"orderId": "123"}`, your correlation key settings will look like this: - -- **Correlation key (process)**: `=orderId` -- **Correlation key (payload)**: `=request.body.orderId` - -Learn more about correlation keys in the [messages guide](../../../concepts/messages). - -7. To avoid double message submission, you can set a unique message ID by using the `Message ID expression` field, - for example, `=request.body.orderId`. A request with the same value evaluated by the `Message ID expression` will be rejected. - -## Activate the HTTP Webhook Connector by deploying your diagram - -Once you click the **Deploy** button, your HTTP Webhook will be activated and publicly available. -You can trigger it by making a POST request to the generated URL. - -:::note -HTTP Webhook Connector currently supports only POST requests. -::: - -URLs of the exposed HTTP Webhooks adhere to the following pattern: - -`http(s)://<Connectors component URL>/inbound/<webhook ID>` - -- `<Connectors component URL>` is the URL of the Connectors component deployment. When using the Camunda 8 SaaS offering, this will typically contain your cluster region and cluster ID. -- `<webhook ID>` is the ID (path) you configured in the properties of your HTTP Webhook Connector. - -If you make changes to your HTTP Webhook Connector configuration, you need to redeploy the BPMN diagram for the changes to take effect. - -When you click on the event with the HTTP Webhook Connector applied to it, a new **Webhooks** tab will appear in the properties panel. -This tab displays the URL of the HTTP Webhook Connector for every cluster where you have deployed your BPMN diagram. - -:::note -The **Webhooks** tab is only supported in Web Modeler as part of the Camunda 8 SaaS offering. -You can still use the HTTP Webhook Connector in Desktop Modeler, or with Camunda 8 Self-Managed. -In that case, HTTP Webhook Connector deployments and URLs will not be displayed in Modeler. -::: - -### Example - -Consider a use case where you need to configure a GitHub webhook with an **HTTP Webhook Connector** in such a way that: (1) your BPMN process starts on every opened PR, and (2) the PR URL is exposed as a process variable. -Let's say you choose `mySecretKey` as a shared secret passphrase. GitHub [declares](https://docs.github.com/en/developers/webhooks-and-events/webhooks/securing-your-webhooks) that they use the `X-Hub-Signature-256` header for `SHA-256` HMAC. -Therefore, you would need to set the following: - -1. **Webhook ID**: any webhook ID that is unique to your cluster. This will generate a URL to trigger your webhook. For example, `myWebhookPath`. -2.
**HMAC Authentication**: `enabled`. -3. **HMAC Secret Key**: `mySecretKey` or `{{secrets.MY_GH_SECRET}}`. -4. **HMAC Header**: `X-Hub-Signature-256`. -5. **HMAC Algorithm**: `SHA-256`. -6. **HMAC Scopes**: `=["BODY"]` or leave empty. -7. **Activation Condition**: `=(request.body.action = "opened")`. -8. **Variable Mapping**: `={prUrl: request.body.pull_request.url}`. -9. Click **Deploy**. - -### How to configure API key authorization - -External callers can provide an API key anywhere in the requests. Some webhook providers use an `Authorization` header, while others pass the API key in the request body. -To support any scenario, you can configure the HTTP Webhook Connector to extract the API key from the request. - -Use the **API Key locator** field to provide a FEEL expression that will be evaluated against the request to extract the API key. -The result of this expression will be used as the API key and compared against the expected API key value. - -Use the **API Key** field to provide the expected API key value. - -#### API key locator examples - -Suppose an external caller triggers a webhook endpoint with the following request body: - -```json -{ - "id": 1, - "status": "OK", - "secret": "my_secret" -} -``` - -You want to extract the `secret` field and use it as the API key to authorize the webhook request. -In this case, you can set the **API Key locator** to: - -```feel -=request.body.secret -``` - -The expression above will be evaluated to `my_secret`, which will be used as the API key. - -Alternatively, you can use the **API Key locator** to extract the API key from the `Authorization` header: - -```feel -=request.headers.authorization -``` - -If your `Authorization` header contains the **Bearer** prefix, you can use the [`split`](/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md#splitstring-delimiter) function to remove it: - -```feel -=split(request.headers.authorization, " ")[2] -``` - -### How to extract roles from JWT data - -To extract roles from the JWT payload, specify the **JWT role property expression** using the FEEL expression syntax. - -:::note -This expression will be evaluated only against the JWT payload, therefore you cannot access process variables or secrets here. -::: - -#### JWT payload and role property expression example - -Let's observe a typical JWT payload example below: - -```json -{ - "iss": "https://idp.local", - "aud": "api1", - "sub": "5be86359073c434bad2da3932222dabe", - "client_id": "my_client_app", - "exp": 1786822616, - "iat": 1686819016, - "jti": "114f8c84c53703ac2120d302611e358c", - "roles": ["admin", "superadmin"], - "admin": true -} -``` - -To extract the roles you can set the **JWT role property expression** to: - -```feel -if admin = true then ["admin"] else roles -``` - -Note: the result of this expression should always be an array. - -In this particular case, the if statement is evaluated to true, the extracted roles will be: - -```feel -["admin"] -``` - -If you provide _["admin"]_ for **Required roles**, the message _can be correlated_. - -If you provide _["superadmin"]_ or _["admin","superadmin"]_, for **Required roles**, for example, the message _can NOT be correlated_ and the connector will throw an exception. - -:::note -For GitHub, there is a simplified [GitHub Webhook Connector](/components/connectors/out-of-the-box-connectors/github.md). -::: - -## Return data from your HTTP Webhook Connector - -Below, find several ways to return data from your Webhook Connector. 
- -### Verification expression - -Verification expression is used whenever a webhook needs to return response data **without** starting a process. -A common use-case may be a [one-time verification challenge](https://webhooks.fyi/security/one-time-verification-challenge). - -For example, consider the following verification challenge from [Slack](https://slack.com/): - -`{"token": "Jhj5dZrVaK7ZwHHjRyZWjbDl","challenge": "3eZbrw1aBm2rZgRNFdxV2595E9CY3gmdALWMmHkvFXO7tYXAYM8P","type": "url_verification"}` - -To confirm the Slack events subscription, you must return the following response: - -`HTTP 200 OK Content-type: application/json {"challenge":"3eZbrw1aBm2rZgRNFdxV2595E9CY3gmdALWMmHkvFXO7tYXAYM8P"}` - -To do so, the **Verification expression** field may look like: - -`=if request.body.type = "url_verification" then {"body": {"challenge": request.body.challenge}, "statusCode": 200} else null`. - -When working with `request` data, use the following references to access data: - -- Body: `request.body.`. -- Headers: `request.headers.`. -- URL parameters: `request.params.`. - -When working with response, you can use the following placeholders: - -- Body: `"body"`, for example `{"body": {"challenge": request.body.challenge}}`. -- Status code: `statusCode`, for example `{"statusCode": 201, "body": {"challenge": request.body.challenge}}`. -- Headers: `headers`, for example `{"headers": {"X-Challenge": request.body.challenge}, "body": {"challenge": request.body.challenge}}`. - -You can also use FEEL expressions to modify the data you return. - -### Response body expression - -Response body expression can be used to return data after webhook has been triggered. You can craft a response body -based on your needs. For example, given a webhook request `{"myDataKey1":"myValue1", "myDataKey2":"myValue2"}`, you can -return `myValue1` in a new key `myCustomKey` with a response body expression that may look like this: -`={"myCustomKey": request.body.myDataKey1}`. - -When working with `request` data, use the following references to access data: - -- Body: `request.body.`. -- Headers: `request.headers.`. -- URL parameters: `request.params.`. - -You can also use FEEL expressions to modify the data you return. diff --git a/versioned_docs/version-8.2/components/connectors/protocol/polling.md b/versioned_docs/version-8.2/components/connectors/protocol/polling.md deleted file mode 100644 index b6cdb0d8b51..00000000000 --- a/versioned_docs/version-8.2/components/connectors/protocol/polling.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -id: polling -title: HTTP Polling Connector -sidebar_label: HTTP Polling Connector -description: The HTTP Polling Connector polls an endpoint at regular intervals, enabling periodic data fetching as an intermediate step in your BPMN processes. ---- - -The **HTTP Polling Connector** polls an endpoint at regular intervals, enabling periodic data fetching as an intermediate step in your BPMN processes. This Connector is built on top of the [REST Connector](../protocol/rest.md), ensuring consistent functionality and reliability. - -:::caution -If you use the HTTP Polling Connector, ensure you do not have any instance variable named in the list below, as these are reserved words for this connector: - -- body, url, method, headers, authentication, queryParameters, connectionTimeoutInSeconds, httpRequestInterval - -::: - -## Prerequisites - -Ensure that you have: - -- An HTTP endpoint that supports polling. -- Necessary credentials if the endpoint demands authentication. 
- -:::note Execution Exception Handling -If the HTTP Polling Connector encounters an execution exception while polling, it will ignore the exception and attempt to execute the request again after the next interval delay. Ensure to monitor your logs for any recurring issues. - -::: - -## Setting up the HTTP Polling Connector - -1. Add an **Intermediate Event** to your BPMN diagram. -2. Change its template to the **HTTP Polling Connector**. -3. Populate all mandatory fields, like the endpoint URL, polling interval, and required headers. -4. Complete your BPMN diagram. -5. Deploy the diagram to activate the **HTTP Polling Connector**. - -## Configuring the HTTP Polling Connector - -### Authentication - -Navigate to the **Authentication** section and select your desired **Authentication type** (e.g., Basic, OAuth). Refer to the [Authentication section of the REST Connector documentation](docs/components/connectors/protocol/rest.md#authentication) for a comprehensive guide. - -### HTTP polling configuration - -- **Method**: Choose the HTTP method for your request, e.g., GET, POST, PUT. -- **URL**: Enter the URL of the targeted HTTP endpoint. -- **Headers** (Optional): Input required headers as per the external service. Learn more about headers in the [REST Connector headers](docs/components/connectors/protocol/rest.md#http-headers) section. -- **Query Parameters** (Optional): Add necessary query parameters for the endpoint. More details can be found in the [REST Connector query parameters](docs/components/connectors/protocol/rest.md#query-parameters) section. -- **Interval** (Optional): Set the frequency for polling the endpoint in ISO 8601 durations format. The default interval is 50 seconds. Review [how to configure a time duration](../../modeler/bpmn/timer-events/timer-events.md#time-duration) for details. -- **Connection Timeout**: Define how long (in seconds) the Connector waits before timing out. Further information on this can be found [here](docs/components/connectors/protocol/rest.md#connection-timeout). - -### Payload configuration (optional) - -In the **Payload** section, you can include a **request body**. Learn more about this [here](docs/components/connectors/protocol/rest.md#request-body). - -### Condition to proceed - -1. **Correlation key (process)**: Defines the correlation key based on the process instance. - - - **Example**: Using a process variable named `orderId`: - ``` - Correlation key (process): =orderId - ``` - -2. **Correlation key (payload)**: Extracts the correlation key from the polled data. - - - **Example**: With data like `{"orderId": "123"}`: - ``` - Correlation key (payload): =body.orderId - ``` - -3. **Activation Condition**: Checks if the polled data meets criteria to activate the intermediate catch event. - - **Example**: If the data should have a `status` of "OK": - ``` - Activation Condition: =(body.status = "OK") - ``` - -For more information about correlation keys, review the [messages guide](../../../concepts/messages). - -## Handling HTTP Connector responses - -The response from any HTTP Connector contains the status, headers, and body. Learn more about the response structure [here](docs/components/connectors/protocol/rest.md#response). - -To structure and utilize the response: - -1. Set a **Result Variable** to store the HTTP response, e.g., `pollingData`. -2. Use a **Result Expression** to extract specific fields from the `={fieldProperty:body.fieldProperty}`. 
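As a minimal sketch (assuming the polled endpoint returns a JSON body with hypothetical `status` and `updatedAt` fields), the **Result Expression** could unpack both values into dedicated process variables:

```
= {
  pollingStatus: body.status,
  lastUpdated: body.updatedAt
}
```

The result expressions in the scenarios below follow the same pattern.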
- -## Examples - -### Scenario 1: Monitoring GitHub issues - -Monitor a GitHub issue to see when it's closed and if it has a specific label ('needs review'). - -#### Steps - -1. Drag an intermediate event onto your BPMN diagram. -2. Choose the HTTP Polling Connector template. -3. Configure the connector with the relevant details: - - **URL**: `https://api.github.com/repos/[YourRepoOwner]/[YourRepoName]/issues/[IssueNumber]` - - **Authorization Type**: Bearer token - - **Bearer token**: `{{secrets.BEARER_TOKEN}}` - - **Method**: `GET` - - **Headers**: `={"Content-Type": "application/vnd.github+json","X-GitHub-Api-Version": "2022-11-28"}` - - **Interval**: `PT10M` (Every 10 minutes) – This checks the GitHub issue every 10 minutes. - - **Correlation Key (process)**: `=issueNumber` - - **Correlation Key (payload)**: `=body.number` - - **Activation Condition**: `=(body.state = "closed")` - - **Result Expression**: `={issueUrl:body.html_url, needsReview: list contains((body.labels).name, "needs review")}` - Extract the issue URL and check if the label 'needs review' is present. - -#### Example response - -```json -{ - "status": 200, - "body": { - "number": 212, - "title": "Important Issue", - "labels": [{ "name": "bug" }, { "name": "needs review" }], - "state": "closed", - "html_url": "https://github.com/YourRepoOwner/YourRepoName/issues/212" - } -} -``` - -In this scenario, once the issue #212 titled **Important Issue** is closed, the process will proceed. If the issue is also labeled **needs review**, this label can be leveraged in the next steps of the process. For instance, it can trigger the creation of a new issue for review or initiate other related actions. - -### Scenario 2: Monitoring product stock levels - -Suppose you're overseeing an e-commerce platform. It's vital to ensure certain popular products remain stocked to guarantee user satisfaction. Avoiding stock-outs is essential to prevent lost sales and keep customers happy. With Camunda's HTTP Polling Connector, you can maintain a real-time stock level check. - -#### Steps - -1. Drag an intermediate event onto your BPMN diagram. -2. Choose the HTTP Polling Connector template. -3. Configure the Connector as follows: - - **URL**: `https://inventory.yourstore.com/api/v2/products/12345/stock` - - **Authorization Type**: Basic Authentication - - **Username**: `[YourInventoryAPIUsername]` - - **Password**: `{{secrets.PASSWORD}}` - - **Interval**: `PT1H` (Every hour) - - **Correlation Key (process)**: `=productID` - - **Correlation Key (payload)**: `=body.productID` - - **Activation Condition**: `=(body.stockLevel < 10)` - - **Result Expression**: `={stockLevelResponse:body.stockLevel}` - -#### Example response - -```json -{ - "status": 200, - "body": { - "productID": 12345, - "productName": "Wireless Bluetooth Earbuds", - "stockLevel": 8, - "lastUpdated": "2023-09-17T11:20:32Z" - } -} -``` - -Whenever the stock level of this particular product goes below 10 units, the BPMN process can be set up to perform tasks such as notifying the supply chain, alerting marketing teams, or showcasing a "Low in Stock" badge on the product's webpage. - -## Next steps - -- Dive deeper into the [REST Connector](docs/components/connectors/protocol/rest.md) to understand its capabilities and configurations. -- Explore [other Connectors available](../out-of-the-box-connectors/available-connectors-overview.md) in Camunda to integrate with various systems and services. 
-- Get a comprehensive understanding of how to use Connectors in your BPMN processes [here](../use-connectors/index.md). -- Learn about the specifics of inbound Connectors and how they can be used [here](../use-connectors/inbound.md). diff --git a/versioned_docs/version-8.2/components/connectors/protocol/rest.md b/versioned_docs/version-8.2/components/connectors/protocol/rest.md deleted file mode 100644 index 5683db47a4f..00000000000 --- a/versioned_docs/version-8.2/components/connectors/protocol/rest.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -id: rest -title: REST Connector -description: Make a request to a REST API and use the response in the next steps of your process. ---- - -:::caution -If you use the REST Connector, ensure you do not have any instance variable named in the list below: - -- `body`, `url`, `method`, `headers`, `authentication`, `queryParameters`, `connectionTimeoutInSeconds`, `readTimeoutInSeconds`, `writeTimeoutInSeconds` - -::: - -The **REST Connector** is an outbound protocol Connector that allows you to make a request to a REST API and use the response in the next steps of your process. - -## Create a REST Connector task - -To use a **REST Connector** in your process, either change the type of existing task using the wrench-shaped **Change type** context menu, or create a new Connector task by using the **Append Connector** context menu. Follow [our guide on using Connectors](/components/connectors/use-connectors/index.md) to learn more. - -## Make your REST Connector executable - -To make the **REST Connector** executable, choose the required authentication type and fill out the mandatory fields highlighted in red in the properties panel: - -:::note -All the mandatory and non-mandatory fields will be covered in the upcoming sections. Depending on the authentication selection you make, more fields might be required. We will also cover this in the next section. -::: - -### Authentication - -You can choose among the available authentication type according to your authentication requirements. - -### REST Connector (None) - -Click **None** in the **Authentication** section. -No extra authentication configuration is required; you can jump to the [next section](#request). - -### REST Connector (API key) - -For services that require an API key for authentication, you can configure the REST Connector to include your API key in the request. - -#### Create a new Connector secret - -We advise you to keep your **API key** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `REST_API_KEY_SECRET`) so you can reference it later in the Connector. - -### Configure API key authentication - -Select the **REST Connector** and fill out the following properties under the **Authentication** section: - -1. In the **Authentication** section, select **API key**. -2. Choose the location where the API key should be included: - - **Query parameters**: The API key will be added to the URL as a query string. - - **Headers**: The API key will be included in the request headers. -3. Specify your API key details: - - **API key name**: Enter the parameter name expected by the API (e.g., apiKey). - - **API key value**: Reference the secret you created for your API key (e.g., {{secrets.REST_API_KEY_SECRET}}). 
- -### REST Connector (Basic) - -##### Create a new Connector secret - -We advise you to keep your **Password** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `REST_BASIC_SECRET`) so you can reference it later in the Connector. - -### Configure Basic Authentication - -Select the **REST Connector** and fill out the following properties under the **Authentication** section: - -1. Click **Basic** in the **Authentication** section. -2. Set **Username** (i.e. `{{secrets.YOUR_USERNAME}}`). -3. Set **Password** to the secret you created (i.e. `{{secrets.REST_BASIC_SECRET}}`). - -### REST Connector (Bearer Token) - -#### Create a new Connector secret - -We advise you to keep your **Bearer token** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `REST_BEARER_TOKEN`) so you can reference it later in the Connector. - -#### Configure the Bearer token - -Select the **REST Connector** and fill out the following properties under the **Authentication** section: - -1. Click **Bearer token** in the **Authentication** section. -2. Set **Bearer** to the secret you created (i.e. `{{secrets.REST_BEARER_TOKEN}}`). - -### REST Connector (OAuth token) - -#### Create a new Connector secret - -We advise you to keep your **OAUTH_TOKEN_ENDPOINT** safe and avoid exposing it in the BPMN `xml` file by creating a secret: - -1. Follow our [guide for creating secrets](/components/console/manage-clusters/manage-secrets.md). -2. Name your secret (i.e `OAUTH_TOKEN_ENDPOINT`) so you can reference it later in the Connector. - -#### Configure the OAuth token - -Select the **REST Connector** and fill out the following properties under the **Authentication** section: - -1. Click **OAuth 2.0** in the **Authentication** section. -2. Set **OAuth token endpoint** to the secret you created (i.e. `{{secrets.OAUTH_TOKEN_ENDPOINT}}`). -3. Set **Client ID** to the secret you created (i.e. `{{secrets.CLIENT_ID}}`). -4. Set **Client secret** to the secret you created (i.e. `{{secrets.CLIENT_SECRET}}`). -5. (Optional) Set **Scopes** (i.e. `read:clients`). Depending on the OAuth provider you're using, this may or may not be required. -6. Set **Audience** to the secret you created (i.e. `{{secrets.AUDIENCE}}`). It is an optional field. Depending on the OAuth provider you're using, you should fill this field or not. -7. Choose **Client authentication** from the dropdown menu (i.e. `Send client credentials in body`). - -Find more information about the OAuth client credentials flow at the [RFC reference](https://www.rfc-editor.org/rfc/rfc6749#section-4.4). - -## Request - -Under the **HTTP Endpoint** section, select the desired **Method** and fill the **URL** with your desired REST API. - -### Query parameters - -The **Query parameters** field can be configured using the [FEEL Map](/components/modeler/feel/language-guide/feel-data-types.md#context) data type. - -```text -= { - q: "Berlin", - appid: "{{secrets.OPEN_WEATHER_MAP_API_KEY}}", - units: "metric", - lang:"en" -} -``` - -:::note -Secrets are not like regular variables and must be wrapped in double quotes (`"`) when used in an expression. 
-::: - -### HTTP Headers - -Similarly to the Query Parameters, the **HTTP headers** can be specified using the [FEEL Map](/components/modeler/feel/language-guide/feel-data-types.md#context) data type. - -``` -= { - Origin: "https://modeler.camunda.io/" -} -``` - -:::note -If you do not set the `Content-Type` header in your HTTP headers, the Connector will automatically set the `Content-Type` to `application/json`. -::: - -### Request body - -When you are making a PUT, POST, or PATCH request, you might need to provide a body. -You can provide a body for your request under the **Payload** section in the **Request body** field. - -:::note -Secrets are currently not supported in the body of a **REST Connector**. -::: - -``` -= { - "temp": 25, - "pressure": 1013, - "humidity": 44, - "temp_min": 16, - "temp_max": 30 -} -``` - -### Network communication timeouts - -- **Connection timeout in seconds** determines the time frame in which the client will try to establish a connection with the server. If you do not specify a value, the system uses the default of 20 seconds. For cases where you need to wait indefinitely, set this value to 0. - -- **Read timeout in seconds** is the amount of time the client will wait to read data from the server after the connection has been made. The default is also set to 20 seconds. To allow an unlimited wait time for slow responses, set this to 0. - -- **Write timeout in seconds** controls how long the client will wait to successfully send data to the server. The default setting for this is 0, indicating that there is no limit and the client will wait indefinitely for the operation to complete. - -## Response - -The HTTP response will be available in a temporary local `response` variable. This variable can be mapped to the process by specifying the **Result variable**. - -The following fields are available in the `response` variable: - -- **status**: Response status -- **body**: Response body of your request -- **headers**: Response headers - -## Output mapping - -### Result variable - -You can export a complete response from an HTTP REST call into a dedicated variable accessible anywhere in a process. -To do so, just input a variable name in the **Result variable** field. We recommend using a unique name to avoid -variables being overwritten, for example `currentWeather`. - -## Result expression - -Additionally, you can choose to unpack the content of your `response` into multiple process variables using the **Result expression**, which is a [FEEL Context Expression](/components/modeler/feel/language-guide/feel-context-expressions.md). - -``` -= { - actual_temp: response.body.main.temp, - feel_temp: response.body.main.feels_like, - weather: response.body.weather[1].main -} -``` - -## OData support - -The **REST Connector** supports JSON-based [OData protocol](https://www.odata.org/). - -### Requesting resources - -Requesting resources works the same way regular REST requests work: - -1. Under the **HTTP Endpoint** section, select the desired **Method**. -2. Fill in the **URL** with your desired REST API. -3. Depending on your provider, you may also need to set `OData-Version` and `OData-MaxVersion` headers. - -### Requesting an individual resource - -Similar to requesting resources, to request an individual resource you need to have a process variable. Use a FEEL string concatenation function -when building **URL**, e.g. `="https://my.odata.service/v4/Service/Resource('" + resourceId + "')"`. 
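-
-Putting this together, a minimal sketch of requesting a single resource (the service URL and the `resourceId` process variable are assumptions; the version headers are only needed if your provider requires them):
-
-**URL**
-
-```feel
-// build the resource URL from a process variable
-= "https://my.odata.service/v4/Service/Resource('" + resourceId + "')"
-```
-
-**HTTP headers**
-
-```feel
-// optional, depending on your OData provider
-= {
-  "OData-Version": "4.0",
-  "OData-MaxVersion": "4.0"
-}
-```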
- -### Queries - -Additionally, if your provider supports OData queries, such as `$top`, you can use these when defined in the **Query parameters** field, e.g. `={"$top": 3, "$select": "FirstName, LastName"}`. diff --git a/versioned_docs/version-8.2/components/connectors/use-connectors/inbound.md b/versioned_docs/version-8.2/components/connectors/use-connectors/inbound.md deleted file mode 100644 index 7ffbdeb4576..00000000000 --- a/versioned_docs/version-8.2/components/connectors/use-connectors/inbound.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -id: inbound -title: Using inbound Connectors -description: Learn how to use inbound Connectors ---- - -[Inbound Connectors](/components/connectors/connector-types.md#inbound-connectors) enable workflows to receive data or messages from external systems or services. -Review our [list of existing inbound Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) for more information. - -## Creating the Connector event - -Inbound Connectors are modeled as **catch events** in BPMN. Connectors that trigger a process instance are modeled as **start events**, and Connectors that send messages to an already running process instance are modeled as **intermediate catch events**. - -When you **deploy** such a BPMN diagram with an inbound Connector, the Connector becomes ready to receive incoming requests. The outcome depends on the Connector type: - -- The webhook endpoint becomes available. -- Subscription Connectors start listening to the message queue. -- Polling Connectors start polling the external system. - -### Modeling the Connector start event - -1. Start building your BPMN diagram with a **Start Event** building block. -2. Change its template to an inbound Connector of your choice (e.g., HTTP webhook, or a message queue subscription). -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy it to your Camunda 8 instance. - -:::note -You can still start instances of that process manually via the modeler, which is sometimes useful during testing. -::: - -### Modeling the Connector intermediate message catch event - -1. Start building your BPMN diagram with an **Intermediate Catch Event** building block. -2. Change its template to an inbound Connector of your choice (e.g., HTTP webhook, or a message queue subscription). -3. Fill in all required properties. -4. Complete your BPMN diagram. -5. Deploy it to your Camunda 8 instance. - -### Modeling the Connector boundary event - -1. Start building your BPMN diagram with any **Task** building block. -2. Attach a **Boundary event** to a **Task** at your diagram. -3. Change its template to an inbound Connector of your choice (e.g., HTTP webhook, or a message queue subscription). -4. Fill in all required properties. -5. Complete your BPMN diagram. -6. Deploy it to your Camunda 8 instance. - -### Modeling the Connector non-interrupting message start event - -1. Start building your BPMN diagram with an **Event subprocess**. -2. Add a plain **Message Start Event (non-interrupting)** into an **Event subprocess**. -3. Change its template to an inbound Connector of your choice (e.g., HTTP webhook, or a message queue subscription). -4. Fill in all required properties. -5. Select **Correlation required** in the **Subprocess correlation** section. -6. Specify both **Correlation key (process)** and **Correlation key (payload)** values. -7. Complete your BPMN diagram. -8. Deploy it to your Camunda 8 instance. 
- -### Example: Configuring an HTTP webhook - -Different Connector types have different configuration options, but parts like **Result expression**, or **Correlation key** are common for all of them. Let's take a look at an example of configuring an HTTP webhook. - -To deploy and use the webhook, you need to fill in several fields: - -1. **Webhook method** - HTTP method for your inbound webhook. You can either set a specific one or choose `Any` if all methods need to be supported. -2. **Webhook ID** - Context path for your inbound webhook. This is used to build a URL endpoint for your webhook. For example, given the `Webhook ID` value is `myWebhookPath`, the complete webhook URL endpoint will be `http(s):///inbound/myWebhookPath`. -3. **HMAC authentication** - If an external caller uses HMAC as a means of request validation and authentication, you can `enable` this property. In that case, you'll need to specify additional field values. Read more about the [generic HTTP webhook configuration](/components/connectors/protocol/http-webhook.md). -4. **Authorization** - Authorization method of the webhook. -5. **Activation condition** - FEEL expression that assesses trigger conditions. Note: Unlike other properties, in the activation condition, you cannot use the process instance variables. For example, given external caller triggers a webhook endpoint with body `{"id": 1, "status": "OK"}`, the **Activation Condition** value might look like `=(request.body.status = "OK")`. Leave this field empty to trigger your webhook every time. -6. **Result variable** - Name of the process variable that will be created or updated with the result of the webhook. For example, if you want to save the result of the webhook in a variable called `myDocumentId`, you would specify `myDocumentId` as the **Result variable** value. -7. **Result expression** - FEEL expression that transforms incoming body into BPMN process variables. For example, given external caller triggers a webhook endpoint with body `{"id": 1, "status": "OK"}` and you would like to extract `id` as a process variable `myDocumentId`. In that case, the **Variable mapping** might look as `={myDocumentId: request.body.id}`. -8. **Response body expression** - FEEL expression that forces a webhook to return a specific response. - This might be useful for one-time challenge verification, or acknowledgement response. - Given your webhook triggered with body `{"id": 1, "status": "OK"}`, if you wish to return acknowledgement, you can specify the following expression `={message: "received document ID " + string(request.body.id)}` which will produce `{"message":"received document ID 123"}` as a response. - Another example, when you wish to return a one-time subscription challenge. Given your webhook triggered with body `{"event": "subscribe", "challenge":"myRandomChallenge"}`. You can return challenge back with the following expression `=if request.body.event = "subscribe" then request.body.challenge else null` which will produce a plain string `"myRandomChallenge"` as a response. - -If the Webhook Connector is applied to an **intermediate catch event**, you also need to specify the following fields: - -9. **Correlation key (process)** - a FEEL expression that defines the correlation key for the subscription. This corresponds to the **Correlation key** property of a regular **message intermediate catch event**. -10. **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. 
This expression is evaluated in the Connector Runtime and the result is used to correlate the message. - -For example, given that your correlation key is defined with `requestIdValue` process variable, and the request body contains `{"request": {"id": 123}}`, your correlation key settings will look like this: - -- **Correlation key (process)**: `=requestIdValue` -- **Correlation key (payload)**: `=request.body.request.id` - -See the [webhook documentation](/components/connectors/protocol/http-webhook.md) or the documentation of [other Connector types](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) for more details. - -### Working with request context - -A user can access request context in the **Activation condition**, **Result expression**, and **Response body expression**. - -Let's consider the following cURL query: `curl -X POST -H "Content-Type: application/json" -H "MyHeader: myValue" -d '{"status": "OK", "id": 123}' "http:///inbound/myWebhook?param1=val1"`. - -A webhook Connector context data will arrive as follows: - -```json -{ - "request": { - "body": { - "status": "OK", - "id": 123 - }, - "headers": { - "host": "YOUR_HOST", - "user-agent": "curl/7.88.1", - "accept": "*/*", - "content-type": "application/json", - "myheader": "myValue", - "content-length": "27" - }, - "params": { - "param1": "val1" - } - }, - "connectorData": {} -} -``` - -This means in scope of the fields **Activation condition**, **Result expression**, and **Response body expression**, -you can use not only `request.body.` but also access headers via `request.headers.myheader` or params `request.params.param1`. - -There is also a Connector-specific special case of `connectorData` that is usually empty and used in rare cases, when body has to be crafted in a special way, but a Connectors user might still want access context data. - -See a list of [available inbound Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) and their respective specific configuration instructions. diff --git a/versioned_docs/version-8.2/components/connectors/use-connectors/index.md b/versioned_docs/version-8.2/components/connectors/use-connectors/index.md deleted file mode 100644 index ae3d20963f7..00000000000 --- a/versioned_docs/version-8.2/components/connectors/use-connectors/index.md +++ /dev/null @@ -1,233 +0,0 @@ ---- -id: index -title: Using Connectors -description: Learn how to use Connectors in Web Modeler by creating a Connector task, configuring a Connector, and reviewing potential errors. ---- - -Any task can be transformed into a Connector task. This guide details the basic functionality all Connectors share. - -Find the available Connectors in Camunda 8 SaaS and how to use them in detail in the [out-of-the-box Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) documentation. - -:::note -New to modeling with Camunda? The steps below assume some experience with Camunda modeling tools. [Model your first diagram](/components/modeler/web-modeler/model-your-first-diagram.md) to learn how to work with Web Modeler. -::: - -## Using secrets - -:::warning -`secrets.*` is a deprecated syntax. Instead, use `{{secrets.*}}` -::: - -You can use sensitive information in your Connectors without exposing it in your BPMN processes by referencing secrets. -Use the Console component to [create and manage secrets](/components/console/manage-clusters/manage-secrets.md). 
- -You can reference a secret like `MY_API_KEY` with `{{secrets.MY_API_KEY}}` in any Connector field in the properties panel that supports this. -Each of the [out-of-the-box Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) details which fields support secrets. - -Secrets are **not variables** and must be wrapped in double quotes as follows when used in a FEEL expression: - -``` -= { myHeader: "{{secrets.MY_API_KEY}}"} -``` - -Using the secrets placeholder syntax, you can use secrets in any part of a text, like in the following FEEL expression: - -``` -= "https://" + baseUrl + "/{{secrets.TENANT_ID}}/accounting" -``` - -This example assumes there is a process variable `baseUrl` and a configured secret `TENANT_ID`. - -The engine will resolve the `baseUrl` variable and pass on the secrets placeholder to the Connector. Assuming the `baseUrl` variable resolves to `my.company.domain`, -the Connector receives the input `"https://my.company.domain/{{secrets.TENANT_ID}}/accounting"`. The Connector then replaces the secrets placeholder upon execution. - -For further details on how secrets are implemented in Connectors, consult our [Connector SDK documentation](/components/connectors/custom-built-connectors/connector-sdk.md#secrets). - -:::note Warning -`secrets.*` is a reserved syntax. Don't use this for other purposes than referencing your secrets in Connector fields. -Using this in other areas can lead to unexpected results and incidents. -::: - -## Variable/response mapping - -When a **Connector** is expected to return a result, **Connectors** feature a dedicated section known as `Response Mapping`, -comprising two essential fields: `Result Variable` and `Result Expression`. -These fields export responses from external **Connector** calls into process variables. - -### Result variable - -This field declares a singular process variable designated for the export of responses from a **Connector** call. -The resulting process variable can be subsequently utilized within the ongoing process. - -### Result expression - -This field facilitates the mapping of a **Connector** response into multiple process variables, -providing further flexibility of the variable utilization within the ongoing process. -Additionally, the extracted values can be transformed with [FEEL expressions](/components/concepts/expressions.md). - -To ensure process isolation, note that Connectors do not have access to process instance variables. - -:::note -While using this field, a process variable with the name `response` is reserved. -::: - -## Activation - -The `Activation` section pertains specifically to [inbound](/components/connectors/connector-types.md) **Connectors**. - -### Correlation key (process) - -This field is instrumental in specifying which variable within a **Connector** should function as the process correlation key. -Learn more about [message correlation](components/concepts/messages.md#message-correlation-overview). - -### Correlation key (payload) - -This field guides the **Connector** on how to extract a correlation value from the incoming message payload. - -### Message ID expression - -This field extracts a unique message identifier from the incoming message payload. Messages sharing the same identifier -within a defined TTL (Time To Live) will be correlated at most once. -Leaving this field empty may result in identical messages being submitted and processed multiple times. 
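-
-For example, for an HTTP webhook whose incoming body carries a hypothetical `transactionId` field, a **Message ID expression** might look like the following sketch (the field name and the use of the webhook's `request` context are assumptions; use whatever unique identifier your payload actually provides):
-
-```feel
-// deduplicate messages on a caller-provided identifier (hypothetical field)
-= request.body.transactionId
-```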
- -### Condition - -Utilized for validating conditions against the incoming message payload, this field enables the filtering -of payloads that can initiate a process. Leaving this field empty results in all incoming messages triggering a new process, -except those failing pre-validation checks, such as HMAC signature verification for specific Connectors. - -### Example - -Imagine your Connector makes an external call to an arbitrary weather service. The weather service returns the following response: - -```json -{ - "status": 200, - "headers": { - "date": "Thu, 19 Jan 2023 14:02:29 GMT", - "transfer-encoding": "chunked", - "content-type": "application/json; charset=utf-8", - "connection": "keep-alive" - }, - "body": { - "latitude": 52.52, - "longitude": 13.4, - "generationtime_ms": 0.22804737091064453, - "utc_offset_seconds": 0, - "timezone": "GMT", - "timezone_abbreviation": "GMT", - "elevation": 45.0, - "current_weather": { - "temperature": 1.0, - "windspeed": 10.1, - "winddirection": 186.0, - "weathercode": 2, - "time": "2023-01-19T14:00" - } - } -} -``` - -If you declare a variable `myWeatherResponse` in the `Result Variable` field, the entire response is mapped to the declared variable. - -Now, let's imagine that you wish to extract only temperature into a process variable `berlinWeather` and wind speed into `berlinWindSpeed`. Let's also imagine you need weather in Fahrenheit declared in `berlinWeatherInFahrenheit`. - -In that case, you could declare `Result Expression` as follows: - -``` -= { - berlinWeather: response.body.current_weather.temperature, - berlinWindSpeed: response.body.current_weather.windspeed, - berlinWeatherInFahrenheit: response.body.current_weather.temperature * 1.8 + 32 -} -``` - -## BPMN errors - -Being able to deal with exceptional cases is a common requirement for business process models. Read more about our general best practices around this topic in [dealing with exceptions](/components/best-practices/development/dealing-with-problems-and-exceptions.md). - -Connectors share this requirement for exception handling like any other task in a model. However, Connectors define reusable runtime behavior that is not aware of your specific business use case. Thus, they can not determine if an exceptional case is a technical or business error. -Therefore, a Connector's runtime behavior cannot throw BPMN errors, but only technical errors. However, those technical errors can optionally contain an error code as structured data that can be reused when configuring a Connector task. - -### Error expression - -To support flexible exception handling, the [out-of-the-box Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) allow users to define an **Error Expression** in the **Error Handling** section at the bottom of the properties panel. - -The example below uses this property to automatically inform the right group of people depending on the result of an HTTP request against an internal website. If the website returns a valid result, this data is passed on to the regular team. -In case of a [404](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/404) website response, the administrator is informed, so they can check why the website cannot be reached. HTTP responses with status [500](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500) -indicate internal website errors, which is why the website team is informed. 
- -![feel Connectors](../img/use-connectors-error-general.png) - -The **Error Expression** property requires a [FEEL](/components/modeler/feel/what-is-feel.md) expression that yields a BPMN error object in the end. The BPMN error object can be an empty [context](/components/modeler/feel/language-guide/feel-data-types.md#context), -[null](/components/modeler/feel/language-guide/feel-data-types.md#null), or a context containing at least a non-empty `code`. You can use all available functionality provided by FEEL to produce this result. -Use the provided FEEL function [`bpmnError`](#function-bpmnerror) to conveniently create a BPMN error object. This triggers a [ThrowError call](/components/best-practices/development/dealing-with-problems-and-exceptions.md) to the workflow engine. - -Within the FEEL expression, you access the following temporary variables: - -- The result of the Connector in `response`. -- Any result variables created by the **Result Variable** and **Result Expression** properties (see the [REST Connector](/components/connectors/protocol/rest.md#response), for example). -- The technical exception that potentially occurred in `error`, containing a `message` and optionally a `code`. The code is only available if the Connector's runtime behavior provided a code in the exception it threw. - -Building on that, you can cover those use cases with BPMN errors that you consider as exceptional. This can build on technical exceptions thrown by a Connector as well as regular results returned by the external system you integrated. -The [example expressions](#bpmn-error-examples) below can serve as templates for such scenarios. - -### Function bpmnError() - -Returns a context entry with a `code` and `message`. - -- parameters: - - `code`: string - - `message`: string -- result: context - -```feel -bpmnError("123", "error received") -// { code: "123", message: "error received" } -``` - -### BPMN error examples - -#### HTTP errors to BPMN errors - -Using the [REST Connector](/components/connectors/protocol/rest.md), you can handle HTTP errors directly in your business process model by setting a Header named `errorExpression` with the following value: - -```feel -if error.code = "404" then - bpmnError("404", "Got a 404") -else if error.code = "500" then - bpmnError("500", "Got a 500") -else - null -``` - -This will create BPMN errors for HTTP requests that return with a status [404](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/404) or [500](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500). -You can extend that list to all HTTP errors you can handle as business use cases, e.g. by informing a website administrator directly via Slack using the [Slack Connector](/components/connectors/out-of-the-box-connectors/slack.md). - -#### Response value to BPMN error - -Using the [REST Connector](/components/connectors/protocol/rest.md) or any other Connector that returns a result, you can handle a response as BPMN error based on its value, by setting a Header named `errorExpression` with the following value: - -```feel -if response.body.main.humidity < 0 then - bpmnError("HUMIDITY-FAIL", "Received invalid humidity") -else null -``` - -This is assuming you requested data from a local weather station and received a value that is technically valid for the REST Connector. -However, you could define that for your business case a humidity value below `0` must be an error that should be checked manually. 
-You could automatically send a message to a technician to check the weather station. - -#### Generic Header to transform a ConnectorException to a BPMN Error - -If the Connector throws a `ConnectorException` like: - -```java - throw new ConnectorException("HUMIDITY-FAIL", "Received invalid humidity"); -``` - -Then you can transform this exception to a BPMN error with this expression in a Header item named `errorExpression`: - -```feel -if is defined(error) then bpmnError(error.code, error.message) else null -``` diff --git a/versioned_docs/version-8.2/components/connectors/use-connectors/outbound.md b/versioned_docs/version-8.2/components/connectors/use-connectors/outbound.md deleted file mode 100644 index 8785dd3604e..00000000000 --- a/versioned_docs/version-8.2/components/connectors/use-connectors/outbound.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: outbound -title: Using outbound Connectors -description: Learn how to use outbound Connectors ---- - -[Outbound Connectors](/components/connectors/connector-types.md#outbound-connectors) allow workflows to trigger external systems or services. - -## Creating the BPMN task - -Use the change type context menu item (spanner/wrench icon) to integrate Connectors in a business model. Users can search for keywords like `REST` or `email` to find specific Connectors. To discover all available Connectors in Camunda, input the term `Connector` into the search bar. - -Alternatively, you can directly create a Connector task by using the **Append Connector** context menu item. This creates a new Connector task directly following the currently selected element. - -## Configuring the outbound Connector - -Once a Connector task is selected, the available configuration is visible in the properties panel on the right side. The required fields are highlighted with an error message. - -Fields in the properties panel marked with an equals sign inside a circle indicate that [FEEL](/components/modeler/feel/what-is-feel.md) can be used to configure the property. If the icon includes an equals sign marked with a star, FEEL is required. Using FEEL allows the user to reference process data from variables in the expression to configure the properties. - -Each Connector defines its own set of properties you can fill in. Find the details for Connectors provided by Camunda in the [out-of-the-box Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) documentation. - -## Retries - -By default, Connector execution is retried `3` times if it fails. To change the default retries value, edit the BPMN XML file and set the `retries` attribute on the `zeebe:taskDefinition` element. For example: - -```xml -... -<zeebe:taskDefinition type="..." retries="5" /> -... -``` diff --git a/versioned_docs/version-8.2/components/console/console-troubleshooting/common-pitfalls.md b/versioned_docs/version-8.2/components/console/console-troubleshooting/common-pitfalls.md deleted file mode 100644 index 14859714769..00000000000 --- a/versioned_docs/version-8.2/components/console/console-troubleshooting/common-pitfalls.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: common-pitfalls -title: Common pitfalls -description: "Let's take a closer look at common issues and resolutions." ---- - -Note a few common pitfalls below: - -## The button to create new clusters is disabled - -- Your organization is on a trial plan and you have already created a cluster. In this case, you cannot create another cluster, because only one cluster is included in the trial plan.
- -- Your billing reservations do not allow any more clusters. You must increase the [reservations](../manage-plan/update-billing-reservations.md) if you want to create more clusters. If you do not have the necessary rights, contact an admin or the owner of the organization. - -## I cannot connect to Zeebe - -- Check if your [API client](../manage-clusters/manage-api-clients.md) has the necessary rights. To interact with Zeebe, the **Scope** `Zeebe` must be set. -- Check if your credentials are configured correctly. There is a CLI tool that allows you to check the status: [`zbctl`](https://www.npmjs.com/package/zbctl). With the command `zbctl status`, you can read the topology. If this command works, the connection can be established. -- Check if your cluster is **Healthy**: A Zeebe cluster may be temporarily unavailable. To check if your cluster is healthy, navigate to the cluster list. Click on the cluster to view its details for a closer view of the status over all products (Zeebe, Operate, Tasklist, Optimize). diff --git a/versioned_docs/version-8.2/components/console/console-troubleshooting/img/contact-feedback-and-support.png b/versioned_docs/version-8.2/components/console/console-troubleshooting/img/contact-feedback-and-support.png deleted file mode 100644 index 4eb2fbfa4e6..00000000000 Binary files a/versioned_docs/version-8.2/components/console/console-troubleshooting/img/contact-feedback-and-support.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/introduction-to-console.md b/versioned_docs/version-8.2/components/console/introduction-to-console.md deleted file mode 100644 index 1aba70d4a9d..00000000000 --- a/versioned_docs/version-8.2/components/console/introduction-to-console.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: introduction-to-console -title: Introduction to Camunda Console ---- - -Camunda Console is the management application for the included products. - -Using Camunda Console, you can do the following: - -- [Create](./manage-clusters/create-cluster.md) and [delete](./manage-clusters/delete-cluster.md) clusters. -- [Manage API clients](./manage-clusters/manage-api-clients.md) to interact with [Zeebe](/components/zeebe/zeebe-overview.md) and [Tasklist](/components/tasklist/introduction-to-tasklist.md). -- [Manage alerts](./manage-clusters/manage-alerts.md) to get notified when workflow errors occur. -- [Manage IP allowlists](./manage-clusters/manage-ip-allowlists.md) to restrict access to clusters. -- [Manage](./manage-organization/organization-settings.md) your organization. -- [Administration API clients (REST)](apis-tools/administration-api-reference.md) to manage clusters programmatically. - -If you don't have a Camunda 8 account yet, visit our [Getting Started Guide](../../guides/create-account.md). diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/create-cluster-include.md b/versioned_docs/version-8.2/components/console/manage-clusters/create-cluster-include.md deleted file mode 100644 index 4718defa2ff..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/create-cluster-include.md +++ /dev/null @@ -1,29 +0,0 @@ ---- ---- - -Let's take a closer look at creating clusters and viewing their details. - -![cluster-creating](./img/cluster-overview-empty.png) - -To create a cluster, follow the steps below: - -1. Click **Create new cluster** in the top right corner of the screen. - -2. Set a name and choose the channel. 
Currently, there are two channels available: - -- **Stable**: Provides the latest feature and patch releases ready for most users at a minimal risk. The releases follow semantic versioning and can be updated to the next minor or patch release without data loss. -- **Alpha**: Provides preview releases in preparation for the next stable release. They provide a short-term stability point to test new features and give feedback before they are released to the stable channel. Try these to ensure the upcoming release works with your infrastructure. These releases cannot be updated to a newer release, and therefore are not meant to be used in production. - -Here, you can also decide if you want to have automated updates to new versions of Camunda 8 activated. You can also toggle this feature anytime later in the **Settings** tab of your cluster. - -![cluster-creating-modal](./img/cluster-creating-modal.png) - -3. After you've made your selection and created the cluster, view the new entry in the **Clusters** tab: - -![cluster-creating](./img/cluster-overview-new-cluster-creating.png) - -4. The cluster is now being set up. During this phase, its state is **Creating**. After one or two minutes, the cluster is ready for use and changes its state to **Healthy**: - -![cluster-healthy](./img/cluster-overview-new-cluster-healthy.png) - -5. After the cluster is created, click on the cluster name to visit the cluster detail page. diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/create-cluster.md b/versioned_docs/version-8.2/components/console/manage-clusters/create-cluster.md deleted file mode 100644 index c288914ce2f..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/create-cluster.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: create-cluster -title: Create a cluster -description: "Let's take a closer look at creating clusters and viewing their details." ---- - -If you haven't created a cluster yet, the **Clusters** page will be empty. - -import CreateCluster from './create-cluster-include.md' - -<CreateCluster/> diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/delete-cluster.md b/versioned_docs/version-8.2/components/console/manage-clusters/delete-cluster.md deleted file mode 100644 index 51f4a199526..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/delete-cluster.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: delete-cluster -title: Delete your cluster -description: "Follow these step-by-step instructions to remove your cluster permanently." ---- - -:::note -This action cannot be undone. -::: - -A cluster can be deleted at any time. To delete your cluster, follow the steps below: - -1. Open the cluster details by clicking on the cluster name. -2. Select the three vertical dots next to the cluster name near the top of the page to open the cluster's menu. -3. Click **Delete**.
- -![cluster-delete](./img/cluster-delete.png) diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/client-auth.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/client-auth.png deleted file mode 100644 index afc916501a7..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/client-auth.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-creating-modal.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-creating-modal.png deleted file mode 100644 index 0017ebbb058..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-creating-modal.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-delete.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-delete.png deleted file mode 100644 index 3c57b49e5ae..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-delete.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-alerts.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-alerts.png deleted file mode 100644 index 4b05f0f3787..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-alerts.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-clients.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-clients.png deleted file mode 100644 index 3a9091ccafe..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-clients.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-create-alert.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-create-alert.png deleted file mode 100644 index 0dbbd8b3103..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-create-alert.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-create-ip-whitelist.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-create-ip-whitelist.png deleted file mode 100644 index 3cbefb7a006..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-create-ip-whitelist.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-ip-whitelists.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-ip-whitelists.png deleted file mode 100644 index 37d07f37282..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-ip-whitelists.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets-create.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets-create.png deleted file mode 100644 index 787fc2c7d95..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets-create.png and /dev/null differ 
diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets-view.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets-view.png deleted file mode 100644 index 00744eded16..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets-view.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets.png deleted file mode 100644 index 98c0322dc6a..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-detail-secrets.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-details-create-client.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-details-create-client.png deleted file mode 100644 index 36d3c92a6d8..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-details-create-client.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-details-created-client.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-details-created-client.png deleted file mode 100644 index 32c92b5aeb4..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-details-created-client.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-empty.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-empty.png deleted file mode 100644 index ad38541db5b..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-empty.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-new-cluster-creating.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-new-cluster-creating.png deleted file mode 100644 index 24f085e1d5d..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-new-cluster-creating.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-new-cluster-healthy.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-new-cluster-healthy.png deleted file mode 100644 index f01dc51844f..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-overview-new-cluster-healthy.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-rename.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-rename.png deleted file mode 100644 index 4d9a4ebc9ec..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/cluster-rename.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/img/feel-icon.png b/versioned_docs/version-8.2/components/console/manage-clusters/img/feel-icon.png deleted file mode 100644 index e75ae7cd815..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-clusters/img/feel-icon.png and /dev/null 
differ diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/manage-alerts.md b/versioned_docs/version-8.2/components/console/manage-clusters/manage-alerts.md deleted file mode 100644 index 42b2b80ebfe..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/manage-alerts.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: manage-alerts -title: Manage alerts -description: "Camunda 8 can notify you when process instances stop with an error." ---- - -Camunda 8 can notify you when process instances stop with an error. - -There are two forms of notification: - -- By email to the email address of your user account -- By webhook - -### Create an alert - -To create a new alert, take the following steps: - -1. Select the **Alert** tab. - -![cluster-details](./img/cluster-detail-alerts.png) - -2. Click **Create** to create a new alert. - -![create-alert](./img/cluster-detail-create-alert.png) - -3. Choose between **Email** and **Webhook**. - -4. If you select **Email**, click **Create**. No further information is needed. For **Webhook**, complete the additional steps below. - -5. To create a **webhook** alert, provide a valid webhook URL that accepts `POST` requests. - -If your webhook requires [HMAC authentication](https://www.okta.com/identity-101/hmac/), you can specify an HMAC secret. The SHA-256 hash of the request body will then be generated using your HMAC secret, and it is included it in the HTTP header `X-Camunda-Signature-256` each time we send out a webhook alert to your endpoint. - -6. You will have one email alert per cluster, but you can create multiple webhook alerts if needed. - -### Webhook alerts - -Webhook alerts contain a JSON body with following structure: - -```json -{ - "clusterName": "cluster-name", - "clusterId": "88d32bfc-4f8e-4dd3-9ae2-adfee281e223", - "operateBaseUrl": "https://console.cloud.camunda.io/org/2b3bc239-ad5b-4eef-80e0-6ef5139ed66a/cluster/88d32bfc-4f8e-4dd3-9ae2-adfee281e223/operate", - "clusterUrl": "https://console.cloud.camunda.io/org/2b3bc239-ad5b-4eef-80e0-6ef5139ed66a/cluster/88d32bfc-4f8e-4dd3-9ae2-adfee281e223", - "alerts": [ - { - "operateUrl": "https://console.cloud.camunda.io/org/2b3bc239-ad5b-4eef-80e0-6ef5139ed66a/cluster/88d32bfc-4f8e-4dd3-9ae2-adfee281e223/operate/#/instances/2251799829404548", - "processInstanceId": "1234567890123456", - "errorMessage": "something went wrong", - "errorType": "JOB_NO_RETRIES", - "flowNodeId": "node-id", - "jobKey": 1234567890123456, - "creationTime": "2021-07-22T08:00:00.000+0000", - "processName": "process-name", - "processVersion": 1 - } - ] -} -``` diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/manage-api-clients.md b/versioned_docs/version-8.2/components/console/manage-clusters/manage-api-clients.md deleted file mode 100644 index 033bc500356..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/manage-api-clients.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -id: manage-api-clients -title: Manage API clients -description: "Let's create a client and manage our API clients." ---- - -To interact with Zeebe in the cloud from the outside, every client application must authenticate itself. An **OAuth Flow** is therefore used for authentication: - -![auth-flow](./img/client-auth.png) - -The client configuration is shown at the bottom of the cluster detail view. Create a new client and all necessary information is displayed. 
- -For the `Client Id` and `Client Secret`, a client application can request an access token at the authentication URL (**Steps 1 and 2**). The access token is necessary to interact with Zeebe in the cloud (**Step 3**). - -:::note -Access tokens have a validity period that can be found in the access token. After this time, a new access token must be requested. -::: - -:::note -The auth service has a built-in rate limit. If too many token requests are executed in a short time, the client is blocked for a certain time. Since the access tokens have a certain validity period, they must be cached on the client side. - -The officially offered client libraries (as well as the Node.js client) have already integrated with the auth routine, handle obtaining and refreshing an access token, and make use of a local cache. -::: - -### Create a client - -Currently, Camunda 8 SaaS supports the following scopes: - -- Zeebe - Access to the [Zeebe gRPC](/apis-tools/grpc.md) API. -- Tasklist - Access to the [Tasklist GraphQL](/apis-tools/tasklist-api/tasklist-api-overview.md) API. -- Operate - Access to the [Operate REST API](/apis-tools/operate-api/overview.md). -- Optimize - Access to the [Optimize REST API]($optimize$/apis-tools/optimize-api/optimize-api-authorization). -- Secrets - Access cluster secrets in a [hybrid setup](/guides/use-connectors-in-hybrid-mode.md). - -To create a client, take the following steps: - -1. Navigate into the **API** tab. - -![cluster-details](img/cluster-detail-clients.png) - -1. Click **Create new client** to create a new client and name your client accordingly. - -![create-client](img/cluster-details-create-client.png) - -1. Ensure you keep the generated client credentials in a safe place. The **client secret** will not be shown again. For your convenience, you can also download the client information to your computer. - -![created-client](img/cluster-details-created-client.png) - -The downloaded file contains all necessary information to communicate with your Zeebe instance in the future: - -- `ZEEBE_ADDRESS`: Address where your cluster can be reached. -- `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET`: Credentials to request a new access token. -- `ZEEBE_AUTHORIZATION_SERVER_URL`: A new token can be requested at this address. -- `ZEEBE_TOKEN_AUDIENCE`: The audience for a Zeebe token request. -- `CAMUNDA_CLUSTER_ID`: The UUID of the cluster. -- `CAMUNDA_CLUSTER_REGION`: The region of the cluster. -- `CAMUNDA_CREDENTIALS_SCOPES`: A comma-separated list of the scopes this credential set is valid for. -- `CAMUNDA_OAUTH_URL`: A new token can be requested at this address using the credentials. Duplicates the earlier Zeebe-focused variable. - -Depending on the scopes granted to these client credentials, the following variables may also be present: - -- `CAMUNDA_TASKLIST_BASE_URL`: The base URL for the Tasklist API. -- `CAMUNDA_OPTIMIZE_BASE_URL`: The base URL for the Optimize API. -- `CAMUNDA_OPERATE_BASE_URL`: The base URL for the Operate API. 
diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/manage-ip-allowlists.md b/versioned_docs/version-8.2/components/console/manage-clusters/manage-ip-allowlists.md deleted file mode 100644 index 963b7235887..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/manage-ip-allowlists.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: manage-ip-allowlists -title: Manage IP allowlists -description: "If your organization works within Camunda's Enterprise plan, you can restrict access to clusters with an IP allowlist." -keywords: [whitelist, allowlist, ip whitelist, ip allowlist] ---- - -:::note -Camunda SaaS automatically manages allowlist assignments for IP addresses used by Web Modeler. -::: - -If your organization works within Camunda's [Enterprise](https://camunda.com/enterprise/) plan, you can restrict access to clusters with an IP allowlist. - -### Create an IP allowlist - -To create an IP allowlist, take the following steps: - -1. Select the **IP Allowlist** tab. - -![cluster-details](./img/cluster-detail-ip-whitelists.png) - -2. Click **Create** to create an IP allowlist. - -![create-alert](./img/cluster-detail-create-ip-whitelist.png) - -3. Enter a list of IPs or CIDR blocks separated by commas. - -4. Enter an optional description for the allowlist. diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/manage-secrets.md b/versioned_docs/version-8.2/components/console/manage-clusters/manage-secrets.md deleted file mode 100644 index e557f19c2e9..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/manage-secrets.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: manage-secrets -title: Manage secrets -description: Manage secrets for Connectors. ---- - -Create secrets and reference them in your Connectors without exposing sensitive information in your BPMN processes. - -:::note Warning -**Connector secrets** are managed at the cluster level, so ensure you deploy your processes to the cluster that contains the necessary secrets. -If you deploy and the secret is missing, Operate will show an incident. -::: - -To create a new secret, go to your cluster and take the following steps: - -1. Select the **Connector secrets** tab. - -![secrets](./img/cluster-detail-secrets.png) - -1. Click **Create**. -2. Provide a **Key** for your secret that you will use to reference your secret from your Connector. -3. Provide the **Secret** that will be assigned to the **Key**. - -![secrets-create](./img/cluster-detail-secrets-create.png) - -4. Click **Create** and view your new secret in the list. - -![secrets-view](./img/cluster-detail-secrets-view.png) - -Now you can reference your secret in any Connector as described in the [Connectors guide](/components/connectors/use-connectors/index.md#using-secrets). diff --git a/versioned_docs/version-8.2/components/console/manage-clusters/rename-cluster.md b/versioned_docs/version-8.2/components/console/manage-clusters/rename-cluster.md deleted file mode 100644 index 1350b7249d9..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-clusters/rename-cluster.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: rename-cluster -title: Rename your cluster -description: "Follow these simple instructions to rename your cluster." ---- - -A cluster can be renamed at any time. To rename your cluster, follow the steps below: - -1. Open the cluster details by clicking on the cluster name. -2. 
Select the three vertical dots next to the cluster name near the top of the page to open the cluster's menu. -3. Click **Rename**. - -![cluster-rename](./img/cluster-rename.png) diff --git a/versioned_docs/version-8.2/components/console/manage-organization/advanced-search.md b/versioned_docs/version-8.2/components/console/manage-organization/advanced-search.md deleted file mode 100644 index 44c2c66572d..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/advanced-search.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: advanced-search -title: Advanced search -description: "Use the search bar to navigate through Camunda applications and locate necessary resources and documentation." ---- - -To ease navigation throughout Console, utilize the **search functionality** available in Camunda 8 SaaS under the **Console** component. - -This search functionality allows users to: - -- Navigate between apps, actions, and tasks (e.g. go to Modeler, invite users, API clients, etc.) -- Locate Camunda project assets (e.g. BPMN, DMN models, and clusters). -- Find all entries in the Camunda forum, documentation, and public GitHub issues. - -## Open the search bar - -Press `ctrl`+`k`, `⌘`+`k`, or click the magnifier in the top navigation bar to open the search bar. - -![Open the search bar](./img/open_console_search.png) - -## Tips - -- Type `>` to execute an action in Console. By using `#` as a prefix, you search only in docs. -- Use the keyboard arrows (◀ ▲ ▼ ▶) to navigate through results. -- If you see a ▶ symbol on the right-hand side, click to reveal more information. -- To select a result, press **Enter**. diff --git a/versioned_docs/version-8.2/components/console/manage-organization/delete-account.md b/versioned_docs/version-8.2/components/console/manage-organization/delete-account.md deleted file mode 100644 index 1da02387a8e..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/delete-account.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: delete-account -title: Delete your Camunda account -keywords: [account, delete, cancel] ---- - -To delete your Camunda account in Camunda Console, open the top-right user menu and click **Delete account**. - -![avatar-menue](./img/delete-account.png) - -If you are the only member of this organization, your free trial organization and its cluster will be deleted as well. diff --git a/versioned_docs/version-8.2/components/console/manage-organization/enable-alpha-features.md b/versioned_docs/version-8.2/components/console/manage-organization/enable-alpha-features.md deleted file mode 100644 index dabc5677118..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/enable-alpha-features.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: enable-alpha-features -title: Enable alpha features -description: "Learn how admin users on Enterprise plans can gain access to alpha features in Console and Modeler. " -keywords: [alpha-features, alpha, admin, pre-release] ---- - -:::note -Opting in to Camunda alpha terms currently only applies to Enterprise SaaS subscriptions. -::: - -If you aren't already familiar with accessing **alpha features**, learn more in our [alpha feature documentation](/reference/alpha-features.md). - -Alpha terms typically refer to the specific terms and conditions that govern the use and testing of this software during its alpha phase. 
These terms outline the rights, responsibilities, and limitations of both the software provider (Camunda) and the users (alpha testers or early adopters) during the testing and evaluation period. - -Alpha terms help protect Camunda´s interests (such as protecting our intellectual property, disclaiming warranties, or limiting our liability for any issues or damages that may arise during the alpha phase), manage user expectations, encourage active participation and feedback, and ensure legal compliance during the pre-release phase of software development. - -:::note -Enabling alpha features is limited to [admin users and owners](/docs/components/console/manage-organization/manage-users.md) of Camunda products. -::: - -## Accept alpha terms - -To accept alpha terms for Camunda products, follow the steps below: - -1. Log in to Camunda Console and click the **Organization** tab to view the overview for **Organization Management**. -2. Under the **Settings** tab, click **Opt-in** under the **Alpha features** box. -3. Note the **Alpha Terms** modal. As the admin accepting the alpha terms, you must scroll and read through the terms prior to accepting. -4. Once you have read the terms and scrolled through the modal, tick the box at the bottom reading **"I understand and agree to Alpha Terms"**. - -The system will confirm your acceptance and send a copy of the accepted alpha terms to your email address. - -## Utilize alpha features - -Once you accept the alpha terms, you can enable and disable any features you would like to use, and learn more about them: - -- In the **Settings** tab, toggle the switch under **Status** to enable and disable the feature. -- Click **View docs** under **Documentation** to learn more about the feature. -- Admins can know when someone accepts the alpha terms and when features are enabled or disabled under the **Activity** tab in **Organization Management**. diff --git a/versioned_docs/version-8.2/components/console/manage-organization/external-sso.md b/versioned_docs/version-8.2/components/console/manage-organization/external-sso.md deleted file mode 100644 index a6ab2b9d974..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/external-sso.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: external-sso -title: Connect your IDP with Camunda -keywords: [SSO, IDP, AzureAD, SAML] -description: "For enterprise customers, we support integrating external identity providers." ---- - -## Connecting your identity provider with Camunda - -For customers in the Enterprise and Starter plans, we support integrating external identity providers (IdPs). Therefore, users within your organization do not need to sign up by creating an account with Camunda. - -### Onboarding procedure - -We currently support both SAML and Azure Active Directory (Azure AD). As this requires changes in our environment, first raise a ticket in the [support queue](https://jira.camunda.com/projects/SUPPORT/). - -#### SAML - -After opening the ticket in the support queue, we will provide you: - -- **Assertion Customer URL** - e.g. `https://weblogin.cloud.camunda.io/login/callback?connection=CUSTOMER_CONNECTION` -- **Entity ID**: e.g. 
`urn:auth0:camunda:CUSTOMER_CONNECTION` - -You will then need to provide: - -- The domain used for the login email addresses -- A sign-in URL -- An X.509 signing certificate - -#### Azure AD - -For Azure AD, you will need to provide: - -- The domain used for the login email addresses -- The Microsoft Azure AD domain -- The generated client ID -- The client secret value - -To generate the client on your end, you will need to use the Camunda **Redirect URL** `https://weblogin.cloud.camunda.io/login/callback`. Ensure you attach the user permissions `Users > User.Read`. diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/activity-view.png b/versioned_docs/version-8.2/components/console/manage-organization/img/activity-view.png deleted file mode 100644 index 182f1b07af8..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/activity-view.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/avatar-menue-multiple-organisations.png b/versioned_docs/version-8.2/components/console/manage-organization/img/avatar-menue-multiple-organisations.png deleted file mode 100644 index bdc92f32a82..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/avatar-menue-multiple-organisations.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/avatar-menue.png b/versioned_docs/version-8.2/components/console/manage-organization/img/avatar-menue.png deleted file mode 100644 index bdc92f32a82..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/avatar-menue.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/billing-overview.png b/versioned_docs/version-8.2/components/console/manage-organization/img/billing-overview.png deleted file mode 100644 index 7abf8509e18..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/billing-overview.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/create-console-api-client.png b/versioned_docs/version-8.2/components/console/manage-organization/img/create-console-api-client.png deleted file mode 100644 index c4b60d5e03e..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/create-console-api-client.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/delete-account.png b/versioned_docs/version-8.2/components/console/manage-organization/img/delete-account.png deleted file mode 100644 index 355ce7eb89f..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/delete-account.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/edit_usage_alert.png b/versioned_docs/version-8.2/components/console/manage-organization/img/edit_usage_alert.png deleted file mode 100644 index 914165972a0..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/edit_usage_alert.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/email_usage_alert.png b/versioned_docs/version-8.2/components/console/manage-organization/img/email_usage_alert.png deleted file mode 100644 index d642497306a..00000000000 Binary
files a/versioned_docs/version-8.2/components/console/manage-organization/img/email_usage_alert.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/notification_usage_alert.png b/versioned_docs/version-8.2/components/console/manage-organization/img/notification_usage_alert.png deleted file mode 100644 index cc17681417f..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/notification_usage_alert.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/open_console_search.png b/versioned_docs/version-8.2/components/console/manage-organization/img/open_console_search.png deleted file mode 100644 index e97194718bd..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/open_console_search.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/plans_usage_history_chart_view.gif b/versioned_docs/version-8.2/components/console/manage-organization/img/plans_usage_history_chart_view.gif deleted file mode 100644 index a122b67a047..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/plans_usage_history_chart_view.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/plans_usage_history_table_view.png b/versioned_docs/version-8.2/components/console/manage-organization/img/plans_usage_history_table_view.png deleted file mode 100644 index fe8b9b176cc..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/plans_usage_history_table_view.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/set_up_usage_alert.png b/versioned_docs/version-8.2/components/console/manage-organization/img/set_up_usage_alert.png deleted file mode 100644 index ffb25e97cb5..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/set_up_usage_alert.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/trial-usage-history.png b/versioned_docs/version-8.2/components/console/manage-organization/img/trial-usage-history.png deleted file mode 100644 index 37ba09f7f45..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/trial-usage-history.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/turn_off_usage_alert.png b/versioned_docs/version-8.2/components/console/manage-organization/img/turn_off_usage_alert.png deleted file mode 100644 index b2b88178e80..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/turn_off_usage_alert.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/img/usage_alerts_logs.png b/versioned_docs/version-8.2/components/console/manage-organization/img/usage_alerts_logs.png deleted file mode 100644 index 3bc02b3791a..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-organization/img/usage_alerts_logs.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-organization/manage-users.md b/versioned_docs/version-8.2/components/console/manage-organization/manage-users.md deleted file mode 100644 index 0fce458c96c..00000000000 --- 
a/versioned_docs/version-8.2/components/console/manage-organization/manage-users.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: manage-users -title: Manage users of your organization -description: "Let's take a closer look at the rights and responsibilities of users in your organization." ---- - -When a user signs up for Camunda 8 as the first user from their organization, company, or group, they become the owner of the Camunda organization. This organization owns Modeler files and Zeebe clusters. The owner and any admins they assign can control access to these resources by managing their organization. - -## Users - -An owner has all rights in an organization and can manage all settings accordingly. An organization cannot have more than one owner. - -To change the owner of the organization, utilize the user administration. The current owner selects another member of the organization and chooses **Assign as owner** from the menu. In the dialog that appears, select which new roles are to be assigned to the current owner. - -### Roles and permissions - -In addition to the owner, the **Admin** role is available as a second role with comprehensive rights. The admin role has the same rights as the owner, except that an admin cannot manage other admins. - -The following roles are also available, providing dedicated rights for specific elements in Camunda 8. - -- **Operations Engineer**: Full access to Console, except deletion privileges. Full access to Operate and Web Modeler, except deployment privileges -- **Analyst**: Full access to Optimize and Web Modeler, except deployment privileges. Read-only access to Clusters -- **Task User**: Full access to Tasklist and Web Modeler, except deployment privileges. Read-only access to Clusters -- **Developer**: Full access to Console, except deletion privileges. Full access to Operate, Tasklist, and Web Modeler -- **Visitor**: Read-only access to Console, Operate, and Tasklist. Full access to Web Modeler, except deployment privileges - -Users can be assigned multiple roles. For example, a user can have the role of **Operations Engineer** and **Task User**, which gives them access to **Operate** and **Tasklist**. - -Users are invited to a Camunda 8 organization via their email address. The invited user remains in the `Pending` state until the invitation is accepted. - -People who do not yet have a Camunda 8 account can also be invited to an organization. To access the organization, however, the invited individual must first create a Camunda 8 account by following the instructions in the invitation email. - -## Limitations - -The number of users that can be part of an organization varies depending on the plan. If an organization is on a Starter plan, the number of users can be updated via the **Billing** page. There, under **General users**, the number can be increased or decreased. - -## Restrictions - -In Enterprise plans, the hostname section of the email address for invites can be restricted to meet your internal security policies. Contact your Customer Success Manager to get this configured according to your needs.
diff --git a/versioned_docs/version-8.2/components/console/manage-organization/organization-settings.md b/versioned_docs/version-8.2/components/console/manage-organization/organization-settings.md deleted file mode 100644 index 78c794aeed8..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/organization-settings.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: organization-settings -title: Organization management -description: "Follow these instructions to manage your organization." ---- - -Organization management can be accessed via the menu in the navigation bar. - -![avatar-menue](./img/avatar-menue.png) - -### Overview - -The overview provides a summary of the organization, including: - -- Organization name -- Pricing plan -- Owner of the organization - -#### Rename organization - -If you are the owner of the organization, you can change the name of your organization in the **Overview** tab. - -## Next steps - -- [Manage users of your organization](./manage-users.md) -- [View organization activity](./view-organization-activity.md) diff --git a/versioned_docs/version-8.2/components/console/manage-organization/switch-organization.md b/versioned_docs/version-8.2/components/console/manage-organization/switch-organization.md deleted file mode 100644 index 339da663284..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/switch-organization.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -id: switch-organization -title: Switch organization ---- - -If a user is assigned to more than one organization, the organization can be changed in the menu of the navigation bar. - -![avatar-menue-multiple-organisations](./img/avatar-menue-multiple-organisations.png) diff --git a/versioned_docs/version-8.2/components/console/manage-organization/usage-alerts.md b/versioned_docs/version-8.2/components/console/manage-organization/usage-alerts.md deleted file mode 100644 index d700533dabe..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/usage-alerts.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: usage-alerts -title: Usage Alerts -description: "As an organization owner or admin, set up alerts for process instances, decision instances, and task users." ---- - -:::note -Usage alerts apply **only to production clusters** and are visible only to owners and admins in **Starter** and **Enterprise** organizations. -::: - -Under the **Billing** tab, organization owners and admins can set up alerts for process instances, decision instances, and task users. -Usage is calculated daily. When the threshold for an alert is met, all organization owners and admins are alerted via email and in-app notification. - -## Creating a usage alert - -To create a usage alert, take the following steps: - -1. From the Console, click **Organization > Billing**. -2. On the **Billing** page, select **Edit alert** next to the metric you want to configure the usage alert for (e.g. process instances). - ![Edit Usage Alert](./img/edit_usage_alert.png) -3. In the modal, define the percentage threshold (e.g. 80 for 80%), turn on the alert, and click **Save**. Note that the threshold can be between 1% and 4999%. - ![Set Alert](./img/set_up_usage_alert.png) - -:::note -The threshold set is calculated on a percentage ratio between your consumption and the amounts included in your plan. -For example, when a threshold is set at 50% for a plan that includes 200 process instances, an alert is sent when 100 process instances are reached. -::: - -Usage is calculated daily. 
When the threshold for an alert is met, all org owners and admins are alerted via email and in-app notification (examples below). - -![Set Alert](./img/email_usage_alert.png) - -![Set Alert](./img/notification_usage_alert.png) - -## Editing a usage alert - -Usage alerts can be edited and turned on or off anytime by selecting **Edit alert** and updating the toggle. - -![Turn Off Alert](./img/turn_off_usage_alert.png) - -## Viewing an alert change log - -Users can track changes in the usage alerts under the logs of the **Activity** tab. - -![Usage Alert Logs](./img/usage_alerts_logs.png) diff --git a/versioned_docs/version-8.2/components/console/manage-organization/usage-history.md b/versioned_docs/version-8.2/components/console/manage-organization/usage-history.md deleted file mode 100644 index 74c5c6dd92c..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/usage-history.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: usage-history -title: Usage history -description: "Let's observe our workflow usage." ---- - -:::note -The usage history is visible only to owners and admins. -::: - -Three key metrics play a role in paid plans: the number of started process instances, decision instances, and the number of task users. The **Organization management** provides a usage view for these metrics across the organization. - -## Starter and Enterprise Organizations - -The information is available under the **Billing** tab at the bottom section of **Usage history**. - -The section is split into two areas: - -**Table view**, where the data is displayed aggregated on a monthly basis: - -![Usage History - Table View](./img/plans_usage_history_table_view.png) - -**Chart view**, where the data is displayed in charts and allows a closer look into usage patterns and spikes by customizing the date ranges. - -![Usage History - Chart View](./img/plans_usage_history_chart_view.gif) - -## Trial Organizations - -All metrics are aggregated on a monthly basis and displayed in the **Usage history**. - -![Usage History](./img/trial-usage-history.png) diff --git a/versioned_docs/version-8.2/components/console/manage-organization/view-organization-activity.md b/versioned_docs/version-8.2/components/console/manage-organization/view-organization-activity.md deleted file mode 100644 index 4b320724bcb..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-organization/view-organization-activity.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -id: view-organization-activity -title: View organization activity -description: "Let's analyze the capabilities of the Activity tab." ---- - -The **Activity** tab lists all activities within an organization. Here, you can see when a cluster was created or deleted. - -![activity-view](./img/activity-view.png) diff --git a/versioned_docs/version-8.2/components/console/manage-plan/available-plans.md b/versioned_docs/version-8.2/components/console/manage-plan/available-plans.md deleted file mode 100644 index 3b604682898..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-plan/available-plans.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: available-plans -title: Available plans -description: "Let's take a closer look at Camunda's current plan options." ---- - -To sign up for Camunda 8, visit the [sign up page](https://signup.camunda.com/accounts?utm_source=docs.camunda.io&utm_medium=referral). 
- -There is a free trial and two plans available for Camunda 8: - -- Starter (formerly Professional) -- Enterprise - -For more information on Camunda 8 plans, visit the Camunda 8 [product page](https://camunda.com/products/cloud/). diff --git a/versioned_docs/version-8.2/components/console/manage-plan/cancel-starter-subscription.md b/versioned_docs/version-8.2/components/console/manage-plan/cancel-starter-subscription.md deleted file mode 100644 index 1fb68024f95..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-plan/cancel-starter-subscription.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: cancel-starter-subscription -title: Cancel Starter plan subscription -description: "When utilizing Camunda's Starter plan, follow these steps to cancel your subscription." ---- - -:::note -This setting is only visible in the **Starter plan** (formerly Professional plan) for owners and admins. -::: - -When utilizing Camunda's Starter plan, you might need to cancel your subscription. To complete your cancelation, take the following steps: - -1. From the Console, click **Organization > Billing > Manage payment account**. - -![console entrypoint to manage payment account](./img/cc-entrypoint.png) - -2. Click on your plan. - -![manage subscription](./img/cancel-prof-plan-manage-subscription.png) - -3. Click **Cancel subscription** at the bottom of the page. - -![cancel subscription cta](./img/cancel-prof-plan-cancel-cta.png) - -4. Add a cancelation reason and some feedback and click **Confirm cancelation**. Note that the cancelation date will be the last day of the current billing period. - -![enter cancelation reason and details](./img/cancel-prof-plan-last-screen.png) diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/billing-overview.png b/versioned_docs/version-8.2/components/console/manage-plan/img/billing-overview.png deleted file mode 100644 index 795495a93e9..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/billing-overview.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-cancel-cta.png b/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-cancel-cta.png deleted file mode 100644 index 5ef8f7423c4..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-cancel-cta.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-last-screen.png b/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-last-screen.png deleted file mode 100644 index cec095d8057..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-last-screen.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-manage-subscription.png b/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-manage-subscription.png deleted file mode 100644 index 2cf0e02c61c..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/cancel-prof-plan-manage-subscription.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-ending.png b/versioned_docs/version-8.2/components/console/manage-plan/img/cc-ending.png deleted file mode 100644 index 8ff7a8dd725..00000000000 Binary files 
a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-ending.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-enter.png b/versioned_docs/version-8.2/components/console/manage-plan/img/cc-enter.png deleted file mode 100644 index d02e57b930d..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-enter.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-entrypoint.png b/versioned_docs/version-8.2/components/console/manage-plan/img/cc-entrypoint.png deleted file mode 100644 index faf1bf47da9..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-entrypoint.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-manage.png b/versioned_docs/version-8.2/components/console/manage-plan/img/cc-manage.png deleted file mode 100644 index a8b44e1dffc..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/cc-manage.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/img/checkout.png b/versioned_docs/version-8.2/components/console/manage-plan/img/checkout.png deleted file mode 100644 index ed49c95ae8a..00000000000 Binary files a/versioned_docs/version-8.2/components/console/manage-plan/img/checkout.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/console/manage-plan/retrieve-invoices-or-update-billing-info.md b/versioned_docs/version-8.2/components/console/manage-plan/retrieve-invoices-or-update-billing-info.md deleted file mode 100644 index 2081f022140..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-plan/retrieve-invoices-or-update-billing-info.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: retrieve-invoices-or-update-billing-info -title: Retrieve invoices or update your billing information -description: "Retrieve and download your Starter plan invoices and update your billing information" ---- - -:::note -This setting is only visible in the **Starter plan** for owners and admins. -::: - -Once signed up for the **Starter plan**, you have access to the **Billing** page. - -You can access the **Billing** page by selecting **Organization management** in the Camunda Console navigation bar. - -![billing-overview](./img/billing-overview.png) - -### Retrieve invoices - -To retrieve your past invoices, click the **Manage subscription** button and then select **Billing history**. From that point onwards, you will be able to retrieve and download your invoices. - -### Update billing information (e.g. billing address) - -To update account information like billing address or company name, click the **Manage subscription** button and then select **Account information** or **Billing & shipping addresses**, depending on what information you need to update. diff --git a/versioned_docs/version-8.2/components/console/manage-plan/update-billing-reservations.md b/versioned_docs/version-8.2/components/console/manage-plan/update-billing-reservations.md deleted file mode 100644 index c5d043624fc..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-plan/update-billing-reservations.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: update-billing-reservations -title: Update billing reservations -description: "Let's manage our hardware packages and edit reservations."
---- - -:::note -This setting is only visible in the **Starter plan** and **Enterprise plan** for owners and admins. -::: - -## Managing hardware packages - -Once signed up for the **Starter plan** or **Enterprise plan**, you have access to the **Billing** page. - -- The process instances created during the current period are displayed at the top of the page. -- Find a history of the metrics on a monthly basis at the bottom of the page. -- View how many hardware packages are included on the right side of the page. - - **Starter plan**: Change the reservations for additional hardware packages. - -Reservations control how many clusters you can deploy. Increasing the number of reservations lets you deploy more clusters, while decreasing it reduces the number of clusters you can deploy. - -You can access the **Billing** page by selecting **Organization management** in the Camunda Console navigation bar. - -![billing-overview](./img/billing-overview.png) - -### Edit reservations (Starter plan only) - -Use the **Edit** button to change the number of reserved clusters. The number of reserved clusters cannot exceed the maximum limit and cannot go below what is currently in use. - -### Edit development cluster reservations (Starter plan only) - -To use a **development cluster**, reservations must be available, as with hardware packages. To update reservations, scroll to the **Development cluster** section and click the **Edit** button to increase or decrease the reservations. The number of reserved development clusters cannot exceed the maximum limit and cannot go below what is currently in use. - -As soon as a reservation has been made, a development cluster can be set up via the [create cluster dialog](../manage-clusters/create-cluster-include.md). diff --git a/versioned_docs/version-8.2/components/console/manage-plan/update-creditcard.md b/versioned_docs/version-8.2/components/console/manage-plan/update-creditcard.md deleted file mode 100644 index fad454d154b..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-plan/update-creditcard.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: update-creditcard -title: Update your credit card -description: "When utilizing Camunda's Starter plan, follow these steps to update an expired or failing credit card." ---- - -:::note -This setting is only visible in the **Starter plan** for owners and admins. -::: - -When utilizing Camunda's Starter plan, you might need to update an expired or failing credit card. - -The steps to update a payment method are as follows: - -1. From the Console, click **Organization > Billing > Manage payment account**. - -![console entrypoint to manage payment account](./img/cc-entrypoint.png) - -2. Click **Payment methods**. - -![payment methods button](./img/cc-manage.png) - -3. Click **Edit payment method**. - -![edit payment method button](./img/cc-ending.png) - -4. Add the details of the new credit card and click **Update**. - -![enter credit card details and update button](./img/cc-enter.png) diff --git a/versioned_docs/version-8.2/components/console/manage-plan/upgrade-to-starter-plan.md b/versioned_docs/version-8.2/components/console/manage-plan/upgrade-to-starter-plan.md deleted file mode 100644 index 891dadfd138..00000000000 --- a/versioned_docs/version-8.2/components/console/manage-plan/upgrade-to-starter-plan.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: upgrade-to-starter-plan -title: Upgrade to a Starter plan -description: "Want to upgrade to a Starter plan? Follow these steps."
---- - -:::note -The terms under which the Starter plan (formerly Professional plan) is available might change in the future. -::: - -To convert to the Starter plan, use the **Upgrade now** button in the navigation bar. Here, you can either contact us about the Enterprise plan or update your subscription to the Starter plan. - -![paid-request](./img/checkout.png) diff --git a/versioned_docs/version-8.2/components/img/ComponentsAndArchitecture_SaaS.png b/versioned_docs/version-8.2/components/img/ComponentsAndArchitecture_SaaS.png deleted file mode 100644 index 313c50daa6b..00000000000 Binary files a/versioned_docs/version-8.2/components/img/ComponentsAndArchitecture_SaaS.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/about-modeler.md b/versioned_docs/version-8.2/components/modeler/about-modeler.md deleted file mode 100644 index fa83ba26234..00000000000 --- a/versioned_docs/version-8.2/components/modeler/about-modeler.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: about-modeler -title: About Modeler -description: "Any executable process needs a BPMN diagram designed and configured beforehand. Camunda offers Web Modeler and Desktop Modeler to design and implement these." -keywords: ["process mapping tool"] ---- - -Camunda 8 only - -Any executable process needs a **[Business Process Model and Notation (BPMN)](./bpmn/bpmn.md) diagram** designed and configured beforehand. - -A BPMN diagram is used to visually outline the structure and flow of a process. As a result, the process can be more easily understood by various stakeholders. - -In tandem, different events and implementation details (such as the conditions within a gateway or the specifications of a service task) must be configured in the model so the workflow engine understands what must be executed once the process reaches a certain task. - -Camunda offers two tools to design and implement your diagrams: - -- [Web Modeler](./web-modeler/launch-web-modeler.md): Integrate seamlessly with Camunda 8 SaaS and Self-Managed installations alongside [Console](../console/introduction-to-console.md). -- [Desktop Modeler](./desktop-modeler/index.md): Design, view, and edit models using this desktop application. Install and use Desktop Modeler locally, all while integrating your local development environment. - -:::note -Interested in editing the underlying XML of [BPMN](./bpmn/bpmn.md) and [Decision Model and Notation (DMN)](./dmn/dmn.md) diagrams directly? Be sure to utilize Desktop Modeler, as it features an XML editor tab. The XML editor comes with its own history (undo or redo), search and replace functionality, and XML syntax highlighting. -::: - -In this guide, we'll demonstrate modeling BPMN diagrams using both Web Modeler and Desktop Modeler. - -## Next steps - -- [Modeling BPMN](/guides/automating-a-process-using-bpmn.md) - Learn how to model an automated process in this tutorial using Web Modeler with Camunda 8 SaaS. -- [Camunda Forms](/guides/utilizing-forms.md) - Design and configure forms, and connect them to a user task or start event to implement a task form in your application. -- [DMN](./dmn/dmn.md) - In DMN, model and execute decisions using the same language. As a business analyst, model the rules that lead to a decision in comprehensive tables, and execute these tables directly by a decision engine like Camunda. -- [Out-of-the-box Connectors](/guides/configuring-out-of-the-box-connector.md) - Utilize pre-built connectivity with a number of outside systems. 
Connectors are modular by nature, reusable, and accelerate automation across those systems. -- [Custom connectors](/components/connectors/manage-connector-templates.md) - Learn how to build custom Connectors alongside external systems based on our out-of-the-box Connectors. diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/annotation.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/annotation.svg deleted file mode 100644 index 3f8e1c35326..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/annotation.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - Text - - Annotation - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg deleted file mode 100644 index 3a41d3c920c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Business Rule - Task - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg deleted file mode 100644 index a1eeace9113..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - Call Activity - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/cancel-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/cancel-boundary-event.svg deleted file mode 100644 index d9c1f6addab..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/cancel-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/cancel-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/cancel-end-event.svg deleted file mode 100644 index cfb1c54af20..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/cancel-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-boundary-event.svg deleted file mode 100644 index 70f87d1e3eb..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-end-event.svg deleted file mode 100644 index 9e6802eb944..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-event-subprocess.svg 
b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-event-subprocess.svg deleted file mode 100644 index 0e9046194fd..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-throw-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-throw-event.svg deleted file mode 100644 index ebdce12af97..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation.svg deleted file mode 100644 index fb4dd8731b7..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/compensation.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - - Compensation - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/complex-gateway.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/complex-gateway.svg deleted file mode 100644 index 4901c0a0721..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/complex-gateway.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Complex - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg deleted file mode 100644 index e35bc1b52b2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event.svg deleted file mode 100644 index b18e46103e5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-catch-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-catch-event.svg deleted file mode 100644 index b18e46103e5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg deleted file mode 100644 index ecb9a0e4216..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file 
diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess.svg deleted file mode 100644 index e1bd95cc866..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-start-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-start-event.svg deleted file mode 100644 index e1bd95cc866..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/conditional-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/data-object.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/data-object.svg deleted file mode 100644 index 4b485499d7b..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/data-object.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - Data - Object - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/data-store.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/data-store.svg deleted file mode 100644 index d451a7795f9..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/data-store.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Data - Store - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/embedded-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/embedded-subprocess.svg deleted file mode 100644 index 6d0a9695a1a..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/embedded-subprocess.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - Subprocess - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-boundary-event.svg deleted file mode 100644 index f7da4680d22..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-end-event.svg deleted file mode 100644 index 58b179e3e40..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-event-subprocess.svg deleted file mode 100644 index 28655af00f2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/error-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git 
a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg deleted file mode 100644 index c8f6b20ba96..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event.svg deleted file mode 100644 index b20177c5380..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-end-event.svg deleted file mode 100644 index 452576c5f1e..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg deleted file mode 100644 index 9342597d9c3..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess.svg deleted file mode 100644 index 25d9829720a..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-throw-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-throw-event.svg deleted file mode 100644 index 7677824daf5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/escalation-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/event-based-gateway.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/event-based-gateway.svg deleted file mode 100644 index cb665488894..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/event-based-gateway.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Event - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/event-subprocess.svg deleted file mode 100644 index e3c1c82409a..00000000000 --- 
a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/event-subprocess.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - Event - Subprocess - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/exclusive-gateway.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/exclusive-gateway.svg deleted file mode 100644 index 79723eb4c97..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/exclusive-gateway.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - XOR - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/group.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/group.svg deleted file mode 100644 index 8ddeb24aa3c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/group.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - Group - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/inclusive-gateway.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/inclusive-gateway.svg deleted file mode 100644 index 7993f382649..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/inclusive-gateway.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - OR - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/lane.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/lane.svg deleted file mode 100644 index cfbfd447bf8..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/lane.svg +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - Pool - - - - - - - - Lane - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/link-catch-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/link-catch-event.svg deleted file mode 100644 index 97bbdd98734..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/link-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/link-throw-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/link-throw-event.svg deleted file mode 100644 index 81f8ac8dce2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/link-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/loop.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/loop.svg deleted file mode 100644 index b4f07afbe5f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/loop.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - - Loop - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/manual-task.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/manual-task.svg deleted file mode 100644 index c69c249c3f6..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/manual-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Manual Task - - - \ No newline at end of file diff --git 
a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event-non-interrupting.svg deleted file mode 100644 index f156f4f49b0..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event.svg deleted file mode 100644 index b401af0ca1b..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-catch-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-catch-event.svg deleted file mode 100644 index b401af0ca1b..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-end-event.svg deleted file mode 100644 index 0317ca99db8..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg deleted file mode 100644 index a6655b7d75d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess.svg deleted file mode 100644 index 8aef44c32b9..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-start-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-start-event.svg deleted file mode 100644 index 8aef44c32b9..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-throw-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-throw-event.svg deleted file mode 100644 index ef9880a1ecc..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/message-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No 
newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-parallel.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-parallel.svg deleted file mode 100644 index 1c564f27f4f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-parallel.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - Multi-Instance - Parallel - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-sequential.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-sequential.svg deleted file mode 100644 index 6099e144af3..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multi-instance-sequential.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - Multi-Instance - Sequential - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg deleted file mode 100644 index 98c0ea186a2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event.svg deleted file mode 100644 index dc96d662c5c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-catch-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-catch-event.svg deleted file mode 100644 index dc96d662c5c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-end-event.svg deleted file mode 100644 index 8b84b943fff..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg deleted file mode 100644 index c938312f93f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess.svg deleted file mode 100644 index 419383a38e3..00000000000 --- 
a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg deleted file mode 100644 index 0a12743ba52..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event.svg deleted file mode 100644 index d2040edd54d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-catch-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-catch-event.svg deleted file mode 100644 index d2040edd54d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg deleted file mode 100644 index dd175021ad2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess.svg deleted file mode 100644 index 665ea748c0a..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-start-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-start-event.svg deleted file mode 100644 index 665ea748c0a..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-parallel-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-start-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-start-event.svg deleted file mode 100644 index 419383a38e3..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git 
a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-throw-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-throw-event.svg deleted file mode 100644 index fb649b893a1..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/multiple-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-end-event.svg deleted file mode 100644 index a8ddd388078..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-end-event.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-start-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-start-event.svg deleted file mode 100644 index 1fbe9b1ad70..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-start-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-throw-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-throw-event.svg deleted file mode 100644 index c2f61d5af3e..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/none-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/parallel-gateway.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/parallel-gateway.svg deleted file mode 100644 index 04e961ab3b6..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/parallel-gateway.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - AND - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/pool.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/pool.svg deleted file mode 100644 index a94f7d0c090..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/pool.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - Pool - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/receive-task-instantiated.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/receive-task-instantiated.svg deleted file mode 100644 index b91ecfde13c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/receive-task-instantiated.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Receive Task - (instantiated) - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/receive-task.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/receive-task.svg deleted file mode 100644 index 0e3f54f0ee5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/receive-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Receive Task - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/script-task.svg 
b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/script-task.svg deleted file mode 100644 index c4111af129c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/script-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Script Task - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/send-task.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/send-task.svg deleted file mode 100644 index 6d27a380b50..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/send-task.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - Send Task - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/service-task.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/service-task.svg deleted file mode 100644 index 58190c2ffc5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/service-task.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - Service Task - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg deleted file mode 100644 index ff5940246dd..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event.svg deleted file mode 100644 index 0eb546b465f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-boundary-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-catch-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-catch-event.svg deleted file mode 100644 index 0eb546b465f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-catch-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-end-event.svg deleted file mode 100644 index 2e79ffa90f9..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg deleted file mode 100644 index 4640651c8d0..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess.svg 
b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess.svg deleted file mode 100644 index 7c5e9e7222d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-event-subprocess.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-start-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-start-event.svg deleted file mode 100644 index 7c5e9e7222d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-start-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-throw-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-throw-event.svg deleted file mode 100644 index 1d353a28ea9..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/signal-throw-event.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/termination-end-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/termination-end-event.svg deleted file mode 100644 index 3e2988fb8ed..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/termination-end-event.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg deleted file mode 100644 index deec2de4a62..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event.svg deleted file mode 100644 index fe487510406..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-boundary-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-catch-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-catch-event.svg deleted file mode 100644 index fe487510406..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-catch-event.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg deleted file mode 100644 index 09d63c08310..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git 
a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess.svg deleted file mode 100644 index 38035befd81..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-event-subprocess.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-start-event.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-start-event.svg deleted file mode 100644 index 38035befd81..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/timer-start-event.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/transactional-subprocess.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/transactional-subprocess.svg deleted file mode 100644 index 9546777117b..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/transactional-subprocess.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - Transaction - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/undefined-task.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/undefined-task.svg deleted file mode 100644 index eca69ecc221..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/undefined-task.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - Undefined Task - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/user-task.svg b/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/user-task.svg deleted file mode 100644 index 1950c3f12e1..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/bpmn-symbols/user-task.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - User Task - - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/data-flow-job-worker.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/data-flow-job-worker.png deleted file mode 100644 index 76c1dce170a..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/data-flow-job-worker.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/data-flow.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/data-flow.png deleted file mode 100644 index e526d24a089..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/data-flow.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/demo.gif b/versioned_docs/version-8.2/components/modeler/bpmn/assets/demo.gif deleted file mode 100644 index 652f86459e7..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/demo.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/merging-mapping.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/merging-mapping.png deleted file mode 100644 index 821240dea84..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/merging-mapping.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/modeler/bpmn/assets/modeler.gif b/versioned_docs/version-8.2/components/modeler/bpmn/assets/modeler.gif deleted file mode 100644 index f1999370e54..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/modeler.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/order-process.bpmn b/versioned_docs/version-8.2/components/modeler/bpmn/assets/order-process.bpmn deleted file mode 100644 index 795babfcdcf..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/order-process.bpmn +++ /dev/null @@ -1,80 +0,0 @@ - - - - - SequenceFlow_0j6tsnn - - - - SequenceFlow_0j6tsnn - SequenceFlow_0baemzs - - - - SequenceFlow_0cu1bs2 - SequenceFlow_19klrd3 - - - SequenceFlow_19klrd3 - - - - - SequenceFlow_0baemzs - SequenceFlow_0cu1bs2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/order-process.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/order-process.png deleted file mode 100644 index c9185cf96a8..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/parallel-gateway.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/parallel-gateway.png deleted file mode 100644 index c0601d42b89..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/parallel-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/process.bpmn b/versioned_docs/version-8.2/components/modeler/bpmn/assets/process.bpmn deleted file mode 100644 index 828a8c302ca..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/process.bpmn +++ /dev/null @@ -1,89 +0,0 @@ - - - - - SequenceFlow_1bq1azi - - - - - - SequenceFlow_0ojoaqz - - - - - - - SequenceFlow_1bq1azi - SequenceFlow_09hqjpg - - - - - - SequenceFlow_09hqjpg - SequenceFlow_1ea1mpb - - - - - - SequenceFlow_1ea1mpb - SequenceFlow_0ojoaqz - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/process.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/process.png deleted file mode 100644 index 8576c92b106..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/quickstart-2.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/quickstart-2.png deleted file mode 100644 index 7eea0539ba9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/quickstart-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/quickstart-3.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/quickstart-3.png deleted file mode 100644 index 95539b39d32..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/quickstart-3.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/react-components/iso-8601-date-time.md b/versioned_docs/version-8.2/components/modeler/bpmn/assets/react-components/iso-8601-date-time.md deleted file mode 100644 index 
03eacb8fd18..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/assets/react-components/iso-8601-date-time.md +++ /dev/null @@ -1,5 +0,0 @@ -A specific point in time defined as ISO 8601 combined date and time representation. It must contain timezone information, either `Z` for UTC or a zone offset. Optionally, it can contain a zone id. - -- `2019-10-01T12:00:00Z` - UTC time -- `2019-10-02T08:09:40+02:00` - UTC plus two hours zone offset -- `2019-10-02T08:09:40+02:00[Europe/Berlin]` - UTC plus two hours zone offset at Berlin diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/sequenceflow.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/sequenceflow.png deleted file mode 100644 index 28e1e0f552d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/sequenceflow.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/sub-process.gif b/versioned_docs/version-8.2/components/modeler/bpmn/assets/sub-process.gif deleted file mode 100644 index 11d7467db21..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/sub-process.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/assets/variable-scopes.png b/versioned_docs/version-8.2/components/modeler/bpmn/assets/variable-scopes.png deleted file mode 100644 index 3caefe67e43..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/assets/variable-scopes.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/bpmn-coverage.md b/versioned_docs/version-8.2/components/modeler/bpmn/bpmn-coverage.md deleted file mode 100644 index 1d41ea6d754..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/bpmn-coverage.md +++ /dev/null @@ -1,691 +0,0 @@ ---- -id: bpmn-coverage -title: "BPMN coverage" -description: "List of BPMN symbols supported in Modelers" ---- - -export const Highlight = ({children, color}) => ( - -{children} - -); - -The following BPMN elements are supported by our modeling tools. Elements highlighted in green are supported for execution by Camunda 8. Click on an element to navigate to the documentation. 
- -## Participants - -import PoolSvg from './assets/bpmn-symbols/pool.svg'; -import LaneSvg from './assets/bpmn-symbols/lane.svg'; - - - -## Subprocesses - -import EmbeddedSubprocessSvg from './assets/bpmn-symbols/embedded-subprocess.svg'; -import CallActivitySvg from './assets/bpmn-symbols/call-activity.svg'; -import EventSubprocessSvg from './assets/bpmn-symbols/event-subprocess.svg' -import TransactionalSubprocessSvg from './assets/bpmn-symbols/transactional-subprocess.svg' - - - -## Tasks - -import ServiceTaskSvg from './assets/bpmn-symbols/service-task.svg' -import UserTaskSvg from './assets/bpmn-symbols/user-task.svg' -import ReceiveTaskSvg from './assets/bpmn-symbols/receive-task.svg' -import SendTaskSvg from './assets/bpmn-symbols/send-task.svg' -import BusinessRuleTaskSvg from './assets/bpmn-symbols/business-rule-task.svg' -import ScriptTaskSvg from './assets/bpmn-symbols/script-task.svg' -import ManualTaskSvg from './assets/bpmn-symbols/manual-task.svg' -import UndefinedTaskSvg from './assets/bpmn-symbols/undefined-task.svg' -import ReceiveTaskInstantiatedSvg from './assets/bpmn-symbols/receive-task-instantiated.svg' - - - -## Gateways - -import ExclusiveGatewaySvg from './assets/bpmn-symbols/exclusive-gateway.svg' -import InclusiveGatewaySvg from './assets/bpmn-symbols/inclusive-gateway.svg' -import ParallelGatewaySvg from './assets/bpmn-symbols/parallel-gateway.svg' -import EventBasedGatewaySvg from './assets/bpmn-symbols/event-based-gateway.svg' -import ComplexGatewaySvg from './assets/bpmn-symbols/complex-gateway.svg' - - - -## Markers - -import MultiInstanceParallelSvg from './assets/bpmn-symbols/multi-instance-parallel.svg' -import MultiInstanceSequentialSvg from './assets/bpmn-symbols/multi-instance-sequential.svg' -import LoopSvg from './assets/bpmn-symbols/loop.svg' -import CompensationSvg from './assets/bpmn-symbols/compensation.svg' - - - -## Data - -import DataObjectSvg from './assets/bpmn-symbols/data-object.svg' -import DataStoreSvg from './assets/bpmn-symbols/data-store.svg' - - - -## Artifacts - -import AnnotationSvg from './assets/bpmn-symbols/annotation.svg' -import GroupSvg from './assets/bpmn-symbols/group.svg' - - - -## Events - -import NoneStartEventSvg from './assets/bpmn-symbols/none-start-event.svg' -import NoneThrowEventSvg from './assets/bpmn-symbols/none-throw-event.svg' -import NoneEndEventSvg from './assets/bpmn-symbols/none-end-event.svg' - -import MessageStartEventSvg from './assets/bpmn-symbols/message-start-event.svg' -import MessageEventSubprocessSvg from './assets/bpmn-symbols/message-event-subprocess.svg' -import MessageEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/message-event-subprocess-non-interrupting.svg' -import MessageCatchEventSvg from './assets/bpmn-symbols/message-catch-event.svg' -import MessageBoundaryEventSvg from './assets/bpmn-symbols/message-boundary-event.svg' -import MessageBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/message-boundary-event-non-interrupting.svg' -import MessageThrowEventSvg from './assets/bpmn-symbols/message-throw-event.svg' -import MessageEndEventSvg from './assets/bpmn-symbols/message-end-event.svg' - -import TimerStartEventSvg from './assets/bpmn-symbols/timer-start-event.svg' -import TimerEventSubprocessSvg from './assets/bpmn-symbols/timer-event-subprocess.svg' -import TimerEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/timer-event-subprocess-non-interrupting.svg' -import TimerCatchEventSvg from './assets/bpmn-symbols/timer-catch-event.svg' -import 
TimerBoundaryEventSvg from './assets/bpmn-symbols/timer-boundary-event.svg' -import TimerBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/timer-boundary-event-non-interrupting.svg' - -import ErrorEventSubprocessSvg from './assets/bpmn-symbols/error-event-subprocess.svg' -import ErrorBoundaryEventSvg from './assets/bpmn-symbols/error-boundary-event.svg' -import ErrorEndEventSvg from './assets/bpmn-symbols/error-end-event.svg' - -import SignalStartEventSvg from './assets/bpmn-symbols/signal-start-event.svg' -import SignalEventSubprocessSvg from './assets/bpmn-symbols/signal-event-subprocess.svg' -import SignalEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/signal-event-subprocess-non-interrupting.svg' -import SignalCatchEventSvg from './assets/bpmn-symbols/signal-catch-event.svg' -import SignalBoundaryEventSvg from './assets/bpmn-symbols/signal-boundary-event.svg' -import SignalBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/signal-boundary-event-non-interrupting.svg' -import SignalThrowEventSvg from './assets/bpmn-symbols/signal-throw-event.svg' -import SignalEndEventSvg from './assets/bpmn-symbols/signal-end-event.svg' - -import ConditionalStartEventSvg from './assets/bpmn-symbols/conditional-start-event.svg' -import ConditionalEventSubprocessSvg from './assets/bpmn-symbols/conditional-event-subprocess.svg' -import ConditionalEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/conditional-event-subprocess-non-interrupting.svg' -import ConditionalCatchEventSvg from './assets/bpmn-symbols/conditional-catch-event.svg' -import ConditionalBoundaryEventSvg from './assets/bpmn-symbols/conditional-boundary-event.svg' -import ConditionalBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/conditional-boundary-event-non-interrupting.svg' - -import EscalationEventSubprocessSvg from './assets/bpmn-symbols/escalation-event-subprocess.svg' -import EscalationEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/escalation-event-subprocess-non-interrupting.svg' -import EscalationBoundaryEventSvg from './assets/bpmn-symbols/escalation-boundary-event.svg' -import EscalationBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/escalation-boundary-event-non-interrupting.svg' -import EscalationThrowEventSvg from './assets/bpmn-symbols/escalation-throw-event.svg' -import EscalationEndEventSvg from './assets/bpmn-symbols/escalation-end-event.svg' - -import CompensationEventSubprocessSvg from './assets/bpmn-symbols/compensation-event-subprocess.svg' -import CompensationBoundaryEventSvg from './assets/bpmn-symbols/compensation-boundary-event.svg' -import CompensationThrowEventSvg from './assets/bpmn-symbols/compensation-throw-event.svg' -import CompensationEndEventSvg from './assets/bpmn-symbols/compensation-end-event.svg' - -import CancelBoundaryEventSvg from './assets/bpmn-symbols/cancel-boundary-event.svg' -import CancelEndEventSvg from './assets/bpmn-symbols/cancel-end-event.svg' - -import TerminationEndEventSvg from './assets/bpmn-symbols/termination-end-event.svg' - -import LinkCatchEventSvg from './assets/bpmn-symbols/link-catch-event.svg' -import LinkThrowEventSvg from './assets/bpmn-symbols/link-throw-event.svg' - -import MultipleStartEventSvg from './assets/bpmn-symbols/multiple-start-event.svg' -import MultipleEventSubprocessSvg from './assets/bpmn-symbols/multiple-event-subprocess.svg' -import MultipleEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/multiple-event-subprocess-non-interrupting.svg' -import 
MultipleCatchEventSvg from './assets/bpmn-symbols/multiple-catch-event.svg' -import MultipleBoundaryEventSvg from './assets/bpmn-symbols/multiple-boundary-event.svg' -import MultipleBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/multiple-boundary-event-non-interrupting.svg' -import MultipleThrowEventSvg from './assets/bpmn-symbols/multiple-throw-event.svg' -import MultipleEndEventSvg from './assets/bpmn-symbols/multiple-end-event.svg' - -import MultipleParallelStartEventSvg from './assets/bpmn-symbols/multiple-parallel-start-event.svg' -import MultipleParallelEventSubprocessSvg from './assets/bpmn-symbols/multiple-parallel-event-subprocess.svg' -import MultipleParallelEventSubprocessNonInterruptingSvg from './assets/bpmn-symbols/multiple-parallel-event-subprocess-non-interrupting.svg' -import MultipleParallelCatchEventSvg from './assets/bpmn-symbols/multiple-parallel-catch-event.svg' -import MultipleParallelBoundaryEventSvg from './assets/bpmn-symbols/multiple-parallel-boundary-event.svg' -import MultipleParallelBoundaryEventNonInterruptingSvg from './assets/bpmn-symbols/multiple-parallel-boundary-event-non-interrupting.svg' - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    TypeStartIntermediateEnd
    NormalEvent SubprocessEvent Subprocess non-interruptingCatchBoundaryBoundary non-interruptingThrow
    - None - - - - - - - - - - - - -
    - Message - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Timer - - - - - - - - - - - - - - - - - - - - - - - - -
    - Error - - - - - - - - - - - - -
    - Signal - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Conditional - - - - - - - - - - - - - - - - - - - - - - - - -
    - Escalation - - - - - - - - - - - - - - - - - - - - - - - - -
    - Compensation - - - - - - - - - - - - - - - - -
    - Cancel - - - - - - - - -
    - Terminate - - - - -
    - Link - - - - - - - - -
    - Multiple - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Multiple Parallel - - - - - - - - - - - - - - - - - - - - - - - - -
    diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/bpmn-primer.md b/versioned_docs/version-8.2/components/modeler/bpmn/bpmn-primer.md deleted file mode 100644 index 18597307090..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/bpmn-primer.md +++ /dev/null @@ -1,238 +0,0 @@ ---- -id: bpmn-primer -title: "BPMN primer" -description: "Business Process Model and Notation 2.0 (BPMN) is an industry standard for process modeling and execution. Let's examine a few examples of the XML document." ---- - -import ReactPlayer from 'react-player' - -Business Process Model and Notation 2.0 (BPMN) is an industry standard for process modeling and execution. A BPMN process is an XML document that has a visual representation. For example, here is a BPMN process: - -![process](assets/process.png) - -
    - The corresponding XML -

    - -```xml - - - - - SequenceFlow_1bq1azi - - - - - - SequenceFlow_0ojoaqz - - - - - - - SequenceFlow_1bq1azi - SequenceFlow_09hqjpg - - - - - - SequenceFlow_09hqjpg - SequenceFlow_1ea1mpb - - - - - - SequenceFlow_1ea1mpb - SequenceFlow_0ojoaqz - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -

    -
    - -This duality makes BPMN very powerful. The XML document contains all the necessary information to be interpreted by workflow engines and modeling tools like Zeebe. At the same time, the visual representation contains just enough information to be quickly understood by humans, even when they are non-technical people. The BPMN model is source code and documentation in one artifact. - -The following is an introduction to BPMN 2.0, its elements, and their execution semantics. It tries to briefly provide an intuitive understanding of BPMN's power, but does not cover the entire feature set. For more exhaustive BPMN resources, see the [reference links](#additional-resources) at the end of this section. - -## Modeling BPMN diagrams - -The best tool for modeling BPMN diagrams for Zeebe is [Modeler](../about-modeler.md). - -![overview](./assets/modeler.gif) - -- [Download page](https://camunda.com/download/modeler/) -- [Source code repository](https://github.com/camunda/camunda-modeler) - -## BPMN elements - -### Sequence flow: Controlling the flow of execution - -A core concept of BPMN is a **sequence flow** that defines the order in which steps in the process happen. In BPMN's visual representation, a sequence flow is an arrow connecting two elements. The direction of the arrow indicates their order of execution. - -![sequence flow](./assets/sequenceflow.png) - -You can think of process execution as tokens running through the process model. When a process is started, a token is created at the beginning of the model and advances with every completed step. When the token reaches the end of the process, it is consumed and the process instance ends. Zeebe's task is to drive the token and to make sure the job workers are invoked whenever necessary. - -
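To make the idea concrete, here is a minimal, illustrative sketch of how such a flow is declared in BPMN XML. The element ids and names are made up for this example, and Zeebe-specific details such as the task definition are omitted:

```xml
<bpmn:process id="order-process" isExecutable="true">
  <!-- the token is created here when a process instance starts -->
  <bpmn:startEvent id="order-placed">
    <bpmn:outgoing>to-collect-money</bpmn:outgoing>
  </bpmn:startEvent>

  <!-- the token stops here until the corresponding work is done -->
  <bpmn:serviceTask id="collect-money" name="Collect Money">
    <bpmn:incoming>to-collect-money</bpmn:incoming>
    <bpmn:outgoing>to-order-shipped</bpmn:outgoing>
  </bpmn:serviceTask>

  <!-- reaching the end event consumes the token and ends the instance -->
  <bpmn:endEvent id="order-shipped">
    <bpmn:incoming>to-order-shipped</bpmn:incoming>
  </bpmn:endEvent>

  <!-- sequence flows define the order of execution -->
  <bpmn:sequenceFlow id="to-collect-money" sourceRef="order-placed" targetRef="collect-money" />
  <bpmn:sequenceFlow id="to-order-shipped" sourceRef="collect-money" targetRef="order-shipped" />
</bpmn:process>
```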
    - -
- -### Tasks: Units of work - -The basic elements of BPMN processes are tasks; these are atomic units of work composed to create a meaningful result. Whenever a token reaches a task, the token stops and Zeebe creates a job and notifies a registered worker to perform work. When the worker signals completion, the token continues on the outgoing sequence flow. - -
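As a rough sketch of how a task is wired to a worker (the id, name, and job type below are illustrative), the task carries a job type in a `zeebe:taskDefinition` extension element, and workers subscribe to that type:

```xml
<bpmn:serviceTask id="collect-money" name="Collect Money">
  <bpmn:extensionElements>
    <!-- workers subscribed to the "payment-service" job type receive a job
         whenever a token reaches this task -->
    <zeebe:taskDefinition type="payment-service" retries="3" />
  </bpmn:extensionElements>
</bpmn:serviceTask>
```

Completing that job is what lets the token continue on the outgoing sequence flow.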
    - -
    - -Choosing the granularity of a task is up to the person modeling the process. For example, the activity of processing an order can be modeled as a single _Process Order_ task, or as three individual tasks _Collect Money_, _Fetch Items_, _Ship Parcel_. If you use Zeebe to orchestrate microservices, one task can represent one microservice invocation. - -See the [tasks](tasks.md) section on which types of tasks are currently supported and how to use them. - -### Gateways: Steering flow - -Gateways are elements that route tokens in more complex patterns than plain sequence flow. - -BPMN's **exclusive gateway** chooses one sequence flow out of many based on data: - -
    - -
    - -BPMN's **parallel gateway** generates new tokens by activating multiple sequence flows in parallel: - -
    - -
    - -See the [gateways](gateways.md) section on which types of gateways are currently supported and how to use them. - -### Events: Waiting for something to happen - -**Events** in BPMN represent things that _happen_. A process can react to events (_catching_ event) as well as emit events (_throwing_ event). For example: - -
    - -
    - -The circle with the envelope symbol is a catching message event. It makes the token continue as soon as a message is received. The XML representation of the process contains the criteria for which kind of message triggers continuation. - -Events can be added to the process in various ways. Not only can they be used to make a token wait at a certain point, but also for interrupting a token's progress. - -See the [events](events.md) section on which types of events are currently supported and how to use them. - -### Subprocesses: Grouping elements - -**Subprocesses** are element containers that allow defining common functionality. For example, we can attach an event to a subprocess's border: - -![payload](./assets/sub-process.gif) - -When the event is triggered, the subprocess is interrupted, regardless which of its elements is currently active. - -See the [subprocesses](subprocesses.md) section on which types of subprocesses are currently supported and how to use them. - -## Additional resources - -- [BPMN specification](http://www.bpmn.org/) -- [BPMN tutorial](https://camunda.com/bpmn/) -- [Full BPMN reference](https://camunda.com/bpmn/reference/) -- [BPMN book](https://www.amazon.com/dp/1086302095/) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/bpmn.md b/versioned_docs/version-8.2/components/modeler/bpmn/bpmn.md deleted file mode 100644 index 23cfe55faa2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/bpmn.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: modeler-bpmn -title: BPMN in Modeler -description: Let's start modeling by creating a BPMN diagram. ---- - -:::note -BPMN diagrams must be created for the process engine they intend to be deployed on. You cannot run a BPMN diagram modeled for Camunda 7 in Camunda 8, or vice versa, at this time. -::: - -## Start modeling - -![Start Modeling](./assets/quickstart-2.png) - -Web and Desktop Modeler both offer a similar core BPMN 2.0 Modeling experience: - -- Add BPMN elements from the palette on the left side of the page by dragging and dropping them onto the diagram canvas. -- Change the type of element in place by clicking on an element to reveal the context menu. Then, click the wrench icon to change the type of element to a [service task](./service-tasks/service-tasks.md) or [user task](./user-tasks/user-tasks.md), for example. - -## Demo - -![Demo](./assets/demo.gif) - -The demo above shows how to create more BPMN 2.0 elements like lanes, task types, and event definitions. - -## BPMN 2.0 coverage - -The Modeler [covers all BPMN 2.0 elements](/docs/components/modeler/bpmn/bpmn-coverage/) for modeling processes and collaborations. - -## BPMN 2.0 properties for execution - -![Save BPMN Diagram](./assets/quickstart-3.png) - -In the properties panel on the right side, view and edit attributes that apply to the selected element. 
diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/business-rule-tasks/assets/business-rule-task.png b/versioned_docs/version-8.2/components/modeler/bpmn/business-rule-tasks/assets/business-rule-task.png deleted file mode 100644 index 3091a8948c1..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/business-rule-tasks/assets/business-rule-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md deleted file mode 100644 index 4f6b981eed6..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -id: business-rule-tasks -title: "Business rule tasks" -description: "A business rule task is used to model the evaluation of a business rule." ---- - -A business rule task is used to model the evaluation of a business rule; for example, a decision -modeled in [Decision Model and Notation](https://www.omg.org/dmn/) (DMN). - -![task](assets/business-rule-task.png) - -:::info -Camunda 8 supports alternative task implementations for the business rule task. If you want -to use your own implementation for a business rule task, see the [job worker -implementation](#job-worker-implementation) section below. The sections before this job worker implementation apply to the DMN -decision implementation only. -::: - -:::info -If you only want to evaluate a DMN decision, you can use the -[`EvaluateDecision`](/apis-tools/grpc.md#evaluatedecision-rpc) API. -::: - -When the process instance arrives at a business rule task, a decision is evaluated using the -internal DMN decision engine. Once the decision is made, the process instance continues. - -If the decision evaluation is unsuccessful, an [incident](/components/concepts/incidents.md) is -raised at the business rule task. When the incident is resolved, the decision is evaluated again. - -## Defining a called decision - -A called decision links the business rule task to a DMN decision, either to -a [decision table](/components/modeler/dmn/decision-table.md) or to -a [decision literal expression](/components/modeler/dmn/decision-literal-expression.md) -. It can be defined using the -`zeebe:calledDecision` extension element. - -A business rule task must define the [DMN decision id](/components/modeler/dmn/decision-table.md#decision-id) of the -called decision as `decisionId`. Usually, the `decisionId` is defined as a [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `shipping_box_size`), but -it can also be defined as an [expression](/components/concepts/expressions.md) ( -e.g. `= "shipping_box_size_" + countryCode`). The expression is evaluated on activating the business rule task (or when -an incident at the business rule task is resolved) after input mappings have been applied. The expression must result in -a `string`. - -A business rule task must define the process variable name of the decision result as -`resultVariable`. The result of the decision is stored in this variable. The `resultVariable` -is defined as a static value. - -## Variable mappings - -By default, the variable defined by `resultVariable` is merged into the process instance. This -behavior can be customized by defining an output mapping at the business rule task. 
- -All variables in scope of the business rule task are available to the decision engine when the -decision is evaluated. Input mappings can be used to transform the variables into a format accepted -by the decision. - -:::info -Input mappings are applied before the decision evaluation, on activating the business rule task. When an incident at the business rule task is resolved, the input mappings are applied again before the decision is re-evaluated. This can affect the result of the decision. -::: - -For more information about this topic, visit the documentation about [input/output variable -mappings](/components/concepts/variables.md#inputoutput-variable-mappings). - -## Job worker implementation - -A business rule task does not have to evaluate a decision modeled with DMN. Instead, you can also -use [job workers](/components/concepts/job-workers.md) to implement your business rule task. - -A job worker implementation can be defined using the `zeebe:taskDefinition` extension element. - -Business rule tasks with a job worker implementation behave exactly like [service tasks](/components/modeler/bpmn/service-tasks/service-tasks.md). The differences between these task -types are the visual representation (i.e. the task marker) and the semantics for the model. - -When a process instance enters a business rule task with an alternative task implementation, it creates -a corresponding job and waits for its completion. A job worker should request jobs of this job type -and process them. When the job is completed, the process instance continues. - -A business rule task must define a [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) the same way as a service task does. This is used as a reference to specify which job workers request the respective business rule task job. For example, `order-items`. Note that `type` can be specified as any static value (`myType`) or as a FEEL [expression](../../../concepts/expressions.md) prefixed by `=` that evaluates to any FEEL string; for example, `= "order-" + priorityGroup`. - -Use [task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) to pass static parameters to the job -worker (e.g. the key of the decision to evaluate). - -Define [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -the [same way as a service task does](/components/modeler/bpmn/service-tasks/service-tasks.md#variable-mappings) -to transform the variables passed to the job worker, or to customize how the variables of the job merge. 
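As a minimal sketch of the two variants described above (the element ids, names, decision id, result variable, job type, and header key are illustrative):

```xml
<!-- DMN decision implementation: evaluates the linked decision and stores the result -->
<bpmn:businessRuleTask id="determine-box-size" name="Determine box size">
  <bpmn:extensionElements>
    <zeebe:calledDecision decisionId="shipping_box_size" resultVariable="boxSize" />
  </bpmn:extensionElements>
</bpmn:businessRuleTask>

<!-- job worker implementation: behaves like a service task, with a job type and task headers -->
<bpmn:businessRuleTask id="check-order" name="Check order">
  <bpmn:extensionElements>
    <zeebe:taskDefinition type="order-items" />
    <zeebe:taskHeaders>
      <zeebe:header key="decisionRef" value="check_order_rules" />
    </zeebe:taskHeaders>
  </bpmn:extensionElements>
</bpmn:businessRuleTask>
```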
- -## Additional resources - -### XML representation - -A business rule task with a called decision: - -```xml - - - - - -``` - -A business rule task with a job worker implementation and a custom header: - -```xml - - - - - - - - -``` - -### References - -- [DMN decision](/components/modeler/dmn/dmn.md) -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/bpmn-modeler-call-activity.gif b/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/bpmn-modeler-call-activity.gif deleted file mode 100644 index 515cd46ccf7..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/bpmn-modeler-call-activity.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/call-activities-boundary-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/call-activities-boundary-events.png deleted file mode 100644 index 578f51f62f6..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/call-activities-boundary-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/call-activities-example.png b/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/call-activities-example.png deleted file mode 100644 index f765d428bbb..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/assets/call-activities-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/call-activities.md b/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/call-activities.md deleted file mode 100644 index 9390541e02b..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/call-activities/call-activities.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -id: call-activities -title: "Call activities" -description: "A call activity (or reusable subprocess) allows you to call and invoke another process as part of this process." ---- - -A call activity (or reusable subprocess) allows you to call and invoke another process as part of this process. It's similar to an [embedded subprocess](../embedded-subprocesses/embedded-subprocesses.md), but the process is externalized (i.e. stored as separated BPMN) and can be invoked by different processes. - -![call-activity](assets/call-activities-example.png) - -When a call activity is entered, a new process instance of the referenced process is created. The new process instance is activated at the **none start event**. The process can have start events of other types, but they are ignored. - -When the created process instance is completed, the call activity is left and the outgoing sequence flow is taken. - -## Defining the called process - -A call activity must define the BPMN process id of the called process as `processId`. - -The new instance of the defined process is created by its **latest version** at the point when the call activity is activated. - -Usually, the `processId` is defined as a [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `shipping-process`), but it can also be defined as [expression](/components/concepts/expressions.md) (e.g. `= "shipping-" + tenantId`). 
The expression is evaluated on activating the call activity and must result in a `string`. - -## Boundary events - -![call-activity-boundary-event](assets/call-activities-boundary-events.png) - -Interrupting and non-interrupting boundary events can be attached to a call activity. - -When an interrupting boundary event is triggered, the call activity and the created process instance are terminated. The variables of the created process instance are not propagated to the call activity. - -When a non-interrupting boundary event is triggered, the created process instance is not affected. The activities at the outgoing path have no access to the variables of the created process instance since they are bound to the other process instance. - -## Variable mappings - -By default, all variables of the call activity scope are copied to the created process instance. This can be limited to copying only the local variables of the call activity, by setting the attribute `propagateAllParentVariables` to `false`. - -By disabling this attribute, variables existing at higher scopes are no longer copied. If the attribute `propagateAllParentVariables` is set (default: `true`), all variables are propagated to the child process instance. - -Input mappings can be used to create new local variables in the scope of the call activity. These variables are also copied to the created process instance. - -If the attribute `propagateAllChildVariables` is set (default: `true`), all variables of the created process instance are propagated to the call activity. This behavior can be customized by defining output mappings at the call activity. The output mappings are applied on completing the call activity and only those variables that are defined in the output mappings are propagated. - -It's recommended to disable the attribute `propagateAllChildVariables` or define output mappings if the call activity is in a parallel flow (e.g. when it is marked as [parallel multi-instance](../multi-instance/multi-instance.md#variable-mappings)). Otherwise, variables can be accidentally overridden when they are changed in the parallel flow. - -## Additional resources - -### XML representation - -A call activity with static process id and propagation of all child variables turned on: - -```xml - - - - - -``` - -A call activity with copying of all variables to the child process turned off: - -```xml - - - - - - - - -``` - -### References - -- [Expressions](/components/concepts/expressions.md) -- [Variable scopes](/components/concepts/variables.md#variable-scopes) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/data-flow.md b/versioned_docs/version-8.2/components/modeler/bpmn/data-flow.md deleted file mode 100644 index 1f46377997d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/data-flow.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: data-flow -title: "Data flow" ---- - -Every BPMN process instance can have one or more variables. - -Variables are key-value-pairs and hold the contextual data of the process instance required by job workers to do their work, or to decide which sequence flows to take. They can be provided when a process instance is created, when a job is completed, and when a message is correlated. - -![data-flow](assets/data-flow.png) - -## Job workers - -By default, a job worker gets all variables of a process instance; it can limit the data by -providing a list of required variables as **fetchVariables**. 
- -The worker uses the variables to do its work. When the work is done, it completes the job. If the -result of the work is needed by follow-up tasks, the worker sets the variables while completing -the job. These variables [merge](/components/concepts/variables.md#variable-propagation) into the -process instance. - -![job-worker](assets/data-flow-job-worker.png) - -If the job worker expects the variables in a different format or under different names, the variables can be transformed by defining **input mappings** in the process. **Output mappings** can be used to transform the job variables before merging them into the process instance. - -## Variable scopes vs. token-based data - -A process can have concurrent paths; for example, when using a parallel gateway. When the execution reaches the parallel gateway, new tokens are created which execute the following paths concurrently. - -Since the variables are part of the process instance and not of the token, they can be read globally from any token. If a token adds a variable or modifies the value of a variable, the changes are also visible to concurrent tokens. - -![variable-scopes](assets/variable-scopes.png) - -The visibility of variables is defined by the **variable scopes** of the process. - -## Concurrency considerations - -When multiple active activities exist in a process instance (i.e. there is a form of concurrent -execution, such as a parallel gateway, multiple outgoing sequence flows, or a parallel -multi-instance marker), you may need to take extra care in dealing with variables. When a variable -is altered by one activity, it might also be accessed and altered by another at the same time. Race -conditions can occur in such processes. - -We recommend taking care when writing variables in a parallel flow. Make sure the variables are -written to the correct [variable scope](/components/concepts/variables.md#variable-scopes) using variable -mappings and make sure to complete jobs and publish messages only with the minimum required -variables. 
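As a sketch of what such mappings look like on a task (the job type and variable names are illustrative), input and output mappings are declared in a `zeebe:ioMapping` extension element:

```xml
<bpmn:serviceTask id="charge-credit-card" name="Charge credit card">
  <bpmn:extensionElements>
    <zeebe:taskDefinition type="payment-service" />
    <zeebe:ioMapping>
      <!-- input mapping: creates a local "price" variable for the job -->
      <zeebe:input source="= order.totalPrice" target="price" />
      <!-- output mapping: only "paymentStatus" is propagated back to the process instance -->
      <zeebe:output source="= status" target="paymentStatus" />
    </zeebe:ioMapping>
  </bpmn:extensionElements>
</bpmn:serviceTask>
```

Limiting what is propagated this way is one practical means of avoiding the race conditions described above.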
- -These types of problems can be avoided by: - -- Passing only updated variables -- Using output variable mappings to customize the variable propagation -- Using an embedded subprocess and input variable mappings to limit the visibility and propagation of variables - -## Additional resources - -- [Job handling](/components/concepts/job-workers.md) -- [Variables](/components/concepts/variables.md) -- [Input/output variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Variable scopes](/components/concepts/variables.md#variable-scopes) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.gif b/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.gif deleted file mode 100644 index 321a73efae5..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.png b/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.png deleted file mode 100644 index 369251e6636..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/assets/embedded-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md b/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md deleted file mode 100644 index a7dcc34efaf..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: embedded-subprocesses -title: "Embedded subprocess" -description: "An embedded subprocess allows you to group elements of the process." ---- - -An embedded subprocess allows you to group elements of the process. - -![embedded-subprocess](assets/embedded-subprocess.png) - -An embedded subprocess must have exactly **one** none start event. Other start events are not allowed. - -When an embedded subprocess is entered, the start event is activated. The subprocess stays active as long as at least one contained element is active. When the last element is completed, the subprocess is completed and the outgoing sequence flow is taken. - -Embedded subprocesses are often used together with **boundary events**. One or more boundary events can be attached to a subprocess. When an interrupting boundary event is triggered, the entire subprocess (including all active elements) is terminated. - -## Variable mappings - -Input mappings can be used to create new local variables in the scope of the subprocess. These variables are only visible within the subprocess. - -By default, the local variables of the subprocess are not propagated (i.e. they are removed with the scope). This behavior can be customized by defining output mappings at the subprocess. The output mappings are applied on completing the subprocess. - -## Additional resources - -### XML representation - -An embedded subprocess with a start event: - -```xml - - - ... more contained elements ... 
- -``` - -### References - -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/bpmn-modeler-error-events.gif b/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/bpmn-modeler-error-events.gif deleted file mode 100644 index cef0a5d91af..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/bpmn-modeler-error-events.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-catch-events.bpmn b/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-catch-events.bpmn deleted file mode 100644 index a512054df2f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-catch-events.bpmn +++ /dev/null @@ -1,170 +0,0 @@ - - - - - Flow_0q5iltk - - - Flow_0q5iltk - Flow_0gp4ks5 - - - - Flow_0qyyem4 - - - - Flow_0gp4ks5 - Flow_1o6eqqj - Flow_05dg4v3 - - - - Flow_05dg4v3 - Flow_0qyyem4 - - - - - Flow_13dpc2k - - - - Flow_13dpc2k - Flow_1kkycez - - - - Flow_1kkycez - - - - - Flow_1c3ds97 - - - - Flow_1c3ds97 - Flow_1o6eqqj - - - - - - Flow_1lchg6k - - - - Flow_1lchg6k - Flow_04rnubw - - - Flow_04rnubw - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-catch-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-catch-events.png deleted file mode 100644 index f0f5418d65d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-catch-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-events.bpmn b/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-events.bpmn deleted file mode 100644 index dbbf3cba4bf..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-events.bpmn +++ /dev/null @@ -1,92 +0,0 @@ - - - - - Flow_0qlmji5 - - - - Flow_1r5d8dq - Flow_0e9bbrx - Flow_1a05it6 - - - - - Flow_0qlmji5 - Flow_1r5d8dq - - - Flow_1a05it6 - Flow_1jdmx2e - - - Flow_0viou3d - - - - Flow_0viou3d - Flow_0e9bbrx - - - - - Flow_1jdmx2e - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-events.png deleted file mode 100644 index ebde63ce2e0..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-throw-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-throw-events.png deleted file mode 100644 index d29bf7f811d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/assets/error-throw-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/error-events.md 
b/versioned_docs/version-8.2/components/modeler/bpmn/error-events/error-events.md deleted file mode 100644 index b9be6f03239..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/error-events/error-events.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -id: error-events -title: "Error events" -description: "In process automation, you often encounter deviations from the default scenario. BPMN error events allow a process model to react to errors within a task." ---- - -In process automation, you often encounter deviations from the default scenario. One way to resolve these deviations is using a BPMN error event, which allows a process model to react to errors within a task. - -For example, if an invalid credit card is used in the process below, the process takes a different path than usual and uses the default payment method to collect money. - -![process with error event](assets/error-events.png) - -## Defining the error - -In BPMN, **errors** define possible errors that can occur. **Error events** are elements in the process referring to -defined errors. An error can be referenced by one or more error events. - -An error must define an `errorCode` (e.g. `InvalidCreditCard`). The `errorCode` is a `string` used to match a thrown -error to the error catch events. - -For throwing error events, it is possible to define the `errorCode` as an `expression`. When the event is reached, -the expression is evaluated. An error with the result of this expression is thrown. If no expression is used the -statically defined `errorCode` is used. - -For error catch events, the `errorCode` can be a [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) or it can be left empty. An expression can't be used. A -catch event with an empty `errorCode` will catch **all** thrown errors. - -## Throwing the error - -An error can be thrown within the process using an error **end event**. - -![process with error throw event](assets/error-throw-events.png) - -Alternatively, you can inform Zeebe that a business error occurred using a **client command**. This throw error client -command can only be used while processing a job. - -In addition to throwing the error, this also disables the job and stops it from being activated or completed by other job workers. See the [gRPC command](/apis-tools/grpc.md#throwerror-rpc) for details. - -## Catching the error - -A thrown error can be caught by an error catch event, specifically using an error **boundary event** or an error **event -subprocess**. - -![process with error catch event](assets/error-catch-events.png) - -Starting at the scope where the error was thrown, the error code is matched against the attached error boundary events -and error event subprocesses at that level. An error is caught by the first event in the scope hierarchy matching the -error code. At each scope, the error is either caught, or propagated to the parent scope. - -If the process instance is created via call activity, the error can also be caught in the calling parent process -instance. - -It is not possible to define multiple error catch events with the same `errorCode` in a single scope. It is also not -permitted to have multiple error catch events without an `errorCode` in a single scope. The deployment gets rejected in -these cases. However, it is possible to define both an error catch event **with** an `errorCode` and one **without** an -`errorCode` in the same scope. When this happens, the error catch event that matches the `errorCode` is prioritized. 
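A minimal sketch of the throwing side described above: the `errorCode` of an error throw event can be computed with a FEEL expression, and the result is then matched against the statically defined codes of the catch events. The variable and code names here are hypothetical, not taken from the original example:

```feel
// hypothetical: derive the thrown error code from the failed payment data
= if cardExpired then "ExpiredCreditCard" else "InvalidCreditCard"
```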
- -Error boundary events and error event subprocesses must be interrupting. This means the process instance will not -continue along the regular path, but instead follow the path that leads out of the catching error event. - -If the error is thrown for a job, the associated task is terminated first. To continue the execution, the error boundary -event or error event subprocess that caught the error is activated. - -## Unhandled errors - -When an error is thrown and not caught, an **incident** (i.e. `Unhandled error event`) is raised to indicate the failure. The incident is attached to the corresponding element where the error was thrown (i.e. the task of the processed job or the error end event). - -When you resolve the incident attached to a task, it ignores the error, re-enables the job, and allows it to be activated and completed by a job worker once again. - -The incident attached to an error end event cannot be resolved by a user because the failure is in the process itself. The process cannot be changed to catch the error for this process instance. - -## Business error vs. technical error - -In real life, you’ll also have to deal with technical problems that you don't want to treat using error events. - -Suppose the credit card service becomes temporarily unavailable. You don't want to model the retrying, as you would have to add it to each and every service task. This will bloat the visual model and confuse business personnel. Instead, either retry or fall back to incidents as described above. This is hidden in the visual. - -In this context, we found the terms **business error** and **technical error** can be confusing, as they emphasize the source of the error too much. This can lead to long discussions about whether a certain problem is technical or not, and if you are allowed to see technical errors in a business process model. - -It's much more important to look at how you _react_ to certain errors. Even a technical problem can qualify for a business reaction. For example, you could decide to continue a process in the event that a scoring service is not available, and simply give every customer a good rating instead of blocking progress. The error is clearly technical, but the reaction is a business decision. - -In general, we recommend talking about business reactions, which are modeled in your process, and technical reactions, which are handled generically using retries or incidents. - -## Variable mappings - -All error variables are merged into the error catch event. These variables can be merged into the process instance by defining an output mapping at the error catch event. - -Visit the documentation regarding [variable mappings](../../../concepts/variables/#inputoutput-variable-mappings) for more information. 
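As a hedged illustration of such an output mapping: its source is a FEEL expression evaluated against the error variables available at the catch event, and its target names the process variable to write. The `errorDetails` payload below is hypothetical:

```feel
// source expression of an output mapping at the error catch event:
// copy only the reason out of a larger error payload
= errorDetails.reason
```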
- -## Additional resources - -### XML representation - -A boundary error event: - -```xml - - - - - - -``` - -A boundary error event without `errorCode`: - -```xml - - - -``` - -### References - -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/escalation-events/assets/escalation-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/escalation-events/assets/escalation-events.png deleted file mode 100644 index 4c88ba250b1..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/escalation-events/assets/escalation-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/escalation-events/escalation-events.md b/versioned_docs/version-8.2/components/modeler/bpmn/escalation-events/escalation-events.md deleted file mode 100644 index 2b54fac9cda..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/escalation-events/escalation-events.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: escalation-events -title: "Escalation events" -description: "Escalation events are used to escalate part of process execution to a higher flow scope." ---- - -Escalation events are events which reference a named escalation, and are used to communicate to a higher flow scope. -Unlike an error, an escalation event is non-critical and execution continues at the location of throwing. - -![The process reached an escalation event. The escalation gets caught in a higher flow scope. As the escalation throw event is non-critical, the outgoing sequence flow of this event is taken.](assets/escalation-events.png) - -The example above shows the execution of an escalation event: - -1. The process reaches the `Throw` event. -2. This throws an escalation to a higher flow scope. -3. The escalation is caught by the `Catch` event. -4. As escalation events are non-critical, the outgoing sequence flows of `Throw` and `Catch` are both taken. - -## Defining an escalation - -In BPMN, an `escalation event` references an `escalation`. Escalations can be referenced by one or more escalation events. - -An escalation must define an `escalationCode`. The value of this `escalationCode` is used to determine which catch event -can catch the thrown escalation. - -For throwing escalation events, it is possible to define the `escalationCode` as an `expression`. When the event is reached, the expression is evaluated. -An escalation with the result of this expression is thrown. If no expression is used the statically defined `escalationCode` is used. - -For catching escalation events it is not possible to use an `expression`. The `escalationCode` must always be a [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values). -Alternatively, the `escalationCode` can be left empty. A catch event with an empty `escalationCode` will catch **all** thrown escalations. - -## Throwing the escalation - -An escalation can be thrown by an escalation end event, or by an intermediate escalation throw event. Escalation events -are non-critical. This means that if the throwing event has any outgoing sequence flows, they will be taken. - -## Catching the escalation - -An escalation can be caught using a boundary event, or using an event subprocess. It is caught by one catch event at most, and this will be the catch event in the nearest parent flow scope. - -It is not possible to define multiple escalation catch events with the same `escalationCode` in a single scope. 
It is also not permitted to have multiple escalation catch events without an `escalationCode` in a single scope. -The deployment gets rejected in these cases. However, it is possible to define both an escalation catch event **with** an -`escalationCode` and one **without** an `escalationCode` in the same scope. When this happens, the escalation catch event -that matches the `escalationCode` is prioritized. - -If there are no escalation catch events that match the `escalationCode`, the escalation will not be caught. Unlike with -[error events](../error-events/error-events.md), no incident is raised. The process will continue without escalating. - -Even though escalations are non-critical, it is still possible to make escalation catch events interrupting. This will -behave the same as other interrupting events. The catch event will terminate the scope it is attached to. In this case, -the outgoing sequence flows of the throwing escalation event are not taken. - -## Additional resources - -### XML representation - -An intermediate escalation throw event with expression: - -```xml - - - - - -``` - -An escalation boundary catch event: - -```xml - - - - - -``` diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.gif b/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.gif deleted file mode 100644 index 351494ed46c..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.png b/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.png deleted file mode 100644 index 5d97d9b781a..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/assets/event-based-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/event-based-gateways.md b/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/event-based-gateways.md deleted file mode 100644 index 77fdeac0c24..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/event-based-gateways/event-based-gateways.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: event-based-gateways -title: "Event-based gateway" -description: "An event-based gateway allows you to make a decision based on events." ---- - -An event-based gateway allows you to make a decision based on events. - -![process](assets/event-based-gateway.png) - -An event-based gateway must have at least **two** outgoing sequence flows. Each sequence flow must be connected to an intermediate catch event of type **timer or message**. - -When an event-based gateway is entered, the process instance waits at the gateway until one of the events is triggered. When the first event is triggered, the outgoing sequence flow of this event is taken. No other events of the gateway can be triggered afterward.
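The timer catch events used with such a gateway can define their wait time statically (as in the `PT1H` duration of the XML example below) or with a FEEL expression. A hedged sketch, where `timeoutInMinutes` is a hypothetical process variable:

```feel
// timer duration derived from a process variable
= duration("PT" + string(timeoutInMinutes) + "M")
```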
- -## Additional resources - -### XML representation - -An event-based gateway with two outgoing sequence flows: - -```xml - - - - - - - - - - - - - PT1H - - -``` - -### References - -- [Timer events](../timer-events/timer-events.md) -- [Message events](../message-events/message-events.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/assets/event-subprocess.png b/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/assets/event-subprocess.png deleted file mode 100644 index 5942d9b7c18..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/assets/event-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/assets/zeebe-modeler-event-subprocess.gif b/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/assets/zeebe-modeler-event-subprocess.gif deleted file mode 100644 index 65778cfc275..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/assets/zeebe-modeler-event-subprocess.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/event-subprocesses.md b/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/event-subprocesses.md deleted file mode 100644 index a98ec641e6c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/event-subprocesses/event-subprocesses.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: event-subprocesses -title: "Event subprocess" -description: "An event subprocess is a subprocess triggered by an event." ---- - -An event subprocess is a subprocess triggered by an event. This can be added globally to the process, or locally inside an embedded subprocess. - -![event-subprocess](assets/event-subprocess.png) - -An event subprocess must have exactly **one** start event of one of the following types: - -- [Timer](../timer-events/timer-events.md) -- [Message](../message-events/message-events.md) -- [Error](../error-events/error-events.md) - -An event subprocess behaves like a boundary event, but is inside the scope instead of attached to the scope. Like a boundary event, the event subprocess can be interrupting or non-interrupting (indicated in BPMN by a solid or dashed border of the start event). The start event of the event subprocess can be triggered when its containing scope is activated. - -A non-interrupting event subprocess can be triggered multiple times. An interrupting event subprocess can be triggered only once. - -When an interrupting event subprocess is triggered, all active instances of its containing scope are terminated, including instances of other non-interrupting event subprocesses. - -If an event subprocess is triggered, its containing scope is not completed until the triggered instance is completed. - -## Variables - -Unlike a boundary event, an event subprocess is inside the scope. Therefore, it can access and modify all local variables of its containing scope. This is not possible with a boundary event because a boundary event is outside of the scope. - -Input mappings can be used to create new local variables in the scope of the event subprocess. These variables are only visible within the event subprocess. If no input mappings are defined, the [default behavior](../../../concepts/variables.md#variable-scopes) is applied to the variables alongside the event. - -By default, the local variables of the event subprocess are not propagated (i.e. 
removed with the scope). This behavior can be customized by defining output mappings at the event subprocess. The output mappings are applied on completion of the event subprocess. - -## Additional resources - -### XML representation - -An event subprocess with an interrupting timer start event: - -```xml - - - - PT5M - - ... other elements - -``` - -### References - -- [Embedded subprocess](../embedded-subprocesses/embedded-subprocesses.md) -- [Variable scopes](/components/concepts/variables.md#variable-scopes) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/events.md b/versioned_docs/version-8.2/components/modeler/bpmn/events.md deleted file mode 100644 index 0fc8827d98e..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/events.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: events -title: "Overview" -description: "This document outlines an overview of general events, intermediate events, and boundary events." ---- - -**Events** in BPMN represent things that _happen_. A process can react to events (_catching_ event) as well as emit events (_throwing_ event). For example, a catching message event makes the token continue as soon as a message is received. The XML representation of the process contains the criteria for which kind of message triggers continuation. - -Events can be added to the process in various ways. Not only can they be used to make a token wait at a certain point, but also for interrupting a token's progress. - -Currently supported events: - -- [None events](none-events/none-events.md) -- [Message events](message-events/message-events.md) -- [Timer events](timer-events/timer-events.md) -- [Error events](error-events/error-events.md) -- [Escalation events](escalation-events/escalation-events.md) -- [Terminate events](terminate-events/terminate-events.md) -- [Link events](link-events/link-events.md) -- [Signal events](signal-events/signal-events.md) - -:::note -Not all the signal events are supported yet. For a complete overview of supported events, refer to the [BPMN coverage](../bpmn-coverage#events). -::: - -## Events in general - -Events in BPMN can be **thrown** (i.e. sent), or **caught** (i.e. received), respectively referred to as **throw** or **catch** events (e.g. `message throw event`, `timer catch event`). - -Additionally, a distinction is made between start, intermediate, and end events: - -- **Start events** (catch events, as they can only react to something) are used to denote the beginning of a process or subprocess. -- **End events** (throw events, as they indicate something has happened) are used to denote the end of a particular sequence flow. -- **Intermediate events** can be used to indicate that something has happened (i.e. intermediate throw events), or to wait and react to certain events (i.e. intermediate catch events). - -Intermediate catch events can be inserted into your process in two different contexts: normal flow, or attached to an activity, and are called boundary events. - -## Intermediate events - -
    - -In normal flow, an intermediate throw event executes its event (e.g. send a message) once the token has reached it. Once complete, the token continues to all outgoing sequence flows (1). - -An intermediate catch event, however, stops the token and waits until the event it is waiting for occurs, at which point execution resumes and the token moves on (2). - -## Boundary events - -Boundary events provide a way to model what should happen if an event occurs while an activity is active. For example, if a process is waiting on a user task that is taking too long, an intermediate timer catch event can be attached to the task, with an outgoing sequence flow to a notification task, allowing the modeler to automatically send a reminder email to the user. - -
    - -A boundary event must be an intermediate catch event, and can be either interrupting (1)or non-interrupting (2). Interrupting means that once triggered, before taking any outgoing sequence flow the activity the event is attached to is terminated. This allows modeling timeouts where we can prune certain execution paths if something happens (e.g. the process takes too long). diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.gif b/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.gif deleted file mode 100644 index c7a82ad2d7b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.png b/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.png deleted file mode 100644 index 8ed0aa57627..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/assets/exclusive-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md b/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md deleted file mode 100644 index 1db5ad6dba2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/exclusive-gateways/exclusive-gateways.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: exclusive-gateways -title: "Exclusive gateway" -description: "An exclusive gateway (or XOR-gateway) allows you to make a decision based on data." ---- - -An exclusive gateway (or XOR-gateway) allows you to make a decision based on data (i.e. on process instance variables). - -![process](assets/exclusive-gateway.png) - -If an exclusive gateway has multiple outgoing sequence flows, all sequence flows except one must have a `conditionExpression` to define when the flow is taken. The gateway can have one sequence flow without `conditionExpression`, which must be defined as the default flow. - -When an exclusive gateway is entered, the `conditionExpression` is evaluated. The process instance takes the first sequence flow where the condition is fulfilled. - -If no condition is fulfilled, it takes the **default flow** of the gateway. If the gateway has no default flow, an incident is created. - -An exclusive gateway can also be used to join multiple incoming flows together and improve the readability of the BPMN. A joining gateway has a pass-through semantic and doesn't merge the incoming concurrent flows like a parallel gateway. - -## Conditions - -A `conditionExpression` defines when a flow is taken. It is a [boolean expression](/components/modeler/feel/language-guide/feel-boolean-expressions.md) that can access the process instance variables and compare them with literals or other variables. The condition is fulfilled when the expression returns `true`. - -Multiple boolean values or comparisons can be combined as disjunction (`or`) or conjunction (`and`). 
- -For example: - -```feel -= totalPrice > 100 - -= order.customer = "Paul" - -= orderCount > 15 or totalPrice > 50 - -= valid and orderCount > 0 -``` - -## Additional resources - -### XML representation - -An exclusive gateway with two outgoing sequence flows: - -```xml - - - - - = totalPrice > 100 - - - - -``` - -### References - -- [Expressions](/components/concepts/expressions.md) -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/gateways.md b/versioned_docs/version-8.2/components/modeler/bpmn/gateways.md deleted file mode 100644 index dfe1217027c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/gateways.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: gateways -title: "Overview" -description: "This document outlines an overview of currently supported gateways." ---- - -Gateways are elements that route tokens in more complex patterns than plain sequence flow. - -BPMN's **exclusive gateway** chooses one sequence flow out of many based on data, whereas BPMN's **parallel gateway** generates new tokens by activating multiple sequence flows in parallel, for example. - -Currently supported elements: - -- [Exclusive gateways](exclusive-gateways/exclusive-gateways.md) -- [Parallel gateways](parallel-gateways/parallel-gateways.md) -- [Event-based gateways](event-based-gateways/event-based-gateways.md) -- [Inclusive gateways](inclusive-gateways/inclusive-gateways.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-1.png b/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-1.png deleted file mode 100644 index dcb281d70d9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-2.png b/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-2.png deleted file mode 100644 index 7f2da26de60..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-default.png b/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-default.png deleted file mode 100644 index fed21360c27..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway-default.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway.png b/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway.png deleted file mode 100644 index 3d496b4b68b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/assets/inclusive-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md b/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md deleted file mode 100644 index bac2e804414..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: inclusive-gateways -title: "Inclusive 
gateway" -description: "An inclusive gateway (or OR-gateway) allows you to make multiple decisions based on data." ---- - -:::note - -Currently, Camunda 8 only supports the diverging (i.e. splitting, forking) inclusive gateway. It does not yet support the converging (i.e. merging, joining) inclusive gateway. A combination of parallel and exclusive gateways can be used as an alternative way to merge the flows. - -::: - -The inclusive gateway (or OR-gateway) allows for making multiple decisions based on data (i.e. on process instance variables). - -![A process model to prepare lunch at lunchtime can use an inclusive gateway to decide which steps to take to prepare the different lunch components, e.g. cook pasta, stir-fry steak, prepare salad, or any combination of these.](assets/inclusive-gateway.png) - -If an inclusive gateway has multiple outgoing sequence flows, all sequence flows must have a condition to define when the flow is taken. If the inclusive gateway only has one outgoing sequence flow, then it does not need to have a condition. - -Optionally, one of the sequence flows can be marked as the default flow. This sequence flow should not have a condition, because its behavior depends on the other conditions. - -When an inclusive gateway is entered, the conditions are evaluated. The process instance takes all sequence flows where the condition is fulfilled. - -For example: Courses selected include `pasta` and `salad`. - -![An inclusive gateway has decided to take the steps to cook pasta and prepare salad, but not stir-fry steak.](assets/inclusive-gateway-1.png) - -For example: Courses selected include `steak`, `pasta` and `salad`. - -![An inclusive gateway has decided to take the steps to cook pasta, stir-fry steak, and prepare salad.](assets/inclusive-gateway-2.png) - -If no condition is fulfilled, it takes the **default flow** of the gateway. Note that the default flow is not expected to have a condition, and is therefore not evaluated. If no condition is fulfilled and the gateway has no default flow, an incident is created. - -For example: No courses are selected, so the default flow is taken. - -![An inclusive gateway has decided to take the step to prepare salad as the default because none of the conditions were fulfilled.](assets/inclusive-gateway-default.png) - -## Conditions - -A `conditionExpression` defines when a flow is taken. It is a [boolean expression](/components/modeler/feel/language-guide/feel-boolean-expressions.md) that can access the process instance variables and compare them with literals or other variables. The condition is fulfilled when the expression returns `true`. - -Multiple boolean values or comparisons can be combined as disjunction (`or`) or conjunction (`and`). 
- -For example: - -```feel -= totalPrice > 100 - -= order.customer = "Paul" - -= orderCount > 15 or totalPrice > 50 - -= valid and orderCount > 0 - -= list contains(courses, "salad") -``` - -## Additional resources - -### XML representation - -An inclusive gateway with three outgoing sequence flows and the default sequence flow is `Salad`: - -```xml - - Flow_0mfam08 - Flow_0d3xogt - Flow_1le3l31 - Flow_05d0jjq - - - - = list contains(courses, "pasta") - - - - - = list contains(courses, "steak") - - - -``` - -### References - -- [Conditions](/components/modeler/bpmn/inclusive-gateways/inclusive-gateways.md#conditions) -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/link-events/assets/link-events-example-in-practice.png b/versioned_docs/version-8.2/components/modeler/bpmn/link-events/assets/link-events-example-in-practice.png deleted file mode 100644 index 6b26e270157..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/link-events/assets/link-events-example-in-practice.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/link-events/assets/link-events-example.png b/versioned_docs/version-8.2/components/modeler/bpmn/link-events/assets/link-events-example.png deleted file mode 100644 index 595f5ea0368..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/link-events/assets/link-events-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/link-events/link-events.md b/versioned_docs/version-8.2/components/modeler/bpmn/link-events/link-events.md deleted file mode 100644 index bcdc0ab0a61..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/link-events/link-events.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -id: link-events -title: "Link events" -description: "Link events are intermediate events that connect two sections of a process." ---- - -Link events are intermediate events that connect two sections of a process. - -They have no significance related to content, but facilitate the diagram-creation process. - -:::tip -You can use link events to create loops, to skip sections of a process, or to simplify the sequence flow lines in the diagram. -::: - -Link events have a throwing link event as the "exit point", and a catching link event as the "re-entrance point". -They are linked together by their link name. -Multiple throwing link events can link to the same catching link event. -A throwing link event cannot link to multiple catching link events. - -In practice, two paired link events function the same as two [intermediate none events] connected via a sequence flow. - -![A pair of link events is equivalent to a pair of intermediate none events connected via a sequence flow](./assets/link-events-example.png) - -Link events can be very useful if you draw comprehensive process diagrams with many sequence flows. -Links help avoid what otherwise might look like a “spaghetti” diagram. -In the example below, a retry loop is created using the link events pair `A`. - -![A pair of link events is used to form a retry loop](./assets/link-events-example-in-practice.png) - -:::info Link events are limited to a single scope -Link events can only be used to link sections of a process within the same scope. -I.e., they can only exist together on the root process level or within the same subprocess. - -Similarly, a sequence flow cannot be drawn between flow nodes at different scopes. 
-For example, a task in the root process level cannot connect to another task in a subprocess using a sequence flow. -Link events have the same limitation. -::: - -## Additional resources - -### XML representation - -A pair of link events connected by their link name: - -```xml - - - - - - -``` - -### References - -- [Intermediate none events] - -[intermediate none events]: ../none-events/none-events.md#intermediate-none-events-throwing diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/manual-tasks/assets/manual-task.png b/versioned_docs/version-8.2/components/modeler/bpmn/manual-tasks/assets/manual-task.png deleted file mode 100644 index ce955bd1303..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/manual-tasks/assets/manual-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/manual-tasks/manual-tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/manual-tasks/manual-tasks.md deleted file mode 100644 index 5d2ac406614..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/manual-tasks/manual-tasks.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: manual-tasks -title: "Manual tasks" -description: "A manual task defines a task that is external to the BPM engine." ---- - -A manual task defines a task that is external to the BPM engine. This is used to model work that is done -by somebody whom the engine does not need to know about, and for which there is no known system or UI interface. - -For the engine, a manual task is handled as a pass-through activity, automatically continuing the -process at the moment the process instance arrives. - -![task](assets/manual-task.png) - -Manual tasks have no real benefit for automating processes. Manual tasks instead provide insights into the tasks -that are performed outside of the process engine. - -## Additional resources - -### XML representation - -A manual task: - -```xml - -``` diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/markers.md b/versioned_docs/version-8.2/components/modeler/bpmn/markers.md deleted file mode 100644 index cc369fb0a42..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/markers.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: markers -title: "Overview" -description: "This document outlines an overview of supported markers." ---- - -You might want to execute some tasks for every element of a list, like the `for each` construct in programming languages. Refer to [Workflow Pattern 14: Multiple Instances with a priori Run-Time Knowledge](http://www.workflowpatterns.com/patterns/control/new/wcp14.php): "Multiple instances of a task can be created. The required number of instances may depend on a number of runtime factors, but is known before the task instances must be created. Once initiated, these instances are independent of each other and run concurrently. It is necessary to synchronize the instances at completion before any subsequent tasks can be triggered." - -In BPMN, this is implemented using [multiple instance activities](/components/modeler/bpmn/multi-instance/multi-instance.md): - -Parallel multiple instance markers define that a subprocess is executed multiple times - once for each element of a given collection (like a `for each` loop in a programming language). 
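As a sketch of this `for each` idea, the collection that a multi-instance marker iterates over can itself be produced by a FEEL expression — for example, to run an activity a fixed number of times (`n` is a hypothetical variable holding that number):

```feel
// builds the list [1, 2, ..., n] to use as the input collection
= for i in 1..n return i
```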
- -Currently supported markers: - -- [Multi-instance](multi-instance/multi-instance.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-catch-event-example.png b/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-catch-event-example.png deleted file mode 100644 index ab18e08b0df..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-catch-event-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-catch-event.png b/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-catch-event.png deleted file mode 100644 index fe3c90e99a9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-catch-event.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-event.gif b/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-event.gif deleted file mode 100644 index f2929004cd2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-event.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-events.png deleted file mode 100644 index d8bf47512bf..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/assets/message-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/message-events.md b/versioned_docs/version-8.2/components/modeler/bpmn/message-events/message-events.md deleted file mode 100644 index a0b92984839..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/message-events/message-events.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -id: message-events -title: "Message events" -description: "Message events are events which reference a message; they are used to wait until a proper message is received." ---- - -Message events are events which reference a message; they are used to wait until a proper message is received. - -![process](assets/message-events.png) - -## Message start events - -A process can have one or more message start events (besides other types of start events). Each of the message events must have a unique message name. - -When a process is deployed, it creates a message subscription for each message start event. Message subscriptions of the previous version of the process (based on the BPMN process id) are closed. - -### Message correlation - -When the message subscription is created, a message can be correlated to the start event if the message name matches. On correlating the message, a new process instance is created and the corresponding message start event is activated. - -Messages are **not** correlated if they were published before the process was deployed or if a new version of the process is deployed without a proper start event. - -The `correlationKey` of a published message can be used to control the process instance creation. - -- If an instance of this process is active (independently from its version) and it was triggered by a message with the same `correlationKey`, the message is **not** correlated and no new instance is created. If the message has a time-to-live (TTL) > 0, it is buffered. 
-- When the active process instance is completed or terminated and a message with the same `correlationKey` and a matching message name is buffered (that is, TTL > 0), this message is correlated and a new instance of the latest version of the process is created. - -If the `correlationKey` of a message is empty, it creates a new process instance and does not check if an instance is already active. - -:::note - -You do not specify a `correlationKey` for a message start event in the BPMN model when designing a process. - -- When an application sends a message that is caught by a message start event, the application can specify a `correlationKey` in the message. -- If a message caught by a start event contains a `correlationKey` value, the created process is tagged with that `correlationKey` value. -- Follow-up messages are then checked against this `correlationKey` value (that is, is there an active process instance that was started by a message with the same `correlationKey`?). - -::: - -## Intermediate message catch events - -When an intermediate message catch event is entered, a corresponding message subscription is created. The process instance stops at this point and waits until the message is correlated. When a message is correlated, the catch event is completed and the process instance continues. - -:::note -An alternative to intermediate message catch events is a [receive task](../receive-tasks/receive-tasks.md), which behaves the same but can be used together with boundary events. -::: - -## Message boundary events - -An activity can have one or more message boundary events. Each of the message events must have a unique message name. - -When the activity is entered, it creates a corresponding message subscription for each boundary message event. If a non-interrupting boundary event is triggered, the activity is not terminated and multiple messages can be correlated. - -## Message throw events - -A process can contain intermediate message throw events or message end events to model the -publication of a message to an external system; for example, to a Kafka topic. - -Currently, intermediate message throw events and message end events behave exactly -like [service tasks](../service-tasks/service-tasks.md) or [send tasks](../send-tasks/send-tasks.md) -, and have the same job-related properties (e.g. job type, custom headers, etc.) The message throw -events and the tasks are based on jobs -and [job workers](../../../../components/concepts/job-workers.md). The differences between the message -throw events and the tasks are the visual representation and the semantics for the model. Read more -about the [job properties](../../../../components/concepts/job-workers.md). - -When a process instance enters a message throw event, it creates a corresponding job and waits for -its completion. A job worker should request jobs of this job type and process them. When the job is -complete, the process instance continues or completes if it is a message end event. - -:::note -Message throw events are not processed by Zeebe itself (i.e. to correlate a message to a message -catch event). Instead, it creates jobs with the defined job type. To process them, provide a job -worker. -::: - -## Messages - -A message can be referenced by one or more message events. It must define the name of the message (e.g. `Money collected`) and the `correlationKey` expression (e.g. `= orderId`). If the message is only referenced by message start events, the `correlationKey` is not required. 
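To sketch how this correlation works (variable names hypothetical): if a message catch event defines its `correlationKey` with the expression below, a published `Money collected` message is only correlated to a process instance in which this expression evaluates to the correlation key value the message was published with.

```feel
// correlationKey expression, evaluated in the process instance that activated the message event
= order.orderId
```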
- -Usually, the name of the message is defined as a [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `order canceled`), but it can also be defined as [expression](/components/concepts/expressions.md) (e.g. `= "order " + awaitingAction`). If the expression belongs to a message start event of the process, it is evaluated on deploying the process. Otherwise, it is evaluated on activating the message event. The evaluation must result in a `string`. - -The `correlationKey` is an expression that usually [accesses a variable](/components/modeler/feel/language-guide/feel-variables.md#access-variable) of the process instance that holds the correlation key of the message. The expression is evaluated on activating the message event and must result either in a `string` or in a `number`. - -To correlate a message to the message event, the message is published with the defined name (e.g. `Money collected`) and the **value** of the `correlationKey` expression. For example, if the process instance has a variable `orderId` with value `"order-123"`, the message must be published with the correlation key `"order-123"`. - -## Variable mappings - -By default, all message variables are merged into the process instance. This behavior can be customized by defining an output mapping at the message catch event. - -Visit the documentation regarding [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) for more information on this topic. - -## Additional resources - -### XML representation - -A message start event with message definition: - -```xml - - - - - -``` - -An intermediate message catch event with message definition: - -```xml - - - - - - - - - -``` - -A boundary message event: - -```xml - - - -``` - -### References - -- [Message correlation](/components/concepts/messages.md) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Incidents](/components/concepts/incidents.md) -- [Job handling](/components/concepts/job-workers.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/bpmn-modeler-multi-instance.gif b/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/bpmn-modeler-multi-instance.gif deleted file mode 100644 index 6dcc71d791f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/bpmn-modeler-multi-instance.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-boundary-event.png b/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-boundary-event.png deleted file mode 100644 index 40e112ba8ca..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-boundary-event.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-example.png b/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-example.png deleted file mode 100644 index 4d0b1b1ed19..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-parallel.png 
b/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-parallel.png deleted file mode 100644 index b1bec0a99e4..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-parallel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-sequential.png b/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-sequential.png deleted file mode 100644 index 9a40ce49737..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/assets/multi-instance-sequential.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/multi-instance.md b/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/multi-instance.md deleted file mode 100644 index 6817a484f57..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/multi-instance/multi-instance.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: multi-instance -title: "Multi-instance" -description: "A multi-instance activity is executed multiple times - once for each element of a given collection." ---- - -A multi-instance activity is executed multiple times - once for each element of a given collection (like a _foreach_ loop in a programming language). - -The following activities can be marked as multi-instance: - -- [Service tasks](../service-tasks/service-tasks.md) -- [Receive tasks](../receive-tasks/receive-tasks.md) -- [Embedded subprocesses](../embedded-subprocesses/embedded-subprocesses.md) -- [Call activities](../call-activities/call-activities.md) - -![multi-instance](assets/multi-instance-example.png) - -On the execution level, a multi-instance activity has two parts: a multi-instance body, and an inner activity. The multi-instance body is the container for all instances of the inner activity. - -When the activity is entered, the multi-instance body is activated and one instance for every element of the `inputCollection` is created (sequentially or in parallel). When all instances are completed, the body is completed and the activity is left. - -## Sequential vs. parallel - -A multi-instance activity is executed either sequentially or in parallel (default). In the BPMN, a sequential multi-instance activity is displayed with three horizontal lines at the bottom. A parallel multi-instance activity is represented by three vertical lines. - -In case of a **sequential** multi-instance activity, the instances are executed one at a time. When one instance is completed, a new instance is created for the next element in the `inputCollection`. - -![sequential multi-instance](assets/multi-instance-sequential.png) - -In case of a **parallel** multi-instance activity, all instances are created when the multi-instance body is activated. The instances are executed concurrently and independently from each other. - -![parallel multi-instance](assets/multi-instance-parallel.png) - -## Defining the collection to iterate over - -A multi-instance activity must have an `inputCollection` expression that defines the collection to iterate over (e.g. `= items`). Usually, it [accesses a variable](/components/modeler/feel/language-guide/feel-variables.md#access-variable) of the process instance that holds the collection. The expression is evaluated on activating the multi-instance body. It must result in an `array` of any type (e.g. `["item-1", "item-2"]`). 
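Since `inputCollection` is an ordinary FEEL expression, it can also filter or transform data before the instances are created. A hedged sketch with a hypothetical `items` variable, creating one instance per still-open item:

```feel
// keep only the elements whose status is "open"
= items[status = "open"]
```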
- -:::tip - -If you need to iterate `n` times (like with a loop-cardinality), you can use the following expression with a [for-loop](/components/modeler/feel/language-guide/feel-control-flow.md#for-loops): `for i in 1..n return i`. - -::: - -To access the current element of the `inputCollection` value within the instance, the multi-instance activity can define the `inputElement` variable (e.g. `item`). The element is stored as a local variable of the instance under the given name. - -If the `inputCollection` value is **empty**, the multi-instance body is completed immediately and no instances are created. It behaves like the activity is skipped. - -## Collecting the output - -The output of a multi-instance activity (e.g. the result of a calculation) can be collected from the instances by defining the `outputCollection` and the `outputElement` expression. - -`outputCollection` defines the name of the variable under which the collected output is stored (e.g. `results`). It is created as a local variable of the multi-instance body and is updated when an instance is completed. When the multi-instance body is completed, the variable is propagated to its parent scope. - -`outputElement` is an expression that defines the output of the instance (e.g. `= result`). Usually, it [accesses a variable](/components/modeler/feel/language-guide/feel-variables.md#access-variable) of the instance that holds the output value. If the expression only accesses a variable or a nested property, it's created as a **local variable** of the instance. This variable should be updated with the output value; for example, by a job worker providing a variable with the name `result`. Since the variable is defined as a local variable, it is not propagated to its parent scope and is only visible within the instance. - -When the instance is completed, the `outputElement` expression is evaluated and the result is inserted into the `outputCollection` at the same index as the `inputElement` of the `inputCollection`. Therefore, the order of the `outputCollection` is determined and matches to the `inputCollection`, even for parallel multi-instance activities. If the `outputElement` variable is not updated, `null` is inserted instead. - -If the `inputCollection` value is empty, an empty array is propagated as `outputCollection`. - -## Boundary events - -![multi-instance with boundary event](assets/multi-instance-boundary-event.png) - -Interrupting and non-interrupting boundary events can be attached to a multi-instance activity. - -When an interrupting boundary event is triggered, the multi-instance body and all active instances are terminated. The `outputCollection` variable is not propagated to the parent scope (i.e. no partial output). - -When a non-interrupting boundary event is triggered, the instances are not affected. The activities at the outgoing path have no access to the local variables since they are bound to the multi-instance activity. - -## Special multi-instance variables - -Every instance has a local variable `loopCounter`. It holds the index in the `inputCollection` of this instance, starting with `1`. - -## Variable mappings - -Input and output variable mappings can be defined at the multi-instance activity; they are applied on each instance on activating and on completing. - -The input mappings can be used to create new local variables in the scope of an instance. These variables are only visible within the instance; it is a way to restrict the visibility of variables. By default, new variables (e.g. 
provided by a job worker) are created in the scope of the process instance and are visible to all instances of the multi-instance activity as well as outside of it. - -In case of a parallel multi-instance activity, this can lead to variables that are modified by multiple instances and result in race conditions. If a variable is defined as a local variable, it is not propagated to a parent or the process instance scope and can't be modified outside the instance. - -The input mappings can access the local variables of the instance (e.g. `inputElement`, `loopCounter`); for example, to extract parts of the `inputElement` variable and apply them to separate variables. - -The output mappings can be used to update the `outputElement` variable; for example, to extract a part of the job variables. - -**Example:** We have a call activity marked as a parallel multi-instance. When the called process instance completes, its variables are [merged](/components/concepts/variables.md#variable-propagation) into the call activity's process instance. Its result is collected in the output collection variable, but this creates a race condition in which each completing child instance overwrites that same variable. We end up with a corrupted output collection. An output mapping can be used to overcome this, because it restricts which variables are merged. In the case of: - -- Parallel multi-instance call activity -- Multi-instance output element: `=output` -- Variable in the child instance that holds the result: `x` - -The output mapping on the call activity should be: - -``` -source: =x -target: output -``` - -## Completion condition - -A `completionCondition` defines whether the multi-instance body can be completed immediately when the condition is satisfied. It is a [boolean expression](/components/modeler/feel/language-guide/feel-boolean-expressions.md) that is evaluated each time an instance of the multi-instance body completes. When the expression evaluates to `true`, any instances that are still active are terminated and the multi-instance body is completed. - -The BPMN 2.0 specification defines the following properties of a multi-instance body: - -- `numberOfInstances`: The number of instances created. -- `numberOfActiveInstances`: The number of instances currently active. -- `numberOfCompletedInstances`: The number of instances already completed. -- `numberOfTerminatedInstances`: The number of instances already terminated. - -These properties are available for use in the `completionCondition` expression. For example, using these properties you can express "complete the multi-instance body when 50% or more of the instances have already completed" as `= numberOfCompletedInstances / numberOfInstances >= 0.5`. Although these properties are available in this expression, they do not exist as process variables. These properties take precedence over process variables with the same name. - -Multiple boolean values or comparisons can be combined as disjunction (`or`) or conjunction (`and`). 
- -For example: - -```feel -= result.isSuccessful - -= count(["a", "b", "c", "d"]) > 3 - -= orderCount >= 5 and orderCount < 15 - -= list contains([6,7], today().weekday) - -= numberOfCompletedInstances = 2 - -= numberOfCompletedInstances / numberOfInstances >= 0.5 -``` - -## Additional resources - -### XML representation - -A sequential multi-instance service task: - -```xml - - - - - - - = result.isSuccessful - - - -``` - -### References - -- [Variable scopes](/components/concepts/variables.md#variable-scopes) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/end-event.gif b/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/end-event.gif deleted file mode 100644 index 5ca9e152d56..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/end-event.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/none-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/none-events.png deleted file mode 100644 index 79506b3ec75..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/none-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/start-event.gif b/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/start-event.gif deleted file mode 100644 index 80630c62aa9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/assets/start-event.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/none-events.md b/versioned_docs/version-8.2/components/modeler/bpmn/none-events/none-events.md deleted file mode 100644 index e8ced7089a5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/none-events/none-events.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: none-events -title: "None events" -description: "None events are unspecified events, also called blank events." ---- - -None events are unspecified events, also called "blank" events. - -![process](assets/none-events.png) - -## None start events - -At most, a process can have **one** none start event (besides other types of start events). - -A none start event is where the process instance or a subprocess starts when the process or the subprocess is activated. - -## None end events - -A process or subprocess can have multiple none end events. When a none end event is entered, the current execution path ends. If the process instance or subprocess has no more active execution paths, it is completed. - -If an activity has no outgoing sequence flow, it behaves the same as it would be connected to a none end event. When the activity is completed, the current execution path ends. - -## Intermediate none events (throwing) - -Intermediate none events can be used to indicate some state achieved in the process. They are especially useful for monitoring to understand how the process is doing, for example, as milestones or key performance indicators (KPIs). - -The engine itself doesn't do anything in the event, it just passes through it. - -## Variable mappings - -All none events can have [variable output mappings](../../../../components/concepts/variables.md#output-mappings). 
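A minimal sketch of such a mapping, assuming an illustrative event ID and variable names, uses the `zeebe:ioMapping` extension element on the event:

```xml
<bpmn:startEvent id="order-received" name="Order received">
  <bpmn:extensionElements>
    <zeebe:ioMapping>
      <!-- illustrative: copy a nested start variable into a top-level process variable -->
      <zeebe:output source="= customer.name" target="customerName" />
    </zeebe:ioMapping>
  </bpmn:extensionElements>
</bpmn:startEvent>
```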
- -For start events, this is often used to initialize process variables. - -## Additional resources - -### XML representation - -A none start event: - -```xml - -``` - -A none end event: - -```xml - -``` - -An intermediate none event: - -```xml - -``` diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/assets/parallel-gateway.gif b/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/assets/parallel-gateway.gif deleted file mode 100644 index a6d1f404bd5..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/assets/parallel-gateway.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/assets/parallel-gateways.png b/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/assets/parallel-gateways.png deleted file mode 100644 index 5dd17410f98..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/assets/parallel-gateways.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/parallel-gateways.md b/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/parallel-gateways.md deleted file mode 100644 index a561b048fba..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/parallel-gateways/parallel-gateways.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: parallel-gateways -title: "Parallel gateway" -description: "A parallel gateway (or AND-gateway) allows you to split the flow into concurrent paths." ---- - -A parallel gateway (or AND-gateway) allows you to split the flow into concurrent paths. - -![process](assets/parallel-gateways.png) - -When a parallel gateway with multiple outgoing sequence flows is entered, all flows are taken. The paths are executed concurrently and independently. - -The concurrent paths can be joined using a parallel gateway with multiple incoming sequence flows. The process instance waits at the parallel gateway until each incoming sequence is taken. - -:::note -The outgoing paths of the parallel gateway are executed concurrently and not parallel in the sense of parallel threads. All records of a process instance are written to the same partition (single stream processor). 
-::: - -## Additional resources - -### XML representation - -A parallel gateway with two outgoing sequence flows: - -```xml - - - - - -``` diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/assets/receive-task.gif b/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/assets/receive-task.gif deleted file mode 100644 index f5e3ade7e3b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/assets/receive-task.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/assets/receive-tasks.png b/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/assets/receive-tasks.png deleted file mode 100644 index ce6abaac16d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/assets/receive-tasks.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/receive-tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/receive-tasks.md deleted file mode 100644 index a9fb47de6d3..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/receive-tasks/receive-tasks.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -id: receive-tasks -title: "Receive tasks" -description: "Receive tasks reference a message; these are used to wait until a proper message is received." ---- - -Receive tasks reference a message; these are used to wait until a proper message is received. - -![Receive Tasks](assets/receive-tasks.png) - -When a receive task is entered, a corresponding message subscription is created. The process instance stops at this point and waits until the message is correlated. - -A message can be published using one of the Zeebe clients. When the message is correlated, the receive task is completed and the process instance continues. - -:::note -An alternative to receive tasks is [a message intermediate catch event](../message-events/message-events.md), which behaves the same way but can be used together with event-based gateways. -::: - -## Messages - -A message can be referenced by one or more receive tasks; it must define the name of the message (e.g. `Money collected`) and the `correlationKey` expression (e.g. `= orderId`). - -Usually, the name of the message is defined as a [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `order canceled`), but it can also be defined as [expression](/components/concepts/expressions.md) (e.g. `= "order " + awaitingAction`). The expression is evaluated on activating the receive task and must result in a `string`. - -The `correlationKey` is an expression that usually [accesses a variable](/components/modeler/feel/language-guide/feel-variables.md#access-variable) of the process instance that holds the correlation key of the message. The expression is evaluated on activating the receive task and must result either in a `string` or `number`. - -To correlate a message to the receive task, the message is published with the defined name (e.g. `Money collected`) and the value of the `correlationKey` expression. For example, if the process instance has a variable `orderId` with value `"order-123"`, the message is published with the correlation key `"order-123"`. - -## Variable mappings - -Output variable mappings are used to customize how variables are merged into the process instance. -These can contain multiple elements that specify which variables should be mapped. 
-The `Process Variable Name` of an output denotes the variable name outside the activity. - -Visit our documentation on [input and output variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) for more information on this topic. - -## Additional resources - -### XML representation - -A receive task with message definition: - -```xml - - - - - - - - -``` - -### References - -- [Message correlation](/components/concepts/messages.md) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/script-tasks/assets/script-task.png b/versioned_docs/version-8.2/components/modeler/bpmn/script-tasks/assets/script-task.png deleted file mode 100644 index 11c0d1845b7..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/script-tasks/assets/script-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/script-tasks/script-tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/script-tasks/script-tasks.md deleted file mode 100644 index 1af1b9e1cec..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/script-tasks/script-tasks.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -id: script-tasks -title: "Script tasks" -description: "A script task is used to model the evaluation of a script; for example, a script written in Groovy, -JavaScript, or Python." ---- - -A script task is used to model the evaluation of a script; for example, a script written in Groovy, -JavaScript, or Python. - -![task](assets/script-task.png) - -:::info -Camunda 8 supports alternative task implementations for the script task. To use your own -implementation for a script task, see the [job worker implementation](#job-worker-implementation) section below. The -sections before this job worker implementation apply to the [FEEL expression](/components/modeler/feel/language-guide/feel-expressions-introduction.md) -implementation only. -::: - -When the process instance arrives at a script task, the integrated [FEEL Scala](https://github.com/camunda/feel-scala) -engine evaluates the script task FEEL expression. Once the FEEL expression is evaluated successfully, the process -instance continues. - -If the FEEL expression evaluation is unsuccessful, an [incident](/components/concepts/incidents.md) is -raised at the script task. When the incident is resolved, the script task is evaluated again. - -## Defining a script task - -To define a script task with an inline FEEL expression, use the `zeebe:script` extension element. In the -`zeebe:script` extension element, perform the following steps: - -1. Define the **FEEL expression** inside the `expression` attribute. -2. Define the name of process variable in the `resultVariable` attribute. This variable will store the result of the FEEL expression evaluation. - -### Variable mappings - -By default, the variable defined by `resultVariable` is merged into the process instance. This behavior can be -customized by defining an output mapping at the script task. - -All variables in scope of the script task are available to the FEEL engine when the FEEL expression in the script task -is evaluated. Input mappings can be used to transform the variables into a format accepted by the FEEL expression. 
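For illustration, a FEEL script task that combines such an input mapping with an inline expression could look like the following sketch (the task ID, expression, and variable names are assumptions for the example):

```xml
<bpmn:scriptTask id="calculate-total" name="Calculate order total">
  <bpmn:extensionElements>
    <zeebe:ioMapping>
      <!-- illustrative: bring the order items into the shape the expression expects -->
      <zeebe:input source="= order.items" target="items" />
    </zeebe:ioMapping>
    <!-- evaluate the FEEL expression and store the result in the "total" variable -->
    <zeebe:script expression="= sum(items.price)" resultVariable="total" />
  </bpmn:extensionElements>
</bpmn:scriptTask>
```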
- -:::info -Input mappings are applied on activating the script task (or when an incident at the script task is resolved) before -the FEEL expression evaluation. When an incident is resolved at the script task, the input mappings are applied again -before evaluating the FEEL expression. This can affect the result of the FEEL expression evaluation. -::: - -For more information about this topic, visit the documentation about [input and output variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings). - -## Job worker implementation - -When the job worker implementation is used, script tasks behave exactly like [service tasks](/components/modeler/bpmn/service-tasks/service-tasks.md). Both task types are based on jobs and [job workers](/components/concepts/job-workers.md). The differences between these task types are the visual representation (i.e. the task marker) and the -semantics for the model. - -When a process instance enters a script task using a job worker implementation, it creates a corresponding job and waits -for its completion. A job worker should request jobs of this job type and process them. When the job is complete, the -process instance continues. - -:::note -Jobs for script tasks are not processed by Zeebe itself. To process them, provide a job worker. -::: - -### Defining a job worker script task - -A script task must define a [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) the -same way a service task does. It specifies the type of job workers should subscribe to (e.g. `script`). - -Use [task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) to pass static parameters to -the job worker (e.g. the script to evaluate). The community extension [Zeebe Script Worker](https://github.com/camunda-community-hub/zeebe-script-worker) -requires certain attributes to be set in the task headers. - -Define [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -the [same way a service task does](/components/modeler/bpmn/service-tasks/service-tasks.md#variable-mappings) -to transform the variables passed to the job worker, or to customize how the variables of the job merge. - -## Additional resources - -:::tip Community Extension - -Review the [Zeebe Script Worker](https://github.com/camunda-community-hub/zeebe-script-worker). This is a -community extension that provides a job worker to evaluate scripts. You can run it, or use it as a -blueprint for your own job worker. 
- -::: - -### XML representation - -A script task with a custom header: - -```xml - - - - - - - - - -``` - -A script task with an inline FEEL expression: - -```xml - - - - - -``` - -### References - -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/send-tasks/assets/send-task.png b/versioned_docs/version-8.2/components/modeler/bpmn/send-tasks/assets/send-task.png deleted file mode 100644 index ef65accebe3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/send-tasks/assets/send-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/send-tasks/send-tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/send-tasks/send-tasks.md deleted file mode 100644 index 83ef7dc20f3..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/send-tasks/send-tasks.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: send-tasks -title: "Send tasks" -description: "A send task is used to model the publication of a message to an external system." ---- - -A send task is used to model the publication of a message to an external system; for example, to a -Kafka topic or a mail server. - -![task](assets/send-task.png) - -Send tasks behave exactly like [service tasks](/components/modeler/bpmn/service-tasks/service-tasks.md). Both task -types are based on jobs and [job workers](/components/concepts/job-workers.md). The -differences between these task types are the visual representation (i.e. the task marker) and the -semantics for the model. - -When a process instance enters a send task, it creates a corresponding job and waits for its -completion. A job worker should request jobs of this job type and process them. When the job is -complete, the process instance continues. - -:::note - -Jobs for send tasks are not processed by Zeebe itself. To process them, provide -a job worker. - -::: - -## Defining a task - -A send task must define a [job type](/components/modeler/bpmn/service-tasks/service-tasks.md#task-definition) the same -way as a service task does. It specifies the type of job that workers should subscribe to (e.g. `kafka` or `mail`). - -Use [task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) to pass static parameters to the job -worker (e.g. the name of the topic to publish the message to). - -Define [variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -the [same way as a service task does](/components/modeler/bpmn/service-tasks/service-tasks.md#variable-mappings) -to transform the variables passed to the job worker, or to customize how the variables of the job merge. - -## Additional resources - -:::tip Community Extension - -Review the [Kafka Connect Zeebe](https://github.com/camunda-community-hub/kafka-connect-zeebe). This is a -community extension that provides a job worker to publish messages to a Kafka topic. You can run it, -or use it as a blueprint for your own job worker. 
- -::: - -### XML representation - -A send task with a custom header: - -```xml - - - - - - - - -``` - -### References - -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/service-task.gif b/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/service-task.gif deleted file mode 100644 index 7105e52c3e2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/service-task.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/service-task.png b/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/service-task.png deleted file mode 100644 index 2d16cd3debd..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/service-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/task-headers.gif b/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/task-headers.gif deleted file mode 100644 index 4e0d0acd48d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/task-headers.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/variable-mappings.gif b/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/variable-mappings.gif deleted file mode 100644 index dbf641387c5..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/assets/variable-mappings.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/service-tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/service-tasks.md deleted file mode 100644 index 5d418635bb9..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/service-tasks/service-tasks.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: service-tasks -title: "Service tasks" -description: "A service task represents a work item in the process with a specific type." ---- - -A service task represents a work item in the process with a specific type. - -![process](../assets/order-process.png) - -When a service task is entered, a corresponding job is created. The process instance stops here and waits until the job is complete. - -A [job worker](/components/concepts/job-workers.md) can subscribe to the job type, process the jobs, and complete them using one of the Zeebe clients. When the job is complete, the service task is completed and the process instance continues. - -## Task definition - -A service task must have a `taskDefinition`. The `taskDefinition` is used to specify which [job workers](../../../concepts/job-workers.md) handle the service task work. - -A `taskDefinition` specifies the following properties: - -- `type` (required): Used as a reference to specify which job workers request the respective service task job. For example, `order-items`. - - `type` can be specified as any [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (`myType`) or as a FEEL [expression](../../../concepts/expressions.md) prefixed by `=` that evaluates to any FEEL string; for example, `= "order-" + priorityGroup`.
-- `retries` (optional): Specifies the number of times the job is retried when a worker signals failure. The default is three. - -The expressions are evaluated on activating the service task and must result in a `string` for the job type and a `number` for the retries. - -See an example in the form of the [XML representation](#xml-representation) below. - -## Task headers - -A service task can define an arbitrary number of `taskHeaders`; they are static metadata handed to workers along with the job. The headers can be used as configuration parameters for the worker. - -## Variable mappings - -By default, all job variables merge into the process instance. This behavior can be customized by defining an output mapping at the service task. - -Input mappings can be used to transform the variables into a format accepted by the job worker. - -For more information about this topic visit the documentation about [Input/output variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings). - -## Additional resources - -### XML representation - -A service task with a custom header: - -```xml - - - - - - - - -``` - -## Next steps - -Learn more about the concept of job types and how to set up a job worker via our [manual on job workers](/components/concepts/job-workers.md). - -### References - -- [Job handling](/components/concepts/job-workers.md) -- [Expressions](/components/concepts/expressions.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) -- [Incidents](/components/concepts/incidents.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/signal-events/assets/signal-start-event.png b/versioned_docs/version-8.2/components/modeler/bpmn/signal-events/assets/signal-start-event.png deleted file mode 100644 index 62ec613f45f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/signal-events/assets/signal-start-event.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/signal-events/signal-events.md b/versioned_docs/version-8.2/components/modeler/bpmn/signal-events/signal-events.md deleted file mode 100644 index dd1f4b32284..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/signal-events/signal-events.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: signal-events -title: "Signal events" -description: "Signal events are events which reference a signal; they are used to wait until a matching signal is received." ---- - -Signal events are events which reference a signal. Broadcasting a signal will trigger all signal events matching the -name of the broadcasted signal. - -## Signal start events - -![Process with a top-level signal start event](assets/signal-start-event.png) - -Signal start event can be used to start process instances. Deploying processes with a signal start event enables creating -multiple process instances by performing a single broadcast. - -Broadcasting a signal will iterate over the available subscriptions. If the name of the broadcasted signal matches the -name of the signal start event, the process instance is created. - -Signal subscriptions only exist for the latest version of a process definition. Deploying a new version of the same -process (based on the BPMN process id) will delete the old signal subscription. A new subscription is opened for the -new deployed process definition. - -## Variable mappings - -When broadcasting a signal you can pass along variables. 
By default, all signal variables are merged into the process -instance. This behavior can be customized by defining an output mapping at the signal catch event. - -For more information about variable scopes, visit the documentation about [variable scopes](../../../concepts/variables#variable-scopes). - -## Additional resources - -### XML representation - -A signal start event with signal definition: - -```xml - - - - - -``` diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/subprocesses.md b/versioned_docs/version-8.2/components/modeler/bpmn/subprocesses.md deleted file mode 100644 index 5bd8ae43088..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/subprocesses.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: subprocesses -title: "Overview" -description: "This document outlines an overview of supported elements." ---- - -**Subprocesses** are element containers that allow defining common functionality. For example, you can attach an event to a subprocess's border. - -When the event is triggered, the subprocess is interrupted, regardless which of its elements is currently active. - -Currently supported elements: - -- [Embedded subprocess](embedded-subprocesses/embedded-subprocesses.md) -- [Call activities](call-activities/call-activities.md) -- [Event subprocess](event-subprocesses/event-subprocesses.md) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/tasks.md deleted file mode 100644 index 75c0df8813a..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/tasks.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -id: tasks -title: "Overview" -description: "This document outlines an overview of supported elements." ---- - -The basic elements of BPMN processes are tasks; these are atomic units of work composed to create a meaningful result. Whenever a token reaches a task, the token stops and Zeebe creates a job and notifies a registered worker to perform work. When that handler signals completion, the token continues on the outgoing sequence flow. - -Choosing the granularity of a task is up to the person modeling the process. For example, the activity of processing an order can be modeled as a single _Process Order_ task, or as three individual tasks _Collect Money_, _Fetch Items_, _Ship Parcel_. If you use Zeebe to orchestrate microservices, one task can represent one microservice invocation. 
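For instance, a single invocation of a payment microservice could be modeled as one service task whose job type that service's worker subscribes to; a sketch with an illustrative ID and job type:

```xml
<bpmn:serviceTask id="collect-money" name="Collect Money">
  <bpmn:extensionElements>
    <!-- illustrative: a worker subscribed to the "payment-service" job type performs this invocation -->
    <zeebe:taskDefinition type="payment-service" retries="3" />
  </bpmn:extensionElements>
</bpmn:serviceTask>
```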
- -Currently supported elements: - -- [Service tasks](service-tasks/service-tasks.md) -- [User tasks](user-tasks/user-tasks.md) -- [Receive tasks](receive-tasks/receive-tasks.md) -- [Business rule tasks](business-rule-tasks/business-rule-tasks.md) -- [Script tasks](script-tasks/script-tasks.md) -- [Send tasks](send-tasks/send-tasks.md) -- [Manual tasks](manual-tasks/manual-tasks.md) -- [Undefined tasks](undefined-tasks/undefined-tasks.mdx) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/assets/terminate-end-event-inside-subprocess.png b/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/assets/terminate-end-event-inside-subprocess.png deleted file mode 100644 index dada8e55b3d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/assets/terminate-end-event-inside-subprocess.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/assets/terminate-event-on-process-scope.png b/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/assets/terminate-event-on-process-scope.png deleted file mode 100644 index efb59d3bc85..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/assets/terminate-event-on-process-scope.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/terminate-events.md b/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/terminate-events.md deleted file mode 100644 index 90b96645c53..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/terminate-events/terminate-events.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: terminate-events -title: "Terminate events" -description: "BPMN terminate events allow a process model to cancel concurrent flows." ---- - -Terminate end events are the only kind of terminate events. When a process instance reaches a terminate end event, it -terminates all element instances in the same flow scope as the end event. - -They are often used to terminate a concurrent flow that is not required anymore. Consider the following example. - -![The process instance reached the terminate end event and canceled the concurrent flow.](assets/terminate-event-on-process-scope.png) - -The process has two concurrent tasks `B` and `C`. In the process instance, both tasks are active. We complete the -task `C`. The process instance reaches the terminate end event and cancels the task `B`. - -## On the process scope - -A terminate end event on the process scope (i.e. not embedded in a subprocess) terminates all element instances of the -process instance. After the termination, the process instance completes. - -If the process instance was created by a call activity from a parent process, then the call activity completes and the -parent process instance takes the outgoing sequence flows. - -## Inside a subprocess - -A terminate end event inside an embedded or an event subprocess terminates all element instances of the -subprocess. After the termination, the subprocess completes, and the process instance takes the outgoing sequence flows. - -The terminate end event is limited to its subprocess. It doesn't terminate element instances outside the subprocess. - -![The process instance reached the terminate end event in the subprocess and canceled the concurrent task in the subprocess. 
The process instance took the outgoing sequence flow of the subprocess.](assets/terminate-end-event-inside-subprocess.png) - -If the subprocess is a multi-instance then the terminate end event terminates only the element instances of the current -iteration. It doesn't terminate element instances of other multi-instance iterations. - -## Additional resources - -### XML representation - -A terminate end event: - -```xml - - Flow_0zv9prm - - -``` diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/interrupting-timer-event.gif b/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/interrupting-timer-event.gif deleted file mode 100644 index 8049d97df26..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/interrupting-timer-event.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/non-interrupting-timer-event.gif b/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/non-interrupting-timer-event.gif deleted file mode 100644 index 8ea2392492e..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/non-interrupting-timer-event.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/timer-events.png b/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/timer-events.png deleted file mode 100644 index dc997f9cec4..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/assets/timer-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/timer-events.md b/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/timer-events.md deleted file mode 100644 index 34c69a72e38..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/timer-events/timer-events.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -id: timer-events -title: "Timer events" -description: "Timer events are events triggered by a defined timer." ---- - -Timer events are events triggered by a defined timer. - -![process](assets/timer-events.png) - -## Timer start events - -A process can have one or more timer start events (besides other types of start events). Each of the timer events must have either a time date or time cycle definition. - -When a process is deployed, it schedules a timer for each timer start event. Scheduled timers of the previous version of the process (based on the BPMN process id) are canceled. - -When a timer is triggered, a new process instance is created and the corresponding timer start event is activated. - -## Intermediate timer catch events - -An intermediate timer catch event must have a time duration definition that defines when it is triggered. - -When an intermediate timer catch event is entered, a corresponding timer is scheduled. The process instance stops at this point and waits until the timer is triggered. When the timer is triggered, the catch event is completed and the process instance continues. - -## Timer boundary events - -An interrupting timer boundary event must have a time duration definition. When the corresponding timer is triggered, the activity is terminated. Interrupting timer boundary events are often used to model timeouts; for example, canceling the processing after five minutes and doing something else. - -A non-interrupting timer boundary event must have either a time duration or time cycle definition. 
When the activity is entered, it schedules a corresponding timer. If the timer is triggered and defined as time cycle with repetitions greater than zero, it schedules the timer again until the defined number of repetitions is reached. - -Non-interrupting timer boundary events are often used to model notifications; for example, contacting support if the processing takes longer than an hour. - -## Timers - -Timers must be defined by providing either a date, a duration, or a cycle. - -A timer can be defined either as a [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `P3D`) or as an [expression](/components/concepts/expressions.md). There are two common ways to use an expression: - -- [Access a variable](/components/modeler/feel/language-guide/feel-variables.md#access-variable) (e.g. `= remainingTime`). -- [Use temporal values](/components/concepts/expressions.md#temporal-expressions) (e.g. `= date and time(expirationDate) - date and time(creationDate)`). - -If the expression belongs to a timer start event of the process, it is evaluated on deploying the process. Otherwise, it is evaluated on activating the timer catch event. The evaluation must result in either a `string` that has the same ISO 8601 format as the static value, or an equivalent temporal value (i.e. a date-time, a duration, or a cycle). - -:::note -Zeebe is an asynchronous system. As a result, there is no guarantee a timer triggers exactly at the configured time. - -Depending on how much load the system is under, timers could trigger later than their due date. However, timers will never trigger earlier than the due date. -::: - -### Time date - -import ISO8601DateTime from '../assets/react-components/iso-8601-date-time.md' - - - -### Time duration - -A duration is defined as a ISO 8601 durations format, which defines the amount of intervening time in a time interval and are represented by the format `P(n)Y(n)M(n)DT(n)H(n)M(n)S`. Note that the `n` is replaced by the value for each of the date and time elements that follow the `n`. - -The capital letters _P_, _Y_, _M_, _W_, _D_, _T_, _H_, _M_, and _S_ are designators for each of the date and time elements and are not replaced, but can be omitted. - -- _P_ is the duration designator (for period) placed at the start of the duration representation. -- _Y_ is the year designator that follows the value for the number of years. -- _M_ is the month designator that follows the value for the number of months. -- _W_ is the week designator that follows the value for the number of weeks. -- _D_ is the day designator that follows the value for the number of days. -- _T_ is the time designator that precedes the time components of the representation. -- _H_ is the hour designator that follows the value for the number of hours. -- _M_ is the minute designator that follows the value for the number of minutes. -- _S_ is the second designator that follows the value for the number of seconds. - -Examples: - -- `PT15S` - 15 seconds -- `PT1H30M` - 1 hour and 30 minutes -- `P14D` - 14 days -- `P14DT1H30M` - 14 days, 1 hour and 30 minutes -- `P3Y6M4DT12H30M5S` - 3 years, 6 months, 4 days, 12 hours, 30 minutes and 5 seconds - -If the duration is zero or negative, the timer fires immediately. - -### Time cycle - -A cycle defined as ISO 8601 repeating intervals format; it contains the duration and the number of repetitions. If the repetitions are not defined, the timer repeats infinitely until it is canceled. 
- -- `R5/PT10S`: Every 10 seconds, up to five times -- `R/P1D`: Every day, infinitely - -It's possible to define a start time. By doing this, the timer triggers for the first time on the given start time. Afterwards, it will follow the interval as usual. - -- `R3/2022-04-27T17:20:00Z/P1D`: Every day up to three times, starting from April 27, 2022 at 5:20 p.m. UTC -- `R/2022-01-01T10:00:00+02:00[Europe/Berlin]/P1D`: Every day infinitely, starting from January 1, 2022 at 10 a.m. UTC plus 2 hours - -Additionally, you can specify a time cycle using cron expressions. See the [CronExpression Tutorial](https://spring.io/blog/2020/11/10/new-in-spring-5-3-improved-cron-expressions) for additional information about using cron expressions. - -- `0 0 9-17 * * MON-FRI`: Every hour on the hour from 9-5 p.m. UTC Monday-Friday - -## Additional resources - -### XML representation - -A timer start event with time date: - -```xml - - - 2019-10-01T12:00:00Z - - -``` - -An intermediate timer catch event with time duration: - -```xml - - - PT10M - - -``` - -A non-interrupting boundary timer event with time cycle: - -```xml - - - R3/PT1H - - -``` - -### References - -- [Expressions](/components/concepts/expressions.md) -- [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/undefined-tasks/assets/undefined-task.png b/versioned_docs/version-8.2/components/modeler/bpmn/undefined-tasks/assets/undefined-task.png deleted file mode 100644 index 7336e7aa977..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/undefined-tasks/assets/undefined-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/undefined-tasks/undefined-tasks.mdx b/versioned_docs/version-8.2/components/modeler/bpmn/undefined-tasks/undefined-tasks.mdx deleted file mode 100644 index aa65e12f972..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/undefined-tasks/undefined-tasks.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: undefined-tasks -title: "Undefined tasks" -description: "An undefined task defines a task that is external to the BPM engine." ---- - -import myImageUrl from "./assets/undefined-task.png"; - -An undefined task (also known as abstract task) defines a task for which the type of work is unspecified. -This is used to model work done by someone the engine does not need to know of and there is no known system or UI interface. -It is also used when modeling a process that is not automated, or while the process automation is in development. - -For the engine, an undefined task is handled as a pass-through activity, automatically continuing the process at the moment the process instance arrives. - -Example of multiple Undefined Tasks in sequence - -Undefined tasks have no real benefit for automating processes, and instead provide insights into the tasks that are performed outside of the process engine. - -:::tip modeling processes for automation -Both Web Modeler and Desktop Modeler default to undefined tasks when adding tasks to your process. -You can change the task type by clicking the wrench icon and selecting one of the other types. 
-::: - -## Additional resources - -### XML representation - -An undefined task: - -```xml - -``` diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/user-tasks/assets/user-task.png b/versioned_docs/version-8.2/components/modeler/bpmn/user-tasks/assets/user-task.png deleted file mode 100644 index 7dd964bb121..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/bpmn/user-tasks/assets/user-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/bpmn/user-tasks/user-tasks.md b/versioned_docs/version-8.2/components/modeler/bpmn/user-tasks/user-tasks.md deleted file mode 100644 index 61ddcbffd29..00000000000 --- a/versioned_docs/version-8.2/components/modeler/bpmn/user-tasks/user-tasks.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -id: user-tasks -title: "User tasks" -description: "A user task is used to model work that needs to be done by a human actor." ---- - -A user task is used to model work that needs to be done by a human actor. When -the process instance arrives at such a user task, a new job similar to a -[service task](/components/modeler/bpmn/service-tasks/service-tasks.md) is created. The process instance -stops at this point and waits until the job is completed. - -![user-task](assets/user-task.png) - -Applications like [Tasklist](/components/tasklist/introduction-to-tasklist.md) can be used by humans to complete these tasks. - -Alternatively, a job worker can subscribe to the job type -`io.camunda.zeebe:userTask` to complete the job manually. - -When the job is completed, the user task is completed and the process -instance continues. - -## User task forms - -User tasks support specifying a `formKey` attribute, using the -`zeebe:formDefinition` extension element. The form key can be used to specify -an identifier to associate a form to the user task. [Tasklist](/components/tasklist/introduction-to-tasklist.md) supports -embedded [Camunda Forms](/guides/utilizing-forms.md), -these can be embedded into the BPMN process XML as a `zeebe:UserTaskForm` -extension element of the process element. - -## Assignments - -User tasks support specifying assignments, using the `zeebe:AssignmentDefinition` extension element. -This can be used to define which user the task can be assigned to. One or all of the following -attributes can be specified simultaneously: - -- `assignee`: Specifies the user assigned to the task. [Tasklist](/components/tasklist/introduction-to-tasklist.md) will claim the task for this user. -- `candidateUsers`: Specifies the users that the task can be assigned to. -- `candidateGroups`: Specifies the groups of users that the task can be assigned to. - -Typically, the assignee, candidate users, and candidate groups are defined as [static values](/docs/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `some_username`, `some_username, another_username` and -`sales, operations`), but they can also be defined as -[expressions](/components/concepts/expressions.md) (e.g. `= book.author` and `= remove(reviewers, book.author)` and `= reviewer_roles`). The expressions are evaluated on activating the user task and must result in a -`string` for the assignee and a `list of strings` for the candidate users and a `list of strings` for the candidate groups. - -For [Tasklist](/components/tasklist/introduction-to-tasklist.md) to claim the task for a known Tasklist user, -the value of the `assignee` must be the user's **unique identifier**. 
-The unique identifier depends on the authentication method used to login to Tasklist: - -- Camunda 8 (login with email, Google, GitHub): `email` -- Default Basic Auth (Elasticsearch): `username` -- IAM: `username` - -:::note -For example, say you log into Tasklist using Camunda 8 login with email using your email address `foo@bar.com`. Every time a user task activates with `assignee` set to value `foo@bar.com`, Tasklist automatically assigns it to you. You'll be able to find your new task under the task dropdown option `Claimed by me`. -::: - -## Scheduling - -User tasks support specifying a task schedule using the `zeebe:taskSchedule` extension element. -This can be used to define **when** users interact with a given task. One or both of the following -attributes can be specified simultaneously: - -- `dueDate`: Specifies the due date of the user task. -- `followUpDate`: Specifies the follow-up date of the user task. - -:::note -For example, you can use the `followUpDate` to define the latest time a user should start working on a task, and then -use the `dueDate` to provide a deadline when the user task should be finished. -::: - -You can define the due date and follow-up date as static values (e.g. `2023-02-28T13:13:10+02:00`), but you can also use -[expressions](/components/concepts/expressions.md) (e.g. `= schedule.dueDate` and `= now() + duration("PT15S")`). The -expressions are evaluated on activating the user task and must result in a `string` conforming to an ISO 8601 combined -date and time representation. - -import ISO8601DateTime from '../assets/react-components/iso-8601-date-time.md' - -:::info - -::: - -## Variable mappings - -By default, all job variables are merged into the process instance. This -behavior can be customized by defining an output mapping at the user task. - -Input mappings can be used to transform the variables into a format accepted by the job worker. - -## Task headers - -A user task can define an arbitrary number of `taskHeaders`; they are static -metadata handed to workers along with the job. The headers can be used -as configuration parameters for the worker. - -## Additional resources - -### XML representation - -A user task with a user task form, an assignment definition, and a task schedule: - -```xml - - - - - - - - - - - - - - -``` - -### References - -- [Tasklist](/components/tasklist/introduction-to-tasklist.md) -- [Job handling](/components/concepts/job-workers.md) -- [Variable mappings](/components/concepts/variables.md#inputoutput-variable-mappings) diff --git a/versioned_docs/version-8.2/components/modeler/data-handling.md b/versioned_docs/version-8.2/components/modeler/data-handling.md deleted file mode 100644 index b6f36d98765..00000000000 --- a/versioned_docs/version-8.2/components/modeler/data-handling.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: data-handling -title: Data handling -description: "Get editor support for variables by defining the variables in the process model." ---- - -Camunda 8 only - -The FEEL editor will suggest variables in the current element's scope when defining input and output mappings in a process. The variables created by the mapping are automatically picked up and added to the suggestions. - -To get editor support for variables created by your [job workers](../concepts/job-workers.md) or passed as process start variables, define the variables in the process model. - -## Defining additional data - -You can add the schema for this data by adding a JSON return value in the `Data` section of the properties panel. 
The values are used to derive variable names and types in the FEEL editor. Nested objects are also supported. - -Providing this data is optional, but it's recommended if you want to take full advantage of the FEEL editor's suggestions. -This data will also be used during [playing your process](/components/modeler/web-modeler/play-your-process.md) to prefill the modal when performing the following actions: - -- Start a new instance with variables -- Complete job with variables -- Publish message with variables - -:::note -The provided data schema is only used by the FEEL editor to provide variable suggestions while modeling, and by Play to prefill variable forms. It is not used during process execution. -::: - -![Variable suggestions with additional Variables](img/data-handling-example-json.png) - -Data provided this way is added to scope of the element. To use the data in other parts of your process, you can use output mappings to make the variables available in the parent scope. Check the [variable concepts page](../concepts/variables.md) for more information on variables, scopes, and output mappings. diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/connect-to-camunda-8.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/connect-to-camunda-8.md deleted file mode 100644 index 0a5912c4b73..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/connect-to-camunda-8.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: connect-to-camunda-8 -title: Deploy your first diagram -description: "Follow these steps to directly deploy diagrams and start process instances in Camunda 8." ---- - -Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8. Follow the steps below to deploy a diagram to **Camunda 8 SaaS**. To deploy to a local installation, visit the [Camunda 8 Self-Managed guide](../../../self-managed/modeler/desktop-modeler/deploy-to-self-managed.md). - -1. Click the deployment icon: - -![deployment icon](./img/deploy-icon.png) - -2. Click **Camunda 8 SaaS**: - -![deployment configuration](./img/deploy-diagram-camunda-cloud.png) - -3. Input the `Cluster URL` and the credentials (`Client ID`, `Client Secret`) of your [API client](../../console/manage-clusters/manage-api-clients.md): - -![deployment via Camunda 8](./img/deploy-diagram-camunda-cloud-remember.png) - -4. Toggle **Remember credentials** to locally store the connection information. - -5. Click **Deploy** to perform the actual deployment. - -![deployment successful](./img/deploy-diagram-camunda-cloud-success.png) - -:::note -As a next step, [start a new process instance](./start-instance.md). -::: diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/custom-lint-rules/custom-lint-rules.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/custom-lint-rules/custom-lint-rules.md deleted file mode 100644 index fe79c003452..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/custom-lint-rules/custom-lint-rules.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -id: custom-lint-rules -title: Custom lint rules -description: "Lint rule plugins allow you to add custom lint rules and configure or disable existing rules." ---- - -Through Camunda Modeler plugins, you can add custom lint rules and configure or disable existing rules. 
[`bpmnlint`](https://github.com/bpmn-io/bpmnlint) is used to validate BPMN diagrams, so the plugins have to be [`bpmnlint` plugins](https://github.com/bpmn-io/bpmnlint#writing--consuming-custom-rules) at the core. - -## Getting started - -Get started with the [custom-linter-rules-plugin template](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin) and take the following steps: - -1. Clone or fork the repository: - -``` -git clone https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin.git -``` - -The plugin starter project comes with a client folder referenced in the [plugin entry point](../plugins#plugin-entry-point). It contains the script that adds a `bpmnlint` plugin to the modeler. Since this is a [client plugin](../plugins#extend-the-modeler-and-its-bpmn-and-dmn-components), you must bundle it. - -2. Install the dependencies with the following command: - -``` -npm install -``` - -3. Add a custom rule. To do this, add it to the [`bpmnlint-plugin-custom/rules`](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin/tree/master/bpmnlint-plugin-custom/rules) folder. The example project contains a [`no-manual-task.js`](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin/blob/master/bpmnlint-plugin-custom/rules/no-manual-task.js) file which implements a custom rule. - -Every rule must export a function that returns an object with a `check` function: - -```javascript -/** - * Rule that reports manual tasks being used. - */ -module.exports = function () { - function check(node, reporter) { - if (is(node, "bpmn:ManualTask")) { - reporter.report(node.id, "Element has disallowed type bpmn:ManualTask"); - } - } - - return { - check: check, - }; -}; -``` - -This function is called for every node when `bpmnlint` traverses the [model](https://github.com/bpmn-io/bpmn-moddle/). - -4. Change the configuration. Through the configuration in [.bpmnlintrc](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin/blob/master/.bpmnlintrc), you can add the custom rules you implemented in [`bpmnlint-plugin-custom/rules`](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin/tree/master/bpmnlint-plugin-custom/rules). - -```javascript -{ - "extends": [ - "bpmnlint:recommended", - "plugin:custom/recommended" - ], - "rules": { - "label-required": "off", - "custom/no-manual-task": "warn" - } -} -``` - -The example configuration adds all rules specified in the [`recommended`](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin/blob/master/bpmnlint-plugin-custom/index.js) configuration of the `bpmnlint` plugin. It also adds [all the rules that come with `bpmnlint`](https://github.com/bpmn-io/bpmnlint/tree/master/rules) and configures two rules. - -5. Bundle your plugin by running the following command: - -``` -npm run build -``` - -The custom lint rules and configuration will be used when validating a BPMN diagram. 
- -![Camunda Modeler with custom lint rule](./img/custom-lint-rule.png) - -## Additional resources - -- [Example plugin](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin) -- [bpmnlint](https://github.com/bpmn-io/bpmnlint) diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/custom-lint-rules/img/custom-lint-rule.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/custom-lint-rules/img/custom-lint-rule.png deleted file mode 100644 index b0551206f7d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/custom-lint-rules/img/custom-lint-rule.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/about-templates.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/about-templates.md deleted file mode 100644 index 34a04a232c0..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/about-templates.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: about-templates -title: About element templates -description: "Element templates are a way to extend Camunda Modeler with domain-specific diagram elements, such as service and user tasks." ---- - -:::note -Element templates are currently available in BPMN diagrams only. -::: - -Element templates are a way to extend the [modeler](https://camunda.org/bpmn/tool/) with domain-specific diagram elements, such as service and user tasks. - -![Custom fields in Desktop Modeler](./img/overview.png) - -Applicable element templates can be assigned to a BPMN element via the properties panel and/or the replace menu. - -Applying an element template configures the diagram element with pre-defined values for BPMN properties, input/output mappings, and extension properties. - -As seen in the _REST Connector_ example above, it also provides custom editing UI, input validation, and assistance. - -## Creating and editing element templates - -You can edit element templates in any text editor. With the [JSON schema](defining-templates.md#json-schema-compatibility), you may get additional editing support like formatting, code completion, and error highlighting. - -Connector templates are a specific kind of element template and get a visual preview and editing support in [Web Modeler](/components/connectors/manage-connector-templates.md). diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/additional-resources.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/additional-resources.md deleted file mode 100644 index 0b576d74755..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/additional-resources.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -id: additional-resources -title: Additional resources ---- - -Try playing around with custom elements and [our example templates](https://github.com/camunda/camunda-modeler/tree/master/resources/element-templates/cloud-samples.json). - -If you get stuck, ask for help in our [forums](https://forum.camunda.org/c/modeler). 
diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/c7-defining-templates.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/c7-defining-templates.md deleted file mode 100644 index 4eefb2e401f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/c7-defining-templates.md +++ /dev/null @@ -1,482 +0,0 @@ ---- -id: c7-defining-templates -title: Defining templates in Camunda 7 -description: "Learn about JSON schema compatibility, supported BPMN types, defining template properties, and more." ---- - -Camunda 7 only - -Templates are defined in template descriptor files as a JSON array: - -```json -[ - { - "$schema": "https://unpkg.com/@camunda/element-templates-json-schema/resources/schema.json", - "name": "Template 1", - "id": "sometemplate", - "description": "some description", - "version": 1, - "appliesTo": [ - "bpmn:Task" - ], - "elementType": { - "value": "bpmn:ServiceTask", - }, - "properties": [ - ... - ] - }, - { - "name": "Template 2", - ... - } -] -``` - -As seen in the code snippet, a template consists of a number of important components: - -- `$schema : String`: URI pointing towards the [JSON schema](https://json-schema.org/) which defines the structure of the element template `.json` file. Element template schemas are maintained in the [element templates JSON schema](https://github.com/camunda/element-templates-json-schema) repository. Following the [JSON schema](https://json-schema.org/) standard, you may use them for validation or to get assistance (e.g., auto-completion) when working with them in your favorite IDE. - - Example: - - ```json - "$schema": "https://unpkg.com/@camunda/element-templates-json-schema/resources/schema.json" - ``` - -- `name : String`: Name of the template. Shown in the element template selection modal and in the properties panel (after applying an element template). -- `id : String`: ID of the template. -- `description : String`: Optional description of the template. Shown in the element template selection modal and in the properties panel (after applying an element template). -- `documentationRef : String`: Optional URL pointing to a template documentation. Shown in the properties panel (after applying an element template). -- `version : Integer`: Optional version of the template. If you add a version to a template it will be considered unique based on its ID and version. Two templates can have the same ID if their version is different. -- `appliesTo : Array`: List of BPMN types the template can be applied to. -- `elementType : Object`: Optional type of the element. If you configure `elementType` on a template, the element is replaced with the specified type when a user applies the template. -- `properties : Array`: List of properties of the template. - -## JSON schema compatibility - -The application uses the `$schema` property to ensure compatibility for a given element template. Find the latest supported version [here](https://www.npmjs.com/package/@camunda/element-templates-json-schema). - -The tooling ignores element templates defining a higher `$schema` version and logs a warning message. 
- -For example, given the following `$schema` definition, the application takes `0.9.1` as the JSON Schema version of the element template: - -```json -"$schema": "https://unpkg.com/@camunda/element-templates-json-schema@0.9.1/resources/schema.json" -``` - -The JSON schema versioning is backward-compatible, meaning all versions including or below the current one are supported. In case no `$schema` is defined, Camunda Modeler assumes the latest JSON schema version for Camunda 7 element templates. - -## Supported BPMN types - -Currently, element templates may be used on the following BPMN elements: - -- `bpmn:Activity` (including tasks, service tasks, and others) -- `bpmn:SequenceFlow` (for maintaining `condition`) -- `bpmn:Process` -- `bpmn:Event` - -## Defining template properties - -With each template, you define some user-editable fields, their mapping to BPMN 2.0 XML, and Camunda extension elements. - -Let us consider the following example that defines a template for a mail sending task: - -```json -{ - "$schema": "https://unpkg.com/@camunda/element-templates-json-schema/resources/schema.json", - "name": "Mail Task", - "id": "com.camunda.example.MailTask", - "appliesTo": ["bpmn:ServiceTask"], - "properties": [ - { - "label": "Implementation Type", - "type": "String", - "value": "com.mycompany.MailTaskImpl", - "editable": false, - "binding": { - "type": "property", - "name": "camunda:class" - } - }, - { - "label": "Sender", - "type": "String", - "binding": { - "type": "camunda:inputParameter", - "name": "sender" - }, - "constraints": { - "notEmpty": true - } - }, - { - "label": "Receivers", - "type": "String", - "binding": { - "type": "camunda:inputParameter", - "name": "receivers" - }, - "constraints": { - "notEmpty": true - } - }, - { - "label": "Template", - "description": "By the way, you can use freemarker templates ${...} here", - "value": "Hello ${firstName}!", - "type": "Text", - "binding": { - "type": "camunda:inputParameter", - "name": "messageBody", - "scriptFormat": "freemarker" - }, - "constraints": { - "notEmpty": true - } - }, - { - "label": "Result Status", - "description": "The process variable to which to assign the send result to", - "type": "String", - "value": "mailSendResult", - "binding": { - "type": "camunda:outputParameter", - "source": "${ resultStatus }" - } - }, - { - "label": "Async before?", - "type": "Boolean", - "binding": { - "type": "property", - "name": "camunda:asyncBefore" - } - } - ] -} -``` - -The example defines five custom fields, each mapped to different technical properties: - -- _Implementation Type_ is mapped to the `camunda:class` property in BPMN 2.0 XML. -- _Sender_, _Receivers_, and _Template_ properties are mapped to `input parameters`. -- _Result Status_ is mapped back from the Java Delegate into a process variable via an `output parameter`. - -All but the _Implementation Type_ are editable by the user through the properties panel as shown in the following screenshot: - -![Custom Fields](./img/c7-defining-templates/custom-fields.png) - -As seen in the example, the important attributes in a property definition are: - -- `label`: A descriptive text shown with the property. -- `type`: Defining the visual appearance in the properties panel (may be any of `String`, `Text`, `Boolean`, `Dropdown`, or `Hidden`). -- `value`: An optional default value to be used if the property to be bound is not yet set. -- `binding`: Specifying how the property is mapped to BPMN or Camunda extensions (cf. [bindings](#bindings)). 
-- `constraints`: A list of editing constraints to apply to the template. - -### Types - -The input types `String`, `Text`, `Boolean`, `Dropdown`, and `Hidden` are available. As seen above, `String` maps to a single-line input, while `Text` maps to a multi-line input. - -#### Boolean/checkbox type - -The `Boolean` type maps to a checkbox that can be toggled by the user. It renders as shown below: - -![Boolean / Checkbox control](./img/c7-defining-templates/field-boolean.png) - -When checked, it maps to `true` in the respective field (see [bindings](#bindings)). Note that it does not map to `${true}` and can therefore not be used e.g., for mapping a boolean to a process variable. - -#### Dropdown type - -The `Dropdown` type allows users to select from a number of pre-defined options that are stored in a custom properties `choices` attribute as `{ name, value }` pairs: - -```json -... - "properties": [ - ... - { - "label": "Task Priority", - "type": "Dropdown", - "value": "50", - "choices": [ - { "name": "low", "value": "20" }, - { "name": "medium", "value": "50" }, - { "name": "height", "value": "100" } - ] - } - ] -... -``` - -The resulting properties panel control looks like this: - -![properties panel drop down](./img/c7-defining-templates/field-dropdown.png) - -#### Omitted type - -:::note -Omitting the type is supported in Camunda 7 element templates only. -::: - -By omitting the `type` configuration, the default UI component is rendered for the respective binding. - -For `camunda:inputParameter` and `camunda:outputParameter` bindings an input/output parameter mapping component is rendered. The component includes a toggle to enable or disable the `Variable Assignment`. When untoggling, the respective `camunda:inputParameter` or `camunda:outputParameter` element will not be created in the BPMN XML. - -![default-rendering](./img/c7-defining-templates/default-rendering.png) - -:::note -The configuration options `editable` and `constraints` have no effect for the `camunda:inputParameter` and `camunda:outputParameter` default component. -::: - -For `camunda:errorEventDefinition` bindings, an error component is rendered. The component will include all properties of the referenced `bpmn:Error` element. - -![default-errors-rendering](./img/c7-defining-templates/default-errors-rendering.png) - -:::note -The configuration options `editable` and `constraints` have no effect for the `camunda:errorEventDefinition` default component. -::: - -For the `property`, `camunda:property`, `camunda:in`, `camunda:in:businessKey`, `camunda:out`, and `camunda:field` bindings, an omitted `type` renders the `String` component (single line input). - -For the `camunda:executionListener` binding, an omitted `type` leads to the `Hidden` component (ie. no visible input for the user). - -### Bindings - -The following ways exist to map a custom field to the underlying BPMN 2.0 XML. The _"mapping result"_ in the following section will use `[userInput]` to indicate where the input provided by the user in the `Properties Panel` is set in the BPMN XML. As default or if no user input was given, the value specified in `value` is displayed and used for `[userInput]`. `[]` brackets are used to indicate where the parameters are mapped to in the XML. - -Notice that adherence to the following configuration options is enforced by design. If not adhering, it logs a validation error and ignores the respective element template. 
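To make the notation used in the tables below more concrete, consider the _Async before?_ field from the mail task example above. It uses the `property` binding with the name `camunda:asyncBefore`, so the checkbox value is written directly as an attribute on the task element. The following sketch is illustrative only; the element ID and implementation class are placeholders:

```json
{
  "label": "Async before?",
  "type": "Boolean",
  "binding": {
    "type": "property",
    "name": "camunda:asyncBefore"
  }
}
```

With the checkbox checked, the resulting BPMN 2.0 XML contains roughly the following:

```xml
<bpmn:serviceTask id="MailTask" camunda:class="com.mycompany.MailTaskImpl" camunda:asyncBefore="true" />
```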
- -#### `property` - -| **Binding `type`** | `property` | -| --------------------------- | -------------------------------- | -| **Valid property `type`'s** | All property types are supported | -| **Binding parameters** | `name`: the name of the property | -| **Mapping result** | `<... [name]=[userInput] ... />` | - -#### `camunda:property` - -| **Binding `type`** | `camunda:property` | -| --------------------------- | -------------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `name`: The name of the extension element property | -| **Mapping result** | `` | - -#### `camunda:inputParameter` - -| **Binding `type`** | `camunda:inputParameter` | -| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Text`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `name`: The name of the input parameter
    `scriptFormat`: the format of the script (if script is to be mapped) | -| **Mapping result** | If `scriptFormat` is not set:
    `[userInput]`

    If `scriptFormat` is set:
    `[userInput]` | - -#### `camunda:outputParameter` - -| **Binding `type`** | `camunda:outputParameter` | -| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| **Valid property `type`'s** | `String`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `source`: The source value to be mapped to the `outputParameter`
    `scriptFormat`: the format of the script (if script is to be mapped) | -| **Mapping result (example)** | If `scriptFormat` is not set:
    `[source]`

    If `scriptFormat` is set:
    `[source]` | - -#### `camunda:in` - -| **Binding `type`** | `camunda:in` | -| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `target`: the target value to be mapped to
    `expression`: `true` indicates that `[userInput]` is an expression
    `variables`: either `all` or `local` indicating the variable mapping | -| **Mapping result** | If `target` is set:
    ``

    If `target` is set and `expression` is set to `true`:
    ``

    If `variables` is set to `local`:
    ` ` (Notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`.)

    If `variables` is set to `local` and `target` is set:
    ``

    If `variables` is set to `local`, `target` is set and `expression` is set to `true`:
    ``

    If `variables` is set to `all`:
    `` (Notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`.) | - -#### `camunda:in:businessKey` - -| **Binding `type`** | `camunda:in:businessKey` | -| --------------------------- | ------------------------------------------ | -| **Valid property `type`'s** | `String`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | | -| **Mapping result** | `` | - -#### `camunda:out` - -| **Binding `type`** | `camunda:out` | -| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `source`: the source value to be mapped
    `sourceExpression`: a string containing the expression for the source attribute
    `variables`: either `all` or `local` indicating the variable mapping | -| **Mapping result** | If `source` is set:
    ``

    If `sourceExpression` is set:
    ``

    If `variables` is set to `all`:
    `` (Notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`.)

    If `variables` is set to `local` and `source` is set:
    ``

    If `variables` is set to `local` and `sourceExpression` is set:
    ``

    If `variables` is set to `local`:
    `` (Notice there is no `[userInput]`; the property `type` therefore has to be `Hidden`.) | - -#### `camunda:executionListener` - -| **Binding `type`** | `camunda:executionListener` | -| --------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Valid property `type`'s** | `Hidden` | -| **Binding parameters** | `event`: Value for the `event` attribute
    `scriptFormat`: value for the `scriptFormat` attribute | -| **Mapping result** | `[value]`
    (Notice that `[value]` needs to be set, since only `Hidden` is allowed as a type hence the user can not set a `[userInput]`). | - -#### `camunda:field` - -| **Binding `type`** | `camunda:field` | -| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Text`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `name`: Value for the `name` attribute
    `expression`: `true` indicates that an expression is passed | -| **Mapping result** | `[userInput]`

    If `expression` is set to `true`:
    `[userInput]` | - -#### `camunda:errorEventDefinition` - -| **Binding `type`** | `camunda:errorEventDefinition` | -| --------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `errorRef`: Reference to a scoped `bpmn:Error` binding, generates the `errorRef` attribute as unique id.
    | -| **Mapping result** | ``

    For the referenced scoped `bpmn:Error` binding: `` | - -### Scoped bindings - -Scoped bindings allow you to configure nested elements, such as [Camunda 7 Connectors](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/#use-connectors). - -```json -{ - "name": "ConnectorGetTask", - "id": "my.connector.http.get.Task", - "appliesTo": [ - "bpmn:Task" - ], - "properties": [], - "scopes": [ - { - "type": "camunda:Connector", - "properties": [ - { - "label": "ConnectorId", - "type": "String", - "value": "My Connector HTTP - GET", - "binding": { - "type": "property", - "name": "connectorId" - } - }, - ... - ] - } - ] -} -``` - -The example shows how a Connector is configured as part of the task. -On task creation, the Connector is created with it and the Connector bindings are -exposed to the user in a separate custom fields section. - -![Scoped Custom Fields](./img/c7-defining-templates/scope-custom-fields.png) - -#### Supported scopes - -Camunda 7 supports the following scope bindings: - -| Name | Target | -| ------------------- | ------------------------------------------------------------------------------------------ | -| `camunda:Connector` | [Connectors](https://docs.camunda.org/manual/latest/user-guide/process-engine/connectors/) | -| `bpmn:Error` | Global BPMN Error Element | - -### Groups - -You may define `groups` to organize custom fields into: - -```json -{ - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "Groups", - "id": "group-example", - "appliesTo": [ - "bpmn:ServiceTask" - ], - "groups": [ - { - "id": "definition", - "label": "Task definition" - }, - { - "id": "request", - "label": "Request payload" - }, - { - "id": "result", - "label": "Result mapping" - } - ], - "properties": [ - ... - ] -} -``` - -Associate a field with a group (ID) via the fields `group` key: - -```json -{ - ... - "properties": [ - { - "label": "Implementation Type", - "type": "String", - "group": "definition", - "binding": { - "type": "property", - "name": "camunda:class" - } - }, - ... - ], - ... -} -``` - -![Groups](./img/c7-defining-templates/groups.png) - -### Constraints - -Custom fields may have a number of constraints associated with them: - -- `notEmpty`: Input must be non-empty -- `minLength`: Minimal length for the input -- `maxLength`: Maximal length for the input -- `pattern`: Regular expression to match the input against - -#### Regular expression - -Together with the `pattern` constraint, you may define your custom error messages: - -```json -... - "properties": [ - { - "label": "Web service URL", - "type": "String", - "binding": { ... }, - "constraints": { - "notEmpty": true, - "pattern": { - "value": "https://.*", - "message": "Must be https URL" - } - } - } - ] -``` - -### Display all entries - -Per default, the element template defines the visible entries of the properties panel. All other property controls are hidden. If you want to bring all the default entries back, it is possible to use the `entriesVisible` property. - -```json -[ - { - "name": "Template 1", - "id": "sometemplate", - "entriesVisible": true, - "appliesTo": [ - "bpmn:ServiceTask" - ], - "properties": [ - ... 
- ] - } -] -``` - -![Display default entries](./img/c7-defining-templates/entries-visible.png) diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/configuring-templates.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/configuring-templates.md deleted file mode 100644 index 523f00ce914..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/configuring-templates.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: configuring-templates -title: Configuring templates -description: "Learn about global and local templates, which are loaded by the modeler at application startup." ---- - -Templates are loaded by Desktop Modeler at application startup. Reloading it using `CtrlOrCmd+R` reloads also all templates. Templates are treated as global or local depending on their location in your file system. - -## Global templates - -For templates to be available for all diagrams store them in the `resources/element-templates` directory containing the Camunda Modeler executable. Alternatively, for element templates to be available across Camunda Modeler installations, you can store them in the `resources/element-templates` directory in the modeler's [data directory](../../search-paths#user-data-directory). - -### Example (Windows) - -``` -└── camunda-modeler-5.10.0-win-x64 - ├── Camunda Modeler.exe - └── resources - └── element-templates - └── my-element-templates.json -``` - -## Local templates - -For element templates to only be available for specific diagrams, you can store them in a `.camunda/element-templates` directory in the diagrams parent directory or any of their parent directories. - -### Example - -``` -├── diagram.bpmn -└── .camunda - └── element-templates - └── my-element-templates.json -``` - -Learn more about search paths [here](../../search-paths). diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/defining-templates.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/defining-templates.md deleted file mode 100644 index 7234c116b52..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/element-templates/defining-templates.md +++ /dev/null @@ -1,581 +0,0 @@ ---- -id: defining-templates -title: Defining templates -description: "Learn about JSON schema compatibility, supported BPMN types, defining template properties, and more." ---- - -Templates are defined in template descriptor files as a JSON array: - -```json -[ - { - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "Template 1", - "id": "sometemplate", - "description": "some description", - "version": 1, - "appliesTo": [ - "bpmn:Task" - ], - "elementType": { - "value": "bpmn:ServiceTask", - }, - "properties": [ - ... - ] - }, - { - "name": "Template 2", - ... - } -] -``` - -As seen in the code snippet, a template consists of a number of important components: - -- `$schema : String`: URI pointing towards the [JSON schema](https://json-schema.org/) which defines the structure of the element template `.json` file. Element template schemas are maintained in the [element templates JSON schema](https://github.com/camunda/element-templates-json-schema) repository. Following the [JSON schema](https://json-schema.org/) standard, you may use them for validation or to get assistance (e.g., auto-completion) when working with them in your favorite IDE. 
- -:::note - -The `$schema` attribute is **required** for Camunda 8 element templates. -::: - -Example: - -```json -"$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json" -``` - -- `name : String`: Name of the template. Shown in the element template selection modal and in the properties panel (after applying an element template). -- `id : String`: ID of the template. -- `description : String`: Optional description of the template. Shown in the element template selection modal and in the properties panel (after applying an element template). -- `documentationRef : String`: Optional URL pointing to a template documentation. Shown in the properties panel (after applying an element template). -- `version : Integer`: Optional version of the template. If you add a version to a template, it is considered unique based on its ID and version. Two templates can have the same ID if their version is different. -- `appliesTo : Array`: List of BPMN types the template can be applied to. -- `elementType : Object`: Optional type of the element. If you configure `elementType` on a template, the element is replaced with the specified type when a user applies the template. -- `properties : Array`: List of properties of the template. - -## Creating and editing Connector templates - -Connector templates are a specific kind of element template. You can edit them with visual preview and edit support like formatting, code completion, and error highlighting in [Web Modeler](/components/connectors/manage-connector-templates.md). - -## JSON schema compatibility - -The application uses the `$schema` property to ensure compatibility for a given element template. You find [the latest supported versions here](https://www.npmjs.com/package/@camunda/zeebe-element-templates-json-schema). - -The tooling ignores element templates defining a higher `$schema` version and logs a warning message. - -For example, given the following `$schema` definition, the application takes `0.9.1` as the JSON schema version of the element template: - -```json -"$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema@0.9.1/resources/schema.json" -``` - -The JSON schema versioning is backward-compatible, meaning that all versions including or below the current one are supported. - -## Supported BPMN types - -Currently, element templates may be used on the following BPMN elements: - -- `bpmn:Activity` (including tasks, service tasks, and others) -- `bpmn:SequenceFlow` (for maintaining `condition`) -- `bpmn:Process` -- `bpmn:Event` - -## Defining template properties - -With each template, you define some user-editable fields as well as their mapping to BPMN 2.0 XML, and Camunda extension elements. - -Let us consider the following example that defines a template for a mail sending task: - -```json -{ - "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", - "name": "REST Connector", - "id": "io.camunda.examples.RestConnector", - "description": "A REST API invocation task.", - "appliesTo": ["bpmn:ServiceTask"], - "properties": [ - { - "type": "Hidden", - "value": "http", - "binding": { - "type": "zeebe:taskDefinition:type" - } - }, - { - "label": "REST Endpoint URL", - "description": "Specify the url of the REST API to talk to.", - "type": "String", - "binding": { - "type": "zeebe:taskHeader", - "key": "url" - }, - "constraints": { - "notEmpty": true, - "pattern": { - "value": "^https?://.*", - "message": "Must be http(s) URL." 
- } - } - }, - { - "label": "REST Method", - "description": "Specify the HTTP method to use.", - "type": "Dropdown", - "value": "get", - "choices": [ - { "name": "GET", "value": "get" }, - { "name": "POST", "value": "post" }, - { "name": "PATCH", "value": "patch" }, - { "name": "DELETE", "value": "delete" } - ], - "binding": { - "type": "zeebe:taskHeader", - "key": "method" - } - }, - { - "label": "Request Body", - "description": "Data to send to the endpoint.", - "value": "", - "type": "String", - "optional": true, - "binding": { - "type": "zeebe:input", - "name": "body" - } - }, - { - "label": "Result Variable", - "description": "Name of variable to store the response data in.", - "value": "response", - "type": "String", - "optional": true, - "binding": { - "type": "zeebe:output", - "source": "= body" - } - } - ] -} -``` - -The example defines five custom fields, each mapped to different technical properties: - -- The task type `http` is mapped to the `zeebe:taskDefinition:type` property in BPMN 2.0 XML. -- The `REST Endpoint URL` and `REST Method` are mapped to `task headers`. -- The `Request Body` is mapped to a local variable via an `input parameter`. -- The `Result Variable` is mapped into a process variable via an `output parameter`. - -The task type is hidden to the user. Properties specified in the template can be edited through the properties panel as shown in the following screenshot: - -![Custom Fields](./img/overview.png) - -As seen in the example, the important attributes in a property definition are: - -- `label`: A descriptive text shown with the property. -- `type`: Defining the visual appearance in the properties panel (may be any of `String`, `Text`, `Boolean`, `Dropdown` or `Hidden`). -- `value`: An optional default value to be used if the property to be bound is not yet set. -- `binding`: Specifying how the property is mapped to BPMN or Camunda extensions (cf. [bindings](#bindings)). -- `constraints`: A list of editing constraints to apply to the template. - -In addition, fields can be activated conditionally via these properties: - -- `id`: An identifier that can be used to reference the property in conditional properties -- `condition`: A condition that determines when [the property is active](#defining-conditional-properties) - -### Types - -The input types `String`, `Text`, `Boolean`, `Dropdown`, and `Hidden` are available. As seen above `String` maps to a single-line input, while `Text` maps to a multi-line input. - -#### Boolean / checkbox type - -The `Boolean` type maps to a checkbox that can be toggled by the user. - -When checked, it maps to `true` in the respective field (see [bindings](#bindings)). - -#### Dropdown type - -The `Dropdown` type allows users to select from a number of pre-defined options that are stored in a custom properties `choices` attribute as `{ name, value }` pairs: - -```json -... - "properties": [ - ... - { - "label": "REST Method", - "description": "Specify the HTTP method to use.", - "type": "Dropdown", - "value": "get", - "choices": [ - { "name": "GET", "value": "get" }, - { "name": "POST", "value": "post" }, - { "name": "PATCH", "value": "patch" }, - { "name": "DELETE", "value": "delete" } - ], - "binding": { - "type": "zeebe:taskHeader", - "key": "method" - } - } - ] -... -``` - -The resulting properties panel control looks like this: - -![properties panel drop down](./img/field-dropdown.png) - -#### FEEL - -We support the feel properties `optional` and `required`. 
-When set, the input field offers visual indications that a FEEL expression is expected. - -```json - "properties": [ - { - "label": "Optional FEEL Expression", - "type": "String", - "feel": "optional" - }, - { - "label": "Required FEEL Expression", - "type": "Text", - "feel": "required" - } - ] -``` - -##### Supported types - -The property `feel` is supported on the following input types: - -- `String` -- `Text` - -### Bindings - -The following ways exist to map a custom field to the underlying BPMN 2.0 XML. The **mapping result** in the following section uses `[userInput]` to indicate where the input provided by the user in the `Properties Panel` is set in the BPMN XML. As default or if no user input was given, the value specified in `value` is displayed and used for `[userInput]`. `[]` brackets are used to indicate where the parameters are mapped to in the XML. - -Notice that adherence to the following configuration options is enforced by design. If not adhering, it logs a validation error and ignores the respective element template. - -#### `property` - -| **Binding `type`** | `property` | -| --------------------------- | -------------------------------- | -| **Valid property `type`'s** | All property types are supported | -| **Binding parameters** | `name`: The name of the property | -| **Mapping result** | `<... [name]=[userInput] ... />` | - -#### `zeebe:input` - -| **Binding `type`** | `zeebe:input` | -| --------------------------- | ----------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Text`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `name`: The name of the input parameter | -| **Mapping result** | `` | - -#### `zeebe:property` - -| **Binding `type`** | `zeebe:property` | -| --------------------------- | ----------------------------------------------------- | -| **Valid property `type`'s** | `String`
    `Text`
    `Hidden`
    `Dropdown` | -| **Binding parameters** | `name`: The name of the property | -| **Mapping result** | ` -``` - -It also sets up custom fields on the diagram element and make these available to the user for inspection and editing. -Properties which were not configured in the element template using custom fields will not be available for editing for -the user. - -## Removing templates - -To remove an applied template from an element, either the _Unlink_ or _Remove_ function can be used: - -- **Remove**: Remove the element template from the `modelerTemplate` property and reset all properties of the respective element. -- **Unlink**: Remove the element template from the `modelerTemplate` property but keep the properties which were set. - -![Unlink or Remove](./img/unlink-remove.png) - -## Updating templates - -If a template is applied and a new version of the template is found you can _update_ the template. - -![Update Template](./img/update-template.png) - -Templates are updated according to the following rules: - -- If the property is set in new template, it will override unless the property was set by the old template and changed afterwards. -- If the property is not defined in the new template, it will unset. -- Sub-properties of complex properties (e.g. `zeebe:input`, `zeebe:output`) are handled - according to these rules if they can be identified. - -### Replacing templates - -If a template is deprecated with a new element template and you want to keep the same input values as in the -deprecated template, you can: - -- **Unlink**: Remove the current template that is deprecated from the `modelerTemplate` property, but keep the properties - which - were set. -- **Select** and apply the new element template. - -## Missing templates - -If a template is applied to an element but the respective template cannot be found on the system, the editing of the -element is disabled. _Unlinking_ or _removing_ the template for the element or adding the element template config -enables the editing again. - -![Template not Found](./img/template-not-found.png) diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/flags.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/flags.md deleted file mode 100644 index 4d918d1b5f2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/flags.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -id: flags -title: Flags -description: "Flags allow you to control the availability of certain features within Desktop Modeler." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Flags allow you to control the availability of certain features within Desktop Modeler. - -## Configuring Flags - -You may configure flags in a `flags.json` file or pass them via CLI. - -### Configure in `flags.json` - -Place a `flags.json` file inside the `resources` folder of your local [`{USER_DATA}`](../search-paths#user-data-directory) or [`{APP_DATA_DIRECTORY}`](../search-paths#app-data-directory) directory to persist them. - -### Configure via CLI - -Pass flags via the command line when starting the application. - - - - - -```plain -"Camunda Modeler.exe" --disable-plugins -``` - - - - - -```plain -camunda-modeler --disable-plugins -``` - - - - - -```plain -camunda-modeler --disable-plugins -``` - - - - -Flags passed as command line arguments take precedence over those configured via a configuration file. 
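For illustration, a persisted `flags.json` that turns off plug-ins and remote interaction could look like the following sketch (the chosen flags are arbitrary examples from the table below); if one of them is also passed on the command line, the command-line value wins:

```js
{
  "disable-plugins": true,
  "disable-remote-interaction": true
}
```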
- -## Available Flags - -| flag | default value | -| -------------------------------------------------- | ----------------------------------- | -| ["disable-plugins"](#disable-plug-ins) | false | -| "disable-adjust-origin" | false | -| "disable-cmmn" | true | -| "disable-dmn" | false | -| "disable-form" | false | -| "disable-platform" | false | -| "disable-zeebe" | false | -| "disable-remote-interaction" | false | -| "single-instance" | false | -| "user-data-dir" | [Electron default](../search-paths) | -| ["display-version"](#custom-display-version-label) | `undefined` | -| ["zeebe-ssl-certificate"](#zeebe-ssl-certificate) | `undefined` | - -## Examples - -### Disable Plug-ins - -Start the modeler without activating installed plug-ins. This is useful to debug modeler errors. - -### BPMN-only Mode - -To disable the DMN and Form editing capabilities of the App, configure your `flags.json` like this: - -```js -{ - "disable-dmn": true, - "disable-form": true -} -``` - -As a result, the app will only allow users to model BPMN diagrams. - -![BPMN only mode](./img/bpmn-only.png) - -### Custom `display-version` label - -To display a custom version information in the status bar of the app, configure your `flags.json` like this: - -```js -{ - "display-version": "1.2.3" -} -``` - -![Custom version info](./img/display-version.png) - -### Zeebe SSL certificate - -> :information_source: The Modeler will read trusted certificates from your operating system's trust store. - -Provide additional certificates to validate secured connections to a Camunda 8 installation. - -Configure your `flags.json` like this: - -```js -{ - "zeebe-ssl-certificate": "C:\\path\\to\\certs\\trusted-custom-roots.pem" -} -``` - -Additional information adapted from the [upstream documentation](https://nodejs.org/docs/latest/api/tls.html#tlscreatesecurecontextoptions): - -> The peer (Camunda 8) certificate must be chainable to a CA trusted by the app for the connection to be authenticated. When using certificates that are not chainable to a well-known CA, the certificate's CA must be explicitly specified as trusted or the connection will fail to authenticate. If the peer uses a certificate that doesn't match or chain to one of the default CAs, provide a CA certificate that the peer's certificate can match or chain to. For self-signed certificates, the certificate is its own CA, and must be provided. 
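If your trusted roots are spread across several files, the single PEM bundle referenced by the flag can be created by concatenating them (shown here for Linux/macOS; the file names are purely illustrative):

```plain
cat my-root-ca.pem my-intermediate-ca.pem > trusted-custom-roots.pem
```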
diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/img/bpmn-only.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/img/bpmn-only.png deleted file mode 100644 index b525087a02e..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/img/bpmn-only.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/img/display-version.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/img/display-version.png deleted file mode 100644 index 58c044e38bc..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/flags/img/display-version.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png deleted file mode 100644 index f9904806a29..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-remember.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png deleted file mode 100644 index 3b6e7780ff7..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud-success.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png deleted file mode 100644 index 7be1a6adef5..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram-camunda-cloud.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram.png deleted file mode 100644 index 0f17049c7a3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-diagram.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-icon.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-icon.png deleted file mode 100644 index 426760838fe..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deploy-icon.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deployment-successful.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deployment-successful.png deleted file mode 100644 index bfc621d5b6f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/deployment-successful.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/element-configuration.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/element-configuration.png deleted file mode 100644 index a609f86946e..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/element-configuration.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/elements.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/elements.png deleted file mode 100644 index e0332af565e..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/elements.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/empty.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/empty.png deleted file mode 100644 index 197865c443c..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/empty.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/new-diagram.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/new-diagram.png deleted file mode 100644 index 7ac5fa6a387..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/new-diagram.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/properties-panel.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/properties-panel.png deleted file mode 100644 index c7de339b48f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/properties-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/start-instance-icon.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/start-instance-icon.png deleted file mode 100644 index cc2f995eb1a..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/start-instance-icon.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/start-instance-successful.png b/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/start-instance-successful.png deleted file mode 100644 index 0338961cded..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/desktop-modeler/img/start-instance-successful.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/index.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/index.md deleted file mode 100644 index dcf1d88fbf2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: index -title: Desktop Modeler -sidebar_label: About -description: "Camunda Desktop Modeler is a desktop app for modeling BPMN, DMN, and Forms, compatible with Camunda 7 and Camunda 8." ---- - -Camunda 7 and 8 - -Desktop Modeler is a desktop application for modeling BPMN, DMN, and Forms and supports you in building executable diagrams with Camunda. - -![Desktop Modeler Screenshot](./img/new-diagram.png) - -## Features - -- Design [BPMN](../bpmn/bpmn.md), [DMN](../dmn/dmn.md), and [Forms](../forms/camunda-forms-reference.md) -- Implement process applications for Camunda 7 and 8 -- Deploy and run processes directly from the application -- Validate your diagrams using [configurable lint rules](https://github.com/camunda/camunda-modeler-custom-linter-rules-plugin) -- [Customize](./flags/flags.md) and [extend](./plugins/plugins.md) the application - -## Download - -Download the app for Windows, Linux, or macOS from the [Camunda downloads page](https://camunda.com/download/modeler/). 
- -## Get started - -Learn how to [develop your first process](./model-your-first-diagram.md) and [deploy it](./connect-to-camunda-8.md) to Camunda 8. - -## Resources - -- [Report an issue](https://github.com/camunda/camunda-modeler/issues) -- [Source code](https://github.com/camunda/camunda-modeler) -- [Troubleshooting](./troubleshooting.md) diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/install-the-modeler.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/install-the-modeler.md deleted file mode 100644 index 9f4328b2ddc..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/install-the-modeler.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -id: install-the-modeler -title: Install Desktop Modeler -sidebar_label: Installation -description: "Learn how to install Camunda Desktop Modeler." ---- - -Download [Desktop Modeler](./index.md) for Windows, macOS, and Linux from the [Camunda downloads page](https://camunda.com/download/modeler/). diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/model-your-first-diagram.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/model-your-first-diagram.md deleted file mode 100644 index a37ab52d7b3..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/model-your-first-diagram.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: model-your-first-diagram -title: Model your first diagram ---- - -After starting [Desktop Modeler](./index.md), you can model your first BPMN diagram. Follow the steps below: - -1. Create a [BPMN](../bpmn/bpmn.md) diagram: - -![empty application](./img/empty.png) - -2. View the BPMN diagram with a start event: - -![new diagram](./img/new-diagram.png) - -3. On the left side of the screen you will find the element palette. Drag and drop the elements onto the diagram: - -![elements](./img/elements.png) - -Elements that support different types can be reconfigured by clicking on the corresponding icon. In the following screenshot, a task has been added to the diagram. It can be converted to a [service task](../bpmn/service-tasks/service-tasks.md), for example, by clicking on the task and the wrench-shaped icon. - -![task configuration](img/element-configuration.png) - -4. Use the properties panel on the right side of the page to edit the properties of the currently selected element: - -![properties panel](img/properties-panel.png) - -1. Once you finish modeling and configuring your diagram, you can deploy it to a [Camunda 8 cluster](./connect-to-camunda-8.md). diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/plugins/plugins.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/plugins/plugins.md deleted file mode 100644 index aec6a119f91..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/plugins/plugins.md +++ /dev/null @@ -1,201 +0,0 @@ ---- -id: plugins -title: Plugins -description: "Plugins allow you to change the appearance and behavior of Desktop Modeler and add new features." ---- - -:::note -The Camunda Modeler plugins API is not stable and might change in the future. -::: - -Plugins allow you to change the appearance and behavior of Camunda Modeler and add new features. 
- -## Plugging into Camunda Modeler - -You can plug into the modeler to change its appearance, add new menu entries, extend the modeling tools for [BPMN](https://github.com/bpmn-io/bpmn-js) and [DMN](https://github.com/bpmn-io/dmn-js), or even slot React.js components into the Camunda Modeler UI. - -To add a plugin, put it into the `resources/plugins` directory relative to your [`{APP_DATA_DIRECTORY}`](../search-paths#app-data-directory) or [`{USER_DATA_DIRECTORY}`](../search-paths#user-data-directory) directory. - -Camunda Modeler searches for available plugin entry points via the `resources/plugins/*/index.js` pattern. This means that each plugin must reside in it's own folder which is a direct child of the `plugins` directory. - -:::note -If you download and extract plugins from GitHub, the extracted directory contains the actual plugin, so make sure to copy the plugin, not its parent directory. -::: - -## Overview of your possibilities as a plugin developer - -There are many ways for a developer to extend Camunda Modeler and its modeling tools. The following table shows an overview: - -| Plugin type | Functionality | Example | -| ---------------------- | -------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -| Menu Entries | Add new entries to the menu bar - useful to interact with your plugins, link to external pages, add settings, etc. | [Menu Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/menu-plugin-example) | -| Custom Styles | Change the look and feel of Camunda Modeler by adding stylesheets. | [Styles Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/style-plugin-example) | -| React Components | Embed custom React.js components into specific anchor points of Camunda Modeler. | [React Plugin Example](https://github.com/pinussilvestrus/camunda-modeler-autosave-plugin) | -| bpmn-js Modules | Extend our BPMN editor by injecting your own custom [bpmn-js](https://github.com/bpmn-io/bpmn-js) modules. | [bpmn-js Module Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/bpmn-js-plugin-example) | -| bpmn-moddle Extensions | Extend the BPMN language model by injecting your own custom [bpmn-moddle](https://github.com/bpmn-io/bpmn-moddle) modules. | [bpmn-moddle Extension Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/bpmn-js-plugin-moddle-extension-example) | -| dmn-js Modules | Extend our DMN editor by injecting your own custom [dmn-js](https://github.com/bpmn-io/dmn-js) modules. | [dmn-js Module Example](https://github.com/camunda/camunda-modeler-plugins/tree/master/dmn-js-plugin-example) | -| dmn-moddle Extensions | Extend the DMN language model by injecting your own custom [dmn-moddle](https://github.com/bpmn-io/dmn-moddle) modules | n/a | -| bpmnlint Plugins | Add custom lint rules through [bpmnlint](https://github.com/bpmn-io/bpmnlint) plugins | [Custom lint rules](../custom-lint-rules) | - -## Getting started with development - -### Plugin entry point - -Regardless of the type of your plugin, you have to export a [Node.js module](https://nodejs.org/api/modules.html) named `index.js` that acts as a plugin entry point. 
The following shows an example of such entry point: - -```javascript -module.exports = { - name: "My Awesome Plugin", // the name of your plugin - style: "./style.css", // changing the appearance of the modeler - menu: "./menu.js", // adding menu entries to the modeler - script: "./script.js", // extending the modeler, and its BPMN and DMN components -}; -``` - -The modeler will automatically load your plugins on startup. - -### Changing the appearance of the modeler - -You can change the appearance of the modeler using CSS. - -Your stylesheet might look like this: - -```css -body { - background: linear-gradient(0deg, #52b415, #eee); -} -``` - -Plug it into the modeler like this: - -```javascript -module.exports = { - style: "./style.css", -}; -``` - -### Adding menu entries to the modeler - -You can add new menu entries to the modeler's menu. - -Describe your menu entries like this: - -```javascript -module.exports = function (electronApp, menuState) { - return [ - { - label: "Open BPMN Reference", - accelerator: "CommandOrControl+[", - enabled: function () { - // only enabled for BPMN diagrams - return menuState.bpmn; - }, - action: function () { - var shell = require("electron").shell; - shell.openExternal("https://camunda.org/bpmn/reference/"); - }, - }, - ]; -}; -``` - -Plug them into the modeler like this: - -```javascript -module.exports = { - menu: "./menu-entries", -}; -``` - -:::note -The code within the menu entries executes on [the main process](https://www.electronjs.org/docs/latest/tutorial/process-model) of Electron. This comes with the advantage of allowing you to use [Node.js](https://nodejs.org/en/) modules, but you need to consider that you cannot debug the respective code in Chromium. For more information regarding main process debugging, refer to the [official Electron documentation](https://www.electronjs.org/docs/latest/tutorial/debugging-main-process). -::: - -For more information on how the modeler's menu works, take a look at its [implementation](https://github.com/camunda/camunda-modeler/blob/master/app/lib/menu/menu-builder.js). - -### Extend the modeler and its BPMN and DMN components - -You can extend the modeling tools for [BPMN](https://github.com/bpmn-io/bpmn-js) and [DMN](https://github.com/bpmn-io/dmn-js) with your own modules, as well as embedding React.js components into certain sections of Camunda Modeler. - -Since the client of the modeler uses [Chromium](https://www.chromium.org/Home), you can't use Node.js modules to extend the modeling tools. You need to bundle your plugin first. The easiest way to get started with client-side plugins is through [this example project](https://github.com/camunda/camunda-modeler-plugin-example). - -> In this example, we are building a bpmn-js plugin, but this basic structure applies to all extensions besides menu entries and style. The modules themselves will be different however, so refer to our [examples](https://github.com/camunda/camunda-modeler-plugins) for more information on how to build different kinds. - -Take the following steps: - -1. Clone or fork the repository: - -``` -git clone https://github.com/camunda/camunda-modeler-plugin-example.git -``` - -The plugin starter project comes with a menu and style folder which are referenced in the plugin entry point. If you do not need those, you can remove them from the entry point and delete the respective folder. - -2. Install the dependencies: - -``` -npm install -``` - -3. 
-3. Create your module:
-
-```javascript
-function LoggingPlugin(eventBus) {
-  eventBus.on("shape.added", function () {
-    console.log("A shape was added to the diagram!");
-  });
-}
-
-module.exports = {
-  __init__: ["loggingPlugin"],
-  loggingPlugin: ["type", LoggingPlugin],
-};
-```
-
-4. Require your file in `client.js` and register it via our [helper functions](https://github.com/camunda/camunda-modeler-plugin-helpers):
-
-```javascript
-var registerBpmnJSPlugin =
-  require("camunda-modeler-plugin-helpers").registerBpmnJSPlugin;
-var plugin = require("./LoggingPlugin");
-
-registerBpmnJSPlugin(plugin);
-```
-
-5. You may want to create a plugin which specifically targets Camunda 7 or Camunda 8. To do this, use the appropriate variations of the registration helper function for your plugin type.
-
-```javascript
-registerPlatformBpmnJSPlugin(plugin); // Register plugin for Camunda 7 BPMN diagrams only
-registerCloudBpmnJSPlugin(plugin); // Register plugin for Camunda 8 BPMN diagrams only
-registerBpmnJSPlugin(plugin); // Register plugin for Camunda 7 and 8 BPMN diagrams
-```
-
-6. You can use the globally available functions `getModelerDirectory` and `getPluginsDirectory` to load additional resources:
-
-```javascript
-function LoggingPlugin(eventBus, canvas) {
-  // create an image element and resolve its source relative to the plugins directory
-  var img = document.createElement("img");
-  img.src = getPluginsDirectory() + "/logging-plugin/image.png";
-
-  canvas.getContainer().appendChild(img);
-}
-```
-
-7. Bundle your plugin:
-
-```
-npm run build
-```
-
-8. Put the folder into the `resources/plugins` directory relative to your Camunda Modeler installation directory. You can now use your plugin!
-
-### Development workflow
-
-When creating a plugin, you can place the directory containing your plugin in the aforementioned `resources/plugins` directory.
-
-Plugins will be loaded on application startup (menu plugins) or reload (style and modeling tool plugins). To reload the application, open the developer tools (F12) and press `CtrlOrCmd + R`. This will clear all unsaved diagrams.
-
-## Additional resources
-
-- [Example Plugins](https://github.com/camunda/camunda-modeler-plugins)
-- [Plugin Starter Project](https://github.com/camunda/camunda-modeler-plugin-example)
diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/search-paths/search-paths.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/search-paths/search-paths.md
deleted file mode 100644
index a2d0af056f7..00000000000
--- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/search-paths/search-paths.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-id: search-paths
-title: Search paths
-description: "Features like element templates and plugins allow you to add your own resources to Desktop Modeler."
----
-
-Features like element templates and plugins allow you to add your own resources to Desktop Modeler. For these resources to be found, they have to be in one of two directories depending on how local or global you want them to be.
-
-## App data directory
-
-The `resources` directory relative to the directory containing the Camunda Modeler executable file. In our documentation we refer to it as `{APP_DATA_DIRECTORY}`.
-
-Resources in the app data directory will be found by any local Camunda Modeler instance.
- -### Example (Windows) - -``` -└── camunda-modeler-5.10.0-win-x64 - ├── Camunda Modeler.exe - └── resources - ├── element-templates - | └── my-element-templates.json - └── plugins - └── my-plugin - └── index.js -``` - -## User data directory - -The `camunda-modeler/resources` directory relative to the per-user application data directory, which by default points to: - -- `%APPDATA%` on [Windows](https://www.pcworld.com/article/2690709/whats-in-the-hidden-windows-appdata-folder-and-how-to-find-it-if-you-need-it.html) -- `$XDG_CONFIG_HOME` or `~/.config` on [Linux](https://wiki.archlinux.org/index.php/XDG_user_directories) -- `~/Library/Application Support` on macOS - -In our documentation we refer to it as `{USER_DATA_DIRECTORY}`. - -Resources in the user data directory will be found by all Camunda Modeler instances. - -### Example (Windows) - -``` -└── AppData - └── Roaming - └── camunda-modeler - └── resources - ├── element-templates - | └── my-element-templates.json - └── plugins - └── my-plugin - └── index.js -``` - -It is possible to change the user data directory using the `--user-data-dir` option via when starting Camunda Modeler from the command line. Refer to the [flags documentation](../flags) on how to configure the application with a flags file. diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/start-instance.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/start-instance.md deleted file mode 100644 index 99832ceb6d5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/start-instance.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: start-instance -title: Start a new process instance -description: "After you have deployed your process to Camunda 8, you can start a new instance of this process via the play icon." ---- - -After you have [deployed your process to Camunda 8](./connect-to-camunda-8.md), you can start a new instance of this process via the play icon: - -![start instance icon](./img/start-instance-icon.png) - -After the instance was started successfully, a corresponding message is displayed: - -![start instance successful](./img/start-instance-successful.png) diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/telemetry/telemetry.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/telemetry/telemetry.md deleted file mode 100644 index ec7f9ba2bec..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/telemetry/telemetry.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -id: telemetry -title: Telemetry -description: "Opt in for the collection of telemetry data using Desktop Modeler. This data is used to better understand how the application is used and to improve it." ---- - -You can opt in for the collection of telemetry data when using Desktop Modeler. This data is used to better understand how the application is used and to improve it based on data. This page summarizes the data collected. - -## General structure of the events - -The events **Desktop Modeler** sends share a similar payload which usually (but not exclusively) includes information like: - -- **event name**: The name of the event triggered (e.g. `diagram:opened`) -- **application version**: The version of Desktop Modeler being used (e.g. 
Version 5.0.0)
-- **editor id**: A randomly generated id assigned to your Desktop Modeler installation
-
-## Definition of events
-
-### Ping event
-
-The `Ping Event` is sent in the following situations:
-
-- The modeler is opened (given that the `Usage Statistics` option is enabled).
-- The `Usage Statistics` option is enabled for the first time.
-- Once every 24 hours (given that the `Usage Statistics` option is enabled).
-
-The `Ping Event` also sends the list of plugins installed and flags defined:
-
-```json
-  "plugins": ["PLUGIN_NAME"],
-  "flags": {
-    "FLAG_NAME": "FLAG_VALUE"
-  }
-```
-
-### Diagram opened/closed event
-
-The `Diagram Opened Event` is sent in the following situations:
-
-- User created a new BPMN diagram
-- User created a new DMN diagram
-- User created a new Form
-- User opened an existing BPMN diagram
-- User opened an existing DMN diagram
-- User opened an existing Form
-
-The `Diagram Closed Event` is sent in the following situations:
-
-- User closed a BPMN diagram
-- User closed a DMN diagram
-- User closed a Form
-
-These events include the following properties:
-
-- `diagramType`: BPMN, DMN, or Form
-- Engine profile:
-  - `executionPlatform`:
-  - `executionPlatformVersion`:
-
-### Deployment and start instance events
-
-The `Deployment Event` is sent in the following situations:
-
-- User deploys a BPMN or DMN diagram to Camunda 7 or Camunda 8
-- User deploys a Form to Camunda 7
-
-The `Deployment Event` and `Start Instance Event` have the following properties:
-
-- `diagramType`: BPMN, DMN, or Form
-- Engine profile:
-  - `executionPlatform`:
-  - `executionPlatformVersion`:
-
-In the event of an unsuccessful deployment, an `error` property will be present in the payload containing an error code.
-
-If provided, as is the case when deploying to a Zeebe-based platform, the payload also includes the target type of the deployment:
-
-```json
-"targetType": "[camundaCloud or selfHosted]"
-```
-
-If the target engine profile is set in the diagram, the payload will also contain it:
-
-```json
-  "executionPlatform": ""
-```
-
-### Tracked click events
-
-The `Tracked Click Events` are sent when a user clicks a link or button contained within a tracked parent 'container'.
-
-Currently, these containers are:
-
-- Each of the welcome page columns
-- The version info overlay
-
-The event supplies:
-
-- The `parent` container id to locate the application section
-- The button label or link text (generalized as label) to identify what was specifically clicked
-- A type to differentiate buttons, internal links, and external links
-- The link target (optional, for external links)
-
-Example event:
-
-```json
-{
-  "type": "[button or external-link or internal-link]",
-  "parent": "welcome-page-learn-more",
-  "label": "Click here to read more about Camunda",
-  "link": "https://camunda.com/"
-}
-```
-
-:::note
-`"link"` is only present for `"type": "external-link"`.
-:::
-
-### Overlay opened event
-
-The `Overlay Opened Event` is sent when an overlay is opened via user interaction. Currently, this event is sent in the following circumstances:
-
-- Version Info overlay is opened
-- Deployment overlay is opened
-- Start instance overlay is opened
-- Deployment overlay is closed
-- Start Instance overlay is closed
-
-For the **Version Info** overlay, the event also sends the `source` of the click (`"menu"` or `"statusBar"`).
-
-For the **Deployment** and **Start Instance** overlays, the event also sends the `diagramType` (BPMN, DMN, or Form).
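For illustration only, such an overlay event can be pictured as the general payload fields described at the top of this page plus the overlay-specific property. All field names except `source` are assumptions made for the sake of the example, not taken from the implementation:

```json
{
  "event": "overlay:opened",
  "applicationVersion": "5.10.0",
  "editorId": "00000000-aaaa-bbbb-cccc-000000000000",
  "source": "statusBar"
}
```

A Deployment or Start Instance overlay event would carry `diagramType` (for example, `"bpmn"`) instead of `source`.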
- -### Form editor events - -The `Form editor events` are sent on different interactions with the form builder: - -- User opened or collapsed a panel in the form editor. The event includes the current open state for each form preview panel and the interaction that triggered the change. - -```json -{ - "layout": { - "form-input": { - "open": true - }, - "form-output": { - "open": true - }, - "form-preview": { - "open": true - } - }, - "triggeredBy": "keyboardShortcut|previewPanel|statusBar|windowMenu" -} -``` - -- User interacted with the form input data panel. -- User interacted with the form preview panel. - -In all events [the execution platform and version](#diagram-openedclosed-event) are sent as well. diff --git a/versioned_docs/version-8.2/components/modeler/desktop-modeler/troubleshooting.md b/versioned_docs/version-8.2/components/modeler/desktop-modeler/troubleshooting.md deleted file mode 100644 index 5ff8f6149ab..00000000000 --- a/versioned_docs/version-8.2/components/modeler/desktop-modeler/troubleshooting.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -id: troubleshooting -title: Troubleshooting -description: "This page lists common issues with Desktop Modeler and potential resolutions." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -## How to start Desktop Modeler without plugins - -You can start Desktop Modeler with the [`--disable-plugins` flag](./flags/flags.md#disable-plug-ins). - -## How to obtain Desktop Modeler logs - -Depending on your operating system, you can find Desktop Modeler logs in different places: - - - - - -```plain -%APPDATA%\camunda-modeler\logs -``` - - - - - -```plain -~/Library/Logs/Camunda Modeler -``` - - - - - -```plain -~/.config/camunda-modeler/logs -``` - - - - -To produce logging output, you can also run Desktop Modeler from the command line. - -## I cannot connect to Zeebe - -You try to connect (i.e., to deploy) to a remote Zeebe instance, and Desktop Modeler tells you it "cannot find a running Zeebe." - -To resolve this issue, check if you can connect to Zeebe through another client, i.e., [`zbctl`](../../../apis-tools/cli-client/index.md). If that works, [further debug your Zeebe connection](#debug-zeebe-connection-issues). If that does not work, resolve the [general connection issue](#resolve-a-general-zeebe-connection-issue) first. - -## Resolve a general Zeebe connection issue - -You try to connect to Zeebe from both Desktop Modeler _and_ [`zbctl`](../../../apis-tools/cli-client/index.md), and neither of them works. General connection failures can have a couple of reasons: - -### The (remote) Zeebe instance is not reachable - -Ensure your computer has access to the (remote) network. - -### The connection to Zeebe happens through a proxy - -[Inspect the connection](#how-can-i-get-details-about-a-secure-remote-connection) to understand if it can be established. - -Secure connections to Zeebe require [HTTP/2 over TLS with protocol negotiation via ALPN](../../../self-managed/platform-deployment/troubleshooting.md#zeebe-ingress-grpc). Ensure your proxy supports these features and does not forcefully downgrade the connection to HTTP/1. - -## Debug Zeebe connection issues - -You can connect to Zeebe via [`zbctl`](../../../apis-tools/cli-client/index.md) or another API client. However, connecting through Desktop Modeler fails. 
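Before digging into the Modeler-specific causes below, it can help to re-run the working client from the same machine (and, if applicable, the same proxy environment) as your Desktop Modeler installation, to rule out environment differences. A minimal sketch with `zbctl` — the address is a placeholder, and authentication flags (or `--insecure` for plaintext setups) depend on your environment:

```sh
# Replace the address with your actual gateway endpoint and add credentials as required.
zbctl status --address zeebe.example.com:443
```

If this call succeeds in the same environment where Desktop Modeler fails, continue with the checks below.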
- -### Secure connection to Zeebe fails - -When connecting securely to Camunda 8 SaaS, Camunda 8 Self-Managed, or a standalone Zeebe instance (via `https` endpoint URL), Desktop Modeler tries to establish a secure connection. In the process, it strictly validates the server certificates presented against well-known certificate authorities. Failure to connect may have several reasons: - -#### The (remote) endpoint is not configured for secure connections - -Ensure you properly configured the remote endpoint. - -#### The (remote) endpoint presents an untrusted certificate - -[Inspect the connection](#how-can-i-get-details-about-a-secure-remote-connection) to understand which certificates are being returned by the server. - -Ensure you configure Desktop Modeler for [custom SSL certificates](#how-can-i-provide-a-custom-ssl-certificate). - -If intermediate signing authorities sign the server certificate, ensure the remote endpoint [serves both server and intermediate certificates](https://nginx.org/en/docs/http/configuring_https_servers.html#chains) to Desktop Modeler. - -## How can I provide a custom SSL certificate? - -You configured a custom SSL certificate in your (remote) Zeebe endpoint and want Desktop Modeler to accept that certificate. - -The app [strictly validates](./flags/flags.md#zeebe-ssl-certificate) the remote server certificate trust chain. If you use a custom SSL server certificate, you must make the signing CA certificate known to Desktop Modeler, not the server certificate itself. - -Desktop Modeler reads trusted certificate authorities from your operating systems trust store. Installing custom CA certificates in that trust store is recommended for most users. Alternatively, you may provide custom trusted CA certificates via the [`--zeebe-ssl-certificate` flag](./flags/flags.md#zeebe-ssl-certificate). - -## How can I get details about a secure remote connection? - -You can use the following command to retrieve information about HTTP/2 over TLS support (ALPN) and certificates provided by a remote endpoint: - -```sh -> openssl s_client -alpn h2 -connect google.com:443 -servername google.com -[...] ---- -Certificate chain - 0 s:/CN=*.google.com - i:/C=US/O=Google Trust Services LLC/CN=GTS CA 1C3 - 1 s:/C=US/O=Google Trust Services LLC/CN=GTS CA 1C3 - i:/C=US/O=Google Trust Services LLC/CN=GTS Root R1 - 2 s:/C=US/O=Google Trust Services LLC/CN=GTS Root R1 - i:/C=BE/O=GlobalSign nv-sa/OU=Root CA/CN=GlobalSign Root CA ---- -[...] ---- -New, TLSv1/SSLv3, Cipher is AEAD-CHACHA20-POLY1305-SHA256 -Server public key is 256 bit -Secure Renegotiation IS NOT supported -Compression: NONE -Expansion: NONE -ALPN protocol: h2 -SSL-Session: - Protocol : TLSv1.3 - Cipher : AEAD-CHACHA20-POLY1305-SHA256 - Session-ID: - Session-ID-ctx: - Master-Key: - Start Time: 1687516295 - Timeout : 7200 (sec) - Verify return code: 0 (ok) ---- -``` - -## How can I debug log gRPC / Zeebe communication? - -You can also start Desktop Modeler with gRPC logging turned on to get detailed [logging output](#how-to-obtain-desktop-modeler-logs) on communication to Zeebe: - - - - - -```plain -set DEBUG=* && set ZEEBE_NODE_LOG_LEVEL=DEBUG && set GRPC_VERBOSITY=DEBUG && set GRPC_TRACE=all && "Camunda Modeler.exe" -``` - - - - - -```plain -DEBUG=* ZEEBE_NODE_LOG_LEVEL=DEBUG GRPC_VERBOSITY=DEBUG GRPC_TRACE=all camunda-modeler -``` - - - - - -```plain -DEBUG=* ZEEBE_NODE_LOG_LEVEL=DEBUG GRPC_VERBOSITY=DEBUG GRPC_TRACE=all camunda-modeler -``` - - - - -## Other questions? 
- -Head over to the [Modeler category on the forum](https://forum.camunda.io/c/modeler/6) to receive help from the community. diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-id.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-id.png deleted file mode 100644 index a6515280e10..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-literal-expression.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-literal-expression.png deleted file mode 100644 index 15075ddf9a3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-literal-expression.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-name.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-name.png deleted file mode 100644 index e3e28485802..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-literal-expression/decision-name.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/decision.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/decision.png deleted file mode 100644 index 6da51acd9b1..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/decision.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drd.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drd.png deleted file mode 100644 index ab35482e5e6..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drd.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drg-id.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drg-id.png deleted file mode 100644 index e152ad0a613..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drg-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drg-name.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drg-name.png deleted file mode 100644 index 30d66b7d8d1..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/drg-name.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/input-data.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/input-data.png deleted file mode 100644 index 61aa02687e0..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/input-data.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/knowledge-source.png 
b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/knowledge-source.png deleted file mode 100644 index 4d7f5f47eae..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/knowledge-source.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/required-decision.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/required-decision.png deleted file mode 100644 index 98b0396dfb4..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-requirements-graph/required-decision.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/decision-id.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/decision-id.png deleted file mode 100644 index a5a9f391e71..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/decision-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/decision-name.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/decision-name.png deleted file mode 100644 index 2cab2c86b64..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/decision-name.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/description.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/description.png deleted file mode 100644 index ba2d088845d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/description.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/dish-table.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/dish-table.png deleted file mode 100644 index 3aa17a46c51..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/dish-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/dish-table.svg b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/dish-table.svg deleted file mode 100644 index 5633b2c8aa7..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/dish-table.svg +++ /dev/null @@ -1,1176 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - - Output Entry (Conclusion) - - - - - Input Entry (Condition) - - - - - Rule - - - - - Output Type Definition - - - - - Output Name - - - - Input Type Definition - - - - - - Input Expression - - - - - Hit Policy - - - - - Decision Name & Id - - - - - - - - - diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-any.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-any.png deleted file mode 100644 index 0c61806be04..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-any.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-count.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-count.png deleted file mode 100644 index 94f7d586af1..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-count.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-max.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-max.png deleted file mode 100644 index acb28866c67..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-max.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-min.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-min.png deleted file mode 100644 index b2e9686e0c3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-min.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-sum.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-sum.png deleted file mode 100644 index e4857618631..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect-sum.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect.png deleted file mode 100644 index 9a6ce79e196..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-collect.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-first.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-first.png deleted file mode 100644 index 458f10e2a16..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-first.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-rule-order.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-rule-order.png deleted file mode 100644 index 5afc537e605..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-rule-order.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-unique.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-unique.png deleted file mode 100644 index 00d93ae7933..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy-unique.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy.png deleted file mode 100644 index c845b5e8d08..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/hit-policy.png and /dev/null differ diff 
--git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-entry.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-entry.png deleted file mode 100644 index cafbaf5599d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-entry.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-expression.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-expression.png deleted file mode 100644 index 89da4ddd094..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-expression.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-label.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-label.png deleted file mode 100644 index 4b55c4a56bd..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-label.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-type-definition.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-type-definition.png deleted file mode 100644 index 1970fee8234..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input-type-definition.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input.png deleted file mode 100644 index 10d7988af43..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/input.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/map.js b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/map.js deleted file mode 100644 index 83c159d53ac..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/map.js +++ /dev/null @@ -1,178 +0,0 @@ -/* Use this as helper to determine the overlay % / regions -const img = document.querySelector(".no-lightbox img"); - -function offset(element) { - const rect = element.getBoundingClientRect(), - const left = window.pageXOffset || document.documentElement.scrollLeft, - const top = window.pageYOffset || document.documentElement.scrollTop; - return { top: rect.top + top, left: rect.left + left } -} - -img.addEventListener("click", (e) => { - console.log(`top: ${(e.pageY - offset(img).top) / img.height * 100}`); - console.log(`left: ${(e.pageX - offset(img).left) / img.width * 100}`); -}); */ - -(function () { - "use strict"; - var keys = Object.keys; - - function eachKeys(obj, ite) { - keys(obj).forEach(function (key) { - ite(key, obj[key]); - }); - } - - function query(sel, ctx) { - ctx = ctx || document; - return ctx.querySelector(sel); - } - - function mkEl(tagName, attrs) { - attrs = attrs || {}; - var el = document.createElement(tagName); - eachKeys(attrs, function (key, val) { - el.setAttribute(key, val); - }); - return el; - } - - var regions = { - name: { - title: "Decision Name", - href: "#decision-name", - coords: [ - { - top: 0, - left: 0.5, - width: 13.5, - height: 5.5, - }, - { - top: 7, - left: 0.7, - width: 12, - height: 14.5, - }, - ], - }, - - hitPolicy: { - title: "Hit policy", - href: 
"hit-policy/", - coords: [ - { - top: 3.6, - left: 19, - width: 8, - height: 5, - }, - { - top: 10, - left: 13, - width: 20, - height: 9.5, - }, - ], - }, - - rule: { - title: "Rule", - href: "rule", - coords: [ - { - top: 95, - left: 30, - width: 4, - height: 5.5, - }, - { - top: 52, - left: 0, - width: 100, - height: 7.5, - }, - ], - }, - - conditions: { - title: "Input Entry (Condition)", - href: "rule/#input-entry-condition", - coords: [ - { - top: 95, - left: 6, - width: 20.5, - height: 5.5, - }, - { - top: 65, - left: 6, - width: 18, - height: 7.5, - }, - ], - }, - - conclusions: { - title: "Output Entry (Conclusion)", - href: "rule/#output-entry-conclusion", - coords: [ - { - top: 95, - left: 41.5, - width: 23, - height: 5.5, - }, - { - top: 66, - left: 43, - width: 19.0, - height: 6.5, - }, - ], - }, - }; - - var fig = query("figure.no-lightbox"); - var img = query("img", fig); - - var wrapper = mkEl("div", { - style: - "transition:all 0.218s linear;position:absolute;" + - "top:0;left:0;" + - "width:100%;" + - "height:100%;", - }); - - var holder = img.parentNode; - holder.appendChild(wrapper); - holder.style.position = "relative"; - - keys(regions).forEach(function (name) { - regions[name].coords.forEach(function (coords /*, c*/) { - var el = mkEl("a", { - href: regions[name].href, - title: regions[name].title, - style: - "position:absolute;" + - "display:block;" + - // 'background-color:rgba('+ (c ? '0,122' : '122,0') +',0,0.5);' + - "cursor:pointer;" + - "top:" + - coords.top + - "%;" + - "left:" + - coords.left + - "%;" + - "width:" + - coords.width + - "%;" + - "height:" + - coords.height + - "%;", - }); - wrapper.appendChild(el); - }); - }); -})(); diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-entry.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-entry.png deleted file mode 100644 index add59d59c9f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-entry.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-label.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-label.png deleted file mode 100644 index 2c26031cd3f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-label.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-name.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-name.png deleted file mode 100644 index f514fbeb709..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-name.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-type-definition.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-type-definition.png deleted file mode 100644 index 9550194ac33..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output-type-definition.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output.png deleted file mode 100644 index c3df7df04a2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/output.png and 
/dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/rule.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/rule.png deleted file mode 100644 index 79e1bdd82b2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/decision-table/rule.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/decision-table.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/decision-table.png deleted file mode 100644 index 3c82b341f71..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/decision-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/demo.gif b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/demo.gif deleted file mode 100644 index b0e078feec9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/demo.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-double-click.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-double-click.png deleted file mode 100644 index 55408d90653..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-double-click.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-drd-prop-panel.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-drd-prop-panel.png deleted file mode 100644 index 59c13da9b9c..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-drd-prop-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-right-click.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-right-click.png deleted file mode 100644 index 0e3ac34a657..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-right-click.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-toggle-overview.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-toggle-overview.png deleted file mode 100644 index 99928a3a23a..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/dmn-modeler-toggle-overview.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/literal-expression.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/literal-expression.png deleted file mode 100644 index dcc56f404d9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/literal-expression.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/main.png b/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/main.png deleted file mode 100644 index 02df5cf7bc5..00000000000 Binary files 
a/versioned_docs/version-8.2/components/modeler/dmn/assets/desktop-modeler-dmn/main.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/dmn/decision-literal-expression.md b/versioned_docs/version-8.2/components/modeler/dmn/decision-literal-expression.md deleted file mode 100644 index 90e8871ad87..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/decision-literal-expression.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -id: decision-literal-expression -title: Decision literal expression -description: A decision literal expression represents decision logic which can be depicted as an expression in DMN. ---- - -![Decision literal expression](assets/decision-literal-expression/decision-literal-expression.png) - -A decision literal expression represents decision logic which can be depicted as an expression. It consists of -a [literal expression](#literal-expression) and a [variable](#variable). - -A decision literal expression is represented by a `literalExpression` element inside a `decision` XML element. - -```xml - - - - - - calendar.getSeason(date) - - - -``` - -## Decision name - -![Decision Name](assets/decision-literal-expression/decision-name.png) - -The name describes the decision for which the literal expression provides the decision logic. It is set as the `name` -attribute on the `decision` element. - -```xml - - - - -``` - -## Decision id - -![Decision Id](assets/decision-literal-expression/decision-id.png) - -The id is the technical identifier of the decision. It is set in the `id` -attribute on the `decision` element. - -Each decision should have an unique id when it is deployed to Camunda. - -:::caution - -The decision id may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). - -The decision id can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to -use the `camelCase` or the `snake_case` format. The `kebab-case` format is not allowed because it contains the -operator `-`. - -If the decision id contain a special character or symbol then the decision result can't be accessed in -a [dependent decision](decision-requirements-graph.md#required-decisions). - -::: - -```xml - - - - -``` - -## Literal expression - -The literal expression specifies how the value of the decision is generated. It can be used to do a complex calculation, -or to combine the output values of [required decisions](decision-requirements-graph.md#required-decisions). - -The expression language of the literal expression -is [FEEL](/components/modeler/feel/language-guide/feel-expressions-introduction.md). - -The expression is set inside a `text` element that is a child of the `literalExpression` XML element. - -```xml - - - calendar.getSeason(date) - -``` - -## Variable - -A decision literal expression must have a variable which specifies the name and the type of the decision result. A -variable is represented by a `variable` element inside a `decision` XML element. - -```xml - - - - -``` - -### Variable name - -The name of the variable is used to reference the value of the literal expression in the decision result. It is -specified by the `name` attribute on the `variable` XML element. - -:::caution - -The variable name may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). - -The variable name can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended -to use the `camelCase` or the `snake_case` format. 
The `kebab-case` format is not allowed because it contains the -operator `-`. - -If the variable name contain a special character or symbol then the decision result can't be accessed in -a [dependent decision](decision-requirements-graph.md#required-decisions). - -::: - -:::tip - -It is recommended to use the decision id as the variable name. - -In contrast to decision tables, the result of a decision literal expression can be accessed in -a [dependent decision](decision-requirements-graph.md#required-decisions) by its variable name instead of its decision -id. - -::: - -```xml - - -``` - -### Variable type definition - -The type of the decision result can be specified by the `typeRef` attribute on the -`variable` XML element. - -After the expression is evaluated it checks if the result converts to the specified type. The type should be one of the -supported [data types](dmn-data-types.md). - -```xml - - -``` - -Note that the type is not required but recommended since it provides a type safety of the expression result. diff --git a/versioned_docs/version-8.2/components/modeler/dmn/decision-requirements-graph.md b/versioned_docs/version-8.2/components/modeler/dmn/decision-requirements-graph.md deleted file mode 100644 index 04efa0946b6..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/decision-requirements-graph.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -id: decision-requirements-graph -title: Decision requirements graph -description: A Decision Requirements Graph (DRG) models a domain of decision-making, showing the most important elements involved in it and the dependencies between them. ---- - -![Decision Requirements Graph](assets/decision-requirements-graph/drd.png) - -A Decision Requirements Graph (DRG) models a domain of decision-making, showing the most important elements involved in -it and the dependencies between them. The elements modeled are [decisions](#decision), [input data](#input-data), -and [knowledge sources](#knowledge-source). - -The visual representation of a DRG is called Decision Requirements Diagram (DRD). - -In the XML a DRG is represented by the `definitions` element. - -```xml - - - - - - - - - -``` - -## Decision requirements graph name - -![Decision Requirements Graph Name](assets/decision-requirements-graph/drg-name.png) - -The name describes the DRG. It is set as the `name` attribute on the `definitions` element. - -```xml - - - - -``` - -## Decision requirements graph id - -![Decision Requirements Graph Id](assets/decision-requirements-graph/drg-id.png) - -The id is the technical identifier of the DRG. It is set in the `id` attribute on the `definitions` element. - -Each DRG should have an unique id when it is deployed to Camunda. - -```xml - - - - -``` - -## Decision - -![Decision](assets/decision-requirements-graph/decision.png) - -A decision requirements graph can have one or more decisions. A decision has a [name](decision-table.md#decision-name) -which is shown in the DRD and an [id](decision-table.md#decision-id). The decision logic inside the decision must be -either a [decision table](decision-table.md) or a [decision literal expression](decision-literal-expression.md). - -A decision is represented by a `decision` element inside the `definitions` XML element. - -```xml - - - - - - - - -``` - -## Required decisions - -![Required Decision](assets/decision-requirements-graph/required-decision.png) - -A decision can have one or more required decisions which it depends on. 
- -A required decision is represented by a `requiredDecision` element inside an `informationRequirement` XML element. It -has a `href` attribute and the value starts with `#` followed by the [decision id](decision-table.md#decision-id) of the -required decision. - -:::tip - -The result of a required decision can be accessed in the dependent decision by its decision id. - -If the required decision is a decision table and has more than one output then the output values are grouped under the -decision id and can be accessed by their [output names](decision-table-output.md#output-name) ( -e.g. `decisionId.outputName`). The structure of the result depends on the decision -table [hit policy](decision-table-hit-policy.md). - -::: - -```xml - - - - - - - -``` - -## Input data - -![Input Data](assets/decision-requirements-graph/input-data.png) - -An input data denotes information used as an input by one or more decisions. - -It is represented by an `inputData` element inside the `definitions` element. - -```xml - - - - - - - - - - -``` - -Note that an input data has no execution semantics and is ignored on the evaluation. - -## Knowledge source - -![Knowledge Source](assets/decision-requirements-graph/knowledge-source.png) - -A knowledge source denotes an authority for a Decision. - -It is represented by a `knowledgeSource` element inside the `definitions` element. - -```xml - - - - - - - - - - -``` - -Note that a knowledge source has no execution semantics and is ignored on the evaluation. diff --git a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-hit-policy.md b/versioned_docs/version-8.2/components/modeler/dmn/decision-table-hit-policy.md deleted file mode 100644 index 20b2bbd4242..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-hit-policy.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -id: decision-table-hit-policy -title: Hit policy -description: Specifies what the results of the evaluation of a decision table consist of. ---- - -![Hit Policy](assets/decision-table/hit-policy.png) - -A decision table has a hit policy that specifies what the results of the evaluation of a decision table consist of. - -The hit policy is set in the `hitPolicy` attribute on the `decisionTable` XML element. If no hit policy is set, then the -default hit policy `UNIQUE` is used. - -```xml - - - - - - - - -``` - -The following hit policies are supported: - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Hit Policy | XML representation |
-| ---------- | ------------------ |
-| Unique     | UNIQUE             |
-| Any        | ANY                |
-| First      | FIRST              |
-| Rule order | RULE ORDER         |
-| Collect    | COLLECT            |
-
-## The role of a hit policy
-
-A hit policy specifies how many rules of a decision table can be satisfied and which of the satisfied rules are included in the decision result.
-
-The hit policies [Unique](#unique), [Any](#any) and [First](#first) will always return a maximum of one satisfied rule. The hit policies [Rule Order](#rule-order) and [Collect](#collect) can return multiple satisfied rules.
-
-## Unique
-
-Only a single rule can be satisfied, or no rule at all. The decision table result contains the output entries of the satisfied rule.
-
-If more than one rule is satisfied, the Unique hit policy is violated.
-
-See the following decision table:
-
-![Hit Policy Unique](assets/decision-table/hit-policy-unique.png)
-
-Depending on the current season, the dish should be chosen. Only one dish can be chosen, since only one season can exist at the same time.
-
-## Any
-
-Multiple rules can be satisfied. However, all satisfied rules must generate the same output. The decision table result contains only the output of one of the satisfied rules.
-
-If multiple rules are satisfied which generate different outputs, the hit policy is violated.
-
-See the following example:
-
-![Hit Policy Any](assets/decision-table/hit-policy-any.png)
-
-This is a decision table for a leave application. If the applicant has no vacation days left or is currently in the probation period, the application will be refused. Otherwise, the application is accepted.
-
-## First
-
-Multiple rules can be satisfied. The decision table result contains only the output of the first satisfied rule.
-
-![Hit Policy First](assets/decision-table/hit-policy-first.png)
-
-See the decision table for advertisements above. Based on the user's current age, it decides which advertisement should be shown. For example, if the user is 19 years old, all rules match, but since the hit policy is First, only the advertisement for Cars is used.
-
-## Rule order
-
-Multiple rules can be satisfied. The decision table result contains the output of all satisfied rules in the order of the rules in the decision table.
-
-![Hit Policy Rule Order](assets/decision-table/hit-policy-rule-order.png)
-
-Again, see the advertisement example, this time with the Rule order hit policy. Say we have a user at the age of 19 again. All rules are satisfied, so all outputs are returned, ordered as the rules appear in the table. This can be used, for example, to indicate the priority of the displayed advertisements.
-
-## Collect
-
-Multiple rules can be satisfied. The decision table result contains the output of all satisfied rules in an arbitrary order as a list.
-
-![Hit Policy Collect](assets/decision-table/hit-policy-collect.png)
-
-With this hit policy, the output list has no ordering. So, if the age is 19, for example, the order of the returned advertisements is arbitrary.
-
-Additionally, an aggregator can be specified for the Collect hit policy. If an aggregator is specified, the decision table result will only contain a single output entry. The aggregator will generate the output entry from all satisfied rules.
-
-:::info
-If the Collect hit policy is used with an aggregator, the decision table can only have one output.
-:::
-
-The aggregator is set as the `aggregation` attribute of the `decisionTable` XML element.
-
-```xml
-
-
-
-```
-
-### Aggregators for collect
-
-In the visual representation of the decision table, an aggregator can be selected in addition to the `COLLECT` hit policy.
The following aggregators are supported: - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Visual representation | XML representation | Result of the aggregation               |
-| --------------------- | ------------------ | --------------------------------------- |
-| Collect (Sum)         | SUM                | the sum of all output values            |
-| Collect (Min)         | MIN                | the smallest value of all output values |
-| Collect (Max)         | MAX                | the largest value of all output values  |
-| Collect (Count)       | COUNT              | the number of output values             |
-
-#### SUM aggregator
-
-The SUM aggregator sums up all outputs from the satisfied rules.
-
-![Hit Policy Collect SUM](assets/decision-table/hit-policy-collect-sum.png)
-
-The decision table shown above can be used to sum up the salary bonus for an employee. For example, if the employee has been working in the company for 3.5 years, the first, second, and third rules will match, and the result of the decision table is 600, since the outputs are summed up.
-
-#### MIN aggregator
-
-The MIN aggregator can be used to return the smallest output value of all satisfied rules. See the following example of a car insurance: after years without a car crash, the insurance fee will be reduced.
-
-![Hit Policy Collect MIN](assets/decision-table/hit-policy-collect-min.png)
-
-For example, if the input for the decision table is 3.5 years, the result will be 98.83, since the first three rules match but the third rule has the minimal output.
-
-#### MAX aggregator
-
-The MAX aggregator can be used to return the largest output value of all satisfied rules.
-
-![Hit Policy Collect MAX](assets/decision-table/hit-policy-collect-max.png)
-
-This decision table represents the decision for the amount of pocket money for a child. Depending on the age, the amount grows. For example, an input of 9 will satisfy the first and second rules. The output of the second rule is larger than the output of the first rule, so the output will be 5. A child at the age of 9 will get 5 as pocket money.
-
-#### COUNT aggregator
-
-The COUNT aggregator can be used to return the count of satisfied rules.
-
-![Hit Policy Collect COUNT](assets/decision-table/hit-policy-collect-count.png)
-
-For example, see the salary bonus decision table again, this time with the COUNT aggregator. With an input of 4, the first three rules will be satisfied. Therefore, the result of the decision table will be 3, which means that after 4 years the employee receives 3 salary bonuses.
diff --git a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-input.md b/versioned_docs/version-8.2/components/modeler/dmn/decision-table-input.md
deleted file mode 100644
index 479d9f181ad..00000000000
--- a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-input.md
+++ /dev/null
@@ -1,107 +0,0 @@
----
-id: decision-table-input
-title: Input
-description: Specify the inputs of decision tables.
----
-
-![Input](assets/decision-table/input.png)
-
-A decision table can have one or more inputs, also called input clauses. An input clause defines the id, label, expression and type of a decision table input.
-
-An input can be edited by double-clicking on the respective column header in the decision table.
-
-An input clause is represented by an `input` element inside a `decisionTable` XML element.
-
-```xml
-
-
-
-
-
-
-  season
-
-
-
-
-
-```
-
-## Input id
-
-The input id is a unique identifier of the decision table input. It is used by Camunda to reference the input clause. Therefore, it is required. It is set as the `id` attribute of the `input` XML element.
-
-```xml
-
-
-
-  season
-
-
-```
-
-## Input label
-
-![Input Label](assets/decision-table/input-label.png)
-
-An input label is a short description of the input. It is set on the `input` XML element in the `label` attribute. Note that the label is not required but recommended, since it helps to understand the decision.
- -```xml - - - - season - - -``` - -## Input expression - -![Input Expression](assets/decision-table/input-expression.png) - -An input expression specifies how the value of the input clause is generated. It is usually simple and references a -variable which is available during the evaluation. - -The expression language of the input expression -is [FEEL](/components/modeler/feel/language-guide/feel-expressions-introduction.md). - -The expression is set inside a `text` element that is a child of the -`inputExpression` XML element. - -```xml - - - - season - - -``` - -## Input type definition - -![Input Type Definition](assets/decision-table/input-type-definition.png) - -The type of the input clause can be specified by the `typeRef` attribute on the -`inputExpression` XML element. - -After the input expression is evaluated, it checks if the result converts to the specified type. The type should be one -of the supported [data types](dmn-data-types.md). - -```xml - - - - season - - -``` - -Note that the type is not required but recommended, since it helps to understand the possible input values and provides -a type safety to be aware of unexpected input values. diff --git a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-output.md b/versioned_docs/version-8.2/components/modeler/dmn/decision-table-output.md deleted file mode 100644 index 61c74f129d5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-output.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -id: decision-table-output -title: Output -description: Specify the outputs of decision tables. ---- - -![Output](assets/decision-table/output.png) - -A decision table can have one or more outputs, also called output clauses. An output clause defines the id, label, name -and type of a decision table output. - -An output clause is represented by an `output` element inside a `decisionTable` -XML element. - -```xml - - - - - - - - - - - -``` - -## Output id - -The output id is a unique identifier of the decision table output. It is used by Camunda to reference the -output clause. Therefore, it is required. It is set as the `id` attribute of the `output` XML element. - -```xml - - -``` - -## Output label - -![Output Label](assets/decision-table/output-label.png) - -An output label is a short description of the output. It is set on the `output` -XML element in the `label` attribute. Note that the label is not required but recommended, since it helps to understand -the decision. - -```xml - - -``` - -## Output name - -![Output Name](assets/decision-table/output-name.png) - -The name of the output is used to reference the value of the output. - -It is specified by the `name` attribute on the `output` XML element. - -If the decision table has more than one output, then all outputs must have a unique name. - -:::caution - -The output name may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). - -The output name can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to -use the `camelCase` or the `snake_case` format. The `kebab-case` format is not allowed because it contains the -operator `-`. - -If the output name contain a special character or symbol then the output can't be accessed in -a [dependent decision](decision-requirements-graph.md#required-decisions) nor in a calling BPMN process. 
- -::: - -:::tip - -If the decision table has only one output then it is recommended that the [decision id](decision-table.md#decision-id) -is used as the output name. - -The decision result can be accessed in a [dependent decision](decision-requirements-graph.md#required-decisions) by its -decision id. Only if the decision table has more than one output then the output values are grouped under the decision -id and can be accessed by their output names ( e.g. `decisionId.outputName`). - -::: - -```xml - - -``` - -## Output type definition - -![Output Type Definition](assets/decision-table/output-type-definition.png) - -The type of the output clause can be specified by the `typeRef` attribute on the -`output` XML element. - -After an output entry is evaluated, it checks if the result converts to the specified type. The type should be one of -the supported [data types](dmn-data-types.md). - -```xml - - -``` - -Note that the type is not required but recommended, since it provides a type safety of the output values. diff --git a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-rule.md b/versioned_docs/version-8.2/components/modeler/dmn/decision-table-rule.md deleted file mode 100644 index 0ac6aa60e5f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/decision-table-rule.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -id: decision-table-rule -title: Rule -description: Specify conditions and conclusions. ---- - -![Rule](assets/decision-table/rule.png) - -A decision table can have one or more rules. Each rule contains input and output entries. The input entries are the -condition and the output entries the conclusion of the rule. If each input entry (condition) is satisfied, then the rule -is satisfied and the decision result contains the output entries -(conclusion) of this rule. - -A rule is represented by a `rule` element inside a `decisionTable` XML element. - -```xml - - - - - - - - "Winter" - - - - - - "Roastbeef" - - - - - - -``` - -## Input entry - -![Input Entry](assets/decision-table/input-entry.png) - -A rule can have one or more input entries, which are the conditions of the rule. Each input entry contains an expression -in a `text` element as child of an -`inputEntry` XML element. - -The expression language of the input entry is [FEEL](/components/modeler/feel/language-guide/feel-unary-tests.md) (unary-tests). - -The input entry is satisfied when the evaluated expression returns `true`. - -```xml - - - "Spring" - -``` - -### Empty input entry - -In case an input entry is irrelevant for a rule, the expression is empty, which is always satisfied. In FEEL, an empty -input entry is represented by a `-`. - -```xml - - - - -``` - -## Output entry - -![Output Entry](assets/decision-table/output-entry.png) - -A rule can have one or more output entries, which are the conclusions of the rule. Each output entry contains an -expression in a `text` element as child of an `outputEntry` XML element. - -The expression language of the output entry is [FEEL](/components/modeler/feel/language-guide/feel-expressions-introduction.md). - -```xml - - - "Steak" - -``` - -## Description - -![Description](assets/decision-table/description.png) - -A rule can be annotated with a description that provides additional information. The description text is set inside -the `description` XML element. 
- -```xml - - - Save money - - -``` diff --git a/versioned_docs/version-8.2/components/modeler/dmn/decision-table.md b/versioned_docs/version-8.2/components/modeler/dmn/decision-table.md deleted file mode 100644 index b9d9f9a3c30..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/decision-table.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -id: decision-table -title: Overview -description: This document outlines an overview of decision tables and their general properties. ---- - -![Decision Table](assets/decision-table/dish-table.png) - -A decision table represents decision logic which can be depicted as a table. It consists -of [inputs](decision-table-input.md), [outputs](decision-table-output.md) and [rules](decision-table-rule.md). - -A decision table is represented by a `decisionTable` element inside a -`decision` XML element. - -```xml - - - - - - - - -``` - -## Decision name - -![Decision Name](assets/decision-table/decision-name.png) - -The name describes the decision for which the decision table provides the decision logic. It is set as the `name` -attribute on the `decision` element. It can be changed via the Properties Panel after selecting the respective -"Decision" in the Decision Requirements Diagram view. - -```xml - - - - - - -``` - -## Decision id - -![Decision Id](assets/decision-table/decision-id.png) - -The id is the technical identifier of the decision. It is set in the `id` -attribute on the `decision` element. Just as the `name`, the `id` can be changed via the Properties Panel after -selecting the respective "Decision" in the Decision Requirements Diagram view. - -Each decision should have an unique id when it is deployed to Camunda. - -:::caution - -The decision id may not contain any special characters or symbols (e.g. whitespace, dashes, etc.). - -The decision id can be any alphanumeric string including the `_` symbol. For a combination of words, it's recommended to -use the `camelCase` or the `snake_case` format. The `kebab-case` format is not allowed because it contains the -operator `-`. - -If the decision id contain a special character or symbol then the decision result can't be accessed in -a [dependent decision](decision-requirements-graph.md#required-decisions). - -::: - -```xml - - - - - - -``` diff --git a/versioned_docs/version-8.2/components/modeler/dmn/dmn-data-types.md b/versioned_docs/version-8.2/components/modeler/dmn/dmn-data-types.md deleted file mode 100644 index fa44f27d1b0..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/dmn-data-types.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: dmn-data-types -title: Data types -description: On overview of the available data types in DMN. 
---- - -Camunda supports the following data types for DMN: - -| Type name | Associated FEEL type | -| ----------------- | --------------------------------------------------------------------------------------------------------- | -| number | [Number](/components/modeler/feel/language-guide/feel-data-types.md#number) | -| string | [String](/components/modeler/feel/language-guide/feel-data-types.md#string) | -| boolean | [Boolean](/components/modeler/feel/language-guide/feel-data-types.md#boolean) | -| time | [Time](/components/modeler/feel/language-guide/feel-data-types.md#time) | -| date | [Date](/components/modeler/feel/language-guide/feel-data-types.md#date) | -| dateTime | [Date-Time](/components/modeler/feel/language-guide/feel-data-types.md#date-time) | -| dayTimeDuration | [Days-Time-Duration](/components/modeler/feel/language-guide/feel-data-types.md#days-time-duration) | -| yearMonthDuration | [Years-Months-Duration](/components/modeler/feel/language-guide/feel-data-types.md#years-months-duration) | -| Any | Wildcard for any type | - -The data types can be used in the type definitions of DMN elements, for example: - -- [Decision table input](decision-table-input.md#input-type-definition) -- [Decision table output](decision-table-output.md#output-type-definition) -- [Decision literal expression variable](decision-literal-expression.md#variable-type-definition) diff --git a/versioned_docs/version-8.2/components/modeler/dmn/dmn.md b/versioned_docs/version-8.2/components/modeler/dmn/dmn.md deleted file mode 100644 index 2391bf99176..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/dmn.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: desktop-modeler-dmn -title: DMN in Modeler -description: "Camunda Desktop and Web Modeler both offer the same Modeling experience for DMN 1.3 models: Modeling starts in the Decision Requirements Diagram (DRD) view." ---- - -## Start modeling - -![Start Modeling](assets/desktop-modeler-dmn/main.png) - -Camunda Desktop and Web Modeler both offer the same Modeling experience for DMN 1.3 models: Modeling starts in the Decision Requirements Diagram (DRD) view. From there, you can add DMN elements from the palette on the left side by dragging and dropping them onto the diagram canvas. - -Alternatively, you can add new elements by using the context menu that appears when you select an element in the diagram. Using the wrench icon in the context menu, you can change the type of an element in place. Use the properties panel on the right side to change the name or id of the DMN diagram. - -## Demo - -![Demo](assets/desktop-modeler-dmn/demo.gif) - -The demo above shows how to model a decision table. After creating a decision and morphing it into a decision table, you can start editing the table by clicking the overlay on the upper left corner of the decision. Using the overview in the decision table view, you can jump between decision tables. - -## DMN coverage - -Modeler supports the following DMN elements: - -- Decision (tables and literal expressions) -- Input data -- Knowledge source -- Business knowledge model - -## Decision tables - -![Decision Table](assets/desktop-modeler-dmn/decision-table.png) - -By clicking the blue icon on a decision table, you can open the decision table view and start to edit it. Add **Input**, **Output**, and **Rule** elements by clicking the plus signs. Edit a table cell by clicking on it. Alternatively, the tabulator and enter keys can be used to walk through the table cells. 
- -Delete a rule or a column, copy, or insert a new rule by right clicking in the cell: - -![Delete or copy rules](assets/desktop-modeler-dmn/dmn-modeler-right-click.png) - -Adjust the details of an input or output column (e.g., name, expression, and type) by double clicking in the header row: - -![Change input or output column](assets/desktop-modeler-dmn/dmn-modeler-double-click.png) - -Jump between decision tables or literal expressions in your decision requirement diagram by opening and using the `Overview` on the left side: - -![Jump between decision tables](assets/desktop-modeler-dmn/dmn-modeler-toggle-overview.png) - -## Literal expressions - -![New DMN Literal Expression](assets/desktop-modeler-dmn/literal-expression.png) - -You can also edit literal expressions. Just as with decision tables, in the decision requirement diagram view, click the blue icon to _drill-down_ into the literal expression view and start editing. diff --git a/versioned_docs/version-8.2/components/modeler/dmn/sidebar-schema.js b/versioned_docs/version-8.2/components/modeler/dmn/sidebar-schema.js deleted file mode 100644 index 5e13797a8ed..00000000000 --- a/versioned_docs/version-8.2/components/modeler/dmn/sidebar-schema.js +++ /dev/null @@ -1,17 +0,0 @@ -module.exports = { - DMN: [ - "components/modeler/dmn/desktop-modeler-dmn", - "components/modeler/dmn/decision-requirements-graph", - { - "Decision table": [ - "components/modeler/dmn/decision-table", - "components/modeler/dmn/decision-table-input", - "components/modeler/dmn/decision-table-output", - "components/modeler/dmn/decision-table-rule", - "components/modeler/dmn/decision-table-hit-policy", - ], - }, - "components/modeler/dmn/decision-literal-expression", - "components/modeler/dmn/dmn-data-types", - ], -}; diff --git a/versioned_docs/version-8.2/components/modeler/feel/assets/feel-built-in-functions-range-overview.png b/versioned_docs/version-8.2/components/modeler/feel/assets/feel-built-in-functions-range-overview.png deleted file mode 100644 index 793a40493e4..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/feel/assets/feel-built-in-functions-range-overview.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-boolean.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-boolean.md deleted file mode 100644 index e40ee1a3d37..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-boolean.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: feel-built-in-functions-boolean -title: Boolean functions -description: "This document outlines current boolean functions and a few examples." ---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -## not(negand) - -Returns the logical negation of the given value. - -**Function signature** - -```feel -not(negand: boolean): boolean -``` - -**Examples** - -```feel -not(true) -// false - -not(null) -// null -``` - -## is defined(value) - - - -Checks if a given value is defined. A value is defined if it exists, and it is an instance of one of the FEEL data types including `null`. - -The function can be used to check if a variable or a context entry (e.g. a property of a variable) exists. It allows differentiating between a `null` variable and a value that doesn't exist. 
- -**Function signature** - -```feel -is defined(value: Any): boolean -``` - -**Examples** - -```feel -is defined(1) -// true - -is defined(null) -// true - -is defined(x) -// false - if no variable "x" exists - -is defined(x.y) -// false - if no variable "x" exists or it doesn't have a property "y" -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-context.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-context.md deleted file mode 100644 index b779b20df60..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-context.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: feel-built-in-functions-context -title: Context functions -description: "This document outlines context functions and a few examples." ---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -## get value(context, key) - -Returns the value of the context entry with the given key. - -**Function signature** - -```feel -get value(context: context, key: string): Any -``` - -**Examples** - -```feel -get value({foo: 123}, "foo") -// 123 - -get value({a: 1}, "b") -// null -``` - -## get value(context, keys) - - - -Returns the value of the context entry for a context path defined by the given keys. - -If `keys` contains the keys `[k1, k2]` then it returns the value at the nested entry `k1.k2` of the context. - -If `keys` are empty or the nested entry defined by the keys doesn't exist in the context, it returns `null`. - -**Function signature** - -```feel -get value(context: context, keys: list): Any -``` - -**Examples** - -```feel -get value({x:1, y: {z:0}}, ["y", "z"]) -// 0 - -get value({x: {y: {z:0}}}, ["x", "y"]) -// {z:0} - -get value({a: {b: 3}}, ["b"]) -// null -``` - -## get entries(context) - -Returns the entries of the context as a list of key-value-pairs. - -**Function signature** - -```feel -get entries(context: context): list -``` - -The return value is a list of contexts. Each context contains two entries for "key" and "value". - -**Examples** - -```feel -get entries({foo: 123}) -// [{key: "foo", value: 123}] -``` - -## context put(context, key, value) - -Adds a new entry with the given key and value to the context. Returns a new context that includes the entry. - -If an entry for the same key already exists in the context, it overrides the value. - -**Function signature** - -```feel -context put(context: context, key: string, value: Any): context -``` - -**Examples** - -```feel -context put({x:1}, "y", 2) -// {x:1, y:2} -``` - -:::info -The function `context put()` replaced the previous function `put()` (Camunda Extension). The -previous function is deprecated and should not be used anymore. -::: - -## context put(context, keys, value) - -Adds a new entry with the given value to the context. The path of the entry is defined by the keys. Returns a new context that includes the entry. - -If `keys` contains the keys `[k1, k2]` then it adds the nested entry `k1.k2 = value` to the context. - -If an entry for the same keys already exists in the context, it overrides the value. - -If `keys` are empty, it returns `null`. 
- -**Function signature** - -```feel -context put(context: context, keys: list, value: Any): context -``` - -**Examples** - -```feel -context put({x:1}, ["y"], 2) -// {x:1, y:2} - -context put({x:1, y: {z:0}}, ["y", "z"], 2) -// {x:1, y: {z:2}} - -context put({x:1}, ["y", "z"], 2) -// {x:1, y: {z:2}} -``` - -## context merge(contexts) - -Union the given contexts. Returns a new context that includes all entries of the given contexts. - -If an entry for the same key already exists in a context, it overrides the value. The entries are overridden in the same order as in the list of contexts. - -**Function signature** - -```feel -context merge(contexts: list): context -``` - -**Examples** - -```feel -context merge([{x:1}, {y:2}]) -// {x:1, y:2} - -context merge([{x:1, y: 0}, {y:2}]) -// {x:1, y:2} -``` - -:::info -The function `context merge()` replaced the previous function `put all()` (Camunda Extension). The -previous function is deprecated and should not be used anymore. -::: diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-conversion.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-conversion.md deleted file mode 100644 index 6522611d83e..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-conversion.md +++ /dev/null @@ -1,295 +0,0 @@ ---- -id: feel-built-in-functions-conversion -title: Conversion functions -description: "This document outlines conversion functions and a few examples." ---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -Convert a value into a different type. - -## string(from) - -Returns the given value as a string representation. - -**Function signature** - -```feel -string(from: Any): string -``` - -**Examples** - -```feel -string(1.1) -// "1.1" - -string(date("2012-12-25")) -// "2012-12-25" -``` - -## number(from) - -Parses the given string to a number. - -Returns `null` if the string is not a number. - -**Function signature** - -```feel -number(from: string): number -``` - -**Examples** - -```feel -number("1500.5") -// 1500.5 -``` - -## context(entries) - -Constructs a context of the given list of key-value pairs. It is the reverse function to [get entries()](feel-built-in-functions-context.md#get-entriescontext). - -Each key-value pair must be a context with two entries: `key` and `value`. The entry with name `key` must have a value of the type `string`. - -It might override context entries if the keys are equal. The entries are overridden in the same order as the contexts in the given list. - -Returns `null` if one of the entries is not a context or if a context doesn't contain the required entries. - -**Function signature** - -```feel -context(entries: list): context -``` - -**Examples** - -```feel -context([{"key":"a", "value":1}, {"key":"b", "value":2}]) -// {a:1, b:2} -``` - -## date(from) - -Returns a date from the given value. - -Returns `null` if the string is not a valid calendar date. For example, `"2024-06-31"` is invalid because June has -only 30 days. - -**Function signature** - -```feel -date(from: string): date -``` - -Parses the given string into a date. - -```feel -date(from: date and time): date -``` - -Extracts the date component from the given date and time. 
- -**Examples** - -```feel -date("2018-04-29") -// date("2018-04-29") - -date(date and time("2012-12-25T11:00:00")) -// date("2012-12-25") -``` - -## date(year, month, day) - -Returns a date from the given components. - -Returns `null` if the components don't represent a valid calendar date. For example, `2024,6,31` is invalid because -June has only 30 days. - -**Function signature** - -```feel -date(year: number, month: number, day: number): date -``` - -**Examples** - -```feel -date(2012, 12, 25) -// date("2012-12-25") -``` - -## time(from) - -Returns a time from the given value. - -**Function signature** - -```feel -time(from: string): time -``` - -Parses the given string into a time. - -```feel -time(from: date and time): time -``` - -Extracts the time component from the given date and time. - -**Examples** - -```feel -time("12:00:00") -// time("12:00:00") - -time(date and time("2012-12-25T11:00:00")) -// time("11:00:00") -``` - -## time(hour, minute, second) - -Returns a time from the given components. - -**Function signature** - -```feel -time(hour: number, minute: number, second: number): time -``` - -**Examples** - -```feel -time(23, 59, 0) -// time("23:59:00") -``` - -## time(hour, minute, second, offset) - -Returns a time from the given components, including a timezone offset. - -**Function signature** - -```feel -time(hour: number, minute: number, second: number, offset: days and time duration): time -``` - -**Examples** - -```feel -time(14, 30, 0, duration("PT1H")) -// time("14:30:00+01:00") -``` - -## date and time(from) - -Parses the given string into a date and time. - -Returns `null` if the string is not a valid calendar date. For example, `"2024-06-31T10:00:00"` is invalid because -June has only 30 days. - -**Function signature** - -```feel -date and time(from: string): date and time -``` - -**Examples** - -```feel -date and time("2018-04-29T09:30:00") -// date and time("2018-04-29T09:30:00") -``` - -## date and time(date, time) - -Returns a date and time from the given components. - -**Function signature** - -```feel -date and time(date: date, time: time): date and time -``` - -```feel -date and time(date: date and time, time: time): date and time -``` - -Returns a date and time value that consists of the date component of `date` combined with `time`. - -**Examples** - -```feel -date and time(date("2012-12-24"),time("T23:59:00")) -// date and time("2012-12-24T23:59:00") - -date and time(date and time("2012-12-25T11:00:00"),time("T23:59:00")) -// date and time("2012-12-25T23:59:00") -``` - -## date and time(date, timezone) - - - -Returns the given date and time value at the given timezone. - -If `date` has a different timezone than `timezone` then it adjusts the time to match the local time of `timezone`. - -**Function signature** - -```feel -date and time(date: date and time, timezone: string): date and time -``` - -**Examples** - -```feel -date and time(@"2020-07-31T14:27:30@Europe/Berlin", "America/Los_Angeles") -// date and time("2020-07-31T05:27:30@America/Los_Angeles") - -date and time(@"2020-07-31T14:27:30", "Z") -// date and time("2020-07-31T12:27:30Z") -``` - -## duration(from) - -Parses the given string into a duration. The duration is either a days and time duration or a years and months duration. 
- -**Function signature** - -```feel -duration(from: string): days and time duration -``` - -```feel -duration(from: string): years and months duration -``` - -**Examples** - -```feel -duration("P5D") -// duration("P5D") - -duration("P32Y") -// duration("P32Y") -``` - -## years and months duration(from, to) - -Returns the years and months duration between `from` and `to`. - -**Function signature** - -```feel -years and months duration(from: date, to: date): years and months duration -``` - -**Examples** - -```feel -years and months duration(date("2011-12-22"), date("2013-08-24")) -// duration("P1Y8M") -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-introduction.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-introduction.md deleted file mode 100644 index fe267aa0dfa..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-introduction.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: feel-built-in-functions-introduction -title: Introduction -description: "FEEL includes many built-in functions. These functions can be invoked -in expressions and unary-tests." ---- - -FEEL includes many built-in functions. These functions can be invoked -in [expressions](../language-guide/feel-expressions-introduction.md) -and [unary-tests](../language-guide/feel-unary-tests.md). - -```feel -contains("me@camunda.com", ".com") -// invoke function with positional arguments - -contains(string: "me@camunda.com", match: ".de") -// invoke function with named arguments -``` - -Read more about functions [here](../language-guide/feel-functions.md#invocation). - -This section is split into functions based on their primary operational data type: - -- [Boolean](./feel-built-in-functions-boolean.md) -- [String](./feel-built-in-functions-string.md) -- [Numeric](./feel-built-in-functions-numeric.md) -- [List](./feel-built-in-functions-list.md) -- [Context](./feel-built-in-functions-context.md) -- [Temporal](./feel-built-in-functions-temporal.md) -- [Range](./feel-built-in-functions-range.md) - -Additionally, there are [conversion](./feel-built-in-functions-conversion.md) functions that allow -you to construct new values of a data type (factory functions). diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md deleted file mode 100644 index cc200b5b222..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-list.md +++ /dev/null @@ -1,582 +0,0 @@ ---- -id: feel-built-in-functions-list -title: List functions -description: "This document outlines built-in list functions and examples." ---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -## list contains(list, element) - -Returns `true` if the given list contains the element. Otherwise, returns `false`. - -**Function signature** - -```feel -list contains(list: list, element: Any): boolean -``` - -**Examples** - -```feel -list contains([1,2,3], 2) -// true -``` - -## count(list) - -Returns the number of elements of the given list. - -**Function signature** - -```feel -count(list: list): number -``` - -**Examples** - -```feel -count([1,2,3]) -// 3 -``` - -## min(list) - -Returns the minimum of the given list. 
- -**Function signature** - -```feel -min(list: list): Any -``` - -All elements in `list` should have the same type and be comparable. - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -min([1,2,3]) -// 1 - -min(1,2,3) -// 1 -``` - -## max(list) - -Returns the maximum of the given list. - -**Function signature** - -```feel -max(list: list): Any -``` - -All elements in `list` should have the same type and be comparable. - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -max([1,2,3]) -// 3 - -max(1,2,3) -// 3 -``` - -## sum(list) - -Returns the sum of the given list of numbers. - -**Function signature** - -```feel -sum(list: list): number -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -sum([1,2,3]) -// 6 - -sum(1,2,3) -// 6 -``` - -## product(list) - -Returns the product of the given list of numbers. - -**Function signature** - -```feel -product(list: list): number -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -product([2, 3, 4]) -// 24 - -product(2, 3, 4) -// 24 -``` - -## mean(list) - -Returns the arithmetic mean (i.e. average) of the given list of numbers. - -**Function signature** - -```feel -mean(list: list): number -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -mean([1,2,3]) -// 2 - -mean(1,2,3) -// 2 -``` - -## median(list) - -Returns the median element of the given list of numbers. - -**Function signature** - -```feel -median(list: list): number -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -median(8, 2, 5, 3, 4) -// 4 - -median([6, 1, 2, 3]) -// 2.5 -``` - -## stddev(list) - -Returns the standard deviation of the given list of numbers. - -**Function signature** - -```feel -stddev(list: list): number -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -stddev(2, 4, 7, 5) -// 2.0816659994661326 - -stddev([2, 4, 7, 5]) -// 2.0816659994661326 -``` - -## mode(list) - -Returns the mode of the given list of numbers. - -**Function signature** - -```feel -mode(list: list): number -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -mode(6, 3, 9, 6, 6) -// [6] - -mode([6, 1, 9, 6, 1]) -// [1, 6] -``` - -## all(list) - -Returns `false` if any element of the given list is `false`. Otherwise, returns `true`. - -If the given list is empty, it returns `true`. - -**Function signature** - -```feel -all(list: list): boolean -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -and([true,false]) -// false - -and(false,null,true) -// false -``` - -:::info -The function `all()` replaced the previous function `and()`. The previous function is deprecated and -should not be used anymore. -::: - -## any(list) - -Returns `true` if any element of the given list is `true`. Otherwise, returns `false`. - -If the given list is empty, it returns `false`. - -**Function signature** - -```feel -any(list: list): boolean -``` - -The parameter `list` can be passed as a list or as a sequence of elements. - -**Examples** - -```feel -or([false,true]) -// true - -or(false,null,true) -// true -``` - -:::info -The function `any()` replaced the previous function `or()`. 
The previous function is deprecated and -should not be used anymore. -::: - -## sublist(list, start position) - -Returns a partial list of the given value starting at `start position`. - -**Function signature** - -```feel -sublist(list: list, start position: number): list -``` - -The `start position` starts at the index `1`. The last position is `-1`. - -**Examples** - -```feel -sublist([1,2,3], 2) -// [2,3] -``` - -## sublist(list, start position, length) - -Returns a partial list of the given value starting at `start position`. - -**Function signature** - -```feel -sublist(list: list, start position: number, length: number): list -``` - -The `start position` starts at the index `1`. The last position is `-1`. - -**Examples** - -```feel -sublist([1,2,3], 1, 2) -// [1,2] -``` - -## append(list, items) - -Returns the given list with all `items` appended. - -**Function signature** - -```feel -append(list: list, items: Any): list -``` - -The parameter `items` can be a single element or a sequence of elements. - -**Examples** - -```feel -append([1], 2, 3) -// [1,2,3] -``` - -## concatenate(lists) - -Returns a list that includes all elements of the given lists. - -**Function signature** - -```feel -concatenate(lists: list): list -``` - -The parameter `lists` is a sequence of lists. - -**Examples** - -```feel -concatenate([1,2],[3]) -// [1,2,3] - -concatenate([1],[2],[3]) -// [1,2,3] -``` - -## insert before(list, position, newItem) - -Returns the given list with `newItem` inserted at `position`. - -**Function signature** - -```feel -insert before(list: list, position: number, newItem: Any): list -``` - -The `position` starts at the index `1`. The last position is `-1`. - -**Examples** - -```feel -insert before([1,3],1,2) -// [1,2,3] -``` - -## remove(list, position) - -Returns the given list without the element at `position`. - -**Function signature** - -```feel -remove(list: list, position: number): list -``` - -The `position` starts at the index `1`. The last position is `-1`. - -**Examples** - -```feel -remove([1,2,3], 2) -// [1,3] -``` - -## reverse(list) - -Returns the given list in revered order. - -**Function signature** - -```feel -reverse(list: list): list -``` - -**Examples** - -```feel -reverse([1,2,3]) -// [3,2,1] -``` - -## index of(list, match) - -Returns an ascending list of positions containing `match`. - -**Function signature** - -```feel -index of(list: list, match: Any): list -``` - -**Examples** - -```feel -index of([1,2,3,2],2) -// [2,4] -``` - -## union(list) - -Returns a list that includes all elements of the given lists without duplicates. - -**Function signature** - -```feel -union(list: list): list -``` - -The parameter `list` is a sequence of lists. - -**Examples** - -```feel -union([1,2],[2,3]) -// [1,2,3] -``` - -## distinct values(list) - -Returns the given list without duplicates. - -**Function signature** - -```feel -distinct values(list: list): list -``` - -**Examples** - -```feel -distinct values([1,2,3,2,1]) -// [1,2,3] -``` - -## flatten(list) - -Returns a list that includes all elements of the given list without nested lists. - -**Function signature** - -```feel -flatten(list: list): list -``` - -**Examples** - -```feel -flatten([[1,2],[[3]], 4]) -// [1,2,3,4] -``` - -## sort(list, precedes) - -Returns the given list sorted by the `precedes` function. 
- -**Function signature** - -```feel -sort(list: list, precedes: function<(Any, Any) -> boolean>): list -``` - -**Examples** - -```feel -sort(list: [3,1,4,5,2], precedes: function(x,y) x < y) -// [1,2,3,4,5] -``` - -## string join(list) - -Joins a list of strings into a single string. This is similar to -Java's [joining]() -function. - -If an item of the list is `null`, the item is ignored for the result string. If an item is -neither a string nor `null`, the function returns `null` instead of a string. - -**Function signature** - -```feel -string join(list: list): string -``` - -**Examples** - -```feel -string join(["a","b","c"]) -// "abc" - -string join(["a",null,"c"]) -// "ac" - -string join([]) -// "" -``` - -## string join(list, delimiter) - -Joins a list of strings into a single string. This is similar to -Java's [joining]() -function. - -If an item of the list is `null`, the item is ignored for the result string. If an item is -neither a string nor `null`, the function returns `null` instead of a string. - -The resulting string contains a `delimiter` between each element. - -**Function signature** - -```feel -string join(list: list, delimiter: string): string -``` - -**Examples** - -```feel -string join(["a"], "X") -// "a" - -string join(["a","b","c"], ", ") -// "a, b, c" -``` - -## string join(list, delimiter, prefix, suffix) - - - -Joins a list of strings into a single string. This is similar to -Java's [joining]() -function. - -If an item of the list is `null`, the item is ignored for the result string. If an item is -neither a string nor `null`, the function returns `null` instead of a string. - -The resulting string starts with `prefix`, contains a `delimiter` between each element, and ends -with `suffix`. - -**Function signature** - -```feel -string join(list: list, delimiter: string, prefix: string, suffix: string): string -``` - -**Examples** - -```feel -string join(["a","b","c"], ", ", "[", "]") -// "[a, b, c]" -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-numeric.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-numeric.md deleted file mode 100644 index 28c4f9cd25d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-numeric.md +++ /dev/null @@ -1,414 +0,0 @@ ---- -id: feel-built-in-functions-numeric -title: Numeric functions -description: "This document outlines built-in numeric functions and examples." ---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -## decimal(n, scale) - -Rounds the given value at the given scale. - -**Function signature** - -```feel -decimal(n: number, scale: number): number -``` - -**Examples** - -```feel -decimal(1/3, 2) -// .33 - -decimal(1.5, 0) -// 2 -``` - -## floor(n) - -Rounds the given value with rounding mode flooring. - -**Function signature** - -```feel -floor(n: number): number -``` - -**Examples** - -```feel -floor(1.5) -// 1 - -floor(-1.5) -// -2 -``` - -## floor(n, scale) - -Rounds the given value with rounding mode flooring at the given scale. - -**Function signature** - -```feel -floor(n: number, scale: number): number -``` - -**Examples** - -```feel -floor(-1.56, 1) -// -1.6 -``` - -## ceiling(n) - -Rounds the given value with rounding mode ceiling. 
- -**Function signature** - -```feel -ceiling(n: number): number -``` - -**Examples** - -```feel -ceiling(1.5) -// 2 - -ceiling(-1.5) -// -1 -``` - -## ceiling(n, scale) - -Rounds the given value with rounding mode ceiling at the given scale. - -**Function signature** - -```feel -ceiling(n: number, scale: number): number -``` - -**Examples** - -```feel -ceiling(-1.56, 1) -// -1.5 -``` - -## round up(n) - -Rounds the given value with the rounding mode round-up. - -**Function signature** - -```feel -round up(n: number): number -``` - -**Examples** - -```feel -round up(5.5) -// 6 - -round up(-5.5) -// -6 -``` - -## round up(n, scale) - -Rounds the given value with the rounding mode round-up at the given scale. - -**Function signature** - -```feel -round up(n: number, scale: number): number -``` - -**Examples** - -```feel -round up(5.5) -// 6 - -round up(-5.5) -// -6 - -round up(1.121, 2) -// 1.13 - -round up(-1.126, 2) -// -1.13 -``` - -## round down(n) - -Rounds the given value with the rounding mode round-down. - -**Function signature** - -```feel -round down(n: number): number -``` - -**Examples** - -```feel -round down(5.5) -// 5 - -round down (-5.5) -// -5 -``` - -## round down(n, scale) - -Rounds the given value with the rounding mode round-down at the given scale. - -**Function signature** - -```feel -round down(n: number, scale: number): number -``` - -**Examples** - -```feel -round down (1.121, 2) -// 1.12 - -round down (-1.126, 2) -// -1.12 -``` - -## round half up(n) - -Rounds the given value with the rounding mode round-half-up. - -**Function signature** - -```feel -round half up(n: number): number -``` - -**Examples** - -```feel -round half up(5.5) -// 6 - -round half up(-5.5) -// -6 -``` - -## round half up(n, scale) - -Rounds the given value with the rounding mode round-half-up at the given scale. - -**Function signature** - -```feel -round half up(n: number, scale: number): number -``` - -**Examples** - -```feel -round half up(1.121, 2) -// 1.12 - -round half up(-1.126, 2) -// -1.13 -``` - -## round half down(n) - -Rounds the given value with the rounding mode round-half-down. - -**Function signature** - -```feel -round half down(n: number): number -``` - -**Examples** - -```feel -round half down (5.5) -// 5 - -round half down (-5.5) -// -5 -``` - -## round half down(n, scale) - -Rounds the given value with the rounding mode round-half-down at the given scale. - -**Function signature** - -```feel -round half down(n: number, scale: number): number -``` - -**Examples** - -```feel -round half down (1.121, 2) -// 1.12 - -round half down (-1.126, 2) -// -1.13 -``` - -## abs(number) - -Returns the absolute value of the given numeric value. - -**Function signature** - -```feel -abs(number: number): number -``` - -**Examples** - -```feel -abs(10) -// 10 - -abs(-10) -// 10 -``` - -## modulo(dividend, divisor) - -Returns the remainder of the division of dividend by divisor. - -**Function signature** - -```feel -modulo(dividend: number, divisor: number): number -``` - -**Examples** - -```feel -modulo(12, 5) -// 2 -``` - -## sqrt(number) - -Returns the square root of the given value. - -**Function signature** - -```feel -sqrt(number: number): number -``` - -**Examples** - -```feel -sqrt(16) -// 4 -``` - -## log(number) - -Returns the natural logarithm (base e) of the given value. 
- -**Function signature** - -```feel -log(number: number): number -``` - -**Examples** - -```feel -log(10) -// 2.302585092994046 -``` - -## exp(number) - -Returns the Euler’s number e raised to the power of the given number . - -**Function signature** - -```feel -exp(number: number): number -``` - -**Examples** - -```feel -exp(5) -// 148.4131591025766 -``` - -## odd(number) - -Returns `true` if the given value is odd. Otherwise, returns `false`. - -**Function signature** - -```feel -odd(number: number): boolean -``` - -**Examples** - -```feel -odd(5) -// true - -odd(2) -// false -``` - -## even(number) - -Returns `true` if the given is even. Otherwise, returns `false`. - -**Function signature** - -```feel -even(number: number): boolean -``` - -**Examples** - -```feel -even(5) -// false - -even(2) -// true -``` - -## random number() - - - -Returns a random number between `0` and `1`. - -**Function signature** - -```feel -random number(): number -``` - -**Examples** - -```feel -random number() -// 0.9701618132579795 -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-range.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-range.md deleted file mode 100644 index fd2a3d60873..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-range.md +++ /dev/null @@ -1,558 +0,0 @@ ---- -id: feel-built-in-functions-range -title: Range functions -description: "This document outlines range functions and examples." ---- - -A set of functions establish relationships between single scalar values and ranges of such values. -All functions take two arguments and return `true` if the relationship between the argument holds, -or `false` otherwise. 
- -A scalar value must be of the following type: - -- number -- date -- time -- date-time -- days-time-duration -- years-months-duration - -![range functions overview](../assets/feel-built-in-functions-range-overview.png) - -## before(point1, point2) - -**Function signature** - -```js -before(point1: Any, point2: Any): boolean -``` - -**Examples** - -```js -before(1, 10); -// true - -before(10, 1); -// false -``` - -## before(range, point) - -**Function signature** - -```js -before(range: range, point: Any): boolean -``` - -**Examples** - -```js -before([1..5], 10) -// true -``` - -## before(point, range) - -**Function signature** - -```js -before(point: Any, range: range): boolean -``` - -**Examples** - -```js -before(1, [2..5]) -// true -``` - -## before(range1, range2) - -**Function signature** - -```js -before(range1: range, range2: range): boolean -``` - -**Examples** - -```js -before([1..5], [6..10]) -// true - -before([1..5),[5..10]) -// true -``` - -## after(point1, point2) - -**Function signature** - -```js -after(point1: Any, point2: Any): boolean -``` - -**Examples** - -```js -after(10, 1); -// true - -after(1, 10); -// false -``` - -## after(range, point) - -**Function signature** - -```js -after(range: range, point: Any): boolean -``` - -**Examples** - -```js -after([1..5], 10) -// false -``` - -## after(point, range) - -**Function signature** - -```js -after(point: Any, range: range): boolean -``` - -**Examples** - -```js -after(12, [2..5]) -// true -``` - -## after(range1, range2) - -**Function signature** - -```js -after(range1: range, range2: range): boolean -``` - -**Examples** - -```js -after([6..10], [1..5]) -// true - -after([5..10], [1..5)) -// true -``` - -## meets(range1, range2) - -**Function signature** - -```js -meets(range1: range, range2: range): boolean -``` - -**Examples** - -```js -meets([1..5], [5..10]) -// true - -meets([1..3], [4..6]) -// false - -meets([1..3], [3..5]) -// true - -meets([1..5], (5..8]) -// false - -``` - -## met by(range1, range2) - -**Function signature** - -```js -met by(range1: range, range2: range): boolean -``` - -**Examples** - -```js -met by([5..10], [1..5]) -// true - -met by([3..4], [1..2]) -// false - -met by([3..5], [1..3]) -// true - -met by((5..8], [1..5)) -// false - -met by([5..10], [1..5)) -// false -``` - -## overlaps(range1, range2) - -**Function signature** - -```js -overlaps(range1: range, range2: range): boolean -``` - -**Examples** - -```js -overlaps([5..10], [1..6]) -// true - -overlaps((3..7], [1..4]) -// true - -overlaps([1..3], (3..6]) -// false - -overlaps((5..8], [1..5)) -// false - -overlaps([4..10], [1..5)) -// treu -``` - -## overlaps before(range1, range2) - -**Function signature** - -```js -overlaps before(range1: range, range2: range): boolean -``` - -**Examples** - -```js -overlaps before([1..5], [4..10]) -// true - -overlaps before([3..4], [1..2]) -// false - -overlaps before([1..3], (3..5]) -// false - -overlaps before([1..5), (3..8]) -// true - -overlaps before([1..5), [5..10]) -// false -``` - -## overlaps after(range1, range2) - -**Function signature** - -```js -overlaps after(range1: range, range2: range): boolean -``` - -**Examples** - -```js -overlaps after([4..10], [1..5]) -// true - -overlaps after([3..4], [1..2]) -// false - -overlaps after([3..5], [1..3)) -// false - -overlaps after((5..8], [1..5)) -// false - -overlaps after([4..10], [1..5)) -// true -``` - -## finishes(point, range) - -**Function signature** - -```js -finishes(point: Any, range: range): boolean -``` - -**Examples** - -```js 
-finishes(5, [1..5]) -// true - -finishes(10, [1..7]) -// false -``` - -## finishes(range1, range2) - -**Function signature** - -```js -finishes(range1: range, range2: range): boolean -``` - -**Examples** - -```js -finishes([3..5], [1..5]) -// true - -finishes((1..5], [1..5)) -// false - -finishes([5..10], [1..10)) -// false -``` - -## finished by(range, point) - -**Function signature** - -```js -finished by(range: range, point: Any): boolean -``` - -**Examples** - -```js -finishes by([5..10], 10) -// true - -finishes by([3..4], 2) -// false -``` - -## finished by(range1, range2) - -**Function signature** - -```js -finished by(range1: range, range2: range): boolean -``` - -**Examples** - -```js -finishes by([3..5], [1..5]) -// true - -finishes by((5..8], [1..5)) -// false - -finishes by([5..10], (1..10)) -// true -``` - -## includes(range, point) - -**Function signature** - -```js -includes(range: range, point: Any): boolean -``` - -**Examples** - -```js -includes([5..10], 6) -// true - -includes([3..4], 5) -// false -``` - -## includes(range1, range2) - -**Function signature** - -```js -includes(range1: range, range2: range): boolean -``` - -**Examples** - -```js -includes([1..10], [4..6]) -// true - -includes((5..8], [1..5)) -// false - -includes([1..10], [1..5)) -// true -``` - -## during(point, range) - -**Function signature** - -```js -during(point: Any, range: range): boolean -``` - -**Examples** - -```js -during(5, [1..10]) -// true - -during(12, [1..10]) -// false - -during(1, (1..10]) -// false -``` - -## during(range1, range2) - -**Function signature** - -```js -during(range1: range, range2: range): boolean -``` - -**Examples** - -```js -during([4..6], [1..10)) -// true - -during((1..5], (1..10]) -// true -``` - -## starts(point, range) - -**Function signature** - -```js -starts(point: Any, range: range): boolean -``` - -**Examples** - -```js -starts(1, [1..5]) -// true - -starts(1, (1..8]) -// false -``` - -## starts(range1, range2) - -**Function signature** - -```js -starts(range1: range, range2: range): boolean -``` - -**Examples** - -```js -starts((1..5], [1..5]) -// false - -starts([1..10], [1..10]) -// true - -starts((1..10), (1..10)) -// true -``` - -## started by(range, point) - -**Function signature** - -```js -started by(range: range, point: Any): boolean -``` - -**Examples** - -```js -started by([1..10], 1) -// true - -started by((1..10], 1) -// false -``` - -## started by(range1, range2) - -**Function signature** - -```js -started by(range1: range, range2: range): boolean -``` - -**Examples** - -```js -started by([1..10], [1..5]) -// true - -started by((1..10], [1..5)) -// false - -started by([1..10], [1..10)) -// true -``` - -## coincides(point1, point2) - -**Function signature** - -```js -coincides(point1: Any, point2: Any): boolean -``` - -**Examples** - -```js -coincides(5, 5); -// true - -coincides(3, 4); -// false -``` - -## coincides(range1, range2) - -**Function signature** - -```js -coincides(range1: range, range2: range): boolean -``` - -**Examples** - -```js -coincides([1..5], [1..5]) -// true - -coincides((1..5], [1..5)) -// false - -coincides([1..5], [2..6]) -// false -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md deleted file mode 100644 index 59308327d04..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md +++ 
/dev/null @@ -1,334 +0,0 @@ ---- -id: feel-built-in-functions-string -title: String functions -description: "This document outlines built-in string functions and examples." ---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -## substring(string, start position) - -Returns a substring of the given value starting at `start position`. - -**Function signature** - -```feel -substring(string: string, start position: number): string -``` - -The `start position` starts at the index `1`. The last position is `-1`. - -**Examples** - -```feel -substring("foobar", 3) -// "obar" - -substring("foobar", -2) -// "ar" -``` - -## substring(string, start position, length) - -Returns a substring of the given value, starting at `start position` with the given `length`. If `length` is greater than -the remaining characters of the value, it returns all characters from `start position` until the end. - -**Function signature** - -```feel -substring(string: string, start position: number, length: number): string -``` - -The `start position` starts at the index `1`. The last position is `-1`. - -**Examples** - -```feel -substring("foobar", 3, 3) -// "oba" - -substring("foobar", -3, 2) -// "ba" - -substring("foobar", 3, 10) -// "obar" -``` - -## string length(string) - -Returns the number of characters in the given value. - -**Function signature** - -```feel -string length(string: string): number -``` - -**Examples** - -```feel -string length("foo") -// 3 -``` - -## upper case(string) - -Returns the given value with all characters are uppercase. - -**Function signature** - -```feel -upper case(string: string): string -``` - -**Examples** - -```feel -upper case("aBc4") -// "ABC4" -``` - -## lower case(string) - -Returns the given value with all characters are lowercase. - -**Function signature** - -```feel -lower case(string: string): string -``` - -**Examples** - -```feel -lower case("aBc4") -// "abc4" -``` - -## substring before(string, match) - -Returns a substring of the given value that contains all characters before `match`. - -**Function signature** - -```feel -substring before(string: string, match: string): string -``` - -**Examples** - -```feel -substring before("foobar", "bar") -// "foo" -``` - -## substring after(string, match) - -Returns a substring of the given value that contains all characters after `match`. - -**Function signature** - -```feel -substring after(string: string, match: string): string -``` - -**Examples** - -```feel -substring after("foobar", "ob") -// "ar" -``` - -## contains(string, match) - -Returns `true` if the given value contains the substring `match`. Otherwise, returns `false`. - -**Function signature** - -```feel -contains(string: string, match: string): boolean -``` - -**Examples** - -```feel -contains("foobar", "of") -// false -``` - -## starts with(string, match) - -Returns `true` if the given value starts with the substring `match`. Otherwise, returns `false`. - -**Function signature** - -```feel -starts with(string: string, match: string): boolean -``` - -**Examples** - -```feel -starts with("foobar", "fo") -// true -``` - -## ends with(string, match) - -Returns `true` if the given value ends with the substring `match`. Otherwise, returns `false`. - -**Function signature** - -```feel -ends with(string: string, match: string): boolean -``` - -**Examples** - -```feel -ends with("foobar", "r") -// true -``` - -## matches(input, pattern) - -Returns `true` if the given value matches the `pattern`. Otherwise, returns `false`. 
- -**Function signature** - -```feel -matches(input: string, pattern: string): boolean -``` - -The `pattern` is a string that contains a regular expression. - -**Examples** - -```feel -matches("foobar", "^fo*bar") -// true -``` - -## matches(input, pattern, flags) - -Returns `true` if the given value matches the `pattern`. Otherwise, returns `false`. - -**Function signature** - -```feel -matches(input: string, pattern: string, flags: string): boolean -``` - -The `pattern` is a string that contains a regular expression. - -The `flags` can contain one or more of the following characters: - -- `s` (dot-all) -- `m` (multi-line) -- `i` (case insensitive) -- `x` (comments) - -**Examples** - -```feel -matches("FooBar", "foo", "i") -// true -``` - -## replace(input, pattern, replacement) - -Returns the resulting string after replacing all occurrences of `pattern` with `replacement`. - -**Function signature** - -```feel -replace(input: string, pattern: string, replacement: string): string -``` - -The `pattern` is a string that contains a regular expression. - -The `replacement` can access the match groups by using `$` and the number of the group, for example, -`$1` to access the first group. - -**Examples** - -```feel -replace("abcd", "(ab)|(a)", "[1=$1][2=$2]") -// "[1=ab][2=]cd" - -replace("0123456789", "(\d{3})(\d{3})(\d{4})", "($1) $2-$3") -// "(012) 345-6789" -``` - -## replace(input, pattern, replacement, flags) - -Returns the resulting string after replacing all occurrences of `pattern` with `replacement`. - -**Function signature** - -```feel -replace(input: string, pattern: string, replacement: string, flags: string): string -``` - -The `pattern` is a string that contains a regular expression. - -The `replacement` can access the match groups by using `$` and the number of the group, for example, -`$1` to access the first group. - -The `flags` can contain one or more of the following characters: - -- `s` (dot-all) -- `m` (multi-line) -- `i` (case insensitive) -- `x` (comments) - -**Examples** - -```feel -replace("How do you feel?", "Feel", "FEEL", "i") -// "How do you FEEL?" -``` - -## split(string, delimiter) - -Splits the given value into a list of substrings, breaking at each occurrence of the `delimiter` pattern. - -**Function signature** - -```feel -split(string: string, delimiter: string): list -``` - -The `delimiter` is a string that contains a regular expression. - -**Examples** - -```feel -split("John Doe", "\s" ) -// ["John", "Doe"] - -split("a;b;c;;", ";") -// ["a", "b", "c", "", ""] -``` - -## extract(string, pattern) - - - -Returns all matches of the pattern in the given string. Returns an empty list if the pattern doesn't -match. - -**Function signature** - -```feel -extract(string: string, pattern: string): list -``` - -The `pattern` is a string that contains a regular expression. - -**Examples** - -```feel -extract("references are 1234, 1256, 1378", "12[0-9]*") -// ["1234","1256"] -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-temporal.md b/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-temporal.md deleted file mode 100644 index 633b7a3f999..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/builtin-functions/feel-built-in-functions-temporal.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -id: feel-built-in-functions-temporal -title: Temporal functions -description: "This document outlines built-in temporal functions and examples." 
---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -## now() - -Returns the current date and time including the timezone. - -**Function signature** - -```feel -now(): date and time -``` - -**Examples** - -```feel -now() -// date and time("2020-07-31T14:27:30@Europe/Berlin") -``` - -## today() - -Returns the current date. - -**Function signature** - -```feel -today(): date -``` - -**Examples** - -```feel -today() -// date("2020-07-31") -``` - -## day of week(date) - -Returns the day of the week according to the Gregorian calendar. Note that it always returns the English name of the day. - -**Function signature** - -```feel -day of week(date: date): string -``` - -```feel -day of week(date: date and time): string -``` - -**Examples** - -```feel -day of week(date("2019-09-17")) -// "Tuesday" -``` - -## day of year(date) - -Returns the Gregorian number of the day within the year. - -**Function signature** - -```feel -day of year(date: date): number -``` - -```feel -day of year(date: date and time): number -``` - -**Examples** - -```feel -day of year(date("2019-09-17")) -// 260 -``` - -## week of year(date) - -Returns the Gregorian number of the week within the year, according to ISO 8601. - -**Function signature** - -```feel -week of year(date: date): number -``` - -```feel -week of year(date: date and time): number -``` - -**Examples** - -```feel -week of year(date("2019-09-17")) -// 38 -``` - -## month of year(date) - -Returns the month of the week according to the Gregorian calendar. Note that it always returns the English name of the month. - -**Function signature** - -```feel -month of year(date: date): string -``` - -```feel -month of year(date: date and time): string -``` - -**Examples** - -```feel -month of year(date("2019-09-17")) -// "September" -``` - -## abs(n) - -Returns the absolute value of a given duration. - -**Function signature** - -```feel -abs(n: days and time duration): days and time duration -``` - -```feel -abs(n: years and months duration): years and months duration -``` - -**Examples** - -```feel -abs(duration("-PT5H")) -// "duration("PT5H")" - -abs(duration("PT5H")) -// "duration("PT5H")" - -abs(duration("-P2M")) -// duration("P2M") -``` - -## last day of month(date) - - - -Takes the month of the given date or date-time value and returns the last day of this month. - -**Function signature** - -```feel -last day of month(date: date): date -``` - -```feel -last day of month(date: date and time): date -``` - -**Examples** - -```feel -last day of month(date("2022-10-01")) -// date("2022-10-31")) - -last day of month(date and time("2022-10-16T12:00:00")) -// date("2022-10-31")) -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-boolean-expressions.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-boolean-expressions.md deleted file mode 100644 index 66fcc9c0bd1..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-boolean-expressions.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -id: feel-boolean-expressions -title: Boolean expressions -description: "This document outlines boolean expressions and examples." ---- - -### Literal - -Creates a new boolean value. - -```feel -true - -false -``` - -### Comparison - -Two values of the same type can be compared using the following operators: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Operator               | Description                   | Supported types                         |
| ---------------------- | ----------------------------- | --------------------------------------- |
| `=`                    | equal to                      | any                                     |
| `!=`                   | not equal to                  | any                                     |
| `<`                    | less than                     | number, date, time, date-time, duration |
| `<=`                   | less than or equal to         | number, date, time, date-time, duration |
| `>`                    | greater than                  | number, date, time, date-time, duration |
| `>=`                   | greater than or equal         | number, date, time, date-time, duration |
| `between [x] and [y]`  | same as `(_ >= x and _ <= y)` | number, date, time, date-time, duration |
    - -```feel -5 = 5 -// true - -5 != 5 -// false - -date("2020-04-05") < date("2020-04-06") -// true - -time("08:00:00") <= time("08:00:00") -// true - -duration("P1D") > duration("P5D") -// false - -duration("P1Y") >= duration("P6M") -// true - -5 between 3 and 7 -// true - -date("2020-04-06") between date("2020-04-05") and date("2020-04-09") -// true -``` - -:::caution Be Careful! -The equals operator has only **one** equals sign (e.g. `x = y`). In other languages, the operator has two equals signs (e.g. `x == y`). -::: - -### Null check - -Any value or variable can be compared with `null` to check if it is equal to `null`, or if it exists. - -Comparing `null` to a value different from `null` results in `false`. It returns `true` if the -value is `null` or the variable doesn't exist. - -Comparing a context entry with `null` results in `true` if the value of the entry is `null` or if -the context doesn't contain an entry with this key. - -```feel -null = null -// true - -"foo" = null -// false - -{x: null}.x = null -// true - -{}.y = null -// true -``` - -:::tip - -The built-in -function [is defined()](/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-boolean.md#is-definedvalue) can be -used to differentiate between a value that is `null` and a variable or context entry that doesn't -exist. - -```feel -is defined(null) -// true - -is defined({x: null}.x) -// true - -is defined({}.y) -// false -``` - -::: - -### Conjunction/and - -Combines multiple boolean values following the ternary logic. - -- The result is `true` if all values are `true`. -- The result is `false` if one value is `false`. -- Otherwise, the result is `null` (i.e. if a value is not a boolean.) - -```feel -true and true -// true - -true and false -// false - -true and null -// null - -true and "otherwise" -// null - -false and null -// false - -false and "otherwise" -// false -``` - -### Disjunction/or - -Combines multiple boolean values following the ternary logic. - -- The result is `true` if at least one value is `true`. -- The result is `false` if all values are `false`. -- Otherwise, the result is `null` (i.e. if a value is not a boolean.) - -```feel -true or false -// true - -false or false -// false - -true or null -// true - -true or "otherwise" -// true - -false or null -// null - -false or "otherwise" -// null -``` - -### Instance of - -Checks if the value is of the given type. Available type names: - -- `boolean` -- `number` -- `string` -- `date` -- `time` -- `date and time` -- `days and time duration` -- `years and months duration` -- `list` -- `context` -- `function` -- `Any` - -Use the type `Any` to check if the value is not `null`. - -```feel -1 instance of number -// true - -1 instance of string -// false - -1 instance of Any -// true - -null instance of Any -// false - -duration("P3M") instance of years and months duration -// true - -duration("PT4H") instance of days and time duration -// true -``` - -### Unary-tests/in - -Evaluates a [unary-tests](/docs/components/modeler/feel/language-guide/feel-unary-tests) with the given value. The keyword `in` separates the value from the unary-tests. 
- -```feel -5 in (3..7) -// true - -date("2021-06-04") in [date("2021-05-01")..date("2021-05-31")] -// false - -5 in (3,5,7) -// true - -5 in [2,4,6,8] -// false -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-context-expressions.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-context-expressions.md deleted file mode 100644 index b0a010c469e..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-context-expressions.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -id: feel-context-expressions -title: Context expressions -description: "This document outlines context expressions and examples." ---- - -### Literal - -Creates a new context with the given entries. Each entry has a key and a value. The key is either a -name or a string. The value can be any type. - -See the [naming conventions](./feel-variables.md#variable-names) for valid key names. - -```feel -{ - a: 1, - b: 2 -} -// {a:1, b:2} - -{ - "a": 1, - "b": 2 -} -// {a:1, b:2} -``` - -Inside the context, the previous entries can be accessed. - -```feel -{ - a: 2, - b: a * 2 -} -// {a:2, b:4} -``` - -A context value can embed other context values. - -```feel -{ - a: 1, - b: { - c: 2 - } -} -// {a:1, b:{c:2}} -``` - -### Get entry/path - -```feel -a.b -``` - -Accesses the entry with the key `b` of the context `a`. The path is separated by `.`. - -If the value of the entry `b` is also a context, the path can be chained (i.e. `a.b.c`). - -```feel -{ - a: 2 -}.a -// 2 - -{ - a: { - b: 3 - } -}.a -// {b: 3} - -{ - a: { - b: 3 - } -}.a.b -// 3 -``` - -### Filter - -Filters a list of context elements. It is a special kind of the [filter expression](/docs/components/modeler/feel/language-guide/feel-list-expressions#filter) for lists. - -While filtering, the entries of the current context element can be accessed by their key. - -```feel -[ - { - a: "p1", - b: 5 - }, - { - a: "p2", - b: 10 - } -][b > 7] -// [{a: "p2", b: 10}] -``` - -### Projection - -Extracts the entries of a list of context elements by a given key (i.e. a projection). It returns a -list containing the values of the context elements for the given key. - -```feel -[ - { - a: "p1", - b: 5 - }, - { - a: "p2", - b: 10 - } -].a -// ["p1", "p2"] -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-control-flow.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-control-flow.md deleted file mode 100644 index d52534163b2..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-control-flow.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: feel-control-flow -title: Control flow -description: "This document outlines control flow and examples." ---- - -### If conditions - -```feel -if c then a else b -``` - -Executes the expression `a` if the condition `c` evaluates to `true`. Otherwise, it executes the -expression `b`. - -```feel -if 5 < 10 then "low" else "high" -// "low" - -if 12 < 10 then "low" else "high" -// "high" -``` - -:::info good to know -If the condition `c` doesn't evaluate to a boolean value (e.g. `null`), it -executes the expression `b`. - -```feel -if null then "low" else "high" -// "high" -``` - -::: - -### For loops - -```feel -for a in b return c -``` - -Iterates over the list `b` and executes the expression `c` for each element in the list. The current -element is assigned to the variable `a`. The result of the expression is a list. 
- -If multiple lists are passed to the `for` loop then it iterates over the cross-product of the -elements in the given lists. - -```feel -for x in [1,2,3] return x * 2 -// [2,4,6] - -for x in [1,2], y in [3,4] return x * y -// [3, 4, 6, 8] -``` - -While iterating over the list, the previous elements are assigned to the variable `partial`. - -```feel -for i in 1..10 return if (i <= 2) then 1 else partial[-1] + partial[-2] -// [1, 1, 2, 3, 5, 8, 13, 21, 34, 55] -``` - -Instead of a list, the `for` loop can also iterate over a given range. - -```feel -for x in 0..8 return 2 ** x -// [1, 2, 4, 8, 16, 32, 64, 128, 256] - -for x in 3..1 return 2 * x -// [6,4,2] -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-data-types.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-data-types.md deleted file mode 100644 index 117a9535503..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-data-types.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -id: feel-data-types -title: Data types -description: "This document outlines data types, including null, number, string, boolean, and more." ---- - -FEEL defines the following types: - -### Null - -Nothing, null, or nil (i.e. the value is not present). - -- Java Type: `null` - -```feel -null -``` - -### Number - -A whole or floating point number. The number can be negative. - -- not-a-number (NaN), positive/negative infinity are represented as `null` -- Java Type: `java.math.BigDecimal` - -```feel -1 - -2.3 - -.4 - --5 -``` - -### String - -A sequence of characters enclosed in double quotes `"`. The sequence can also contain escaped characters starting with `\` (e.g. `\'`, `\"`, `\\`, `\n`, `\r`, `\t`, unicode like `\u269D` or `\U101EF`). - -- Java Type: `java.lang.String` - -```feel -"valid" -``` - -### Boolean - -A boolean value. It is either true or false. - -- Java Type: `java.lang.Boolean` - -```feel -true - -false -``` - -### Date - -A date value without a time component. - -- Format: `yyyy-MM-dd`. -- Java Type: `java.time.LocalDate` - -```feel -date("2017-03-10") - -@"2017-03-10" -``` - -### Time - -A local or zoned time. The time can have an offset or time zone id. - -- Format: `HH:mm:ss` / `HH:mm:ss+/-HH:mm` / `HH:mm:ss@ZoneId` -- Java Type: `java.time.LocalTime` / `java.time.OffsetTime` - -```feel -time("11:45:30") -time("13:30") -time("11:45:30+02:00") -time("10:31:10@Europe/Paris") - -@"11:45:30" -@"13:30" -@"11:45:30+02:00" -@"10:31:10@Europe/Paris" -``` - -### Date-time - -A date with a local or zoned time component. The time can have an offset or time zone id. - -- Format: `yyyy-MM-dd'T'HH:mm:ss` / `yyyy-MM-dd'T'HH:mm:ss+/-HH:mm` / `yyyy-MM-dd'T'HH:mm:ss@ZoneId` -- Java Type: `java.time.LocalDateTime` / `java.time.DateTime` - -```feel -date and time("2015-09-18T10:31:10") -date and time("2015-09-18T10:31:10+01:00") -date and time("2015-09-18T10:31:10@Europe/Paris") - -@"2015-09-18T10:31:10" -@"2015-09-18T10:31:10+01:00" -@"2015-09-18T10:31:10@Europe/Paris" -``` - -### Days-time-duration - -A duration based on seconds. It can contain days, hours, minutes, and seconds. - -- Format: `PxDTxHxMxS` -- Java Type: `java.time.Duration` - -```feel -duration("P4D") -duration("PT2H") -duration("PT30M") -duration("P1DT6H") - -@"P4D" -@"PT2H" -@"PT30M" -@"P1DT6H" -``` - -### Years-months-duration - -A duration based on the calendar. It can contain years and months. 
- -- Format: `PxYxM` -- Java Type: `java.time.Period` - -```feel -duration("P2Y") -duration("P6M") -duration("P1Y6M") - -@"P2Y" -@"P6M" -@"P1Y6M" -``` - -### List - -A list of elements. The elements can be of any type. The list can be empty. - -- Java Type: `java.util.List` - -```feel -[] -[1,2,3] -["a","b"] - -[["list"], "of", [["lists"]]] -``` - -### Context - -A list of entries. Each entry has a key and a value. The key is either a name or a string. The value -can be any type. The context can be empty. - -- Java Type: `java.util.Map` - -```feel -{} - -{a:1} -{b: 2, c: "valid"} -{nested: {d: 3}} - -{"a": 1} -{"b": 2, "c": "valid"} -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-expressions-introduction.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-expressions-introduction.md deleted file mode 100644 index 132bb65fc5a..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-expressions-introduction.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: feel-expressions-introduction -title: Introduction -description: "FEEL expressions are powerful and can be used for various cases." ---- - -FEEL expressions are powerful and can be used for various cases. - -This section is split into expressions based on their operational data type: - -- [Boolean](./feel-boolean-expressions.md) -- [String](./feel-string-expressions.md) -- [Numeric](./feel-numeric-expressions.md) -- [List](./feel-list-expressions.md) -- [Context](./feel-context-expressions.md) -- [Temporal](./feel-temporal-expressions.md) - -The following sections cover more general areas that are not restricted to one data type: - -- [Variables](./feel-variables.md) -- [Control Flow](./feel-control-flow.md) -- [Functions](./feel-functions.md) - -### Comments - -An expression can contain comments to explain it and give it more context. This can be done using -Java-style comments: `//` to the end of line, or `/*.... */` for blocks. - -```feel -// returns the last item -[1,2,3,4][-1] - -/* returns the last item */ -[1,2,3,4][-1] - -/* - * returns the last item - */ -[1,2,3,4][-1] -``` - -### Parentheses - -Parentheses `( .. )` can be used in expressions to separate different parts of an -expression or to influence the precedence of the operators. - -```feel -(5 - 3) * (4 / 2) - -x < 5 and (y > 10 or z > 20) - -if (5 < 10) then "low" else "high" -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-functions.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-functions.md deleted file mode 100644 index 0b7115bb7b8..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-functions.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: feel-functions -title: Functions -description: "This document outlines various functions and examples." ---- - -### Invocation - -Invokes a built-in function (e.g. [contains()](/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-string.md#containsstring-match)) or user-defined -function by its name. The arguments of the function can be passed positional or named. - -- Positional: Only the values, in the same order as defined by the function (e.g. `f(1,2)`). -- Named: The values with the argument name as prefix, in any order (e.g. `f(a: 1, b: 2)`). 
- -```feel -contains("me@camunda.com", ".com") -// true - -contains(string: "me@camunda.com", match: ".de") -// false -``` - -:::info GOOD TO KNOW - -If an expression invokes a built-in function but the argument types don't match the function -signature, the invocation returns `null`. - -::: - -### User-defined - -```feel -function(a,b) e -``` - -Defines a function with a list of argument names, and an expression (i.e. the function body). When -the function is invoked, it assigns the values to the arguments and evaluates the expression. - -Within an expression, a function can be defined and invoked in a context. - -```feel -{ - age: function(birthday) (today() - birthday).years -} -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-list-expressions.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-list-expressions.md deleted file mode 100644 index b3b4b86ee7b..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-list-expressions.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -id: feel-list-expressions -title: List expressions -description: "This document outlines list expressions and examples." ---- - -### Literal - -Creates a new list of the given elements. The elements can be of any type. - -```feel -[1,2,3,4] -``` - -A list value can embed other list values. - -```feel -[[1,2], [3,4], [5,6]] -``` - -### Get element - -```feel -a[i] -``` - -Accesses an element of the list `a` at index `i`. The index starts at `1`. - -If the index is out of the range of the list, it returns `null`. - -```feel -[1,2,3,4][1] -// 1 - -[1,2,3,4][2] -// 2 - -[1,2,3,4][4] -// 4 - -[1,2,3,4][5] -// null - -[1,2,3,4][0] -// null -``` - -If the index is negative, it starts counting the elements from the end of the list. The last -element of the list is at index `-1`. - -```feel -[1,2,3,4][-1] -// 4 - -[1,2,3,4][-2] -// 3 - -[1,2,3,4][-5] -// null -``` - -:::caution be careful! -The index of a list starts at `1`. In other languages, the index starts at `0`. -::: - -### Filter - -```feel -a[c] -``` - -Filters the list `a` by the condition `c`. The result of the expression is a list that contains all elements where the condition `c` evaluates to `true`. - -While filtering, the current element is assigned to the variable `item`. - -```feel -[1,2,3,4][item > 2] -// [3,4] - -[1,2,3,4][item > 10] -// [] - -[1,2,3,4][even(item)] -// [2,4] -``` - -### Some - -```feel -some a in b satisfies c -``` - -Iterates over the list `b` and evaluate the condition `c` for each element in the list. The current -element is assigned to the variable `a`. - -It returns `true` if `c` evaluates to `true` for **one or more** elements of `b`. Otherwise, it -returns `false`. - -```feel -some x in [1,2,3] satisfies x > 2 -// true - -some x in [1,2,3] satisfies x > 5 -// false - -some x in [1,2,3] satisfies even(x) -// true - -some x in [1,2], y in [2,3] satisfies x < y -// true -``` - -### Every - -Iterates over the list `b` and evaluate the condition `c` for each element in the list. The current -element is assigned to the variable `a`. - -It returns `true` if `c` evaluates to `true` for **all** elements of `b`. Otherwise, it -returns `false`. 
- -```feel -every x in [1,2,3] satisfies x >= 1 -// true - -every x in [1,2,3] satisfies x >= 2 -// false - -every x in [1,2,3] satisfies even(x) -// false - -every x in [1,2], y in [2,3] satisfies x < y -// false -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-numeric-expressions.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-numeric-expressions.md deleted file mode 100644 index 2469d4df980..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-numeric-expressions.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: feel-numeric-expressions -title: Numeric expressions -description: "This document outlines numeric expressions and examples." ---- - -### Literal - -Creates a new numeric value. Leading zeros are valid. - -```feel -1 - -0.5 -.5 - --2 - -01 - --0002 -``` - -### Addition - -```feel -2 + 3 -// 5 -``` - -### Subtraction - -```feel -5 - 3 -// 2 -``` - -### Multiplication - -```feel -5 * 3 -// 15 -``` - -### Division - -```feel -6 / 2 -// 3 -``` - -### Exponentiation - -```feel -2 ** 3 -// 8 -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-string-expressions.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-string-expressions.md deleted file mode 100644 index f1af04a1a76..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-string-expressions.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: feel-string-expressions -title: String expressions -description: "This document outlines string expressions and examples." ---- - -### Literal - -Creates a new string value. - -```feel -"valid" -``` - -### Addition/concatenation - -An addition concatenates the strings. The result is a string containing the characters of both strings. - -```feel -"foo" + "bar" -// "foobar" -``` - -:::tip - -The concatenation is only available for string values. For other types, you can use -the [string()](/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-conversion.md#stringfrom) function to convert -the value into a string first. - -```feel -"order-" + string(123) -``` - -::: diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-temporal-expressions.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-temporal-expressions.md deleted file mode 100644 index 9d90e4bdbf9..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-temporal-expressions.md +++ /dev/null @@ -1,414 +0,0 @@ ---- -id: feel-temporal-expressions -title: Temporal expressions -description: "This document outlines temporal expressions and examples." ---- - -### Literal - -Creates a new temporal value. A value can be written in one of the following ways: - -- using a temporal function (e.g. `date("2020-04-06")`) -- using the `@` - notation (e.g. 
`@"2020-04-06"`) - -```feel -date("2020-04-06") -@"2020-04-06" - -time("08:00:00") -time("08:00:00+02:00") -time("08:00:00@Europe/Berlin") -@"08:00:00" -@"08:00:00+02:00" -@"08:00:00@Europe/Berlin" - -date and time("2020-04-06T08:00:00") -date and time("2020-04-06T08:00:00+02:00") -date and time("2020-04-06T08:00:00@Europe/Berlin") -@"2020-04-06T08:00:00" -@"2020-04-06T08:00:00+02:00" -@"2020-04-06T08:00:00@Europe/Berlin" - -duration("P5D") -duration("PT6H") -@"P5D" -@"PT6H" - -duration("P1Y6M") -duration("P3M") -@"P1Y6M" -@"P3M" -``` - -The value is `null` if a date or date-time literal doesn't represent a valid calendar date. For example, `@"2024-06-31"` is invalid because June has only 30 days. - -### Addition - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| First argument | Second argument    | Result    |
| -------------- | ------------------ | --------- |
| date           | duration           | date      |
| time           | days-time-duration | time      |
| date-time      | duration           | date-time |
| duration       | date               | date      |
| duration       | time               | time      |
| duration       | date-time          | date-time |
| duration       | duration           | duration  |
    - -```feel -date("2020-04-06") + duration("P1D") -// date("2020-04-07") - -time("08:00:00") + duration("PT1H") -// time("09:00:00") - -date and time("2020-04-06T08:00:00") + duration("P7D") -// date and time("2020-04-13T08:00:00") - -duration("P2D") + duration("P5D") -// duration("P7D") -``` - -### Subtraction - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| First argument        | Second argument       | Result                |
| --------------------- | --------------------- | --------------------- |
| date                  | date                  | days-time-duration    |
| date                  | duration              | date                  |
| time                  | time                  | days-time-duration    |
| time                  | days-time-duration    | time                  |
| date-time             | date-time             | days-time-duration    |
| date-time             | duration              | date-time             |
| days-time-duration    | days-time-duration    | days-time-duration    |
| years-months-duration | years-months-duration | years-months-duration |
    - -```feel -date("2020-04-06") - date("2020-04-01") -// duration("P5D") - -date("2020-04-06") - duration("P5D") -// date("2020-04-01") - -time("08:00:00") - time("06:00:00") -// duration("PT2H") - -time("08:00:00") - duration("PT2H") -// time("06:00:00") - -duration("P7D") - duration("P2D") -// duration("P5D") - -duration("P1Y") - duration("P3M") -// duration("P9M") -``` - -### Multiplication - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| First argument        | Second argument       | Result                |
| --------------------- | --------------------- | --------------------- |
| days-time-duration    | number                | days-time-duration    |
| number                | days-time-duration    | days-time-duration    |
| years-months-duration | number                | years-months-duration |
| number                | years-months-duration | years-months-duration |
    - -```feel -duration("P1D") * 5 -// duration("P5D") - -duration("P1M") * 6 -// duration("P6M") -``` - -### Division - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| First argument        | Second argument       | Result                |
| --------------------- | --------------------- | --------------------- |
| days-time-duration    | days-time-duration    | number                |
| days-time-duration    | number                | days-time-duration    |
| years-months-duration | years-months-duration | number                |
| years-months-duration | number                | years-months-duration |
    - -```feel -duration("P5D") / duration("P1D") -// 5 - -duration("P5D") / 5 -// duration("P1D") - -duration("P1Y") / duration("P1M") -// 12 - -duration("P1Y") / 12 -// duration("P1M") -``` - -### Properties - -A temporal value has multiple properties for its components. The following properties are available -for the given types: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Property      | Available for         | Description                                                 |
| ------------- | --------------------- | ----------------------------------------------------------- |
| `year`        | date, date-time       | the year as number                                           |
| `month`       | date, date-time       | the month as number [1..12], where 1 is January              |
| `day`         | date, date-time       | the day of the month as number [1..31]                       |
| `weekday`     | date, date-time       | the day of the week as number [1..7], where 1 is Monday      |
| `hour`        | time, date-time       | the hour of the day as number [0..23]                        |
| `minute`      | time, date-time       | the minute of the hour as number [0..59]                     |
| `second`      | time, date-time       | the second of the minute as number [0..59]                   |
| `time offset` | time, date-time       | the duration offset corresponding to the timezone or `null`  |
| `timezone`    | time, date-time       | the timezone identifier or `null`                            |
| `days`        | days-time-duration    | the normalized days component as number                      |
| `hours`       | days-time-duration    | the normalized hours component as number [0..23]             |
| `minutes`     | days-time-duration    | the normalized minutes component as number [0..59]           |
| `seconds`     | days-time-duration    | the normalized seconds component as number [0..59]           |
| `years`       | years-months-duration | the normalized years component as number                     |
| `months`      | years-months-duration | the normalized months component as number [0..11]            |
    - -```feel -date("2020-04-06").year -// 2020 - -date("2020-04-06").month -// 4 - -date("2020-04-06").weekday -// 1 - -time("08:00:00").hour -// 8 - -date and time("2020-04-06T08:00:00+02:00").time offset -// duration("PT2H") - -date and time("2020-04-06T08:00:00@Europe/Berlin").timezone -// "Europe/Berlin" - -duration("PT2H30M").hours -// 2 - -duration("PT2H30M").minutes -// 30 - -duration("P6M").months -// 6 -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-unary-tests.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-unary-tests.md deleted file mode 100644 index b271c1666f4..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-unary-tests.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -id: feel-unary-tests -title: Unary-tests -description: "This document outlines unary-tests and examples." ---- - -A unary-tests expression is a special kind of boolean expression. Unary-tests expressions should be used for the input -entries of a decision table (i.e. the conditions of a rule). - -A unary-tests expression returns `true` if one of the following conditions is fulfilled: - -- The expression evaluates to `true` when the input value is applied to the unary operators. -- The expression evaluates to `true` when the input value is assigned to the special variable `?`. -- The expression evaluates to a value, and the input value is equal to that value. -- The expression evaluates to a list, and the input value is equal to at least one of the values. -- The expression is equal to `-` (a dash). - -### Comparison - -Compares the input value with a given value. Both values must be of the same type. - -The input value is passed implicitly as the first argument of the operator. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Operator | Description           | Supported types                         |
| -------- | --------------------- | --------------------------------------- |
| (none)   | equal to              | any                                     |
| `<`      | less than             | number, date, time, date-time, duration |
| `<=`     | less than or equal to | number, date, time, date-time, duration |
| `>`      | greater than          | number, date, time, date-time, duration |
| `>=`     | greater than or equal | number, date, time, date-time, duration |
    - -```feel -"valid" - -< 10 - -<= date("2020-04-06") - -> time("08:00:00") - ->= duration("P5D") -``` - -### Interval - -Checks if the input value is within a given interval between `x` and `y`. - -An interval has two boundaries that can be open `(x..y)` / `]x..y[` or closed `[x..y]`. If a -boundary is closed, it includes the given value (i.e. less/greater than or equal). Otherwise, it -excludes the value (i.e. less/greater than). - -The input value is passed implicitly to the operator. - -```feel -(2..5) -// input > 2 and input < 5 - -]2..5[ -// input > 2 and input < 5 - -[2..5] -// input >= 2 and input <= 5 - -(2..5] -// input > 2 and input <= 5 -``` - -### Disjunction/or - -Combines multiple unary-test expressions following the ternary logic. - -- Returns `true` if at least one unary-test evaluates to `true`. -- Otherwise, it returns `false`. - -```feel -2, 3, 4 -// input = 2 or input = 3 or input = 4 - -< 10, > 50 -// input < 10 or input > 50 -``` - -### Negation/not - -Negates a given unary-test expression. The expression can be a comparison, an interval, or a -disjunction. - -It returns `true` if the given unary-test evaluates to `false`. - -```feel -not("valid") -// input != "valid" - -not(2, 3) -// input != 2 and input != 3 -``` - -### Expressions - -When a unary operator is not enough to express the condition, any expression that returns a boolean value can be used, -such as [invoking a function](/components/modeler/feel/language-guide/feel-functions.md#invocation). - -In the expression, the input value can be accessed by the special variable `?`. - -```feel -contains(?, "good") -// checks if the input value (string) contains "good" - -ends with(?, "@camunda.com") -// checks if the input value (string) ends with "@camunda.com" -``` diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-variables.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-variables.md deleted file mode 100644 index 8e65249b2c1..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/feel-variables.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: feel-variables -title: Variables -description: "This document outlines variables and examples." ---- - -import MarkerCamundaExtension from "@site/src/mdx/MarkerCamundaExtension"; - -### Access variables - -Access the value of a variable by its variable name. - -```feel -a + b -``` - -If the value of the variable is a context, a [context entry can be accessed](/docs/components/modeler/feel/language-guide/feel-context-expressions#get-entrypath) by its key. - -```feel -a.b -``` - -:::tip - -Use a [null-check](/docs/components/modeler/feel/language-guide/feel-boolean-expressions#null-check) if the variable can be `null` or is optional. - -```feel -a != null and a.b > 10 -``` - -::: - -### Variable names - -The name of a variable can be any alphanumeric string including the `_` symbol. For a combination of -words, it's recommended to use the `camelCase` or the `snake_case` format. The `kebab-case` format -is not allowed because it contains the operator `-`. - -When accessing a variable in an expression, keep in mind the variable name is case-sensitive. - -Restrictions of a variable name: - -- It may not start with a _number_ (e.g. `1stChoice` is not allowed; you can - use `firstChoice` instead). -- It may not contain _whitespaces_ (e.g. `order number` is not allowed; you can use `orderNumber` - instead). -- It may not contain an _operator_ (e.g. `+`, `-`, `*`, `/`, `=`, `>`, `<`, `?`, `.`). 
-- It may not be a _literal_ (e.g. `null`, `true`, `false`) or a _keyword_ (e.g. `function`, `if` - , `then`, `else`, `for`, `return`, `between`, `instance`, `of`, `not`, `in`, `and`, `or`, `some`, - `every`, `satisfies`). - -### Escape variable names - - - -If a variable name or a context key contains any special character (e.g. whitespace, dash, etc.) -then the name can be wrapped into single backquotes/backticks (e.g. `` `foo bar` ``). - -```feel -`first name` - -`tracking-id` - -order.`total price` -``` - -:::tip -Use the [`get value()`](/docs/components/modeler/feel/builtin-functions/feel-built-in-functions-context.md#get-valuecontext-key) function -to retrieve the context value of an arbitrary key. - -```feel -get value(order, "total price") -``` - -::: diff --git a/versioned_docs/version-8.2/components/modeler/feel/language-guide/language-guide-introduction.md b/versioned_docs/version-8.2/components/modeler/feel/language-guide/language-guide-introduction.md deleted file mode 100644 index 8dbd6806dbc..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/language-guide/language-guide-introduction.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: language-guide-introduction -title: Introduction ---- - -Everything begins with [data types](./feel-data-types.md). Read on this subject first to have a better -understanding of the other content like [expressions](./feel-expressions-introduction.md) and [built-in functions](./feel-functions.md). - -If you're writing an expression for an input entry of a decision table, visit our documentation on [unary-tests](./feel-unary-tests.md). - -Otherwise, have a look at the more general section -about [expressions](./feel-expressions-introduction.md). - -Looking for a function to call? Visit our documentation on [built-in functions](../builtin-functions/feel-built-in-functions-introduction.md). 
diff --git a/versioned_docs/version-8.2/components/modeler/feel/sidebar-schema.js b/versioned_docs/version-8.2/components/modeler/feel/sidebar-schema.js deleted file mode 100644 index e9c1d7d927c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/sidebar-schema.js +++ /dev/null @@ -1,34 +0,0 @@ -module.exports = { - "FEEL expressions": [ - "components/modeler/feel/what-is-feel", - "components/modeler/feel/language-guide/feel-data-types", - "components/modeler/feel/language-guide/feel-unary-tests", - { - Expressions: [ - "components/modeler/feel/language-guide/feel-expressions-introduction", - "components/modeler/feel/language-guide/feel-boolean-expressions", - "components/modeler/feel/language-guide/feel-string-expressions", - "components/modeler/feel/language-guide/feel-numeric-expressions", - "components/modeler/feel/language-guide/feel-list-expressions", - "components/modeler/feel/language-guide/feel-context-expressions", - "components/modeler/feel/language-guide/feel-temporal-expressions", - "components/modeler/feel/language-guide/feel-variables", - "components/modeler/feel/language-guide/feel-control-flow", - "components/modeler/feel/language-guide/feel-functions", - ], - }, - { - "Built-in Functions": [ - "components/modeler/feel/builtin-functions/feel-built-in-functions-introduction", - "components/modeler/feel/builtin-functions/feel-built-in-functions-conversion", - "components/modeler/feel/builtin-functions/feel-built-in-functions-boolean", - "components/modeler/feel/builtin-functions/feel-built-in-functions-string", - "components/modeler/feel/builtin-functions/feel-built-in-functions-numeric", - "components/modeler/feel/builtin-functions/feel-built-in-functions-list", - "components/modeler/feel/builtin-functions/feel-built-in-functions-context", - "components/modeler/feel/builtin-functions/feel-built-in-functions-temporal", - "components/modeler/feel/builtin-functions/feel-built-in-functions-range", - ], - }, - ], -}; diff --git a/versioned_docs/version-8.2/components/modeler/feel/what-is-feel.md b/versioned_docs/version-8.2/components/modeler/feel/what-is-feel.md deleted file mode 100644 index 19156a68fca..00000000000 --- a/versioned_docs/version-8.2/components/modeler/feel/what-is-feel.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: what-is-feel -title: What is FEEL? -description: "FEEL is a part of DMN specification of the Object Management Group." ---- - -FEEL (Friendly Enough Expression Language) is a part of -the [DMN specification](http://www.omg.org/spec/DMN/) of the Object Management Group (OMG). It is designed to write expressions for decision tables and literal expressions in a way that is easily understood by business professionals and developers. - -## Unary-tests vs. expressions - -FEEL has two types of expressions for different use cases: - -### Unary-tests - -A [unary-tests expression](./language-guide/feel-unary-tests.md) is a special kind of boolean expression. It should be used for the input -entries of a decision table (i.e. the conditions of a rule). - -```feel -< 7 -// checks if the input value is less than 7 - -not(2,4) -// checks if the input value is neither 2 nor 4 - -[date("2015-09-17")..date("2015-09-19")] -// checks if the input value is between '2015-09-17' and '2015-09-19' - -<= duration("P1D") -// checks if the input value is less than or equal to one day -``` - -### Expressions - -[General expressions](./language-guide/feel-expressions-introduction.md) that can return values of different types. 
They can be used everywhere; for -example, in a decision table as an input expression or as an output entry. - -```feel -applicant.monthly.income * 12 - -if applicant.maritalStatus in ("M","S") then "valid" else "not valid" - -sum( [applicant.monthly.repayments, applicant.monthly.expenses] ) - -sum( credit_history[record_date > date("2011-01-01")].weight ) - -some ch in credit_history satisfies ch.event = "bankruptcy" -``` diff --git a/versioned_docs/version-8.2/components/modeler/forms/assets/checklist-example.png b/versioned_docs/version-8.2/components/modeler/forms/assets/checklist-example.png deleted file mode 100644 index f9da6eee451..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/forms/assets/checklist-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/forms/assets/static-options-example.png b/versioned_docs/version-8.2/components/modeler/forms/assets/static-options-example.png deleted file mode 100644 index b1ebd7094e3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/forms/assets/static-options-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/forms/assets/taglist-example.png b/versioned_docs/version-8.2/components/modeler/forms/assets/taglist-example.png deleted file mode 100644 index 5bc2b012e5b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/forms/assets/taglist-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/forms/camunda-forms-reference.md b/versioned_docs/version-8.2/components/modeler/forms/camunda-forms-reference.md deleted file mode 100644 index 830a960114b..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/camunda-forms-reference.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: camunda-forms-reference -title: What are Camunda Forms? -description: Streamline your human workflows with the help of Camunda Forms ---- - -:::note Support for Camunda Forms -The initial release of Camunda Forms includes a debut minimal feature set, which will be expanded with upcoming versions. The Camunda Forms feature was added with the 7.15.0 release of Camunda 7 and the 4.7.0 release of [Camunda Modeler](../about-modeler.md). Therefore, they can be used within BPMN diagrams running on Camunda 7 version 7.15.0 or later. -::: - -The Camunda Forms feature allows you to design and configure forms. Once configured, they can be connected to a user task or start event to implement a task form in your application. - -While you can incorporate Camunda Forms solely within Camunda 8, you can also utilize Camunda Forms in Camunda 7. After deploying a diagram with an embedded form, Tasklist imports this form schema and uses it to render the form on every task assigned to it. - -To learn more about how Camunda Forms are created in Camunda Modeler and embedded in Camunda Tasklist, visit our guide on [user task forms](../../../guides/utilizing-forms.md). - -Camunda Forms are powered by the open source [bpmn-io form-js library](https://github.com/bpmn-io/form-js). Visit the [open source repository](https://github.com/bpmn-io/form-js) to find out how to render a form using plain JavaScript in a custom application (note that this also requires you to fetch the form from the respective BPMN 2.0 element and provide data as needed to the form.) - -Visit the [form element library](./form-element-library/forms-element-library.md) for an overview of the components supported by Camunda Forms. 
diff --git a/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-data-binding.md b/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-data-binding.md deleted file mode 100644 index f42ea3a8c13..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-data-binding.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: forms-config-data-binding -title: Data binding -description: How data from the process interacts with your form ---- - -## Binding form fields to process data - -Each **form element** which allows data manipulation has a **Key** attribute, they are known as **form fields**. This attribute is used as an identifier to map data of the respective field (1) during the initial loading and (2) during submission of the form. - -When a form is referenced by a user task or start event and viewed in [Camunda Tasklist](../../../tasklist/introduction-to-tasklist.md), the key will be used to refer to a process variable. This means that the value of the process variable will be used to populate the respective field initially and then mapped back to the process during the submission of the form. diff --git a/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-options.md b/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-options.md deleted file mode 100644 index d98878487f5..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-options.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -id: forms-config-options -title: Options Source -description: How to configure an options source on a form field ---- - -## Configuring form field options sources - -Certain form fields work from a set of pre-populated options that your user will want to select from, such as the [Radio](../form-element-library/forms-element-library-radio.md) or [Select](../form-element-library/forms-element-library-select.md) fields. This source can be configured several ways, as described below. - -### Static configured on the form schema - -The options will be defined directly on your form schema, the only way to modify them in the future will be to change the form definition itself. - -The static options group will appear to allow the configuration of the individual options. - -![Static Options Group Image](../assets/static-options-example.png) - -For each of these options, a unique `value` corresponding to the form submitted data must be provided, as well as a `label` which will be displayed to the user filling in the form. You may configure as many options as you want by using the add (+) button in the group header. - -### Input data driven by process data - -The options are mapped from form variables, similarly to how [Form field data binding](./forms-config-data-binding.md) works. Here, the `Input values key` property within the `Dynamic Options` configuration group is used to set which form variable to use as a source. - -The expected format for the data is an array of options, each defining its label and value in JSON. 
The below example provides an equivalent configuration to the above statically defined one: - -```json -{ - "languageData": [ - { - "label": "French", - "value": "fr" - }, - { - "label": "English (UK)", - "value": "en-gb" - }, - { - "label": "German", - "value": "de" - } - ] -} -``` - -#### Shorthand definitions - -If the value and label are equal, shorthand formats may be used instead: - -```json -{ - "languageData": [{ "value": "fr" }, { "value": "en-gb" }, { "value": "de" }] -} -``` - -```json -{ - "languageData": ["fr", "en-gb", "de"] -} -``` - -#### Supported types - -The `label` parameter should be a `string`. - -If a label is provided, the `value` parameter can be any non-null type. Otherwise, it is restricted to types `string`, `number`, and `boolean`. diff --git a/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-templating-syntax.md b/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-templating-syntax.md deleted file mode 100644 index 12d4b69968a..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/configuration/forms-config-templating-syntax.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -id: forms-config-templating-syntax -title: Templating syntax -description: Learn about templated properties configuration, which provides dynamic content creation within forms using a templating language called feelers. ---- - -Templated properties configuration allows for dynamic content creation within forms using a templating language called [**feelers**](https://github.com/bpmn-io/feelers). - -## Feelers syntax - -### Variables/inserts - -To insert a variable, use double curly braces `{{variable}}`, and the value of this variable will be inserted. You can use **any valid [FEEL expression](../../feel/language-guide/feel-expressions-introduction.md)** within these double braces. - -``` -Hello {{username}}, you are {{if isAdmin then "an admin" else "a user"}}. -``` - -### Iterating through arrays - -Iterate through arrays using the _loop_ tags. Within the loop, reference each array element with `{{this}}`, or if your array elements are objects, via their properties. To access data outside the scope of the individual items, use the `{{parent}}` accessor. - -**Data** - -```json -{ - "currency": "$", - "items": [ - { - "name": "bananas", - "price": 2.5 - }, - { - "name": "mangos", - "price": 4 - }, - { - "name": "strawberries", - "price": 3 - } - ] -} -``` - -**Template** - -``` -{{#loop items}} -Item name: {{name}} -Item price: {{parent.currency}}{{price}} -{{/loop}} -``` - -### Conditional sections - -Conditionally render a section of your template using the `if` tags. This is a quick way to write out large blocks you may or may not want evaluated based on a condition: - -``` -{{#if user.isCook}} -Ingredients list: -{{#loop ingredients}} -* {{this}} -{{/loop}} -{{/if}} -``` - -## Additional details - -### Nest loops - -If you have an array of users, each with an array of purchases, you may loop over both in a nested manner: - -**Data** - -```json -{ - "users": [ - { - "name": "jane1995", - "purchases": ["mango", "strawberry"] - }, - { - "name": "rob1992", - "purchases": ["pineapple", "guava"] - } - ] -} -``` - -**Template** - -``` -{{#loop users}} -The user '{{name}}' purchased: -{{#loop purchases}} -* {{this}} -{{/loop}} -{{/loop}} -``` - -In this situation, you may need to use the `parent` accessor several times to access data outside the scope. 
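As a rough sketch of what that could look like, reusing the `users` data above and a hypothetical top-level `shop` key (this assumes the `parent` accessor can be chained, one level per enclosing loop):

```
{{#loop users}}
{{#loop purchases}}
* {{parent.name}} bought {{this}} at {{parent.parent.shop}}
{{/loop}}
{{/loop}}
```

Here `parent` refers to the current user object from inside the inner loop, and chaining it once more reaches the root data.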
- -### More on the `parent` and `this` accessors - -If the data you are using somehow already makes use of those keywords, there is an alternative syntax which surrounds it with an underscore: `_this_` and `_parent_`. - -**Data** - -```json -{ - "root": "nodes", - "nodes": [ - { - "id": "021321321", - "parent": "228321321" - }, - { - "id": "021321321", - "parent": "228321321" - } - ] -} -``` - -**Template** - -``` -Listing out all node paths: -{{#loop nodes}} -* http://www.myNodeWebsite/{{_parent_.root}}/{{id}} -{{/loop}} -``` - -In the example above, if you are not surrounding the parent accessor with underscores, you access the parent property of the node, which is not what we're looking for. This also applies to the `this` accessor. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-button.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-button.md deleted file mode 100644 index 48dfa89ad0c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-button.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: forms-element-library-button -title: Button -description: A form element to trigger form actions ---- - -A button allowing the user to trigger form actions. - -![Form Button Symbol](/img/form-icons/form-button.svg) - -### Configurable properties - -- **Field label**: Label to be displayed on top of the button. -- **Action**: The button can either trigger a **Submit** or a **Reset** action. - - **Submit**: Submit the form (given there are no validation errors). - - **Reset**: Reset the form, all user inputs will be lost. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the button. -- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-checkbox.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-checkbox.md deleted file mode 100644 index 5f0d197c519..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-checkbox.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: forms-element-library-checkbox -title: Checkbox -description: A form element to read and edit boolean data ---- - -A checkbox allowing the user to read and edit boolean data. - -![Form Checkbox Symbol](/img/form-icons/form-checkbox.svg) - -### Configurable properties - -- **Field label**: Label displayed besides the checkbox. -- **Field description**: Description provided below the checkbox. -- **Key**: Binds the field to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Default value**: Provides a default state for the checkbox in case no input data exists for the given key. -- **Disabled**: Disables the checkbox, for use during development. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox. -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. 
- - **Required**: Checkbox must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). - -### Datatypes - -Checkboxes can be bound to data of the `boolean` type. Any other datatype will be treated as a `false` by default. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-checklist.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-checklist.md deleted file mode 100644 index 881e91ce52e..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-checklist.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: forms-element-library-checklist -title: Checklist -description: A form element to select multiple values from set options ---- - -A set of checkboxes providing data multi-selection for small datasets. - -![Form Checklist Symbol](/img/form-icons/form-checklist.svg) - -### Configurable properties - -- **Field label**: Label displayed on top of the checklist. -- **Field description**: Description provided below the checklist. -- **Key**: Binds the field to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Options source**: Checklists can be configured with an options source defining the individual choices your user can make, see [options source docs](../configuration/forms-config-options.md). -- **Disabled**: Disables the checklist, for use during development. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checklist. -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: Checklist must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). - -### Datatypes - -Checklists can be bound to data of the `any[]` type, although for most practical cases we recommend `string[]` instead. The checklist component correlates the bound data with the values of the different options. - -The data representation of this checklist: - -![Checklist Selection Image](../assets/checklist-example.png) - -Looks like this: - -``` -{ - "mailto": [ - "regional-manager", - "approver" - ], -} -``` diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-datetime.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-datetime.md deleted file mode 100644 index f904cd820ac..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-datetime.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: forms-element-library-datetime -title: Datetime -description: Learn about the datetime form element to read and edit date and time data. ---- - -A component allowing the user to read and edit date and time data. 
- -![Form Datetime Symbol](/img/form-icons/form-datetime.svg) - -## Configurable properties - -- **Date label**: Label displayed beside the date input field. -- **Time label**: Label displayed beside the time input field. -- **Field description**: Description provided below the checkbox. -- **Key**: Binds the field to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Subtype**: Selects the type of the datetime component. This can either be **Date**, **Time**, or **Date & Time**. -- **Use 24h**: Enables 24-hour time format. -- **Disabled**: Disables the checkbox, for use during development. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the datetime component. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). -- **Time format**: Defines the time data format. This can either be **UTC offset**, **UTC normalized**, or **No timezone**. -- **Time interval**: Defines the steps of time that can be selected in the time input field. -- **Disallow past dates**: Enables the restriction to not allow past dates. -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: Datetime component must contain a value. - -## Datatypes - -Datetime components can be bound to data of the `string` type. The format of the string depends on the subtype: - -- **date**: ISO 8601 string of the format `YYYY-MM-DD`. -- **datetime**: ISO 8601 string of the format `YYYY-MM-DDTHH:MM`. Note that leading zeroes must be present in the hour and minutes (e.g., 01:30 not 1:30); this is an ISO 8601 requirement. -- **time**: String of the format `HH:MM`. Leading zeros can be omitted. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-image.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-image.md deleted file mode 100644 index 607b957fa16..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-image.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: forms-element-library-image -title: Image view -description: Learn about the image view form element to display an image. ---- - -An element allowing the user to display images. - -![Form Image Symbol](/img/form-icons/form-image.svg) - -## Configurable properties - -- **Image source**: Specifies the image source via [expression](../../feel/language-guide/feel-expressions-introduction.md) or [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (hyperlink or data URI). -- **Alternative text**: Provides an alternative text to the image in case it cannot be displayed. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the image. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). 
diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-number.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-number.md deleted file mode 100644 index c15c74eb666..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-number.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: forms-element-library-number -title: Number -description: A form element to read and edit numeric data ---- - -A number field allowing the user to read and edit numeric data. - -![Form Number Symbol](/img/form-icons/form-number.svg) - -### Configurable properties - -- **Field label**: Label displayed on top of the number field. -- **Field description**: Description provided below the number field. -- **Key**: Binds the field to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Default value**: Provides a default value for the number field in case no input data exists for the given key. -- **Decimal digits**: Defines the maximum number of digits after the decimal. -- **Increment**: Defines the increment between valid field values. -- **Disabled**: Disables the number field, for use during development. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the number. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). -- **Serialize to string**: Configures the output format of the datetime value. This enables unlimited precision digits. -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: Number field must contain a value. - - **Minimum**: Number field value must be at least `n`. - - **Maximum**: Number field value must be no larger than `n`. -- **Appearance**: Changes the visual appearance of the number field. - - **Prefix**: Adds an appendage before the input. - - **Suffix**: Adds an appendage after the input. - -### Datatypes - -Number can be bound to numeric data, or `strings` which can be parsed to numeric data (as per [JavaScript's tryParse](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/parseInt)), but will always output strictly `integer` data. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-radio.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-radio.md deleted file mode 100644 index 11ac3357955..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-radio.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: forms-element-library-radio -title: Radio -description: A form element to select a value from set options ---- - -A radio button allowing the user to select one of multiple data option for small datasets. - -![Form Radio Symbol](/img/form-icons/form-radio.svg) - -### Configurable properties - -- **Field label**: Label displayed above the radio component. -- **Field description**: Description provided below the radio component. 
-- **Key**: Binds the radio component to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Default value**: Provides a default selection in case no input data exists for the given key. Only available for _static_ options sources. -- **Disabled**: Disables the radio component, for use during development. -- **Options source**: Radio components can be configured with an options source defining the individual choices the component provides, see [options source docs](../configuration/forms-config-options.md). -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the radio. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: One radio option must be selected. - -### Datatypes - -Radio components can be bound to `any` data, but we recommend working with `strings`. The component will correlate the data value with the appropriate option defined in the options source. If no option is found, the data will simply be ignored. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-select.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-select.md deleted file mode 100644 index 255d1034860..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-select.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: forms-element-library-select -title: Select -description: A form element to select a value from set options ---- - -A select dropdown allowing the user to select one of multiple data options from larger datasets. - -![Form Select Symbol](/img/form-icons/form-select.svg) - -### Configurable properties - -- **Field label**: Label displayed above the select. -- **Field description**: Description provided below the select. -- **Key**: Binds the selected value to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Default value**: Provides a default selection in case no input data exists for the given key. Only available for _static_ options sources. -- **Searchable**: Allows the select entries to be searched via keyboard. -- **Disabled**: Disables the select, for use during development. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the select. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). -- **Options source**: Selects can be configured with an options source defining the individual choices the select provides, see [options source docs](../configuration/forms-config-options.md). -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: One select entry must be selected.
- -### Datatypes - -Select components can be bound to `any` data, but we recommend working with `strings`. The component will correlate the data value with the appropriate option defined in the options source. If no option is found, the data will simply be ignored. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-taglist.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-taglist.md deleted file mode 100644 index 23bc2bbfb4f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-taglist.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: forms-element-library-taglist -title: Taglist -description: A form element to select multiple values from set options ---- - -A complex and searchable tag based component providing multi-selection for large datasets. - -![Form Taglist Symbol](/img/form-icons/form-taglist.svg) - -### Configurable properties - -- **Field label**: Label displayed on top of the taglist. -- **Field description**: Description provided below the taglist. -- **Key**: Binds the field to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the taglist. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: Taglist must contain a value. -- **Options source**: Taglists can be configured with an options source defining the individual choices your user can make, see [options source docs](../configuration/forms-config-options.md). -- **Disabled**: Disables the taglist, for use during development. - -### Datatypes - -Taglists can be bound to data of the `any[]` type, although for most practical cases we recommend `string[]` instead. The Taglist component will correlate the bound data with the values of the different options defined for the component. - -The data representation of this taglist: - -![Checklist Selection Image](../assets/taglist-example.png) - -Would look like this: - -``` -{ - "cc_empl": [ - "john_doe", - "anna_belle" - ] -} -``` diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-text.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-text.md deleted file mode 100644 index 4da6171d127..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-text.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -id: forms-element-library-text -title: Text view -description: A form element to display static information. ---- - -A text component allowing to display static information to the user. - -![Form Text Symbol](/img/form-icons/form-text.svg) - -## Configurable properties - -- **Text**: Either an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). 
After evaluation, the result is processed using a Markdown renderer that supports basic HTML and [GitHub-flavored Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). To ensure safety and prevent cross-site scripting in Camunda Forms, potentially harmful HTML elements will not be rendered. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). - -## Example text configurations - -Note that these configurations work in combination with one another. You may use templating syntax to leverage Markdown and HTML. You may also mix Markdown and HTML in a single definition. - -**Markdown**: - -``` -# This is a heading - -This shows an image: -![alternative image text](https://someurl.com/image.png) - -## This is a sub-heading - -Text can be shown for example using -**bold**, or *italic* font. - -* This is an unordered list... -* ...with two list items - -1. This is an ordered list... -2. ...with two list items -``` - -**HTML**: - -``` -

-<h1>This is a heading</h1>
-
-This shows an image:
-<img src="https://someurl.com/image.png" alt="alternative image text">
-
-<h2>This is a sub-heading</h2>
-
-Text can be shown for example
-using <b>bold</b>, or <i>italic</i> font.
-
-<ul>
-  <li>This is an unordered list...</li>
-  <li>...with two list items</li>
-</ul>
-
-<ol>
-  <li>This is an ordered list...</li>
-  <li>...with two list items</li>
-</ol>
    -``` - -**Template syntax**: - -``` -{{#if usingTemplating}} - -Hello {{user.name}}, we are inside a conditional template block. - -Your hobbies are: -{{#loop user.hobbies}} -* {{this}} -{{/loop}} - -{{/if}} -``` diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-textarea.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-textarea.md deleted file mode 100644 index 64b2e086262..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-textarea.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: forms-element-library-textarea -title: Text area -description: Learn about the text area form element to read and edit multiline textual data. ---- - -A text area allowing the user to read and edit multiline textual data. - -![Form Textarea Symbol](/img/form-icons/form-textArea.svg) - -## Configurable properties - -- **Field label**: Label displayed on top of the text area. -- **Field description**: Description provided below the text area. -- **Key**: Binds the field to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Default value**: Provides a default value for the text area in case no input data exists for the given key. -- **Disabled**: Disables the text area; for use during development. -- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text area. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: Text area must contain a value. - - **Minimum length**: Text area must have at least `n` characters. - - **Maximum length**: Text area must not have more than `n` characters. - -## Datatypes - -Text area can be bound to `boolean`, `string`, and `number` data, but will coerce the data into a string, which will lead the data to be written back to the process as a `string` when the form is submitted. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-textfield.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-textfield.md deleted file mode 100644 index 376bfddb90c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library-textfield.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: forms-element-library-textfield -title: Textfield -description: A form element to read and edit textual data ---- - -A text field allowing the user to read and edit textual data. - -![Form Text Field Symbol](/img/form-icons/form-textField.svg) - -### Configurable properties - -- **Field label**: Label displayed on top of the text field. -- **Field description**: Description provided below the text field. -- **Key**: Binds the field to a form variable, see [data binding docs](../configuration/forms-config-data-binding.md). -- **Default value**: Provides a default value for the text field in case no input data exists for the given key. -- **Disabled**: Disables the text field, for use during development. 
-- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text field. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). -- **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - - **Required**: Text field must contain a value. - - **Regular expression validation**: Use predefined validation patterns. Available options are: `Email`, `Phone`, and `Custom`. - - **Minimum length**: Text field must have at least `n` characters. - - **Maximum length**: Text field must not have more than `n` characters. - - **Regular expression pattern**: Text field value must match the provided [RegEx](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions/Cheatsheet) pattern. -- **Appearance**: Changes the visual appearance of the text field. - - **Prefix**: Adds an appendage before the input. - - **Suffix**: Adds an appendage after the input. - -:::info - -The phone pattern adheres to the international [E.164](https://www.twilio.com/docs/glossary/what-e164) standard, omitting spaces; for example, `+491234567890`. - -For custom formats, use the `Custom` validation option. - -::: - -### Datatypes - -Text fields can be bound to `boolean`, `string`, and `number` data, but will coerce the data into a string, which will lead the data to be written back to the process as a `string` when the form is submitted. diff --git a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library.md b/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library.md deleted file mode 100644 index 3c4609ebc5d..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/form-element-library/forms-element-library.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -id: forms-element-library -title: Overview of Form Elements -description: Library of all currently available Form Elements ---- - -# Form Elements - -A form configuration starts off as a composition of **Form Elements** to define the structure. The specific behaviors are then defined as properties on the individual elements via the properties panel. - -Most form elements are intended to be bound to a **Form Variable** for the purpose of data entry, in which case we refer to them as **Form Fields**. Other elements may be used for layout purposes or to provide more specific functionality to the form which doesn't directly affect its state. - -The following form elements are currently available within Camunda Forms:
-| Symbol | Name | Description |
-| --- | --- | --- |
-| Text Symbol | Text view | Display static information |
-| Text Field Symbol | Text field | Read and edit textual data |
-| Text Area Symbol | Text area | Read and edit multiline textual data |
-| Number Field Symbol | Number field | Read and edit numeric data |
-| Datetime Symbol | Datetime | Read and edit date and time data |
-| Checkbox Symbol | Checkbox | Read and edit boolean data |
-| Radio Symbol | Radio | Small dataset single data selector |
-| Select Symbol | Select | Large dataset single data selector |
-| Checklist Symbol | Checklist | Small dataset multi data selector |
-| Taglist Symbol | Taglist | Large dataset multi data selector |
-| Image Symbol | Image view | Display images |
-| Button Symbol | Button | Trigger form actions |
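As a rough illustration of how several of the elements above bind to a single form data object (the keys below are hypothetical examples, not taken from the documentation), a form combining a text field, a number field, a checkbox, and a taglist might read and write data such as:

```
{
  "firstName": "Jane",
  "age": 42,
  "newsletter": true,
  "hobbies": [
    "reading",
    "cycling"
  ]
}
```

The value types mirror the datatypes listed for each element: `string` for text fields, numeric data for number fields, `boolean` for checkboxes, and `string[]` for taglists.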
    diff --git a/versioned_docs/version-8.2/components/modeler/forms/sidebar-schema.js b/versioned_docs/version-8.2/components/modeler/forms/sidebar-schema.js deleted file mode 100644 index 055108005cd..00000000000 --- a/versioned_docs/version-8.2/components/modeler/forms/sidebar-schema.js +++ /dev/null @@ -1,32 +0,0 @@ -const root_dir = "components/modeler/forms/"; -const lib_dir = root_dir + "form-element-library/"; -const config_dir = root_dir + "configuration/"; - -module.exports = { - "Camunda Forms": [ - root_dir + "camunda-forms-reference", - { - "Form Element Library": [ - lib_dir + "forms-element-library", - lib_dir + "forms-element-library-text", - lib_dir + "forms-element-library-textfield", - lib_dir + "forms-element-library-textarea", - lib_dir + "forms-element-library-number", - lib_dir + "forms-element-library-datetime", - lib_dir + "forms-element-library-checkbox", - lib_dir + "forms-element-library-radio", - lib_dir + "forms-element-library-select", - lib_dir + "forms-element-library-checklist", - lib_dir + "forms-element-library-taglist", - lib_dir + "forms-element-library-image", - lib_dir + "forms-element-library-button", - ], - }, - { - Configuration: [ - config_dir + "forms-config-data-binding", - config_dir + "forms-config-options", - ], - }, - ], -}; diff --git a/versioned_docs/version-8.2/components/modeler/img/data-handling-example-json.png b/versioned_docs/version-8.2/components/modeler/img/data-handling-example-json.png deleted file mode 100644 index 0fdda619a19..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/img/data-handling-example-json.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/business-rule-task-linking.md b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/business-rule-task-linking.md deleted file mode 100644 index c51ee6abc35..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/business-rule-task-linking.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: business-rule-task-linking -title: Business rule task linking -description: Use one of the following approaches to link the DMN decision to be called by a business rule task. ---- - -You can use one of the following approaches to link the DMN decision to be called by a [business rule task](/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md). - -## Using the link button - -1. Select a business rule task from the canvas and a link icon will appear at the bottom right. -2. Click the link icon and choose any decision from the same project. -3. Click **Link** to complete the linking process. In the properties panel, the value **DMN decision** is chosen for the **Implementation** property, and the Decision ID of the decision you chose to link is automatically copied to the **Called decision** section. - -![overlay](img/brt_overlay.png) - -:::note -For business rule tasks that are already linked, clicking on the link icon opens a dialog which shows the name of the decision the business rule task is linked to. It is possible to navigate to the linked decision by clicking on it, or you can use the **Unlink** button to remove the link. -::: - -![overlay](img/brt_linked.png) - -## Using the properties panel - -You may also enter the Decision ID directly in the **Called decision** section in the properties panel after selecting **DMN decision** for the **Implementation**. 
- -![overlay](img/brt_properties-panel.png) - -:::info -Deploying a diagram does not automatically deploy linked diagrams. Ensure you deploy linked diagrams separately. -::: diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/call-activity-linking.md b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/call-activity-linking.md deleted file mode 100644 index 8122d30552c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/call-activity-linking.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: call-activity-linking -title: Call activity linking -description: Use one of the following approaches to link the process to be called by a call activity. ---- - -You can use one of the following approaches to link the process to be called by a [call activity](/components/modeler/bpmn/call-activities/call-activities.md). - -### Using the link button - -1. Select a call activity task from the canvas and a link button will appear at the bottom right. -2. Click on the button and choose any diagram from the same project. -3. Click the **Link** button to complete the linking process. The process ID of the diagram you chose to link is automatically copied to the **Called element** section in the properties panel. - -![overlay](img/overlay.png) - -For call activities that are already linked, clicking on the link button opens a dialog which shows the name of the diagram the call activity is linked to. It is possible to navigate to the linked diagram by clicking on it, or you can use the **Unlink** button to remove the link. - -![overlay](img/linked.png) - -### Using the properties panel - -You may also enter the process ID directly in the **Called element** section in the properties panel. - -![overlay](img/properties-panel.png) - -:::info -Deploying a diagram does not automatically deploy linked diagrams. Ensure you deploy linked diagrams separately. 
-::: diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_linked.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_linked.png deleted file mode 100644 index 165910c601d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_linked.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_overlay.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_overlay.png deleted file mode 100644 index bf47ecf498f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_overlay.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_properties-panel.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_properties-panel.png deleted file mode 100644 index 6e9bd586a8f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/brt_properties-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-1.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-1.png deleted file mode 100644 index 9ea40b01675..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-2.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-2.png deleted file mode 100644 index 1aad91ed9ec..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-3.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-3.png deleted file mode 100644 index 6e9cefb4615..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/create-connector-template-3.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/fix-connector-template-problems-2.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/fix-connector-template-problems-2.png deleted file mode 100644 index 92158564df1..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/fix-connector-template-problems-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/fix-connector-template-problems.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/fix-connector-template-problems.png deleted file mode 
100644 index 341de0b0184..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/fix-connector-template-problems.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/replace-via-upload.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/replace-via-upload.png deleted file mode 100644 index ddb49ea79fc..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/replace-via-upload.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/upload-files.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/upload-files.png deleted file mode 100644 index cd92cb7255c..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/connector-templates/upload-files.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/linked.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/linked.png deleted file mode 100644 index 46ebd5d599e..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/linked.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/overlay.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/overlay.png deleted file mode 100644 index 6570ed594d4..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/overlay.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/properties-panel.png b/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/properties-panel.png deleted file mode 100644 index f3e9080a7f3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/advanced-modeling/img/properties-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/collaborate-with-modes.md b/versioned_docs/version-8.2/components/modeler/web-modeler/collaborate-with-modes.md deleted file mode 100644 index 36a70e9b828..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/collaborate-with-modes.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: collaborate-with-modes -title: Collaborate with modes -description: "Dedicated modes are ways for business and IT professionals to collaborate effectively." ---- - -Camunda 8 only - -Collaboration between business and IT professionals can be challenging, which is why we introduced three modes in BPMN diagrams that help users with different technical backgrounds to collaborate effectively: **design**, **implement**, and **play**. - -The **Design** mode view is tailored to business users, and the **Implement** and **Play** mode views are tailored to developers. - -:::note -**Play** mode is an alpha feature that is being progressively rolled out. Review the [Play documentation](/components/modeler/web-modeler/play-your-process.md) for details. 
-::: - -Business users can now focus on modeling, sharing, and collaborating, while developers can work on implementation and debugging with ease. - -When accessing a BPMN diagram for the [first time](/components/modeler/web-modeler/model-your-first-diagram.md), the **Design** mode is the first selected option. To switch between modes, you can select one of the tabs on the left side of the screen, above the diagram; any further selection is remembered and kept for the next sessions. - -![modes tab navigation](img/mode-tab-navigation.png) - -:::note -When a process template is selected, the default mode is **Implement**. -::: diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/collaboration.md b/versioned_docs/version-8.2/components/modeler/web-modeler/collaboration.md deleted file mode 100644 index e61378903a1..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/collaboration.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -id: collaboration -title: Collaborate with your team -description: Collaboration features and access rights for Web Modeler. ---- - -Camunda 8 only - -## Projects - -Files and folders are stored in projects. -The user access on files and folders is defined at the project level. - -When you access Web Modeler via the Camunda 8 dashboard, you see the **Home** page with all the projects you can access: -![home page](img/collaboration/web-modeler-home.png) - -### Access rights and permissions - -Users can have various levels of access to a project in Web Modeler, outlined in this section. - -After creating a project, you can invite members of your Camunda 8 organization to collaborate in Web Modeler. -There are four roles with different levels of access rights that can be assigned to each user: - -- **Project Admin**: The user can edit the project itself, all folders and diagrams within the project, and invite more users to collaborate. -- **Editor**: The user can edit all folders and diagrams within the project. -- **Commenter**: the user cannot edit folders or diagrams nor invite users, but can view and leave comments on diagrams. -- **Viewer**: The user cannot edit folders or diagrams nor leave comments, but can only view diagrams. - -### Inviting users to projects - -:::note -Web Modeler expects users to have an email address associated with their account in the identity management system to receive invitations correctly. -::: - -On the right side of a project, view a list of your collaborators and invite more by taking the steps below: - -1. Click **Add user**. - ![invite user](img/collaboration/web-modeler-collaborator-invite-modal-opened.png) - -2. Choose a role for your new collaborator. - ![invite choose role](img/collaboration/web-modeler-collaborator-invite-choose-role.png) - -3. Begin typing the name of the individual and Web Modeler will suggest Camunda 8 organization members that you can invite to the project. - ![invite suggestions](img/collaboration/web-modeler-collaborator-invite-suggestions.png) - -4. Write a message to your new collaborator about their invitation to the project. - ![invite type message](img/collaboration/web-modeler-collaborator-invite-type-message.png) - -5. Click **Send** and your new collaborator will receive an email with the invitation. - ![invite sent](img/collaboration/web-modeler-collaborator-invite-sent.png) - ![invite email](img/collaboration/web-modeler-collaborator-invite-email.png) - -### Folders - -You can create folders in a project to semantically group and organize your diagrams. 
-The user access on a folder is inherited from the project. - -## Sharing and embedding diagrams - -Diagrams can also be shared with others in read-only mode via a sharing link. -This link can also be protected with an additional password. - -1. Navigate to a diagram and click on the share icon button. - ![share button](img/collaboration/web-modeler-share-icon-button.png) - -2. Click **Create link**. - ![share create link](img/collaboration/web-modeler-share-modal.png) - -3. Click **Copy** to copy the link to your clipboard. - ![share copy link](img/collaboration/web-modeler-share-modal-create.png) - -4. Click **Add** and type a new password to protect your link. - ![share copy link](img/collaboration/web-modeler-share-modal-password-protect.png) - -5. Click **Email** to share the new link with multiple recipients. - ![share copy link](img/collaboration/web-modeler-share-modal-email.png) - -Similar to the sharing link, a diagram can be embedded into HTML pages via an iframe tag. The iframe tag can be copied from the sharing dialog via the **Embed** button. - -For wiki systems like [Confluence](https://www.atlassian.com/software/confluence), we recommend using the HTML macro and adding the iframe tag from the sharing dialog. This way, diagrams can be easily included in documentation pages. To adjust the dimensions of the diagram, the width and height values of the iframe tag can be modified. - -## Comments - -When selecting an element of the BPMN diagram, a discussion can be attached to this element. If no element is selected, the discussion will be attached directly to the diagram. -Switch between the **Properties Panel** and **Comments** using the two tabs present at the top of the right side panel. -![comment](img/collaboration/web-modeler-comment-type-here.png) - -New comments can be added to the discussion by any collaborator with Admin, Editor, or Commenter access rights. - -Afterwards, the comment can be edited or deleted via the context menu icon. -![comment context menu](img/collaboration/web-modeler-comment-with-context-menu.png) - -Elements with discussions attached will always have a visible blue overlay, so you can easily identify discussion points. -![comment context menu](img/collaboration/web-modeler-comment-overlay-on-diagram.png) - -### Mention others in comments - -By typing the **@** character, you are able to filter the collaborators on the project and select one of them. -![comment suggestion](img/collaboration/web-modeler-comment-mention-suggestions.png) - -When submitting the comment, this user will receive an email as a notification about the new comment. -![comment suggestion email](img/collaboration/web-modeler-comment-mention-email.png) - -## Interact with your collaborators - -### Model a diagram together - -When others are opening the same diagram as you, the updates on the diagram are sent in real time. You can also see who is in the diagram with you. -![real time collaboration](img/real-time-collaboration.png) - -### Draw other's attention - -Whether you are in a presentation or if others are in the same diagram as you are, use the attention grabber pointer to draw attention to a specific part of the diagram. To do this, take the following steps: - -1. Switch on the attention grabber pointer from the canvas tools. - ![attention grabber](img/attention-grabber.png) - -2. Drop the pointer by clicking anywhere on the canvas. 
- ![attention grabber gif](img/attention-grabber-pointer-pulse.gif) - -The pointer will pulsate to draw attention and will match your avatar color. -It can also be seen in real-time by others that are looking at the same diagram as you. diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/design-your-process.md b/versioned_docs/version-8.2/components/modeler/web-modeler/design-your-process.md deleted file mode 100644 index 33ebd4c8e34..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/design-your-process.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: design-your-process -title: Design mode for business users -description: "Design mode is tailored to business users and allows strategic modeling" ---- - -Camunda 8 only - -In the **Design** mode view, business users have access to a different workspace of Web Modeler with a reduced properties panel. Only comments are shown, which provides a decluttered user interface. - -All the technicalities, such as triggers to deploy the diagram or start the instance, are hidden. Linting is disabled, and problem annotations are discarded. - -The comments panel, inside the sidebar on the right-hand side of the screen, is collapsed when switching to the design mode, and when expanded, the state is persisted even when switching to another diagram. - -As a business user, you can [**link decision models**](/components/modeler/web-modeler/advanced-modeling/business-rule-task-linking.md) and [**process models**](/components/best-practices/modeling/creating-readable-process-models.md) via [call activities](/components/modeler/bpmn/call-activities/call-activities.md), and you can still be a [**project owner**](/components/modeler/web-modeler/collaboration.md#access-rights-and-permissions), even if you don't execute implementation. - -![design mode](img/design-mode.png) - -With the **Design** mode view, users can model a process without need for a complex development tool that does not speak their language. This provides a clear journey for the user, all while incorporating modeling, sharing, and collaborating in a user-friendly way. diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/fix-problems-in-your-diagram.md b/versioned_docs/version-8.2/components/modeler/web-modeler/fix-problems-in-your-diagram.md deleted file mode 100644 index 57cd3afc758..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/fix-problems-in-your-diagram.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: fix-problems-in-your-diagram -title: Fix problems in your diagram -description: This feature assists you in debugging and fixing errors in your processes. ---- - -### Design time errors - -Based on a set of lint rules, Web Modeler continuously validates implementation properties for a process diagram while the user is modeling. The validation errors are added to the panel at the bottom of Web Modeler. Expand the panel to view the errors by clicking the **Problems** header. The panel is collapsed by default and the latest state (expanded or collapsed) is remembered for the next time you open Web Modeler. - -![error panel](img/diagram-errors/error-panel.png) - -### Engine version selection - -The version selector at the top right in the problems panel can be used to choose the Zeebe version the diagram is validated against. 
The version chosen should match the Zeebe version of the cluster the diagram is going to be deployed in to get the correct set of errors (if you do not know the version, it is shown alongside the cluster name in the deployment dialog which can be opened by clicking the **Deploy diagram** button.) - -The version selector also provides information about the number of clusters available for each Zeebe version within the current organization. - -![error panel](img/diagram-errors/version-selector.png) - -### Interactivity - -The errors are interactive. Clicking on the row highlights the corresponding element in the canvas and points to the specific property in the properties panel where you can resolve the issue. - -![error panel](img/diagram-errors/interactivity.png) - -### Deploy time errors - -If all the design time errors are fixed and further errors are thrown by the engine when deploying the diagram, a separate output tab will open up where you can see the error thrown by the engine. - -![error panel](img/diagram-errors/engine-error.png) diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/attention-grabber-pointer-pulse.gif b/versioned_docs/version-8.2/components/modeler/web-modeler/img/attention-grabber-pointer-pulse.gif deleted file mode 100644 index 0719efdf3a3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/attention-grabber-pointer-pulse.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/attention-grabber.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/attention-grabber.png deleted file mode 100644 index 285672262b9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/attention-grabber.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/cloud-web-modeler-menu-item.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/cloud-web-modeler-menu-item.png deleted file mode 100644 index 3aacf5ffb9d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/cloud-web-modeler-menu-item.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png deleted file mode 100644 index 4dbd018b1e8..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-email.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-email.png deleted file mode 100644 index d18a0bd1bba..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-email.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png deleted file mode 100644 index 181ef2fe17c..00000000000 Binary files 
a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png deleted file mode 100644 index 7f58e0844db..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png deleted file mode 100644 index d5dfda7d208..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png deleted file mode 100644 index c03ecf4b021..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-email.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-email.png deleted file mode 100644 index e255630449c..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-email.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png deleted file mode 100644 index 89b87af5218..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png deleted file mode 100644 index e69b47a3b0a..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png deleted file mode 100644 index a25925bce92..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png 
b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png deleted file mode 100644 index 54660f1fd52..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-home.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-home.png deleted file mode 100644 index 6374c015832..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-home.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png deleted file mode 100644 index c30496a06b2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png deleted file mode 100644 index fff1f8ad0f1..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png deleted file mode 100644 index e535d5c54b8..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png deleted file mode 100644 index 86838c1950e..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png deleted file mode 100644 index 5219a796154..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/design-mode.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/design-mode.png deleted file mode 100644 index 967bf95c372..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/design-mode.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/engine-error.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/engine-error.png deleted file mode 100644 index bc8eb913b36..00000000000 Binary files 
a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/engine-error.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/error-panel.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/error-panel.png deleted file mode 100644 index 2f93dc6b674..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/error-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/interactivity.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/interactivity.png deleted file mode 100644 index 952d4de2436..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/interactivity.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/version-selector.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/version-selector.png deleted file mode 100644 index b015b144fc6..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/diagram-errors/version-selector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/fullscreen.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/fullscreen.png deleted file mode 100644 index 56a1ec70b71..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/fullscreen.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/implement-mode.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/implement-mode.png deleted file mode 100644 index c54fe4ef18b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/implement-mode.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png deleted file mode 100644 index cc2cda7fff3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png deleted file mode 100644 index e2c75332646..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png deleted file mode 100644 index 42613495b8b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png 
b/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png deleted file mode 100644 index 10466afdff0..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png deleted file mode 100644 index c21b5dc4567..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png deleted file mode 100644 index d98c2b940d0..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-menu-item.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-menu-item.png deleted file mode 100644 index a2e02fb927b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-menu-item.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png deleted file mode 100644 index 5018b3ef613..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png deleted file mode 100644 index f8f54538a3b..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png deleted file mode 100644 index ca70a36d5c9..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png deleted file mode 100644 index f5440304331..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png deleted file mode 100644 index 20fce005c76..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png deleted file mode 100644 index 2516876f300..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/minimap.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/minimap.png deleted file mode 100644 index 39941e40fe4..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/minimap.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/mode-tab-navigation.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/mode-tab-navigation.png deleted file mode 100644 index e804c323548..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/mode-tab-navigation.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-definition.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-definition.png deleted file mode 100644 index 18dad6f07f7..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-definition.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-example-data.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-example-data.png deleted file mode 100644 index ba69b11d418..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-example-data.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-instance.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-instance.png deleted file mode 100644 index 0ade41385f3..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-instance.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-rewind.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-rewind.png deleted file mode 100644 index c0a0ca9f4f8..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/play-rewind.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/real-time-collaboration.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/real-time-collaboration.png deleted file mode 100644 index 9516ed0aa6c..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/real-time-collaboration.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/reset-viewport.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/reset-viewport.png deleted file mode 100644 index 
d397219d0f8..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/reset-viewport.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/save-and-deploy-successful.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/save-and-deploy-successful.png deleted file mode 100644 index b09c8f441c5..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/save-and-deploy-successful.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/save-and-deploy.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/save-and-deploy.png deleted file mode 100644 index 43af4a3d7a6..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/save-and-deploy.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/signal-start-event.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/signal-start-event.png deleted file mode 100644 index 62ec613f45f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/signal-start-event.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/start-process-instance-done.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/start-process-instance-done.png deleted file mode 100644 index c00cafd7beb..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/start-process-instance-done.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/start-process-instance-variables.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/start-process-instance-variables.png deleted file mode 100644 index 9ee92a83bef..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/start-process-instance-variables.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/tasklist-processes.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/tasklist-processes.png deleted file mode 100644 index cb6797d0564..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/tasklist-processes.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/play.gif b/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/play.gif deleted file mode 100644 index 2f8cc467a01..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/play.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/speed.gif b/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/speed.gif deleted file mode 100644 index e529a347394..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/speed.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/start.gif b/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/start.gif deleted file mode 100644 index 82a22287933..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/start.gif and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/toggle.gif b/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/toggle.gif deleted file mode 100644 index 967dd4b21d8..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/token-simulation/toggle.gif and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-add-endevent.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-add-endevent.png deleted file mode 100644 index 1c9ccf8c843..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-add-endevent.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-add-task.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-add-task.png deleted file mode 100644 index a1880dd3aab..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-add-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-convert-to-timer.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-convert-to-timer.png deleted file mode 100644 index 41e4af96650..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-convert-to-timer.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-convert-to-webhook.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-convert-to-webhook.png deleted file mode 100644 index cab42f506a2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-convert-to-webhook.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-deploy-modal-healthy.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-deploy-modal-healthy.png deleted file mode 100644 index a708121affc..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-deploy-modal-healthy.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-deploy.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-deploy.png deleted file mode 100644 index 6597df42fed..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-deploy.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png deleted file mode 100644 index fe3d00c4fc2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-bpmn.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-bpmn.png deleted file mode 100644 index 027520cf222..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-bpmn.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-browse-templates.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-browse-templates.png deleted file mode 100644 index d25e0d66ed2..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-choose-browse-templates.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-modal.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-modal.png deleted file mode 100644 index a874e57451f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram-modal.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram.png deleted file mode 100644 index 232753c58b4..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-diagram.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-project.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-project.png deleted file mode 100644 index 3a8378db7d8..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-new-project.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-template-details-modal.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-template-details-modal.png deleted file mode 100644 index 12661e0449e..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-template-details-modal.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-templates-list-modal.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-templates-list-modal.png deleted file mode 100644 index 2eb8f127c4d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-new-user-templates-list-modal.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-start-instance-modal-healthy.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-start-instance-modal-healthy.png deleted file mode 100644 index 08fedba2b6f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-start-instance-modal-healthy.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-start-instance.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-start-instance.png deleted file mode 100644 index 4e50d016129..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-start-instance.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-webhook-panel.png 
b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-webhook-panel.png deleted file mode 100644 index 58d818853a0..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-webhook-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-with-end-event.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-with-end-event.png deleted file mode 100644 index c41a36414b7..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-with-end-event.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-with-end-event2.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-with-end-event2.png deleted file mode 100644 index f23df1ee18a..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/web-modeler-with-end-event2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/wrench.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/wrench.png deleted file mode 100644 index 419002de4f7..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/wrench.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/zoom-in.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/zoom-in.png deleted file mode 100644 index e0483a2a60d..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/zoom-in.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/img/zoom-out.png b/versioned_docs/version-8.2/components/modeler/web-modeler/img/zoom-out.png deleted file mode 100644 index a695f755a6f..00000000000 Binary files a/versioned_docs/version-8.2/components/modeler/web-modeler/img/zoom-out.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/implement-your-process.md b/versioned_docs/version-8.2/components/modeler/web-modeler/implement-your-process.md deleted file mode 100644 index 8db9636d616..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/implement-your-process.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: implement-your-process -title: Implement mode for developers -description: "Implement mode is tailored to developers and allows full access to an implementation-focused workspace." ---- - -Camunda 8 only - -In the **Implement** mode view, developers have access to a full, implementation-focused workspace of Web Modeler. - -The view offers all possible implementation details, and the problems panel (accessed by clicking **Problems** in the bottom left corner) shows all implementation problems that need fixing before deployment. - -The properties panel automatically opens when switching to the **Implement** mode, and if collapsed, it stays collapsed as long as you navigate between diagrams. - -Developers can switch between the modes as they like, and when they open a process template, it opens in **Implement**. 
- -![implement mode](img/implement-mode.png) diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/import-diagram.md b/versioned_docs/version-8.2/components/modeler/web-modeler/import-diagram.md deleted file mode 100644 index 41201206215..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/import-diagram.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: import-diagram -title: Import diagram -description: "You can import a BPMN or DMN diagram at any time with Web Modeler." ---- - -Camunda 8 only - -You can import a BPMN or DMN diagram at any time with Web Modeler, and there are several ways to accomplish this: - -- In a project, click **New > Upload files** and select the files from your computer. - ![import diagram](img/import-diagram/web-modeler-upload-file-menu-item.png) - ![import diagram](img/import-diagram/web-modeler-upload-file-choose.png) - ![import diagram](img/import-diagram/web-modeler-upload-file-completed.png) - -- In a project, drag one file from your computer and drop it. - ![import diagram](img/import-diagram/web-modeler-project-drag-and-drop.png) - -:::note -For the two options below, the content of the diagram is replaced with the content of the file. -::: - -- In a diagram, open the breadcrumb menu and choose **Replace via upload**. Then, select a file from your computer. - -![import diagram](img/import-diagram/web-modeler-replace-via-upload-menu-item.png) -![import diagram](img/import-diagram/web-modeler-replace-via-upload-choose.png) - -- In a diagram, drag one file from your computer and drop it onto the canvas. - ![import diagram](img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png) diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/launch-web-modeler.md b/versioned_docs/version-8.2/components/modeler/web-modeler/launch-web-modeler.md deleted file mode 100644 index fc9a40c6383..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/launch-web-modeler.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: launch-web-modeler -title: Launch Web Modeler -description: "Since Web Modeler is embedded in the Camunda 8 Console, it does not need to be started separately. To launch Web Modeler, take these steps." ---- - -Camunda 8 only - -Since Web Modeler is embedded in the Camunda 8 Console, it does not need to be started separately. - -To launch Web Modeler, follow the steps below: - -1. Click the **Modeler** tab at the top of the page. - ![cloud web modeler menu item](img/cloud-web-modeler-menu-item.png) -2. Click **New project** to create a new project to store all your diagrams. - ![web modeler empty home](img/web-modeler-new-user-new-project.png) - :::note - You can go back and rename your project at any time. - ::: -3. Click **New** and choose **Browse templates**. Alternatively, you can also create blank BPMN diagrams, DMN diagrams, or forms. - ![web modeler empty project](img/web-modeler-new-user-new-diagram-choose-browse-templates.png) -4. From the modal, hover a tile and select an existing template (i.e. Absence Request) by clicking on the **Use template** button. Alternatively, you can create a blank diagram by clicking on the **Create a blank model** tile. - ![web modeler templates list modal](img/web-modeler-new-user-templates-list-modal.png) -5. From the modal, you can also open the details of a specific template by clicking **Details**. 
- - In the details modal, you can find information about the author, the technical level, the description, and the categories of the selected template. You can also browse the diagram from the preview on the right side. To proceed with creating the diagram, click **Create model from template**. - ![web modeler template details modal](img/web-modeler-new-user-template-details-modal.png) - -Congrats! You just created your first diagram. -![web modeler new diagram created](img/web-modeler-new-user-new-diagram.png) diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/milestones.md b/versioned_docs/version-8.2/components/modeler/web-modeler/milestones.md deleted file mode 100644 index ce8c542900f..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/milestones.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: milestones -title: Milestones -description: Working with milestones in Web Modeler ---- - -Camunda 8 only - -## Milestones - -You can save a snapshot of your diagram as a milestone any time. - -If you make any mistakes while modeling, you can always go back to previously saved snapshots. You can also identify and compare the differences between two milestones. - -Find your milestones by opening the actions menu, and clicking on **History**. -![milestones](img/milestones/web-modeler-milestone-action-menu-item.png) - -### Creating milestones - -In the milestone history view, the latest version can be saved as a new milestone. -![milestones create via icon](img/milestones/web-modeler-milestone-create-via-icon.png) - -Alternatively, you can create a new milestone via the breadcrumb menu in the diagram view. -![milestones create via icon](img/milestones/web-modeler-milestone-create-via-breadcrumb.png) - -When dragging and dropping a file into the diagram view, or when using the **Replace via upload** option under the breadcrumb menu, a new milestone is created automatically. - -### Restoring milestones - -Hover over a milestone, click on the three vertical dots, and expand for more options. -![milestones restore](img/milestones/web-modeler-milestone-restore.png) -![milestones restore](img/milestones/web-modeler-milestone-restore-complete.png) - -### Comparing milestones - -Milestones can be compared visually. By enabling the diffing feature, the currently selected milestone is compared to its predecessor. - -The differences that are highlighted are only those that affect the execution of the BPMN process. Pure visual changes like position changes are not highlighted. -![milestones diffing](img/milestones/web-modeler-milestone-diffing.png) diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/model-your-first-diagram.md b/versioned_docs/version-8.2/components/modeler/web-modeler/model-your-first-diagram.md deleted file mode 100644 index b3ef8ba4100..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/model-your-first-diagram.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: model-your-first-diagram -title: Model your first diagram -description: "After you've created a BPMN diagram, you can start modeling it." ---- - -Camunda 8 only - -After you've created a BPMN diagram, you can start modeling it. - -We've preconfigured a diagram consisting of a start event. To convert it to something meaningful, append a **Task** to it, and afterwards append an **EndEvent**: - -![add task](img/web-modeler-add-task.png) -![add task](img/web-modeler-add-endevent.png) - -Each element has adjustable attributes. 
Use the properties panel on the right side of the page. - -Elements supporting different types can be reconfigured by clicking on the corresponding icon. In the following screenshot, a task has been added to the diagram. It can be converted to a service task, for example. - -![task configuration](img/web-modeler-new-diagram-with-configuration.png) - -Use the canvas tools in the bottom right corner to interact with your diagram. - -1. Zoom in. - ![zoom in](img/zoom-in.png) - -2. Zoom out. - ![zoom in](img/zoom-out.png) - -3. Reset viewport if you get lost on the canvas. - ![reset view port](img/reset-viewport.png) - -4. Open the minimap to navigate complex diagrams. - ![mini map](img/minimap.png) - -5. Enter the fullscreen mode for distraction-free modeling. - ![full screen](img/fullscreen.png) - -6. Drop an attention point and use it as a laser pointer in your presentations. - ![attention grabber](img/attention-grabber.png) diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/play-your-process.md b/versioned_docs/version-8.2/components/modeler/web-modeler/play-your-process.md deleted file mode 100644 index 140748a56a6..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/play-your-process.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -id: play-your-process -title: Play mode for rapid validation -description: "Play mode gives developers a playground environment to quickly iterate and manually test their processes." -keywords: - ["demo", "demoing", "collaboration", "rapid development", "Play", "Play mode"] ---- - -Camunda 8 only - -Play is a Zeebe-powered playground environment within Web Modeler for validating a process at any stage of development. Developers can debug their process logic, testers can manually test the process, and process owners can demo to stakeholders - all within Play. - -## Opening Play - -To use Play, open a BPMN diagram and click the **Play** mode. Read the [limitations and availability section](#limitations-and-availability) if you don't see it. - -You get a private Play environment that takes about 30 seconds to prepare and is automatically deleted after 20 minutes of inactivity. Even when the environment is deleted, your secrets persist in the browser's local storage. - -The current version of the active process and all its dependencies, like called processes or DMN files, are automatically deployed to the Play environment. An error or warning is raised if a file fails to deploy, is missing, or a Connector secret isn’t filled out. - -## Getting started with Play - -![play process definition view](img/play-definition.png) - -The first view in Play is the process definition view. It shows deployment problems, active process instances, and start events. - -Click a **start event's** play button to begin your process. Open the button's menu to start a process with variables. These variables can also be prefilled from the example data defined for the start event in the **Implement** mode. Play presents this example data in a readable JSON format, as illustrated below. See [data handling](/components/modeler/data-handling.md) for additional details. - -![play example data](img/play-example-data.png) - -## Play a process - -![play process instance view](img/play-instance.png) - -Click the action icons next to a task or event to play the process. - -The **Instance History** panel tracks the path taken throughout the diagram. - -The **Variables** panel tracks the data collected. Global variables are shown by default. 
To view local variables, select the corresponding task or event. Variables can be edited or added here, and Play supports JSON format to represent complex data. - -Play executes all logic of the process and its linked files, such as FEEL, forms, DMN tables, and outbound Connectors. - -However, actions in Play cannot be triggered by any external system, such as external user interfaces, job workers, message systems, or inbound Connectors. - -You have a few options to mock an external system: - -- In **Implement** mode, hard-code an example payload in the task or event **Output** section. -- When completing a task or event, use the secondary action to complete it with variables. - -Incidents are raised in Play just like in Operate. Use the variables and incident messages to debug the process instance. - -## Replay a process - -To replay a process, rewind to an earlier element by clicking on the **Rewind** button on a previously completed element. - -:::note -You can also return to the definition view by clicking **View all** on the top banner, or start a new process instance by clicking on the **Restart process** button on the start event. -::: - -### Rewind a process - -After completing part of your process, you can **rewind** to a previous element to test a different scenario. Play will start a new instance and replay your actions up to, but not including, the selected previous task. - -![rewind process](img/play-rewind.png) - -Play's rewind operation currently does not support the following elements: - -- Call activities -- Timer events that complete without being skipped - -If you completed an unsupported element before rewinding, you will rewind farther than expected. - -In addition, Play rewinds to an element, not to an element instance. For example, if you wanted to rewind your process to a sequential multi-instance service task which ran five times, it will rewind your process to the first instance of that service task. - -## Rapid iteration - -To make changes, switch back to **Implement** mode. When returning to Play, your process is redeployed. Play only shows process instances from the process’s most recent version, so you may not see your previous instances. - -Play saves your inputs when completing user task forms. It auto-fills your last response if you open the same form later in the session. You can click **Reset** to reset the form to its defaults. - -## Details - -Depending on the BPMN element, there may be a different action: - -- **User tasks** with an embedded form are displayed on click. However, you cannot track assignment logic. -- **Outbound Connectors** are executed as defined on click. -- **Call activities** can be navigated into and performed. -- **Timer events** are executed as defined, with the option to skip the wait. -- **Manual tasks**, **undefined tasks**, **script tasks**, **business rule tasks**, **gateways**, and other BPMN elements that control the process’s path are automatically completed based on their configuration. -- **Service tasks**, **inbound Connectors**, message-related tasks or events, and **timer catch events** are simulated on click. -- Many action icons have secondary actions. For example, **user tasks** can be completed with variables rather than a form, and **service tasks** can trigger an error event. - -## Operate vs. 
Play - -[Operate](/components/operate/operate-introduction.md) is designed to monitor many production process instances and intervene only as necessary, while Play is designed to drive a single process instance through the process and mock external systems. - -Both offer monitoring of a single process instance, its variables and path, incidents, and actions to modify or repair a process instance. Operate offers bulk actions and guardrails against breaking production processes, while Play offers a streamlined UX to run through scenarios quickly. - -## Limitations and availability - -:::note -Play is being rebuilt and progressively rolled out to more users. This section explains why you might not see the **Play** tab. -::: - -For Camunda 8 SaaS, Play is available to all Web Modeler users with editor or admin permissions within a project. -Enterprise users need an admin to enable Play by opting in to [alpha features](/components/console/manage-organization/enable-alpha-features.md). - -For Self-Managed, Play is controlled by the **PLAY_ENABLED** flag. It is `true` by default for the Docker distribution for development and `false` by default on the Kubernetes distribution for production use. - -Play uses Zeebe 8.2. Any BPMN elements unavailable in Zeebe 8.2, such as signal events, will not be available in Play. - -:::note -[Inbound Connectors](/components/connectors/connector-types.md#inbound-connectors) and [Connectors in hybrid mode](/guides/use-connectors-in-hybrid-mode.md) do not connect to external systems and must be completed manually. -[Decision table rule](/components/modeler/dmn/decision-table-rule.md) evaluations are not viewable. However, they can be inferred from the output variable. -::: - -There are some bugs related to Play’s architecture. These will be resolved when Play is integrated with Camunda development clusters. - -- **Timer events** sometimes fail silently. Try refreshing the page. -- Deployment sometimes fails with a network error in the **Output** panel of the **Implement** mode. If these errors repeat, let your Play cluster expire and try again. - -## Alpha feature - -Play is an alpha feature for a few reasons: - -- Play runs on community-built projects, as described in the [Zeebe-Play repository](https://github.com/camunda-community-hub/zeebe-play). - -- Play is run on completely isolated Camunda-hosted infrastructure from the core SaaS or Self-Managed Camunda deployment. It can only receive information from Web Modeler and can only communicate externally using Connectors and the user-defined secrets in the secret store or BPMN diagram. - - :::note - Play is not authenticated, so anyone with the URL can access it. You should not submit personal or confidential information to Play. - ::: - -- To mitigate this risk, a 34-character randomly generated UUID is in the URL. Each session lasts approximately 20 minutes, and the Play environment and its data are automatically deleted at the end of each session. You can reset the session timer by re-opening Play. - -For more information about terms, refer to our [licensing and terms page](https://legal.camunda.com/licensing-and-other-legal-terms#c8-saas-trial-edition-and-free-tier-edition-terms). 
diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/run-or-publish-your-process.md b/versioned_docs/version-8.2/components/modeler/web-modeler/run-or-publish-your-process.md deleted file mode 100644 index 176238b111c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/run-or-publish-your-process.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -id: run-or-publish-your-process -title: Run or publish your process -description: "Flexible options to run or publish a process in any environment and to any audience." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Camunda 8 only - -When you design a process in Camunda Modeler, you have multiple flexible options to either run or publish it on Camunda 8. This page explains the differences between running and publishing a process, and outlines the various options to publish a process into any environment, and to any audience. - -## Deploy a process - -Web Modeler autosaves all your changes on a diagram. If you change a diagram and it is autosaved, this has no effect on deployed or running processes your cluster(s). - -To make any change live in your cluster(s), you need to deploy it. If you deploy a process, it becomes available on the selected cluster and you can run or publish it. - -To deploy, click **Deploy** in the upper right corner of the modeling screen: - -![The deploy dialog of a BPMN diagram](img/web-modeler-deploy.png) - -### Before deploying a process - -- Make sure your process is free of errors, otherwise it can't be deployed. Use the [problems panel to detect and fix errors](./fix-problems-in-your-diagram.md). -- Make sure all dependent files are deployed first, such as DMN diagrams, forms, or called processes. You can use the [link tool](./advanced-modeling/call-activity-linking.md) to drill-down into linked resources and deploy them. -- Implement and run your [job workers](../../concepts/job-workers.md) if you use tasks such as service or send tasks. - -## Run a process - -Running a process means that you execute the process as a process instance on Camunda 8. It allows you to test and debug your process and see how it performs in a live environment. - -- [Test run using Play mode](#test-run-using-play-mode) -- [Run programmatically](#deploy-to-run-programmatically) -- [Run manually from Modeler](#run-manually-from-modeler) -- [Schedule via timer](#schedule-via-timer) - -### Test run using Play mode - -Before you publish or run a process, you can test it manually using the Play mode. With the Play mode, you can build and test your process iteratively in small steps. To enter the Play mode, click the Play tab in the top left corner of the modeling screen. Refer to the [Play mode documentation](./play-your-process.md) for details of how the Play environment works. - -### Run manually from Modeler - -You can also test your process thoroughly on a development cluster to see how it behaves in Operate and Tasklist, in order to run your job workers, and to access your running process instances [programmatically](#deploy-to-run-programmatically). To start a process instance manually, take the following steps: - -1. Click **Run** in the top right corner of the modeling screen. - -![Running a process from Web Modeler](img/web-modeler-start-instance.png) - -2. Select the target cluster. - -3. To test your process with data, you can also specify variables written to the process context at startup. The variables must be formatted in valid JSON. 
As an example, you can use the following JSON: - -```json -{ - "hello": "world" -} -``` - -4. Click on **Run** to confirm. This will start a process instance on the selected cluster. If required, it (re-)deploys the process beforehand on the cluster. - -After the process instance has been started, you will receive a notification with a link to the process instance view in [Operate](../../operate/operate-introduction.md). Follow this link to see the progress of the process instance and interact with it if required. - -:::info -Starting an instance from Web Modeler [deploys](#deploy-a-process) recent changes to the target cluster, which changes future runs of this process definition in case it has already been deployed and used. Existing process instances are not affected. -::: - -### Schedule via timer - -You can also schedule a process to run at a specific time or interval using timers. Timers can be added to one or multiple start events of your process. - -To schedule a process using a timer, follow these steps: - -1. Select the start event. -2. Change the start event type to a timer event using the **wrench tool**. - -![Converting the start event to a timer start event](img/web-modeler-convert-to-timer.png) - -3. [Configure the timer start event](../bpmn/timer-events/timer-events.md#timer-start-events) using the **properties panel** to define when the process should be executed. You can set the timer to trigger at a specific date and time or to repeat at a certain interval. - -4. Click on **Deploy** to [deploy](#deploy-a-process) the process. - -Once the process is deployed, the timer will be activated and the process will be executed at the scheduled time or interval. - -Read more in the [timers documentation](../bpmn/timer-events/timer-events.md). - -### Best practices for running a process - -- Use the [Play mode](#test-run-using-play-mode) to run a process instance with test data before running it with live data. -- Verify that the process is running as expected on a development cluster before running it with live data in your production environment. -- Use [Operate](../../operate/operate-introduction.md) to help you diagnose any problems with the process. - -:::tip -You can also define the success of your processes by setting key performance indicators (KPIs) for your process using [Optimize]($optimize$/components/what-is-optimize). -::: - -## Publishing a process - -Publishing a process means that you make it available to other users inside and outside of Camunda 8. Once published, other users can access and start instances of the process. 
- -You have the following options to publish a process: - -- [Deploy a process](#deploy-a-process) - - [Before deploying a process](#before-deploying-a-process) -- [Run a process](#run-a-process) - - [Test run using Play mode](#test-run-using-play-mode) - - [Run manually from Modeler](#run-manually-from-modeler) - - [Schedule via timer](#schedule-via-timer) - - [Best practices for running a process](#best-practices-for-running-a-process) -- [Publishing a process](#publishing-a-process) - - [Deploy to run programmatically](#deploy-to-run-programmatically) - - [Publish via webhook](#publish-via-webhook) - - [Publish to Tasklist](#publish-to-tasklist) - - [Listen to message or signal events](#listen-to-message-or-signal-events) - - [Best practices for publishing a process](#best-practices-for-publishing-a-process) - -### Deploy to run programmatically - -In order to be able to call a process programmatically from or inside another application or service, you simply have to [deploy](#deploy-a-process) it. Once deployed, you can run a process via our APIs, using an API client, or via one the various community SDKs. Read the [documentation on APIs & clients](../../../apis-tools/working-with-apis-tools.md) to learn more. - -### Publish via webhook - -You can publish a process via webhook, which allows you to integrate it easily with any system or service that can make an HTTP request. When a webhook is triggered in another system, it sends a HTTP request to a specified URL, which starts a process instance with the payload of the request. - -Follow these steps to publish a process via a webhook: - -1. Select the start event. -2. Switch your start event to a [HTTP webhook connector](/components/connectors/protocol/http-webhook.md) using the **wrench tool**. - ![Converting a start event to a webhook start event](img/web-modeler-convert-to-webhook.png) - -3. Define the webhook configuration in the properties panel of the start event. -4. Finally, [deploy the process](#deploy-a-process) to activate the webhook connector. - -When the process is deployed, the webhook URL can be found in the properties panel, and called from any outside system. - -![Webhook URL after a process has been deployed](img/web-modeler-webhook-panel.png) - -You have multiple options to ensure that the webhook connection is safe for use by your target audience only. Please refer to the [full documentation](/components/connectors/protocol/http-webhook.md) for configuration details. - -### Publish to Tasklist - -Publishing a process to Tasklist makes it available to users through the web-based [Tasklist application](../../tasklist/introduction-to-tasklist.md). - - - - -To publish a process to Tasklist, you first need to [deploy](#deploy-a-process) it. Once the process is deployed, it will automatically appear in the Tasklist application, where users can start new instances of the process. - - - - -To publish a process to Tasklist, you first need to [deploy](#deploy-a-process) it. Once the process is deployed, you need to [set permissions in Identity](../../../self-managed/tasklist-deployment/tasklist-authentication.md#resource-based-permissions) in order to make it accessible in the Tasklist application. - - - - -![Processes published to Tasklist](img/tasklist-processes.png) - -To learn more about publishing processes to Tasklist, refer to our [documentation on Tasklist](../../tasklist/userguide/using-tasklist.md#processes). 
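To make the **Deploy to run programmatically** option above more concrete, the following is a minimal sketch using the Zeebe Java client (one of several available API clients). It is not the only way to do this: the gateway address, the `usePlaintext()` setting, and the process id `order-process` are placeholders you would replace with your own cluster details and model; Camunda 8 SaaS clusters additionally require OAuth client credentials.

```java
import java.util.Map;

import io.camunda.zeebe.client.ZeebeClient;

public class StartProcessInstance {
  public static void main(String[] args) {
    // Assumes a locally reachable, unsecured gateway; adjust for your cluster.
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500") // placeholder address
        .usePlaintext()
        .build()) {

      // Start an instance of a process that has already been deployed,
      // passing the same example variables used in "Run manually from Modeler".
      client.newCreateInstanceCommand()
          .bpmnProcessId("order-process") // placeholder process id
          .latestVersion()
          .variables(Map.of("hello", "world"))
          .send()
          .join();
    }
  }
}
```

Likewise, once a process with a webhook start event is deployed (see **Publish via webhook** above), any system that can issue an HTTP request can start it. The sketch below uses Java's built-in HTTP client; the URL is a placeholder for the one shown in the webhook properties panel, and any authentication headers depend on how you secured the webhook.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TriggerWebhook {
  public static void main(String[] args) throws Exception {
    // Copy the real URL from the webhook properties panel after deployment.
    String webhookUrl = "https://example.com/your-webhook-url";

    HttpRequest request = HttpRequest.newBuilder(URI.create(webhookUrl))
        .header("Content-Type", "application/json")
        // The request body becomes the payload of the started process instance.
        .POST(HttpRequest.BodyPublishers.ofString("{\"hello\": \"world\"}"))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println("Webhook responded with status " + response.statusCode());
  }
}
```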
- -### Listen to message or signal events - -Camunda 8 supports message and signal events, which can be used to trigger a process instance when a specific event occurs. Everyone on the platform that knows the message or signal correlation keys can call such a process. To listen to a message or signal event, you need to define a [message](../bpmn/message-events/message-events.md#message-start-events) or [signal start event](../bpmn/signal-events/signal-events.md#signal-start-events) in your process model and configure it to listen for the desired event. Follow these steps to configure a message or signal start event: - -1. Select the start event. -2. Change the start event type to a message or signal start event using the **wrench tool**. -3. Configure the message or signal start event using the **properties panel** to define the message or signal to listen to. Using messages, you can create a 1:1 relationship between calling processes. With signals, you can create broadcast-like message distributions. -4. Click on **Deploy** to [deploy](#deploy-a-process) the process. - -As soon as a matching event is received, a process instance will be started. To learn more about message and signal events, refer to our [documentation on events](../bpmn/events.md). - -### Best practices for publishing a process - -- Use the [problems panel](./fix-problems-in-your-diagram.md) to make sure that the process free of errors before publishing it. -- Ensure the process works by testing it interactively using the [Play mode](./play-your-process.md). -- Use meaningful names and descriptions for the process and its elements. -- Document the process with clear instructions and details on how it should be used. -- Make sure that the process is accessible to the appropriate users only. - -:::note -When working on Camunda 8 Self-Managed, you can define access permissions on a per-process level using [Identity Resource Authorizations](../../../self-managed/identity/user-guide/authorizations/managing-resource-authorizations.md). -::: diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/save-and-deploy.md b/versioned_docs/version-8.2/components/modeler/web-modeler/save-and-deploy.md deleted file mode 100644 index ae6f5497f55..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/save-and-deploy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: save-and-deploy -title: Save and deploy your diagram -description: "If a diagram is changed and autosaved, it has no effect on your cluster." ---- - -Camunda 8 only - -Web Modeler will autosave all your changes on a diagram. The changes will also be visible in real-time to any collaborators opening the same diagram. - -If you change a diagram and it is autosaved, this has no effect on your cluster(s). - -If you deploy the diagram, it becomes available on the selected cluster and new instances can start. - -To deploy, click **Deploy diagram**: - -![save and deploy](img/web-modeler-deploy-modal-healthy.png) diff --git a/versioned_docs/version-8.2/components/modeler/web-modeler/token-simulation.md b/versioned_docs/version-8.2/components/modeler/web-modeler/token-simulation.md deleted file mode 100644 index 5fd00307b6c..00000000000 --- a/versioned_docs/version-8.2/components/modeler/web-modeler/token-simulation.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -id: token-simulation -title: Token simulation -description: You can use the token simulation feature to see how the process will behave when it is executed. 
---- - -You can use the token simulation feature to see how the process will behave when it is executed. - -### Turn on/off - -To turn the feature on/off, use the **Token Simulation** toggle (or the keyboard shortcut `T` inside the canvas). The modeling features will not work while you are in token simulation mode. - -![token simulation toggle](img/token-simulation/toggle.gif) - -### Start simulation - -The simulation can be started by triggering an event using the corresponding button: - -![token simulation start](img/token-simulation/start.gif) - -### Token simulation palette - -The palette on the left side provides the following controls: - -- Play/pause simulation -- Reset simulation -- Show simulation log - -![token simulation play](img/token-simulation/play.gif) - -### Animation speed palette - -The speed of the simulation can be changed using the controls in the bottom palette: - -![token simulation speed](img/token-simulation/speed.gif) diff --git a/versioned_docs/version-8.2/components/operate/img/operate-dashboard-no-processes_dark.png b/versioned_docs/version-8.2/components/operate/img/operate-dashboard-no-processes_dark.png deleted file mode 100644 index eff22424f41..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/img/operate-dashboard-no-processes_dark.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/img/operate-dashboard-no-processes_light.png b/versioned_docs/version-8.2/components/operate/img/operate-dashboard-no-processes_light.png deleted file mode 100644 index 15dcacd32f2..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/img/operate-dashboard-no-processes_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/img/operate-introduction.png b/versioned_docs/version-8.2/components/operate/img/operate-introduction.png deleted file mode 100644 index b4fabb99d54..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/img/operate-introduction.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/operate-introduction.md b/versioned_docs/version-8.2/components/operate/operate-introduction.md deleted file mode 100644 index dbf05ff9a2d..00000000000 --- a/versioned_docs/version-8.2/components/operate/operate-introduction.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: operate-introduction -title: Introduction -description: "Operate is a tool for monitoring and troubleshooting process instances running in Zeebe." ---- - -Operate is a tool for monitoring and troubleshooting process instances running in Zeebe. - - - -In addition to providing visibility into active and completed process instances, Operate also makes it possible to carry out key operations such as resolving [incidents](./userguide/resolve-incidents-update-variables.md), and updating process instance variables. - -![operate-introduction](img/operate-introduction.png) - -Learn how to use Operate to monitor process instances and more features in the [Operate user guide](/components/operate/userguide/basic-operate-navigation.md). - -Operate is also available for production use (with support) in the Camunda 8 offering. - -To try out Operate in Camunda 8, sign up [here](https://signup.camunda.com/accounts?utm_source=docs.camunda.io&utm_medium=referral). 
- -Because Operate can be a helpful tool when getting started with Zeebe and building an initial proof of concept, we make it available under the [Operate trial license](https://camunda.com/legal/terms/cloud-terms-and-conditions/general-terms-and-conditions-for-the-operate-trial-version/). There are no restrictions under this license when it comes to the length of the evaluation period or the available feature set _as long as you use Operate in non-production environments only._ diff --git a/versioned_docs/version-8.2/components/operate/userguide/assets/order-process.bpmn b/versioned_docs/version-8.2/components/operate/userguide/assets/order-process.bpmn deleted file mode 100644 index 843a703ee8f..00000000000 --- a/versioned_docs/version-8.2/components/operate/userguide/assets/order-process.bpmn +++ /dev/null @@ -1,145 +0,0 @@ - - - - - Flow_0biglsj - - - Flow_0yovrqa - - - Flow_1wtuk91 - Flow_1n8m1op - Flow_1fosyfk - - - =orderValue >= 100 - - - Flow_1g6qdv6 - Flow_0vv7a45 - Flow_0yovrqa - - - - - - - - Flow_0biglsj - Flow_09wy0mk - - - - Flow_09wy0mk - Flow_1wtuk91 - - - - - - - - - Flow_1fosyfk - Flow_0vv7a45 - - - - - - Flow_1n8m1op - Flow_1g6qdv6 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/versioned_docs/version-8.2/components/operate/userguide/basic-operate-navigation.md b/versioned_docs/version-8.2/components/operate/userguide/basic-operate-navigation.md deleted file mode 100644 index bbb764dd0f0..00000000000 --- a/versioned_docs/version-8.2/components/operate/userguide/basic-operate-navigation.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: basic-operate-navigation -title: Getting familiar with Operate -description: "An overview of navigating Operate and its features" ---- - -This section and the next section, [variables and incidents](./resolve-incidents-update-variables.md), assumes you’ve deployed a process to Zeebe and created at least one process instance. - -If you’re not sure how to deploy processes or create instances, visit our [Guides section](/guides/introduction-to-camunda-8.md). - -In the following sections, we’ll use the same [`order-process.bpmn`](./assets/order-process.bpmn) process model. - -## View a deployed process - -To view a deployed process, take the following steps: - -1. In the **Process Instances by Name** panel on your dashboard, note the list of your deployed processes and running instances. - -![operate-view-process](../img/operate-introduction.png) - -2. When you click on the name of a deployed process in the **Process Instances by Name** panel, you’ll navigate to a view of that process model and all running instances. - -![operate-view-process](./img/operate-view-process.png) - -3. From this **Processes** tab, you can cancel a single running process instance. - -![operate-cancel-process-instance](./img/operate-view-process-cancel.png) - -## Inspect a process instance - -Running process instances appear in the **Instances** section below the process model. To inspect a specific instance, click on the instance id. - -![operate-inspect-instance](./img/operate-process-instance-id.png) - -Here, see details about the process instance, including the instance history and the variables attached to the instance. 
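If you have not yet deployed `order-process.bpmn` or created an instance, the following is one way to do so — a minimal sketch using the Zeebe Java client, where the gateway address and `usePlaintext()` setting assume a local, unsecured cluster and the process id is assumed to match the id defined in the model.

```java
import io.camunda.zeebe.client.ZeebeClient;

public class DeployOrderProcess {
  public static void main(String[] args) {
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500") // placeholder address
        .usePlaintext()                    // assumes an unsecured local gateway
        .build()) {

      // Deploy the example model referenced throughout this guide.
      client.newDeployResourceCommand()
          .addResourceFromClasspath("order-process.bpmn")
          .send()
          .join();

      // Start one instance so the views shown above have data to display.
      client.newCreateInstanceCommand()
          .bpmnProcessId("order-process") // assumed to match the model's process id
          .latestVersion()
          .send()
          .join();
    }
  }
}
```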
- -![operate-view-instance-detail](./img/operate-view-instance-detail.png) - -To visualize the performance of process instances, we recommend utilizing [Optimize]($optimize$/components/what-is-optimize). diff --git a/versioned_docs/version-8.2/components/operate/userguide/delete-finished-instances.md b/versioned_docs/version-8.2/components/operate/userguide/delete-finished-instances.md deleted file mode 100644 index 38d94ae919b..00000000000 --- a/versioned_docs/version-8.2/components/operate/userguide/delete-finished-instances.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -id: delete-finished-instances -title: Delete finished instances -description: "Let's delete a canceled or completed process instance." ---- - -A finished process instance, meaning a canceled or a completed process instance, can be deleted from the **Processes** page or instance detail page. - -## Delete process instance from Processes page - -To delete a process instance from the **Processes** page, take the following steps: - -1. On the **Processes** page, apply the **Finished Instances** filter. - -![operate-view-finished-instances](./img/operate-instances-finished-instances.png) - -1. Click the trash can icon on any process instance you want to delete. - -![operate-perform-delete-operation](./img/operate-instances-click-delete-operation.png) - -1. Confirm the delete operation by clicking **Delete**. - -![operate-confirm-delete-operation](./img/operate-instances-delete-operation-confirm.png) - -4. In the **Operations** panel on the right side of the screen, view the deleted process instance. - -![operate-view-delete-operation](./img/operate-operations-panel-delete-operation.png) - -## Delete process instance from instance detail page - -1. On the **Processes** page, apply the **Finished Instances** filter. - -![operate-view-finished-instances-instance-detail](./img/operate-instance-detail-finished-instances.png) - -2. Navigate to the instance detail page by clicking the **Instance id** of the process instance you want to delete. - -![operate-navigate-finished-instance-detail](./img/operate-instance-detail-finished-instances-navigate.png) - -3. Click the delete icon. - -![operate-instance-detail-perform-delete](./img/operate-finished-instance-detail.png) - -1. Confirm the delete operation by clicking **Delete**. - -![operate-instance-detail-confirm-delete-operation](./img/operate-instance-detail-delete-operation-confirm.png) - -:::note -Use caution as the process instance is now deleted and you may not access it again. 
-::: - -![operate-instance-deleted-notification](./img/operate-instance-deleted-notification.png) diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-token-result.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-token-result.png deleted file mode 100644 index ecca2375314..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-token-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-token.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-token.png deleted file mode 100644 index 6bc33e679a1..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-variable-result.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-variable-result.png deleted file mode 100644 index d6807625ac2..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-variable-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-variable-to-new-scope.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-variable-to-new-scope.png deleted file mode 100644 index 4650e943a51..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/add-variable-to-new-scope.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/applied-modifications.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/applied-modifications.png deleted file mode 100644 index da37ae03d84..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/applied-modifications.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/apply-modifications-button.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/apply-modifications-button.png deleted file mode 100644 index af1f7d10dfd..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/apply-modifications-button.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/cancel-token-result.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/cancel-token-result.png deleted file mode 100644 index bf4afe7cc78..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/cancel-token-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/cancel-token.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/cancel-token.png deleted file mode 100644 index d23371d95bb..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/cancel-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-on-existing-scope.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-on-existing-scope.png deleted file mode 100644 
index 84e35dd4aa1..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-on-existing-scope.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-result.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-result.png deleted file mode 100644 index 66cf9c84080..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-value.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-value.png deleted file mode 100644 index c5bcaf46dff..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/edit-variable-value.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/enter-modification-mode.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/enter-modification-mode.png deleted file mode 100644 index b3342d5c441..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/enter-modification-mode.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/modification-mode.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/modification-mode.png deleted file mode 100644 index 807f8af7540..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/modification-mode.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/modification-summary-modal.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/modification-summary-modal.png deleted file mode 100644 index a5be26afa01..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/modification-summary-modal.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token-result.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token-result.png deleted file mode 100644 index 045d5859a70..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token-result.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token-select-target.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token-select-target.png deleted file mode 100644 index 11e173bc1d7..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token-select-target.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token.png deleted file mode 100644 index 21a1183de95..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/move-token.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/not-supported-flow-nodes.png 
b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/not-supported-flow-nodes.png deleted file mode 100644 index aca454bbc8c..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/not-supported-flow-nodes.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/select-new-scope.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/select-new-scope.png deleted file mode 100644 index c6d4835363a..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/select-new-scope.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/undo-modification.png b/versioned_docs/version-8.2/components/operate/userguide/img/modifications/undo-modification.png deleted file mode 100644 index 1641d7f0eb9..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/modifications/undo-modification.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-create-selection.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-create-selection.png deleted file mode 100644 index e1cb176e2a5..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-create-selection.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-finished-instance-detail.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-finished-instance-detail.png deleted file mode 100644 index d8de5297224..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-finished-instance-detail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-incident-resolved-path.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-incident-resolved-path.png deleted file mode 100644 index abf9bd2b089..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-incident-resolved-path.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-incident-resolved.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-incident-resolved.png deleted file mode 100644 index 63a865b4d00..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-incident-resolved.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-deleted-notification.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-deleted-notification.png deleted file mode 100644 index ab581b69b94..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-deleted-notification.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm.png deleted file mode 100644 index b7c56e932ef..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-delete-operation-confirm.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate.png deleted file mode 100644 index a246391d865..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-finished-instances-navigate.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-finished-instances.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-finished-instances.png deleted file mode 100644 index 7db6982b449..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instance-detail-finished-instances.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-click-delete-operation.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-click-delete-operation.png deleted file mode 100644 index 1b3bef9ac89..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-click-delete-operation.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-delete-operation-confirm.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-delete-operation-confirm.png deleted file mode 100644 index 4ee3dded6aa..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-delete-operation-confirm.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-finished-instances.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-finished-instances.png deleted file mode 100644 index 15d45c3faaa..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-instances-finished-instances.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-introduction.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-introduction.png deleted file mode 100644 index b4fabb99d54..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-introduction.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-many-instances-with-incident.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-many-instances-with-incident.png deleted file mode 100644 index a87af172053..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-many-instances-with-incident.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-operations-panel-delete-operation.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-operations-panel-delete-operation.png deleted file mode 100644 index 7c873bee50f..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-operations-panel-delete-operation.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-operations-panel.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-operations-panel.png deleted file mode 100644 index 
7913c0a2c26..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-operations-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-instance-id.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-instance-id.png deleted file mode 100644 index 3ba68fdca63..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-instance-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-retry-incident.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-retry-incident.png deleted file mode 100644 index f227b0d21e0..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-retry-incident.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-view-incident.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-view-incident.png deleted file mode 100644 index 933d8cf7fb5..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-process-view-incident.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-select-operation.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-select-operation.png deleted file mode 100644 index d36b2b10385..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-select-operation.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-detail.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-detail.png deleted file mode 100644 index 400822c0f96..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-detail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-edit-icon.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-edit-icon.png deleted file mode 100644 index eec8be60160..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-edit-icon.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-incident.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-incident.png deleted file mode 100644 index d05133ee169..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-incident.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-save-variable-icon.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-save-variable-icon.png deleted file mode 100644 index 20a212d8ba0..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-instance-save-variable-icon.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process-cancel.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process-cancel.png deleted file mode 100644 index 8c5cdcb983c..00000000000 Binary files 
a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process-cancel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process.png deleted file mode 100644 index e462ca509ef..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process_light.png b/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process_light.png deleted file mode 100644 index 466f3281707..00000000000 Binary files a/versioned_docs/version-8.2/components/operate/userguide/img/operate-view-process_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/operate/userguide/process-instance-modification.md b/versioned_docs/version-8.2/components/operate/userguide/process-instance-modification.md deleted file mode 100644 index b795f39f989..00000000000 --- a/versioned_docs/version-8.2/components/operate/userguide/process-instance-modification.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -id: process-instance-modification -title: Process instance modification -description: "You may need to modify an active process instance to allow execution to continue." ---- - -You may need to modify an active process instance to allow execution to continue. The execution may be stuck, and you may want to continue the execution on a different activity (i.e. skip or repeat activities). - -## Modification mode - -To enter modification mode, click the modify icon on the process instance header. - -![enter-modification-mode](./img/modifications/enter-modification-mode.png) - -The UI will change when you enter modification mode, including a blue banner at the top and two buttons for applying or discarding modifications at the bottom. - -![modification-mode](./img/modifications/modification-mode.png) - -## Cancel all running tokens on a flow node - -To cancel all running tokens on a flow node, take the following steps: - -1. Select the flow node you want to cancel all the running tokens on. - -2. Click **Cancel** from the dropdown. - -![cancel-token](./img/modifications/cancel-token.png) - -View the pending modification reflected in the instance history. - -![cancel-token-result](./img/modifications/add-token-result.png) - -## Add a new token to a flow node - -To add a new token to a flow node, take the following steps: - -1. Select the flow node you want to add the new token to. - -2. Click **Add** from the dropdown. - -![add-token](./img/modifications/add-token.png) - -View the pending modification reflected in the instance history. - -![add-token-result](./img/modifications/add-token-result.png) - -## Move all running tokens from one flow node to another - -The move operation is equivalent to the combination of **Cancel** and **Add** modifications. The modifications described previously can also be achieved with one single move modification. - -1. Select the flow node you want to move the running tokens from. - -2. Click **Move** from the dropdown. - -![move-token](./img/modifications/move-token.png) - -3. Select the flow node you want to move the running tokens to. - -![move-token-select-target](./img/modifications/move-token-select-target.png) - -View the pending modification reflected in the instance history. 
- -![move-token-result](./img/modifications/move-token-result.png) - -## Add variable to new scopes - -During the modification mode, if there are new scopes generated it will be possible to add variables to these new scopes by following these steps: - -1. Select the new scope from the instance history you want to add a variable to. - -![select-new-scope](./img/modifications/select-new-scope.png) - -2. Click **Add Variable** from the variables panel. - -![add-variable-to-new-scope](./img/modifications/add-variable-to-new-scope.png) - -3. Fill out the **Name** and **Value** fields for the variable you want to add. - -4. Once you blur out of the field (click anywhere on the screen other than the last edited variable field), assuming the fields have the valid values, the new variable will be added to the pending modifications. - -![add-variable-result](./img/modifications/add-variable-result.png) - -## Edit variable on existing scopes - -During modification mode it is possible to edit existing variables in existing scopes by following these steps: - -1. Select the existing scope from the instance history you want to edit variables on. - -![edit-variable-on-existing-scope](./img/modifications/edit-variable-on-existing-scope.png) - -2. Edit the variable value from the variables panel. - -![edit-variable-value](./img/modifications/edit-variable-value.png) - -3. Once you blur out of the field (click anywhere in the screen other than the last edited variable field), assuming the new value is valid, the **Edit Variable** modification will be added to the pending modifications. - -![edit-variable-result](./img/modifications/edit-variable-result.png) - -## View summary of pending modifications - -To display the pending modifications, click **Apply Modifications** in the footer. - -![apply-modifications-button](./img/modifications/apply-modifications-button.png) - -A modal will be displayed where all modifications can be seen. - -![modification-summary-modal](./img/modifications/modification-summary-modal.png) - -Within this modal, you can take the following actions: - -- (1) Delete any modification by clicking the **Delete Icon**. -- (2) View an added variable in a JSON Viewer. -- (3) View an edited variable in a Diff Viewer. -- (4) Cancel/close the modal and continue with modification mode. -- (5) Apply the modifications and exit modification mode. - -## Undo modification - -Clicking **Undo** from the modification footer will undo the latest modification. - -![undo-modification](./img/modifications/undo-modification.png) - -## Apply modifications - -If you click the **Apply** button from the summary modal as described [here](#view-summary-of-pending-modifications), and modification operation is created successfully, you will see a success notification and changes will be reflected in a short time. - -![applied-modifications](./img/modifications/applied-modifications.png) - -## Non-supported modifications - -Some elements do not support specific modifications: - -- Elements within multi-instance flow nodes are not supported for any kind of modification. -- **Add token**/**Move tokens to** modifications are not possible for the following type of elements: - - Start events - - Boundary events - - Events attached to event-based gateways -- **Move tokens from** modification is not possible for a subprocess itself. -- **Add token**/**Move tokens to** modifications are currently not possible for elements with multiple running scopes. 
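The same kinds of modifications can also be applied programmatically instead of through the Operate UI. The sketch below is only an illustration using the Zeebe Java client's modify-process-instance command, where activating a target element and terminating the stuck element instance corresponds to a "move"; the gateway address, process instance key, element ID, and element instance key are placeholder assumptions.

```java
import io.camunda.zeebe.client.ZeebeClient;

public class MoveTokenExample {
  public static void main(String[] args) {
    // Assumption: a local, unsecured Zeebe gateway.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500")
            .usePlaintext()
            .build()) {

      long processInstanceKey = 2251799813685249L;       // hypothetical process instance key
      long stuckElementInstanceKey = 2251799813685251L;  // hypothetical element instance key

      // Equivalent of a "move" modification: activate the target flow node
      // and terminate the element instance the token is currently stuck on.
      client
          .newModifyProcessInstanceCommand(processInstanceKey)
          .activateElement("ship-parcel") // hypothetical element ID
          .and()
          .terminateElement(stuckElementInstanceKey)
          .send()
          .join();
    }
  }
}
```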
- -![not-supported-flow-nodes](./img/modifications/not-supported-flow-nodes.png) diff --git a/versioned_docs/version-8.2/components/operate/userguide/resolve-incidents-update-variables.md b/versioned_docs/version-8.2/components/operate/userguide/resolve-incidents-update-variables.md deleted file mode 100644 index 7d6d7b8c7a9..00000000000 --- a/versioned_docs/version-8.2/components/operate/userguide/resolve-incidents-update-variables.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -id: resolve-incidents-update-variables -title: Variables and incidents -description: "Let's examine variable and incidents." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Every process instance created for the [`order-process.bpmn`](./assets/order-process.bpmn) process model requires an `orderValue` so the XOR gateway evaluation will happen properly. - -Let’s look at a case where `orderValue` is present and was set as a string, but our `order-process.bpmn` model required an integer to properly evaluate the `orderValue` and route the instance. - - - - - -``` -./bin/zbctl --insecure create instance order-process --variables '{"orderId": "1234", "orderValue":"99"}' -``` - - - - - -``` -./bin/zbctl.darwin --insecure create instance order-process --variables '{"orderId": "1234", "orderValue":"99"}' -``` - - - - - -``` -./bin/zbctl.exe --insecure create instance order-process --variables '{\"orderId\": \"1234\", \ -"orderValue\": \"99\"}' -``` - - - - -## Advance an instance to an XOR gateway - -To advance the instance to our XOR gateway, we’ll create a job worker to complete the `Initiate Payment` task: - - - - - -``` -./bin/zbctl --insecure create worker initiate-payment --handler cat -``` - - - - - -``` -./bin/zbctl.darwin --insecure create worker initiate-payment --handler cat -``` - - - - - -``` -./bin/zbctl.exe --insecure create worker initiate-payment --handler "findstr .*" -``` - - - - -We’ll publish a message that will be correlated with the instance, so we can advance past the `Payment Received` intermediate message catch event: - - - - - -``` -./bin/zbctl --insecure publish message "payment-received" --correlationKey="1234" -``` - - - - - -``` -./bin/zbctl.darwin --insecure publish message "payment-received" --correlationKey="1234" -``` - - - - - -``` -./bin/zbctl.exe --insecure publish message "payment-received" --correlationKey="1234" -``` - - - - -In the Operate interface, you should now observe the process instance has an [incident](/components/concepts/incidents.md), which means there’s a problem with process execution that must be fixed before the process instance can progress to the next step. - -![operate-incident-process-view](./img/operate-process-view-incident.png) - -## Diagnosing and resolving incidents - -Operate provides tools for diagnosing and resolving incidents. Let’s go through incident diagnosis and resolution step by step. - -When we inspect the process instance, we can see exactly what our incident is: `Expected to evaluate condition 'orderValue>=100' successfully, but failed because: Cannot compare values of different types: STRING and INTEGER` - -![operate-incident-instance-view](./img/operate-view-instance-incident.png) - -To resolve this incident, we must edit the `orderValue` variable so it’s an integer. To do so, take the following steps: - -1. Click on the edit icon next to the variable you’d like to edit. - -![operate-incident-edit-variable](./img/operate-view-instance-edit-icon.png) - -2. 
Edit the variable by removing the quotation marks from the `orderValue` value. -3. Click the checkmark icon to save the change. - -![operate-incident-save-variable](./img/operate-view-instance-save-variable-icon.png) - -We were able to solve this particular problem by _editing_ a variable, but it’s worth noting you can also _add_ a variable if a variable is missing from a process instance altogether. - -There’s one last step: initiating a “retry” of the process instance. There are two places on the process instance page where you can initiate a retry: - -![operate-retry-instance](./img/operate-process-retry-incident.png) - -You should now see the incident has been resolved, and the process instance has progressed to the next step. - -![operate-incident-resolved-instance-view](./img/operate-incident-resolved.png) - -## Complete a process instance - -If you’d like to complete the process instance, create a worker for the `Ship Without Insurance` task: - - - - - -``` -./bin/zbctl --insecure create worker ship-without-insurance --handler cat -``` - - - - - -``` -./bin/zbctl.darwin --insecure create worker ship-without-insurance --handler cat -``` - - - - - -``` -./bin/zbctl.exe --insecure create worker ship-without-insurance --handler "findstr .*" -``` - - - - -The completed process instance with the path taken: - -![operate-incident-resolved-path-view](./img/operate-incident-resolved-path.png) diff --git a/versioned_docs/version-8.2/components/operate/userguide/selections-operations.md b/versioned_docs/version-8.2/components/operate/userguide/selections-operations.md deleted file mode 100644 index 55b1a884a69..00000000000 --- a/versioned_docs/version-8.2/components/operate/userguide/selections-operations.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: selections-operations -title: Selections and operations -description: "In some cases, you’ll need to retry or cancel many process instances at once." ---- - -In some cases, you’ll need to retry or cancel many process instances at once. Operate also supports this type of operation. - -Imagine a case where many process instances have an incident caused by the same issue. At some point, the underlying problem will have been resolved (for example, maybe a microservice was down for an extended period of time, then was brought back up.) - -Though the underlying problem was resolved, the affected process instances are stuck until they are “retried." - -![operate-batch-retry](./img/operate-many-instances-with-incident.png) - -Let's create a **selection** in Operate. A selection is a set of process instances on which you can carry out a batch retry or batch cancellation. - -To create a selection, take the following steps: - -1. Check the box next to the process instances you'd like to include. -2. Click the blue **Apply Operation on N Instances** button. - -![operate-batch-retry](img/operate-create-selection.png) - -3. Select the operation you want to apply. - -![operate-batch-retry](./img/operate-select-operation.png) - -After confirmation, you can see the **Operations** panel with the current status of all operations. 
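A retry like the ones triggered above can also be issued through the Zeebe Java client rather than the Operate UI. The following is only a sketch that assumes you already know the relevant job and incident keys (for example from an exporter or from Operate): it raises the failed job's retries and then resolves the incident so the instance can continue.

```java
import io.camunda.zeebe.client.ZeebeClient;

public class RetryIncidentExample {
  public static void main(String[] args) {
    // Assumption: a local, unsecured Zeebe gateway.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500")
            .usePlaintext()
            .build()) {

      long failedJobKey = 2251799813685300L; // hypothetical job key
      long incidentKey = 2251799813685301L;  // hypothetical incident key

      // Give the failed job more retries so a worker can pick it up again...
      client.newUpdateRetriesCommand(failedJobKey).retries(3).send().join();

      // ...then resolve the incident so the process instance can progress.
      client.newResolveIncidentCommand(incidentKey).send().join();
    }
  }
}
```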
- -![operate-batch-retry](./img/operate-operations-panel.png) diff --git a/versioned_docs/version-8.2/components/tasklist/img/tasklist-introduction_light.png b/versioned_docs/version-8.2/components/tasklist/img/tasklist-introduction_light.png deleted file mode 100644 index 8fc6d384669..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/img/tasklist-introduction_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/img/tasklist-start-screen_light.png b/versioned_docs/version-8.2/components/tasklist/img/tasklist-start-screen_light.png deleted file mode 100644 index 5accbb26e4a..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/img/tasklist-start-screen_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/introduction-to-tasklist.md b/versioned_docs/version-8.2/components/tasklist/introduction-to-tasklist.md deleted file mode 100644 index 50f29d8c235..00000000000 --- a/versioned_docs/version-8.2/components/tasklist/introduction-to-tasklist.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: introduction-to-tasklist -title: Introduction -description: "Tasklist is a tool to work with user tasks in Zeebe." ---- - -Tasklist is a ready-to-use application to rapidly implement business processes alongside [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) in Zeebe. - -With Tasklist, orchestrate human workflows critical to your business and reduce time-to-value for your process orchestration projects with an interface for manual work. - -As you model a business process using BPMN and deploy it to the workflow engine, users are notified in Tasklist when they're assigned a task. - -Tasklist provides two APIs: a [GraphQL API](/docs/apis-tools/tasklist-api/tasklist-api-overview.md) -and a [REST API](/docs/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md). Both APIs provide equal capabilities, -allowing you to build your own applications or use the general [UI](/docs/components/tasklist/userguide/using-tasklist.md) that we have prepared for you. - -:::note -The GraphQL and REST APIs are currently available, but the GraphQL API will be deprecated in future releases, although it will still receive updates for a limited period. If you are building new applications, -we recommend using the REST API to ensure long-term compatibility. -::: - -Tasklist is also available for production use (with support) in the Camunda 8 offering. To try out Tasklist in Camunda 8, sign up [here](https://signup.camunda.com/accounts?utm_source=docs.camunda.io&utm_medium=referral). 
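As a rough illustration of building your own application on top of the REST API, the snippet below searches for open tasks. It is only a sketch: the base URL, the `/v1/tasks/search` endpoint, and the bearer-token authentication are assumptions you would adapt to your environment and to the Tasklist REST API reference.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TasklistSearchExample {
  public static void main(String[] args) throws Exception {
    HttpClient http = HttpClient.newHttpClient();

    // Assumptions: Tasklist reachable on localhost:8080 and a pre-obtained access token.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:8080/v1/tasks/search"))
        .header("Content-Type", "application/json")
        .header("Authorization", "Bearer " + System.getenv("TASKLIST_TOKEN"))
        .POST(HttpRequest.BodyPublishers.ofString("{\"state\": \"CREATED\"}"))
        .build();

    HttpResponse<String> response = http.send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.body()); // JSON list of open (CREATED) tasks
  }
}
```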
diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/order-icon.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/order-icon.png deleted file mode 100644 index d19e0686e7e..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/order-icon.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claim_light.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claim_light.png deleted file mode 100644 index 8c0455a1757..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claim_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me-empty_light.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me-empty_light.png deleted file mode 100644 index d264a008918..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me-empty_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me-list_light.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me-list_light.png deleted file mode 100644 index af80914c812..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me-list_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me_light.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me_light.png deleted file mode 100644 index 73d6b32ff90..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-claimed-by-me_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-complete-task_light.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-complete-task_light.png deleted file mode 100644 index 72145603490..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-complete-task_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes-search.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes-search.png deleted file mode 100644 index 850969e177f..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes-search.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes-start.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes-start.png deleted file mode 100644 index d69dee9fd89..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes-start.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes.png deleted file mode 100644 index a6d3a3f335f..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-processes.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-task-completed_light.png 
b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-task-completed_light.png deleted file mode 100644 index 02011be964b..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-task-completed_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-task-ordering.png b/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-task-ordering.png deleted file mode 100644 index 2d58a009477..00000000000 Binary files a/versioned_docs/version-8.2/components/tasklist/userguide/img/tasklist-task-ordering.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/tasklist/userguide/using-tasklist.md b/versioned_docs/version-8.2/components/tasklist/userguide/using-tasklist.md deleted file mode 100644 index 27db6563265..00000000000 --- a/versioned_docs/version-8.2/components/tasklist/userguide/using-tasklist.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -id: using-tasklist -title: Overview and example use case -description: "What you can do with Tasklist and an example use case." ---- - -## What can I do with Tasklist? - -Tasklist shows you all user tasks that appeared in processes; those processes are running in Zeebe. - -User tasks need an interaction from the user. This can be updating, adding variables, filling out a [Camunda Form](../../../guides/utilizing-forms.md), or simply completion of the task. The user must first claim a task or unclaim an already claimed task. - -If the user claimed a task, the task can be completed. Different task status filters help the user choose the desired task. - -:::note -When a user is granted Tasklist access, the user has full access to the respective process instance data. -::: - -## Example use case - -If you've successfully logged in, you'll see a screen similar to the following: - -![tasklist-start-screen](../img/tasklist-start-screen_light.png) - -On the left side of the screen, you can see tasks. On the right side of the screen, you can see details of the current selected task. - -Change the list of tasks by applying filters. You can also collapse and expand the task list. - -Click on the selection field in the left panel to choose which tasks you want to see: - -- All open -- Assigned to me -- Unassigned -- Completed - -Click on the icon ![order-icon](img/order-icon.png) to order the tasks. You can order them by the date of creation, the due date, or the follow up date. - -The follow up date defines the latest time you should start working on a task, helping you to prioritize work. -The due date provides a deadline when the task should be finished: - -![tasklist-task-ordering](img/tasklist-task-ordering.png) - -### Assign tasks - -When no tasks are assigned to you, the list appears empty - -![tasklist-claimed-by-me-empty](img/tasklist-claimed-by-me-empty_light.png) - -Select the **Unassigned** list and assign a task to yourself using the **Assign to me** button on the top panel: - -![tasklist-claim](img/tasklist-claim_light.png) - -### Work on assigned tasks - -Select the **Assigned to me** list to see the tasks that are assigned to you. Select a task to work on it. - -![tasklist-claimed-by-me-list](img/tasklist-claimed-by-me-list_light.png) - -### Complete a task - -When a task is assigned to you, you can complete the task by filling out the given form, and clicking on the Complete Task button. There are also cases where no form is available. In these cases, you have to add and/or update variables directly. 
- -![tasklist-claimed-by-me](img/tasklist-claimed-by-me_light.png) - -Always choose a list of tasks with a specified status. Then, select the task you want to work on. - -Complete the task and check if it is shown in the **Completed** list. - -Change variables as needed and begin completion with the **Complete Task** button. - -#### Add and update variables - -Update variables in the **Variables** section by adjusting their text field. - -To add a new variable, click **Add Variable**. - -![tasklist-complete-task](img/tasklist-complete-task_light.png) - -### Completed tasks - -You will now see the completed task by selecting the **Completed** task list: - -![tasklist-task-completed](img/tasklist-task-completed_light.png) - -### Processes - -It is possible to start processes by demand using Tasklist. To do this, click **Processes** in the top menu. All the processes you have access to start will be listed in the **Processes** page. - -![tasklist-processes](img/tasklist-processes.png) - -On the **Search** checkbox, it's possible to filter the processes. Start typing the process name and the list will be updated. - -![tasklist-processes-search](img/tasklist-processes-search.png) - -To start a process, click **Start Process** on the process you want to start. - -![tasklist-processes-search](img/tasklist-processes-start.png) - -Tasklist will then wait for the process to be executed. If the process generates a task, you will be redirected to the generated task. - -### I'm not seeing a process - -There could be multiple reasons why you are not seeing any process in the **Processes** tab: - -- There is no process deployed to your environment. -- For Self-Managed environments, permissions to start a process are managed in [Identity](docs/self-managed/identity/user-guide/authorizations/managing-resource-authorizations.md). It is likely your user doesn't yet have privileges to start processes on Tasklist. - -For all the above scenarios, contact your administrator to understand why no processes are displayed. diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/architecture.md b/versioned_docs/version-8.2/components/zeebe/technical-concepts/architecture.md deleted file mode 100644 index 8e0151ba683..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/technical-concepts/architecture.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -id: architecture -title: "Architecture" -description: "There are four main components in Zeebe's architecture: clients, gateways, brokers, and exporters." ---- - -There are four main components in Zeebe's architecture: clients, gateways, brokers, and exporters. - -![zeebe-architecture](assets/zeebe-architecture.png) - -In Camunda 8, you work exclusively with clients. Gateways, brokers, and exporters are pre-configured to provide the service, but are not accessible. - -In local or private cloud deployments, all components are relevant. - -## Clients - -Clients send commands to Zeebe to: - -- Deploy processes -- Carry out business logic - - Start process instances - - Publish messages - - Activate jobs - - Complete jobs - - Fail jobs -- Handle operational issues - - Update process instance variables - - Resolve incidents - -Client applications can be scaled up and down separately from Zeebe. The Zeebe brokers do not execute any business logic. - -Clients are libraries you embed in an application (e.g. a microservice that executes your business logic) to connect to a Zeebe cluster. 
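As a minimal sketch of such an embedded client (connection details and process ID below are placeholder assumptions), an application might open a client and send a command like this:

```java
import io.camunda.zeebe.client.ZeebeClient;

public class OrderApplication {
  public static void main(String[] args) {
    // Assumption: a Zeebe gateway listening locally without TLS.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500")
            .usePlaintext()
            .build()) {

      // Send a command: start an instance of a previously deployed process.
      client
          .newCreateInstanceCommand()
          .bpmnProcessId("order-process") // hypothetical process ID
          .latestVersion()
          .variables("{\"orderId\": \"1234\", \"orderValue\": 99}")
          .send()
          .join();
    }
  }
}
```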
- -Clients connect to the Zeebe gateway via [gRPC](https://grpc.io), which uses HTTP/2-based transport. To learn more about gRPC in Zeebe, review the [Zeebe API (gRPC)](/apis-tools/grpc.md). - -The Zeebe project includes officially-supported Java and Go clients. [Community clients](/apis-tools/community-clients/index.md) have been created in other languages, including C#, Ruby, and JavaScript. The gRPC protocol makes it possible to [generate clients](/apis-tools/build-your-own-client.md) in a range of different programming languages. - -### Job workers - -A job worker is a Zeebe client that uses the client API to first activate jobs, and upon completion, either complete or fail the job. - -## Gateways - -A gateway serves as a single entry point to a Zeebe cluster and forwards requests to brokers. - -The gateway is stateless and sessionless, and gateways can be added as necessary for load balancing and high availability. - -## Brokers - -The Zeebe broker is the distributed workflow engine that tracks the state of active process instances. - -Brokers can be partitioned for horizontal scalability and replicated for fault tolerance. A Zeebe deployment often consists of more than one broker. - -It's important to note that no application business logic lives in the broker. Its only responsibilities are: - -- Processing commands sent by clients -- Storing and managing the state of active process instances -- Assigning jobs to job workers - -Brokers form a peer-to-peer network in which there is no single point of failure. This is possible because all brokers perform the same kind of tasks and the responsibilities of an unavailable broker are transparently reassigned in the network. - -## Exporters - -The exporter system provides an event stream of state changes within Zeebe. This data has many potential uses, including but not limited to: - -- Monitoring the current state of running process instances -- Analysis of historic process data for auditing, business intelligence, etc. -- Tracking [incidents](/components/concepts/incidents.md) created by Zeebe - -The exporter includes an API you can use to stream data into a storage system of your choice. Zeebe includes an out-of-the-box [Elasticsearch exporter](https://github.com/camunda/camunda/tree/stable/8.2/exporters/elasticsearch-exporter), and other [community-contributed exporters](https://awesome.zeebe.io) are also available. 
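To make the exporter API more concrete, here is a hedged sketch of a custom exporter. It assumes the `io.camunda.zeebe.exporter.api` interfaces shipped with the broker and simply prints each record; a real exporter would forward records to an external system before acknowledging progress through the controller.

```java
import io.camunda.zeebe.exporter.api.Exporter;
import io.camunda.zeebe.exporter.api.context.Controller;
import io.camunda.zeebe.protocol.record.Record;

public class LoggingExporter implements Exporter {

  private Controller controller;

  @Override
  public void open(Controller controller) {
    // Called once when the exporter is started on a partition.
    this.controller = controller;
  }

  @Override
  public void export(Record<?> record) {
    // In a real exporter, write the record to your storage system here.
    System.out.println(record.toJson());

    // Acknowledge the position so the broker can eventually compact its log.
    controller.updateLastExportedRecordPosition(record.getPosition());
  }
}
```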
diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/activity-lifecycle.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/activity-lifecycle.png deleted file mode 100644 index b603cba33f7..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/activity-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/client-server.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/client-server.png deleted file mode 100644 index 67a693a8445..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/client-server.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/cluster.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/cluster.png deleted file mode 100644 index e7869e7a2ad..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/cluster.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/commit.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/commit.png deleted file mode 100644 index 1b57c518c8f..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/commit.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/data-distribution.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/data-distribution.png deleted file mode 100644 index 6632588188b..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/data-distribution.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/event-lifecycle.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/event-lifecycle.png deleted file mode 100644 index 329e1004845..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/event-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/internal-processing-job.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/internal-processing-job.png deleted file mode 100644 index baa0491081a..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/internal-processing-job.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/order-process.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/order-process.png deleted file mode 100644 index ea97e941d39..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/order-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/partition.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/partition.png deleted file mode 100644 index c60deb34726..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/partition.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/pass-through-lifecycle.png 
b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/pass-through-lifecycle.png deleted file mode 100644 index 5ade0b31af9..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/pass-through-lifecycle.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-conditions.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-conditions.png deleted file mode 100644 index 6b3483e9519..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-conditions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-data-flow.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-data-flow.png deleted file mode 100644 index 29b0470dd9a..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-data-flow.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-events.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-events.png deleted file mode 100644 index 499e5562651..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-events.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-parallel-gw.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-parallel-gw.png deleted file mode 100644 index b9208f0ec82..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-parallel-gw.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-parallel-mi.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-parallel-mi.png deleted file mode 100644 index 2ff63f00b68..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-parallel-mi.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-sequence.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-sequence.png deleted file mode 100644 index 55cebecee05..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process-sequence.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process.png deleted file mode 100644 index 8576c92b106..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/processes-data-based-conditions.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/processes-data-based-conditions.png deleted file mode 100644 index 63126a12e57..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/processes-data-based-conditions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/processes-parallel-gateway.png 
b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/processes-parallel-gateway.png deleted file mode 100644 index e32ce06f1b9..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/processes-parallel-gateway.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/task-workers-subscriptions.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/task-workers-subscriptions.png deleted file mode 100644 index e55e2650c65..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/task-workers-subscriptions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/zeebe-architecture.png b/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/zeebe-architecture.png deleted file mode 100644 index dd78e4d57ef..00000000000 Binary files a/versioned_docs/version-8.2/components/zeebe/technical-concepts/assets/zeebe-architecture.png and /dev/null differ diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/clustering.md b/versioned_docs/version-8.2/components/zeebe/technical-concepts/clustering.md deleted file mode 100644 index 2413e5047f9..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/technical-concepts/clustering.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -id: clustering -title: "Clustering" -description: "Zeebe can operate as a cluster of brokers, forming a peer-to-peer network." ---- - -Zeebe can operate as a cluster of brokers, forming a peer-to-peer network. - -In this network, all brokers have the same responsibilities and there is no single point of failure. - -![cluster](assets/cluster.png) - -## Gossip membership protocol - -Zeebe implements the [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) to know which brokers are currently part of the cluster. - -The cluster is bootstrapped using a set of well-known bootstrap brokers, to which the others can connect. To achieve this, each broker must have at least one bootstrap broker as its initial contact point in their configuration: - -```yaml ---- -cluster: - initialContactPoints: [node1.mycluster.loc:26502] -``` - -When a broker is connected to the cluster for the first time, it fetches the topology from the initial contact points and starts gossiping with the other brokers. Brokers keep cluster topology locally across restarts. - -## Raft consensus and replication protocol - -To ensure fault tolerance, Zeebe replicates data across servers using the [raft protocol](). - -Data is divided into partitions (shards). Each partition has a number of replicas. Among the replica set, a **leader** is determined by the raft protocol, which takes in requests and performs all of the processing. All other brokers are passive **followers**. When the leader becomes unavailable, the followers transparently select a new leader. - -Each broker in the cluster may be both leader and follower at the same time for different partitions. In an ideal world, this leads to client traffic distributed evenly across all brokers. - -![cluster](assets/data-distribution.png) - -:::note -There is no active load balancing across partitions. Each leader election for any partition is autonomous and independent of leader elections for other partitions. - -This may lead to one node becoming the leader for all partitions. This is not a problem for fault tolerance as the guarantees of replication remain. 
However, this may negatively impact throughput as all traffic hits one node. - -To reach a well-distributed leadership again, the [Rebalancing API](../../../self-managed/zeebe-deployment/operations/rebalancing.md) can be used. Be aware that this is on a best-effort basis. -::: - -## Commit - -Before a new record on a partition can be processed, it must be replicated to a quorum (typically majority) of brokers. This procedure is called **commit**. Committing ensures a record is durable, even in case of complete data loss on an individual broker. The exact semantics of committing are defined by the raft protocol. - -![cluster](assets/commit.png) diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/internal-processing.md b/versioned_docs/version-8.2/components/zeebe/technical-concepts/internal-processing.md deleted file mode 100644 index 22f2f0a862d..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/technical-concepts/internal-processing.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: internal-processing -title: "Internal processing" -description: "This document analyzes the state machines, events and commands, stateful stream processing, driving the engine, and handling backpressure within Zeebe." -keywords: [back-pressure, backpressure, back pressure] ---- - -Internally, Zeebe is implemented as a collection of **stream processors** working on record streams \(partitions\). The stream processing model is used since it is a unified approach to provide: - -- Command protocol \(request-response\), -- Record export \(streaming\), -- Process evaluation \(asynchronous background tasks\) - -Record export solves the history problem and the stream provides the kind of exhaustive audit log a workflow engine needs to produce. - -## State machines - -Zeebe manages stateful entities: jobs, processes, etc. Internally, these entities are implemented as **state machines** managed by a stream processor. - -An instance of a state machine is always in one of several logical states. From each state, a set of transitions defines the next possible states. Transitioning into a new state may produce outputs/side effects. - -Let's look at the state machine for jobs: - -![partition](assets/internal-processing-job.png) - -Every oval is a state. Every arrow is a state transition. Note how each state transition is only applicable in a specific state. For example, it is not possible to complete a job when it is in state `CREATED`. - -## Events and commands - -Every state change in a state machine is called an **event**. Zeebe publishes every event as a record on the stream. - -State changes can be requested by submitting a **command**. A Zeebe broker receives commands from two sources: - -- Clients send commands remotely. For example, Deploying processes, starting process instances, creating and completing jobs, etc. -- The broker itself generates commands. For example, locking a job for exclusive processing by a worker. - -Once received, a command is published as a record on the addressed stream. - -## Stateful stream processing - -A stream processor reads the record stream sequentially and interprets the commands with respect to the addressed entity's lifecycle. More specifically, a stream processor repeatedly performs the following steps: - -1. Consume the next command from the stream. -1. Determine whether the command is applicable based on the state lifecycle and the entity's current state. -1. If the command is applicable, apply it to the state machine. 
If the command was sent by a client, send a reply/response. -1. If the command is not applicable, reject it. If it was sent by a client, send an error reply/response. -1. Publish an event reporting the entity's new state. - -For example, processing the **Create Job** command produces the event **Job Created**. - -## Driving the engine - -As a workflow engine, Zeebe must continuously drive the execution of its processes. Zeebe achieves this by also writing follow-up commands to the stream as part of the processing of other commands. - -For example, when the **Complete Job** command is processed, it does not just complete the job; it also writes the **Complete Activity** command for the corresponding service task. -This command can in turn be processed, completing the service task and driving the execution of the process instance to the next step. - -## Handling backpressure - -When a broker receives a client request, it is written to the **event stream** first, and processed later by the stream processor. If the processing is slow or if there are many client requests in the stream, it might take too long for the processor to start processing the command. If the broker keeps accepting new requests from the client, the backlog increases and the processing latency can grow beyond an acceptable time. - -To avoid such problems, Zeebe employs a [backpressure](/self-managed/zeebe-deployment/operations/backpressure.md) mechanism. -When the broker receives more requests than it can process with an acceptable latency, it rejects some requests. - -Backpressure is indicated to the client by throwing a **resource exhausted** exception. If a client sees this exception, it can retry the requests with an appropriate retry strategy. If the rejection rate is high, it indicates the broker is constantly under high load and you need to reduce the rate of requests. Alternatively, you can also increase broker resources to adjust to your needs. In high-load scenarios, it is recommended to [benchmark](https://camunda.com/blog/2022/05/how-to-benchmark-your-camunda-platform-8-cluster/) your Zeebe broker up front to size it correctly. - -The maximum rate of requests that can be processed by a broker depends on the processing capacity of the machine, the network latency, current load of the system, etc. There is no fixed limit configured in Zeebe for the maximum rate of requests it accepts. Instead, Zeebe uses an adaptive algorithm to dynamically determine the limit of the number of in-flight requests (the requests that are accepted by the broker, but not yet processed). - -The in-flight request count is incremented when a request is accepted, and decremented when a response is sent back to the client. The broker rejects requests when the in-flight request count reaches the limit. diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/partitions.md b/versioned_docs/version-8.2/components/zeebe/technical-concepts/partitions.md deleted file mode 100644 index a892bc54da0..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/technical-concepts/partitions.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -id: partitions -title: "Partitions" -description: "In Zeebe, all data is organized into partitions. A partition is a persistent stream of process-related events." ---- - -In Zeebe, all data is organized into **partitions**. A **partition** is a persistent stream of process-related events. - -In a cluster of brokers, partitions are distributed among the nodes so it can be thought of as a **shard**. 
When you bootstrap a Zeebe cluster, you can configure how many partitions you need. - -:::note -If you've worked with the [Apache Kafka System](https://kafka.apache.org/) before, the concepts presented on this page will sound very familiar to you. -::: - -## Usage examples - -Whenever you deploy a process, you deploy it to the first partition. The process is then distributed to all partitions. On all partitions, this process receives the same key and version such that it can be consistently identified. - -:::note -To learn more about aligning partitions with the same version of deployment and why it is built this way, visit our [Zeebe Chaos blog](https://zeebe-io.github.io/zeebe-chaos/2021/01/26/deployments/#deployments). -::: - -When you start an instance of a process, the client library routes the request to one partition, and the process instance is created in that partition. All subsequent processing of the process instance happens in that partition. - -## Distribution over partitions - -When a process instance is created in a partition, its state is stored and managed by the same partition until its execution is terminated. The partition in which it is created is determined by various factors. - -- When a client sends a command `CreateProcessInstance` or `CreateProcessInstanceWithResult`, the gateway chooses a partition in a round-robin manner and forwards the request to that partition. The process instance is created in that partition. -- When a client publishes a message to trigger a **message start event**, the message is forwarded to a partition based on the correlation key of the message. The process instance is created on the same partition where the message is published. -- Process instances created by **timer start events** are always created on partition 1. - -## Scalability - -Use partitions to scale your process processing. Partitions are dynamically distributed in a Zeebe cluster and for each partition there is one leading broker at a time. This **leader** accepts requests and performs event processing for the partition. Let's assume you want to distribute the processing load over five machines. You can achieve that by bootstrapping five partitions. - -:::note -While each partition has one leading broker, _not all brokers are guaranteed to lead a partition_. A broker can lead more than one partition, and, at times, a broker in a cluster may act only as a replication back-up for partitions. This broker will not be doing any active work on processes until a partition fail-over happens and the broker gets elected as the new leader for that partition. -::: - -## Partition data layout - -A partition is a persistent append-only event stream. Initially, a partition is empty. The first entry inserted takes the first position, the second entry takes the second position, and so on. Each entry has a position in the partition which uniquely identifies it. - -![partition](assets/partition.png) - -## Replication - -For fault tolerance, data in a partition is replicated from the **leader** of the partition to its **followers**. Followers are other Zeebe broker nodes that maintain a copy of the partition without performing event processing. - -We recommend an **odd replication factor**, as it ensures high fault tolerance and availability. **Even replication factors** have no benefit over the previous odd value and are weaker than the next. 
- -For example, a replication factor of four has no benefit over a replication factor of three. A replication factor of four would be weaker than a replication factor of five. - -## Partition distribution - -If no other configuration is specified, partitions are distributed in a guaranteed round-robin fashion across all brokers in the cluster, considering the number of nodes, number of partitions, and the replication factor. For example, the first partition will always be hosted by the first node, plus the following nodes based on the replication factor. The second partition will be hosted on the second node and the following nodes, to fulfill the replication factor. - -As an example, the following partition schemes are guaranteed: - -### Example 1 - -#### Context - -- Number of nodes: 4 -- Number of partitions: 7 -- Replication factor: 3 - -#### Partition layout - -| | Node 1 | Node 2 | Node 3 | Node 4 | -| ----------: | :----: | :----: | :----: | :----: | -| Partition 1 | X | X | X | | -| Partition 2 | | X | X | X | -| Partition 3 | X | | X | X | -| Partition 4 | X | X | | X | -| Partition 5 | X | X | X | | -| Partition 6 | | X | X | X | -| Partition 7 | X | | X | X | - -### Example 2 - -#### Context - -- Number of nodes: 5 -- Number of partitions: 3 -- Replication factor: 3 - -#### Partition layout - -| | Node 1 | Node 2 | Node 3 | Node 4 | Node 5 | -| ----------: | :----: | :----: | :----: | :----: | :----: | -| Partition 1 | X | X | X | | | -| Partition 2 | | X | X | X | | -| Partition 3 | | | X | X | X | - -## Recommendations - -Choosing the number of partitions depends on the use case, workload, and cluster setup. Here are some rules of thumb: - -- For testing and early development, start with a single partition. Note that Zeebe's process processing is highly optimized for efficiency, so a single partition can already handle high event loads. -- With a single Zeebe broker, a single partition is usually enough. However, if the node has many cores and the broker is configured to use them, more partitions can increase the total throughput (around two threads per partition). -- Base your decisions on data. Simulate the expected workload, measure, and compare the performance of different partition setups. diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/process-lifecycles.md b/versioned_docs/version-8.2/components/zeebe/technical-concepts/process-lifecycles.md deleted file mode 100644 index ad556551c3a..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/technical-concepts/process-lifecycles.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -id: process-lifecycles -title: "Process lifecycles" -description: "In Zeebe, the process execution is represented internally by events of type `ProcessInstance`." ---- - -In Zeebe, the process execution is represented internally by events of type `ProcessInstance`. The events are written to the log stream and can be observed by an exporter. - -Each event is one step in a process instance lifecycle. All events of one process instance have the same `processInstanceKey`. - -Events which belong to the same element instance (e.g. a task) have the same `key`. The element instances have different lifecycles depending on the type of element. 
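Such records can be consumed by a custom exporter. The following is a minimal sketch — the package names assume the Zeebe 8.x exporter API, and the class name and log format are illustrative rather than part of this documentation — that prints each lifecycle event together with the keys described above:

```java
import io.camunda.zeebe.exporter.api.Exporter;
import io.camunda.zeebe.exporter.api.context.Controller;
import io.camunda.zeebe.protocol.record.Record;
import io.camunda.zeebe.protocol.record.ValueType;
import io.camunda.zeebe.protocol.record.value.ProcessInstanceRecordValue;

public class LifecycleLoggingExporter implements Exporter {

  private Controller controller;

  @Override
  public void open(Controller controller) {
    this.controller = controller;
  }

  @Override
  public void export(Record<?> record) {
    // Only ProcessInstance records carry the lifecycle intents shown below.
    if (record.getValueType() == ValueType.PROCESS_INSTANCE) {
      ProcessInstanceRecordValue value = (ProcessInstanceRecordValue) record.getValue();
      System.out.printf(
          "processInstanceKey=%d key=%d intent=%s elementId=%s%n",
          value.getProcessInstanceKey(),
          record.getKey(),
          record.getIntent(),
          value.getElementId());
    }
    // Acknowledge progress so the broker can compact the log behind this position.
    controller.updateLastExportedRecordPosition(record.getPosition());
  }
}
```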
- -## (Sub-)Process/Activity/Gateway lifecycle - -![activity lifecycle](assets/activity-lifecycle.png) - -## Event lifecycle - -![event lifecycle](assets/event-lifecycle.png) - -## Sequence flow lifecycle - -![sequence flow lifecycle](assets/pass-through-lifecycle.png) - -## Example - -![order process](assets/process.png) - -Given the above process, a successful execution yields the following records in the commit log: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Intent              | Element id         | Element type  |
| ------------------- | ------------------ | ------------- |
| ELEMENT_ACTIVATING  | order-process      | process       |
| ELEMENT_ACTIVATED   | order-process      | process       |
| ELEMENT_ACTIVATING  | order-placed       | start event   |
| ELEMENT_ACTIVATED   | order-placed       | start event   |
| ELEMENT_COMPLETING  | order-placed       | start event   |
| ELEMENT_COMPLETED   | order-placed       | start event   |
| SEQUENCE_FLOW_TAKEN | to-collect-money   | sequence flow |
| ELEMENT_ACTIVATING  | collect-money      | task          |
| ELEMENT_ACTIVATED   | collect-money      | task          |
| ELEMENT_COMPLETING  | collect-money      | task          |
| ELEMENT_COMPLETED   | collect-money      | task          |
| SEQUENCE_FLOW_TAKEN | to-fetch-items     | sequence flow |
| ...                 | ...                | ...           |
| SEQUENCE_FLOW_TAKEN | to-order-delivered | sequence flow |
| ELEMENT_ACTIVATING  | order-delivered    | end event     |
| ELEMENT_ACTIVATED   | order-delivered    | end event     |
| ELEMENT_COMPLETING  | order-delivered    | end event     |
| ELEMENT_COMPLETED   | order-delivered    | end event     |
| ELEMENT_COMPLETING  | order-process      | process       |
| ELEMENT_COMPLETED   | order-process      | process       |
    diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/protocols.md b/versioned_docs/version-8.2/components/zeebe/technical-concepts/protocols.md deleted file mode 100644 index 87b3f286554..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/technical-concepts/protocols.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: protocols -title: "Protocols" -description: "Let's discuss gRPC and supported clients." ---- - -Zeebe clients connect to brokers via a stateless gateway. - -For the communication between client and gateway, [gRPC](https://grpc.io/) is used. The communication protocol is defined using Protocol Buffers v3 ([proto3](https://developers.google.com/protocol-buffers/docs/proto3)), and you can find it in the -[Zeebe repository](https://github.com/camunda/camunda/tree/stable/8.2/gateway-protocol). - -## What is gRPC? - -gRPC was first developed by Google and is now an open source project and part of the Cloud Native Computing Foundation. - -If you’re new to gRPC, see [What is gRPC](https://grpc.io/docs/guides/index.html) on the project website for an introduction. - -## Why gRPC? - -gRPC has many beneficial features that make it a good fit for Zeebe, including the following: - -- Supports bi-directional streaming for opening a persistent connection and sending or receiving a stream of messages between client and server -- Uses the common HTTP/2 protocol by default -- Uses Protocol Buffers as an interface definition and data serialization mechanism–specifically, Zeebe uses proto3, which supports easy client generation in ten different programming languages. - -## Supported clients - -Currently, Zeebe officially supports two gRPC clients: one in [Java](/apis-tools/java-client/index.md), and one in [Golang](/apis-tools/go-client/go-get-started.md). - -[Community clients](/apis-tools/community-clients/index.md) have been created in other languages, including C#, Ruby, and JavaScript. - -If there is no client in your target language yet, you can [build your own client](/apis-tools/build-your-own-client.md) in a range of different programming languages. - -## Intercepting calls - -Zeebe supports [loading arbitrary gRPC server interceptors](self-managed/zeebe-deployment/zeebe-gateway/interceptors.md) to intercept incoming -calls. diff --git a/versioned_docs/version-8.2/components/zeebe/technical-concepts/technical-concepts-overview.md b/versioned_docs/version-8.2/components/zeebe/technical-concepts/technical-concepts-overview.md deleted file mode 100644 index 2d43766982a..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/technical-concepts/technical-concepts-overview.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: technical-concepts-overview -sidebar_label: "Overview" -title: "Technical concepts" -description: "This section gives an overview of Zeebe's underlying technical concepts." ---- - -This section gives an overview of Zeebe's underlying technical concepts. - -- [Architecture](architecture.md) - Introduces you to the internal components of Zeebe, as well as interfaces for external systems to interact with Zeebe. -- [Clustering](clustering.md) - Discusses the internal structure and properties of a Zeebe cluster. -- [Partitions](partitions.md) - Sheds light on how Zeebe achieves horizontal scalability. -- [Internal processing](internal-processing.md) - Explains the basics of Zeebe's event processing. 
-- [Process lifecycles](process-lifecycles.md) - Expands on the event processing concept and goes into more detail regarding the lifecycles of selected process elements. -- [Protocols](protocols.md) - Explains how external clients communicate with Zeebe. - -In addition to these sections, you may also be interested in our [Best Practices](/components/best-practices/best-practices-overview.md). diff --git a/versioned_docs/version-8.2/components/zeebe/zeebe-overview.md b/versioned_docs/version-8.2/components/zeebe/zeebe-overview.md deleted file mode 100644 index 85bbfa8deb7..00000000000 --- a/versioned_docs/version-8.2/components/zeebe/zeebe-overview.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: zeebe-overview -title: "Zeebe" -sidebar_label: "Introduction" -description: "Zeebe is the process automation engine powering Camunda 8." ---- - -Zeebe is the process automation engine powering Camunda 8. While written in Java, you do not need to be a Java developer to use Zeebe. - -With Zeebe you can: - -- Define processes graphically in [BPMN 2.0](../modeler/bpmn/bpmn-coverage.md). -- Choose any [gRPC](/apis-tools/grpc.md)-supported programming language to implement your workers. -- Build processes that react to events from Apache Kafka and other messaging platforms. -- Use as part of a software as a service (SaaS) offering with Camunda 8 or deploy with Docker and Kubernetes (in the cloud or on-premises) with Camunda 8 Self-Managed. -- Scale horizontally to handle very high throughput. -- Rely on fault tolerance and high availability for your processes. -- Export processes data for monitoring and analysis (currently only available through the [Elasticsearch exporter](../../self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md) added in Camunda 8 Self-Managed). -- Engage with an active community. - -For documentation on deploying Zeebe as part of Camunda 8 Self-Managed, refer to the [deployment guide](../../self-managed/zeebe-deployment/zeebe-installation.md). - -## Enterprise support for Zeebe - -Paid support for Zeebe is available via either Camunda 8 Starter or Camunda 8 Enterprise. Customers can choose either plan based on their process automation requirements. Camunda 8 Enterprise customers also have the option of on-premises or private cloud deployment. - -Additionally, regardless of how you are working with Zeebe and Camunda 8, you can always find support through the [community](/contact/). - -## Next steps - -- Get familiar with [technical concepts](technical-concepts/technical-concepts-overview.md). 
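The "choose any gRPC-supported programming language to implement your workers" point above can be made concrete with a small example. This is a hedged sketch using the Zeebe Java client; the gateway address, the plaintext connection, and the job type `payment-service` are illustrative assumptions rather than values from this documentation:

```java
import io.camunda.zeebe.client.ZeebeClient;

public class PaymentWorker {

  public static void main(String[] args) {
    // Connection details are placeholders for a local cluster; SaaS connections use OAuth + TLS.
    try (ZeebeClient client =
            ZeebeClient.newClientBuilder()
                .gatewayAddress("localhost:26500")
                .usePlaintext()
                .build();
        var worker =
            client
                .newWorker()
                .jobType("payment-service") // assumed job type of a BPMN service task
                .handler((jobClient, job) -> {
                  // ... call the actual payment system here ...
                  jobClient.newCompleteCommand(job.getKey()).send().join();
                })
                .open()) {
      // Block so the worker keeps polling for jobs; use a proper lifecycle in real code.
      Thread.currentThread().join();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}
```

The worker polls the cluster for jobs of its type, runs the business logic, and completes each job, which drives the process instance to its next step.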
diff --git a/versioned_docs/version-8.2/guides/assets/analysis.png b/versioned_docs/version-8.2/guides/assets/analysis.png deleted file mode 100644 index d9a610f960c..00000000000 Binary files a/versioned_docs/version-8.2/guides/assets/analysis.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/assets/dashboard.png b/versioned_docs/version-8.2/guides/assets/dashboard.png deleted file mode 100644 index 14b563e8e0c..00000000000 Binary files a/versioned_docs/version-8.2/guides/assets/dashboard.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/assets/heatmap.png b/versioned_docs/version-8.2/guides/assets/heatmap.png deleted file mode 100644 index 0e63f7a4ec9..00000000000 Binary files a/versioned_docs/version-8.2/guides/assets/heatmap.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/assets/react-components/create-cluster.md b/versioned_docs/version-8.2/guides/assets/react-components/create-cluster.md deleted file mode 100644 index 0c1b7afaaf5..00000000000 --- a/versioned_docs/version-8.2/guides/assets/react-components/create-cluster.md +++ /dev/null @@ -1,21 +0,0 @@ ---- ---- - -To deploy and run your process, you must create a cluster in Camunda 8. - -1. To create a cluster, navigate to **Console** by clicking the square-shaped icon labeled **Camunda components** in the top left corner, and click **Console**. -2. Click the **Clusters** tab, and click **Create new cluster**. -3. Name your cluster. For the purpose of this guide, we recommend using the **Stable** channel and the latest generation. Additionally, select your region. Click **Create cluster**. -4. Your cluster will take a few moments to create. Check the status on the **Clusters** page or by clicking into the cluster itself and looking at the **Applications** section. - -Even while the cluster shows a status **Creating**, you can still proceed to begin modeling. - -:::note -Zeebe must show a status of **Healthy** to properly deploy your model. -::: - -## Development clusters - -Starter plan users have the option to create **development clusters** offering free execution for development. This must be enabled through your [billing reservations](/components/console/manage-plan/update-billing-reservations.md). - -Visit the [clusters page](/components/concepts/clusters.md) to learn more about the differences between **development clusters** and **production clusters**. diff --git a/versioned_docs/version-8.2/guides/automating-a-process-using-bpmn.md b/versioned_docs/version-8.2/guides/automating-a-process-using-bpmn.md deleted file mode 100644 index 52d3ff226b7..00000000000 --- a/versioned_docs/version-8.2/guides/automating-a-process-using-bpmn.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -id: automating-a-process-using-bpmn -title: Design a process using BPMN -description: A quickstart on how to use BPMN, an easy-to-adopt visual modeling language, together with Camunda to automate your business processes. -keywords: [workflow, modeling] ---- - -Beginner -Time estimate: 20 minutes - -Business Process Model and Notation (BPMN) is the global standard for process modeling. Combining BPMN, an easy-to-adopt visual modeling language, with Camunda, you can automate your business processes. - -Processes are the algorithms that determine how an organization runs based on independent tasks. Successful businesses grow from proven, effective processes. 
Therefore, Camunda’s workflow engine executes processes defined in BPMN to ensure these processes can be swiftly orchestrated within a diagram. - -Take the following example where we've outlined a process in a BPMN diagram to send an email. Don't worry too much about the symbols as we'll get to that shortly. For now, recognize the start and end of the process, comprised of entering a message, and sending the email. - -![sending email bmmn diagram](./img/simple-bpmn-process.png) - -BPMN offers control and visibility over your critical business processes in a way that is understandable for both experienced engineers and business stakeholders. The workflow engine orchestrates processes that span across a wide variety of elements, including APIs, microservices, business decisions and rules, human work, IoT devices, RPA bots, and more. - -## Set up - -Begin by building your BPMN diagrams with [Modeler](../components/modeler/about-modeler.md). -To get started, ensure you’ve [created a Camunda 8 account](/guides/create-account.md). - -## Getting started with BPMN - -Once logged in to your Camunda 8 account, take the following steps: - -1. Within Modeler, click **Create new file > BPMN Diagram**. -2. Right after creating your diagram, you can name it by replacing the **New BPMN Diagram** text with the name of your choice. In this case, we'll name it "Bake a Cake." - -### BPMN elements - -Before building out the diagram to bake a cake, let's examine the significance of the components on the left side of the screen. - -You can build out a BPMN diagram for a process using several elements, including the following: - -- Events: The things that happen. For example, start and end events which begin and terminate the process. -- Tasks: For example, user tasks for a particular user to complete, or service tasks to invoke various web services. -- Gateways: For example, parallel gateways that move the process along between two tasks at the same time. - - Utilize [variables](../components/concepts/variables.md) to reflect the data of process instances. - - Leverage [expressions](../components/concepts/expressions.md) to access variables and calculate their value(s). -- Subprocesses: For example, a transaction subprocess which can be used to group multiple activities to a transaction. - -For a complete list of BPMN elements and their capabilities, visit the [BPMN reference material](../components/modeler/bpmn/bpmn.md). - -### BPMN in action - -Using these elements, let's build out a BPMN diagram to examine the process of baking a cake. - -Take the following steps: - -1. On our diagram, we've already been given an element as a start event in the shape of a circle. Click on the circle, and then the wrench icon to adjust this element. For now, keep it as a start event. Double click on the circle to add text. -2. Drag and drop an arrow to the first task (the rectangle shape), or click the start event, and then click the task element to automatically attach it. -3. Click on the task, then click on the wrench icon to declare it a user task, which will be named "Purchase Ingredients." Note that each element added has adjustable attributes. Use the properties panel on the right side of the page to adjust these attributes. -4. Click on the user task to connect a gateway to it. By clicking the wrench icon on the gateway and declaring it a parallel gateway, you can connect it to two tasks that can happen at the same time: mixing the ingredients, and preheating the oven. 
- ![baking a cake bpmn sample](./img/bake-cake-bpmn.png) -5. Attach the next gateway once these two tasks have completed to move forward. -6. Add a user task to bake the cake, and finally a user task to ice the cake. -7. Add an end event, represented by a bold circle. -8. No need to save. Web Modeler will autosave every change you make. - -![completed bpmn diagram](./img/complete-baking-cake-bpmn.png) - -:::note -You can also import a BPMN diagram with Web Modeler. See how to do that [here](../components/modeler/web-modeler/import-diagram.md). -::: - -## Execute your process diagram - -:::note -If you change a diagram and it is auto-saved, this has no effect on your cluster(s). - -When you deploy the diagram, it becomes available on the selected cluster and new instances can start. -::: - -To execute your completed process diagram, click the blue **Deploy** button. - -You can now start a new process instance to initiate your process diagram. Click the blue **Run** button. - -You can now monitor your instances in [Operate](/components/operate/operate-introduction.md). Click the square-shaped **Camunda components** button to move between apps, and view process instances once in Operate. - -You can also visit an ongoing list of user tasks required in your BPMN diagram. Navigate to [Tasklist](/components/tasklist/introduction-to-tasklist.md) for a closer look. - -:::note -Variables are part of a process instance and represent the data of the instance. To learn more about these values, variable scope, and input/output mappings, visit our documentation on [variables](../components/concepts/variables.md). -::: - -## Additional resources and next steps - -- [Camunda BPMN tutorial](https://camunda.com/bpmn/) -- [BPMN implementation reference](https://docs.camunda.org/manual/latest/reference/bpmn20/) -- [Zeebe engine](../components/zeebe/zeebe-overview.md) -- [BPMN reference](../components/modeler/bpmn/bpmn.md) -- [Camunda 8 overview with Camunda Academy](https://bit.ly/3TjNEm7) -- [Operate](/components/operate/operate-introduction.md) -- [Tasklist](/components/tasklist/introduction-to-tasklist.md) diff --git a/versioned_docs/version-8.2/guides/configuring-out-of-the-box-connector.md b/versioned_docs/version-8.2/guides/configuring-out-of-the-box-connector.md deleted file mode 100644 index f97cb5d9a71..00000000000 --- a/versioned_docs/version-8.2/guides/configuring-out-of-the-box-connector.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -id: configuring-out-of-the-box-connectors -title: Configure an out-of-the-box Connector -description: "Connectors communicate with any system or technology, reducing the time it takes to automate and orchestrate business processes across systems." -keywords: [connector, modeling, connectors, low-code, no-code] ---- - -Beginner -Time estimate: 20 minutes - -The launch of [Camunda 8](../components/concepts/what-is-camunda-8.md) also introduced an integration framework with a key goal: integrate faster to reduce the time it takes to automate and orchestrate business processes across systems. - -:::note -New to Connectors? Review our [introduction to Connectors](/docs/components/connectors/introduction.md) to get familiar with their capabilities. -::: - -[Connectors](../components/connectors/introduction.md) achieve this goal. 
Ready to use out of the box, Connectors help automate complex [business processes](../components/concepts/processes.md) by inserting them into [BPMN diagrams](./automating-a-process-using-bpmn.md) within [Web Modeler](../components/modeler/about-modeler.md), and configuring them via the properties panel. - -You can also orchestrate APIs, for example by working with a [REST Connector](/docs/guides/getting-started-orchestrate-apis.md). Learn more about [types of Connectors](/docs/components/connectors/connector-types.md). - -Connectors technically consist of two parts: the business logic is implemented as a [job worker](../components/concepts/job-workers.md), and the user interface during modeling is provided using an element template. In this guide, we'll walk step-by-step through the implementation of a sample Connector. - -## Set up - -We'll implement our Connector with [Modeler](../components/modeler/about-modeler.md). To get started, ensure you’ve [created a Camunda 8 account](/guides/create-account.md). - -You'll also need to [create a SendGrid account](https://signup.sendgrid.com/) if you don't have one already, as we'll use SendGrid in our example Connector. Once you've created your account, you will immediately be prompted to create a [sender](https://docs.sendgrid.com/ui/sending-email/senders). - -### Create a cluster - -import CreateCluster from './assets/react-components/create-cluster.md' - - - -## Getting started - -Once logged in to your Camunda 8 account, take the following steps: - -1. From Modeler, click **New project > Create new file > BPMN Diagram**. -2. Name your project by replacing the **New Project** text at the top of the page. In this example, we'll name ours `Expense process`. -3. Select **Create new file > BPMN Diagram**. -4. Give your model a descriptive name by replacing the **New BPMN Diagram** text at the top of the page. Then, give your model a descriptive id within the **General** tab inside the properties panel on the right side of the screen. In this case, we've named our model `Submit expense` with an id of `submitting-expense`. - -## Build a BPMN diagram - -Use Web Modeler to design a BPMN flow with the appropriate tasks. To get started, create a task by dragging the rectangular task icon from the palette, or click the existing start event and the displayed task icon. - -In this example, we've designed the following BPMN diagram: - -![bpmn example diagram](./img/bpmn-expense-sample.png) - -:::note -To learn more about building your own BPMN diagram from scratch, visit our guide on [automating a process using BPMN](./automating-a-process-using-bpmn.md). -::: - -## Add a Connector - -Here, a receipt is initially uploaded for review. The first task we need to complete is notifying the manager of the uploaded receipt. If we want to leverage our email service to notify the manager, we can utilize a productivity applications Connector to replace this task. - -:::note -Camunda offers a variety of available Connectors. For example, utilize cloud Connectors to communicate with cloud-native applications and conform to REST, GraphQL, or SOAP protocols. Or, employ service Connectors to integrate with technology enablers like RPA, AI or IOT services. Learn more about our [available Connectors](../components/connectors/out-of-the-box-connectors/available-connectors-overview.md) to find out which may best suit your business needs. -::: - -To add our productivity applications Connector, take the following steps: - -1. Click the start event. 
A context pad to the right of the start event will appear. -2. Click the **Append Connector** item in the panel. -3. To send an email via SendGrid, for example, select the **SendGrid Email Connector** option. Name this newly-created task `Notify manager of receipt`. This now replaces our original task. - ![adding a connector](./img/adding-connector.png) -4. You need to fill out the required information in the properties panel of this task on the right side of the screen. Here, we'll add an example API key obtained from our [SendGrid account](https://app.sendgrid.com/settings/api_keys), a sender and receiver name and email address, and the email message content. - -![filling out connector properties panel](./img/connector-properties-panel.png) - -Our Connector is now attached and ready to use. Your completed diagram should look like the following: - -![completed connectors and BPMN diagram](./img/connectors-bpmn-diagram.png) - -## Execute your process diagram - -:::note -If you change a diagram and it is auto-saved, this has no effect on your cluster(s). - -When you deploy the diagram, it becomes available on the selected cluster and new instances can start. -::: - -To execute your completed process diagram, click **Deploy**. - -You can now start a new process instance to initiate your process diagram. Click **Run**. - -You can now monitor your instances in [Operate](components/operate/operate-introduction.md). - -:::note -Variables are part of a process instance and represent the data of the instance. To learn more about these values, variable scope, and input/output mappings, visit our documentation on [variables](../components/concepts/variables.md). -::: - -## Observe your running process - -After the [user task](./getting-started-orchestrate-human-tasks.md) **Upload receipt** is completed in [Tasklist](../components/tasklist/introduction-to-tasklist.md), an email is automatically sent to the address as specified in the Connector's properties panel we configured earlier. - -![email via SendGrid](./img/sendgrid-email.png) - -In [Operate](../components/operate/operate-introduction.md), you will now see the process move forward to **Review receipt**. - -![operate example](./img/operate-example.png) - -## Additional resources and next steps - -- [Use Connectors in your BPMN process](/docs/components/connectors/use-connectors/index.md) -- [Available Connectors](../components/connectors/out-of-the-box-connectors/available-connectors-overview.md) -- [Connectors & Integration Framework](https://camunda.com/platform/modeler/connectors/) -- [Camunda BPMN Tutorial](https://camunda.com/bpmn/) -- [Automate processes using BPMN](./automating-a-process-using-bpmn.md) diff --git a/versioned_docs/version-8.2/guides/create-account.md b/versioned_docs/version-8.2/guides/create-account.md deleted file mode 100644 index 90a510fdc9a..00000000000 --- a/versioned_docs/version-8.2/guides/create-account.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: create-account -title: Create an account -slug: /guides/getting-started/ -description: "Set up your Camunda 8 account to get started." ---- - -Beginner -Time estimate: Under 5 minutes - -:::note -We're gradually rolling out changes that affect this page to users; your experience may vary. -::: - -Create a Camunda 8 account so you can create clusters, deploy processes, and create a new instance. 
- -Visit [signup.camunda.com/accounts](https://signup.camunda.com/accounts?utm_source=docs.camunda.io&utm_medium=referral) to create an account: - -![signup](./img/signup.png) - -### Create an account - -Fill out the form and submit, or sign up using the social sign up buttons like Google or GitHub. - -When you fill out the form, you'll receive a confirmation email. Click on the link to verify your email address. - -If you choose to create an account through the social sign up buttons, you'll be redirected to [Console](../components/console/introduction-to-console.md) directly. - -## Log in to your Camunda 8 account - -Log in with the email address and password you used in the previous form, or use the social login buttons. To access the login site directly, navigate to [camunda.io](https://weblogin.cloud.camunda.io/). - -![login](./img/login.png) - -After login, select the square-shaped **Camunda components** icon in the upper-left corner, and select Console to view the Console overview page. This is the central place to manage the clusters, diagrams, and forms you want to deploy to Camunda 8. - -![overview-home](./img/home.png) diff --git a/versioned_docs/version-8.2/guides/create-cluster.md b/versioned_docs/version-8.2/guides/create-cluster.md deleted file mode 100644 index 8c727a2361a..00000000000 --- a/versioned_docs/version-8.2/guides/create-cluster.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -id: create-cluster -title: Create your cluster -description: "To deploy and run your process, you must create a cluster in Camunda 8. While the cluster is being created, you can still proceed to begin modeling." ---- - -import CreateCluster from './assets/react-components/create-cluster.md' - - diff --git a/versioned_docs/version-8.2/guides/create-decision-tables-using-dmn.md b/versioned_docs/version-8.2/guides/create-decision-tables-using-dmn.md deleted file mode 100644 index ec5dac057fb..00000000000 --- a/versioned_docs/version-8.2/guides/create-decision-tables-using-dmn.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -id: create-decision-tables-using-dmn -title: Create decision tables using DMN -description: Model and execute decisions using a language both business analysts and developers can understand. -keywords: [workflow, modeling] ---- - -Beginner -Time estimate: 20 minutes - -Decision Model and Notation (DMN) is a modeling approach owned by an institution called the Object Management Group ([OMG](https://www.omg.org/)), which also operates worldwide standards for [BPMN](./automating-a-process-using-bpmn.md). - -In [DMN](../components/modeler/dmn/dmn.md), decisions are modeled and executed using a language both business analysts and developers can understand. Model a set of rules within a table, and this will yield a decision to rapidly execute a process using a decision engine like Camunda. - -In this guide, we'll step through the lightweight implementation of a DMN diagram in [Camunda 8](../components/concepts/what-is-camunda-8.md), as both Camunda [Desktop](../components/modeler/desktop-modeler/index.md) and [Web Modeler](../components/modeler/about-modeler.md) both offer the same modeling experience for DMN 1.3 models. - -## Set up - -### Create a Camunda 8 account - -We'll begin building our DMN diagrams with [Modeler](../components/modeler/about-modeler.md). To get started, ensure you've [created a Camunda 8 account](./create-account.md). 
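As a look ahead: once the decision table built in this guide is deployed, it can also be evaluated programmatically via the Zeebe Java client. This is a hedged sketch that assumes the decision id `clothingChoice` and the input values used later in this guide; the connection details are placeholders:

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.EvaluateDecisionResponse;
import java.util.Map;

public class EvaluateOutfitDecision {

  public static void main(String[] args) {
    // Placeholder connection for a local cluster; SaaS connections use OAuth + TLS.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder().gatewayAddress("localhost:26500").usePlaintext().build()) {

      EvaluateDecisionResponse response =
          client
              .newEvaluateDecisionCommand()
              .decisionId("clothingChoice") // assumed to match the decision id modeled below
              .variables(Map.of("temperature", "Hot", "activity", "outside"))
              .send()
              .join();

      System.out.println("Decision output: " + response.getDecisionOutput());
    }
  }
}
```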
- -### Create a cluster - -import CreateCluster from './assets/react-components/create-cluster.md' - - - -## Getting started with DMN - -Once logged in to your Camunda 8 account, take the following steps: - -1. From Modeler, click **New project** and name your project. For this example, we'll name our project "Deciding what to wear". -2. Click **Create new file > DMN Diagram** and name your diagram. We'll name our diagram "Picking an outfit". - -## DMN in action - -Modeling starts here in the Decision Requirements Diagram (DRD) view. From here, we can add DMN elements from the palette on the left side by dragging and dropping them onto the diagram canvas. - -1. We're automatically set up with a rectangular decision symbol, which we'll rename to "Clothes to wear" by double-clicking on the symbol. We'll also rename the ID in the properties panel to "clothingChoice" by clicking the **General** dropdown menu. -2. In this example, we'll append two input data which may determine what we wear for the day: the temperature, and the activity. You can do this in one of two ways: - - Drag and drop the oval-shaped input data elements from the menu on the left side of the DRD, name them, and connect them by clicking the decision and the arrow icon. - - Click the decision directly, and click the input data element to drag and drop it onto the canvas. With the second option, you can see this input data will automatically be connected to the decision. -3. Ensure you have also given appropriate IDs to the input data icons. In this example, our IDs are "temperature" and "activity". - -![dmn model example](./img/dmn-model-example.png) - -:::note -Now that you've created a decision, add some logic to make it executable by morphing it into a decision table in the next section. -::: - -## Create your DMN table - -Now that our DRD is complete, let's build out the DMN table for our decision. - -1. Click the table icon in the top left corner of the box containing our "Clothes to wear" decision. Here, we'll define a set of rules based on the weather and the activity to determine what we wear. For example, when a certain value is true (or in this case, when two values are true among the weather and the activity), then one outcome is determined for clothes to wear. - ![dmn blank table](./img/dmn-table-blank.png) - :::note - At any time, you can click **Edit DRD** to return to your DRD and continue moving elements around on the palette. - ::: -2. Click the **+** icon next to the **When** column so we can analyze both the weather and the activity. -3. Double-click the first column. In the **Expression** field, we'll enter "temperature". Given the **Type** will remain **string**, we'll enter "Hot" and "Cold" in the **Add Predefined Values** field, separated by commas. Press your keyboard's "enter" key to save these values. - :::note - Utilizing a data type other than a string? Take a look at our documentation on different [data types](../components/modeler/dmn/dmn-data-types.md), like booleans and numbers. - ::: -4. Similar to the first column, enter "activity" for the expression and predefined values of "inside" and "outside". - ![dmn table example](./img/dmn-table-example.png) -5. Double-click the **Output** column. Here, we'll plug in an **Output Name** of "clothingChoice", and Predefined Values of "shorts" and "pants". -6. Once finished, we'll click the pencil icon to write out our possible outcomes. For example, we can select the predefined values of "hot" and "outside" to determine we will wear shorts. 
These inputs can also be combined with [rules](../components/modeler/dmn/decision-table-rule.md). - -![completed dmn table](./img/dmn-table-complete-example.png) - -:::note -A decision table has a hit policy that specifies what the results of the evaluation of a decision table consist of. You'll notice the hit policy of our table is **Unique**, meaning only one rule can be satisfied or no rule at all. Learn more about different [hit policies](../components/modeler/dmn/decision-table-hit-policy.md). -::: - -## Deploy your diagram - -Click **Deploy** to launch your DMN decision table. - -You can now implement your DMN table in a BPMN diagram using features like a [business rule task](../components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md). - -When a process instance arrives at a business rule task, a decision is evaluated using the internal DMN decision engine. Once the decision is made, the process instance continues. - -If the decision evaluation is unsuccessful, an [incident](../components/concepts/incidents.md) is raised at the business rule task. When the incident is resolved, the decision is evaluated again. - -## Additional resources and next steps - -- [DMN in Modeler](../components/modeler/dmn/dmn.md) -- [DMN Tutorial](https://camunda.com/dmn/#introduction-overview) -- [What is FEEL?](../components/modeler/feel/what-is-feel.md) diff --git a/versioned_docs/version-8.2/guides/devops-lifecycle/img/modeler-ci-cd.png b/versioned_docs/version-8.2/guides/devops-lifecycle/img/modeler-ci-cd.png deleted file mode 100644 index 36834c2aa00..00000000000 Binary files a/versioned_docs/version-8.2/guides/devops-lifecycle/img/modeler-ci-cd.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/devops-lifecycle/img/visual-diff.png b/versioned_docs/version-8.2/guides/devops-lifecycle/img/visual-diff.png deleted file mode 100644 index 8292d0aa8af..00000000000 Binary files a/versioned_docs/version-8.2/guides/devops-lifecycle/img/visual-diff.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md b/versioned_docs/version-8.2/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md deleted file mode 100644 index 94f64322563..00000000000 --- a/versioned_docs/version-8.2/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md +++ /dev/null @@ -1,245 +0,0 @@ ---- -id: integrate-web-modeler-in-ci-cd -title: Integrate Web Modeler into CI/CD -description: Empower DevOps with Web Modeler and integrate into CI/CD pipelines to streamline deployments of process applications. -keywords: - [CI/CD, devops, modeler, processops, process applications, integration guide] ---- - -:::note -Some features in this guide are only available starting in 8.3, however the Web Modeler API is already available. -::: - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -

    - Intermediate - Time estimate: 1 hour -

    - -[Web Modeler](../../components/modeler/about-modeler.md) serves as a powerful tool for the development and deployment of processes and process applications. While Web Modeler simplifies one-click deployment for development, professional teams often rely on continuous integration and continuous deployment (CI/CD) pipelines for automated production deployments. The [Web Modeler API](/apis-tools/web-modeler-api/index.md) facilitates integration of Web Modeler into these pipelines, aligning with team practices and organizational process governance. - -Continuous integration and deployment are pivotal for rapid and reliable software development, testing, and delivery. These practices automate the building, testing, and deployment processes, leading to shorter development cycles, enhanced collaboration, and higher-quality releases. - -Integrating Web Modeler into your CI/CD pipelines can significantly enhance process application development and deployment workflows. By automating process application deployment, changes can be promptly and accurately reflected in the production environment. This agility empowers teams to swiftly respond to evolving business needs, fostering a flexible and adaptable process orchestration approach. - -## Prerequisites - -Each pipeline is unique. The Web Modeler API offers flexibility to tailor integrations according to your pipelines. To get started, there are a few prerequisites based on your setup: - -- A platform to host a version control system (VCS) such as GitHub or GitLab. -- An existing pipeline or a plan to set one up using tools like [CircleCI](https://circleci.com/) or [Jenkins](https://www.jenkins.io/), cloud platforms such as [Azure DevOps Pipelines](https://azure.microsoft.com/de-de/products/devops), or built-in solutions of VCS platforms like [GitHub Actions](https://github.com/features/actions) or [GitLab's DevSecOps Lifecycle](https://about.gitlab.com/stages-devops-lifecycle/). -- Make yourself familiar with the [Web Modeler API](/apis-tools/web-modeler-api/index.md) through the [OpenAPI documentation](https://modeler.camunda.io/swagger-ui/index.html). -- Understand how [clusters](/components/concepts/clusters.md) work in Camunda 8. -- Ensure you’ve [created a Camunda 8 account](/guides/create-account.md), or installed [Camunda 8 Self-Managed](/self-managed/about-self-managed.md). - -## Setup - -While a pipeline for process application integration and deployment resembles general software CI/CD pipelines, key distinctions exist. Consider the following: - -- Web Modeler uses [milestones](/components/modeler/web-modeler/milestones.md) to indicate specific process states, such as readiness for developer handover, review, or deployment. -- A process application comprises main processes and diverse resources, such as subprocesses, forms, DMN decision models, Connectors, job workers, and orchestrated services. Some applications bundle these resources, while others focus on a single process for deployment. -- Process reviews differ from code reviews, occurring on visual diagrams rather than XML. 
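Most of the integration points discussed below come down to authenticated HTTP calls against the Web Modeler API. As a hedged sketch of such a call from a pipeline step — the base URL, the token environment variable, and the request body shape are assumptions to verify against the OpenAPI documentation linked above — searching for the milestones of a given file could look like this:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SearchMilestones {

  public static void main(String[] args) throws Exception {
    // Token obtained as described in "Obtain API clients and tokens" below (assumed env var name).
    String token = System.getenv("WEB_MODELER_API_TOKEN");
    String fileId = args[0]; // the Web Modeler file to inspect

    // Filter shape is illustrative; check the OpenAPI spec for the exact request body.
    String body = "{\"filter\": {\"fileId\": \"" + fileId + "\"}}";

    HttpRequest request =
        HttpRequest.newBuilder()
            .uri(URI.create("https://modeler.camunda.io/api/beta/milestones/search"))
            .header("Authorization", "Bearer " + token)
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();

    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());

    // A real pipeline step would parse the JSON and decide whether to trigger a deployment.
    System.out.println(response.statusCode() + " " + response.body());
  }
}
```

A pipeline would typically wrap a call like this in a small CLI step that compares the newest milestone against the last one it deployed.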
- -![Sample CI/CD setup with Web Modeler](img/modeler-ci-cd.png) - -### Obtain API clients and tokens - -Before getting started, obtain API clients and tokens for integrating Web Modeler and accessing the process engine via API: - -- [Obtain an API token for Web Modeler](/apis-tools/web-modeler-api/index.md#authentication) -- [Obtain an API client for Zeebe](/guides/setup-client-connection-credentials.md) - -### Disable manual deployments from Web Modeler - -To enforce pipeline-driven deployments to your environments, consider disabling manual deployments. - - - - -Disable manual deployments for any user by configuring environment variables `ZEEBE_BPMN_DEPLOYMENT_ENABLED` and `ZEEBE_DMN_DEPLOYMENT_ENABLED` as documented [here](/self-managed/modeler/web-modeler/configuration/configuration.md#general). - - - - -Remove the **Developer** role from users in Console to restrict their deployment permissions. Read more in the [user roles documentation](/components/console/manage-organization/manage-users.md). - - - - -### Triggering CI/CD - -You need triggers to initiate the pipeline for files or projects. Choose between manual pipeline start or automatic background triggers based on events. Common approaches include: - -- Initiating the pipeline manually from your CI/CD tool/platform by uploading the file intended for deployment. -- Starting the CI pipeline by creating a pull/merge request in the version control system. -- Triggering pipelines by listening to milestones with certain characteristics. - -#### Sync files with version control - -Synchronize files between Web Modeler and version control systems (VCS) and vice versa. Manage both files and projects by using a complete set of CRUD (create, read, update, delete) operations provided by the Web Modeler API. By syncing files from Web Modeler to your VCS, you benefit from full file ownership and avoid duplicated data housekeeping. - -For automatic file synchronization, consider maintaining a secondary system of record for mapping Web Modeler projects to VCS repositories. This system also monitors the project-to-repository mapping and update timestamps. - -To listen to changes, starting from 8.3 Web Modeler offers a polling approach that compares the update dates with the last sync dates recorded, see [the 8.3 documentation](/versioned_docs/version-8.3/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md#sync-files-with-version-control). - -Real-time synchronization isn't always what you need. Consider Web Modeler as a local repository, and update your remote repository only after files are committed and pushed. This aligns with the concept of [milestones](/components/modeler/web-modeler/milestones.md). - -#### Listening to milestone creation - -A milestone reflects a state of a file in Web Modeler with a certain level of qualification, such as being ready for deployment. You can use this property to trigger deployments when a certain milestone is created. - -Starting from 8.3, Web Modeler offers a polling approach to listen to milestone creation, see [the 8.3 documentation](/versioned_docs/version-8.3/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md#listening-to-milestone-creation). - -To retrieve the content of this particular milestone, use the `GET /api/beta/milestones/:id` endpoint. To obtain the latest edit state of the file, use the `GET /api/beta/files/:id` endpoint. 
This endpoint also provides the `projectId` necessary for the `POST /api/beta/projects/search` endpoint offered from Web Modeler 8.3 on if you want to push the full project via the pipeline. - -Progress is underway to introduce webhook registration or event subscription for milestone creation monitoring. - -Combine these two approaches and listen to milestones to sync files to your version control, create a pull/merge request, and trigger pipelines. - -## Pipeline stages - -The following examples illustrate setting up **build**, **test**, **review**, and **publish** stages within a pipeline. - -### Build stage - -While there is no distinct concept for a build package in Camunda 8, artifact structuring depends on your overall software architecture. The build stage should primarily focus on acquiring dependencies and deploying them to a preview environment. - -#### Set up preview environments - -Offering an automatically testable and review-ready process preview mandates a dedicated preview cluster. Numerous options exist, varying with software development lifecycle design, preferences, and Camunda 8 deployment type (SaaS, self-managed, or hybrid). This guide proposes a setup with lightweight local self-managed preview clusters (or embedded engines) and full-fledged staging and production clusters (Self-Managed or SaaS). - -##### Using fully-featured clusters - -For local preview environments, you can deploy a comprehensive [Zeebe](https://github.com/camunda/camunda) cluster including Operate and Tasklist. Options include using docker-compose or Kubernetes via Helm. All necessary endpoints and UIs are available for thorough process/application testing. Opt for a cluster version aligned with your production cluster to ensure process compatibility. - -##### Using embedded Zeebe engines - -If you don't need to spawn all apps such as Operate or Tasklist, you can use the lightweight [embedded Zeebe engine](https://github.com/camunda-community-hub/eze), which is a community-maintained project, to set up a cost-effective solution with an in-memory database. Together with the [Zeebe Hazelcast exporter](https://github.com/camunda-community-hub/zeebe-hazelcast-exporter) (community-maintained as well), you can consume data generated from your process for reporting or testing. - -In the build stage, deploy your process or project to a cluster or embedded engine. Post-pipeline completion, such as deployment to staging or production, preview environments can be discarded. - -:::tip -For GitLab users, consider using [GitLab Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) to provide preview environments. -::: - -Deploy resources using the [`zbctl` CLI](/apis-tools/cli-client/index.md) in this pipeline step, compatible with both SaaS and Self-Managed clusters. Alternately, utilize the [Java](/apis-tools/java-client/index.md) or [Go](/apis-tools/go-client/index.md) client library or any [community-built alternatives](/apis-tools/community-clients/index.md). - -:::info Feature branches and Web Modeler installations -To maintain a single source of truth, avoid multiple Web Modeler instances for different feature branches. Instead, maintain a single Web Modeler installation for all environments, utilizing milestones to signify versioning and pipeline stages. Feature branches can be managed by cloning and merging files or projects, ensuring synchronization using VCS. 
-::: - -#### Automate deployment of linked resources/dependencies - -Pipeline-driven deployment can be executed for a single file or an entire project. A separate system of record, maintained outside Web Modeler, can handle finer-grained dependency management. Fetch the full project for a file using the `GET /api/beta/files/:id` endpoint to acquire the project's `projectId`. Subsequently, use the `POST /api/beta/files/search` offered starting from Web Modeler 8.3 on to retrieve all project files, see [the 8.3 documentation](/versioned_docs/version-8.3/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md#automate-deployment-of-linked-resourcesdependencies). - -To retrieve the actual file `content`, iterate over the response and fetch it via `GET /api/beta/files/:id`. Parse the XML of the diagram for the `zeebe:taskDefinition` tag to retrieve job worker types. Utilizing a job worker registry mapping, deploy these workers along with the process if required. - -If you are running Connectors in your process or application, you need to deploy the runtimes as well. Parse the process XML for `zeebe:taskDefinition` bindings to identify the necessary runtimes (in addition to job workers). To learn how to deploy Connector runtimes, read more [here](/self-managed/connectors-deployment/install-and-start.md) for Self-Managed, or [here](/components/connectors/custom-built-connectors/connector-sdk.md#runtime-environments) for SaaS. - -Deploy resources in this pipeline step using the [`zbctl` CLI](/apis-tools/cli-client/index.md), compatible with both SaaS and Self-Managed clusters. Alternatively, utilize the Java or Go client library or any community-built alternatives. - -#### Add environment variables via secrets - -If you are running Connectors, you need to provide environment variables, such as service endpoints and API keys, for your preview environment. You can manage these via secrets. Read the [Connectors configuration documentation](/self-managed/connectors-deployment/connectors-configuration.md) to learn how to set these up in SaaS or Self-Managed. - -### Test stage - -Keep strict quality standards for your processes with automatic testing and reporting. - -#### Lint your diagrams - -Add a step to your pipeline for automatic process verification using the [bpmnlint](https://github.com/bpmn-io/bpmnlint) and [dmnlint](https://github.com/bpmn-io/dmnlint) libraries. Maintained by the bpmn-io team at Camunda, these open source libraries provide a default set of verification rules, as well as the option to add custom rules. They provide reporting capabilities to report back when the verification fails. These are the same libraries Web Modeler uses to verify diagrams during modeling. - -You could even report the wrong diagram patterns together with examples to resolve it using [this extension](https://github.com/bpmn-io/bpmnlint-generate-docs-images). - -#### Unit and integration tests - -For unit tests, select a test framework suitable for your environment. If working with Java, the [zeebe-process-test](/apis-tools/java-client/zeebe-process-test.md) library is an excellent option. Alternatively, employ the [Java client](/apis-tools/java-client/index.md) with JUnit for testing your BPMN and [DMN diagrams](/apis-tools/java-client-examples/decision-evaluate.md) in dev or preview environments. Similar testing can be performed using [community-built clients](/apis-tools/community-clients/index.md) in Node.js, Python, or Go. 
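For the Java option mentioned above, a minimal JUnit 5 sketch with zeebe-process-test could look like the following. The process file name `expense-process.bpmn`, the process id `expense-process`, and the exact package names are assumptions to adapt to your project:

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.ProcessInstanceEvent;
import io.camunda.zeebe.process.test.api.ZeebeTestEngine;
import io.camunda.zeebe.process.test.assertions.BpmnAssert;
import io.camunda.zeebe.process.test.extension.ZeebeProcessTest;
import java.time.Duration;
import org.junit.jupiter.api.Test;

@ZeebeProcessTest
class ExpenseProcessTest {

  // Injected by the ZeebeProcessTest extension.
  private ZeebeTestEngine engine;
  private ZeebeClient client;

  @Test
  void shouldStartExpenseProcess() throws Exception {
    // Deploy the diagram exported from Web Modeler (assumed to be on the test classpath).
    client
        .newDeployResourceCommand()
        .addResourceFromClasspath("expense-process.bpmn")
        .send()
        .join();

    ProcessInstanceEvent instance =
        client
            .newCreateInstanceCommand()
            .bpmnProcessId("expense-process")
            .latestVersion()
            .send()
            .join();

    // Let the in-memory engine finish all pending work before asserting.
    engine.waitForIdleState(Duration.ofSeconds(5));

    BpmnAssert.assertThat(instance).isStarted();
  }
}
```

The same structure extends to asserting on passed elements, variables, or incidents before the pipeline promotes a change to the review stage.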
- -### Review stage - -During the review stage, stakeholders and team members access the built and tested environment for review purposes. Both the deployed process/application and a visual diagram diff are available for examination. - -#### Create a link to a visual diff for reviews - -Use milestones to indicate a state for review. Use the `POST /api/beta/milestones` endpoint to create a new milestone, and provide a description to reflect the state of this milestone using the `name` property. The current content of the file is copied over on milestone creation. - -While it is possible to do a diff of your diagrams by comparing the XML in your VCS system, this is often not very convenient, and lacks insight into process flow changes. This approach is also less effective when involving business stakeholders in the review. - -The Web Modeler API addresses this by providing an endpoint to generate visual diff links for milestones. Utilize the `GET /api/beta/milestones/compare/{milestone1Id}...{milestone2Id}` [endpoint](https://modeler.camunda.io/swagger-ui/index.html#/Milestones/compareMilestones) to compare two milestones. Obtain IDs for the latest milestones via the `POST /api/beta/milestones/search` [endpoint](https://modeler.camunda.io/swagger-ui/index.html#/Milestones/searchMilestones) available from Web Modeler 8.3, utilizing the `fileId` filter to identify the file to review. The resulting URL leads to a visual diff page similar to this: - -![Visual diff of two milestones](img/visual-diff.png) - -##### Example review flow - -The following process diagram demonstrates an example flow of how to run a preview using milestones and a diff link in GitHub: - - - -#### Review a running process application - -If deployed in a review environment, processes/applications can be shared with peers for interactive review. For comprehensive review, full clusters inclusive of Operate and Tasklist can be used for process execution. This closely simulates the final experience. To integrate the preview environment with custom applications, leverage the Operate and Tasklist APIs and deploy them within the review environment. - -In case you use an embedded Zeebe engine, or want to provide a lightweight, focused review experience, you can use [Zeebe Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor), which is a community-maintained Web App similar to the [Play mode](/components/modeler/web-modeler/play-your-process.md) in Web Modeler. Deploying Zeebe SimpleMonitor allows for thorough process testing and review. - -### Publish stage - -Push approved changes to staging or production by deploying them to the respective clusters. You can use the [`zbctl` CLI](/apis-tools/cli-client/index.md) to deploy via your pipeline, which works both for a SaaS or Self-Managed cluster. Deployments work slightly different on SaaS and Self-Managed, since there are differences in the cluster connection. Read more about deployments [here](/apis-tools/working-with-apis-tools.md#deploy-processes-start-process-instances-and-more-using-zeebe-client-libraries). - -#### Define resource authorizations - -For clusters with [resource authorizations](/self-managed/concepts/access-control/resource-authorizations.md) enabled (via the `RESOURCE_PERMISSIONS_ENABLED` [feature flag](/self-managed/identity/deployment/configuration-variables.md#feature-flags)), use the Identity API to assign the necessary authorizations through the pipeline. 
This step ensures appropriate access for process/application stakeholders and can also be used to update existing authorizations. - -##### Monitoring and error handling - -As with any CI/CD integration, it's crucial to set up monitoring and error handling mechanisms. These can include: - -- Monitoring the CI/CD pipeline execution for errors and failures. -- Using Operate to catch incidents and send alerts to the pipeline in the test stage. -- Sending notifications or alerts in case of deployment issues in both the build and publish stages. -- Implementing rollback mechanisms in case a faulty BPMN diagram gets deployed. - -## FAQ - -#### Can I do blue-green deployments on Camunda 8? - -Blue-green deployments are possible with Camunda 8, with some limitations. While switching clusters is quick for new process instances, audit logs and existing process instances remain tied to the previous cluster. Consider exporting audit logs from Elasticsearch or OpenSearch to your own streams if needed. If you don't have to migrate running process instances, keeping them running on the previous cluster alongside new instances on the new cluster is also an option. - -#### Can I implement blue-green deployments with Camunda 8 SaaS? - -While blue-green deployments are more straightforward with Self-Managed setups, you can implement similar deployment strategies with Camunda 8 SaaS. Keep in mind the limitations and differences between clusters when planning your deployment approach. - -#### How can I prevent manual deployments from Web Modeler? - -To enforce CI/CD pipelines, you can disable manual deployments from Web Modeler. For Self-Managed setups, set the environment variables `ZEEBE_BPMN_DEPLOYMENT_ENABLED` and `ZEEBE_DMN_DEPLOYMENT_ENABLED`. In Camunda 8 SaaS, only the **Developer** role allows deployments from Web Modeler. Assigning any other role effectively removes deployment privileges. - -#### How can I sync files between Web Modeler and version control? - -Use the Web Modeler API's CRUD operations to sync files between Web Modeler and your version control system. Consider maintaining a second system of record to map Web Modeler projects to VCS repositories and track sync/update dates. - -#### What is the purpose of the build stage in my pipeline? - -The build stage focuses on preparing dependencies and deploying them to a preview environment. This environment provides a preview of your process that can be tested and reviewed by team members. - -#### Can I lint my process diagrams for verification? - -Yes, you can use the `bpmnlint` and `dmnlint` libraries to automatically verify your process diagrams against predefined rules. These libraries provide reporting capabilities to identify and fix issues during the build stage. - -#### How can I perform unit and integration tests on my processes? - -You can use the `zeebe-process-test` library for Java-based unit tests or community-built clients for other programming languages. These libraries allow you to execute your BPMN and DMN diagrams with assertions in your development or preview environments. - -#### How do I provide environment variables to Connectors in preview environments? - -You can manage environment variables for Connectors using secrets. This can be set up in both Camunda 8 SaaS and Self-Managed. Refer to the [Connectors configuration documentation](/components/connectors/introduction.md) for details. - -#### How can I monitor and handle errors in my CI/CD pipeline?
- -Implement monitoring mechanisms in your CI/CD pipeline to catch errors and failures during the deployment process. Additionally, consider implementing rollback mechanisms in case a faulty BPMN diagram is deployed. - -## Additional resources and next steps - -- [Camunda 8 overview](https://bit.ly/3TjNEm7) -- [Web Modeler API documentation](/apis-tools/web-modeler-api/index.md) diff --git a/versioned_docs/version-8.2/guides/getting-started-orchestrate-apis.md b/versioned_docs/version-8.2/guides/getting-started-orchestrate-apis.md deleted file mode 100644 index 42f008bdaf8..00000000000 --- a/versioned_docs/version-8.2/guides/getting-started-orchestrate-apis.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -id: orchestrate-apis -title: Getting started with API orchestration -sidebar_label: Getting started with API orchestration -description: "Use Connectors to build low code process automation solutions" -keywords: - [api endpoints, orchestration, getting started, user guide, connectors] ---- - -Beginner -Time estimate: 15 minutes - -import clsx from "clsx"; - -This guide will walk you through working with a REST Connector task as a first time Camunda 8 user. The REST Connector is a [protocol Connector](/docs/components/connectors/out-of-the-box-connectors/available-connectors-overview.md#protocol-connectors), where you can make a request to a REST API and use the response in the next steps of your process. - -A Connector is a reusable building block that works out of the box. Each Connector task can be configured with domain-specific parameters without implementing custom business logic. - -:::note -New to Connectors? Review our [introduction to Connectors](/docs/components/connectors/introduction.md) to get familiar with their capabilities, and have a closer look at all of the available [out-of-the-box Connectors](/docs/components/connectors/out-of-the-box-connectors/available-connectors-overview.md). -::: - -The concept of a Connector consists of two parts: the business logic is implemented as a job worker, and the user interface during modeling is provided using an element template. In this guide, you will create a REST Connector task in your process, handle the HTTP response, and deploy your process. New to creating a process? Get started by [modeling your first diagram](/docs/guides/model-your-first-process.md). - -## Create a REST Connector task - -To use a **REST Connector** in your process, follow the steps below: - -1. Create a BPMN diagram. To do this, click **New project** within Modeler. -2. Name your project and select **Create new file > BPMN Diagram**. -3. Give your model a descriptive name and id. On the right side of the page, expand the **General** section of the properties panel to find the name and id fields. For this guide, we'll use `API Orchestration Tutorial` for the name and `api-orchestration-tutorial` for the id. -4. Use Web Modeler to design a BPMN flow with a Connector. Create a Connector by dragging the rectangular task element from the palette, or click the existing start event and the displayed task element to the right of the start event. -5. Change the task type by clicking the wrench icon and select **REST Outbound Connector** in the **Connectors** section. Alternatively, you can directly choose a **REST Outbound Connector** by using the context pad. - - ![Blank task on Web Modeler canvas with properties panel open](img/connectors-blank-task.png) - -6. Add a descriptive name using the **General** section in the properties panel. 
For this guide, we'll use `Make a request`. - -## Make your REST Connector executable - -![Connector on Web Modeler canvas with properties panel open](img/connectors-rest-red-properties.png) - -To make the **REST Connector** executable, fill out the mandatory **URL** field in the HTTP Endpoint section (highlighted in red) in the properties panel with `https://catfact.ninja/fact` so we can get a random cat fact from the [Cat Fact API](https://catfact.ninja/) for this example. - -## Handle your response - -The HTTP response will be available in a temporary local response variable. This variable can be mapped to the process by specifying **Result Variable**. -In the **Response Mapping** section, use `={"body" : body}` as the **Result Expression** so you can see the entire JSON object returned if the request is successful. - -## Deploy your process - -To deploy your process, take the following steps: - -1. Drag the bolded circular end event element from the palette onto the canvas, or click the final service task and then the end event element alongside it. Ensure there is an arrow connecting the service task to the end event. -2. In the top right corner, click the blue **Deploy** button. Your diagram is now deployed to your cluster. - :::note - If you have not yet created a cluster, clicking **Deploy** will take you to the console to create a cluster. Once you make your cluster creation request, you will automatically be redirected back to Modeler. The creation of a cluster can take 1 to 5 minutes. To read more about creating clusters, visit our documentation on [creating a cluster](create-cluster.md). - ::: -3. Start a new process instance by clicking the blue **Run** button. -4. In the top left corner of the screen, click the square-shaped **Camunda components** button. Navigate to Operate to see your process instance with a token waiting at the service task by clicking **View process instances**. - -## Wrap up - -Congratulations! You successfully built your first API orchestration solution with Camunda 8. - -Camunda 8 empowers users to automate processes faster. Connectors are reusable components that allow you to access APIs without writing code. - -Don't want to build the process yourself? Click this button to create it from a template in Camunda 8 SaaS, or sign up first. - - - -## Additional resources and next steps - -- Learn more about Camunda 8 and what it can do by reading [What is Camunda 8](/components/concepts/what-is-camunda-8.md) or watching our [Overview video](https://bit.ly/3TjNEm7) in Camunda Academy. -- [Learn about types of Connectors](/docs/components/connectors/connector-types.md) -- [Use Connectors in your BPMN process](/docs/components/connectors/use-connectors/index.md) diff --git a/versioned_docs/version-8.2/guides/getting-started-orchestrate-human-tasks.md b/versioned_docs/version-8.2/guides/getting-started-orchestrate-human-tasks.md deleted file mode 100644 index 8a138d4dace..00000000000 --- a/versioned_docs/version-8.2/guides/getting-started-orchestrate-human-tasks.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -id: orchestrate-human-tasks -title: Get started with human task orchestration -sidebar_label: Getting started with human task orchestration -description: "Efficiently allocate work through user tasks."
-keywords: [human tasks, orchestration, getting started, user guide] ---- - -Beginner -Time estimate: 15 minutes - -import ExpressionInputImg from './img/expression-input-example.png'; -import FormValuesImg from './img/form-values-example.png'; -import ImplementModeImg from './img/implement-mode-active.png'; -import FormLinkingImg from './img/form-linking.png'; -import ModelerNavImg from './img/modeler-navigation.png'; -import ModelerGlobalNavImg from './img/modeler-global-nav.png'; -import ModelerFormMenuImg from './img/modeler-form-menu.png'; -import RunProcessImg from './img/run-process.png'; -import OperateHumanTasks from './img/operate-human-tasks.png'; -import FormEditorImg from './img/form-editor.png'; -import NavigationHistoryImg from './img/modeler-navigation-history.png'; - -import clsx from "clsx"; - -Camunda 8 allows you to orchestrate processes with human tasks of any complexity. Utilizing user tasks, you can create and assign tasks to users. Then, users can perform their work and enter the necessary data to drive the business process. - -This guide introduces you to the basics of human task orchestration. You will create a simple process to decide on dinner, and drive the process flow according to that decision. This process is entirely executable in the browser. - -:::note -If you prefer a video-based learning experience or a more complex example, visit [this Camunda Academy course](https://bit.ly/3PJJocB). -::: - -Take the following five steps to create and run your first process with a human in the loop: - -### Step 1: Create a new process - -In this step, you will design a process that demonstrates how to route the process flow based on a user decision. In this example, you will create a process to decide what is for dinner. - -#### Create a new file - -1. Every file in Web Modeler requires a project. Within Modeler, click **New project**. -2. Name your project and select **Create new file > BPMN Diagram**. -3. Give your file a descriptive name. In this case, name it `Decide for Dinner`. -4. Make sure to name the process itself as well. Click the empty canvas, and specify the process name and technical ID in the properties panel. This specifies how the process will appear in other tools of Camunda 8. - -#### Design the process - -:::note -To follow this guide, make sure you are in **Implement** mode to specify the technical details of the process. -Active implement mode tab -::: - -1. A **start event** is automatically added to the canvas. Click it to display configuration and append options. -2. Click the rectangular **Append Task** icon to append a task. -3. Enter a descriptive name for the task, such as `Decide what's for dinner`. -4. Change the task type by clicking the **wrench** icon. Select **User Task**. -5. Select the user task and click the diamond-shaped icon to append an exclusive gateway. The gateway allows you to route the process flow differently, depending on conditions. -6. Select the gateway and append a task by clicking the task icon. Repeat this to create a second process flow. Name the tasks based on what the user decides to eat: in this case, we've named ours `Prepare chicken` and `Prepare salad`. -7. To route the user to the right task, add [expressions](/components/concepts/expressions.md) to the **sequence flows**. Sequence flows are represented by arrows connecting the gateway to the tasks. To add an expression, click on a sequence flow to view the **properties panel**, and open the **Condition** section. -8.
Verify the sequence flows have the following expressions: `meal = "Salad"` on one side, and `meal = "Chicken"` on the other. You will define the variable `meal` later when designing a form for the user task. - Example of a conditional expression - -9. Connect the split process flows again. Append another exclusive gateway to one of the tasks. Select the other task and drag the arrow-shaped sequence flow tool to connect it to the gateway. -10. Select the gateway and add an **end event** to your process, denoted by the circle with the thick outline. - -:::note -New to BPMN or want to learn more? Visit our [BPMN cheat sheet](https://page.camunda.com/wp-bpmn-2-0-business-process-model-and-notation-en) for an overview of all BPMN symbols. -Variables are part of a process instance and represent the data of the instance. To learn more about these values, variable scope, and input/output mappings, visit our documentation on [variables](/components/concepts/variables.md). -::: - - - -### Step 2: Design a form - -You have now designed the process. To allow the user to make the decision, you will now design a [form](../components/modeler/forms/camunda-forms-reference.md). Forms can be added to user tasks and start events to capture user input, and the user input can be used to route the process flow, to make calls to APIs, or to orchestrate your services. - -1. Select the user task you created in **[Step 1](#step-1-create-a-new-process)**. -2. Click the blue **form link icon** in the lower right corner. A menu expands that allows you to create a new form. - Annotation to open the form menu -3. Click **Create new form**. A form will be created and opened in the form editor. The form is automatically named. - -:::note -Don't worry about saving your process diagram—Modeler automatically saves every change you make. -::: - -4. Click and drag the **Text** component to the empty form. - Dragging a component to a form - -5. Open the **General** section in the properties panel and enter text, such as `What's for dinner?`. -6. Click and drag the **Radio** component to the form to create a radio group. Give it a descriptive name within the properties panel. -7. Additionally, set a **key**, which maps to a process variable. The value of the component will be stored in this variable, and it can be read by the process that uses this form. As already defined by the conditions in the process earlier, use the variable `meal`. -8. Scroll down to the **Static options** section of the properties panel to add radio options. Since there are two options for dinner, add an extra value by clicking the plus sign. Enter the value `Chicken` with the label `Chicken` in one entry, and the value `Salad` with the label `Salad` in the other. - -Defining a radio group and its values - -### Step 3: Connect the form to your process - -Once the form is designed, you must connect it to your process. - -1. Click on the project name in the navigation history in the top bar to navigate back, and open the process you created in **[Step 1](#step-1-create-a-new-process)**. - Navigation history in Web Modeler -2. Select the user task. Click the blue **form link icon** to open the form menu. -3. Select the form you just created, and click **embed** to confirm. - Selecting a form from the project - -4. You can check if you embedded the right form by clicking the form linking icon again. A preview of the form will appear. - -:::note -Forms are embedded in the user task and deployed together with the process.
If you make changes to a form, you have to remove and re-embed it again to make the changes appear. -::: - -### Step 4: Run your process - -Your process is now ready to run. Given its human-centric nature, it is well suited to be run in Tasklist. In order to make it accessible from Tasklist, the process must be deployed first. - -:::note -Human-centric processes involving user tasks seamlessly unfold within Tasklist, offering a cost-effective orchestration solution for human work with forms. However, the versatility of these processes extends beyond Tasklist, encompassing various alternative methods and applications. For instance, users can be redirected to external applications to fulfill tasks, bespoke task applications can be developed for any domain, or interactions with the physical world can be captured through event signals from sensors and IoT devices. -::: - -#### Deploy and test run - -1. Click **Deploy** to deploy the process to your cluster. - :::note - If you have not yet created a cluster, clicking **Deploy** will take you to Console to [create a cluster](create-cluster.md) first. Continue with this guide after cluster creation. - ::: -2. After you deploy your process, it can be executed on the cluster. There are multiple ways to run a process. This time, click **Run** in Modeler for a test run. - -:::tip -Other options to run a process are to start it via Tasklist, test it in the Play mode, or call it via the API or an inbound trigger. Read more about [run options](/components/modeler/web-modeler/run-or-publish-your-process.md). -::: - -#### Check successful start in Operate - -1. The process start will be confirmed via a notification message on the screen. Click the **chevron icon** next to **Run** to open more options. Click **View process instances** to see the running process in Operate. - Run action in Modeler - -2. In Operate, you will see a visualization of the running process instance. Notice that a green **token** is waiting at the user task. This means that a task is waiting to be worked on in Tasklist. - Process instance monitoring in Operate - -:::tip -In production, Operate is used to monitor both long-running and straight-through, high-throughput processes. In development environments, use Operate to confirm if the process flow works as expected. For faster in-place validation during development, use the [Play mode](/components/modeler/web-modeler/play-your-process.md). -::: - -### Step 5: Complete a user task - -When the process instance arrives at the user task, a new job is created. The process instance stops at this point and waits until the job is completed. Applications like [Tasklist](/components/tasklist/introduction-to-tasklist.md) can be used by humans to complete these tasks. In this last step, you will open Tasklist to run the user task you created. - -:::tip -While it may originally seem like the goal of automating a process is to remove humans entirely, efficiently allocating work through user tasks can be even more beneficial. Within this example, we've included a form to demonstrate the completion of a user task. - -Using the Zeebe or Tasklist API, many other ways to complete a user task are possible, such as redirecting to another application to complete the task, or even listening to IoT devices to capture human interaction with the real world via job workers. -::: - -1. Click the **navigation menu icon** next to the Camunda logo in the top bar to open the global navigation. -2. Click **Tasklist** to open the Tasklist application. 
- Navigation to other applications - -3. On the left, you will notice a list of **tasks**. There should be one open task, `Decide what's for dinner`. Click this task to open it in the detail view. -4. In the detail view, the form you created in **[Step 2](#step-2-design-a-form)** appears. It is read-only since this task is currently unassigned. You have to claim the task to work on it. Next to **Assignee**, click **Assign to me** to claim the task. -5. Select one of the radio options. -6. Click **Complete Task** to submit the form. - ![complete a human task in Tasklist](./img/user-task-tasklist.png) -7. To verify your task completion, you can filter by **Completed** tasks in the left task list panel. - -You can now navigate back to Operate and notice the process instance has continued, as the token has moved forward to the selected option. - -The token moves through the exclusive gateway (also called the XOR gateway), which is used to model the decision in the process. When the execution arrives at this gateway, all outgoing sequence flows are evaluated in the order in which they have been defined. The sequence flow whose condition evaluates to `true` is selected to continue the process. - -In this case, the token will move through the gateway and (according to the conditional expressions we outlined earlier) to the selected dinner based on the **Decide what's for dinner** user task we completed. If we select **Chicken**, the token moves forward to **Prepare chicken**. If we select **Salad**, the token moves forward to **Prepare salad**. - -## Wrap up - -At this point, you've successfully crafted a human-centered process that routes the process flow based on a decision made by a user. - -A core value of Camunda 8 lies in the combination of automation and human interaction. Continue with the following resources to learn about intelligent task assignments, flexible forms to capture data and decisions, operational insights to refine task efficiency, and pathways to publish your processes to users via Tasklist or even publicly. - -Don't want to build the process yourself? Click this button to create it from a template in Camunda 8 SaaS, or sign up first. - - - -## Additional resources and next steps - -- Watch the [video-based Human Task Orchestration Course](https://bit.ly/3PJJocB). -- Learn how to use [BPMN user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) to route tasks to the right users. -- Learn how to [build more complex forms](./utilizing-forms.md) using the form editor. -- Learn how to write powerful [expressions](/components/concepts/expressions.md) and utilize [variables](/components/concepts/variables.md) to route complex process flows. -- Get an [introduction to Operate](/components/operate/operate-introduction.md). -- Learn how to [set up Tasklist](/components/tasklist/introduction-to-tasklist.md) for efficient task management. diff --git a/versioned_docs/version-8.2/guides/getting-started-orchestrate-microservices.md b/versioned_docs/version-8.2/guides/getting-started-orchestrate-microservices.md deleted file mode 100644 index 9c4ca395f39..00000000000 --- a/versioned_docs/version-8.2/guides/getting-started-orchestrate-microservices.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -id: orchestrate-microservices -title: Get started with microservice orchestration -sidebar_label: Getting started with microservice orchestration -description: "Orchestrate Microservices along a business process for visibility and resilience."
-keywords: [microservices, orchestration, getting-started] ---- - -Beginner -Time estimate: 25 minutes - -import clsx from "clsx"; -import CreateCluster from './assets/react-components/create-cluster.md'; - -Using Camunda 8, you can orchestrate the microservices necessary to achieve your end-to-end automated business process. Whether you have existing microservices or are looking to build out your microservices, this guide will help you understand how you can start your microservice orchestration journey with Camunda 8. - -While this guide uses code snippets in Java, you do not need to be a Java developer to be successful. Additionally, you can orchestrate microservices with Camunda 8 in other programming languages. - -## Prerequisites - -- Ensure you have a valid [Camunda 8 account](create-account.md), or sign up if you still need one. -- Java >= 8 -- Maven -- IDE (IntelliJ, VSCode, or similar) -- Download and unzip or clone the [repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java` - -### Design your process with BPMN - -Start by designing your automated process using BPMN. This guide introduces you to the palette and a few BPMN symbols in Web Modeler. - -1. To create a BPMN diagram, click **New project** within Modeler. -2. Name your project and select **Create new file > BPMN Diagram**. -3. Give your model a descriptive name and id. On the right side of the page, expand the **General** section of the properties panel to find the name and id fields. For this guide, we'll use `Microservice Orchestration Tutorial` for the name and `microservice-orchestration-tutorial` for the id. -4. Use Web Modeler to design a BPMN process with service tasks. These service tasks are used to call your microservices via workers. Create a service task by dragging the task icon from the palette, or by clicking the existing start event and clicking the task icon. Make sure there is an arrow connecting the start event to the task. Click the wrench icon and select **Service Task** to change the task type. - ![Task with dropdown showing config, including service task](./img/microservice-orchestration-config-service-task.png) -5. Add a descriptive name using the **General** section in the properties panel. For this guide, we'll use `Call Microservice`. -6. In the properties panel, expand the **Task definition** section and use the **Type** field to enter a string used in connecting this service task to the corresponding microservice code. For this guide, we'll use `orchestrate-something` as the type. You will use this while [creating a worker for the service task](#create-a-worker-for-the-service-task). If you do not have an option to add the **Type**, use the wrench icon and select **Service Task**. - - ![Service task with properties panel open](./img/microservice-orchestration-service-task.png) - -7. Add an end event by dragging one from the palette, or by clicking the end event when the last service task in your diagram has focus. Make sure there is an arrow connecting the service task to the end event. -8. On the right upper corner click the blue **Deploy** button. Your diagram is now deployed to your cluster. -9. Start a new process instance by clicking on the blue **Run** button. -10. In the top left corner of the screen, click the square-shaped **Camunda components** button. Navigate to Operate to see your process instance with a token waiting at the service task by clicking **View process instances**. 
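Before moving on, it may help to see where the `orchestrate-something` type ends up: it is the job type a worker subscribes to in a later step. The following is an illustrative sketch of such a worker using the Zeebe Java client; the class name and placeholder credentials are assumptions for this sketch, and the tutorial repository's `Worker.java` may be structured differently:

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.worker.JobWorker;

public class OrchestrateSomethingWorker {

  public static void main(String[] args) throws InterruptedException {
    // Placeholder credentials: use the cluster ID, region, and API client values
    // you create in the "Create a cluster" and "Create credentials" sections below.
    try (ZeebeClient client =
        ZeebeClient.newCloudClientBuilder()
            .withClusterId("<cluster-id>")
            .withClientId("<client-id>")
            .withClientSecret("<client-secret>")
            .withRegion("<region>")
            .build()) {

      // Subscribe to the job type configured on the service task in the diagram.
      try (JobWorker worker =
          client
              .newWorker()
              .jobType("orchestrate-something")
              .handler(
                  (jobClient, job) -> {
                    // Call your microservice here, then report completion back to Zeebe.
                    System.out.println("Handling job " + job.getKey());
                    jobClient.newCompleteCommand(job.getKey()).send().join();
                  })
              .open()) {

        // Keep the worker alive; a real service would tie this to its application lifecycle.
        Thread.sleep(60_000);
      }
    }
  }
}
```

With a worker like this running, the token waiting at the **Call Microservice** task is picked up and completed, which is what moves it to the end event later in this guide.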
- -### Create a cluster - - - -### Create credentials for your Zeebe client - -To interact with your Camunda 8 cluster, you'll use the Zeebe client. First, you'll need to create credentials. - -1. The main page for Console should be open on another tab. Use Console to navigate to your clusters either through the navigation **Clusters** or by using the section under **View all** on the **Clusters** section of the main dashboard. Click on your existing cluster. This will open the **Overview** for your cluster, where you can find your cluster id and region. You will need this information later when creating a worker in the next section. - :::note - If your account is new, you should have a cluster already available. If no cluster is available, or you’d like to create a new one, click **Create New Cluster**. - ::: -2. Navigate to the **API** tab. Click **Create**. -3. Provide a descriptive name for your client like `microservice-worker`. For this tutorial, the scope can be the default Zeebe scope. Click **Create**. -4. Your client credentials can be copied or downloaded at this point. You will need your client id and your client secret when creating a worker in the next section, so keep this window open. Once you close or navigate away from this screen, you will not be able to see them again. - -### Create a worker for the service task - -Next, we’ll create a worker for the service task by associating it with the type we specified on the service task in the BPMN diagram. - -1. Open the downloaded or cloned project ([repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java`) in your IDE . -2. Add your credentials to `application.properties`. Your client id and client secret are available from the previous section in the credential text file you downloaded or copied. Go to the cluster overview page to find your cluster id and region. -3. In the `Worker.java` file, change the type to match what you specified in the BPMN diagram. If you followed the previous steps for this guide and entered “orchestrate-something”, no action is required. -4. After making these changes, perform a Maven install, then run the Worker.java `main` method via your favorite IDE. If you prefer using a terminal, run `mvn package exec:java`. -5. Using the Modeler tab in your browser, navigate to Operate and you will see your token has moved to the end event, completing this process instance. - -## Wrap up - -Congratulations! You successfully built your first microservice orchestration solution with Camunda 8. - -A core value of Camunda 8 lies in the flexibility offered to developers. You can write workers in many different languages. Camunda takes care of the orchestration. - -Don't want to build the process yourself? Click this button to create it from a template in Camunda 8 SaaS, or sign up first. - - - -## Additional resources and next steps - -- Learn more about Camunda 8 and what it can do by reading [What is Camunda 8](/components/concepts/what-is-camunda-8.md) or watching our [Overview video](https://bit.ly/3TjNEm7) in Camunda Academy. -- Get your local environment ready for development with Camunda 8 by [setting up your first development project](setting-up-development-project.md). 
diff --git a/versioned_docs/version-8.2/guides/host-custom-connector.md b/versioned_docs/version-8.2/guides/host-custom-connector.md deleted file mode 100644 index a28624c9521..00000000000 --- a/versioned_docs/version-8.2/guides/host-custom-connector.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -id: host-custom-connectors -title: Host custom Connectors -description: "Learn how to host a custom Connector developed with Connector SDK." ---- - -This guide explains how to host your own **Connectors** developed with [Connector SDK](../../components/connectors/custom-built-connectors/connector-sdk/). - -## Prerequisites - -- Ensure you have a working Camunda cluster in SaaS or Self-Managed. -- Ensure you have a distribution version of your Connector in the form of a "fat" `jar` file. - -For the purpose of this guide, we will be using a generic [Connector template](https://github.com/camunda/connector-template-outbound) -as a reference. Clone the repository, and execute `mvn clean verify package`. -This will produce a file called `target/connector-template-0.1.0-SNAPSHOT-with-dependencies.jar`. In this guide, -we will refer to this file as `connector.jar`. - -## Wiring your Connector with a Camunda cluster - -This approach is equivalent to the [hybrid mode](./use-connectors-in-hybrid-mode.md), except you don't need to override -existing Connectors and instead add a new one. You need to have a running Camunda cluster, and a pair -of `Client ID`/`Client Secret` with `Zeebe` and `Operate` scopes. -Learn more about [how to obtain required credentials](../../components/console/manage-clusters/manage-api-clients/). - -Run the following command: - -```shell -docker run --rm --name=CustomConnectorInSaaS \ - -v $PWD/connector.jar:/opt/app/connector.jar \ - -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=false \ - -e ZEEBE_CLIENT_CLOUD_CLUSTER-ID='' \ - -e ZEEBE_CLIENT_CLOUD_CLIENT-ID='' \ - -e ZEEBE_CLIENT_CLOUD_CLIENT-SECRET='' \ - -e ZEEBE_CLIENT_CLOUD_REGION='' \ - -e CAMUNDA_OPERATE_CLIENT_URL='https://.operate.camunda.io/' \ - camunda/connectors-bundle: -``` - -The line `-v $PWD/connector.jar:/opt/app/connector.jar` binds a volume with your Connector at the path `$PWD/connector.jar` -of your local machine. - -## Wiring your Connector with Camunda Docker instance (without Keycloak) - -This option is applicable if you launch your cluster in a Self-Managed version with -[Camunda Docker Compose variant without Keycloak](https://github.com/camunda/camunda-platform/blob/main/docker-compose-core.yaml). - -Run the following command: - -```shell -docker run --rm --name=CustomConnectorInSMCore \ - -v $PWD/connector.jar:/opt/app/connector.jar \ - --network=camunda-platform_camunda-platform \ - -e ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=zeebe:26500 \ - -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=true \ - -e CAMUNDA_OPERATE_CLIENT_URL=http://operate:8080 \ - -e CAMUNDA_OPERATE_CLIENT_USERNAME=demo \ - -e CAMUNDA_OPERATE_CLIENT_PASSWORD=demo \ - camunda/connectors-bundle: -``` - -:::note -Exact values of the environment variables related to Zeebe, Operate, or network may depend on your own configuration. -::: - -## Wiring your Connector with Camunda Docker instance (with Keycloak) - -This option is applicable if you launch your cluster in a Self-Managed version with -[Camunda Docker Compose variant with Keycloak](https://github.com/camunda/camunda-platform/blob/main/docker-compose.yaml).
- -Run the following command: - -```shell -docker run --rm --name=CustomConnectorInSMWithKeyCloak \ - -v $PWD/connector.jar:/opt/app/connector.jar \ - --network=camunda-platform_camunda-platform \ - -e ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=zeebe:26500 \ - -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=true \ - -e ZEEBE_CLIENT_ID= \ - -e ZEEBE_CLIENT_SECRET= \ - -e ZEEBE_TOKEN_AUDIENCE=zeebe-api \ - -e ZEEBE_AUTHORIZATION_SERVER_URL=http://keycloak:8080/auth/realms/camunda-platform/protocol/openid-connect/token \ - -e CAMUNDA_OPERATE_CLIENT_KEYCLOAK-URL=http://keycloak:8080 \ - -e CAMUNDA_OPERATE_CLIENT_CLIENT-ID=connectors \ - -e CAMUNDA_OPERATE_CLIENT_CLIENT-SECRET= \ - -e CAMUNDA_OPERATE_CLIENT_KEYCLOAK-REALM= \ - -e CAMUNDA_OPERATE_CLIENT_URL=http://operate:8080 \ - camunda/connectors-bundle: -``` - -:::note -Exact values of the environment variables related to Zeebe, Operate, Keycloak, or network may depend on -your own configuration. -::: - -## Wiring your Connector with Camunda Helm charts - -There are multiple ways to configure a Helm/Kubernetes Self-Managed cluster. -Refer to the [official guide](../../self-managed/platform-deployment/helm-kubernetes/overview/) to learn more. - -For the purpose of this section, imagine you installed Helm charts with `helm install dev camunda/camunda-platform`, -and forwarded Zeebe, Operate, and Keycloak ports: - -- `kubectl port-forward svc/dev-zeebe-gateway 26500:26500` -- `kubectl port-forward svc/dev-operate 8081:80` -- `kubectl port-forward svc/dev-keycloak 18080:80` - -Now, you need to obtain both Zeebe and Connectors' Operate OAuth clients. You can do it with `kubectl get secret dev-zeebe-identity-secret -o jsonpath="{.data.*}" | base64 --decode` -and `kubectl get secret dev-connectors-identity-secret -o jsonpath="{.data.*}" | base64 --decode` respectively. - -Run the following command: - -```shell -docker run --rm --name=CustomConnectorInSMWithHelm \ - -v $PWD/connector.jar:/opt/app/connector.jar \ - -e ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=host.docker.internal:26500 \ - -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=true \ - -e ZEEBE_CLIENT_ID=zeebe \ - -e ZEEBE_CLIENT_SECRET= \ - -e ZEEBE_TOKEN_AUDIENCE=zeebe-api \ - -e ZEEBE_AUTHORIZATION_SERVER_URL=http://host.docker.internal:18080/auth/realms/camunda-platform/protocol/openid-connect/token \ - -e CAMUNDA_OPERATE_CLIENT_KEYCLOAK-URL=http://host.docker.internal:18080 \ - -e CAMUNDA_OPERATE_CLIENT_CLIENT-ID=connectors \ - -e CAMUNDA_OPERATE_CLIENT_CLIENT-SECRET= \ - -e CAMUNDA_OPERATE_CLIENT_KEYCLOAK-REALM=camunda-platform \ - -e CAMUNDA_OPERATE_CLIENT_URL=http://host.docker.internal:8081 \ - camunda/connectors-bundle: -``` - -:::note -Exact values of the environment variables related to Zeebe, Operate, Keycloak, or network may depend on -your own configuration. 
-::: diff --git a/versioned_docs/version-8.2/guides/img/MigrationGuidePics.pptx b/versioned_docs/version-8.2/guides/img/MigrationGuidePics.pptx deleted file mode 100644 index a88c383b29a..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/MigrationGuidePics.pptx and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/adding-connector.png b/versioned_docs/version-8.2/guides/img/adding-connector.png deleted file mode 100644 index 4274d4373b0..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/adding-connector.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/architecture-container-managed.png b/versioned_docs/version-8.2/guides/img/architecture-container-managed.png deleted file mode 100644 index 1e726c73390..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/architecture-container-managed.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/architecture-polyglot.png b/versioned_docs/version-8.2/guides/img/architecture-polyglot.png deleted file mode 100644 index f90c8ad050f..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/architecture-polyglot.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/architecture-spring-boot.png b/versioned_docs/version-8.2/guides/img/architecture-spring-boot.png deleted file mode 100644 index e82263b52d0..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/architecture-spring-boot.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/automate-any-process-anywhere.png b/versioned_docs/version-8.2/guides/img/automate-any-process-anywhere.png deleted file mode 100644 index dacc614fa29..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/automate-any-process-anywhere.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/bake-cake-bpmn.png b/versioned_docs/version-8.2/guides/img/bake-cake-bpmn.png deleted file mode 100644 index 2793a2602e7..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/bake-cake-bpmn.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/blank-project.png b/versioned_docs/version-8.2/guides/img/blank-project.png deleted file mode 100644 index cbfb0e7befb..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/blank-project.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/bpmn-expense-sample.png b/versioned_docs/version-8.2/guides/img/bpmn-expense-sample.png deleted file mode 100644 index c17b008e693..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/bpmn-expense-sample.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/camunda7-vs-camunda8-deployment-view.png b/versioned_docs/version-8.2/guides/img/camunda7-vs-camunda8-deployment-view.png deleted file mode 100644 index 45c40de7349..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/camunda7-vs-camunda8-deployment-view.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/complete-baking-cake-bpmn.png b/versioned_docs/version-8.2/guides/img/complete-baking-cake-bpmn.png deleted file mode 100644 index e66a62a1a44..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/complete-baking-cake-bpmn.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/connector-properties-panel.png b/versioned_docs/version-8.2/guides/img/connector-properties-panel.png deleted file mode 100644 index 30949444b47..00000000000 Binary 
files a/versioned_docs/version-8.2/guides/img/connector-properties-panel.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/connectors-blank-task.png b/versioned_docs/version-8.2/guides/img/connectors-blank-task.png deleted file mode 100644 index 59357f9873c..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/connectors-blank-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/connectors-bpmn-diagram.png b/versioned_docs/version-8.2/guides/img/connectors-bpmn-diagram.png deleted file mode 100644 index 4593fac8c08..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/connectors-bpmn-diagram.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/connectors-create-task-append.png b/versioned_docs/version-8.2/guides/img/connectors-create-task-append.png deleted file mode 100644 index 5e753223cf1..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/connectors-create-task-append.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/connectors-rest-red-properties.png b/versioned_docs/version-8.2/guides/img/connectors-rest-red-properties.png deleted file mode 100644 index ea869378e32..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/connectors-rest-red-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/dinner-bpmn-model.png b/versioned_docs/version-8.2/guides/img/dinner-bpmn-model.png deleted file mode 100644 index 43800ec789b..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/dinner-bpmn-model.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/dmn-model-example.png b/versioned_docs/version-8.2/guides/img/dmn-model-example.png deleted file mode 100644 index a98da658716..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/dmn-model-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/dmn-table-blank.png b/versioned_docs/version-8.2/guides/img/dmn-table-blank.png deleted file mode 100644 index f229cc78f19..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/dmn-table-blank.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/dmn-table-complete-example.png b/versioned_docs/version-8.2/guides/img/dmn-table-complete-example.png deleted file mode 100644 index 17716a6ca7e..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/dmn-table-complete-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/dmn-table-example.png b/versioned_docs/version-8.2/guides/img/dmn-table-example.png deleted file mode 100644 index 9d29f569410..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/dmn-table-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/expression-input-example.png b/versioned_docs/version-8.2/guides/img/expression-input-example.png deleted file mode 100644 index 90f025d8ead..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/expression-input-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/form-editor.png b/versioned_docs/version-8.2/guides/img/form-editor.png deleted file mode 100644 index 103db1fefdb..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/form-editor.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/form-email-example.png b/versioned_docs/version-8.2/guides/img/form-email-example.png deleted file mode 100644 index 
b5643cdd93e..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/form-email-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/form-import-example.png b/versioned_docs/version-8.2/guides/img/form-import-example.png deleted file mode 100644 index d1d628ff727..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/form-import-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/form-linking.png b/versioned_docs/version-8.2/guides/img/form-linking.png deleted file mode 100644 index 6fa2e2e42af..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/form-linking.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/form-palette.png b/versioned_docs/version-8.2/guides/img/form-palette.png deleted file mode 100644 index c0b64ffbaa3..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/form-palette.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/form-properties-email.png b/versioned_docs/version-8.2/guides/img/form-properties-email.png deleted file mode 100644 index 4b4469fd258..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/form-properties-email.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/form-values-example.png b/versioned_docs/version-8.2/guides/img/form-values-example.png deleted file mode 100644 index 728b9f4ea7f..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/form-values-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/gateway-example-dinner.png b/versioned_docs/version-8.2/guides/img/gateway-example-dinner.png deleted file mode 100644 index 755d8bf492e..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/gateway-example-dinner.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/gateway-symbol-example.png b/versioned_docs/version-8.2/guides/img/gateway-symbol-example.png deleted file mode 100644 index c39578a3128..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/gateway-symbol-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/getting-started-aspnet-thumbnail.png b/versioned_docs/version-8.2/guides/img/getting-started-aspnet-thumbnail.png deleted file mode 100644 index 57b534b6206..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/getting-started-aspnet-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/getting-started-go-thumbnail.png b/versioned_docs/version-8.2/guides/img/getting-started-go-thumbnail.png deleted file mode 100644 index c09f5f6c93d..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/getting-started-go-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/getting-started-java-thumbnail.png b/versioned_docs/version-8.2/guides/img/getting-started-java-thumbnail.png deleted file mode 100644 index 773bbe092be..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/getting-started-java-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/getting-started-kotlin-thumbnail.png b/versioned_docs/version-8.2/guides/img/getting-started-kotlin-thumbnail.png deleted file mode 100644 index 26e2838e5b8..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/getting-started-kotlin-thumbnail.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/getting-started-node-thumbnail.jpg 
b/versioned_docs/version-8.2/guides/img/getting-started-node-thumbnail.jpg deleted file mode 100644 index 86cbc33ca10..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/getting-started-node-thumbnail.jpg and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/gettingstarted_first-model.png b/versioned_docs/version-8.2/guides/img/gettingstarted_first-model.png deleted file mode 100644 index c1132a923fb..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/gettingstarted_first-model.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart.bpmn b/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart.bpmn deleted file mode 100644 index 8f6f1a9e4ca..00000000000 --- a/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart.bpmn +++ /dev/null @@ -1,26 +0,0 @@ - - - - - SequenceFlow_1jbw0ni - - - - SequenceFlow_1jbw0ni - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart.png b/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart.png deleted file mode 100644 index 1471c14fa2b..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart_advanced.bpmn b/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart_advanced.bpmn deleted file mode 100644 index e65cc2f2890..00000000000 --- a/versioned_docs/version-8.2/guides/img/gettingstarted_quickstart_advanced.bpmn +++ /dev/null @@ -1,80 +0,0 @@ - - - - - Flow_15yg3k5 - - - - - - - Flow_15yg3k5 - Flow_13k1knz - - - Flow_13k1knz - Flow_0qhnfdq - Flow_1vlnqoi - - - - Flow_0qhnfdq - - - =return="Pong" - - - Flow_1vlnqoi - - - =return!="Pong" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/versioned_docs/version-8.2/guides/img/gettingstarted_second-model.png b/versioned_docs/version-8.2/guides/img/gettingstarted_second-model.png deleted file mode 100644 index 536ebdae8ee..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/gettingstarted_second-model.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/home.png b/versioned_docs/version-8.2/guides/img/home.png deleted file mode 100644 index d4721b1d695..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/home.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/honeycomb-icon-example.png b/versioned_docs/version-8.2/guides/img/honeycomb-icon-example.png deleted file mode 100644 index 53437912d11..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/honeycomb-icon-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/human-task-tasklist.png b/versioned_docs/version-8.2/guides/img/human-task-tasklist.png deleted file mode 100644 index a2213f3462d..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/human-task-tasklist.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/implement-mode-active.png b/versioned_docs/version-8.2/guides/img/implement-mode-active.png deleted file mode 100644 index c315d82b437..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/implement-mode-active.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/invite-collaborators.png b/versioned_docs/version-8.2/guides/img/invite-collaborators.png deleted file mode 100644 index 
f1f1113494e..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/invite-collaborators.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/login.png b/versioned_docs/version-8.2/guides/img/login.png deleted file mode 100644 index 2267fd9986c..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/login.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-buffered.png b/versioned_docs/version-8.2/guides/img/message-correlation-buffered.png deleted file mode 100644 index 3578d2b9a16..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-buffered.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-completed.png b/versioned_docs/version-8.2/guides/img/message-correlation-completed.png deleted file mode 100644 index 49ade20d935..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-completed.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-correlated.png b/versioned_docs/version-8.2/guides/img/message-correlation-correlated.png deleted file mode 100644 index 30f545ec65f..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-correlated.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-like-this.png b/versioned_docs/version-8.2/guides/img/message-correlation-like-this.png deleted file mode 100644 index 32dc37aec03..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-like-this.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-message-properties.png b/versioned_docs/version-8.2/guides/img/message-correlation-message-properties.png deleted file mode 100644 index 326d0eb1e0a..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-message-properties.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-message-subscriptions.png b/versioned_docs/version-8.2/guides/img/message-correlation-message-subscriptions.png deleted file mode 100644 index 902e6197d14..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-message-subscriptions.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-not-like-this.png b/versioned_docs/version-8.2/guides/img/message-correlation-not-like-this.png deleted file mode 100644 index 4c4bd44f869..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-not-like-this.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-variables.png b/versioned_docs/version-8.2/guides/img/message-correlation-variables.png deleted file mode 100644 index 273dd66cea6..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-variables.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-wait-on-message.png b/versioned_docs/version-8.2/guides/img/message-correlation-wait-on-message.png deleted file mode 100644 index 05b8da33f6c..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-wait-on-message.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-workflow-state.png b/versioned_docs/version-8.2/guides/img/message-correlation-workflow-state.png deleted 
file mode 100644 index 59ccf786277..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-workflow-state.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/message-correlation-workflow.png b/versioned_docs/version-8.2/guides/img/message-correlation-workflow.png deleted file mode 100644 index 0edbc8a1d11..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/message-correlation-workflow.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/microservice-orchestration-config-service-task.png b/versioned_docs/version-8.2/guides/img/microservice-orchestration-config-service-task.png deleted file mode 100644 index 4238333e236..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/microservice-orchestration-config-service-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/microservice-orchestration-service-task.png b/versioned_docs/version-8.2/guides/img/microservice-orchestration-service-task.png deleted file mode 100644 index 3b810909e73..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/microservice-orchestration-service-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/migration-service-task.png b/versioned_docs/version-8.2/guides/img/migration-service-task.png deleted file mode 100644 index ab14e27612f..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/migration-service-task.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/modeler-example.png b/versioned_docs/version-8.2/guides/img/modeler-example.png deleted file mode 100644 index cedbc85dd5d..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/modeler-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/modeler-form-menu.png b/versioned_docs/version-8.2/guides/img/modeler-form-menu.png deleted file mode 100644 index 47478b8dbb2..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/modeler-form-menu.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/modeler-global-nav.png b/versioned_docs/version-8.2/guides/img/modeler-global-nav.png deleted file mode 100644 index 3f3ab6be105..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/modeler-global-nav.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/modeler-navigation-history.png b/versioned_docs/version-8.2/guides/img/modeler-navigation-history.png deleted file mode 100644 index 15803471f46..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/modeler-navigation-history.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/modeler-navigation.png b/versioned_docs/version-8.2/guides/img/modeler-navigation.png deleted file mode 100644 index 13c0c00daa4..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/modeler-navigation.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/operate-advanced-instances-other.png b/versioned_docs/version-8.2/guides/img/operate-advanced-instances-other.png deleted file mode 100644 index 9d721db703f..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/operate-advanced-instances-other.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/operate-advanced-instances-pong.png b/versioned_docs/version-8.2/guides/img/operate-advanced-instances-pong.png deleted file mode 100644 index 7f7b13fd6a9..00000000000 Binary files 
a/versioned_docs/version-8.2/guides/img/operate-advanced-instances-pong.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/operate-advanced-instances.png b/versioned_docs/version-8.2/guides/img/operate-advanced-instances.png deleted file mode 100644 index 074e085cd4f..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/operate-advanced-instances.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/operate-dashboard.png b/versioned_docs/version-8.2/guides/img/operate-dashboard.png deleted file mode 100644 index 0ffa4378fb8..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/operate-dashboard.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/operate-example.png b/versioned_docs/version-8.2/guides/img/operate-example.png deleted file mode 100644 index b1c4ad18df4..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/operate-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/operate-human-tasks.png b/versioned_docs/version-8.2/guides/img/operate-human-tasks.png deleted file mode 100644 index 072cdf1f849..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/operate-human-tasks.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/process-solution-packaging.png b/versioned_docs/version-8.2/guides/img/process-solution-packaging.png deleted file mode 100644 index 8bc52c065e3..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/process-solution-packaging.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/run-process.png b/versioned_docs/version-8.2/guides/img/run-process.png deleted file mode 100644 index d910bbb3bb9..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/run-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/sendgrid-email.png b/versioned_docs/version-8.2/guides/img/sendgrid-email.png deleted file mode 100644 index cdd6ce17bc3..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/sendgrid-email.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/sequence-flow-example.png b/versioned_docs/version-8.2/guides/img/sequence-flow-example.png deleted file mode 100644 index b7a46931be6..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/sequence-flow-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/share-link.png b/versioned_docs/version-8.2/guides/img/share-link.png deleted file mode 100644 index 0e7ca09c445..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/share-link.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/signup.png b/versioned_docs/version-8.2/guides/img/signup.png deleted file mode 100644 index 9170d78fd6a..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/signup.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/simple-bpmn-process.png b/versioned_docs/version-8.2/guides/img/simple-bpmn-process.png deleted file mode 100644 index a58acc8142a..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/simple-bpmn-process.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/simple-task-creation.png b/versioned_docs/version-8.2/guides/img/simple-task-creation.png deleted file mode 100644 index 9457286367e..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/simple-task-creation.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/guides/img/update-guide-100-to-110-copy-region.png b/versioned_docs/version-8.2/guides/img/update-guide-100-to-110-copy-region.png deleted file mode 100644 index b2c52dde530..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/update-guide-100-to-110-copy-region.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/user-task-example.png b/versioned_docs/version-8.2/guides/img/user-task-example.png deleted file mode 100644 index f3734e6bbab..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/user-task-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/user-task-tasklist.png b/versioned_docs/version-8.2/guides/img/user-task-tasklist.png deleted file mode 100644 index 19c5153ade1..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/user-task-tasklist.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/user-task-token-1.png b/versioned_docs/version-8.2/guides/img/user-task-token-1.png deleted file mode 100644 index abd186204a4..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/user-task-token-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/web-modeler-advanced-process-id.png b/versioned_docs/version-8.2/guides/img/web-modeler-advanced-process-id.png deleted file mode 100644 index 6d167dafbf5..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/web-modeler-advanced-process-id.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/web-modeler-advanced-sequence-flows.png b/versioned_docs/version-8.2/guides/img/web-modeler-advanced-sequence-flows.png deleted file mode 100644 index 2a1a1a19ed6..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/web-modeler-advanced-sequence-flows.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/web-modeler-advanced.png b/versioned_docs/version-8.2/guides/img/web-modeler-advanced.png deleted file mode 100644 index 48918f8685d..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/web-modeler-advanced.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/img/wrench-icon-example.png b/versioned_docs/version-8.2/guides/img/wrench-icon-example.png deleted file mode 100644 index 37c1164b77e..00000000000 Binary files a/versioned_docs/version-8.2/guides/img/wrench-icon-example.png and /dev/null differ diff --git a/versioned_docs/version-8.2/guides/improve-processes-with-optimize.md b/versioned_docs/version-8.2/guides/improve-processes-with-optimize.md deleted file mode 100644 index 0e739233254..00000000000 --- a/versioned_docs/version-8.2/guides/improve-processes-with-optimize.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -id: improve-processes-with-optimize -title: Improve processes with Optimize -sidebar_label: Improve processes with Optimize -description: "The following document provides a basic end-to-end glance into Optimize and its features for new business users." ---- - -Beginner -Time estimate: 20 minutes - -## Purpose - -The following document provides a basic end-to-end glance into Optimize and its features for new business users. - -Optimize offers business intelligence tooling for Camunda customers. By leveraging data collected during process execution, you can access reports, share process intelligence, analyze bottlenecks, and examine areas in business processes for improvement. 
- -With Optimize, review heatmap displays for instances which took longer than average to discover long-running flow nodes. As a result, reap actionable insights and rapidly identify the constraints of your system. - -For an in-depth overview of Optimize’s capabilities, visit our [Optimize documentation]($optimize$/components/what-is-optimize). - -## Set up - -Within Camunda 8, you can launch Optimize from Console — the interface where you can create clusters, and launch both Operate and Tasklist. Therefore, ensure you’ve [created a Camunda 8 account](./create-account.md) before getting started with Optimize for SaaS users. - -:::note -So long as you are operating with [Camunda 8 1.2+](https://camunda.com/blog/2021/10/camunda-cloud-1-2-0-released/) when creating a cluster, you can access Optimize. From here, Optimize requires no additional set up. You can immediately obtain process insights as Optimize already continuously collects data for analysis. -::: - -Once you’ve [created a cluster](/guides/create-cluster.md), click the square-shaped icon in the top left corner of the page and select **Optimize**. - -You can begin analyzing reports and dashboards with just two process versions. However, the more process versions you work with in Optimize, the more performance attributes and data trends you’ll be able to study. For the purposes of this guide, we’ve preconfigured several processes to demonstrate Optimize’s capabilities. - -## Create and analyze dashboards - -Within Optimize, **reports** are based on a _single_ visualization, similar to a single chart or graph. **Dashboards** are aggregations of these visualizations, similar to a full spreadsheet of data collections, or a combination of several comparative charts and graphs. **Collections** are groups of these data sets, similar to project folders for organizational purposes where we can nest a series of dashboards and/or reports within. - -Once you open Optimize, you’ll first view the homepage for these collections, dashboards, and reports. - -To create a collection on the **Home** page, select **Create New > Collection**. Then, you can name your collection and select which data sources and processes will be available. Note that you can select up to 10 processes at once. - -From within your collection, you can again select **Create New** and draft reports and dashboards. Add users and additional data sources by navigating between the tabs inside the collection. - -Let’s create a dashboard inside our first collection. Take the following steps: - -1. Return to the **Home** page to view a list of existing collections, dashboards, and reports. You’ll be able to view all process instances you’ve already run and retrieve additional data on these instances within the Camunda engine. -2. Select the collection where you’d like to create a dashboard. -3. Click **Create New > New Dashboard**. -4. Optimize offers preconfigured dashboard templates, or you can start from a blank dashboard. In this example, we’ll select a preconfigured template by clicking the **Process performance overview** option. Note that you can also create dashboards with multi-process templates. -5. Under **Select Process**, choose the process you’d like to analyze and the version. -6. Click **Create Dashboard**. -7. Name your dashboard, and add any additional existing reports or create filters. Click **Save**. - -![dashboard example](./assets/dashboard.png) - -In the sample above, Optimize drafted a dashboard filled with reports for review. 
These reports include metrics like process instance counts, aggregated process duration, active incidents, and heatmaps. - -Select **Edit > Add a Report** to incorporate additional reports you’ve already created (see [create and access reports](#create-and-access-reports) below). Click and drag the reports on the grid to arrange the dashboard to your liking. - -:::note -Optimize offers collaborative capabilities, too. Click the **Share** tab to share your dashboard. Toggle to **Enable sharing**, and copy or embed the provided link. Colleagues without access to Optimize can still view your report with the shared link. -::: - -## Create and access reports - -To create a custom report based on a key performance indicator (KPI) you’d like to analyze, and to incorporate this report into a dashboard, follow the steps below: - -1. On the right side of the **Home** page, select **Create New > New Report**. Here we’ll take a look at a single process, though you can also view data from multiple processes. -2. Click the text box under **Select Process** and select the process you’d like to analyze. -3. Select the type of report you’d like to use on the right side of the **Create new Report** box. As with dashboards, Optimize offers preconfigured templates such as heatmaps and tables. We’ll begin with a heatmap. -4. Click **Create Report**. -5. Set up and customize your report. Begin by naming your report in the text box at the top of the page, pre-filled with **New Report**. -6. In the gray text box to the right, confirm your data source, and select what you’d like to review from the process (in this case, we are viewing flow nodes). You can also group by topics such as duration or start date. -7. If you’d like, filter the process instance or flow nodes. For example, you can filter by duration, only viewing process instances running for more than seven days. -8. Finally, you have the option to view particular sets of data from the instance, like instance count or absolute value, by selecting the gear icon to the left of your data customization. You can also choose how you’d like to visualize your data in the box beneath **Visualization** (e.g. bar chart, pie chart). Once you’ve made your selections, click **Save**. - -## Alerts - -You don’t have to log in or view reports and dashboards to be alerted that something may need correction or further analysis in your process. - -For this purpose, you can create new alerts for reports within your collections. These alerts watch the reports in your collections for you, and email you an alert if a defined outlier occurs in your process flow. - -To create an alert, take the following steps: - -1. Create a report with a number visualization inside a collection for a KPI you want to track. -2. Inside your collection, select the **Alerts** tab. -3. Select the type of alert you would like to receive. For example, you can receive an email notification when the backlog on your bottleneck becomes too high. - -Once you’re notified, you can begin to examine whether the process is broken and whether additional teams need to be notified. - -## Collections - -Within your collection, you can also access the **Users** and **Data Sources** tabs to further customize your collection. - -### Users - -Within the **Users** tab, review the users and user groups with access to your collection. - -Select **Add** to search for a user or user group to add, who may be assigned as a viewer, editor, or manager. 
- -### Data sources - -Within the **Data Sources** tab, review and add source(s) of your data to create reports and dashboards inside the collection. - -## Additional analysis - -Now that we’ve created data sets within the **Home** page, let’s shift into the **Analysis** tab. - -Inside this tab, you’ll notice **Outlier Analysis** and **Branch Analysis**. - -### Outlier analysis - -Inside **Outlier Analysis**, we utilize heatmap displays. Click **Select Process**, choose your process, and choose your version. - -![heatmap example](./assets/heatmap.png) - -Within the example above, we notice increased heat (recognized as red) surrounding our invoice approved gateway. Several instances have taken significantly longer than average, so we may choose to take a closer look at these instances by downloading the instance IDs, or viewing the details for further analysis. Here, you can also find if the outliers have a shared variable. - -### Branch analysis - -Inside the **Branch Analysis** tab, we can select a ​​process and analyze how particular gateway branches impact the probability of reaching an end event. - -Fill in the process field, click on a gateway, and choose your end event. In the example below, we can further analyze the likelihood of an invoice being processed once it reaches the gateway for approval: - -![branch analysis example](./assets/analysis.png) - -Here, we’ve selected a process flow, gateway, and endpoint for a breakdown of all the instances that went through a particular gateway to a specific endpoint. Hover over the gateway for a breakdown of the process itself. - -## Additional resources and next steps - -We’ve only touched the surface of Optimize. The component is full of additional ways to analyze your data for effective process improvement. We recommend taking a look at several resources to catch up on Optimize’s latest release, new features, and many usage examples: - -- [Camunda Optimize 3.6.0 Release](https://camunda.com/blog/2021/10/camunda-optimize-360-released/) -- [The Ultimate Guide to Solving Bottlenecks with Camunda: Part 1](https://camunda.com/blog/2021/10/the-ultimate-guide-to-solving-bottlenecks-with-camunda-part-1/) -- [Camunda Optimize examples](https://github.com/camunda/camunda-optimize-examples) -- [Process performance made transparent](https://camunda.com/platform/optimize/reports/) diff --git a/versioned_docs/version-8.2/guides/introduction-to-camunda-8.md b/versioned_docs/version-8.2/guides/introduction-to-camunda-8.md deleted file mode 100644 index 838f1b11b6f..00000000000 --- a/versioned_docs/version-8.2/guides/introduction-to-camunda-8.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -id: introduction-to-camunda -title: Introduction to Camunda 8 -sidebar_label: Introduction to Camunda 8 -slug: /guides/ -description: "Step through an introduction to Camunda 8, creating an account, modeling your first process, getting started with microservice orchestration, and more." ---- - -[Camunda 8](https://camunda.io) delivers scalable, on-demand process automation as-a-service. Camunda 8 is combined with powerful execution engines for BPMN processes and DMN decisions, and paired with tools for collaborative modeling, operations, and analytics. - -Camunda 8 consists of six [components](/components/components-overview.md): - -- [Console](/components/console/introduction-to-console.md) - Configure and deploy clusters with Console. -- [Web Modeler](/components/modeler/about-modeler.md) - Collaborate, model processes, and deploy or start new instances. 
Note that Camunda 8 can be used with both [Desktop Modeler](/components/modeler/desktop-modeler/index.md) and [Web Modeler](/components/modeler/web-modeler/launch-web-modeler.md). -- [Zeebe](/components/zeebe/zeebe-overview.md) - The cloud-native process engine of Camunda 8. -- [Tasklist](/components/tasklist/introduction-to-tasklist.md) - Complete tasks which require human input. -- [Operate](/components/operate/operate-introduction.md) - Manage, monitor, and troubleshoot your processes. -- [Optimize]($optimize$/components/what-is-optimize) - Improve your processes by identifying constraints in your system. - -For more conceptual information about Camunda 8, see [What is Camunda 8](components/concepts/what-is-camunda-8.md). -For an on-demand demonstration of the product, visit [Camunda Academy](https://bit.ly/3CvooTX). - -:::note -Interested in migrating process solutions developed for Camunda 7 to run them on Camunda 8? Visit our guide on [migrating from Camunda 7](/guides/migrating-from-camunda-7/index.md). -::: - -## Getting started - -In this section of the Camunda 8 documentation, you'll find guides to get started with Camunda 8 before learning more about each individual [component](/components/components-overview.md). Our getting started guides walk you through the following steps: - -1. [Create a Camunda 8 account](./create-account.md) - Create a Camunda 8 account to create clusters, deploy processes, and create a new instance. -2. [Model your first process](./model-your-first-process.md) - Design and deploy a process, and share the process with your teammates or other stakeholders to begin collaborating. -3. [Create a cluster](./create-cluster.md) - Create your first cluster to execute processes. -4. [Orchestrate human tasks](./getting-started-orchestrate-human-tasks.md) - Assign human tasks to users so they can enter the necessary data to drive the business process forward. -5. [Get started with API orchestration](./getting-started-orchestrate-apis.md) - Configure Connector tasks with domain-specific parameters. -6. [Orchestrate microservices](./getting-started-orchestrate-microservices.md) - Orchestrate the microservices necessary to achieve your end-to-end automated business process. - -## Learn more - -After getting started with Camunda 8, dive deeper in our next steps section of the guides by learning about the following: - -- [Set up your first development project](./setting-up-development-project.md) - Set up your first project to model, deploy, and start a process instance. -- [Set up client connection credentials](./setup-client-connection-credentials.md) - Create, name, and connect your client. -- [Automate a process using BPMN](./automating-a-process-using-bpmn.md) - Learn more about the mechanics and elements of BPMN, and build your first BPMN diagram. -- [Configuring an out-of-the-box Connector](./configuring-out-of-the-box-connector.md) - Automate complex business processes by inserting them into BPMN diagrams within Web Modeler. -- [Create decision tables with DMN](./create-decision-tables-using-dmn.md) - Learn more about Decision Model and Notation to model a set of rules within a table, and yield a decision to rapidly execute a process using a decision engine like Camunda. -- [Build forms with Modeler](./utilizing-forms.md) - Design and configure forms and connect them to a user task or start event to implement a task form in your application. 
-[Improve processes with Optimize](./improve-processes-with-optimize.md) - Leverage data collected during process execution to access reports, share process intelligence, analyze bottlenecks, and examine areas in business processes for improvement. -- [Message correlation](./message-correlation.md) - Target a running workflow with a state update from an external system asynchronously. diff --git a/versioned_docs/version-8.2/guides/message-correlation.md b/versioned_docs/version-8.2/guides/message-correlation.md deleted file mode 100644 index 7c0dc0ef853..00000000000 --- a/versioned_docs/version-8.2/guides/message-correlation.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: message-correlation -title: Message correlation -description: "Message correlation allows you to target a running workflow with a state update from an external system asynchronously." ---- - -Intermediate -Time estimate: 20 minutes - -## Prerequisites - -- [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js) -- [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) -- [Desktop Modeler](https://camunda.com/download/modeler/) - -## Message correlation - -Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously. - -This tutorial uses the [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js), but it serves to illustrate message correlation concepts that are applicable to all language clients. - -We will use [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production. However, it is useful during development. - -## Workflow - -Here is a basic example from [the Camunda 8 documentation](/components/concepts/messages.md): - -![message correlation workflow](img/message-correlation-workflow.png) - -Use [Desktop Modeler](https://camunda.com/download/modeler/) to open the [test-messaging](https://github.com/jwulf/zeebe-message-correlation/blob/master/bpmn/test-messaging.bpmn) file in [this GitHub project](https://github.com/jwulf/zeebe-message-correlation). - -Click on the intermediate message catch event to see how it is configured: - -![message properties](img/message-correlation-message-properties.png) - -A crucial piece here is the **Subscription Correlation Key**. In a running instance of this workflow, an incoming **Money Collected** message will have a `correlationKey` property: - -```typescript - zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid" - }, - }); -``` - -The concrete value of the message `correlationKey` is matched against running workflow instances by comparing the supplied value against the `orderId` variable of running instances subscribed to this message. This is the relationship established by setting the `correlationKey` to `orderId` in the message catch event in the BPMN. - -## Running the demonstration - -To run the demonstration, take the following steps: - -1. Clone this repository. -2. Install dependencies: - :::note - This guide requires `npm` version 6. - ::: - `npm i && npm i -g ts-node typescript` -3. In another terminal, start the Zeebe Broker in addition to [simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor). -4. 
Deploy the workflow and start an instance: - `ts-node start-workflow.ts` - This starts a workflow instance with the `orderId` set to 345: - -```typescript -await zbc.createProcessInstance("test-messaging", { - orderId: "345", - customerId: "110110", - paymentStatus: "unpaid", -}); -``` - -5. Open Simple Monitor at [http://localhost:8082](http://localhost:8082). -6. Click on the workflow instance. You will see the current state of the workflow: - ![workflow state](img/message-correlation-workflow-state.png) - The numbers above the BPMN symbols indicate that no tokens are waiting at the start event, and one has passed through. One token is waiting at the **Collect Money** task, and none have passed through. -7. Take a look at the **Variables** tab at the bottom of the screen. (If you don't see it, you are probably looking at the workflow, rather than the instance. In that case, drill down into the instance): - ![message correlation variables](img/message-correlation-variables.png) - You can see that this workflow instance has the variable `orderId` set to the value 345. -8. Start the workers: - `ts-node workers.ts` -9. Refresh Simple Monitor to see the current state of the workflow: - ![message correlation wait on message](img/message-correlation-wait-on-message.png) - Now, the token is at the message catch event, waiting for a message to be correlated. -10. Take a look at the **Message Subscriptions** tab: - ![message subscriptions](img/message-correlation-message-subscriptions.png) - You can see the broker has opened a message subscription for this workflow instance with the concrete value of the `orderId` 345. This was created when the token entered the message catch event. -11. Send the message in another terminal: - `ts-node send-message.ts` -12. Refresh Simple Monitor, and note that the message has been correlated and the workflow has run to completion: - -![message correlation completed](img/message-correlation-completed.png) - -The **Message Subscriptions** tab now reports that the message was correlated: - -![message correlation correlated](img/message-correlation-correlated.png) - -## Message buffering - -Messages are buffered on the broker, so your external systems can emit messages before your process arrives at the catch event. The amount of time a message is buffered is configured when publishing the message from the client library. - -For example, to send a message buffered for 10 minutes with the JavaScript client: - -```typescript -zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid", - }, - timeToLive: 600000, -}); -``` - -To see it in action, take the following steps: - -1. Keep the workers running. -2. Publish the message: - -```typescript -ts-node send-message.ts -``` - -3. Click on **Messages** at the top of the Simple Monitor page. You will see the message buffered on the broker: - -![message buffered on broker](img/message-correlation-buffered.png) - -4. Start another instance of the workflow: - -```typescript -ts-node start-workflow.ts -``` - -Note that the message is correlated to the workflow instance, even though it arrived before the workflow instance was started. - -## Common mistakes - -A couple of common gotchas: - -- The `correlationKey` in the BPMN message definition is the name of the workflow variable to match against. The `correlationKey` in the message is the concrete value to match against that variable in the workflow instance. - -- The message subscription _is not updated after it is opened_. 
That is not an issue in the case of a message catch event. However, for boundary message events (both interrupting and non-interrupting,) the subscription is opened _as soon as the token enters the bounding subprocess_. If any service task modifies the `orderId` value inside the subprocess, the subscription is not updated. - -For example, the interrupting boundary message event in the following example will not be correlated on the updated value, because the subscription is opened when the token enters the subprocess, using the value at that time: - -![not correlating](img/message-correlation-not-like-this.png) - -If you need a boundary message event correlated on a value modified somewhere in your process, put the boundary message event in a subprocess after the task that sets the variable. The message subscription for the boundary message event will open when the token enters the subprocess, with the current variable value. - -![correlating](img/message-correlation-like-this.png) - -## Summary - -Message Correlation is a powerful feature in Camunda 8. Knowing how messages are correlated, and how and when the message subscription is created is important to design systems that perform as expected. - -Simple Monitor is a useful tool for inspecting the behavior of a local Camunda 8 system to figure out what is happening during development. diff --git a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-bpmn-models.md b/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-bpmn-models.md deleted file mode 100644 index 476f00d14b1..00000000000 --- a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-bpmn-models.md +++ /dev/null @@ -1,201 +0,0 @@ ---- -id: adjusting-bpmn-models -title: Adjust BPMN models -description: "Learn how to adjust your BPMN models when migrating from Camunda 7 to Camunda 8." ---- - -Ensure your BPMN process models are adjusted as follows to migrate them from Camunda 7 to Camunda 8: - -- The namespace of extensions has changed from `http://camunda.org/schema/1.0/bpmn` to `http://camunda.org/schema/zeebe/1.0`. -- Different configuration attributes are used between platform versions, as described for each BPMN element below. -- Camunda 8 has a _different coverage_ of BPMN elements (see [Camunda 8 BPMN coverage](/components/modeler/bpmn/bpmn-coverage.md) versus [Camunda 7 BPMN coverage](https://docs.camunda.org/manual/latest/reference/bpmn20/)), which might require some model changes. Note that the coverage of Camunda 8 will increase over time. - -The following sections describe the capabilities of the existing community-supported [backend diagram converter](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/backend-diagram-converter) for relevant BPMN symbols, including unsupported element attributes that cannot be migrated. - -### General considerations - -The following attributes/elements **cannot** be migrated: - -- `camunda:asyncBefore`: Every task in Zeebe is always asyncBefore and asyncAfter. -- `camunda:asyncAfter`: Every task in Zeebe is always asyncBefore and asyncAfter. -- `camunda:exclusive`: Jobs are always exclusive in Zeebe. -- `camunda:jobPriority`: There is no way to prioritize jobs in Zeebe (yet). -- `camunda:failedJobRetryTimeCycle`: You cannot yet configure the retry time cycle. Alternatively, you can [modify your code](/apis-tools/grpc.md#input-failjobrequest) to use the `retryBackOff` timeout (in ms) for the next retry. 
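For the last point (`camunda:failedJobRetryTimeCycle`), the retry delay is no longer a model attribute but something the worker decides when it fails a job. The following is a minimal sketch using the Zeebe Java client, assuming a client version that supports the `retryBackoff` option; the handler logic and the 30-second backoff are illustrative only:

```java
import io.camunda.zeebe.client.api.response.ActivatedJob;
import io.camunda.zeebe.client.api.worker.JobClient;
import java.time.Duration;

public class RetryBackoffWorker {

  // Illustrative job handler: on failure, decrement the remaining retries and
  // ask the broker to wait before the job becomes available again. This takes
  // over the role of camunda:failedJobRetryTimeCycle from Camunda 7.
  public void handle(JobClient client, ActivatedJob job) {
    try {
      // ... invoke your business logic here ...
      client.newCompleteCommand(job.getKey()).send().join();
    } catch (Exception e) {
      client
          .newFailCommand(job.getKey())
          .retries(job.getRetries() - 1)
          .retryBackoff(Duration.ofSeconds(30)) // next retry at the earliest in 30 seconds
          .errorMessage(e.getMessage())
          .send()
          .join();
    }
  }
}
```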
- -### Service tasks - -:::note -Migrating a service task is described in [adjusting your source code](/guides/migrating-from-camunda-7/adjusting-source-code.md). You will have both BPMN and source code changes. -::: - -![Service Task](../../components/modeler/bpmn/assets/bpmn-symbols/service-task.svg) - -A service task might have **attached Java code**. In this case, the following attributes/elements are migrated and put into a task header: - -- `camunda:class` -- `camunda:delegateExpression` -- `camunda:expression` and `camunda:resultVariable` - -The topic `camunda-7-adapter` is set. - -- `camunda:failedJobRetryTimeCycle`: Here, the amount of defined retries is set to the `zeebe:taskDefinition retries` attribute. - -A service task might leverage **external tasks** instead. In this case, the following attributes/elements are migrated: - -- `camunda:topic` becomes `zeebe:taskDefinition type`. - -The following attributes/elements **cannot** be migrated: - -- `camunda:taskPriority` - -Service tasks using `camunda:connector` will be migrated with the following changes: - -- `camunda:connectorId` becomes `zeebe:taskDefinition type` -- All inputs and outputs are treated like all other inputs and outputs. - -### Send tasks - -![Send Task](../../components/modeler/bpmn/assets/bpmn-symbols/send-task.svg) - -In both engines, a send task has the same behavior as a service task. A send task is migrated exactly like a service task. - -### Gateways - -Gateways rarely need migration. The relevant configuration is mostly in the [expressions](/components/concepts/expressions.md) on outgoing sequence flows. - -### Expressions - -Expressions must be in [friendly-enough expression language (FEEL)](/components/concepts/expressions.md#the-expression-language) instead of [Java unified expression language (JUEL)](https://docs.camunda.org/manual/latest/user-guide/process-engine/expression-language/). - -Migrating simple expressions is doable (as you can see in [these test cases](https://github.com/camunda-community-hub/camunda-7-to-8-migration/blob/main/backend-diagram-converter/core/src/test/java/org/camunda/community/migration/converter/ExpressionTransformerTest.java)), but not all expressions can be automatically converted. - -The following is **not** possible: - -- Calling out to functional Java code using beans in expressions -- Registering custom function definitions within the expression engine -- Using SPIN library (the FEEL data structure behaves like JSON natively, so SPIN can be omitted or replaced) -- Using the `execution` or `task` - -### User tasks - -![User Task](../../components/modeler/bpmn/assets/bpmn-symbols/user-task.svg) - -Human task management is also available in Camunda 8, but uses a different Tasklist user interface and API. - -In Camunda 7, you have [different ways to provide forms for user tasks](https://docs.camunda.org/manual/latest/user-guide/task-forms/): - -- Embedded Task Forms (embedded custom HTML and JavaScript) -- External Task Forms (link to custom applications) -- [Camunda Forms](/guides/utilizing-forms.md) - -:::note -Only Camunda Forms are currently supported in Camunda 8 and can be migrated. 
-::: - -The following attributes/elements can be migrated: - -- Task assignment (to users or groups): - - `bpmn:humanPerformer` - - `bpmn:potentialOwner` - - `camunda:assignee` - - `camunda:candidateGroups` - - `camunda:formKey`, but Camunda 8 requires you to embed the form definition itself into the root element of your BPMN XML models, see [this guide](/guides/utilizing-forms.md#connect-your-form-to-a-bpmn-diagram). - -The following attributes/elements **cannot** yet be migrated: - -- Form handling: - - `camunda:formHandlerClass` - - `camunda:formData` - - `camunda:formProperty` -- `camunda:taskListener` -- `camunda:dueDate` -- `camunda:followUpDate` -- `camunda:priority` - -### Business rule tasks - -![Business Rule Task](../../components/modeler/bpmn/assets/bpmn-symbols/business-rule-task.svg) - -Camunda 8 supports the DMN standard just as Camunda 7 does, so the business rule task can be migrated with the following slight changes: - -The following attributes/elements can be migrated: - -- `camunda:decisionRef` to `zeebe:calledDecision decisionId` -- `camunda:resultVariable` to `zeebe:calledDecision resultVariable` - -The following attributes are **not** yet supported: - -- `camunda:decisionRefBinding`, `camunda:decisionRefVersion`, and `camunda:decisionRefVersionTag`(always use the latest version) -- `camunda:mapDecisionResult` (no mapping happens) -- `camunda:decisionRefTenantId` - -A business rule task can also _behave like a service task_ to allow integration of third-party rule engines. In this case, all attributes described above for the service task migration can also be converted. - -### Call activities - -![Call Activity](../../components/modeler/bpmn/assets/bpmn-symbols/call-activity.svg) - -Call activities are generally supported in Zeebe. The following attributes/elements can be migrated: - -- `camunda:calledElement` will be converted into `zeebe:calledElement` -- Data mapping - - `camunda:in` to `zeebe:input` - - `camunda:out` to `zeebe:output` - -The following attributes/elements **cannot** be migrated: - -- `camunda:calledElementBinding`: Currently Zeebe always assumes 'late' binding. -- `camunda:calledElementVersionTag`: Zeebe does not know a version tag. -- `camunda:variableMappingClass`: You cannot execute code to do variable mapping in Zeebe. -- `camunda:variableMappingDelegateExpression`: You cannot execute code to do variable mapping in Zeebe. - -### Script task - -![Script Task](../../components/modeler/bpmn/assets/bpmn-symbols/script-task.svg) - -Only FEEL scripts can be executed by Zeebe. The converter will create internal scripts as long as you are using FEEL scripts. - -If you require a different scripting language, a script task can behave like normal service tasks instead, which means you must run a job worker that can execute scripts. One available option is to use the [Zeebe Script Worker](https://github.com/camunda-community-hub/zeebe-script-worker), provided as a community extension. - -If you do this, the following attributes/elements are migrated: - -- `camunda:scriptFormat` -- `camunda:script` -- `camunda:resultVariable` - -The task type is set to `script`. - -### Message receive events and receive tasks - -Message correlation works slightly different between the two products: - -- Camunda 7 waits for a message, and the code implementing that the message is received queries for a process instance the message will be correlated to. If no process instance is ready to receive that message, an exception is raised. 
- -- Camunda 8 creates a message subscription for every waiting process instance. This subscription requires a value for a `correlationKey` to be generated when entering the receive task. The code receiving the external message correlates using the value of the `correlationKey`. - -:::note -This means you must inspect and adjust **all** message receive events or receive tasks in your model to define a reasonable `correlationKey`. You also must adjust your client code accordingly. -::: - -The `bpmn message name` is used in both products and doesn't need migration. - -### Multi-instance activities - -Multi-instance activities do exist in the same flavor in Camunda 8 as they did in Camunda 7 (parallel and sequential multi-instance are supported, a loop is not.) - -For implementation, the only current limitation is that a loop cardinality is not supported. - -These elements **cannot** be migrated: - -- `bpmn:loopCardinality` - -These elements can still be used: - -- `bpmn:completionCondition`: Here, the expression has to be transformed to FEEL. - -These elements will be converted: - -- `bpmn:multiInstanceLoopCharacteristics camunda:collection` to `zeebe:loopCharacteristics inputCollection` -- `bpmn:multiInstanceLoopCharacteristics camunda:elementVariable` to `zeebe:loopCharacteristics inputElement` - -Additionally, there is now a built-in way to collect results using `zeebe:loopCharacteristics outputCollection` and `zeebe:loopCharacteristics outputElement`. You should consider this before using a workaround (for example, collecting local variables to a collection in parent scope in an exclusive job). diff --git a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-dmn-models.md b/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-dmn-models.md deleted file mode 100644 index 7bdd5663a70..00000000000 --- a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-dmn-models.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: adjusting-dmn-models -title: Adjust DMN models -description: "Learn how to adjust your DMN models when migrating from Camunda 7 to Camunda 8." ---- - -For Camunda 8, [a former community extension](https://github.com/camunda-community-hub/dmn-scala), built by core Camunda developers is productized. This engine has a higher coverage of DMN elements. This engine can execute DMN models designed for Camunda 7. However, there are some small differences as outlined in this document. - -To evaluate Camunda 7 DMN files in Camunda 8, change the following in the XML: - -`modeler:executionPlatform` should be set to "Camunda Cloud". Prior to this change, you will see "Camunda Platform", indicating designed compatibility with Camunda 7. - -`modeler:executionPlatformVersion` should be set to "8.2.0". Prior to this change, you will see "7.19.0" or similar. - -The following elements/attributes are **not** supported in Camunda 8: - -- `Version Tag` -- `History Time to Live` -- You cannot select the `Expression Language`, only FEEL is supported -- The property `Input Variable` is removed. In FEEL, the input value can be accessed by using `?` if needed. - -Furthermore, legacy behavior can still be executed but the following should be kept in mind: - -- Remove data types `integer` + `long` + `double` in favor of `number` for inputs and outputs (in FEEL, there is only a number type represented as `BigDecimal`). 
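After the XML attributes are adjusted and the decision is deployed to a Camunda 8 cluster, you can check that a migrated decision still produces the expected results by evaluating it directly. The following is a minimal sketch using the Zeebe Java client's `newEvaluateDecisionCommand` (available in recent 8.x clients); the decision ID, input variables, and gateway address are illustrative only:

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.EvaluateDecisionResponse;
import java.util.Map;

public class DecisionSmokeTest {

  public static void main(String[] args) {
    // Connect to a local cluster; adapt the gateway address to your setup.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder().gatewayAddress("localhost:26500").usePlaintext().build()) {

      // Evaluate the migrated decision with a sample input and print the output,
      // so it can be compared with the result of the same input on Camunda 7.
      EvaluateDecisionResponse response =
          client
              .newEvaluateDecisionCommand()
              .decisionId("invoice-classification") // hypothetical decision ID
              .variables(Map.<String, Object>of("amount", 1200, "invoiceCategory", "Travel Expenses"))
              .send()
              .join();

      System.out.println(response.getDecisionOutput());
    }
  }
}
```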
diff --git a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-source-code.md b/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-source-code.md deleted file mode 100644 index 07c3534acec..00000000000 --- a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/adjusting-source-code.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -id: adjusting-source-code -title: Adjust source code -description: "Learn how and where to adjust your source code when migrating from Camunda 7 to Camunda 8." -keywords: ["migrating to Zeebe"] ---- - -Camunda 8 has a different workflow engine than Camunda 7 - [Zeebe](/components/zeebe/zeebe-overview.md). As a result, you must migrate some of your code to work with the Zeebe API, especially code that does the following: - -- Uses the Client API (e.g. to start process instances) -- Implements [service tasks](/components/modeler/bpmn/service-tasks/service-tasks.md), which can be: - - [External tasks](/components/best-practices/development/invoking-services-from-the-process-c7.md#external-tasks), where workers subscribe to the engine. - - [Java code attached to a service task](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/) and called by the engine directly (in-VM). - -For example, to migrate an existing Spring Boot application, take the following steps: - -1. Adjust Maven dependencies: - -- Remove Camunda 7 Spring Boot Starter and all other Camunda dependencies. -- Add [Spring Zeebe Starter](https://github.com/camunda-community-hub/spring-zeebe). - -2. Adjust config: - -- Set [Camunda 8 credentials](https://github.com/camunda-community-hub/spring-zeebe#configuring-camunda-platform-8-saas-connection) (for example, in `src/main/resources/application.properties`) and point them to an existing Zeebe cluster. -- Remove existing Camunda 7 settings. - -3. Add `@ZeebeDeployment(resources = "classpath*:**/*.bpmn")` to automatically deploy all BPMN models. - -4. Adjust your source code and process model as described in the sections below. - -### Client API - -The Zeebe API (e.g. the workflow engine API - start process instances, subscribe to tasks, or complete them) has been completely redesigned and is not compatible with Camunda 7. While conceptually similar, the API uses different method names, data structures, and protocols. - -If this affects large parts of your code base, you could write a small abstraction layer implementing the Camunda 7 API delegating to Camunda 8, probably marking unavailable methods as deprecated. We welcome community extensions that facilitate this but have not yet started our own efforts. - -### Service tasks as external tasks - -[External task workers](https://docs.camunda.org/manual/latest/user-guide/process-engine/external-tasks/) in Camunda 7 are conceptually comparable to [job workers](/components/concepts/job-workers.md) in Camunda 8. This means they are generally easier to migrate. - -The "external task topic" from Camunda 7 translates directly into a "task type name" in Camunda 8; therefore, `camunda:topic` becomes `zeebe:taskDefinition type` in your BPMN model. - -The community-supported [Camunda 7 Adapter](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/camunda-7-adapter) picks up your `@ExternalTaskHandler` beans, wraps them into a JobWorker, and subscribes to the `camunda:topic` you defined as `zeebe:taskDefinition type`. 
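If you migrate such a worker by hand instead of using the adapter, the result is typically a job worker whose type is the former topic name. A minimal sketch using [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe), assuming the former external task topic was `payment`; the class name and variable handling are illustrative only:

```java
import io.camunda.zeebe.client.api.response.ActivatedJob;
import io.camunda.zeebe.spring.client.annotation.JobWorker;
import org.springframework.stereotype.Component;

@Component
public class PaymentWorker {

  // Camunda 7: an @ExternalTaskHandler subscribed to the topic "payment".
  // Camunda 8: a job worker subscribed to the task type "payment", i.e. the
  // value set as zeebe:taskDefinition type in the BPMN model.
  @JobWorker(type = "payment")
  public void handlePayment(ActivatedJob job) {
    String orderId = (String) job.getVariablesAsMap().get("orderId");
    // ... charge the payment for this order here ...
  }
}
```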
- -### Service tasks with attached Java code (Java delegates, expressions) - -In Camunda 7, there are three ways to attach Java code to service tasks in the BPMN model using different attributes in the BPMN XML: - -- Specify a class that implements a JavaDelegate or ActivityBehavior: `camunda:class`. -- Evaluate an expression that resolves to a delegation object: `camunda:delegateExpression`. -- Invoke a method or value expression: `camunda:expression`. - -Camunda 8 cannot directly execute custom Java code. Instead, there must be a [job worker](/components/concepts/job-workers.md) executing code. - -The community-supported [Camunda 7 Adapter](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/camunda-7-adapter) implements such a job worker using [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe). It subscribes to the task type `camunda-7-adapter`. [Task headers](/components/modeler/bpmn/service-tasks/service-tasks.md#task-headers) are used to configure a delegation class or expression for this worker. - -![Service task in Camunda 7 and Camunda 8](../img/migration-service-task.png) - -You can use this worker directly, but more often it might serve as a starting point or simply be used for inspiration. - -The community-supported [Camunda 7 to Camunda 8 Converter](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/backend-diagram-converter) will adjust the service tasks in your BPMN model automatically for this adapter. - -The topic `camunda-7-adapter` is set and the following attributes/elements are migrated and put into a task header: - -- `camunda:class` -- `camunda:delegateExpression` -- `camunda:expression` and `camunda:resultVariable` diff --git a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/conceptual-differences.md b/versioned_docs/version-8.2/guides/migrating-from-camunda-7/conceptual-differences.md deleted file mode 100644 index 2a122f7cb3b..00000000000 --- a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/conceptual-differences.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -id: conceptual-differences -title: Conceptual differences with Camunda 7 and Camunda 8 -sidebar_label: Conceptual differences -description: "Understand conceptual differences with Camunda 7 and Camunda 8 before migrating." ---- - -### Conceptual differences - -This section does not compare Camunda 7 with Camunda 8 in detail, but rather lists differing aspects important to know when thinking about migration. - -#### No embedded engine in Camunda 8 - -Camunda 7 allows embedding the workflow engine as a library in your application. This means both run in the same JVM, share thread pools, and can even use the same data source and transaction manager. - -In contrast, the workflow engine in Camunda 8, Zeebe, is always a remote resource for your application, while the embedded engine mode is not supported. - -If you are interested in the reasons why we switched our recommendation from embedded to remote workflow engines, refer to [this blog post](https://blog.bernd-ruecker.com/moving-from-embedded-to-remote-workflow-engines-8472992cc371). - -The implications for your process solution and the programming model are described below. Conceptually, the only big difference is that with a remote engine, you cannot share technical [ACID transactions](https://en.wikipedia.org/wiki/ACID) between your code and the workflow engine. 
You can read more about it in the blog post on [achieving consistency without transaction managers](https://blog.bernd-ruecker.com/achieving-consistency-without-transaction-managers-7cb480bd08c). - -#### Different data types - -In Camunda 7, you can store different data types, including serialized Java objects. - -Camunda 8 only allows storage of primary data types or JSON as process variables. This might require some additional data mapping in your code when you set or get process variables. - -Camunda 7 provides [Camunda Spin](https://docs.camunda.org/manual/latest/reference/spin/) to ease XML and JSON handling. This is not available with Camunda 8, and ideally you migrate to your own data transformation logic that you can fully control (e.g. using Jackson). - -To migrate existing process solutions that use Camunda Spin heavily, you can still add the Camunda Spin library to your application itself and use its API to do the same data transformations as before in your application code. - -#### Expression language - -Camunda 7 uses [Java Unified Expression Language (JUEL)](https://docs.camunda.org/manual/latest/user-guide/process-engine/expression-language/) as the expression language. In the embedded engine scenario, expressions can even reach into beans (Java object instances) in the application. - -Camunda 8 uses [Friendly-Enough Expression Language (FEEL)](/components/modeler/feel/what-is-feel.md), and expressions can only access the process instance data and variables. - -Most expressions can be converted (see [this community extension](https://github.com/camunda-community-hub/camunda-7-to-8-migration/blob/main/backend-diagram-converter/core/src/main/java/org/camunda/community/migration/converter/expression/ExpressionTransformer.java) as a starting point), some might need to be completely rewritten, and some might require an additional service task to prepare necessary data (which may have been calculated on the fly when using Camunda 7). - -#### Different Connector infrastructure - -Through Camunda Connect, Camunda 7 provides an HTTP and a SOAP HTTP [Connector](https://docs.camunda.org/manual/latest/reference/connect/). Camunda 8 offers multiple [Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) out-of-the-box. - -To migrate existing Connectors, consider the following options: - -- Use the [REST protocol Connector](components/connectors/protocol/rest.md) to leverage an out-of-the-box Connector. -- Create a small bridging layer via custom [job workers](/components/concepts/job-workers.md). - -### Process solutions using Spring Boot - -With Camunda 7, a frequently used architecture for building a process solution (also known as a process application) is composed of: - -- Java -- Spring Boot -- Camunda Spring Boot Starter with embedded engine -- Glue code implemented in Java delegates (being Spring beans) - -This is visualized on the left-hand side of the picture below. With Camunda 8, a comparable process solution would look like the right-hand side of the picture and leverage: - -- Java -- Spring Boot -- Spring Zeebe Starter (embedding the Zeebe client) -- Glue code implemented as workers (being Spring beans) - -![spring boot](../img/architecture-spring-boot.png) - -The difference is that the engine is no longer embedded, which is also our latest [greenfield stack recommendation in Camunda 7](/docs/components/best-practices/architecture/deciding-about-your-stack-c7/#the-java-greenfield-stack). 
If you are interested in the reasons why we switched our recommendation from embedded to remote workflow engines, refer to [this blog post](https://blog.bernd-ruecker.com/moving-from-embedded-to-remote-workflow-engines-8472992cc371). - -The packaging of a process solution is the same with Camunda 7 and Camunda 8. Your process solution is one Java application that consists of your BPMN and DMN models, as well as all glue code needed for connectivity or data transformation. The big difference is that the configuration of the workflow engine itself is not part of the Spring Boot application anymore. - -![Process Solution Packaging](../img/process-solution-packaging.png) - -Process solution definition taken from [Practical Process Automation](https://processautomationbook.com/). - -You can find a complete Java Spring Boot example, showing the Camunda 7 process solution alongside the comparable Camunda 8 process solution, in the [Camunda 7 to Camunda 8 migration example](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/example). - -### Programming model - -The programming models of Camunda 7 and Camunda 8 are very similar if you program in Java and use Spring. - -For example, a worker in Camunda 8 can be implemented like this (using [spring-zeebe](https://github.com/camunda-community-hub/spring-zeebe)): - -```java -@JobWorker(type = "payment") -public void retrievePayment(ActivatedJob job) { - // Do whatever you need to, e.g. invoke a remote service: - String orderId = (String) job.getVariablesAsMap().get("orderId"); - paymentRestClient.invoke(...); -} -``` - -You can find more information on the programming model in Camunda 8 in [this blog post](https://blog.bernd-ruecker.com/how-to-write-glue-code-without-java-delegates-in-camunda-cloud-9ec0495d2ba5). - -:::note -JUnit testing with an embedded in-memory engine is also possible with Camunda 8, see [spring-zeebe documentation](https://github.com/camunda-community-hub/spring-zeebe#writing-test-cases). -::: - -### Camunda deployment - -A typical deployment of the workflow engine itself looks different because the workflow engine is no longer embedded into your own deployment artifacts. - -With Camunda 7, a typical deployment includes: - -- Your Spring Boot application with all custom code and the workflow engine, Cockpit, and Tasklist embedded. This application is typically scaled to at least two instances (for resilience) -- A relational database -- An Elasticsearch database (for Optimize) -- Optimize (a Java application) - -With Camunda 8, you deploy: - -- Your Spring Boot application with all custom code and the Zeebe client embedded. This application is typically scaled to at least two instances (for resilience) -- The Zeebe broker, typically scaled to at least three instances (for resilience) -- An Elasticsearch database (for Operate, Tasklist, and Optimize) -- Optimize, Operate, and Tasklist (each one is a Java application). You can scale those applications to increase availability if you want. - -![Camunda 7 vs Camunda 8 Deployment View](../img/camunda7-vs-camunda8-deployment-view.png) - -Camunda 8 deployments happen within Kubernetes. There are [Helm charts available](self-managed/platform-deployment/helm-kubernetes/overview.md) if you want to run Camunda 8 Self-Managed. - -Camunda 8 is also available as a SaaS offering from Camunda. In this case, deploy your own process solution and Camunda operates the rest. 
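To make the SaaS case concrete: the only infrastructure-related piece left in your own application is the connection to the managed cluster. A minimal sketch with the Zeebe Java client's cloud builder follows; the credential values are placeholders taken from an API client in Console (in a Spring Boot application you would typically put them into `application.properties` instead):

```java
import io.camunda.zeebe.client.ZeebeClient;

public class SaasConnection {

  public static void main(String[] args) {
    // Placeholders: use the cluster ID and API client credentials from Console.
    try (ZeebeClient client =
        ZeebeClient.newCloudClientBuilder()
            .withClusterId("your-cluster-id")
            .withClientId("your-client-id")
            .withClientSecret("your-client-secret")
            .build()) {

      // Simple connectivity check: request the cluster topology.
      System.out.println(client.newTopologyRequest().send().join());
    }
  }
}
```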
- -:::note -For local development purposes, you can [spin up Camunda 8 on a developer machine using Docker or Docker Compose](self-managed/platform-deployment/docker.md). Developers could also create a cluster for development purposes in the SaaS offering of Camunda. -::: - -### Other process solution architectures - -Besides Spring Boot, there are other environments used to build process solutions. - -#### Container-managed engine (Tomcat, WildFly, WebSphere & co) - -Camunda 8 doesn't provide integration into Jakarta EE application servers like Camunda 7 does. Instead, Jakarta EE applications need to manually add the Zeebe client library. The implications are comparable to what is described for Spring Boot applications in this guide. - -![container-managed](../img/architecture-container-managed.png) - -#### CDI or OSGI - -Due to limited adoption, there is no support for CDI or OSGI in Camunda 8. A lightweight integration layer comparable to [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe) might evolve in the future, and we are happy to support this as a community extension to the Zeebe project. - -#### Polyglot applications (C#, Node.js) - -When you run your application in Node.js or C#, for example, you exchange one remote engine (Camunda 7) for another (Camunda 8). As Zeebe comes with a different API, you need to adjust your source code. Zeebe does not use REST as API technology, but gRPC, and you will need to leverage a [client library](apis-tools/working-with-apis-tools.md#deploy-processes-start-process-instances-and-more-using-zeebe-client-libraries) instead. - -![polyglot architecture](../img/architecture-polyglot.png) - -### Plugins - -[**Process engine plugins**](https://docs.camunda.org/manual/latest/user-guide/process-engine/process-engine-plugins/) are not available in Camunda 8, as such plugins can massively change the behavior or even harm the stability of the engine. Some use cases might be implemented using [exporters](/self-managed/concepts/exporters.md) or [interceptors](self-managed/zeebe-deployment/zeebe-gateway/interceptors.md#implementing-an-interceptor). - -:::note -Exporters are only available for Self-Managed Zeebe clusters and not in Camunda 8 SaaS. -::: - -Migrating **Desktop Modeler Plugins** is generally possible, as the same modeler infrastructure is used. - -**Cockpit or Tasklist plugins** _cannot_ be migrated. diff --git a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/index.md b/versioned_docs/version-8.2/guides/migrating-from-camunda-7/index.md deleted file mode 100644 index 000e09dd12f..00000000000 --- a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: index -title: Pre-migration details -description: "Migrate process solutions developed for Camunda 7 to run them on Camunda 8." -keywords: - [ - Camunda 8, - Camunda 7, - migration guide, - transition, - transition guide, - Camunda 7, - ] ---- - -:::note -Migration of existing projects to Camunda 8 is optional. Camunda 7 still has ongoing [support](https://docs.camunda.org/enterprise/announcement/). 
-::: - -This guide describes how to migrate process solutions developed for Camunda 7 to run them on Camunda 8, including: - -- Differences in application architecture -- How process models and code can generally be migrated, whereas runtime and history data cannot -- How migration can be very simple for some models, but also marked limitations, where migration might get very complicated -- You need to adjust code that uses the workflow engine API -- How you might be able to reuse glue code -- Community extensions that can help with migration -- The Clean Delegate approach, which helps you write Camunda 7 solutions that are easier to migrate - -We are watching all customer migration projects closely and will update this guide in the future. - -## What to expect - -Before diving into concrete steps on migrating your models and code, let's cover some conceptual topics and migration readiness steps. The list below provides an outline of the sections in this guide: - -- [Conceptual differences](./conceptual-differences.md) -- [Migration readiness](./migration-readiness.md) -- [Adjusting BPMN models](./adjusting-bpmn-models.md) -- [Adjusting DMN models](./adjusting-dmn-models.md) -- [Adjusting source code](./adjusting-source-code.md) - -## Open issues - -As described earlier in this guide, migration is an ongoing topic and this guide is far from complete. Open issues include the following: - -- Describe implications on testing -- Discuss adapters for Java or REST client -- Discuss more concepts around BPMN: - ** [Field injection](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/#field-injection) that is using `camunda:field` available on many BPMN elements. - ** Multiple instance markers available on most BPMN elements. - ** `camunda:inputOutput` available on most BPMN elements. - ** `camunda:errorEventDefinition` available on several BPMN elements. -- Discuss workload migrations (operations) -- Eventual consistency - -[Reach out to us](/contact/) to discuss your specific migration use case. diff --git a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/migration-readiness.md b/versioned_docs/version-8.2/guides/migrating-from-camunda-7/migration-readiness.md deleted file mode 100644 index bda0326e88e..00000000000 --- a/versioned_docs/version-8.2/guides/migrating-from-camunda-7/migration-readiness.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -id: migration-readiness -title: Migration preparation -description: "Learn readiness indicators for migrating from Camunda 7 to Camunda 8." ---- - -:::note -Migration of existing projects to Camunda 8 is optional. Camunda 7 still has ongoing [support](https://docs.camunda.org/enterprise/announcement/). -::: - -Let's discuss if you need to migrate before diving into the necessary steps and what tools can help you achieve the migration. - -## When to migrate? - -New projects should typically be started using Camunda 8. - -Existing solutions using Camunda 7 might simply keep running on Camunda 7. Camunda has ongoing [support](https://docs.camunda.org/enterprise/announcement/), so there is no need to rush on a migration project. - -You should consider migrating existing Camunda 7 solutions if: - -- You are looking to leverage a SaaS offering (e.g. to reduce the effort for hardware or infrastructure setup and maintenance). -- You are in need of performance at scale and/or improved resilience. -- You are in need of certain features that can only be found in Camunda 8 (e.g. 
[BPMN message buffering](/components/concepts/messages.md#message-buffering), big [multi-instance constructs](/components/modeler/bpmn/multi-instance/multi-instance.md), the new [Connectors framework](/components/connectors/introduction.md), or the improved collaboration features in Web Modeler). - -## Migration steps - -For migration, examine development artifacts (BPMN models and application code), and workflow engine data (runtime and history) if you are migrating a process solution running in production. - -The typical steps are: - -- Analyze your current development artifacts with the community-supported [diagram converter](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/backend-diagram-converter) to gain a general overview of required steps. -- Migrate development artifacts: - - [Adjust your BPMN models](/guides/migrating-from-camunda-7/adjusting-bpmn-models.md) - - [Adjust your DMN models](/guides/migrating-from-camunda-7/adjusting-dmn-models.md) - - Adjust your development project (remove embedded engine, add Zeebe client). - - Refactor your code to use the Zeebe API, likely via a Zeebe client. - - Refactor your glue code or use [the Java delegate adapter project](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/camunda-7-adapter), a community-supported tool. -- Migrate workflow engine data. - -If you follow the migration steps linearly, you can run into issues individually or one after the other. Starting with a more complete picture of what needs to be done provides a more holistic approach for your migration journey. You may find tackling a particular topic or focus area easier than trying to adjust all your BPMN models before moving to the next step. - -In general, **development artifacts** _can_ be migrated: - -- **BPMN models:** Camunda 8 uses BPMN like Camunda 7 does, which generally allows use of the same model files, but you might need to configure _different extension attributes_ (at least by using a different namespace). Furthermore, Camunda 8 has a _different coverage_ of BPMN concepts that are supported (see [Camunda 8 BPMN coverage](/components/modeler/bpmn/bpmn-coverage.md) vs [Camunda 7 BPMN coverage](https://docs.camunda.org/manual/latest/reference/bpmn20/)), which might require some model changes. Note that the coverage of Camunda 8 will increase over time. For more details, see [adjust your BPMN models](/guides/migrating-from-camunda-7/adjusting-bpmn-models.md). - -- **DMN models:** Camunda 8 uses DMN like Camunda 7 does. There are a few necessary changes in the models. Some rarely used features of Camunda 7 are not supported in Camunda 8. For more details, see [adjust your DMN models](/guides/migrating-from-camunda-7/adjusting-dmn-models.md). - -- **CMMN models:** It is not possible to run CMMN on Zeebe, so _CMMN models cannot be migrated_. You can remodel cases in BPMN according to [Building Flexibility into BPMN Models](https://camunda.com/best-practices/building-flexibility-into-bpmn-models/), keeping in mind the [Camunda 8 BPMN coverage](/components/modeler/bpmn/bpmn-coverage.md). - -- **Application code:** The application code needs to use _a different client library and different APIs_. This will lead to code changes you must implement. - -- **Architecture:** The different architecture of the core workflow engine might require _changes in your architecture_ (e.g. if you used the embedded engine approach).
Furthermore, certain concepts of Camunda 7 are no longer possible (like hooking in Java code at various places, or control transactional behavior with asynchronous continuations) which might lead to _changes in your model and code_. - -In general, **workflow engine data** is harder to migrate to Camunda 8: - -- **Runtime data:** Running process instances of Camunda 7 are stored in the Camunda 7 relational database. Like with a migration from third party workflow engines, you can read this data from Camunda 7 and use it to create the right process instances in Camunda 8 in the right state. This way, you can migrate running process instances from Camunda 7 to Camunda 8. [A process instance migration tool](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/process-instance-migration) is in place to ease this task. This tool is community supported. - -- **History data:** Historic data from the workflow engine itself cannot be migrated. However, data in Optimize can be kept. - -## Migration tooling - -:::note -These tools are community maintained. For more assistance, create an issue on the repo directly. -::: - -The [Camunda 7 to Camunda 8 migration tooling](https://github.com/camunda-community-hub/camunda-7-to-8-migration), available as a community extension, contains three components that will help you with migration: - -1. [A converter available in different flavors (web app, CLI) to convert BPMN models from Camunda 7 to Camunda 8](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/backend-diagram-converter). This maps possible BPMN elements and technical attributes into the Camunda 8 format and gives you warnings where this is not possible. The result of a conversion is a model with mapped implementation details as well as hints on what changed, needs to be reviewed, or adjusted to function properly in Camunda 8. - -2. [The Camunda 7 Adapter](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/camunda-7-adapter). This is a library providing a worker to hook in Camunda 7-based glue code. For example, it can invoke existing JavaDelegate classes. - -3. [A process instance migration tool](https://github.com/camunda-community-hub/camunda-7-to-8-migration/tree/main/process-instance-migration) to migrate running process instances from Camunda 7 to Camunda 8. Ideally, you should let running instances finish prior to migrating. - -The tools mentioned above are a good starting point, but are only one option for how you can approach your migration, as described below. - -## Prepare for smooth migrations - -Sometimes you might not be able to use Camunda 8 right away as described in [What to do When You Can’t Quickly Migrate to Camunda 8](https://camunda.com/blog/2022/05/what-to-do-when-you-cant-quickly-migrate-to-camunda-8/). In this case, you will keep developing Camunda 7 process solutions, but you should establish some practices as quickly as possible to ease migration projects later on. - -To implement Camunda 7 process solutions that can be easily migrated, stick to the following rules and development practices: - -- Implement what we call **Clean Delegates** - concentrate on reading and writing process variables, plus business logic delegation. Data transformations will be mostly done as part of your delegate (and especially not as listeners, as mentioned below). Separate your actual business logic from the delegates and all Camunda APIs. Avoid accessing the BPMN model and invoking Camunda APIs within your delegates. 
-- Don’t use listeners or Spring beans in expressions to do data transformations via Java code. -- Don’t rely on an ACID transaction manager spanning multiple steps or resources. -- Don’t expose Camunda APIs (REST or Java) to other services or frontend applications. -- Use primitive variable types or JSON payloads only (no XML or serialized Java objects). -- Use simple expressions or plug-in FEEL. FEEL is the only supported expression language in Camunda 8. JSONPath is also relatively easy to translate to FEEL. Avoid using special variables in expressions, e.g. `execution` or `task`. -- Use your own user interface or Camunda Forms; the other form mechanisms are not supported out of the box in Camunda 8. -- Avoid using any implementation classes from Camunda; generally, those with `\*.impl.\*` in their package name. -- Avoid using engine plugins. - -We also recommend reviewing [BPMN elements supported in Camunda 8](/components/modeler/bpmn/bpmn-coverage.md). We are actively working on closing feature gaps. - -[Execution Listeners](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/#execution-listener) and [Task Listeners](https://docs.camunda.org/manual/latest/user-guide/process-engine/delegation-code/#task-listener) are areas in Camunda 8 that are still under discussion. Currently, those use cases need to be solved slightly differently. Depending on your use case, the following Camunda 8 features can be used: - -- [Input and output mappings using FEEL](/components/modeler/feel/what-is-feel.md) -- [Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) -- [Operate API](/apis-tools/operate-api/overview.md) including historical info on processes -- [Exporters](/self-managed/zeebe-deployment/exporters/exporters.md) -- Client interceptors -- [Gateway interceptors](/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md) -- [Job workers](/components/concepts/job-workers.md) on user tasks -- [Job workers](/components/concepts/job-workers.md) on service tasks - -Expect to soon have a solution in Camunda 8 for most of the problems that listeners solve. Still, it might be good practice to use as few listeners as possible, and especially don’t use them for data mapping as described below. - -### Clean delegates - -Given Java delegates and the workflow engine are embedded as a library, projects can do dirty hacks in their code. Casting to implementation classes? No problem. Using a ThreadLocal or trusting a specific transaction manager implementation? Yeah, possible. Calling complex Spring beans hidden behind a simple Java Unified Expression Language (JUEL) expression? Well, you guessed it — doable! - -Those hacks are the real showstoppers for migration, as they cannot be migrated to Camunda 8. In fact, [Camunda 8 increased isolation intentionally](https://blog.bernd-ruecker.com/moving-from-embedded-to-remote-workflow-engines-8472992cc371). - -Concentrate on what a Java delegate is intended to do: - -1. Read variables from the process and potentially manipulate or transform that data to be used by your business logic. -2. Delegate to business logic — this is where Java delegates got their name from. In a perfect world, you would simply issue a call to your business code in another Spring bean or remote service. -3. Transform the results of that business logic into variables you write into the process. 
- -Here's an example of a good Java delegate: - -```java -@Component -public class CreateCustomerInCrmJavaDelegate implements JavaDelegate { - - @Autowired - private CrmFacade crmFacade; - - public void execute(DelegateExecution execution) throws Exception { - // Data Input Mapping - CustomerData customerData = (CustomerData) execution.getVariable("customerData"); - - // Delegate to business logic - String customerId = crmFacade.createCustomer(customerData); - - // Data Output Mapping - execution.setVariable("customerId", customerId); - } -} -``` - -Never cast to Camunda implementation classes, use any ThreadLocal object, or influence the transaction manager in any way. Java delegates should always be stateless and not store any data in their fields. - -The resulting delegate can be migrated to a Camunda 8 API, or reused by the adapter provided in [this migration community extension](https://github.com/camunda-community-hub/camunda-7-to-8-migration/). - -### No transaction managers - -You should not trust ACID [transaction managers](https://blog.bernd-ruecker.com/achieving-consistency-without-transaction-managers-7cb480bd08c) to glue together the workflow engine with your business code. Instead, embrace eventual consistency and make every service task its own transactional step. If you are familiar with Camunda 7 lingo, this means that all BPMN elements will be `async=true`. A process solution that relies on five service tasks to be executed within one ACID transaction, probably rolling back in case of an error, will make migration challenging. - -### Don’t expose Camunda API - -You should apply the [information hiding principle](https://en.wikipedia.org/wiki/Information_hiding) and not expose too much of the Camunda API to other parts of your application. - -In the example below, you should not hand over an execution context to your `CrmFacade`: - -```java -// DO NOT DO THIS! -crmFacade.createCustomer(execution); -``` - -The same holds true when a new order is placed, and your order fulfillment process should be started. Instead of the frontend calling the Camunda API to start a process instance, provide your own endpoint to translate between the inbound REST call and Camunda, for example: - -```java -@RestController -public class OrderFulfillmentRestController { - - @Autowired - private ProcessEngine camunda; - - @RequestMapping(path = "/order", method = POST) - public ResponseEntity placeOrder(@RequestBody OrderDto orderPayload) throws Exception { - // TODO: Somehow extract data from orderPayload - OrderData orderData = OrderData.from(orderPayload); - - ProcessInstance pi = camunda.getRuntimeService() // - .startProcessInstanceByKey("orderFulfillment", // - Variables.putValue("order", orderData)); - - return ResponseEntity.accepted().body(StatusDto.of("pending")); - } -} -``` - -### Use primitive variable types or JSON - -Camunda 7 provides flexible ways to add data to your process. For example, you could add Java objects that would be stored as serialized byte streams. Serialized Java objects are brittle and also tied to the Java runtime environment. - -Another possibility is transforming those objects on the fly to JSON or XML using Camunda Spin. It turned out this was black magic and led to regular problems, which is why Camunda 8 does not offer this anymore. Instead, you should do any transformation within your code before talking to the Camunda API. Camunda 8 only takes JSON as a payload, which automatically includes primitive values.
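In Camunda 8, the Zeebe Java client can already do this JSON-to-Java mapping for you. A rough sketch of how that can look in a spring-zeebe job worker, assuming a `CustomerData` POJO that Jackson can deserialize and a worker type name chosen only for illustration:

```java
@Component
public class CreateCustomerInCrmWorker {

  @Autowired
  private CrmFacade crmFacade;

  @JobWorker(type = "createCustomerInCrm")
  public Map<String, Object> createCustomerInCrm(ActivatedJob job) {
    // Input mapping: the client deserializes the job's JSON variables into a POJO
    CustomerData customerData = job.getVariablesAsType(CustomerData.class);

    // Delegate to business logic
    String customerId = crmFacade.createCustomer(customerData);

    // Output mapping: the returned map is written back as process variables
    // when spring-zeebe auto-completes the job
    return Map.of("customerId", customerId);
  }
}
```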
- -In the below Java delegate example, you can see Spin and Jackson were used in the delegate for JSON to Java mapping: - -```java -@Component -public class CreateCustomerInCrmJavaDelegate implements JavaDelegate { - - @Autowired - private ObjectMapper objectMapper; - //... - - public void execute(DelegateExecution execution) throws Exception { - // Data Input Mapping - JsonNode customerDataJson = ((JacksonJsonNode) execution.getVariable("customerData")).unwrap(); - CustomerData customerData = objectMapper.treeToValue(customerDataJson, CustomerData.class); - // ... - } -} -``` - -This way, you have full control over what is happening, and such code is also easily migratable. The overall complexity is even lower, as Jackson is quite known to Java people — a kind of de-facto standard with a lot of best practices and recipes available. - -### Simple expressions and FEEL - -[Camunda 8 uses FEEL as its expression language](/components/modeler/feel/what-is-feel.md). There are big advantages to this decision. Not only are the expression languages between BPMN and DMN harmonized, but also the language is really powerful for typical expressions. One of my favorite examples is the following onboarding demo we regularly show. A decision table will hand back a list of possible risks, whereas every risk has a severity indicator (yellow, red) and a description. - -![onboarding demo](https://camunda.com/wp-content/uploads/2022/05/Migrating-to-Camunda-Platform-8-image-1-1024x367.png) - -The result of this decision shall be used in the process to make a routing decision: - -![routing decision](https://camunda.com/wp-content/uploads/2022/05/Migrate-to-Camunda-Platform-8-25052022-image-2-1024x481.png) - -To unwrap the DMN result in Camunda 7, you could write some Java code and attach that to a listener when leaving the DMN task (this is already an anti-pattern for migration as you will read next). The code is not super readable: - -```java -@Component -public class MapDmnResult implements ExecutionListener { - - @Override - public void notify(DelegateExecution execution) throws Exception { - List risks = new ArrayList(); - Set riskLevels = new HashSet(); - - Object oDMNresult = execution.getVariable("riskDMNresult"); - for (Object oResult : (List) oDMNresult) { - Map result = (Map) oResult; - risks.add(result.containsKey("risk") ? (String) result.get("risk") : ""); - if (result.get("riskLevel") != null) { - riskLevels.add(((String) result.get("riskLevel")).toLowerCase()); - } - } - - String accumulatedRiskLevel = "green"; - if (riskLevels.contains("rot") || riskLevels.contains("red")) { - accumulatedRiskLevel = "red"; - } else if (riskLevels.contains("gelb") || riskLevels.contains("yellow")) { - accumulatedRiskLevel = "yellow"; - } - - execution.setVariable("risks", Variables.objectValue(risks).serializationDataFormat(SerializationDataFormats.JSON).create()); - execution.setVariable("riskLevel", accumulatedRiskLevel); - } -} -``` - -With FEEL, you can evaluate that data structure directly and have an expression on the "red" path: - -``` -= some risk in riskLevels satisfies risk = "red" -``` - -Additionally, you can even hook in FEEL as the scripting language in Camunda 7 (as explained by [Scripting with DMN inside BPMN](https://camunda.com/blog/2018/07/dmn-scripting/) or [User Task Assignment based on a DMN Decision Table](https://camunda.com/blog/2020/05/camunda-bpm-user-task-assignment-based-on-a-dmn-decision-table/)). - -However, more commonly you will keep using JUEL in Camunda 7. 
If you write simple expressions, they can be migrated automatically, as you can see in [the test case](https://github.com/camunda-community-hub/camunda-7-to-8-migration/blob/main/modeler-plugin-7-to-8-converter/client/JuelToFeelConverter.test.js) of the [migration community extension](https://github.com/camunda-community-hub/camunda-7-to-8-migration). You should avoid more complex expressions if possible. - -Very often, a good workaround to achieve this is to adjust the output mapping of your Java delegate to prepare data in a form that allows for easy expressions. - -Avoid hooking in Java code during an expression evaluation. The above listener to process the DMN result was one example of this, but a more diabolic example could be the following expression in Camunda 7: - -```java -// DON'T DO THIS: -#{ dmnResultChecker.check( riskDMNresult ) } -``` - -Now, the `dmnResultChecker` is a Spring bean that can contain arbitrary Java logic, possibly even querying some remote service to determine whether we currently accept yellow risks or not. Such code cannot be executed within Camunda 8 FEEL expressions, and the logic needs to be moved elsewhere. - -### Camunda Forms - -Finally, while Camunda 7 supports [different types of task forms](https://docs.camunda.org/manual/latest/user-guide/task-forms/), Camunda 8 only supports [Camunda Forms](/guides/utilizing-forms.md) (and this support will be extended over time). If you rely on other form types, you either need to make Camunda Forms out of them or use a bespoke tasklist where you still support those forms. diff --git a/versioned_docs/version-8.2/guides/model-your-first-process.md b/versioned_docs/version-8.2/guides/model-your-first-process.md deleted file mode 100644 index 9bc97e47374..00000000000 --- a/versioned_docs/version-8.2/guides/model-your-first-process.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -id: model-your-first-process -title: Model your first process -description: "Use Modeler to design and deploy a process." ---- - -Beginner -Time estimate: 15 minutes - -## Design and deploy a process - -In Camunda 8, you have two options to design and deploy a process, but for the purposes of this guide you will find instructions for [Web Modeler](../components/modeler/about-modeler.md). - -1. From Modeler, click **New project**. -2. Name your project and select **Create new file > BPMN Diagram**. - ![blank project create bpmn diagram](./img/blank-project.png) -3. Give your model a descriptive name, and then give your model a descriptive id within the **General** tab inside the properties panel on the right side of the screen. -4. Create a task by dragging the rectangular task icon from the palette, or by clicking the existing start event and clicking the task icon. Make sure there is an arrow connecting the start event to the task. -5. Name the task by double-clicking the task or using the properties panel. -6. Create an end event by dragging the end event icon from the palette, or by clicking the existing task and clicking the end event icon. -7. No need to save. Web Modeler will autosave every change you make. - -![simple process](./img/simple-task-creation.png) - -## Share your process - -You can share read-only models with teammates and other stakeholders via a link. To do this, follow the steps below: - -1. From your diagram, click the three vertical dots to the right of the **Run** button. Click **Share**. -2. The **Create share link** modal will appear. Click **Create link** to generate a sharable link. -3. 
Your **Link URL** will appear along with a blue **Copy** button. Note other options and features available when sharing a diagram, such as sharing via email. -4. Send your link to your stakeholder to allow them to view the process. They do not need a Camunda account to view the process. - -![share link sample](./img/share-link.png) - -## Collaborate on a process - -If you want to invite collaborators to work on your process together, you'll need to first make sure they have the proper permissions and roles. - -1. First, add your colleague to the Organization. Click your account name then **Organization Management**. -2. Navigate to **Users > Add New User**. Add your colleague's email and assign them an appropriate role - **Developer** is likely a good default option here as the user will have full access to Console, Operate, and Tasklist without deletion privileges. See [all roles and permissions](/components/console/manage-organization/manage-users.md#roles-and-permissions) for additional details. -3. Click **Add**. An email will be sent to the email you provided. Your colleague must hit **Join** to finish adding them to the organization. -4. Now that they're added to the organization, you can add them to a project. Open **Modeler**, navigate to your project and open the **Collaborators** panel on the right side. -5. Click **Add user** and find your colleague you added to your organization. Assign their role with the dropdown and click **Send invites**. - ![add new user](./img/invite-collaborators.png) -6. After your colleague clicks **Accept invitation**, they will have access to the project based on the role you assigned. diff --git a/versioned_docs/version-8.2/guides/setting-up-development-project.md b/versioned_docs/version-8.2/guides/setting-up-development-project.md deleted file mode 100644 index 8f099e9a8d4..00000000000 --- a/versioned_docs/version-8.2/guides/setting-up-development-project.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: setting-up-development-project -title: Set up your first development project -description: "Set up your first project to model, deploy, and start a process instance." -keywords: [get-started, local-install] ---- - -Beginner -Time estimate: 20 minutes - -## Prerequisites - -- [Camunda 8 SaaS](https://camunda.io) -- [Desktop Modeler](https://camunda.com/download/modeler/) -- [Operate](/self-managed/operate-deployment/install-and-start.md) -- [Tasklist](/self-managed/tasklist-deployment/install-and-start.md) -- [Optimize]($optimize$/components/what-is-optimize) - -## Setting up your project - -Let's set up your first project to model, deploy, and start a process instance. - -The [camunda-platform-get-started GitHub repository](https://github.com/camunda/camunda-platform-get-started) -contains a hands-on guide for setting up a Camunda 8 project locally. - -The guide offers a general walk-through on how to model, deploy, and start a -process instance. It also includes code examples on how to connect to the -cluster and complete jobs. diff --git a/versioned_docs/version-8.2/guides/setup-client-connection-credentials.md b/versioned_docs/version-8.2/guides/setup-client-connection-credentials.md deleted file mode 100644 index 661604332c8..00000000000 --- a/versioned_docs/version-8.2/guides/setup-client-connection-credentials.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: setup-client-connection-credentials -title: Set up client connection credentials -description: "Set up client connection credentials to create, name, and connect your client." 
---- - -Beginner -Time estimate: Under 5 minutes - -Here, we'll set up client connection credentials to create, name, and connect your client. - -## Create a client - -Currently, Camunda 8 SaaS supports the following scopes: - -- Zeebe - Access to the [Zeebe gRPC](/apis-tools/grpc.md) API. -- Tasklist - Access to the [Tasklist GraphQL](/apis-tools/tasklist-api/tasklist-api-overview.md) API. -- Operate - Access to the [Operate REST API](/apis-tools/operate-api/overview.md). -- Optimize - Access to the [Optimize REST API]($optimize$/apis-tools/optimize-api/optimize-api-authorization). -- Secrets - Access cluster secrets in a [hybrid setup](/guides/use-connectors-in-hybrid-mode.md). - -To create a new client, take the following steps: - -1. Navigate to the API tab [in Camunda Console](https://console.cloud.camunda.io/) by clicking **Organization > Cluster name > API**. - ![cluster-details](../components/console/manage-clusters/img/cluster-detail-clients.png) -2. Click **Create your first client** to create a new client and name your client accordingly. -3. Determine the scoped access for client credentials. Select **Zeebe client** so the newly-created client can access your Zeebe instance. - ![create-client](../components/console/manage-clusters/img/cluster-details-create-client.png) -4. Make sure you keep the generated client credentials in a safe place. The **Client secret** will not be shown again. For your convenience, you can also download the client information to your computer. - -![created-client](../components/console/manage-clusters/img/cluster-details-created-client.png) - -The downloaded file contains all the necessary information to communicate with your Camunda 8 instance in the future: - -- `ZEEBE_ADDRESS`: Address where your cluster can be reached. -- `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET`: Credentials to request a new access token. -- `ZEEBE_AUTHORIZATION_SERVER_URL`: A new token can be requested at this address using the credentials. -- `ZEEBE_TOKEN_AUDIENCE`: The audience for a Zeebe token request. -- `CAMUNDA_CLUSTER_ID`: The UUID of the cluster. -- `CAMUNDA_CLUSTER_REGION`: The region of the cluster. -- `CAMUNDA_CREDENTIALS_SCOPES`: A comma-separated list of the scopes this credential set is valid for. -- `CAMUNDA_OAUTH_URL`: A new token can be requested at this address using the credentials. Duplicates the earlier Zeebe-focused variable. - -Depending on the scopes granted to these client credentials, the following variables may also be present: - -- `CAMUNDA_TASKLIST_BASE_URL`: The base URL for the Tasklist API. -- `CAMUNDA_OPTIMIZE_BASE_URL`: The base URL for the Optimize API. -- `CAMUNDA_OPERATE_BASE_URL`: The base URL for the Operate API. diff --git a/versioned_docs/version-8.2/guides/update-guide/026-to-100.md b/versioned_docs/version-8.2/guides/update-guide/026-to-100.md deleted file mode 100644 index e57d0903f2c..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/026-to-100.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -id: 026-to-100 -title: Update 0.26 to 1.0 -description: "Review which adjustments must be made to migrate from Camunda Cloud 0.26.x to 1.0.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 0.26.x to 1.0.0 for each component of the system. - -:::caution - -Be aware that the major version update from 0.26 to 1.0 is not backwards -compatible. Therefore, data cannot be migrated from 0.26 to 1.0 and client -applications must be adjusted to the new API versions. 
- -::: - -## Server - -### Zeebe - -#### Distribution - -With Zeebe 1.0.0, the Java package names were adjusted. They changed from -`io.zeebe` to `io.camunda.zeebe`. Therefore, any logging configurations and -similar, which are based on the package names, must be adjusted. - -Additionally, the group id of the Java artifacts was migrated from `io.zeebe` -to `io.camunda`. This requires all dependencies to the artifacts to be updated -to use the new group id. - -The downloadable artifact of the Zeebe distribution was renamed from: - -- `zeebe-distribution-${VERSION}.tar.gz` to `camunda-cloud-zeebe-${VERSION}.tar.gz`, -- `zeebe-distribution-${VERSION}.zip` to `camunda-cloud-zeebe-${VERSION}.zip` - -#### Workflow Engine - -The support for YAML workflows was removed from the workflow engine, after its -deprecation in 0.26. This means only [BPMN -processes](/components/modeler/bpmn/bpmn-primer.md) are supported from now on. - -#### Elasticsearch Exporter - -The supported Elasticsearch version of the Elasticsearch Exporter was increased -from `6.8` to `7.10`; read more about this in the -[Elasticsearch](#elasticsearch) section. - -The index templates of the Elasticsearch Exporter were migrated to use -[composable index templates](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html). - -#### Protocol - -The Java protocol received the following adjustments: - -- Enum `IncidentIntent` - - - remove `CREATE` - - change short value of `CREATED` to `0` - - change short value of `RESOLVE` to `1` - - change short value of `RESOLVED` to `2` - -- Enum `WorkflowInstanceIntent` renamed to `ProcessInstanceIntent` - -- Enum `WorkflowInstanceSubscriptionIntent` renamed to `ProcessMessageSubscriptionIntent` - -- Enum `WorkflowInstanceCreationIntent` renamed to `ProcessInstanceCreationIntent` - -- Enum `JobIntent` - - - remove `CREATE` and `ACTIVATED` - - change short value of `CREATED` to `0` - - change short value of `COMPLETE` to `1` - - change short value of `COMPLETED` to `2` - - change short value of `TIME_OUT` to `3` - - change short value of `TIMED_OUT` to `4` - - change short value of `FAIL` to `5` - - change short value of `FAILED` to `6` - - change short value of `UPDATE_RETRIES` to `7` - - change short value of `RETRIES_UPDATED` to `8` - - change short value of `CANCEL` to `9` - - change short value of `CANCELED` to `10` - - change short value of `THROW_ERROR` to `11` - - change short value of `ERROR_THROWN` to `12` - -- Enum `MessageIntent` - - - rename `DELETE` to `EXPIRE` - - rename `DELETED` to `EXPIRED` - -- Enum `MessageStartEventSubscriptionIntent` - - - remove `OPEN`, `OPENED`, `CLOSE` and `CLOSED` - - add `CREATED`, `CORRELATED` and `DELETED` - -- Enum `MessageSubscriptionIntent` - - - rename `OPEN` to `CREATE` - - rename `OPENED` to `CREATED` - - rename `CLOSE` to `DELETE` - - rename `CLOSED` to `DELETED` - - add `CORRELATING` - -- Enum `TimerIntent` - - - remove `CREATE` - - change short value of `CREATED` to `0` - - change short value of `TRIGGER` to `1` - - change short value of `TRIGGERED` to `2` - - change short value of `CANCEL` to `3` - - change short value of `CANCELED` to `4` - -- Interface `DeploymentRecordValue` - - - rename method `getDeployedWorkflows` to `getProcessMetadata` and change type from `List` to `List` - -- Interface `IncidentRecordValue` - - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - -- Interface `JobRecordValue` - - - rename method 
`getWorkflowDefinitionVersion` to `getProcessDefinitionVersion` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - -- Interface `MessageStartEventSubscriptionRecordValue` - - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - -- Interface `MessageSubscriptionRecordValue` - - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - -- Interface `TimerRecordValue` - - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - -- Interface `VariableRecordValue` - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - -#### gRPC - -The following changes were made to the gRPC protocol definition: - -- `ActivatedJob` message - - - rename field `workflowInstanceKey` to `processInstanceKey` - - rename field `workflowDefinitionVersion` to `processDefinitionVersion` - - rename field `workflowKey` to `processDefinitionKey` - -- `CancelWorkflowInstanceRequest` message renamed to - `CancelProcessInstanceRequest` - - - rename field `workflowInstanceKey` to `processInstanceKey` - -- `CancelWorkflowInstanceResponse` message renamed to - `CancelProcessInstanceResponse` - -- `CreateWorkflowInstanceRequest` message renamed to - `CreateProcessInstanceRequest` - - - rename field `workflowKey` to `processDefinitionKey` - -- `CreateWorkflowInstanceResponse` message renamed to - `CreateProcessInstanceResponse` - - - rename field `workflowKey` to `processDefinitionKey` - - rename field `workflowInstanceKey` to `processInstanceKey` - -- `CreateWorkflowInstanceWithResultRequest` message renamed to - `CreateProcessInstanceWithResultRequest` - - - change type of field `request` from `CreateWorkflowInstanceRequest` to `CreateProcessInstanceRequest` - -- `CreateWorkflowInstanceWithResultResponse` message renamed to - `CreateProcessInstanceWithResultResponse` - - - rename field `workflowKey` to `processDefinitionKey` - - rename field `workflowInstanceKey` to `processInstanceKey` - -- `DeployWorkflowRequest` message renamed to `DeployProcessRequest` - - - rename field `workflows` to `processes` and change type from `WorkflowRequestObject` to `ProcessRequestObject` - -- `WorkflowRequestObject` message renamed to `ProcessRequestObject` - - - remove enum `ResourceType` - - remove field `type` - - change field id of `definition` field to 2 - -- `DeployWorkflowResponse` message renamed to `DeployProcessResponse` - - - rename field `workflows` to `processes` and change type from `WorkflowMetadata` to `ProcessMetadata` - -- `WorkflowMetadata` message renamed to `ProcessMetadata` - - - rename field `workflowKey` to `processDefinitionKey` - -- `Partition` message - - - enum `PartitionBrokerRole` added `INACTIVE` state - -- `Gateway` service - - - rename rpc `CancelWorkflowInstance` to `CancelProcessInstance` and change input type from `CancelWorkflowInstanceRequest` to `CancelProcessInstanceRequest` and output type from `CancelWorkflowInstanceResponse` to `CancelProcessInstanceResponse` - - - rename rpc `CreateWorkflowInstance` to `CreateProcessInstance` and change input type from `CreateWorkflowInstanceRequest` to `CreateProcessInstanceRequest` and output type from `CreateWorkflowInstanceResponse` to `CreateProcessInstanceResponse` - - - rename rpc `CreateWorkflowInstanceWithResult` to `CreateProcessInstanceWithResult` and change input type from 
`CreateWorkflowInstanceWithResultRequest` to `CreateProcessInstanceWithResultRequest` and output type from `CreateWorkflowInstanceWithResultResponse` to `CreateProcessInstanceWithResultResponse` - - - rename rpc `DeployWorkflow` to `DeployProcess` and change input type from `DeployWorkflowRequest` to `DeployProcessRequest` and output type from `DeployWorkflowResponse` to `DeployProcessResponse` - -#### Exporter API - -In the Java Exporter API, the deprecated method `Controller#scheduleTask` was removed. - -### Operate - -With Operate 1.0.0, the Java package names were adjusted. They changed from -`org.camunda.operate` to `io.camunda.operate`. Therefore, any logging -configurations and similar, which are based on the package names, must be -adjusted. - -The downloadable artifact of the Operate distribution was renamed from: - -- `camunda-operate-${VERSION}.tar.gz` to `camunda-cloud-operate-${VERSION}.tar.gz`, -- `camunda-operate-${VERSION}.zip` to `camunda-cloud-operate-${VERSION}.zip` - -The supported Elasticsearch version was increased from `6.8` to `7.10`. Read -more about this in the [Elasticsearch](#elasticsearch) section. - -### Tasklist - -With Tasklist 1.0.0, the Java package names were adjusted. They changed from -`io.zeebe.tasklist` to `io.camunda.tasklist`. Therefore, any logging -configurations and similar, which are based on the package names, must be -adjusted. - -Additionally, the configuration prefix was migrated from `zeebe.tasklist` to -`camunda.tasklist`, which requires all configurations to be adjusted to the new -prefix. - -The downloadable artifact of the Tasklist distribution was renamed from: - -- `zeebe-tasklist-${VERSION}.tar.gz` to `camunda-cloud-tasklist-${VERSION}.tar.gz`, -- `zeebe-tasklist-${VERSION}.zip` to `camunda-cloud-tasklist-${VERSION}.zip` - -The supported Elasticsearch version was increased from `6.8` to `7.10`. Read -more about this in the [Elasticsearch](#elasticsearch) section. - -### Elasticsearch - -Zeebe, Operate, and Tasklist use Elasticsearch as a datastore to exchange the event -stream from Zeebe's exporter, and store their own data model representation. - -Camunda Cloud 1.0 requires an update from Elasticsearch 6.8 to 7.10. - -Follow the [update guide from -Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html) -to migrate existing data. - -:::note -Zeebe, Operate, and Tasklist data inside Elasticsearch cannot be migrated; it can only be preserved for -history or audit purposes, but cannot be loaded by Camunda Cloud 1.0. -::: - -If you want to keep the existing data in Elasticsearch, ensure you set a -new index prefix for all systems. See the configuration documentation for -[Zeebe](self-managed/zeebe-deployment/zeebe-installation.md), -[Operate](self-managed/operate-deployment/operate-configuration.md), and [Tasklist](self-managed/tasklist-deployment/tasklist-configuration.md). - -## Client - -### Zeebe Java Client - -With Zeebe 1.0.0, the Java package names were adjusted. They changed from -`io.zeebe` to `io.camunda.zeebe`. Therefore, any imports and logging -configurations and similar, which are based on the package names, must be -adjusted. - -Additionally, the group id of the Java artifacts was migrated from `io.zeebe` -to `io.camunda`. This requires all dependencies to the artifacts to be updated -to use the new group id.
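As a rough illustration of the package rename, existing imports change along these lines (the Maven coordinates are shown only as comments; adjust to your build tool):

```java
// Before (0.26): groupId io.zeebe, package io.zeebe.client
// import io.zeebe.client.ZeebeClient;

// After (1.0): groupId io.camunda, package io.camunda.zeebe.client
import io.camunda.zeebe.client.ZeebeClient;
```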
- -The public API of the Java client changed as follows: - -- Interface `ActivatedJob` - - - rename method `getWorkflowInstanceKey` to `getProcessInstanceKey` - - rename method `getWorkflowDefinitionVersion` to `getProcessDefinitionVersion` - - rename method `getWorkflowKey` to `getProcessDefinitionKey` - -- Class `ClientProperties` - - - remove deprecated field `BROKER_CONTACTPOINT` - -- Interface `ZeebeClientBuilder` - - - remove deprecated method `brokerContactPoint` - -- Interface `ZeebeClientConfiguration` - - - remove deprecated method `getBrokerContactPoint` - -- Interface `ZeebeClient` - - change return type of `newDeployCommand` from `DeployWorkflowCommandStep1` to `DeployProcessCommandStep1` - - change return type of `newCreateInstanceCommand` from `CreateWorkflowInstanceCommandStep1` to `CreateProcessInstanceCommandStep1` - - change return type of `newCancelInstanceCommand` from `CancelWorkflowInstanceCommandStep1` to `CancelProcessInstanceCommandStep1` - -### Zeebe Go Client - -The repository of Zeebe was moved from `github.com/zeebe-io/zeebe` to -`github.com/camunda-cloud/zeebe`. Therefore, all go dependencies and imports must be adjusted to the new GitHub URL. - -The public API of the Go client was changed as follows: - -- Interface `CancelInstanceStep1` - - - rename method `WorkflowInstanceKey` to `ProcessInstanceKey` and change return type from `DispatchCancelWorkflowInstanceCommand` to `DispatchCancelProcessInstanceCommand` - -- Interface `DispatchCancelWorkflowInstanceCommand` renamed to `DispatchCancelProcessInstanceCommand` - -- Interface `CancelWorkflowInstanceCommand` renamed to `CancelProcessInstanceCommand` - - - rename method `WorkflowInstanceKey` to `ProcessInstanceKey` - -- Interface `CreateInstanceCommandStep1` - - - rename method `WorkflowKey` to `ProcessDefinitionKey` and change `DispatchCancelProcessInstanceCommand` - -- Struct `DeployCommand` - - - method `AddResource(definition, name, resourceType)` remove `resourceType` from parameter list - -- Interface `Client` - - rename method `NewDeployWorkflowCommand` to `NewDeployProcessCommand` diff --git a/versioned_docs/version-8.2/guides/update-guide/100-to-110.md b/versioned_docs/version-8.2/guides/update-guide/100-to-110.md deleted file mode 100644 index 7bff5fe4604..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/100-to-110.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: 100-to-110 -title: Update 1.0 to 1.1 -description: "Review which adjustments must be made to migrate from Camunda Cloud 1.0.x to 1.1.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 1.0.x to 1.1.0 for each component of the system. - -:::caution - -We identified an issue in our recent release of Operate 1.1.0 (part of Camunda -Cloud 1.1.0). This issue only applies if you are updating from a previous -version of Operate 1.0.x to Operate 1.1. - -We recommend to immediately update to 1.1.1 by skipping the 1.1.0 release. - -::: - -## Server - -### Operate - -With Operate 1.1, a new feature was introduced to navigate between call activity -hierarchies. This feature is only available for instances started after the -version 1.1 update. Older instances will not expose this information to the user. 
- -## Client - -### Zeebe Java Client - -To prepare to support multiple regions in Camunda Cloud SaaS, we adapted the -Zeebe URLs used to connect to your cluster to contain a region subdomain, i.e. -`${CLUSTER_ID}.zeebe.camunda.io` is now `${CLUSTER_ID}.bru-2.zeebe.camunda.io`. - -We are confident that we rolled out this change transparently and as backwards -compatible as possible. Still, there are scenarios in which this might impact -you on your update path. For existing clusters, the old URLs will still be -functional. We recommend you update the configuration of your clients to -the new URL format. - -To support this feature, we expose a new configuration method -[`withRegion`]() -in the Camunda Cloud builder of the Java Client to set the region. - -By default, the builder will assume the `bru-2` region, which is the region of -any clusters created after Camunda Cloud GA in May 2021. - -If you are using the Java Client Camunda Cloud builder with a pre-GA -cluster, you must set the region to the specific value of your cluster. - -To copy the region of your cluster, visit the cluster details page in -Camunda Cloud Console, and select the copy button next to the region. - -![region icon to copy cluster details region](../img/update-guide-100-to-110-copy-region.png) diff --git a/versioned_docs/version-8.2/guides/update-guide/110-to-120.md b/versioned_docs/version-8.2/guides/update-guide/110-to-120.md deleted file mode 100644 index dc9b7dbc15e..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/110-to-120.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: 110-to-120 -title: Update 1.1 to 1.2 -description: "Review which adjustments must be made to migrate from Camunda Cloud 1.1.x to 1.2.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 1.1.x to 1.2.0 for each component of the system. - -:::caution - -We identified an issue in our recent release of Zeebe 1.2 related to our -concept of processing on followers. Versions 1.2.0 and 1.2.1 are affected by a -bug, resulting in an inconsistent state -([#8044](https://github.com/camunda-cloud/zeebe/issues/8044)). - -Therefore, we recommend updating to 1.2.4 directly. - -::: - -## Server - -### Operate - -With Operate 1.2, a new feature was introduced to quickly navigate a call -activity hierarchy. This feature is only available for instances started after -the version 1.2 update. Older instances will not expose this information to the -user. diff --git a/versioned_docs/version-8.2/guides/update-guide/120-to-130.md b/versioned_docs/version-8.2/guides/update-guide/120-to-130.md deleted file mode 100644 index fecae209e45..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/120-to-130.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -id: 120-to-130 -title: Update 1.2 to 1.3 -description: "Review which adjustments must be made to migrate from Camunda Cloud 1.2.x to 1.3.2." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 1.2.x to 1.3.2 for each component of the system. - -## Server - -### Zeebe - -:::caution -A critical [issue](https://github.com/camunda-cloud/zeebe/issues/8611) which may lead to data loss was identified in 1.3.0 and 1.3.1. This issue is related to the new assignee and candidate group feature introduced in 1.3.0, and only affects users who make use of it. However, when updating, it's still recommended that you skip versions 1.3.0 and 1.3.1 and update directly from 1.2.9 to 1.3.2. 
- -Please refer to the [release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.2) for more. -::: - -With Zeebe 1.3.0, we removed the monitoring-specific configuration from the broker and the gateway. Instead, the monitoring configuration is now completely handled via the Spring Boot management server. Read [this Spring Boot documentation](https://docs.spring.io/spring-boot/docs/2.6.x/reference/htmlsingle/#actuator) for more information on how to configure the server. - -For the broker, this replaces the following configuration settings: - -```yaml -zeebe: - broker: - network: - monitoringApi: - host: 0.0.0.0 - port: 9600 -``` - -For the gateway, the following was removed: - -```yaml -zeebe: - gateway: - monitoring: - enabled: true - host: 0.0.0.0 - port: 9600 -``` - -To configure the monitoring URL for the broker and gateway, you can instead use the `server.port` and `server.address` properties. Given how Spring loads its configuration, it's not possible to configure this directly in the YAML configuration. However, you can use environment variables - respectively `SERVER_PORT` and `SERVER_ADDRESS` - or system properties - respectively `-Dserver.port=` or `-Dserver.address=` - to configure them. - -To disable the monitoring server, configure the broker or gateway with the following: - -```yaml -spring: - main: - web-application-type: none -``` - -### Operate - -:::caution -A critical issue was found in the Operate data importer which may lead to incidents not being imported to Operate. This issue is affecting versions `1.3.0`, `1.3.1`, `1.3.2` and `1.3.3`. -We strongly recommend skipping the affected versions and making sure you are running version `1.3.4` if updating from `1.2.x`. - -Please refer to the [release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.4) for more. -::: - -With Operate 1.3, a new feature was introduced to propagate incidents from called instances to calling instances. -This feature is only available for instances started after the version 1.3 update. Older instances will still be shown as active -even though incidents in called instances may exist. - -### Tasklist - -Because of internal changes in user data processing, updating to Tasklist 1.3 will erase all information about task assignments. -End users will need to claim their tasks again. diff --git a/versioned_docs/version-8.2/guides/update-guide/130-to-800.md b/versioned_docs/version-8.2/guides/update-guide/130-to-800.md deleted file mode 100644 index cd2228a8cea..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/130-to-800.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: 130-to-800 -title: Update 1.3 to 8.0 -description: "Review which adjustments must be made to migrate from Camunda Cloud 1.3.x to Camunda 8.0.0" ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda Cloud 1.3.x to 8.0.0 for each component of the system. - -## Server - -### Zeebe - -No changes are required to update Zeebe 1.3 to 8.0. - -The following new configuration parameters were introduced and can be enabled if needed.
Hence enabling compression helps to improve the throughput. - -Note: When there is no latency enabling this may have a performance impact. - -- application.yaml - - Broker: `zeebe.broker.cluster.messageCompression` - - Gateway: `zeebe.gateway.cluster.messageCompression` -- Environment variable: - - Broker: `ZEEBE_BROKER_CLUSTER_MESSAGECOMPRESSION` - - Gateway: `ZEEBE_GATEWAY_CLUSTER_MESSAGECOMPRESSION` - -#### Experimental: Consistency Checks - -To configure if the basic operations on RocksDB, such as inserting or deleting key-value pairs, should check preconditions, for example that a key does not already exist when inserting. - -- application.yaml - - Broker: `zeebe.broker.experimental.consistencyChecks.enablePreconditions` -- Environment variable: - - Broker: `ZEEBE_BROKER_EXPERIMENTAL_CONSISTENCYCHECKS_ENABLEPRECONDITIONS` - -To configure if inserting or updating key-value pairs on RocksDB should check that foreign keys exist. - -- application.yaml - - Broker: `zeebe.broker.experimental.consistencyChecks.enableForeignKeyChecks` -- Environment variable: - - Broker: `ZEEBE_BROKER_EXPERIMENTAL_CONSISTENCYCHECKS_ENABLEFOREIGNKEYCHECKS` - -### Operate - -With Camunda 8 the IAM component was replaced with the new [Identity](/self-managed/identity/what-is-identity.md) project. There is no update path from IAM to Identity, which means all configured roles and permissions have to be recreated after the update. Please refer to the [Operate documentation](/self-managed/operate-deployment/operate-authentication.md#identity) how to connect Operate with Identity. - -With version 8.0, Operate supports configuring secure connections to the Zeebe gateway, see the [connection configuration section](/self-managed/operate-deployment/operate-configuration.md#settings-to-connect-1) in the Operate documentation. - -With version 8.0, Operate offers a new [REST API](/apis-tools/operate-api/overview.md) to access process data. - -### Tasklist - -With Camunda 8 the IAM component was replaced with the new [Identity](/self-managed/identity/what-is-identity.md) project. There is no update path from IAM to Identity, which means all configured roles and permissions have to be recreated after the update. Please refer to the [Tasklist documentation](/self-managed/tasklist-deployment/tasklist-authentication.md#identity) how to connect Tasklist with Identity. - -With version 8.0, Tasklist supports configuring secure connections to the Zeebe gateway, see the [connection configuration section](/self-managed/tasklist-deployment/tasklist-configuration.md#settings-to-connect-1) in the Tasklist documentation. - -### IAM - -With Camunda 8 the IAM component was replaced with the new [Identity](/self-managed/identity/what-is-identity.md) project. Please refer to the [Identity documentation](/self-managed/identity/getting-started/install-identity.md) to learn how to install and configure it. - -## Client - -### Zeebe - -Zeebe 8.0 adds the possibility to evaluate decisions, therefore the [new deployment command](/apis-tools/grpc.md#deployresource-rpc) was introduced to better support DMN files during deployment. The [old deployment command](/apis-tools/grpc.md#deployprocess-rpc) is deprecated and we recommend to migrate your application code to use the new deployment function. - -The Zeebe Java client offers the new deploy command using the `newDeployResourceCommand()` method, refer to the [JavaDocs]() for more information. 
- -```java - zeebeClient - .newDeployResourceCommand() - .addResourceFile("~/wf/process1.bpmn") - .addResourceFile("~/wf/process2.bpmn") - .addResourceFile("~/dmn/decision.dmn") - .send(); -``` - -The command line tool `zbctl` exposes the new command using the `deploy resource` sub-command. - -```bash -zbctl deploy resource ... -``` diff --git a/versioned_docs/version-8.2/guides/update-guide/800-to-810.md b/versioned_docs/version-8.2/guides/update-guide/800-to-810.md deleted file mode 100644 index 2100e3ab6fa..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/800-to-810.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -id: 800-to-810 -title: Update 8.0 to 8.1 -description: "Review which adjustments must be made to migrate from Camunda 8.0.x to Camunda 8.1.0" ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda 8.0.x to 8.1.0 for each component of the system. - -## Server - -### Zeebe - -:::caution -A critical issue was found in the Zeebe Elasticsearch exporter, resulting in records not getting exported after performing a process instance modification. This issue affects only Zeebe version 8.1.1. -When updating, it is therefore recommended you skip version 8.1.1 and update directly to 8.1.2. - -Refer to the [release notes](https://github.com/camunda/camunda-platform/releases/tag/8.1.2) for more details. -::: - -No changes are required to update Zeebe 8.0 to 8.1. However, there are deprecated properties you should be aware of. - -:::note -Even if these parameters are deprecated, you can still use them because of the backward compatibility of the Zeebe versioning system. -::: - -Deprecated properties: - -- Zeebe Gateway's property `contactPoint` - - Remove the usage of `zeebe.gateway.cluster.contactPoint` property or the `ZEEBE_GATEWAY_CLUSTER_CONTACTPOINT` environment variable. - - Use the `zeebe.gateway.cluster.initialContactPoints` property or the `ZEEBE_GATEWAY_CLUSTER_INITIALCONTACTPOINTS` environment variable. - -:::note -You can set the list of broker addresses here to make the start process of the Zeebe gateway more resilient. Refer to [the Zeebe gateway's configuration guide](../../self-managed/zeebe-deployment/configuration/gateway.md) for more details. -::: - -### Operate - -:::caution -A critical issue was found on Operate data importer which may lead to incidents not being imported to Operate. -This issue is affecting only Operate installations which where updated from 8.0, and not new installations of Operate. -When updating, it is recommended that you skip versions 8.1.0 and update directly to 8.1.1. - -Please refer to the [release notes](https://github.com/camunda/camunda-platform/releases/tag/8.1.1) for more. -::: diff --git a/versioned_docs/version-8.2/guides/update-guide/810-to-820.md b/versioned_docs/version-8.2/guides/update-guide/810-to-820.md deleted file mode 100644 index 1a35c8a9859..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/810-to-820.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -id: 810-to-820 -title: Update 8.1 to 8.2 -description: "Review which adjustments must be made to migrate from Camunda 8.1.x to Camunda 8.2.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from Camunda 8.1.x to 8.2.0 for each component. - -## Zeebe - -### New configuration for disk usage - -A new configuration for [tuning disk usage](/self-managed/zeebe-deployment/operations/disk-space.md) is added. 
-The old configurations are still accepted, but they are deprecated. We recommend using the new configuration. - -### Changes to default backpressure - -The default [backpressure](/self-managed/zeebe-deployment/operations/backpressure.md) algorithm changed from **Vegas** to **additive-increase/multiplicative-decrease (AIMD)**. - -:::note -This may change the performance characteristics. If you have tuned your system for specific latency or throughput, we recommend you reevaluate with the new version. -::: - -### Spring Boot 3.0 - URL Matching Changes - -Zeebe now uses Spring Boot 3.0, which comes with a breaking change regarding [trailing slash URL matching](https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-3.0-Migration-Guide#spring-mvc-and-webflux-url-matching-changes). -This only applies to the Actuator endpoints such as the [Management API](../../self-managed/zeebe-deployment/operations/management-api.md) and the [Backup API](../../self-managed/backup-restore/zeebe-backup-and-restore.md), as well as to the [health status endpoints](../../self-managed/zeebe-deployment/operations/health.md). Ensure you remove any trailing slashes when you access these endpoints; otherwise, you will encounter a `404 - Not Found` error. - -The [GRPC API](../../apis-tools/grpc.md) is not affected by this change. - -### AWS S3 Backup store connectivity - -Zeebe uses an updated version of the AWS S3 client, which automatically chooses between [path-style and virtual-hosted-style access](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html). -If your S3 backup store does not support virtual-hosted-style access and the client fails to detect this, Zeebe will not connect to S3 and backups are not usable. - -Self-managed deployments that use [MinIO](https://min.io/) as an S3-compatible backup target might need to force path-style access. -You can use a new configuration setting to always force path-style access: `ZEEBE_BROKER_DATA_BACKUP_S3_FORCEPATHSTYLEACCESS`. - -## Operate - -- It is recommended to follow a sequential update path when updating to version 8.2. For example, if running on version 8.0, first update to 8.1, then update to 8.2. -- Migration of data during the version 8.2 update could take longer than in previous versions, especially for datasets containing a large amount of incident data. -- Operate now uses Spring Boot version 3.0. Therefore, [URLs with a trailing slash will fail with an HTTP status 404](https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-3.0-Migration-Guide#spring-mvc-and-webflux-url-matching-changes). - -## Identity - -A new Identity feature called resource authorizations has been added. By default, this feature is disabled. To learn more about this feature, see the [resource authorizations concepts page](/self-managed/concepts/access-control/resource-authorizations.md). - -## Tasklist - -- New configuration is required to enable the Tasklist **Process** page: - - `CAMUNDA_TASKLIST_IDENTITY_BASEURL` environment variable should be configured to your Identity host. - - `CAMUNDA_TASKLIST_IDENTITY_RESOURCE_PERMISSIONS_ENABLED` environment variable should be set to `true` or `false` (default). This configures Tasklist to use authorizations from Identity. - - To enable this variable in Tasklist, resource authorizations must be [enabled in Identity](/self-managed/concepts/access-control/resource-authorizations.md).
- - If `CAMUNDA_TASKLIST_IDENTITY_RESOURCE_PERMISSIONS_ENABLED` is set to `true`, you must configure `CAMUNDA_TASKLIST_IDENTITY_BASEURL`. Otherwise, Tasklist will not be able to reach Identity. -- Tasklist now uses Spring Boot version 3.0. Therefore, [URLs with a trailing slash will fail with an HTTP status 404](https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-3.0-Migration-Guide#spring-mvc-and-webflux-url-matching-changes). diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/010-to-020.md b/versioned_docs/version-8.2/guides/update-guide/connectors/010-to-020.md deleted file mode 100644 index 12d336c91d8..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/010-to-020.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -id: 010-to-020 -title: Update 0.1 to 0.2 -description: "Review which adjustments must be made to migrate from Connector SDK 0.1.x to 0.2.0." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.1.x to 0.2.0. - -:::caution - -Be aware that the update from 0.1 to 0.2 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.2.0, we introduce the following structural changes: - -- Input validation and secret replacement move from writing imperative code to declaratively using annotations. -- The Outbound aspect of APIs is more explicit. Classes have been moved to more explicit packages and have been renamed. -- New required annotation for outbound Connectors. - -### Declarative validation and secrets - -Input objects previously had to implement the `ConnectorInput` interface to participate in validation and secret replacement -initiated from the `ConnectorContext` using its `validate` and `replaceSecrets` methods respectively. - -With version 0.2.0, we remove the imperative approach for validation and secret replacement from the SDK. -Instead, you can use annotations to describe the constraints of input attributes and mark those that can contain -secrets. 
- -These are two input objects written with the SDK version 0.1.x: - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class MyConnectorRequest implements ConnectorInput { - - private String message; - private Authentication authentication; - - @Override - public void validateWith(final Validator validator) { - validator.require(message, "message"); - validator.require(authentication, "authentication"); - validateIfNotNull(authentication, validator); - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - replaceSecretsIfNotNull(authentication, secretStore); - } -} -``` - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class Authentication implements ConnectorInput { - - private String user; - private String token; - - @Override - public void validateWith(final Validator validator) { - validator.require(user, "user"); - validator.require(token, "token"); - if (token != null && !(token.startsWith("xobx") || token.startsWith("secrets."))) { - validator.addErrorMessage("Token must start with \"xobx\" or be a secret"); - } - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - token = secretStore.replaceSecret(token); - } -} -``` - -You can express the same input objects with SDK version 0.2.0 as follows: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.NotNull; - -public class MyConnectorRequest { - - @NotEmpty - private String message; - - @NotNull - @Valid - @Secret - private Authentication authentication; -} -``` - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.Pattern; - -public class Authentication { - - @NotEmpty - private String user; - - @NotEmpty - @Pattern(regexp = "^(xobx-|secret).+") - @Secret - private String token; -} -``` - -As a result, you have to remove the `ConnectorInput` interface implementation and the imperative code that comes with `validateWith` -and `replaceSecrets`. You can now concisely describe the constraints of attributes rather than expressing them in imperative code. - -In order to use annotation-based validation out of the box, you can include the new artifact `connector-validation` that -comes with the SDK. - - - - - -```xml -<dependency> -  <groupId>io.camunda.connector</groupId> -  <artifactId>connector-validation</artifactId> -  <version>0.2.0</version> -</dependency> -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-validation:0.2.0' -``` - - - - -You can read more about validation and secret replacement in our -[SDK Guide](/components/connectors/custom-built-connectors/connector-sdk.md). - -### Explicit Outbound aspect - -With version 0.2.0 of the SDK, we make the Outbound aspect of those components specific to outbound connectivity -more visible. This separates those SDK components that are tightly coupled to Outbound from those that -will be reusable for Inbound.
- -With this change, the names of the following classes need to be adjusted: - -- Rename `io.camunda.connector.api.ConnectorContext` to `io.camunda.connector.api.outbound.OutboundConnectorContext` -- Rename `io.camunda.connector.api.ConnectorFunction` to `io.camunda.connector.api.outbound.OutboundConnectorFunction` -- Rename `io.camunda.connector.api.SecretProvider` to `io.camunda.connector.api.secret.SecretProvider` -- Rename `io.camunda.connector.api.SecretStore` to `io.camunda.connector.api.secret.SecretStore` -- Rename `io.camunda.connector.test.ConnectorContextBuilder` to `io.camunda.connector.test.outbound.OutboundConnectorContextBuilder` - -As a result, you must replace all occurrences of the old class names and imports with the new ones. This includes the -SPI for the connector function itself. Therefore, rename the file `META-INF/services/io.camunda.connector.api.ConnectorFunction` to -`META-INF/services/io.camunda.connector.api.outbound.OutboundConnectorFunction`. - -### `@OutboundConnector` annotation - -For best interoperability, Connectors provide default metadata (`name`, `type`, `inputVariables`) via the `@OutboundConnector` annotation: - -```java -@OutboundConnector( - name = "PING", - inputVariables = {"caller"}, - type = "io.camunda.example.PingConnector:1" -) -public class PingConnector implements OutboundConnectorFunction { - ... -} -``` - -## Connector runtime environment - -If the -[pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that comes with the SDK does not fit your use case, you can create a custom runtime environment. - -With version 0.2.0 of the [job worker runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#connector-job-handler), you need to make the following changes: - -- Rename `io.camunda.connector.runtime.jobworker.ConnectorJobHandler` to `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` -- Rename Connector-related environment variables from `ZEEBE_` to `CONNECTOR_`. Zeebe configuration properties remain unchanged. - -As a general change in behavior, the module will now pick up Connectors from the classpath unless it is explicitly configured via environment variables. - -Also take the name changes in the [SDK core](#explicit-outbound-aspect) into account. - -When implementing your own Connector wrapper, you need to provide a Connector context specific to -your environment. Consider extending the `io.camunda.connector.impl.outbound.AbstractConnectorContext` -instead of implementing the `io.camunda.connector.api.ConnectorContext` yourself. Most of the commonly needed functionality -is already provided there. diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/0100-to-0110.md b/versioned_docs/version-8.2/guides/update-guide/connectors/0100-to-0110.md deleted file mode 100644 index 8ab329fe2f7..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/0100-to-0110.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -id: 0100-to-0110 -title: Update 0.10 to 0.11 -description: "Review which adjustments must be made to migrate from Connector SDK 0.10.x to 0.11.0." ---- - -Beginner - -:::note -Migrate directly to version 0.11.2 of the SDK. This contains a fix for several issues in the 0.11.0 release. -::: - -This SDK release is not backwards-compatible. We are moving towards a stable Connectors release and continue to improve the experience of developing custom connectors.
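As a preview of the change described below, here is a minimal, hedged sketch of what the migration typically looks like for an outbound Connector function. `MyConnectorRequest` is a hypothetical input class used only for illustration; the exact steps are listed further down.

```java
import io.camunda.connector.api.outbound.OutboundConnectorContext;
import io.camunda.connector.api.outbound.OutboundConnectorFunction;

public class MyConnectorFunction implements OutboundConnectorFunction {

  @Override
  public Object execute(OutboundConnectorContext context) {
    // SDK 0.10.x style (no longer available in 0.11):
    //   var request = context.getVariablesAsType(MyConnectorRequest.class);
    //   context.replaceSecrets(request);
    //   context.validate(request);

    // SDK 0.11.x style: bindVariables deserializes the variables, replaces secrets,
    // and validates the result in a single call.
    MyConnectorRequest request = context.bindVariables(MyConnectorRequest.class);

    // ... run the Connector logic with the bound request and return a result
    return request;
  }

  // Hypothetical input type, shown only to keep the sketch self-contained.
  public record MyConnectorRequest(String message) {}
}
```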
- -In this SDK version, we changed the `OutboundConnectorContext` and `InboundConnectorContext` interfaces significantly. -You can no longer use the `getVariablesAsType` or `getPropertiesAsType` methods in outbound and inbound Connectors, respectively. - -Use the new `bindVariables` method instead, as this takes care of secret replacement, payload validation, and deserialization automatically. - -We are moving away from a mandatory `@Secret` annotation. From this release onwards, secrets are automatically replaced in all input variables/properties without the need to explicitly declare an annotation. - -To migrate your Connector implementations, complete the following: - -1. If you used the `OutboundConnectorContext::getVariablesAsType` method in your outbound Connector functions, replace it with `OutboundConnectorContext::bindVariables`. -2. If you used the `InboundConnectorContext::getPropertiesAsType` method in your inbound Connector executables, replace it with `InboundConnectorContext::bindProperties`. -3. Remove calls to `OutboundConnectorContext::replaceSecrets` and `InboundConnectorContext::replaceSecrets` methods. The secrets are now replaced automatically. -4. Remove calls to `OutboundConnectorContext::validate` and `InboundConnectorContext::validate` methods. The validation is now performed automatically. -5. If you used the `@Secret` annotation in your Connector implementations, you can safely remove it as it has no effect. diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/020-to-030.md b/versioned_docs/version-8.2/guides/update-guide/connectors/020-to-030.md deleted file mode 100644 index 248d7e7cc31..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/020-to-030.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: 020-to-030 -title: Update 0.2 to 0.3 -description: "Review which adjustments must be made to migrate from Connector SDK 0.2.x to 0.3.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.2.x to 0.3.0. - -:::caution - -Be aware that the update from 0.2 to 0.3 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.3.0, we introduce the following structural changes: - -- Input validation moves from Jakarta Bean Validation API version 3.0 to 2.0. -- SDK artifacts have to be in scope `provided`. - -### Update to Validation API 2.0 - -To better integrate into the current Java ecosystem and widely used frameworks like Spring 5 and Spring Boot 2, the `connector-validation` module -now operates on Jakarta Bean Validation API version 2.0 instead of version 3.0. Adjust your Connector input objects using validation as follows: - -Replace all class imports starting with `jakarta.validation` with `javax.validation`. A Connector input class on SDK 0.2.x with the following imports: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -changes to the following: - -```java -import io.camunda.connector.api.annotation.Secret; -import javax.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -This way, the Connector runtime environments are able to pick up your validations correctly. - -### Provided SDK artifacts - -The Connector runtime environments can execute multiple Connectors at once.
The environments also provide the base SDK artifacts and their classes -to any Connector they execute. This comprises runtime-specific classes related to the Connector context as well as the Connector core and the validation -classes. To minimize the possibility of incompatible classes being on the same classpath, Connectors are required to depend on `connector-core` and -`connector-validation` in Maven's dependency scope `provided`. Other dependency management frameworks like Gradle offer similar scopes. - -As a result, you need to include the SDK artifacts as follows in Maven: - -```xml -<dependency> -  <groupId>io.camunda.connector</groupId> -  <artifactId>connector-core</artifactId> -  <scope>provided</scope> -</dependency> -<dependency> -  <groupId>io.camunda.connector</groupId> -  <artifactId>connector-validation</artifactId> -  <scope>provided</scope> -</dependency> -``` - -## Connector runtime environment - -The SDK provides a [pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that you can start manually. With version 0.3.0, this runtime moves from the [SDK repository](https://github.com/camunda/connector-sdk/tree/stable/0.2/runtime-job-worker) -to [Connector Runtime](https://github.com/camunda/connectors/blob/main/connector-runtime/README.md). This also means that the provided runtime is now -a Spring Boot application based on Spring Zeebe. Thus, it offers all out-of-the-box capabilities Spring Zeebe provides. - -The Connector runtime JAR for manual installation can now be fetched from https://repo1.maven.org/maven2/io/camunda/spring-zeebe-connector-runtime/ -(starting with version `8.1.3`) instead of https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-job-worker/. You can start the runtime -environment with the following command: - -```bash -java -cp 'spring-zeebe-connector-runtime-VERSION-with-dependencies.jar:connector-http-json-VERSION-with-dependencies.jar' \ - io.camunda.connector.runtime.ConnectorRuntimeApplication -``` - -The Docker image is still accessible at https://hub.docker.com/r/camunda/connectors/tags. - -### Custom runtime environments - -If you are building a custom runtime environment, note the following adjustments: - -- The `runtime-util` artifact replaces the `runtime-job-worker` artifact. -- The `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` has moved to `io.camunda.connector.runtime.util.outbound.ConnectorJobHandler`. -- The `io.camunda.connector.impl.outbound.AbstractOutboundConnectorContext` has moved to `io.camunda.connector.impl.context.AbstractConnectorContext`. -- To build your own context class, we recommend using the following signature: - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext {} -``` - -- The `SecretStore` class has been removed. Initialize your context class with a `super(SecretProvider)` call. Remove the `getSecretStore` method if you used it. - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext { - - public MyContext(final SecretProvider provider) { - super(provider); - ... - } -} -``` diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/030-to-040.md b/versioned_docs/version-8.2/guides/update-guide/connectors/030-to-040.md deleted file mode 100644 index 5d79ef5f3f8..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/030-to-040.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 030-to-040 -title: Update 0.3 to 0.4 -description: "Review which adjustments must be made to migrate from Connector SDK 0.3.x to 0.4.0."
--- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.3.x to 0.4.0. - -:::caution - -Be aware that the update from 0.3 to 0.4 requires manual migration steps as described below. - -::: - -With SDK version 0.4.0, we introduce several fundamental structural changes: - -- Switching the default Connector Runtime to Spring Boot/Spring Zeebe for outbound Connectors. -- Introducing the webhook inbound Connector. -- Moving out-of-the-box Connectors to the mono-repo at https://github.com/camunda/connectors-bundle/tree/main/connectors to ease dependency management and conflict resolution. -- Building the Connector bundle artifact and Docker image with Maven by default (done by adding various fat JARs to one Docker image). -- Adding the GCP Secret Provider used in Camunda SaaS. - -### Inbound webhook - -The Spring Zeebe runtime with SDK version `0.4.0` introduces support for inbound webhook capabilities. -See the [list of available inbound Connectors](../../../components/connectors/out-of-the-box-connectors/available-connectors-overview.md). - -To function properly, the Spring Zeebe runtime requires a connection to the [Operate API](../../../apis-tools/operate-api/overview.md). Read more on [how to connect to Operate or disable it completely](../../../self-managed/connectors-deployment/connectors-configuration.md#local-installation). - -### What happens if I don't properly configure the connection to the Operate API? - -If you don't properly configure the connection to the Operate API, it will not be possible to poll process definitions from Operate, and the webhook functionality therefore won't work. -In addition, you may observe repeated exceptions in your log file every 5 seconds complaining about the inability to connect to Operate. -Overall, this is not critical; provided there are no other issues, the Connector runtime should function properly. diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/040-to-050.md b/versioned_docs/version-8.2/guides/update-guide/connectors/040-to-050.md deleted file mode 100644 index 637cabc8899..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/040-to-050.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: 040-to-050 -title: Update 0.4 to 0.5 -description: "Review which adjustments must be made to migrate from Connector SDK 0.4.x to 0.5.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.4.x to 0.5.0. - -With SDK version 0.5.0, we introduced minor changes: - -- Removing Spring Zeebe dependency management -- Managing the GCP Secret Provider module version diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/050-to-060.md b/versioned_docs/version-8.2/guides/update-guide/connectors/050-to-060.md deleted file mode 100644 index 5e2bf64a928..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/050-to-060.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: 050-to-060 -title: Update 0.5 to 0.6 -description: "Review which adjustments must be made to migrate from Connector SDK 0.5.x to 0.6.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.5.x to 0.6.0.
- -With SDK version 0.6.0, we introduced the following changes: - -- Replacing secrets in parent classes -- Supporting intermediate inbound events -- Defining interfaces for inbound connectors -- Fixing failing datetime serialization diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/060-to-070.md b/versioned_docs/version-8.2/guides/update-guide/connectors/060-to-070.md deleted file mode 100644 index 5ffe6d5d61d..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/060-to-070.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: 060-to-070 -title: Update 0.6 to 0.7 -description: "Review which adjustments must be made to migrate from Connector SDK 0.6.x to 0.7.0." ---- - -Beginner - -With the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), we made -breaking changes to the inbound Connectors. Please update -[HTTP Webhook](https://github.com/camunda/connectors/tree/main/connectors/webhook/element-templates) -and [GitHub Webhook](https://github.com/camunda/connectors/tree/main/connectors/github/element-templates) -element templates to the latest versions. - -If you have used inbound webhook Connectors with Connector Runtime 0.6.x, you need to **manually** -apply the new element template version to your diagrams: - -1. Download the new element template from the [GitHub release page](https://github.com/camunda/connectors-bundle/releases/tag/0.17.0). -2. Follow the [installation guide](../../../../components/modeler/desktop-modeler/element-templates/configuring-templates) to reinstall the element template. diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/070-to-080.md b/versioned_docs/version-8.2/guides/update-guide/connectors/070-to-080.md deleted file mode 100644 index 1145b3450fb..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/070-to-080.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 070-to-080 -title: Update 0.7 to 0.8 -description: "Review which adjustments must be made to migrate from Connector SDK 0.7.x to 0.8.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.7.x to 0.8.0. diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/080-to-090.md b/versioned_docs/version-8.2/guides/update-guide/connectors/080-to-090.md deleted file mode 100644 index 51055c0aefc..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/080-to-090.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 080-to-090 -title: Update 0.8 to 0.9 -description: "Review which adjustments must be made to migrate from Connector SDK 0.8.x to 0.9.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.8.x to 0.9.0. diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/090-to-0100.md b/versioned_docs/version-8.2/guides/update-guide/connectors/090-to-0100.md deleted file mode 100644 index 1e6172bb692..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/090-to-0100.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 090-to-0100 -title: Update 0.9 to 0.10 -description: "Review which adjustments must be made to migrate from Connector SDK 0.9.x to 0.10.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.9.x to 0.10.0. 
diff --git a/versioned_docs/version-8.2/guides/update-guide/connectors/introduction.md b/versioned_docs/version-8.2/guides/update-guide/connectors/introduction.md deleted file mode 100644 index 034d80e7388..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/connectors/introduction.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: introduction -title: Connector SDK updates ---- - -These documents guide you through the process of updating your Camunda 8 -Connector runtimes to a newer version of the -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). - -There is a dedicated update guide for each version: - -### [Connector SDK 0.10.x to 0.11](../0100-to-0110) - -Update from 0.10.x to 0.11.2 - -### [Connector SDK 0.9 to 0.10](../090-to-0100) - -Update from 0.9.x to 0.10.0 - -### [Connector SDK 0.8 to 0.9](../080-to-090) - -Update from 0.8.x to 0.9.0 - -### [Connector SDK 0.7 to 0.8](../070-to-080) - -Update from 0.7.x to 0.8.0 - -### [Connector SDK 0.6 to 0.7](../060-to-070) - -Update from 0.6.x to 0.7.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.7.0) - -### [Connector SDK 0.5 to 0.6](../050-to-060) - -Update from 0.5.x to 0.6.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.6.0) - -### [Connector SDK 0.4 to 0.5](../040-to-050) - -Update from 0.4.x to 0.5.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.5.0) - -### [Connector SDK 0.3 to 0.4](../030-to-040) - -Update from 0.3.x to 0.4.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.4.0) - -### [Connector SDK 0.2 to 0.3](../020-to-030) - -Update from 0.2.x to 0.3.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.3.0) - -### [Connector SDK 0.1 to 0.2](../010-to-020) - -Update from 0.1.x to 0.2.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.2.0) diff --git a/versioned_docs/version-8.2/guides/update-guide/elasticsearch/7-to-8.md b/versioned_docs/version-8.2/guides/update-guide/elasticsearch/7-to-8.md deleted file mode 100644 index 3b4e8109fb4..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/elasticsearch/7-to-8.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -id: 7-to-8 -title: Update 7 to 8 -description: "Review which adjustments must be made to migrate from Elasticsearch 7 to Elasticsearch 8." ---- - -Camunda is not compatible with the Elasticsearch 8 Curator. This Curator is used to manage -[data retention](../../../components/concepts/data-retention.md) within Elasticsearch. To update to Elasticsearch 8, -the Curator must be replaced with -[Index Lifecycle Management](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-lifecycle-management.html) -(ILM). Once the update to Elasticsearch 8 is complete, the Curator can be disabled. Disabling the Curator before could -result in indexes without a configured ILM policy. These will not get deleted automatically. - -Details on how to configure ILM for Zeebe can be found on the -[Elasticsearch exporter](../../../../self-managed/zeebe-deployment/exporters/elasticsearch-exporter#retention) page. - -:::tip -The Elasticsearch Curator can be replaced with ILM when using Elasticsearch 7 as well. This is useful when incrementally -updating from Elasticsearch 7 to 8. 
-::: diff --git a/versioned_docs/version-8.2/guides/update-guide/introduction.md b/versioned_docs/version-8.2/guides/update-guide/introduction.md deleted file mode 100644 index 9286fa01ff5..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/introduction.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -id: introduction -title: Introduction ---- - -These documents guide you through the process of updating your Camunda 8 -application or server installation from one Camunda 8 version to the other. - -:::note -When updating from one minor version to the next, you do not need to update to each **patch** version along the way. However, do not skip **minor** versions when updating. -::: - -Depending on your amount of data, run a minor version for at least 24 hours before updating to the next version. - -You can find guides on how to update your Connector runtimes to a newer version of the -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -in the **Connectors** section. - -:::note -Versions prior to Camunda 8 are listed below and identified as Camunda Cloud versions. -::: - -There is a dedicated update guide for each version: - -### [Camunda 8.1 to Camunda 8.2](../810-to-820) - -Update from 8.1.x to 8.2.0 - -[Release notes](https://github.com/camunda/camunda-platform/releases/tag/8.2.0) -[Release blog](https://camunda.com/blog/2023/04/camunda-platform-8-2-key-to-scaling-automation/) - -### [Camunda 8.0 to Camunda 8.1](../800-to-810) - -Update from 8.0.x to 8.1.0 - -[Release notes](https://github.com/camunda/camunda-platform/releases/tag/8.1.0) -[Release blog](https://camunda.com/blog/2022/10/camunda-platform-8-1-released-whats-new/) - -### [Camunda Cloud 1.3 to Camunda 8.0](../130-to-800) - -Update from 1.3.x to 8.0.0 - -[Release notes](https://github.com/camunda/camunda-platform/releases/tag/8.0.0) -[Release blog](https://camunda.com/blog/2022/04/camunda-platform-8-0-released-whats-new/) - -### [Camunda Cloud 1.2 to 1.3](../120-to-130) - -Update from 1.2.x to 1.3.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.0) -[Release blog](https://camunda.com/blog/2022/01/camunda-cloud-1-3-0-released/) - -### [Camunda Cloud 1.1 to 1.2](../110-to-120) - -Update from 1.1.x to 1.2.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.0) -[Release blog](https://camunda.com/blog/2021/10/camunda-cloud-1-2-0-released/) - -### [Camunda Cloud 1.0 to 1.1](../100-to-110) - -Update from 1.0.x to 1.1.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.0) -[Release blog](https://camunda.com/blog/2021/07/camunda-cloud-110-released/) - -### [Camunda Cloud 0.26 to 1.0](../026-to-100) - -Update from 0.26.x to 1.0.0 - -[Release notes](https://github.com/camunda-cloud/zeebe/releases/tag/1.0.0) -[Release blog](https://camunda.com/blog/2021/05/camunda-cloud-10-released/) diff --git a/versioned_docs/version-8.2/guides/update-guide/keycloak/keycloak-update.md b/versioned_docs/version-8.2/guides/update-guide/keycloak/keycloak-update.md deleted file mode 100644 index 9fc02d192d0..00000000000 --- a/versioned_docs/version-8.2/guides/update-guide/keycloak/keycloak-update.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: keycloak-update -title: Update Keycloak -description: "Review what has to be taken into account when updating Keycloak." 
--- - -When updating Keycloak, follow the [Keycloak upgrade guide](https://www.keycloak.org/docs/latest/upgrading/index.html) and refer to the [supported environments](reference/supported-environments.md#camunda-8-self-managed) to ensure compatibility with tested Keycloak versions. - -:::danger -When updating Keycloak, ensure that you carry along its existing database. -**Do not** update by creating a new Keycloak instance and re-importing your users from external sources (e.g. LDAP), as this will result in new Keycloak-internal IDs. -Otherwise, users may not be able to access their data (e.g. Optimize collections) or log in to Web Modeler (see [Web Modeler's login troubleshooting guide](self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login.md#unique-constraint-violation)) after the update. -::: diff --git a/versioned_docs/version-8.2/guides/use-connectors-in-hybrid-mode.md b/versioned_docs/version-8.2/guides/use-connectors-in-hybrid-mode.md deleted file mode 100644 index cbf0eb7f902..00000000000 --- a/versioned_docs/version-8.2/guides/use-connectors-in-hybrid-mode.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -id: use-connectors-in-hybrid-mode -title: Use Connectors in hybrid mode -description: "Learn how to run Connectors in hybrid mode." ---- - -:::note -Hybrid mode is supported as of the Connectors `0.23.0` release. -::: - -In **hybrid mode**, you run a Self-Managed Connector runtime instance attached to a Camunda SaaS cluster or to another Self-Managed cluster that already has another instance of the Connector runtime attached. - -A few use cases where this approach might be useful: - -- You deal with services that must be isolated within a private network and must never be exposed to the public internet. -- Infrastructure amendments, such as SSL certificates or mounted volumes, need to be applied to the Connector runtime. -- Code modifications need to be applied to the Connector runtime or to specific Connector logic. - -## How it works - -Every Connector has an ID (type definition) and a name. Every Connector element template has a hidden property that -defines which Connector is used to execute a given template. - -For example, see the relation between the [Kafka element template](https://github.com/camunda/connectors/tree/main/connectors/kafka/element-templates) -and the [Kafka Connector](https://github.com/camunda/connectors/blob/main/connectors/kafka/src/main/java/io/camunda/connector/kafka/inbound/KafkaExecutable.java#L20). - -For the hybrid Connector runtime to work properly, you must override the Connector type. - -For the purpose of this guide, imagine you would like to override an HTTP REST Connector with type `io.camunda:http-json:1`. -Refer to the [element template](https://github.com/camunda/connectors/blob/main/connectors/http/rest/element-templates/http-json-connector.json#L50) and its related [runtime](https://github.com/camunda/connectors/blob/main/connectors/http/rest/src/main/java/io/camunda/connector/http/rest/HttpJsonFunction.java#L43). - -## Start Connector runtime in hybrid mode - -### Prerequisites - -Ensure you have a running Camunda cluster and a `Client ID`/`Client Secret` pair with `Zeebe` and `Operate` scopes. Learn more about [how to obtain required credentials](../../components/console/manage-clusters/manage-api-clients/). - -### Option 1: Get Connector runtime from Docker registry - -:::note When to use? -Use this option when you don't need to make any code modifications to either the Connector runtime or a specific Connector.
-This option allows you to start the Connector runtime bundle that runs all of [Camunda's officially-supported Connectors](../../components/connectors/out-of-the-box-connectors/available-connectors-overview/). -::: - -Run the following script: - -```shell -docker run --rm --name=HybridConnectorRuntime \ - -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=false \ - -e ZEEBE_CLIENT_CLOUD_CLUSTER-ID='' \ - -e ZEEBE_CLIENT_CLOUD_CLIENT-ID='' \ - -e ZEEBE_CLIENT_CLOUD_CLIENT-SECRET='' \ - -e ZEEBE_CLIENT_CLOUD_REGION='' \ - -e CAMUNDA_OPERATE_CLIENT_URL='https://.operate.camunda.io/' \ - -e CONNECTOR_HTTP_REST_TYPE='io.camunda:http-json:local' \ - camunda/connectors-bundle: -``` - -### Option 2: Build your own runtime - -:::note When to use? -Use this option when you make modifications to the original Connector runtime, existing Connectors, or -other related changes. -This option allows you to start the Connector runtime bundle with provided Connectors. -::: - -1. Ensure `docker` is installed. -2. Clone [https://github.com/camunda/connectors](https://github.com/camunda/connectors). -3. Go to `/bundle/default-bundle`. -4. Build a Connector image, e.g. `docker build -f Dockerfile -t myorg/my-connectors-bundle: .`. -5. Run the same `docker run ...` command as in [Option 1](#option-a-get-connectors-runtime-from-docker-registry). - -### Explanation - -Note the line `-e CONNECTOR_HTTP_REST_TYPE='io.camunda:http-json:local'`. This line means we have to override -`CONNECTOR_X_TYPE` with a given type. In this case, we want to register a local Self-Managed HTTP REST Connector as `io.camunda:http-json:local`. - -The `X` is normalized to the environment variable Connector name. For example, the [HTTP REST Connector](https://github.com/camunda/connectors/blob/main/connectors/http/rest/src/main/java/io/camunda/connector/http/rest/HttpJsonFunction.java#L33) -`HTTP REST` name becomes `HTTP_REST`, or the [Kafka Consumer Connector](https://github.com/camunda/connectors/blob/main/connectors/kafka/src/main/java/io/camunda/connector/kafka/inbound/KafkaExecutable.java#L20) name -becomes `KAFKA_CONSUMER`. Therefore, to override it one would need to pass in the `CONNECTOR_KAFKA_CONSUMER_TYPE=xxx` environment variable. - -## Preparing element template for hybrid mode - -As mentioned, to relate Connector element templates with Connector runtime, you must modify the task definition type. - -To do this, take the following steps: - -1. Obtain a copy of the element template you wish to override. All latest versions of the official element - templates can be found in the [official Connectors repository](https://github.com/camunda/connectors) at path `connectors//element-templates/`. -2. Modify the `value` to the desired new type of the property. Use `zeebe:taskDefinition:type` for outbound Connectors, or `inbound.type` for inbound ones. -3. Publish the new element template, and use it in your BPMN diagram. - -There are several options to deliver element templates to the target user: - -### Option 1: Hide task definition type value - -Use this option when you plan to clearly indicate that a specific Connector will only be used in a specific use-case. -Otherwise, users might be confused between two of the same Connector types. 
- -For example, if you defined `CONNECTOR_HTTP_REST_TYPE='io.camunda:http-json:local'` argument variable when running Connectors -runtime, you must implement the following in the element template for it to function properly: - -```json -{ - "type": "Hidden", - "value": "io.camunda:http-json:local", - "binding": { - "type": "zeebe:taskDefinition:type" - } -} -``` - -### Option 2: Expose task definition type as plain text - -Use this option when the target user building a BPMN process is deciding which Connector to use, or you have -more than one dedicated Self-Managed Connector instance. - -Be mindful that the user will be dealing with different -task definition types and has to know which is what. For example, if you defined `CONNECTOR_HTTP_REST_TYPE='io.camunda:http-json:local'` in runtime, you must implement the following in the -element template for it to function properly: - -```json -{ - "type": "String", - "label": "Task definition type", - "value": "io.camunda:http-json:local", - "binding": { - "type": "zeebe:taskDefinition:type" - } -} -``` - -However, the target user can change the value back to the original `"value": "io.camunda:http-json:1",` to execute the process in a SaaS -environment. You can also add this field to a group for UX purposes. - -### Option 3: Expose task definition type as dropdown - -Use this option if you would like to achieve the most user-friendly experience. However, this approach may take a larger time investment in modifying element templates, plus additional time to support whenever you launch a new -Connector runtime or disable an old one. - -The following example demonstrates this approach: - -```json -{ - "label": "Task definition type", - "type": "Dropdown", - "value": "io.camunda:http-json:1", - "choices": [ - { - "name": "SaaS environment", - "value": "io.camunda:http-json:1" - }, - { - "name": "SM environment 1", - "value": "io.camunda:http-json:local1" - }, - { - "name": "SM environment 2", - "value": "io.camunda:http-json:local2" - } - ], - "binding": { - "type": "zeebe:taskDefinition:type" - } -} -``` - -## Appendix - -See ready-to-use hybrid element templates examples for [HTTP REST](https://github.com/camunda/connectors/blob/main/connectors/http/rest/element-templates/hybrid/http-json-connector-hybrid.json) and [Kafka Consumer](https://github.com/camunda/connectors/tree/main/connectors/kafka/element-templates/hybrid). diff --git a/versioned_docs/version-8.2/guides/utilizing-forms.md b/versioned_docs/version-8.2/guides/utilizing-forms.md deleted file mode 100644 index 9d93e546dd1..00000000000 --- a/versioned_docs/version-8.2/guides/utilizing-forms.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -id: utilizing-forms -title: Build forms with Modeler -description: "Let's learn about Camunda Forms, how to use them, how to model them with a diagram, and deploying." ---- - -Beginner -Time estimate: 15 minutes - -:::note -The initial release of Camunda Forms includes a debut minimal feature set, which will be expanded with upcoming versions. - -If using with Camunda 7, note that the Camunda Forms feature was added with the 4.7.0 release of Desktop Modeler. Therefore, they can be used within BPMN diagrams running on Camunda 7 version 7.15.0 or later. -::: - -## Overview - -The Camunda Forms feature allows you to easily design and configure forms. Once configured, they can be connected to a user task or start event to implement a task form in your application. 
- -While you can incorporate Camunda Forms solely within Camunda 8, you can also utilize Camunda Forms in Camunda 7. After deploying a diagram with an embedded form, Tasklist imports this form schema and uses it to render the form on every task assigned to it. - -## Quickstart - -### Create new form - -To start building a form, log in to your [Camunda 8](https://camunda.io) account or open [Desktop Modeler](/components/modeler/about-modeler.md) and take the following steps: - -1. Navigate to Web Modeler or alternatively open the **File** menu in Desktop Modeler. -2. Open any project from your Web Modeler home view. -3. Click **Create new file** and choose **Form**. - -### Build your form - -Now you can start to build your Camunda form. Right after creating your form, you can name it by replacing the **New Form** text with the name of your choice. In this example, we'll build a form to help with a task in obtaining an email message. - -![form email example](./img/form-email-example.png) - -Add your desired elements from the palette on the left side by dragging and dropping them onto the canvas. - -![form palette](./img/form-palette.png) - -Within Forms, we have the option to add text fields, numerical values, checkboxes, radio elements, selection menus, text components, and buttons. - -:::note -Within Camunda 7, you can also utilize [embedded forms](https://docs.camunda.org/manual/latest/reference/forms/embedded-forms/). -::: - -In the properties panel on the right side of the page, view and edit attributes that apply to the selected form element. For example, apply a minimum or maximum length to a text field, or require a minimum or maximum value within a number element. In this case, we have labeled the field, described the field, and required an input for our email message. - -![email properties](./img/form-properties-email.png) - -Refer to the [camunda forms reference](../components/modeler/forms/camunda-forms-reference.md) to explore all form elements and configuration options in detail. - -### Save your form - -To save your form in Camunda 8, you don't have to do anything. Web Modeler will autosave every change you make. - -To save your form in Camunda 7, click **File > Save File As...** in the top-level menu. Select a location on your file system to store the form as `.form` file. You can load that file again by clicking **File > Open File...**. - -### Connect your form to a BPMN diagram - -Next, let's implement a task form into a diagram. In tandem, we can connect your form to a user task or start event. - -:::note -For Camunda 7, refer to the [user task forms guide](https://docs.camunda.org/manual/latest/user-guide/task-forms/#camunda-forms) to learn how to implement a task form in your application. -::: - -Navigate to Modeler and open any project from your Web Modeler home view. - -Take the following steps: - -1. Select the diagram where you'd like to apply your form. -2. Select the user task requiring the help of a form. -3. On the right side of the selected user task, select the blue overlay with three white horizontal lines to open the navigation menu. -4. Navigate to the form you want to connect and click the blue **Import** button. -5. When a user task has a connected form, the blue overlay will always stay visible on the right side of the task. - -Note that when using Camunda Forms, any submit button present in the form schema is hidden so we can control when a user can complete a task. - -:::note Using Camunda 7? 
-Click on the bottom left corner that says **JSON** to switch to the JSON view. Use caution when naming the fields of your form. Fields have their values pre-filled from variables with the same name. - -Copy the JSON schema, and go back to the BPMN diagram you modeled earlier. Select the **user task** and click on the **Forms** tab. After switching tabs, you should see the field where you can paste the form JSON schema. Paste the schema and save the file. -::: - -## Deploy your diagram and start an instance - -To execute your completed process diagram, click the blue **Deploy** button. You can now start a new process instance to initiate your process diagram. Click the blue **Run** button. You can now monitor your instances in [Operate](../components/operate/operate-introduction.md). - -To [complete a user task](./getting-started-orchestrate-human-tasks.md), navigate to [Tasklist](../components/tasklist/introduction-to-tasklist.md). - -:::note -To deploy with Camunda 7, use the [process engine](https://docs.camunda.org/manual/7.16/user-guide/process-engine/). -::: - -## Additional resources - -- [Desktop and Web Modeler](/components/modeler/about-modeler.md) -- [User task reference](/components/modeler/bpmn/user-tasks/user-tasks.md) diff --git a/versioned_docs/version-8.2/reference/alpha-features.md b/versioned_docs/version-8.2/reference/alpha-features.md deleted file mode 100644 index 54f5ad2da23..00000000000 --- a/versioned_docs/version-8.2/reference/alpha-features.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: alpha-features -title: Alpha features -sidebar_label: Alpha features -description: "Use alpha features to learn about upcoming changes, try them out, and share feedback." ---- - -You can use alpha features to learn about upcoming changes, try them out, and share feedback. - -:::info -To understand the difference between an alpha feature and an alpha release, see [alpha features and releases](/reference/release-policy.md#alpha-features-and-releases). -::: - -## Alpha - -Selected Camunda features and components are released as **alpha** versions. We release these in an early state for you to test and participate in development by sharing your feedback before they reach [general availability (GA)](#general-availability-ga). - -Limitations of alpha features and components include: - -- Not for production use. -- APIs, dependencies, and configuration are likely to change. -- Not necessarily feature-complete. -- Might lack full documentation. -- No guaranteed updates to newer releases. -- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://docs.camunda.org/enterprise/support/). -- No maintenance service. -- (SaaS) No availability targets. -- Released outside the standard [release policy](/reference/release-policy.md). - -To learn more about using alpha features, see [enabling alpha features](/components/console/manage-organization/enable-alpha-features.md). - -:::note - -- Alpha features can also be included in a minor version (stable) release. -- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/contact). 
- -::: - -## General availability (GA) - -Once features and components are released and considered stable, they become generally available. - -Stable features and components are: - -- Ready for production use for most users with minimal risk. -- Supported by [L1 Priority-level support](https://docs.camunda.org/enterprise/support/#priority-level) for production use. -- Fully documented. - -A release or component is considered stable if it has passed all verification and test stages and can be released to production. - -:::note -Alpha releases can also have **limited availability**, such as features that are only available to enterprise customers. -::: diff --git a/versioned_docs/version-8.2/reference/announcements.md b/versioned_docs/version-8.2/reference/announcements.md deleted file mode 100644 index e4812255d8a..00000000000 --- a/versioned_docs/version-8.2/reference/announcements.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -id: announcements -title: "Announcements" -description: "Important announcements including deprecation & removal notices" ---- - -## Versioning changes in Helm chart - -[Helm charts versioning](/self-managed/platform-deployment/helm-kubernetes/overview.md) changed in July 2023. - -Starting from July 2023 (v8.2.8), the Camunda 8 **Helm chart** version follows the same unified schema -and schedule as [Camunda 8 applications](https://github.com/camunda/camunda-platform). - -Before this change, the Camunda 8 **Helm chart** version only followed the minor version. - -## Camunda 8.2 - -Release date: 11th of April 2023 - -End of maintenance: 8th of October 2024 - -[Release notes](https://github.com/camunda/camunda-platform/releases/tag/8.2.0) -[Release blog](https://camunda.com/blog/2023/04/camunda-platform-8-2-key-to-scaling-automation/) - -### Camunda 8 SaaS - Required cluster update - -:::caution -By **August 30th, 2024** all automation clusters in Camunda 8 SaaS must be updated to the following versions at a **minimum**: - -- **8.2+gen27** -- **8.3+gen11** -- **8.4+gen7** -- **8.5+gen2** - -::: - -auth0 announced an End-Of-Life for one of the functionalities that is being utilized by previous automation clusters. The new versions are not using this functionality anymore. This update ensures your cluster will work seamlessly after auth0 deactivates the feature in production. - -You minimally need to take the following update path: - -- 8.0.x -> 8.2+gen27 -- 8.1.x -> 8.2+gen27 -- 8.2.x -> 8.2+gen27 -- 8.3.x -> 8.3+gen11 -- 8.4.x -> 8.4+gen7 -- 8.5.x -> 8.5+gen2 - -If you do not update the cluster by August 30th 2024, we will update the cluster for you. **Without an update, you would lose access to your cluster.** - -Camunda 8 Self-Managed clusters are not affected by this. - -### Update from Web Modeler 8.2 to a later minor version - -Web Modeler versions 8.2.7 to 8.2.12 are affected by [camunda/issues#677](https://github.com/camunda/issues/issues/677). - -If you are using one of these versions, you should first update to Web Modeler 8.2.13 (or a subsequent patch version) before upgrading to a later minor version (8.3 or higher). - -If your current version of Web Modeler is 8.2.6 or earlier, you may directly upgrade to a later minor version. - -### Do not update to Camunda 8.2.22 - -:::caution -Zeebe release `8.2.22` suffers from [camunda/zeebe#16406](https://github.com/camunda/camunda/issues/16406), which results in a Zeebe broker being unable to start if at least one DMN model is deployed. We urge users to skip this release and update to `8.2.23` right away. 
-::: - -### Do not update from Camunda 8.1.X to 8.2.6 - -An issue in the Operate 8.2.6 patch was discovered after it was published on June 8th. - -You should not update directly from 8.1.x to 8.2.6 (it would require manual intervention as indices break); instead, either first update to 8.2.5 and then to 8.2.6, or go straight from 8.1.x to 8.2.7. - -To prevent this entirely, we removed the Operate 8.2.6 artifacts from this release. - -As Camunda 8.2.7 was already released on Tuesday, June 13th, you can update to 8.2.7 directly, skipping 8.2.6. - -### OpenSearch 1.3.x support - -- Operate versions 8.2+ support OpenSearch 1.3.x. However, 8.2.x patches will only be released for the OpenSearch 1.3 branch until the end of 2023, given that the OpenSearch 1.3 maintenance period ends by then. We recommend customers move to 8.4.x, which supports OpenSearch 2.5+. - -### Optimize and Helm chart compatibility - -Optimize 3.10.1 introduced a new environment variable for the redirection URL. However, the change is not compatible with the Camunda Helm charts until it is fixed in 3.10.3 (and Helm chart 8.2.9). Therefore, those Optimize versions are coupled to certain Camunda Helm chart versions: - -| Optimize version | Camunda Helm chart version | -| --------------------------------- | -------------------------- | -| Optimize 3.10.1 & Optimize 3.10.2 | 8.2.0 - 8.2.8 | -| Optimize 3.10.3+ | 8.2.9 - 8.2.22 | -| Optimize 8.2.7+ | 8.2.23+ | - -## Camunda 8.1 - -Release date: 11th of October 2022 - -End of maintenance: 10th of April 2024 - -[Release notes](https://github.com/camunda/camunda-platform/releases/tag/8.1.0) -[Release blog](https://camunda.com/blog/2022/10/camunda-platform-8-1-released-whats-new/) - -### Do not update to Camunda 8.1.23 - -:::caution -Zeebe release `8.1.23` suffers from [camunda/zeebe#16406](https://github.com/camunda/camunda/issues/16406), which results in a Zeebe broker being unable to start if at least one DMN model is deployed. We urge users to skip this release and update to `8.1.24` right away. -::: - -## Camunda 8.0 - -Release date: 12th of April 2022 - -End of maintenance: 11th of October 2023 - -[Release notes](https://github.com/camunda/camunda-platform/releases/tag/8.0.0) -[Release blog](https://camunda.com/blog/2022/04/camunda-platform-8-0-released-whats-new/) - -### Camunda 8.0.15 release is skipped - -The `Camunda 8.0.15` release pipeline led to corrupted `Zeebe 8.0.15` artifacts getting published. -The whole [Camunda 8.0.15 release](https://github.com/camunda/camunda-platform/releases/tag/8.0.15) was thus skipped, and updates from `Camunda 8.0.14` should go straight to `Camunda 8.0.16`. - -### Deprecated in 8.0 - -The [DeployProcess RPC](/apis-tools/grpc.md#deployprocess-rpc) was deprecated in 8.0. -It is replaced by the [DeployResource RPC](/apis-tools/grpc.md#deployresource-rpc). - -## Camunda Cloud 1.3 - -Release date: 11th of January 2022 - -Camunda Cloud is out of maintenance. - -### Deprecated in 1.3 - -The `zeebe-test` module was deprecated in 1.3.0. We are currently planning to remove `zeebe-test` for the 1.4.0 release. - -## Camunda Cloud 1.2 - -Release date: 12th of October 2021 - -Camunda Cloud is out of maintenance. - -## Camunda Cloud 1.1 - -Release date: 13th of July 2021 - -Camunda Cloud is out of maintenance. - -## Camunda Cloud 1.0 - -Release date: 11th of May 2021 - -Camunda Cloud is out of maintenance. - -### Removed in 1.0 - -The support for YAML processes was removed as of release 1.0. The `resourceType` in the Deployment record and Process gRPC request is deprecated; it will always contain `BPMN` as the value.
- -## Zeebe 0.26.0 - -### Deprecated in 0.26.0 - -#### YAML workflow descriptions - -YAML workflows are an alternative way to specify simple workflows using a proprietary YAML description. This feature is deprecated and no longer advertised in the documentation. YAML workflows gained little traction with users, and we do not intend to support them in the future. - -We recommend that all users of YAML workflows migrate to BPMN workflows as soon as possible. The feature will eventually be removed completely, though the removal date has yet to be defined. - -## Zeebe 0.23.0-alpha2 - -### Deprecated in 0.23.0-alpha2 - -- TOML configuration - deprecated and removed in 0.23.0-alpha2 -- Legacy environment variables - deprecated in 0.23.0-alpha2, removed in 0.25.0 - -New configuration: - -```yaml -exporters: -  elasticsearch: -    className: io.camunda.zeebe.exporter.ElasticsearchExporter -  debughttp: -    className: io.camunda.zeebe.broker.exporter.debug.DebugHttpExporter -``` - -In terms of specifying values, there were two minor changes: - -- Memory sizes are now specified like this: `512MB` (old way: `512M`) -- Durations (e.g., timeouts) can now also be given in the ISO-8601 duration format. However, you can still use the established method and specify a timeout such as `30s`. diff --git a/versioned_docs/version-8.2/reference/dependencies.md b/versioned_docs/version-8.2/reference/dependencies.md deleted file mode 100644 index 2e55d2dd583..00000000000 --- a/versioned_docs/version-8.2/reference/dependencies.md +++ /dev/null @@ -1,1780 +0,0 @@ ---- -id: dependencies -title: "Dependencies" -description: "Dependencies and Third Party Libraries for all the components of Camunda 8" -keywords: ["dependencies", "third party", "third party libraries"] ---- - -A complete list of all dependencies and third-party libraries for all the components of Camunda 8 (including Self-Managed). 
- -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - - - - - -### All Zeebe Dependencies - -- agrona (Version: 1.17.1, License: [The Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- animal-sniffer-annotations (Version: 1.21, License: [MIT license](http://www.opensource.org/licenses/mit-license.php)) -- annotations (Version: 4.1.1.4, License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)) -- annotations (Version: 24.0.1, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- annotations (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- apache-client (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- api-common (Version: 2.6.3, License: [BSD](https://github.com/googleapis/api-common-java/blob/main/LICENSE)) -- arns (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- auth (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- auth0 (Version: 1.44.2, License: [The MIT License (MIT)](https://raw.githubusercontent.com/auth0/auth0-java/master/LICENSE)) -- auto-value (Version: 1.10.1, License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- auto-value-annotations (Version: 1.10.1, License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- aws-core (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- aws-query-protocol (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- aws-xml-protocol (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- cache-api (Version: 1.1.0, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- camunda-dmn-model (Version: 7.18.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- camunda-xml-model (Version: 7.18.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- checker-qual (Version: 3.32.0, License: [The MIT License](http://opensource.org/licenses/MIT)) -- commons-codec (Version: 1.15, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-collections4 (Version: 4.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-compress (Version: 1.23.0, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-lang3 (Version: 3.12.0, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-logging (Version: 1.2, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-math3 (Version: 3.6.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-text (Version: 1.10.0, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- concurrency-limits-core (Version: 0.4.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- conscrypt-openjdk-uber (Version: 2.5.2, License: [Apache 2](https://www.apache.org/licenses/LICENSE-2.0)) -- cron-utils (Version: 9.2.1, License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.html)) -- crt-core (Version: 2.20.66, 
License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- dmn-engine (Version: 1.8.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- ehcache (Version: 3.10.8, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- elasticsearch-rest-client (Version: 7.17.9, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)) -- endpoints-spi (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- eventstream (Version: 1.0.1, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- failsafe (Version: 2.4.4, License: [Apache License, Version 2.0](http://apache.org/licenses/LICENSE-2.0)) -- failureaccess (Version: 1.0.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- fastparse_2.13 (Version: 2.3.3, License: [MIT](https://spdx.org/licenses/MIT.html)) -- feel-engine (Version: 1.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- gapic-google-cloud-storage-v2 (Version: 2.20.2-alpha, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- gax (Version: 2.23.3, License: [BSD](https://github.com/googleapis/gax-java/blob/master/LICENSE)) -- gax-grpc (Version: 2.23.3, License: [BSD](https://github.com/googleapis/gax-java/blob/master/LICENSE)) -- gax-httpjson (Version: 0.108.3, License: [BSD](https://github.com/googleapis/gax-java/blob/master/LICENSE)) -- geny_2.13 (Version: 0.6.10, License: [MIT](https://spdx.org/licenses/MIT.html)) -- google-api-client (Version: 2.2.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-api-services-storage (Version: v1-rev20230301-2.0.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-auth-library-credentials (Version: 1.16.0, License: [BSD New license](http://opensource.org/licenses/BSD-3-Clause)) -- google-auth-library-oauth2-http (Version: 1.16.0, License: [BSD New license](http://opensource.org/licenses/BSD-3-Clause)) -- google-cloud-core (Version: 2.13.0, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-cloud-core-grpc (Version: 2.13.0, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-cloud-core-http (Version: 2.13.0, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-cloud-storage (Version: 2.20.2, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-http-client (Version: 1.43.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-http-client-apache-v2 (Version: 1.43.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-http-client-appengine (Version: 1.43.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-http-client-gson (Version: 1.43.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-http-client-jackson2 (Version: 1.43.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- google-oauth-client (Version: 1.34.1, License: [The Apache Software License, Version 
2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- grpc-alts (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-api (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-auth (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-context (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-core (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-google-cloud-storage-v2 (Version: 2.20.2-alpha, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- grpc-googleapis (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-grpclb (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-netty (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-netty-shaded (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-protobuf (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-protobuf-lite (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-services (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-stub (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-xds (Version: 1.54.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- gson (Version: 2.10.1, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- guava (Version: 31.1-jre, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- http-client-spi (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- httpasyncclient (Version: 4.1.5, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpclient (Version: 4.5.14, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpcore (Version: 4.4.16, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpcore-nio (Version: 4.4.16, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- j2objc-annotations (Version: 1.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-annotations (Version: 2.14.2, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-core (Version: 2.14.3, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-databind (Version: 2.14.3, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-databind-nullable (Version: 0.2.6, License: [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0.html)) -- jackson-dataformat-msgpack (Version: 0.9.3, License: [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-dataformat-yaml (Version: 2.14.3, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-datatype-jdk8 (Version: 2.14.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-datatype-jsr310 (Version: 2.14.3, License: [The 
Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-module-parameter-names (Version: 2.14.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jakarta.annotation-api (Version: 2.1.1, License: [EPL 2.0](http://www.eclipse.org/legal/epl-2.0)) -- jakarta.validation-api (Version: 3.0.2, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- java-grpc-prometheus (Version: 0.6.0, License: [Apache 2.0 License](https://github.com/dinowernli/java-grpc-prometheus/blob/master/LICENSE)) -- java-jwt (Version: 4.3.0, License: [The MIT License (MIT)](https://raw.githubusercontent.com/auth0/java-jwt/master/LICENSE)) -- javax.annotation-api (Version: 1.3.2, License: [CDDL + GPLv2 with classpath exception](https://github.com/javaee/javax.annotation/blob/master/LICENSE)) -- json-utils (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- jul-to-slf4j (Version: 2.0.7, License: [MIT License](http://www.opensource.org/licenses/mit-license.php)) -- jwks-rsa (Version: 0.22.0, License: [The MIT License (MIT)](https://raw.githubusercontent.com/auth0/jwks-rsa-java/master/LICENSE)) -- kotlin-stdlib (Version: 1.6.20, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- kotlin-stdlib-common (Version: 1.5.31, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- kotlin-stdlib-jdk7 (Version: 1.6.10, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- kotlin-stdlib-jdk8 (Version: 1.6.10, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- kryo (Version: 5.4.0, License: [3-Clause BSD License](https://opensource.org/licenses/BSD-3-Clause)) -- LatencyUtils (Version: 2.0.3, License: [Public Domain, per Creative Commons CC0](http://creativecommons.org/publicdomain/zero/1.0/)) -- listenablefuture (Version: 9999.0-empty-to-avoid-conflict-with-guava, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- log4j-api (Version: 2.20.0, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- log4j-core (Version: 2.20.0, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- log4j-slf4j2-impl (Version: 2.20.0, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- logging-interceptor (Version: 4.10.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- metrics-spi (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- micrometer-commons (Version: 1.10.7, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- micrometer-core (Version: 1.10.7, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- micrometer-observation (Version: 1.10.7, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- micrometer-registry-prometheus (Version: 1.10.7, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- minlog (Version: 1.3.1, License: [3-Clause BSD License](https://opensource.org/licenses/BSD-3-Clause)) -- msgpack-core (Version: 0.9.3, License: 
[Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- netty-buffer (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec-http (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec-http2 (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec-socks (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-common (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-handler (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-handler-proxy (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-nio-client (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- netty-resolver (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-tcnative-boringssl-static (Version: 2.0.60.Final, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- netty-tcnative-classes (Version: 2.0.60.Final, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- netty-transport (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-transport-classes-epoll (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-transport-native-epoll (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-transport-native-unix-common (Version: 4.1.92.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- objenesis (Version: 3.3, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- okhttp (Version: 4.10.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- okio-jvm (Version: 3.0.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- opencensus-api (Version: 0.31.1, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- opencensus-contrib-http-util (Version: 0.31.1, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- opencensus-proto (Version: 0.2.0, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- opensearch-rest-client (Version: 2.6.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- perfmark-api (Version: 0.25.0, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- profiles (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- proto-google-cloud-storage-v2 (Version: 2.20.2-alpha, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- proto-google-common-protos (Version: 2.14.3, License: 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- proto-google-iam-v1 (Version: 1.9.3, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- protobuf-java (Version: 3.22.5, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -- protobuf-java-util (Version: 3.22.5, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -- protocol-core (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- re2j (Version: 1.6, License: [Go License](https://golang.org/LICENSE)) -- reactive-streams (Version: 1.0.4, License: [MIT-0](https://spdx.org/licenses/MIT-0.html)) -- reflectasm (Version: 1.11.9, License: [3-Clause BSD License](https://opensource.org/licenses/BSD-3-Clause)) -- regions (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- rocksdbjni (Version: 8.0.0, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html)) -- s3 (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- scala-library (Version: 2.13.10, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- sdk-core (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- simpleclient (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_common (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_hotspot (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_tracer_common (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_tracer_otel (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_tracer_otel_agent (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- slf4j-api (Version: 2.0.7, License: [MIT License](http://www.opensource.org/licenses/mit-license.php)) -- snakeyaml (Version: 2.0, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- sourcecode_2.13 (Version: 0.2.3, License: [MIT](https://spdx.org/licenses/MIT.html)) -- spring-aop (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-beans (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-actuator (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-actuator-autoconfigure (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-autoconfigure (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-json (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-logging (Version: 3.0.5, License: [Apache 
License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-tomcat (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-web (Version: 3.0.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-context (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-core (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-expression (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-jcl (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-web (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-webmvc (Version: 6.0.9, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- sts (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- swagger-annotations (Version: 2.2.10, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html)) -- third-party-jackson-core (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- threetenbp (Version: 1.6.5, License: [BSD-3-Clause](https://raw.githubusercontent.com/ThreeTen/threetenbp/main/LICENSE.txt)) -- tomcat-embed-core (Version: 10.1.7, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- tomcat-embed-el (Version: 10.1.7, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- tomcat-embed-websocket (Version: 10.1.7, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- utils (Version: 2.20.66, License: [Apache License, Version 2.0](https://aws.amazon.com/apache2.0)) -- zstd-jni (Version: 1.5.5-2, License: [BSD 2-Clause License](https://opensource.org/licenses/BSD-2-Clause)) - - - - - -### Operate Dependencies (Front end) - -- @camunda-cloud/common-ui-react (Version: 0.0.18-rc13, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @camunda-cloud/common-ui (Version: 0.0.18-rc13, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @camunda/camunda-composite-components (Version: 0.0.28, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/colors (Version: 11.13.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/elements (Version: 11.20.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/feature-flags (Version: 0.13.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/grid (Version: 11.12.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/icon-helpers (Version: 10.39.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/icons-react (Version: 11.17.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/icons (Version: 11.17.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/layout (Version: 11.12.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/motion (Version: 11.10.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/react (Version: 1.25.0, Licence: 
[Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/styles (Version: 1.25.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/telemetry (Version: 0.1.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/themes (Version: 11.17.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- @carbon/type (Version: 11.16.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- htm (Version: 3.1.1, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- mixpanel-browser (Version: 2.45.0, Licence: [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html)) -- hoist-non-react-statics (Version: 3.3.2, Licence: [BSD-3-Clause](https://spdx.org/licenses/BSD-3-Clause.html)) -- react-transition-group (Version: 4.4.5, Licence: [BSD-3-Clause](https://spdx.org/licenses/BSD-3-Clause.html)) -- source-map-js (Version: 1.0.2, Licence: [BSD-3-Clause](https://spdx.org/licenses/BSD-3-Clause.html)) -- anymatch (Version: 3.1.3, Licence: [ISC](https://spdx.org/licenses/ISC.html)) -- css-color-keywords (Version: 1.0.0, Licence: [ISC](https://spdx.org/licenses/ISC.html)) -- glob-parent (Version: 5.1.2, Licence: [ISC](https://spdx.org/licenses/ISC.html)) -- inherits-browser (Version: 0.0.1, Licence: [ISC](https://spdx.org/licenses/ISC.html)) -- inherits-browser (Version: 0.1.0, Licence: [ISC](https://spdx.org/licenses/ISC.html)) -- @babel/code-frame (Version: 7.22.13, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/generator (Version: 7.23.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-annotate-as-pure (Version: 7.18.6, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-environment-visitor (Version: 7.22.20, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-function-name (Version: 7.23.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-hoist-variables (Version: 7.22.5, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-module-imports (Version: 7.18.6, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-split-export-declaration (Version: 7.22.6, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-string-parser (Version: 7.19.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-string-parser (Version: 7.22.5, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-validator-identifier (Version: 7.19.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/helper-validator-identifier (Version: 7.22.20, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/highlight (Version: 7.22.20, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/parser (Version: 7.23.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/runtime (Version: 7.20.7, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/template (Version: 7.22.15, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/traverse (Version: 7.23.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/types (Version: 7.20.7, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @babel/types (Version: 7.23.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @bpmn-io/diagram-js-ui (Version: 0.2.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @bpmn-io/element-template-icon-renderer (Version: 0.5.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @csstools/cascade-layer-name-parser (Version: 1.0.1, Licence: 
[MIT](https://spdx.org/licenses/MIT.html)) -- @csstools/css-parser-algorithms (Version: 2.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @csstools/css-tokenizer (Version: 2.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @devbookhq/splitter (Version: 1.4.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @emotion/is-prop-valid (Version: 1.2.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @emotion/memoize (Version: 0.8.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @emotion/stylis (Version: 0.8.5, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @emotion/unitless (Version: 0.7.5, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @floating-ui/core (Version: 1.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @floating-ui/dom (Version: 1.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @floating-ui/react-dom (Version: 1.3.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/gen-mapping (Version: 0.3.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/resolve-uri (Version: 3.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/resolve-uri (Version: 3.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/set-array (Version: 1.1.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/sourcemap-codec (Version: 1.4.14, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/sourcemap-codec (Version: 1.4.15, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/trace-mapping (Version: 0.3.17, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @jridgewell/trace-mapping (Version: 0.3.20, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @monaco-editor/loader (Version: 1.3.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @monaco-editor/react (Version: 4.4.6, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @remix-run/router (Version: 1.0.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- ansi-styles (Version: 3.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- babel-plugin-styled-components (Version: 2.0.7, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- babel-plugin-syntax-jsx (Version: 6.18.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- binary-extensions (Version: 2.2.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- bpmn-moddle (Version: 8.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- bpmn-moddle (Version: 8.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- braces (Version: 3.0.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- camelize (Version: 1.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- chalk (Version: 2.4.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- chokidar (Version: 3.5.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- classnames (Version: 2.3.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- clsx (Version: 1.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- color-convert (Version: 1.9.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- color-convert (Version: 2.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- color-name (Version: 1.1.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- color-name (Version: 1.1.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- color-string (Version: 1.9.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- color (Version: 4.2.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- 
component-event (Version: 0.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- component-xor (Version: 0.0.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- compute-scroll-into-view (Version: 1.0.20, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- copy-to-clipboard (Version: 3.3.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- css-to-react-native (Version: 3.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- css.escape (Version: 1.5.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- csstype (Version: 3.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- date-fns (Version: 2.29.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- debug (Version: 4.3.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- diagram-js-direct-editing (Version: 1.8.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- diagram-js-direct-editing (Version: 2.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- diagram-js (Version: 12.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- diagram-js (Version: 8.9.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- didi (Version: 8.0.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- didi (Version: 9.0.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dmn-moddle (Version: 10.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dom-helpers (Version: 5.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dom-iterator (Version: 1.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- domify (Version: 1.4.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- downshift (Version: 5.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- escape-html (Version: 1.0.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- escape-string-regexp (Version: 1.0.5, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- fill-range (Version: 7.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- final-form-arrays (Version: 3.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- final-form (Version: 4.20.9, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- flatpickr (Version: 4.6.9, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- fsevents (Version: 2.3.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- globals (Version: 11.12.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- hammerjs (Version: 2.0.8, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- has-flag (Version: 3.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- hat (Version: 0.0.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- history (Version: 5.3.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- ids (Version: 0.2.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- ids (Version: 1.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- immutable (Version: 4.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- inferno-shared (Version: 5.6.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- inferno-vnode-flags (Version: 5.6.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- inferno (Version: 5.6.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- invariant (Version: 2.2.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- is-arrayish (Version: 0.3.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- is-binary-path (Version: 2.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- is-extglob (Version: 2.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- is-glob (Version: 
4.0.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- is-number (Version: 7.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- js-tokens (Version: 4.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- jsesc (Version: 2.5.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- lodash.debounce (Version: 4.0.8, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- lodash.findlast (Version: 4.6.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- lodash.isequal (Version: 4.5.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- lodash.omit (Version: 4.5.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- lodash.throttle (Version: 4.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- lodash (Version: 4.17.21, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- loose-envify (Version: 1.4.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- matches-selector (Version: 1.2.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- min-dash (Version: 3.8.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- min-dash (Version: 4.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- min-dash (Version: 4.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- min-dom (Version: 3.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- min-dom (Version: 4.0.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- min-dom (Version: 4.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- mobx-react-lite (Version: 3.4.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- mobx-react (Version: 7.6.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- mobx (Version: 6.8.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- moddle-xml (Version: 10.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- moddle-xml (Version: 9.0.6, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- moddle (Version: 5.0.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- moddle (Version: 6.2.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- ms (Version: 2.1.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- normalize-path (Version: 3.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- object-assign (Version: 4.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- object-refs (Version: 0.3.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- opencollective-postinstall (Version: 2.0.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- path-intersection (Version: 2.2.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- picomatch (Version: 2.3.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- polished (Version: 4.2.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- postcss-custom-properties (Version: 13.1.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- postcss-value-parser (Version: 4.2.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- preact (Version: 10.15.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- prop-types (Version: 15.8.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react-dom (Version: 18.2.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react-final-form-arrays (Version: 3.1.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react-final-form (Version: 6.5.9, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react-is (Version: 16.13.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react-is (Version: 17.0.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react-router-dom (Version: 
6.4.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react-router (Version: 6.4.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- react (Version: 18.2.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- readdirp (Version: 3.6.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- regenerator-runtime (Version: 0.13.11, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- resize-observer-polyfill (Version: 1.5.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- sass (Version: 1.60.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- saxen (Version: 8.1.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- scheduler (Version: 0.23.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- selection-ranges (Version: 3.0.3, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- selection-update (Version: 0.1.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- shallowequal (Version: 1.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- simple-swizzle (Version: 0.2.2, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- state-local (Version: 1.0.7, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- styled-components (Version: 5.3.9, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- supports-color (Version: 5.5.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- table-js (Version: 7.3.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- tiny-svg (Version: 2.2.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- tiny-svg (Version: 3.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- tiny-svg (Version: 3.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- to-fast-properties (Version: 2.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- to-regex-range (Version: 5.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- toggle-selection (Version: 1.0.6, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- use-resize-observer (Version: 6.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- window-or-global (Version: 1.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- component-event (Version: 0.1.4, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- component-props (Version: 1.1.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- indexof (Version: 0.0.1, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- @ibm/plex (Version: 6.0.0-next.6, Licence: [OFL-1.1](https://spdx.org/licenses/OFL-1.1.html)) -- bpmn-js (Version: 13.1.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dmn-js-decision-table (Version: 13.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dmn-js-drd (Version: 13.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dmn-js-literal-expression (Version: 13.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dmn-js-shared (Version: 13.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- dmn-js (Version: 13.0.0, Licence: [MIT](https://spdx.org/licenses/MIT.html)) -- wicg-inert (Version: 3.1.2, Licence: [W3C-20150513](https://spdx.org/licenses/W3C-20150513.html)) - -### Operate Dependencies (Back end) - -- aggs-matrix-stats-client (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- agrona (Version: 1.17.2, License: [The Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- agrona (Version: 1.18.2, License: [The Apache License, Version 
2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- animal-sniffer-annotations (Version: 1.23, License: [MIT license](https://spdx.org/licenses/MIT.txt)) -- annotations (Version: 4.1.1.4, License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)) -- annotations (Version: 13.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- annotations (Version: 17.0.0, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- apiguardian-api (Version: 1.1.2, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- attoparser (Version: 2.0.7.RELEASE, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- auth0 (Version: 1.44.2, License: [The MIT License (MIT)](https://raw.githubusercontent.com/auth0/auth0-java/master/LICENSE)) -- cache-api (Version: 1.1.1, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- camunda-xml-model (Version: 7.18.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- checker-qual (Version: 3.12.0, License: [The MIT License](http://opensource.org/licenses/MIT)) -- checker-qual (Version: 3.5.0, License: [The MIT License](http://opensource.org/licenses/MIT)) -- classgraph (Version: 4.8.149, License: [The MIT License (MIT)](http://opensource.org/licenses/MIT)) -- classmate (Version: 1.6.0, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-codec (Version: 1.16.1, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-collections4 (Version: 4.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-compress (Version: 1.23.0, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-lang3 (Version: 3.13.0, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- commons-logging (Version: 1.1.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- compiler (Version: 0.9.6, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0)) -- docker-java-api (Version: 3.3.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- docker-java-transport (Version: 3.3.6, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- docker-java-transport-zerodep (Version: 3.3.6, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- duct-tape (Version: 1.0.8, License: [MIT](http://opensource.org/licenses/MIT)) -- ehcache (Version: 3.10.8, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- elasticsearch (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch (Version: 1.18.3, License: [MIT](http://opensource.org/licenses/MIT)) -- elasticsearch-cli (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch-core (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch-geo 
(Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch-lz4 (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch-plugin-classloader (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch-rest-client (Version: 7.17.16, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)) -- elasticsearch-rest-high-level-client (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch-secure-sm (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- elasticsearch-x-content (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- error_prone_annotations (Version: 2.20.0, License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- error_prone_annotations (Version: 2.3.4, License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- failsafe (Version: 2.4.1, License: [Apache License, Version 2.0](http://apache.org/licenses/LICENSE-2.0)) -- failureaccess (Version: 1.0.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- feign-core (Version: 12.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- feign-jackson (Version: 12.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- feign-slf4j (Version: 12.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- grpc-api (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-context (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-core (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-netty (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-protobuf (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-protobuf-lite (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-stub (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-util (Version: 1.59.1, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- gson (Version: 2.10.1, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- guava (Version: 30.0-jre, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- guava (Version: 31.1-jre, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- guava-annotations (Version: r03, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- hamcrest (Version: 2.2, License: [BSD License 3](http://opensource.org/licenses/BSD-3-Clause)) -- hamcrest-core (Version: 2.2, License: [BSD License 3](http://opensource.org/licenses/BSD-3-Clause)) -- HdrHistogram 
(Version: 2.1.9, License: [Public Domain, per Creative Commons CC0](http://creativecommons.org/publicdomain/zero/1.0/)) -- hibernate-validator (Version: 8.0.1.Final, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- hppc (Version: 0.8.1, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpasyncclient (Version: 4.1.5, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpclient (Version: 4.5.14, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpclient5 (Version: 5.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpcore (Version: 4.4.16, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpcore-nio (Version: 4.4.16, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpcore5 (Version: 5.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- httpcore5-h2 (Version: 5.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- j2objc-annotations (Version: 1.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-annotations (Version: 2.15.4, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-core (Version: 2.15.4, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-databind (Version: 2.15.4, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-dataformat-cbor (Version: 2.15.4, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-dataformat-smile (Version: 2.15.4, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-dataformat-yaml (Version: 2.15.4, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-datatype-jdk8 (Version: 2.15.4, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-datatype-jsr310 (Version: 2.15.4, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jackson-module-parameter-names (Version: 2.15.4, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jakarta.activation-api (Version: 2.1.3, License: [EDL 1.0](http://www.eclipse.org/org/documents/edl-v10.php)) -- jakarta.annotation-api (Version: 2.1.1, License: [EPL 2.0](http://www.eclipse.org/legal/epl-2.0)) -- jakarta.json (Version: 2.0.1, License: [Eclipse Public License 2.0](https://projects.eclipse.org/license/epl-2.0)) -- jakarta.servlet-api (Version: 6.0.0, License: [EPL 2.0](http://www.eclipse.org/legal/epl-2.0)) -- jakarta.validation-api (Version: 3.0.2, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jakarta.xml.bind-api (Version: 4.0.2, License: [Eclipse Distribution License - v 1.0](http://www.eclipse.org/org/documents/edl-v10.php)) -- java-jwt (Version: 4.3.0, License: [The MIT License (MIT)](https://raw.githubusercontent.com/auth0/java-jwt/master/LICENSE)) -- javassist (Version: 3.29.0-GA, License: [MPL 
1.1](http://www.mozilla.org/MPL/MPL-1.1.html)) -- jboss-logging (Version: 3.5.3.Final, License: [Apache License 2.0](https://repository.jboss.org/licenses/apache-2.0.txt)) -- jcip-annotations (Version: 1.0-1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- jna (Version: 5.10.0, License: [LGPL-2.1-or-later](https://www.gnu.org/licenses/old-licenses/lgpl-2.1)) -- joda-time (Version: 2.10.10, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- jopt-simple (Version: 5.0.2, License: [The MIT License](http://www.opensource.org/licenses/mit-license.php)) -- jsr305 (Version: 3.0.2, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- junit (Version: 4.13.2, License: [Eclipse Public License 1.0](http://www.eclipse.org/legal/epl-v10.html)) -- jwks-rsa (Version: 0.21.3, License: [The MIT License (MIT)](https://raw.githubusercontent.com/auth0/jwks-rsa-java/master/LICENSE)) -- kotlin-stdlib (Version: 1.9.23, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- kotlin-stdlib-common (Version: 1.9.23, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- kotlin-stdlib-jdk7 (Version: 1.9.23, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- kotlin-stdlib-jdk8 (Version: 1.9.23, License: [The Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lang-mustache-client (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- LatencyUtils (Version: 2.0.3, License: [Public Domain, per Creative Commons CC0](http://creativecommons.org/publicdomain/zero/1.0/)) -- listenablefuture (Version: 9999.0-empty-to-avoid-conflict-with-guava, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- log4j-api (Version: 2.21.1, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- log4j-core (Version: 2.21.1, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- log4j-jul (Version: 2.21.1, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- log4j-slf4j2-impl (Version: 2.21.1, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- logging-interceptor (Version: 4.12.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-analyzers-common (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-backward-codecs (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-core (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-grouping (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-highlighter (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-join (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-memory (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-misc (Version: 8.11.1, License: [Apache License, Version 
2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-queries (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-queryparser (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-sandbox (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-spatial3d (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lucene-suggest (Version: 8.11.1, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- lz4-java (Version: 1.8.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- mapper-extras-client (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- micrometer-commons (Version: 1.10.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- micrometer-core (Version: 1.10.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- micrometer-jakarta9 (Version: 1.12.4, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- micrometer-observation (Version: 1.10.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- micrometer-registry-prometheus (Version: 1.10.3, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- netty-buffer (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec-http (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec-http2 (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-codec-socks (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-common (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-handler (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-handler-proxy (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-resolver (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-tcnative-boringssl-static (Version: 2.0.61.Final, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- netty-tcnative-classes (Version: 2.0.61.Final, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- netty-transport (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- netty-transport-native-unix-common (Version: 4.1.100.Final, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- nimbus-jose-jwt (Version: 9.37.2, License: [The Apache Software License, Version 
2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- ognl (Version: 3.3.4, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- okhttp (Version: 4.12.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- okio (Version: 3.6.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- okio-jvm (Version: 3.4.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- parent-join-client (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- perfmark-api (Version: 0.26.0, License: [Apache 2.0](https://opensource.org/licenses/Apache-2.0)) -- proto-google-common-protos (Version: 2.22.0, License: [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- protobuf-java (Version: 3.22.5, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -- rank-eval-client (Version: 7.17.16, License: [Elastic License 2.0](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.16/licenses/ELASTIC-LICENSE-2.0.txt)) -- simpleclient (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_common (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_tracer_common (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_tracer_otel (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- simpleclient_tracer_otel_agent (Version: 0.16.0, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- slf4j-api (Version: 2.0.12, License: [MIT License](http://www.opensource.org/licenses/mit-license.php)) -- snakeyaml (Version: 2.0, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- spring-aop (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-beans (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-actuator (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-actuator-autoconfigure (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-autoconfigure (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-configuration-processor (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-actuator (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-json (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-log4j2 (Version: 3.2.4, License: [Apache License, Version 
2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-oauth2-resource-server (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-security (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-thymeleaf (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-tomcat (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-validation (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-boot-starter-web (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-context (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-context-support (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-core (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-data-commons (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-data-keyvalue (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-data-redis (Version: 3.2.4, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-expression (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-jcl (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-ldap-core (Version: 3.2.2, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-oxm (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-config (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-core (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-crypto (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-ldap (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-oauth2-core (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-oauth2-jose (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-oauth2-resource-server (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-security-web (Version: 6.2.3, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-session-core (Version: 3.2.2, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-session-data-redis (Version: 3.2.2, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-test (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-tx (Version: 6.1.5, License: [Apache License, 
Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-web (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- spring-webmvc (Version: 6.1.5, License: [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)) -- springdoc-openapi-starter-common (Version: 2.0.4, License: [The Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- springdoc-openapi-starter-webmvc-api (Version: 2.0.4, License: [The Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- springdoc-openapi-starter-webmvc-ui (Version: 2.0.4, License: [The Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- swagger-annotations-jakarta (Version: 2.2.8, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html)) -- swagger-core-jakarta (Version: 2.2.8, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html)) -- swagger-models-jakarta (Version: 2.2.8, License: [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html)) -- swagger-ui (Version: 4.18.1, License: [Apache 2.0](https://github.com/swagger-api/swagger-ui)) -- t-digest (Version: 3.2, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- testcontainers (Version: 1.19.7, License: [MIT](http://opensource.org/licenses/MIT)) -- thymeleaf (Version: 3.1.2.RELEASE, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- thymeleaf-spring6 (Version: 3.1.2.RELEASE, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) -- tomcat-embed-core (Version: 10.1.19, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- tomcat-embed-el (Version: 10.1.19, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- tomcat-embed-websocket (Version: 10.1.19, License: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- unbescape (Version: 1.1.6.RELEASE, License: [The Apache Software License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt)) -- unboundid-ldapsdk (Version: 6.0.8, License: [Apache License, Version 2](http://www.apache.org/licenses/LICENSE-2.0)) -- zeebe-test-container (Version: 3.6.0, License: [The Apache Software License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)) - - - - - -### Tasklist Dependencies (Front end) - -- @apollo/client (Version: 3.3.12, License: [MIT](https://opensource.org/licenses/MIT)) -- @babel/runtime (Version: 7.13.10, License: [MIT](https://opensource.org/licenses/MIT)) -- @camunda-cloud/common-ui-react (Version: 0.0.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- @camunda-cloud/common-ui (Version: 0.0.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- @graphql-typed-document-node/core (Version: 3.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -- @types/ungap\_\_global-this (Version: 0.3.1, License: [MIT](https://opensource.org/licenses/MIT)) -- @types/zen-observable (Version: 0.8.2, License: [MIT](https://opensource.org/licenses/MIT)) -- @ungap/global-this (Version: 0.4.4, License: [ISC](https://opensource.org/licenses/ISC)) -- @wry/context (Version: 0.5.4, License: [MIT](https://opensource.org/licenses/MIT)) -- @wry/equality (Version: 0.3.4, License: [MIT](https://opensource.org/licenses/MIT)) -- @wry/trie (Version: 
0.2.2, License: [MIT](https://opensource.org/licenses/MIT)) -- date-fns (Version: 2.19.0, License: [MIT](https://opensource.org/licenses/MIT)) -- fast-json-stable-stringify (Version: 2.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -- final-form-arrays (Version: 3.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -- final-form (Version: 4.20.2, License: [MIT](https://opensource.org/licenses/MIT)) -- graphql-tag (Version: 2.12.3, License: [MIT](https://opensource.org/licenses/MIT)) -- graphql (Version: 15.5.0, License: [MIT](https://opensource.org/licenses/MIT)) -- history (Version: 4.10.1, License: [MIT](https://opensource.org/licenses/MIT)) -- hoist-non-react-statics (Version: 3.3.2, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -- isarray (Version: 0.0.1, License: [MIT](https://opensource.org/licenses/MIT)) -- js-tokens (Version: 4.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- loose-envify (Version: 1.4.0, License: [MIT](https://opensource.org/licenses/MIT)) -- mini-create-react-context (Version: 0.4.1, License: [MIT](https://opensource.org/licenses/MIT)) -- mobx-react-lite (Version: 3.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -- mobx (Version: 6.1.8, License: [MIT](https://opensource.org/licenses/MIT)) -- object-assign (Version: 4.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -- optimism (Version: 0.14.1, License: [MIT](https://opensource.org/licenses/MIT)) -- path-to-regexp (Version: 1.8.0, License: [MIT](https://opensource.org/licenses/MIT)) -- polished (Version: 4.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -- prop-types (Version: 15.7.2, License: [MIT](https://opensource.org/licenses/MIT)) -- react-dom (Version: 17.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -- react-final-form-arrays (Version: 3.1.3, License: [MIT](https://opensource.org/licenses/MIT)) -- react-final-form (Version: 6.5.3, License: [MIT](https://opensource.org/licenses/MIT)) -- react-is (Version: 16.13.1, License: [MIT](https://opensource.org/licenses/MIT)) -- react-router-dom (Version: 5.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -- react-router (Version: 5.2.0, License: [MIT](https://opensource.org/licenses/MIT)) -- react-textarea-autosize (Version: 8.3.2, License: [MIT](https://opensource.org/licenses/MIT)) -- react (Version: 17.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -- regenerator-runtime (Version: 0.13.7, License: [MIT](https://opensource.org/licenses/MIT)) -- resolve-pathname (Version: 3.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- scheduler (Version: 0.20.2, License: [MIT](https://opensource.org/licenses/MIT)) -- symbol-observable (Version: 2.0.3, License: [MIT](https://opensource.org/licenses/MIT)) -- tiny-invariant (Version: 1.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -- tiny-warning (Version: 1.0.3, License: [MIT](https://opensource.org/licenses/MIT)) -- ts-essentials (Version: 2.0.12, License: [MIT](https://opensource.org/licenses/MIT)) -- ts-invariant (Version: 0.6.2, License: [MIT](https://opensource.org/licenses/MIT)) -- tslib (Version: 1.14.1, License: [0BSD](https://opensource.org/licenses/0BSD)) -- tslib (Version: 2.1.0, License: [0BSD](https://opensource.org/licenses/0BSD)) -- use-composed-ref (Version: 1.1.0, License: [MIT](https://opensource.org/licenses/MIT)) -- use-isomorphic-layout-effect (Version: 1.1.1, License: [MIT](https://opensource.org/licenses/MIT)) -- use-latest (Version: 1.2.0, License: 
[MIT](https://opensource.org/licenses/MIT)) -- value-equal (Version: 1.0.1, License: [MIT](https://opensource.org/licenses/MIT)) -- zen-observable (Version: 0.8.15, License: [MIT](https://opensource.org/licenses/MIT)) - -### Tasklist Dependencies (Back end) - -- auth0 (Version: 1.28.0, License: [MIT](https://opensource.org/licenses/MIT)) -- java-jwt (Version: 3.13.0, License: [MIT](https://opensource.org/licenses/MIT)) -- jwks-rsa (Version: 0.15.0, License: [MIT](https://opensource.org/licenses/MIT)) -- mvc-auth-commons (Version: 1.6.0, License: [MIT](https://opensource.org/licenses/MIT)) -- hppc (Version: 0.7.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-annotations (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-core (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-databind (Version: 2.11.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-dataformat-cbor (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-dataformat-smile (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-dataformat-yaml (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-datatype-jdk8 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-datatype-jsr310 (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-module-kotlin (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jackson-module-parameter-names (Version: 2.11.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- classmate (Version: 1.5.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- compiler (Version: 0.9.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jcip-annotations (Version: 1.0-1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- annotations (Version: 4.1.1.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- proto-google-common-protos (Version: 1.17.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jsr305 (Version: 3.0.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- gson (Version: 2.8.6, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- error_prone_annotations (Version: 2.3.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- failureaccess (Version: 1.0.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- guava-annotations (Version: r03, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- guava (Version: 30.0-jre, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- listenablefuture (Version: 9999.0-empty-to-avoid-conflict-with-guava, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- j2objc-annotations (Version: 1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- protobuf-java (Version: 3.14.0, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -- graphql-java-kickstart (Version: 10.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- graphql-java-servlet (Version: 10.0.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- graphql-java-tools (Version: 6.3.0, License: 
[MIT](https://opensource.org/licenses/MIT)) -- graphql-kickstart-spring-boot-autoconfigure-tools (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- graphql-kickstart-spring-boot-starter-tools (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- graphql-kickstart-spring-support (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- graphql-spring-boot-autoconfigure (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- graphql-spring-boot-starter (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- playground-spring-boot-autoconfigure (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- playground-spring-boot-starter (Version: 8.0.0, License: [MIT](https://opensource.org/licenses/MIT)) -- graphql-java (Version: 15.0, License: [MIT](https://opensource.org/licenses/MIT)) -- java-dataloader (Version: 2.2.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- nimbus-jose-jwt (Version: 9.1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- logging-interceptor (Version: 3.14.9, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- okhttp (Version: 3.14.9, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- okio (Version: 1.17.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- t-digest (Version: 3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- commons-codec (Version: 1.15, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- commons-logging (Version: 1.1.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-api (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-context (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-core (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-netty (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-protobuf-lite (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-protobuf (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- grpc-stub (Version: 1.34.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- micrometer-core (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- micrometer-registry-prometheus (Version: 1.6.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-buffer (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-codec-http2 (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-codec-http (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-codec-socks (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-codec (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-common (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-handler-proxy (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-handler (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-resolver (Version: 4.1.55.Final, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-tcnative-boringssl-static (Version: 2.0.35.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- netty-transport (Version: 4.1.55.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- perfmark-api (Version: 0.19.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- simpleclient (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- simpleclient_common (Version: 0.9.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- zeebe-bpmn-model (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- zeebe-client-java (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- zeebe-gateway-protocol-impl (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- zeebe-gateway-protocol (Version: 0.26.0, License: [Zeebe Community License v1.0](https://camunda.com/legal/terms/cloud-terms-and-conditions/zeebe-community-license-v1-0/)) -- zeebe-protocol (Version: 0.26.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- zeebe-util (Version: 0.26.0, License: [Zeebe Community License v1.0](https://camunda.com/legal/terms/cloud-terms-and-conditions/zeebe-community-license-v1-0/)) -- jakarta.annotation-api (Version: 1.3.5, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -- jakarta.validation-api (Version: 2.0.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- javax.servlet-api (Version: 4.0.1, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -- javax.websocket-api (Version: 1.1, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -- joda-time (Version: 2.10.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jopt-simple (Version: 5.0.2, License: [MIT](https://opensource.org/licenses/MIT)) -- agrona (Version: 1.8.0, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- antlr4-runtime (Version: 4.7.2, License: [BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)) -- commons-lang3 (Version: 3.11, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- httpasyncclient (Version: 4.1.4, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- httpclient (Version: 4.5.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- httpcore-nio (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- httpcore (Version: 4.4.14, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- log4j-api (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- log4j-core (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- log4j-jul (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- log4j-slf4j-impl (Version: 2.13.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-analyzers-common (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-backward-codecs (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-core (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-grouping (Version: 7.7.3, License: 
[Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-highlighter (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-join (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-memory (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-misc (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-queries (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-queryparser (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-sandbox (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-spatial-extras (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-spatial3d (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-spatial (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lucene-suggest (Version: 7.7.3, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- tomcat-embed-core (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- tomcat-embed-websocket (Version: 9.0.41, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- attoparser (Version: 2.0.5.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- camunda-xml-model (Version: 7.14.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- checker-qual (Version: 3.5.0, License: [MIT](https://opensource.org/licenses/MIT)) -- animal-sniffer-annotations (Version: 1.18, License: [MIT](https://opensource.org/licenses/MIT)) -- elasticsearch-rest-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- elasticsearch-rest-high-level-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- aggs-matrix-stats-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- lang-mustache-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- parent-join-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- rank-eval-client (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- elasticsearch-cli (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- elasticsearch-core (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- elasticsearch-secure-sm (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- elasticsearch-x-content (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- elasticsearch (Version: 6.8.13, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jna (Version: 5.5.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- jakarta.el (Version: 3.0.3, License: [EPL-2.0](https://www.eclipse.org/legal/epl-2.0/)) -- javax.json (Version: 1.1.4, License: [Dual license consisting of the CDDL v1.1 and GPL v2](https://oss.oracle.com/licenses/CDDL+GPL-1.1)) -- HdrHistogram (Version: 2.1.9, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -- hibernate-validator (Version: 6.1.6.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) 
-- jboss-logging (Version: 3.4.1.Final, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- kotlin-reflect (Version: 1.4.21, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- kotlin-stdlib-common (Version: 1.4.21, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- kotlin-stdlib (Version: 1.4.21, License: The [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- kotlinx-coroutines-core (Version: 1.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- kotlinx-coroutines-jdk8 (Version: 1.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- kotlinx-coroutines-reactive (Version: 1.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- annotations (Version: 13.0, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- LatencyUtils (Version: 2.0.3, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -- reactive-streams (Version: 1.0.3, License: [CC0](https://creativecommons.org/publicdomain/zero/1.0/)) -- jul-to-slf4j (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -- slf4j-api (Version: 1.7.30, License: [MIT](https://opensource.org/licenses/MIT)) -- spring-boot-actuator-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-actuator (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-autoconfigure (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-actuator (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-json (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-log4j2 (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-oauth2-resource-server (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-security (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-thymeleaf (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-tomcat (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-validation (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-web (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter-websocket (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot-starter (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-boot (Version: 2.4.1, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-security-config (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-security-core (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-security-oauth2-core (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-security-oauth2-jose (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- spring-security-oauth2-resource-server (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0)) -- 
spring-security-web (Version: 5.4.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-aop (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-beans (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-context (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-core (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-expression (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-jcl (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-messaging (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-web (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-webmvc (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- spring-websocket (Version: 5.3.2, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- thymeleaf-extras-java8time (Version: 3.0.4.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- thymeleaf-spring5 (Version: 3.0.11.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- thymeleaf (Version: 3.0.11.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- unbescape (Version: 1.1.6.RELEASE, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-- snakeyaml (Version: 1.27, License: [Apache-2.0](https://opensource.org/licenses/Apache-2.0))
-
-
-
-
-
-:::note
-
-Identity is only available for Camunda 8 Self-Managed at this time.
-
-:::
-
-### Identity Dependencies (Front end)
-
-This section covers third-party libraries used by the Identity frontend.
-All of these libraries are required for core functionality.
- -- [@babel/code-frame@7.16.0](https://babel.dev/team) (MIT) -- [@babel/generator@7.16.0](https://babel.dev/team) (MIT) -- [@babel/helper-annotate-as-pure@7.16.0](https://babel.dev/team) (MIT) -- [@babel/helper-function-name@7.16.0](https://babel.dev/team) (MIT) -- [@babel/helper-get-function-arity@7.16.0](https://babel.dev/team) (MIT) -- [@babel/helper-hoist-variables@7.16.0](https://babel.dev/team) (MIT) -- [@babel/helper-module-imports@7.16.0](https://babel.dev/team) (MIT) -- [@babel/helper-split-export-declaration@7.16.0](https://babel.dev/team) (MIT) -- [@babel/helper-validator-identifier@7.15.7](https://babel.dev/team) (MIT) -- [@babel/highlight@7.16.0](https://babel.dev/team) (MIT) -- [@babel/parser@7.16.3](https://babel.dev/team) (MIT) -- [@babel/runtime@7.16.3](https://babel.dev/team) (MIT) -- [@babel/runtime@7.16.7](https://babel.dev/team) (MIT) -- [@babel/runtime@7.18.6](https://babel.dev/team) (MIT) -- [@babel/runtime@7.20.1](https://babel.dev/team) (MIT) -- [@babel/runtime@7.20.6](https://babel.dev/team) (MIT) -- [@babel/template@7.16.0](https://babel.dev/team) (MIT) -- [@babel/traverse@7.16.3](https://babel.dev/team) (MIT) -- [@babel/types@7.16.0](https://babel.dev/team) (MIT) -- [@camunda/camunda-composite-components@0.0.30](https://www.npmjs.com/package/@camunda/camunda-composite-components@0.0.30) (Apache-2.0) -- [@carbon/colors@11.13.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/colors@11.7.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/feature-flags@0.9.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/grid@11.12.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/grid@11.7.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/icon-helpers@10.34.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/icons-react@11.10.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/layout@11.12.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/layout@11.7.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/motion@11.10.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/motion@11.5.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/react@1.16.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/styles@1.16.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/telemetry@0.1.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/themes@11.11.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/themes@11.17.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/type@11.11.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/type@11.16.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@emotion/is-prop-valid@1.1.2](https://github.com/emotion-js/emotion/tree/main/packages/is-prop-valid) (MIT) -- [@emotion/memoize@0.7.5](https://github.com/emotion-js/emotion/tree/master/packages/memoize) (MIT) -- [@emotion/stylis@0.8.5](https://github.com/emotion-js/emotion/tree/master/packages/stylis) (MIT) -- [@emotion/unitless@0.7.5](https://github.com/emotion-js/emotion/tree/master/packages/unitless) (MIT) -- [@ibm/plex@6.0.0-next.6](https://github.com/ibm/plex) (OFL-1.1) -- [@ibm/plex@6.2.0](https://github.com/ibm/plex) (OFL-1.1) -- 
[@remix-run/router@1.4.0](https://github.com/remix-run/react-router) (MIT) -- [@types/cookie@0.3.3](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [ansi-styles@3.2.1](https://github.com/chalk/ansi-styles) (MIT) -- [anymatch@3.1.2](https://github.com/es128) (ISC) -- [asynckit@0.4.0](https://github.com/alexindigo/asynckit) (MIT) -- [axios@1.3.4](https://github.com/axios/axios) (MIT) -- [babel-plugin-styled-components@1.13.3](https://github.com/styled-components/babel-plugin-styled-components) (MIT) -- [babel-plugin-syntax-jsx@6.18.0](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-jsx) (MIT) -- [binary-extensions@2.2.0](https://github.com/sindresorhus/binary-extensions) (MIT) -- [braces@3.0.2](https://github.com/jonschlinkert) (MIT) -- [camelize@1.0.0](http://substack.net) (MIT) -- [chalk@2.4.2](https://github.com/chalk/chalk) (MIT) -- [chokidar@3.5.3](https://paulmillr.com) (MIT) -- [classnames@2.3.2](https://github.com/JedWatson/classnames) (MIT) -- [color-convert@1.9.3](https://github.com/Qix-/color-convert) (MIT) -- [color-convert@2.0.1](https://github.com/Qix-/color-convert) (MIT) -- [color-name@1.1.3](https://github.com/dfcreative/color-name) (MIT) -- [color-name@1.1.4](https://github.com/colorjs/color-name) (MIT) -- [color-string@1.9.1](https://github.com/Qix-/color-string) (MIT) -- [color@4.2.3](https://github.com/Qix-/color) (MIT) -- [combined-stream@1.0.8](http://debuggable.com/) (MIT) -- [compute-scroll-into-view@1.0.17](https://github.com/stipsan/compute-scroll-into-view) (MIT) -- [cookie@0.4.1](https://github.com/jshttp/cookie) (MIT) -- [copy-to-clipboard@3.3.1](https://github.com/sudodoki/copy-to-clipboard) (MIT) -- [copy-to-clipboard@3.3.3](https://github.com/sudodoki/copy-to-clipboard) (MIT) -- [css-color-keywords@1.0.0](https://github.com/sonicdoe/css-color-keywords) (ISC) -- [css-to-react-native@3.0.0](https://github.com/styled-components/css-to-react-native) (MIT) -- [csstype@3.0.9](https://github.com/frenic/csstype) (MIT) -- [debug@4.3.2](https://github.com/visionmedia/debug) (MIT) -- [delayed-stream@1.0.0](http://debuggable.com/) (MIT) -- [dom-helpers@5.2.1](https://github.com/react-bootstrap/dom-helpers) (MIT) -- [downshift@5.2.1](http://kentcdodds.com/) (MIT) -- [escape-string-regexp@1.0.5](https://github.com/sindresorhus/escape-string-regexp) (MIT) -- [fill-range@7.0.1](https://github.com/jonschlinkert) (MIT) -- [flatpickr@4.6.9](https://github.com/chmln/flatpickr) (MIT) -- [follow-redirects@1.15.2](https://ruben.verborgh.org/) (MIT) -- [form-data@4.0.0](http://debuggable.com/) (MIT) -- [fsevents@2.3.2](https://github.com/fsevents/fsevents) (MIT) -- [glob-parent@5.1.2](https://gulpjs.com/) (ISC) -- [globals@11.12.0](https://github.com/sindresorhus/globals) (MIT) -- [has-flag@3.0.0](https://github.com/sindresorhus/has-flag) (MIT) -- [hoist-non-react-statics@3.3.2](https://github.com/mridgway/hoist-non-react-statics) (BSD-3-Clause) -- [html-parse-stringify@3.0.1](https://github.com/henrikjoreteg/html-parse-stringify) (MIT) -- [i18next-browser-languagedetector@7.0.1](https://github.com/jamuhl) (MIT) -- [i18next-xhr-backend@3.2.2](https://github.com/jamuhl) (MIT) -- [i18next@22.4.13](https://github.com/jamuhl) (MIT) -- [immutable@4.0.0](https://github.com/leebyron) (MIT) -- [invariant@2.2.4](https://github.com/zertosh/invariant) (MIT) -- [is-arrayish@0.3.2](http://github.com/qix-) (MIT) -- [is-binary-path@2.1.0](https://github.com/sindresorhus/is-binary-path) (MIT) -- [is-extglob@2.1.1](https://github.com/jonschlinkert) (MIT) -- 
[is-glob@4.0.3](https://github.com/jonschlinkert) (MIT) -- [is-number@7.0.0](https://github.com/jonschlinkert) (MIT) -- [js-tokens@4.0.0](https://github.com/lydell/js-tokens) (MIT) -- [jsesc@2.5.2](https://mathiasbynens.be/) (MIT) -- [lodash.debounce@4.0.8](http://allyoucanleet.com/) (MIT) -- [lodash.findlast@4.6.0](http://allyoucanleet.com/) (MIT) -- [lodash.isequal@4.5.0](http://allyoucanleet.com/) (MIT) -- [lodash.omit@4.5.0](http://allyoucanleet.com/) (MIT) -- [lodash.throttle@4.1.1](http://allyoucanleet.com/) (MIT) -- [lodash@4.17.21](https://github.com/lodash/lodash) (MIT) -- [loose-envify@1.4.0](https://github.com/zertosh/loose-envify) (MIT) -- [mime-db@1.51.0](https://github.com/jshttp/mime-db) (MIT) -- [mime-types@2.1.34](https://github.com/jshttp/mime-types) (MIT) -- [ms@2.1.2](https://github.com/zeit/ms) (MIT) -- [normalize-path@3.0.0](https://github.com/jonschlinkert) (MIT) -- [object-assign@4.1.1](https://github.com/sindresorhus/object-assign) (MIT) -- [picomatch@2.3.0](https://github.com/jonschlinkert) (MIT) -- [postcss-value-parser@4.1.0](https://github.com/TrySound/postcss-value-parser) (MIT) -- [prop-types@15.7.2](https://github.com/facebook/prop-types) (MIT) -- [prop-types@15.8.1](https://github.com/facebook/prop-types) (MIT) -- [proxy-from-env@1.1.0](https://robwu.nl/) (MIT) -- [react-debounced@1.1.2](https://github.com/dlavrenuek/react-debounced) (MIT) -- [react-dom@18.2.0](https://github.com/facebook/react) (MIT) -- [react-i18next@12.2.0](https://github.com/jamuhl) (MIT) -- [react-is@16.13.1](https://github.com/facebook/react) (MIT) -- [react-is@17.0.2](https://github.com/facebook/react) (MIT) -- [react-is@18.2.0](https://github.com/facebook/react) (MIT) -- [react-router-dom@6.9.0](https://github.com/remix-run/react-router) (MIT) -- [react-router@6.9.0](https://github.com/remix-run/react-router) (MIT) -- [react-transition-group@4.4.5](https://github.com/reactjs/react-transition-group) (BSD-3-Clause) -- [react@18.2.0](https://github.com/facebook/react) (MIT) -- [readdirp@3.6.0](https://github.com/paulmillr/readdirp) (MIT) -- [regenerator-runtime@0.13.10](https://github.com/facebook/regenerator/tree/master/packages/runtime) (MIT) -- [regenerator-runtime@0.13.11](https://github.com/facebook/regenerator/tree/main/packages/runtime) (MIT) -- [regenerator-runtime@0.13.9](https://github.com/facebook/regenerator/tree/master/packages/runtime) (MIT) -- [resize-observer-polyfill@1.5.1](https://github.com/que-etc/resize-observer-polyfill) (MIT) -- [sass@1.60.0](https://github.com/nex3) (MIT) -- [scheduler@0.23.0](https://github.com/facebook/react) (MIT) -- [shallowequal@1.1.0](https://github.com/dashed/shallowequal) (MIT) -- [simple-swizzle@0.2.2](http://github.com/qix-) (MIT) -- [source-map-js@1.0.2](https://github.com/7rulnik/source-map-js) (BSD-3-Clause) -- [source-map@0.5.7](https://github.com/mozilla/source-map) (BSD-3-Clause) -- [styled-components@5.3.9](https://github.com/styled-components/styled-components) (MIT) -- [supports-color@5.5.0](https://github.com/chalk/supports-color) (MIT) -- [to-fast-properties@2.0.0](https://github.com/sindresorhus/to-fast-properties) (MIT) -- [to-regex-range@5.0.1](https://github.com/jonschlinkert) (MIT) -- [toggle-selection@1.0.6](https://github.com/sudodoki/toggle-selection) (MIT) -- [universal-cookie@4.0.4](https://github.com/reactivestack/cookies) (MIT) -- [use-resize-observer@6.1.0](https://github.com/ZeeCoder/use-resize-observer) (MIT) -- [void-elements@3.1.0](https://github.com/pugjs/void-elements) (MIT) -- 
[wicg-inert@3.1.1](https://github.com/WICG/inert) (Custom: https://travis-ci.org/WICG/inert.svg) -- [window-or-global@1.0.1](https://github.com/purposeindustries/window-or-global) (MIT) - -### Identity Dependencies (Back end) - -This section covers third-party libraries used by the Identity backend. -All of these libraries are required for core functionality. - -- [com.auth0:auth0:1.44.2](https://github.com/auth0/auth0-java) (The MIT License (MIT)) -- [com.auth0:java-jwt:4.3.0](https://github.com/auth0/java-jwt) (The MIT License (MIT)) -- [com.auth0:jwks-rsa:0.22.0](https://github.com/auth0/jwks-rsa-java) (The MIT License (MIT)) -- [com.fasterxml:classmate:1.5.1](https://github.com/FasterXML/java-classmate) (Apache License, Version 2.0) -- [com.fasterxml.jackson.core:jackson-annotations:2.14.2](https://github.com/FasterXML/jackson) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.core:jackson-core:2.14.2](https://github.com/FasterXML/jackson-core) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.core:jackson-databind:2.14.2](https://github.com/FasterXML/jackson) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.14.2](https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.14.2](https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.14.2](https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.14.2](https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.14.2](https://github.com/FasterXML/jackson-modules-base) (The Apache Software License, Version 2.0) -- [com.fasterxml.jackson.module:jackson-module-parameter-names:2.14.2](https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names) (The Apache Software License, Version 2.0) -- [com.github.fge:btf:1.2](https://github.com/fge/btf) (Apache Software License, version 2.0) (Lesser General Public License, version 3 or greater) -- [com.github.fge:jackson-coreutils:1.6](https://github.com/fge/jackson-coreutils) (Apache Software License, version 2.0) (Lesser General Public License, version 3 or greater) -- [com.github.fge:json-patch:1.9](https://github.com/fge/json-patch) (Apache Software License, version 2.0) (Lesser General Public License, version 3 or greater) -- [com.github.fge:msg-simple:1.1](https://github.com/fge/msg-simple) (Apache Software License, version 2.0) (Lesser General Public License, version 3 or greater) -- [com.github.stephenc.jcip:jcip-annotations:1.0-1](http://stephenc.github.com/jcip-annotations) (Apache License, Version 2.0) -- [com.google.code.findbugs:jsr305:3.0.2](http://findbugs.sourceforge.net/) (The Apache Software License, Version 2.0) -- [com.google.errorprone:error_prone_annotations:2.3.4](http://nexus.sonatype.org/oss-repository-hosting.html/error_prone_parent/error_prone_annotations) (Apache 2.0) -- [com.google.guava:failureaccess:1.0.1](https://github.com/google/guava/failureaccess) (The Apache Software License, Version 2.0) -- [com.google.guava:guava:30.0-jre](https://github.com/google/guava/guava) (Apache 
License, Version 2.0) -- [com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava](https://github.com/google/guava/listenablefuture) (The Apache Software License, Version 2.0) -- [com.google.j2objc:j2objc-annotations:1.3](https://github.com/google/j2objc/) (The Apache Software License, Version 2.0) -- [com.squareup.okhttp3:logging-interceptor:4.10.0](https://square.github.io/okhttp/) (The Apache Software License, Version 2.0) -- [com.squareup.okhttp3:okhttp:4.10.0](https://square.github.io/okhttp/) (The Apache Software License, Version 2.0) -- [com.squareup.okio:okio-jvm:3.0.0](https://github.com/square/okio/) (The Apache Software License, Version 2.0) -- [com.sun.activation:jakarta.activation:1.2.1](https://github.com/eclipse-ee4j/jaf/jakarta.activation) (EDL 1.0) -- [com.sun.istack:istack-commons-runtime:4.1.1](https://projects.eclipse.org/projects/ee4j/istack-commons/istack-commons-runtime) (Eclipse Distribution License - v 1.0) -- [com.sun.mail:jakarta.mail:1.6.5](http://eclipse-ee4j.github.io/mail/jakarta.mail) (EDL 1.0) (EPL 2.0) (GPL2 w/ CPE) -- [com.zaxxer:HikariCP:5.0.1](https://github.com/brettwooldridge/HikariCP) (The Apache Software License, Version 2.0) -- [commons-codec:commons-codec:1.15](https://commons.apache.org/proper/commons-codec/) (Apache License, Version 2.0) -- [commons-io:commons-io:2.5](http://commons.apache.org/proper/commons-io/) (Apache License, Version 2.0) -- [dev.failsafe:failsafe:3.3.1](https://failsafe.dev/failsafe) (Apache License, Version 2.0) -- [io.micrometer:micrometer-commons:1.10.5](https://github.com/micrometer-metrics/micrometer) (The Apache Software License, Version 2.0) -- [io.micrometer:micrometer-core:1.10.5](https://github.com/micrometer-metrics/micrometer) (The Apache Software License, Version 2.0) -- [io.micrometer:micrometer-observation:1.10.5](https://github.com/micrometer-metrics/micrometer) (The Apache Software License, Version 2.0) -- [io.micrometer:micrometer-registry-prometheus:1.10.5](https://github.com/micrometer-metrics/micrometer) (The Apache Software License, Version 2.0) -- [io.prometheus:simpleclient:0.16.0](http://github.com/prometheus/client_java/simpleclient) (The Apache Software License, Version 2.0) -- [io.prometheus:simpleclient_common:0.16.0](http://github.com/prometheus/client_java/simpleclient_common) (The Apache Software License, Version 2.0) -- [io.prometheus:simpleclient_tracer_common:0.16.0](http://github.com/prometheus/client_java/simpleclient_tracer/simpleclient_tracer_common) (The Apache Software License, Version 2.0) -- [io.prometheus:simpleclient_tracer_otel:0.16.0](http://github.com/prometheus/client_java/simpleclient_tracer/simpleclient_tracer_otel) (The Apache Software License, Version 2.0) -- [io.prometheus:simpleclient_tracer_otel_agent:0.16.0](http://github.com/prometheus/client_java/simpleclient_tracer/simpleclient_tracer_otel_agent) (The Apache Software License, Version 2.0) -- [jakarta.activation:jakarta.activation-api:2.1.1](https://github.com/jakartaee/jaf-api) (EDL 1.0) -- [jakarta.annotation:jakarta.annotation-api:2.1.1](https://projects.eclipse.org/projects/ee4j.ca) (EPL 2.0) (GPL2 w/ CPE) -- [jakarta.inject:jakarta.inject-api:2.0.0](https://github.com/eclipse-ee4j/injection-api) (The Apache Software License, Version 2.0) -- [jakarta.persistence:jakarta.persistence-api:3.1.0](https://github.com/eclipse-ee4j/jpa-api) (Eclipse Distribution License v. 1.0) (Eclipse Public License v. 
2.0) -- [jakarta.transaction:jakarta.transaction-api:2.0.1](https://projects.eclipse.org/projects/ee4j.jta) (EPL 2.0) (GPL2 w/ CPE) -- [jakarta.validation:jakarta.validation-api:3.0.2](https://beanvalidation.org) (Apache License 2.0) -- [jakarta.xml.bind:jakarta.xml.bind-api:4.0.0](https://github.com/eclipse-ee4j/jaxb-api/jakarta.xml.bind-api) (Eclipse Distribution License - v 1.0) -- [javax.cache:cache-api:1.1.1](https://github.com/jsr107/jsr107spec) (Apache License, Version 2.0) -- [net.bytebuddy:byte-buddy:1.12.23](https://bytebuddy.net/byte-buddy) (Apache License, Version 2.0) -- [net.jodah:failsafe:2.4.1](http://github.com/jhalterman/failsafe/) (Apache License, Version 2.0) -- [org.antlr:antlr4-runtime:4.10.1](http://www.antlr.org/antlr4-runtime) (The BSD License) -- [org.apache.commons:commons-collections4:4.4](https://commons.apache.org/proper/commons-collections/) (Apache License, Version 2.0) -- [org.apache.commons:commons-lang3:3.12.0](https://commons.apache.org/proper/commons-lang/) (Apache License, Version 2.0) -- [org.apache.httpcomponents:httpclient:4.5.14](http://hc.apache.org/httpcomponents-client-ga) (Apache License, Version 2.0) -- [org.apache.httpcomponents:httpcore:4.4.16](http://hc.apache.org/httpcomponents-core-ga) (Apache License, Version 2.0) -- [org.apache.james:apache-mime4j:0.6](http://james.apache.org/mime4j) (Apache License, Version 2.0) -- [org.apache.logging.log4j:log4j-api:2.19.0](https://logging.apache.org/log4j/2.x/log4j-api/) (Apache License, Version 2.0) -- [org.apache.logging.log4j:log4j-core:2.19.0](https://logging.apache.org/log4j/2.x/log4j-core/) (Apache License, Version 2.0) -- [org.apache.logging.log4j:log4j-jul:2.19.0](https://logging.apache.org/log4j/2.x/log4j-jul/) (Apache License, Version 2.0) -- [org.apache.logging.log4j:log4j-layout-template-json:2.20.0](https://logging.apache.org/log4j/2.x/log4j-layout-template-json/) (Apache License, Version 2.0) -- [org.apache.logging.log4j:log4j-slf4j2-impl:2.19.0](https://logging.apache.org/log4j/2.x/log4j-slf4j2-impl/) (Apache License, Version 2.0) -- [org.apache.tomcat.embed:tomcat-embed-core:10.1.7](https://tomcat.apache.org/) (Apache License, Version 2.0) -- [org.apache.tomcat.embed:tomcat-embed-el:10.1.7](https://tomcat.apache.org/) (Apache License, Version 2.0) -- [org.apache.tomcat.embed:tomcat-embed-websocket:10.1.7](https://tomcat.apache.org/) (Apache License, Version 2.0) -- [org.aspectj:aspectjweaver:1.9.19](https://www.eclipse.org/aspectj/) (Eclipse Public License - v 2.0) -- [org.checkerframework:checker-qual:3.5.0](https://checkerframework.org) (The MIT License) -- [org.eclipse.angus:angus-activation:2.0.0](https://github.com/eclipse-ee4j/angus-activation/angus-activation) (EDL 1.0) -- [org.ehcache:ehcache:3.10.8](http://ehcache.org) (The Apache Software License, Version 2.0) -- [org.glassfish.jaxb:jaxb-core:4.0.2](https://eclipse-ee4j.github.io/jaxb-ri/) (Eclipse Distribution License - v 1.0) -- [org.glassfish.jaxb:jaxb-runtime:4.0.2](https://eclipse-ee4j.github.io/jaxb-ri/) (Eclipse Distribution License - v 1.0) -- [org.glassfish.jaxb:txw2:4.0.2](https://eclipse-ee4j.github.io/jaxb-ri/) (Eclipse Distribution License - v 1.0) -- [org.hdrhistogram:HdrHistogram:2.1.12](http://hdrhistogram.github.io/HdrHistogram/) (BSD-2-Clause) (Public Domain, per Creative Commons CC0) -- [org.hibernate.common:hibernate-commons-annotations:6.0.6.Final](http://hibernate.org) (GNU Library General Public License v2.1 or later) -- [org.hibernate.orm:hibernate-core:6.1.7.Final](https://hibernate.org/orm) (GNU 
Library General Public License v2.1 or later) -- [org.hibernate.validator:hibernate-validator:8.0.0.Final](http://hibernate.org/validator/hibernate-validator) (Apache License 2.0) -- [org.jboss:jandex:2.4.2.Final](http://www.jboss.org/jandex) (Apache License, Version 2.0) -- [org.jboss.logging:jboss-logging:3.5.0.Final](http://www.jboss.org) (Apache License, version 2.0) -- [org.jboss.resteasy:resteasy-client:3.13.2.Final](http://rest-easy.org/resteasy-client) (Apache License 2.0) -- [org.jboss.resteasy:resteasy-jackson2-provider:3.13.2.Final](http://rest-easy.org/resteasy-jackson2-provider) (Apache License 2.0) -- [org.jboss.resteasy:resteasy-jaxb-provider:3.13.2.Final](http://rest-easy.org/resteasy-jaxb-provider) (Apache License 2.0) -- [org.jboss.resteasy:resteasy-jaxrs:3.13.2.Final](http://rest-easy.org/resteasy-jaxrs) (Apache License 2.0) -- [org.jboss.resteasy:resteasy-multipart-provider:3.13.2.Final](http://rest-easy.org/resteasy-multipart-provider) (Apache License 2.0) -- [org.jboss.spec.javax.annotation:jboss-annotations-api_1.3_spec:2.0.1.Final](https://github.com/jboss/jboss-jakarta-annotations-api_spec) (EPL 2.0) (GPL2 w/ CPE) -- [org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.1_spec:2.0.1.Final](http://www.jboss.org/jboss-jaxrs-api_2.1_spec) (EPL 2.0) (GPL2 w/ CPE) -- [org.jboss.spec.javax.xml.bind:jboss-jaxb-api_2.3_spec:2.0.0.Final](https://github.com/eclipse-ee4j/jaxb-api/jboss-jaxb-api_2.3_spec) (Eclipse Distribution License - v 1.0) -- [org.jetbrains:annotations:17.0.0](https://github.com/JetBrains/java-annotations) (The Apache Software License, Version 2.0) -- [org.jetbrains.kotlin:kotlin-stdlib:1.7.22](https://kotlinlang.org/) (The Apache License, Version 2.0) -- [org.jetbrains.kotlin:kotlin-stdlib-common:1.7.22](https://kotlinlang.org/) (The Apache License, Version 2.0) -- [org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.7.22](https://kotlinlang.org/) (The Apache License, Version 2.0) -- [org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.7.22](https://kotlinlang.org/) (The Apache License, Version 2.0) -- [org.keycloak:keycloak-admin-client:19.0.3](http://keycloak.org/keycloak-integration-parent/keycloak-admin-client) (Apache License, Version 2.0) -- [org.keycloak:keycloak-common:19.0.3](http://keycloak.org/keycloak-common) (Apache License, Version 2.0) -- [org.keycloak:keycloak-core:19.0.3](http://keycloak.org/keycloak-core) (Apache License, Version 2.0) -- [org.latencyutils:LatencyUtils:2.0.3](http://latencyutils.github.io/LatencyUtils/) (Public Domain, per Creative Commons CC0) -- [org.postgresql:postgresql:42.5.4](https://jdbc.postgresql.org) (BSD-2-Clause) -- [org.reactivestreams:reactive-streams:1.0.4](http://www.reactive-streams.org/) (MIT-0) -- [org.slf4j:slf4j-api:2.0.7](http://www.slf4j.org) (MIT License) -- [org.springframework:spring-aop:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-aspects:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-beans:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-context:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-core:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-expression:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- 
[org.springframework:spring-jcl:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-jdbc:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-orm:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-tx:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-web:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework:spring-webmvc:6.0.7](https://github.com/spring-projects/spring-framework) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-actuator:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-actuator-autoconfigure:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-autoconfigure:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-configuration-processor:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-actuator:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-aop:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-data-jpa:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-jdbc:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-json:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-log4j2:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-security:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-tomcat:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-validation:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.boot:spring-boot-starter-web:3.0.5](https://spring.io/projects/spring-boot) (Apache License, Version 2.0) -- [org.springframework.data:spring-data-commons:3.0.4](https://spring.io/projects/spring-data) (Apache License, Version 2.0) -- [org.springframework.data:spring-data-jpa:3.0.4](https://projects.spring.io/spring-data-jpa) (Apache License, Version 2.0) -- [org.springframework.security:spring-security-config:6.0.2](https://spring.io/projects/spring-security) (Apache License, Version 2.0) -- [org.springframework.security:spring-security-core:6.0.2](https://spring.io/projects/spring-security) (Apache License, Version 2.0) -- [org.springframework.security:spring-security-crypto:6.0.2](https://spring.io/projects/spring-security) (Apache License, Version 2.0) -- 
[org.springframework.security:spring-security-web:6.0.2](https://spring.io/projects/spring-security) (Apache License, Version 2.0) -- [org.yaml:snakeyaml:1.33](https://bitbucket.org/snakeyaml/snakeyaml) (Apache License, Version 2.0) - - - - - -### Optimize Dependencies (Front end) - -- [@babel/code-frame@7.18.6](https://babel.dev/team) (MIT) -- [@babel/code-frame@7.21.4](https://babel.dev/team) (MIT) -- [@babel/generator@7.21.4](https://babel.dev/team) (MIT) -- [@babel/helper-annotate-as-pure@7.18.6](https://babel.dev/team) (MIT) -- [@babel/helper-environment-visitor@7.18.9](https://babel.dev/team) (MIT) -- [@babel/helper-function-name@7.21.0](https://babel.dev/team) (MIT) -- [@babel/helper-hoist-variables@7.18.6](https://babel.dev/team) (MIT) -- [@babel/helper-module-imports@7.21.4](https://babel.dev/team) (MIT) -- [@babel/helper-split-export-declaration@7.18.6](https://babel.dev/team) (MIT) -- [@babel/helper-string-parser@7.19.4](https://babel.dev/team) (MIT) -- [@babel/helper-validator-identifier@7.19.1](https://babel.dev/team) (MIT) -- [@babel/highlight@7.18.6](https://babel.dev/team) (MIT) -- [@babel/parser@7.21.4](https://babel.dev/team) (MIT) -- [@babel/runtime@7.20.6](https://babel.dev/team) (MIT) -- [@babel/template@7.20.7](https://babel.dev/team) (MIT) -- [@babel/traverse@7.21.4](https://babel.dev/team) (MIT) -- [@babel/types@7.20.5](https://babel.dev/team) (MIT) -- [@babel/types@7.21.4](https://babel.dev/team) (MIT) -- [@bpmn-io/diagram-js-ui@0.2.2](https://github.com/smbea) (MIT) -- [@bpmn-io/dmn-migrate@0.4.3](https://github.com/bpmn-io/dmn-migrate) (MIT) -- [@bpmn-io/element-templates-icons-renderer@0.3.0](https://github.com/pinussilvestrus) (MIT) -- [@camunda/camunda-composite-components@0.0.34](https://www.npmjs.com/package/@camunda/camunda-composite-components@0.0.34) (Apache-2.0) -- [@carbon/colors@11.13.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/feature-flags@0.13.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/grid@11.12.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/icon-helpers@10.39.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/icons-react@11.17.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/layout@11.12.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/motion@11.10.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/react@1.24.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/styles@1.24.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/telemetry@0.1.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/themes@11.17.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@carbon/type@11.16.0](https://github.com/carbon-design-system/carbon) (Apache-2.0) -- [@emotion/is-prop-valid@1.2.0](https://github.com/emotion-js/emotion/tree/main/packages/is-prop-valid) (MIT) -- [@emotion/memoize@0.8.0](https://github.com/emotion-js/emotion/tree/main/packages/memoize) (MIT) -- [@emotion/stylis@0.8.5](https://github.com/emotion-js/emotion/tree/master/packages/stylis) (MIT) -- [@emotion/unitless@0.7.5](https://github.com/emotion-js/emotion/tree/master/packages/unitless) (MIT) -- [@ibm/plex@6.0.0-next.6](https://github.com/ibm/plex) (OFL-1.1) -- [@ibm/plex@6.1.1](https://github.com/ibm/plex) (OFL-1.1) -- [@jridgewell/gen-mapping@0.3.2](https://github.com/jridgewell/gen-mapping) (MIT) -- 
[@jridgewell/resolve-uri@3.1.0](https://github.com/jridgewell/resolve-uri) (MIT) -- [@jridgewell/set-array@1.1.2](https://github.com/jridgewell/set-array) (MIT) -- [@jridgewell/sourcemap-codec@1.4.14](https://github.com/jridgewell/sourcemap-codec) (MIT) -- [@jridgewell/trace-mapping@0.3.17](https://github.com/jridgewell/trace-mapping) (MIT) -- [@kurkle/color@0.3.2](https://github.com/kurkle/color) (MIT) -- [@lexical/clipboard@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/code@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/dragon@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/hashtag@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/history@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/html@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/link@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/list@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/mark@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/markdown@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/offset@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/overflow@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/plain-text@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/react@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/rich-text@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/selection@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/table@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/text@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/utils@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@lexical/yjs@0.7.6](https://github.com/facebook/lexical) (MIT) -- [@types/debug@4.1.7](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@types/hast@2.3.4](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@types/mdast@3.0.10](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@types/ms@0.7.31](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@types/prop-types@15.7.5](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@types/react@18.0.28](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@types/scheduler@0.16.2](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@types/unist@2.0.6](https://github.com/DefinitelyTyped/DefinitelyTyped) (MIT) -- [@ungap/structured-clone@1.0.2](https://github.com/ungap/structured-clone) (ISC) -- [ansi-styles@3.2.1](https://github.com/chalk/ansi-styles) (MIT) -- [anymatch@3.1.3](https://github.com/es128) (ISC) -- [babel-plugin-styled-components@2.0.7](https://github.com/styled-components/babel-plugin-styled-components) (MIT) -- [babel-plugin-syntax-jsx@6.18.0](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-jsx) (MIT) -- [bail@2.0.2](https://wooorm.com) (MIT) -- [binary-extensions@2.0.0](https://github.com/sindresorhus/binary-extensions) (MIT) -- [bpmn-js-disable-collapsed-subprocess@0.1.7](https://github.com/barmac) (MIT) -- [bpmn-js@11.4.1](https://github.com/nikku) (MIT\*) -- [bpmn-moddle@8.0.0](https://github.com/nikku) (MIT) -- [braces@3.0.2](https://github.com/jonschlinkert) (MIT) -- [camelize@1.0.1](http://substack.net) (MIT) -- [chalk@2.4.2](https://github.com/chalk/chalk) (MIT) -- [character-entities@2.0.2](https://wooorm.com) (MIT) -- [chart.js@4.2.1](https://github.com/chartjs/Chart.js) (MIT) -- 
[chartjs-plugin-datalabels@2.2.0](https://github.com/chartjs/chartjs-plugin-datalabels) (MIT) -- [chokidar@3.5.3](https://paulmillr.com) (MIT) -- [classnames@2.3.2](https://github.com/JedWatson/classnames) (MIT) -- [clsx@1.1.1](https://lukeed.com) (MIT) -- [clsx@1.2.1](https://lukeed.com) (MIT) -- [color-convert@1.9.3](https://github.com/Qix-/color-convert) (MIT) -- [color-convert@2.0.1](https://github.com/Qix-/color-convert) (MIT) -- [color-name@1.1.3](https://github.com/dfcreative/color-name) (MIT) -- [color-name@1.1.4](https://github.com/colorjs/color-name) (MIT) -- [color-string@1.9.1](https://github.com/Qix-/color-string) (MIT) -- [color@4.2.3](https://github.com/Qix-/color) (MIT) -- [comma-separated-tokens@2.0.3](https://wooorm.com) (MIT) -- [component-event@0.1.4](https://github.com/component/event) (MIT\*) -- [component-props@1.1.1](https://github.com/component/props) (MIT\*) -- [component-xor@0.0.4](https://github.com/component/xor) (MIT) -- [compute-scroll-into-view@1.0.17](https://github.com/stipsan/compute-scroll-into-view) (MIT) -- [copy-to-clipboard@3.3.3](https://github.com/sudodoki/copy-to-clipboard) (MIT) -- [css-color-keywords@1.0.0](https://github.com/sonicdoe/css-color-keywords) (ISC) -- [css-to-react-native@3.2.0](https://github.com/styled-components/css-to-react-native) (MIT) -- [css.escape@1.5.1](https://mathiasbynens.be/) (MIT) -- [csstype@3.1.1](https://github.com/frenic/csstype) (MIT) -- [date-fns@2.29.3](https://github.com/date-fns/date-fns) (MIT) -- [debounce@1.2.1](https://github.com/component/debounce) (MIT) -- [debug@4.3.4](https://github.com/debug-js/debug) (MIT) -- [decode-named-character-reference@1.0.2](https://wooorm.com) (MIT) -- [dequal@2.0.2](https://lukeed.com) (MIT) -- [dequal@2.0.3](https://lukeed.com) (MIT) -- [diagram-js-direct-editing@2.0.0](https://github.com/bpmn-io/diagram-js-direct-editing) (MIT) -- [diagram-js@11.9.1](https://github.com/nikku) (MIT) -- [didi@9.0.0](https://github.com/nikku/didi) (MIT) -- [diff@5.1.0](https://github.com/kpdecker/jsdiff) (BSD-3-Clause) -- [dmn-js-decision-table@14.1.1](https://github.com/bpmn-io/dmn-js) (MIT\*) -- [dmn-js-drd@14.0.2](https://github.com/bpmn-io/dmn-js) (MIT\*) -- [dmn-js-literal-expression@14.0.2](https://github.com/bpmn-io/dmn-js) (MIT\*) -- [dmn-js-shared@14.0.2](https://github.com/bpmn-io/dmn-js) (MIT\*) -- [dmn-js@14.1.1](https://github.com/SebastianStamm) (MIT\*) -- [dmn-moddle@10.0.0](https://github.com/SebastianStamm) (MIT) -- [dmn-moddle@8.0.4](https://github.com/SebastianStamm) (MIT) -- [dom-iterator@1.0.0](https://github.com/MatthewMueller/dom-iterator) (MIT) -- [domify@1.4.1](https://github.com/component/domify) (MIT) -- [downshift@5.2.1](http://kentcdodds.com/) (MIT) -- [escape-html@1.0.3](https://github.com/component/escape-html) (MIT) -- [escape-string-regexp@1.0.5](https://github.com/sindresorhus/escape-string-regexp) (MIT) -- [event-source-polyfill@1.0.31](https://github.com/Yaffle) (MIT) -- [extend@3.0.2](http://www.justmoon.net) (MIT) -- [fast-deep-equal@3.1.3](https://github.com/epoberezkin/fast-deep-equal) (MIT) -- [fill-range@7.0.1](https://github.com/jonschlinkert) (MIT) -- [fitty@2.3.6](https://pqina.nl/) (MIT) -- [flatpickr@4.6.9](https://github.com/chmln/flatpickr) (MIT) -- [focus-visible@5.2.0](https://github.com/WICG/focus-visible) (W3C) -- [fscreen@1.0.2](https://github.com/rafrex/fscreen) (MIT) -- [glob-parent@5.1.2](https://gulpjs.com/) (ISC) -- [globals@11.12.0](https://github.com/sindresorhus/globals) (MIT) -- 
[hammerjs@2.0.8](https://github.com/hammerjs/hammer.js) (MIT) -- [has-flag@3.0.0](https://github.com/sindresorhus/has-flag) (MIT) -- [hast-util-whitespace@2.0.0](https://wooorm.com) (MIT) -- [heatmap.js@2.0.5](https://www.patrick-wied.at/) (MIT\*) -- [history@4.10.1](https://github.com/ReactTraining/history) (MIT) -- [hoist-non-react-statics@3.3.0](https://github.com/mridgway/hoist-non-react-statics) (BSD-3-Clause) -- [hoist-non-react-statics@3.3.2](https://github.com/mridgway/hoist-non-react-statics) (BSD-3-Clause) -- [htm@3.1.1](https://github.com/developit/htm) (Apache-2.0) -- [ids@1.0.0](https://github.com/Nikku) (MIT) -- [immutability-helper@3.1.1](https://github.com/kolodny/immutability-helper) (MIT) -- [immutable@4.0.0](https://github.com/leebyron) (MIT) -- [inferno-shared@5.6.1](https://github.com/infernojs/inferno) (MIT) -- [inferno-vnode-flags@5.6.1](https://github.com/infernojs/inferno) (MIT) -- [inferno@5.6.2](https://github.com/infernojs/inferno) (MIT) -- [inherits-browser@0.1.0](https://github.com/nikku/inherits-browser) (ISC) -- [inherits@2.0.4](https://github.com/isaacs/inherits) (ISC) -- [inline-style-parser@0.1.1](https://github.com/remarkablemark/inline-style-parser) (MIT) -- [invariant@2.2.4](https://github.com/zertosh/invariant) (MIT) -- [is-arrayish@0.3.2](http://github.com/qix-) (MIT) -- [is-binary-path@2.1.0](https://github.com/sindresorhus/is-binary-path) (MIT) -- [is-buffer@2.0.5](https://feross.org) (MIT) -- [is-extglob@2.1.1](https://github.com/jonschlinkert) (MIT) -- [is-glob@4.0.3](https://github.com/jonschlinkert) (MIT) -- [is-number@7.0.0](https://github.com/jonschlinkert) (MIT) -- [is-plain-obj@4.1.0](https://sindresorhus.com) (MIT) -- [isarray@0.0.1](http://juliangruber.com) (MIT) -- [js-tokens@4.0.0](https://github.com/lydell/js-tokens) (MIT) -- [jsesc@2.5.2](https://mathiasbynens.be/) (MIT) -- [kleur@4.1.5](https://lukeed.com) (MIT) -- [lexical@0.7.6](https://github.com/facebook/lexical) (MIT) -- [lodash.debounce@4.0.8](http://allyoucanleet.com/) (MIT) -- [lodash.findlast@4.6.0](http://allyoucanleet.com/) (MIT) -- [lodash.isequal@4.5.0](http://allyoucanleet.com/) (MIT) -- [lodash.omit@4.5.0](http://allyoucanleet.com/) (MIT) -- [lodash.throttle@4.1.1](http://allyoucanleet.com/) (MIT) -- [lodash@4.17.21](https://github.com/lodash/lodash) (MIT) -- [loose-envify@1.4.0](https://github.com/zertosh/loose-envify) (MIT) -- [mdast-util-definitions@5.1.1](https://wooorm.com) (MIT) -- [mdast-util-from-markdown@1.2.0](https://wooorm.com) (MIT) -- [mdast-util-to-hast@12.2.4](https://wooorm.com) (MIT) -- [mdast-util-to-string@3.1.0](https://wooorm.com) (MIT) -- [micromark-core-commonmark@1.0.6](https://wooorm.com) (MIT) -- [micromark-factory-destination@1.0.0](https://wooorm.com) (MIT) -- [micromark-factory-label@1.0.2](https://wooorm.com) (MIT) -- [micromark-factory-space@1.0.0](https://wooorm.com) (MIT) -- [micromark-factory-title@1.0.2](https://wooorm.com) (MIT) -- [micromark-factory-whitespace@1.0.0](https://wooorm.com) (MIT) -- [micromark-util-character@1.1.0](https://wooorm.com) (MIT) -- [micromark-util-chunked@1.0.0](https://wooorm.com) (MIT) -- [micromark-util-classify-character@1.0.0](https://wooorm.com) (MIT) -- [micromark-util-combine-extensions@1.0.0](https://wooorm.com) (MIT) -- [micromark-util-decode-numeric-character-reference@1.0.0](https://wooorm.com) (MIT) -- [micromark-util-decode-string@1.0.2](https://wooorm.com) (MIT) -- [micromark-util-encode@1.0.1](https://wooorm.com) (MIT) -- [micromark-util-html-tag-name@1.1.0](https://wooorm.com) (MIT) -- 
[micromark-util-normalize-identifier@1.0.0](https://wooorm.com) (MIT) -- [micromark-util-resolve-all@1.0.0](https://wooorm.com) (MIT) -- [micromark-util-sanitize-uri@1.1.0](https://wooorm.com) (MIT) -- [micromark-util-subtokenize@1.0.2](https://wooorm.com) (MIT) -- [micromark-util-symbol@1.0.1](https://wooorm.com) (MIT) -- [micromark-util-types@1.0.2](https://wooorm.com) (MIT) -- [micromark@3.1.0](https://wooorm.com) (MIT) -- [min-dash@3.8.1](https://github.com/bpmn-io/min-dash) (MIT) -- [min-dash@4.0.0](https://github.com/bpmn-io/min-dash) (MIT) -- [min-dom@4.0.3](https://github.com/bpmn-io/min-dom) (MIT) -- [mini-create-react-context@0.4.0](https://github.com/StringEpsilon/mini-create-react-context) (MIT) -- [moddle-xml@10.0.0](https://github.com/Nikku) (MIT) -- [moddle-xml@8.0.2](https://github.com/Nikku) (MIT) -- [moddle-xml@9.0.5](https://github.com/Nikku) (MIT) -- [moddle@5.0.1](https://github.com/Nikku) (MIT) -- [moddle@5.0.2](https://github.com/Nikku) (MIT) -- [moddle@6.0.0](https://github.com/Nikku) (MIT) -- [mri@1.2.0](https://lukeed.com) (MIT) -- [ms@2.1.2](https://github.com/zeit/ms) (MIT) -- [normalize-path@3.0.0](https://github.com/jonschlinkert) (MIT) -- [object-assign@4.1.1](https://github.com/sindresorhus/object-assign) (MIT) -- [object-refs@0.3.0](https://github.com/Nikku) (MIT) -- [opencollective-postinstall@2.0.3](https://github.com/opencollective/opencollective-postinstall) (MIT) -- [path-intersection@2.2.1](https://github.com/nikku) (MIT) -- [path-to-regexp@1.7.0](https://github.com/pillarjs/path-to-regexp) (MIT) -- [picomatch@2.2.1](https://github.com/jonschlinkert) (MIT) -- [picomatch@2.2.2](https://github.com/jonschlinkert) (MIT) -- [picomatch@2.3.1](https://github.com/jonschlinkert) (MIT) -- [postcss-value-parser@4.2.0](https://github.com/TrySound/postcss-value-parser) (MIT) -- [preact@10.11.3](https://github.com/preactjs/preact) (MIT) -- [prismjs@1.29.0](https://github.com/PrismJS/prism) (MIT) -- [prop-types@15.8.1](https://github.com/facebook/prop-types) (MIT) -- [property-information@6.2.0](https://wooorm.com) (MIT) -- [react-date-range@1.4.0](https://github.com/hypeserver/react-date-range) (MIT) -- [react-dom@17.0.2](https://github.com/facebook/react) (MIT) -- [react-draggable@4.2.0](https://github.com/mzabriskie/react-draggable) (MIT) -- [react-error-boundary@3.1.4](https://github.com/bvaughn/react-error-boundary) (MIT) -- [react-full-screen@1.1.1](https://github.com/snakesilk/react-fullscreen) (MIT) -- [react-grid-layout@1.3.4](http://strml.net/) (MIT) -- [react-is@16.13.1](https://github.com/facebook/react) (MIT) -- [react-is@17.0.2](https://github.com/facebook/react) (MIT) -- [react-is@18.0.0](https://github.com/facebook/react) (MIT) -- [react-list@0.8.13](https://github.com/coderiety/react-list) (MIT) -- [react-markdown@8.0.5](https://github.com/remarkjs/react-markdown) (MIT) -- [react-resizable@3.0.4](http://strml.net/) (MIT) -- [react-router-dom@5.3.0](https://github.com/ReactTraining/react-router) (MIT) -- [react-router@5.2.1](https://github.com/ReactTraining/react-router) (MIT) -- [react-table@7.8.0](https://github.com/tannerlinsley/react-table) (MIT) -- [react@17.0.2](https://github.com/facebook/react) (MIT) -- [readdirp@3.6.0](https://github.com/paulmillr/readdirp) (MIT) -- [regenerator-runtime@0.13.11](https://github.com/facebook/regenerator/tree/main/packages/runtime) (MIT) -- [remark-parse@10.0.1](https://wooorm.com) (MIT) -- [remark-rehype@10.1.0](https://wooorm.com) (MIT) -- 
[resize-observer-polyfill@1.5.1](https://github.com/que-etc/resize-observer-polyfill) (MIT) -- [resolve-pathname@3.0.0](https://github.com/mjackson/resolve-pathname) (MIT) -- [sade@1.8.1](https://lukeed.com) (MIT) -- [sass@1.58.3](https://github.com/nex3) (MIT) -- [saxen@8.1.0](http://vflash.ru) (MIT) -- [saxen@8.1.2](http://vflash.ru) (MIT) -- [scheduler@0.20.2](https://github.com/facebook/react) (MIT) -- [selection-ranges@3.0.3](https://github.com/nikku/selection-ranges) (MIT) -- [selection-update@0.1.2](https://github.com/nikku/selection-update) (MIT) -- [shallow-equal@1.2.1](https://github.com/moroshko/shallow-equal) (MIT) -- [shallowequal@1.1.0](https://github.com/dashed/shallowequal) (MIT) -- [simple-swizzle@0.2.2](http://github.com/qix-) (MIT) -- [source-map-js@1.0.2](https://github.com/7rulnik/source-map-js) (BSD-3-Clause) -- [space-separated-tokens@2.0.2](https://wooorm.com) (MIT) -- [style-to-object@0.4.1](https://github.com/remarkablemark/style-to-object) (MIT) -- [styled-components@5.3.9](https://github.com/styled-components/styled-components) (MIT) -- [supports-color@5.5.0](https://github.com/chalk/supports-color) (MIT) -- [table-js@8.0.1](https://github.com/bpmn-io/table-js) (MIT) -- [tiny-invariant@1.0.6](https://github.com/alexreardon/tiny-invariant) (MIT) -- [tiny-svg@3.0.0](https://github.com/nikku) (MIT) -- [tiny-warning@1.0.3](https://github.com/alexreardon/tiny-warning) (MIT) -- [to-fast-properties@2.0.0](https://github.com/sindresorhus/to-fast-properties) (MIT) -- [to-regex-range@5.0.1](https://github.com/jonschlinkert) (MIT) -- [toggle-selection@1.0.6](https://github.com/sudodoki/toggle-selection) (MIT) -- [trim-lines@3.0.1](https://wooorm.com) (MIT) -- [trough@2.1.0](https://wooorm.com) (MIT) -- [unified@10.1.2](https://wooorm.com) (MIT) -- [unist-builder@3.0.0](https://github.com/syntax-tree/unist-builder) (MIT) -- [unist-util-generated@2.0.0](https://wooorm.com) (MIT) -- [unist-util-is@5.1.1](https://wooorm.com) (MIT) -- [unist-util-position@4.0.3](https://wooorm.com) (MIT) -- [unist-util-stringify-position@3.0.2](https://wooorm.com) (MIT) -- [unist-util-visit-parents@5.1.1](https://wooorm.com) (MIT) -- [unist-util-visit@4.1.1](https://wooorm.com) (MIT) -- [use-deep-compare-effect@1.8.1](https://kentcdodds.com) (MIT) -- [use-resize-observer@6.1.0](https://github.com/ZeeCoder/use-resize-observer) (MIT) -- [uvu@0.5.6](https://github.com/lukeed/uvu) (MIT) -- [value-equal@1.0.1](https://github.com/mjackson/value-equal) (MIT) -- [vfile-message@3.1.3](https://wooorm.com) (MIT) -- [vfile@5.3.6](https://wooorm.com) (MIT) -- [wicg-inert@3.1.2](https://github.com/WICG/inert) (W3C-20150513) -- [window-or-global@1.0.1](https://github.com/purposeindustries/window-or-global) (MIT) - -### Optimize Dependencies (Back end) - -- logback-classic@1.2.11, [(Eclipse Public License - v 1.0)](http://www.eclipse.org/legal/epl-v10.html) -- logback-core@1.2.11, [(Eclipse Public License - v 1.0)](http://www.eclipse.org/legal/epl-v10.html) -- auth0@1.44.2, [(The MIT License (MIT))](https://raw.githubusercontent.com/auth0/auth0-java/master/LICENSE) -- java-jwt@4.4.0, [(The MIT License (MIT))](https://raw.githubusercontent.com/auth0/java-jwt/master/LICENSE) -- jwks-rsa@0.22.0, [(The MIT License (MIT))](https://raw.githubusercontent.com/auth0/jwks-rsa-java/master/LICENSE) -- hppc@0.8.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- itu@1.7.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- 
jackson-annotations@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-core@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-databind@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-dataformat-cbor@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-dataformat-smile@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-dataformat-yaml@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-datatype-jdk8@2.14.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-datatype-jsr310@2.14.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-jaxrs-base@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-jaxrs-json-provider@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-module-jaxb-annotations@2.14.1, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- jackson-module-parameter-names@2.14.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- classmate@1.5.1, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- caffeine@3.1.5, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- dateparser@1.0.11, [(Apache-2.0 License)](https://github.com/sisyphsu/dateparser/blob/master/LICENSE) -- retree@1.0.4, [(Apache-2.0 License)](https://github.com/sisyphsu/retree-java/blob/master/LICENSE) -- compiler@0.9.6, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jcip-annotations@1.0-1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- json-base@2.4.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- json-flattener@0.16.4, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jsr305@3.0.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- error_prone_annotations@2.18.0, [(Apache 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- failureaccess@1.0.1, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- guava@31.1-jre, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- listenablefuture@9999.0-empty-to-avoid-conflict-with-guava, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- j2objc-annotations@1.3, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- json-path@2.8.0, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- c3p0@0.9.5.4, [(GNU Lesser General Public License, Version 2.1)](http://www.gnu.org/licenses/lgpl-2.1.html) -- mchange-commons-java@0.2.15, [(GNU Lesser General Public License, Version 2.1)](http://www.gnu.org/licenses/lgpl-2.1.html) -- content-type@2.2, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- lang-tag@1.7, [(The 
Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- nimbus-jose-jwt@9.24.4, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- oauth2-oidc-sdk@9.43.1, [(Apache License, version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.html) -- opencsv@5.7.1, [(Apache 2)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- logging-interceptor@4.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- okhttp@4.10.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- okio-jvm@3.0.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- javax.mail@1.5.6, [(CDDL/GPLv2+CE)](https://glassfish.java.net/public/CDDL+GPL_1_1.html) -- t-digest@3.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- semver4j@3.1.0, [(The MIT License)](http://www.opensource.org/licenses/mit-license.php) -- HikariCP-java7@2.4.13, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-beanutils@1.9.4, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-codec@1.15, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-collections@3.2.2, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-io@2.11.0, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-logging@1.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- zeebe-protocol@8.2.0, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- classgraph@4.8.157, [(The MIT License (MIT))](http://opensource.org/licenses/MIT) -- micrometer-core@1.9.9, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- micrometer-registry-prometheus@1.10.5, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- netty-buffer@4.1.90.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- netty-codec-http@4.1.90.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- netty-codec@4.1.90.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- netty-common@4.1.90.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- netty-handler@4.1.90.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- netty-resolver@4.1.90.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- netty-transport-native-unix-common@4.1.87.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- netty-transport@4.1.90.Final, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- simpleclient@0.16.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- simpleclient_common@0.16.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- simpleclient_tracer_common@0.16.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- simpleclient_tracer_otel@0.16.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- 
simpleclient_tracer_otel_agent@0.16.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jakarta.activation-api@1.2.2, [(EDL 1.0)](http://www.eclipse.org/org/documents/edl-v10.php) -- jakarta.annotation-api@1.3.5, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jakarta.servlet-api@4.0.4, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jakarta.validation-api@2.0.2, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jakarta.websocket-api@1.1.2, [(Eclipse Public License 2.0)](https://projects.eclipse.org/license/epl-2.0) -- jakarta.ws.rs-api@2.1.6, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jakarta.xml.bind-api@2.3.3, [(Eclipse Distribution License - v 1.0)](http://www.eclipse.org/org/documents/edl-v10.php) -- activation@1.1, [(Common Development and Distribution License (CDDL) v1.0)](https://glassfish.dev.java.net/public/CDDLv1.0.html) -- javax.activation-api@1.2.0, [(CDDL/GPLv2+CE)](https://github.com/javaee/activation/blob/master/LICENSE.txt) -- javax.annotation-api@1.3.2, [(CDDL + GPLv2 with classpath exception)](https://github.com/javaee/javax.annotation/blob/master/LICENSE) -- cache-api@1.1.0, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- javax.servlet-api@3.1.0, [(CDDL + GPLv2 with classpath exception)](https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html) -- validation-api@2.0.1.Final, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- javax.websocket-api@1.0, [(Dual license consisting of the CDDL v1.1 and GPL v2)](https://glassfish.java.net/public/CDDL+GPL_1_1.html) -- javax.websocket-client-api@1.0, [(Dual license consisting of the CDDL v1.1 and GPL v2)](https://glassfish.java.net/public/CDDL+GPL_1_1.html) -- javax.ws.rs-api@2.1.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jaxb-api@2.3.1, [(CDDL 1.1)](https://oss.oracle.com/licenses/CDDL+GPL-1.1) -- joda-time@2.1, [(Apache 2)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jna@5.10.0, [(LGPL-2.1-or-later)](https://www.gnu.org/licenses/old-licenses/lgpl-2.1) -- failsafe@2.4.4, [(Apache License, Version 2.0)](http://apache.org/licenses/LICENSE-2.0) -- logstash-logback-encoder@7.3, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- accessors-smart@2.4.8, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- json-smart@2.4.8, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jopt-simple@5.0.2, [(The MIT License)](http://www.opensource.org/licenses/mit-license.php) -- agrona@1.17.1, [(The Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-collections4@4.4, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-email@1.5, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-lang3@3.12.0, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-math3@3.6.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- commons-text@1.10.0, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- httpasyncclient@4.1.4, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- httpclient@4.5.14, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- httpcore-nio@4.4.12, [(Apache License, Version 
2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- httpcore@4.4.16, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- log4j-api@2.20.0, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- log4j-to-slf4j@2.20.0, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-analyzers-common@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-backward-codecs@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-core@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-grouping@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-highlighter@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-join@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-memory@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-misc@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-queries@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-queryparser@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-sandbox@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-spatial3d@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- lucene-suggest@8.11.1, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- tika-core@2.7.0, [(Apache-2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- tomcat-embed-core@9.0.73, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- tomcat-embed-el@9.0.73, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- tomcat-embed-websocket@9.0.73, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-engine-dmn@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-engine-feel-api@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-engine-feel-juel@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-engine-feel-scala@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-bpmn-model@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-cmmn-model@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-dmn-model@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-xml-model@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-engine@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-license-check@2.7.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-commons-logging@1.12.0, [(The Apache Software License, Version 
2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-commons-typed-values@7.18.0-ee, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-commons-utils@1.12.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-connect-connectors-all@1.5.6, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- camunda-connect-core@1.5.6, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- checker-qual@3.32.0, [(The MIT License)](http://opensource.org/licenses/MIT) -- commons-compiler@3.1.9, [(BSD-3-Clause)](https://spdx.org/licenses/BSD-3-Clause.html) -- janino@3.1.9, [(BSD-3-Clause)](https://spdx.org/licenses/BSD-3-Clause.html) -- javax-websocket-client-impl@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- javax-websocket-server-impl@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- websocket-api@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- websocket-client@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- websocket-common@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- websocket-server@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- websocket-servlet@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-annotations@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-client@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-continuation@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-http@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-io@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-jndi@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-plus@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-rewrite@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-security@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-server@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-servlet@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-servlets@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-util-ajax@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-util@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-webapp@9.4.51.v20230217, [(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- jetty-xml@9.4.51.v20230217, 
[(Apache Software License - Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- ehcache@3.10.8, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- elasticsearch-rest-client@7.17.9, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0) -- elasticsearch-rest-high-level-client@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- aggs-matrix-stats-client@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- lang-mustache-client@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- mapper-extras-client@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- parent-join-client@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- rank-eval-client@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch-cli@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch-core@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch-geo@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch-lz4@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch-plugin-classloader@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch-secure-sm@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch-x-content@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- elasticsearch@7.17.9, [(Elastic License 2.0)](https://raw.githubusercontent.com/elastic/elasticsearch/v7.17.9/licenses/ELASTIC-LICENSE-2.0.txt) -- freemarker@2.3.32, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- aopalliance-repackaged@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jakarta.inject@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- class-model@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- hk2-api@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- hk2-core@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- hk2-locator@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- hk2-runlevel@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- hk2-utils@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- hk2@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- osgi-resource-locator@1.0.3, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- spring-bridge@2.6.1, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-container-servlet-core@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-container-servlet@2.35, [(EPL 
2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-client@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-common@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-server@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-bean-validation@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-entity-filtering@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-spring5@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-hk2@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- jersey-media-json-jackson@2.35, [(EPL 2.0)](http://www.eclipse.org/legal/epl-2.0) -- HdrHistogram@2.1.12, [(Public Domain, per Creative Commons CC0)](http://creativecommons.org/publicdomain/zero/1.0/) -- hibernate-validator@6.2.5.Final, [(Apache License 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- javassist@3.25.0-GA, [(MPL 1.1)](http://www.mozilla.org/MPL/MPL-1.1.html) -- jboss-logging@3.4.1.Final, [(Apache License, version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- kotlin-stdlib-common@1.5.31, [(The Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- kotlin-stdlib-jdk7@1.6.10, [(The Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- kotlin-stdlib-jdk8@1.6.10, [(The Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- kotlin-stdlib@1.6.20, [(The Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- annotations@17.0.0, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- LatencyUtils@2.0.3, [(Public Domain, per Creative Commons CC0)](http://creativecommons.org/publicdomain/zero/1.0/) -- lz4-java@1.8.0, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- mybatis@3.5.6, [(The Apache Software License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0.txt) -- asm-analysis@7.1, [(BSD)](http://asm.ow2.org/license.html) -- asm-commons@9.4, [(BSD-3-Clause)](https://asm.ow2.io/license.html) -- asm-tree@9.4, [(BSD-3-Clause)](https://asm.ow2.io/license.html) -- asm-util@7.1, [(BSD)](http://asm.ow2.org/license.html) -- asm@9.4, [(BSD-3-Clause)](https://asm.ow2.io/license.html) -- quartz@2.3.2, [(The Apache Software License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) -- jul-to-slf4j@1.7.36, [(MIT License)](http://www.opensource.org/licenses/mit-license.php) -- slf4j-api@1.7.36, [(MIT License)](http://www.opensource.org/licenses/mit-license.php) -- spring-boot-actuator-autoconfigure@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-actuator@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-autoconfigure@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-actuator@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-jersey@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-jetty@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-json@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-logging@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-tomcat@2.7.10, 
[(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-validation@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter-web@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot-starter@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-boot@2.7.10, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-config@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-core@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-crypto@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-oauth2-client@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-oauth2-core@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-oauth2-jose@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-oauth2-resource-server@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-security-web@5.8.2, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-aop@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-beans@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-context-support@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-context@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-core@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-expression@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-jcl@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-tx@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-web@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-webmvc@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- spring-websocket@5.3.26, [(Apache License, Version 2.0)](https://www.apache.org/licenses/LICENSE-2.0) -- snakeyaml@1.33, [(Apache License, Version 2.0)](http://www.apache.org/licenses/LICENSE-2.0.txt) - - - - - -Desktop Modeler is a desktop modeling application that builds upon a number of third party libraries. You find an up-to-date list of third party libraries used and their license terms in the [THIRD_PARTY_NOTICES](https://github.com/camunda/camunda-modeler/blob/master/THIRD_PARTY_NOTICES), located in the root of the source code repository. This file is also shipped with the application distribution as `THIRD_PARTY_NOTICES.camunda-modeler.txt`. - - - - - -- **Dependencies:** SBOM CycloneDX files with up-to-date lists of third party libraries used and their licenses can be requested [on demand](mailto:dependency-request@camunda.com). -- **Source code:** Access to source code is provided [on demand](mailto:dependency-request@camunda.com). 
- - - - - -### Connector SDK - -Connector dependencies are packaged with the [release assets](https://github.com/camunda/connectors/releases/tag/0.23.2) as software bill of materials (SBOMs) in XML or JSON. - - - - diff --git a/versioned_docs/version-8.2/reference/glossary.md b/versioned_docs/version-8.2/reference/glossary.md deleted file mode 100644 index 9656d8b5f98..00000000000 --- a/versioned_docs/version-8.2/reference/glossary.md +++ /dev/null @@ -1,261 +0,0 @@ ---- -id: glossary -title: "Glossary" -description: "This section defines common terminology referenced within the documentation." ---- - -### Bridge - -Synonym for "[Connector](#connector)". - -### Broker - -A broker is an instance of a Zeebe installation which executes processes and manages process state. A single broker is installed on a single machine. - -- [Architecture](/components/zeebe/technical-concepts/architecture.md#brokers) - -### Client - -A client interacts with the Zeebe broker on behalf of the business application. Clients poll for work from the broker. - -- [Architecture](/components/zeebe/technical-concepts/architecture.md#clients) - -### Cluster - -A cluster represents a configuration of one or more brokers collaborating to execute processes. Each broker in a cluster acts as a leader or a follower. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md) - -### Command - -A command represents an action to be taken or executed. Example commands include: deploy a process, execute a process, etc. - -- [Internal processing](/components/zeebe/technical-concepts/internal-processing.md#events-and-commands) - -### Connector - -A reusable building block that performs the integration with an external system and works out of the box. - -The Connector might be unidirectional or bidirectional and possibly include a [job worker](#job-worker). - -The boundary between Connectors and job workers can be fuzzy, but in general, Connectors connect to other active pieces of software. [Outbound](#outbound-connector), [inbound](#inbound-connector), or [protocol](#protocol-connector) Connectors are types of Connectors. - -### Correlation - -Correlation refers to the act of matching a message with an inflight process instance. - -- [Message correlation](/components/concepts/messages.md) - -### Correlation key - -A correlation key is an attribute within a message used to match this message against a certain variable within an inflight process instance. If the value of the correlation key matches the value of the variable within the process instance, the message is matched to this process instance. - -- [Message correlation](/components/concepts/messages.md) - -### Deployment - -A process cannot execute unless it is known by the broker. Deployment is the act of pushing processes to the broker. - -- [Zeebe Deployment](/apis-tools/grpc.md#deployresource-rpc) - -### Event - -An event represents a state change associated with an aspect of an executing process instance. Events capture variable changes, state transitions in process elements, etc. An event is represented by a timestamp, the variable name, and variable value. Events are stored in an append-only log. - -- [Internal processing](/components/zeebe/technical-concepts/internal-processing.md#events-and-commands) - -### Exporter - -An exporter represents a sink to which Zeebe will submit all records within the log. This gives users of Zeebe an opportunity to persist records from the log for future use, as this data will not be available after log compaction.
- -- [Exporter](/self-managed/concepts/exporters.md) - -### Follower - -In a clustered environment, a broker which is not a leader is a follower of a given partition. A follower can become the new leader when the old leader is no longer reachable. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) - -### Gateway - -Clients communicate with the Zeebe cluster through a gateway. The gateway provides a gRPC API and forwards client commands to the cluster. Depending on the setup, a gateway can be embedded in the broker or can be configured to be standalone. - -- [Architecture](/components/zeebe/technical-concepts/architecture.md#gateways) - -### Inbound Connector - -Inbound [Connectors](#connector) in Camunda 8 enable workflows to receive data or messages from external systems or services, making it possible to integrate workflows into a wider business process or system architecture. - -Inbound Connectors include three subtypes - [webhooks](#webhook), [subscriptions](#subscription), and [polling](#polling). - -Unlike [outbound Connectors](#outbound-connector), inbound Connectors are **stateful**. The Java code of the inbound Connector has a lifecycle suitable for long-running operations, such as listening for messages on a queue or waiting for a webhook to be called. -Each element referencing an inbound Connector will lead to the creation of one inbound Connector instance. A process definition with one webhook start event and two additional webhooks as intermediate catch events would therefore lead to the creation of three inbound connector instances. - -### Incident - -An incident represents an error condition which prevents Zeebe from advancing an executing process instance. Zeebe will create an incident if there was an uncaught exception thrown in your code and the number of retries of the given step is exceeded. - -- [Incident](/components/concepts/incidents.md) - -### Ingress - -An Ingress is a Kubernetes object that manages external access to the services within a Kubernetes cluster. An **Ingress controller** is required to route traffic to your services according to the rules defined on the Ingress. - -- [Ingress setup](/self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup.md) - -### Job - -A job represents a distinct unit of work within a business process. Service tasks represent such -jobs in your process and are identified by a unique id. A job has a type to allow specific job -workers to find jobs that they can work on. - -- [Job workers](/components/concepts/job-workers.md) - -### Job activation timeout - -This is the amount of time the broker will wait for a complete or fail response from the job worker. This comes after a job has been submitted to the job worker for processing and before it marks the job as available again for other job workers. - -- [Job workers](/components/concepts/job-workers.md#requesting-jobs) - -### Job worker - -A special type of client that polls for and executes available jobs. An uncompleted job prevents Zeebe from advancing process execution to the next step. - -- [Job workers](/components/concepts/job-workers.md) - -### Leader - -In a clustered environment, one broker (the leader) is responsible for process execution and housekeeping of data within a partition. Housekeeping includes taking snapshots, replication, and running exports. 
- -- [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) - -### Log - -The log is comprised of an ordered sequence of records written to persistent storage. The log is appended-only and is stored on disk within the broker. - -- [Partitions](/components/zeebe/technical-concepts/partitions.md#partition-data-layout) - -### Message - -A message contains information to be delivered to interested parties during execution of a process instance. Messages can be published via Kafka or Zeebe’s internal messaging system. Messages are associated with timestamp and other constraints such as time-to-live (TTL). - -- [Messages](/components/concepts/messages.md) - -### Outbound Connector - -Outbound [Connectors](#connector) in Camunda 8 allow workflows to trigger with external systems or services, making it possible to integrate workflows with other parts of a business process or system architecture. - -### Partition - -A partition represents a logical grouping of data in a Zeebe broker. This data includes process instance variables stored in RocksDB, commands, and events generated by Zeebe stored in the log. The number of partitions is defined by configuration. - -- [Partitions](/components/zeebe/technical-concepts/partitions.md) - -### Polling Connector - -An inbound polling Connector to periodically poll an external system or service for new data using HTTP polling. - -A Camunda workflow uses this type of Connector to retrieve data from a remote system that does not support real-time notifications or webhooks, but instead requires the client to periodically request updates. - -### Process - -A process is a defined sequence of distinct steps representing your business logic. Examples of a -process could be an e-commerce shopping experience or onboarding a new employee. In Zeebe, -process are identified by a unique process id. The process is usually also referred to as the -BPMN model. - -- [Processes](/components/concepts/processes.md) - -### Process instance - -While a process represents a defined sequence of distinct steps representing your business logic, a process instance represents a currently executing or completed process. For a single process, there could be many associated process instances in various stages of their executing lifecycle. Process instances are identified by process instance ids. Executing process instances are also sometimes referred to as inflight processes. - -- [Processes](/components/concepts/processes.md) - -### Process instance variable - -A process instance variable represents the execution state (i.e data) of a process instance. These variables capture business process parameters which are the input and output of various stages of the process instance and which also influence process flow execution. - -- [Variables](/components/concepts/variables.md) -- [Data flow](/components/modeler/bpmn/data-flow.md) - -### Protocol Connector - -Protocol Connectors are a type of [Connector](#connector) in Camunda that can serve as either [inbound](#inbound-connector) or [outbound](#outbound-connector) Connectors, supporting a variety of technical protocols. These Connectors are highly generic, designed to provide a flexible and customizable means of integrating with external systems and services. - -Protocol Connectors can be customized to meet the needs of specific use cases using element templates, with no additional coding or deployment required. 
Examples of protocol Connectors include HTTP REST, SOAP, GraphQL, as well as message queue Connectors. - -### Record - -A record represents a command or an event. For example, a command to create a new process instance, or a state transition of an executing process instance representing an event at a given point in time would result to generation of a record. During the execution lifecycle of a process instance, numerous records are generated to capture various commands and events generated. Records are stored in the log. - -- [Internal processing](/components/zeebe/technical-concepts/internal-processing.md#events-and-commands) - -### Replication - -Replication is the act of copying data in a partition from a leader to its followers within a clustered Zeebe installation. After replication, the leader and followers of a partition will have the exact same data. Replication allows the system to be resilient to brokers going down. - -- [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) - -### Replication factor - -This is the number of times data in a partition are copied. This depends on the number of brokers in a cluster. A cluster with one leader and two followers have a replication factor of three, as data in each partition needs to have three copies. - -We recommend running an odd replication factor. - -- [Partitions](/components/zeebe/technical-concepts/partitions.md#replication) - -### Request timeout - -This is how long a client waits for a response from the broker after the client submits a request. If a response is not received within the client request timeout, the client considers the broker unreachable. - -- [Zeebe API (gRPC)](/apis-tools/grpc.md) - -### Segment - -The log consists of one or more segments. Each segment is a file containing an ordered sequence records. Segments are deleted when the log is compacted. - -- [Resource planning](/self-managed/zeebe-deployment/operations/resource-planning.md#event-log) - -### Snapshot - -The state of all active process instances, (these are also known as inflight process instances) are stored as records in an in-memory database called RocksDB. A snapshot represents a copy of all data within the in-memory database at any given point in time. Snapshots are binary images stored on disk and can be used to restore execution state of a process. The size of a snapshot is affected by the size of the data. Size of the data depends on several factors, including complexity of the model or business process, the size and quantity of variables in each process instance, and the total number of executing process instances in a broker. - -- [Resource planning](/self-managed/zeebe-deployment/operations/resource-planning.md#snapshots) - -### Soft pause exporting - -Soft pause exporting is a feature that allows you to continue exporting records from Zeebe, but without deleting those records (log compaction) from Zeebe. This is particularly useful during hot backups. - -- [Exporting API](/self-managed/zeebe-deployment/operations/management-api.md) -- [Backup and restore](self-managed/backup-restore/zeebe-backup-and-restore.md) - -### Subscription Connector - -An [inbound Connector](#inbound-connector) that subscribes to a message queue. - -This way, a Camunda workflow can receive messages from an external system or service (like Kafka or RabbitMQ) using message queuing technology. 
This type of inbound Connector is commonly used in distributed systems where different components of the system need to communicate with each other asynchronously. - -### Webhook - -Webhooks are a subtype of [inbound Connector](#inbound-connector). - -A webhook is a way for web applications to send real-time notifications or data to other applications or services when certain events occur. When a webhook is set up, the application sends a POST request containing data to a pre-configured URL, which triggers a workflow. - -### Worker - -A worker executes a job. In the Zeebe nomenclature, these are also referred to as job workers. - -- [Job workers](/components/concepts/job-workers.md) - -### Workflow - -See [process](#process). - -### Workflow instance - -See [process instance](#process-instance). - -### Workflow instance variable - -See [process instance variable](#process-instance-variable). diff --git a/versioned_docs/version-8.2/reference/img/channels.png b/versioned_docs/version-8.2/reference/img/channels.png deleted file mode 100644 index 4964fadbba0..00000000000 Binary files a/versioned_docs/version-8.2/reference/img/channels.png and /dev/null differ diff --git a/versioned_docs/version-8.2/reference/img/diagram-releases.png b/versioned_docs/version-8.2/reference/img/diagram-releases.png deleted file mode 100644 index 9bedbbd2ace..00000000000 Binary files a/versioned_docs/version-8.2/reference/img/diagram-releases.png and /dev/null differ diff --git a/versioned_docs/version-8.2/reference/img/update-console.png b/versioned_docs/version-8.2/reference/img/update-console.png deleted file mode 100644 index 4738b25f46c..00000000000 Binary files a/versioned_docs/version-8.2/reference/img/update-console.png and /dev/null differ diff --git a/versioned_docs/version-8.2/reference/licenses.md b/versioned_docs/version-8.2/reference/licenses.md deleted file mode 100644 index 99671a6021b..00000000000 --- a/versioned_docs/version-8.2/reference/licenses.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: licenses -title: "Licenses" -description: "Licensing information for components of Camunda 8" ---- - -## Licensing - -This page contains licensing information for all components of Camunda 8. - -### Zeebe - -Licenses and license information for Zeebe can be found in the Zeebe project [README](https://github.com/camunda/camunda#license). - -### Operate, Tasklist, Identity, Optimize - -Operate, Tasklist, Identity, and Optimize are licensed to our users and customers under a proprietary license. The proprietary license can be either the [Camunda Self-Managed Free Edition license](https://legal.camunda.com/licensing-and-other-legal-terms#legal-terms-camunda-platform-c8-self-managed-free-edition-and-beta-offerings) or the Camunda Self-Managed Enterprise Edition license (a copy you obtain when you contact Camunda). Note that while the Camunda Self-Managed Free Edition is indeed free, it only allows usage of the software in non-production environments. To use the software in production, [purchase the Camunda Self-Managed Enterprise Edition](https://camunda.com/platform/camunda-platform-enterprise-contact/). This licensing information also applies to web apps and APIs. - -### Connectors - -The Connector SDK, REST Connector, Connector Runtime Docker image, and Connectors Bundle Docker image are licensed under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). 
All other Connectors provided by Camunda out of the box are licensed to our users and customers under the same proprietary license as the one defined for Operate, Tasklist, Identity, and Optimize above. - -### Web Modeler - -Web Modeler is licensed under the proprietary Camunda Self-Managed Enterprise Edition license (a copy you obtain when you contact Camunda). - -### Desktop Modeler - -The source code of Desktop Modeler is licensed under the MIT license as stated in the [`LICENSE` file](https://github.com/camunda/camunda-modeler/blob/master/LICENSE) in the root of the source code repository. This file is also shipped as `LICENSE.camunda-modeler.txt` with each modeler distribution. - -### Camunda 8 documentation - -License information for our documentation can be found in the [LICENSE.txt](https://github.com/camunda/camunda-docs/blob/main/LICENSE.txt) of the Camunda 8 documentation repository. - -## Terms & conditions - -For information not covered by the above license links, visit our [Terms and Conditions](https://legal.camunda.com/licensing-and-other-legal-terms). diff --git a/versioned_docs/version-8.2/reference/notices.md b/versioned_docs/version-8.2/reference/notices.md deleted file mode 100644 index d779d8ddc98..00000000000 --- a/versioned_docs/version-8.2/reference/notices.md +++ /dev/null @@ -1,374 +0,0 @@ ---- -id: notices -title: "Security notices" -description: "Let's take a closer look at security notices, reporting vulnerabilities, and addiitonal security information." ---- - -## Security notices - -Camunda publishes security notices after fixes are available. - -### Notice 13 - -#### Publication date - -July 18th, 2024 - -#### Product affected - -Camunda Identity - -#### Impact - -The version of `Apache Tomcat` used by Camunda Identity was affected by the following vulnerability: - -- https://nvd.nist.gov/vuln/detail/CVE-2024-34750 - -#### How to determine if the installation is affected - -You are using Camunda Identity version 8.2.29 or previous. - -#### Solution - -Camunda has provided the following release which contains a fix: - -- [Camunda Identity 8.2.30](https://github.com/camunda-cloud/identity/releases/tag/8.2.30) - -### Notice 12 - -#### Publication date - -October 3rd, 2023 - -#### Product affected - -Camunda Desktop Modeler - -#### Impact - -The version of `libwebp` shipped with Camunda Desktop Modeler was affected by the following vulnerability: - -- https://nvd.nist.gov/vuln/detail/CVE-2023-4863 - -#### How to determine if the installation is affected - -You are using Camunda Desktop Modeler version 5.15.1 or previous. - -#### Solution - -Camunda has provided the following release which contains a fix: - -- [Camunda Desktop Modeler 5.15.2](https://downloads.camunda.cloud/release/camunda-modeler/5.15.2/) - -### Notice 11 - -#### Publication date - -April 17, 2023 - -#### Product affected - -Tasklist - -#### Impact - -The REST API functionality of Tasklist 8.2.0 and 8.2.1 allows unauthenticated access to the following methods/URLs: - -- GET /v1/tasks/{taskId} -- POST /v1/tasks/search -- POST /v1/tasks/{taskId}/variables/search -- POST /v1/forms/{formId} -- POST /v1/variables/{variableId} - -Find more information about the methods in our [Tasklist REST API documentation](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md). 
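A quick way to check whether a given Tasklist instance is affected is to call one of the endpoints above without credentials. The snippet below is a rough sketch only; the base URL is a placeholder, and the exact response depends on your setup. A `401` or `403` indicates authentication is enforced, while a successful response without credentials suggests the instance is exposed.

```bash
# Hypothetical base URL; replace with the address of your Tasklist instance.
TASKLIST_URL="https://tasklist.example.com"

# Print only the HTTP status code of an unauthenticated search request.
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST "$TASKLIST_URL/v1/tasks/search" \
  -H "Content-Type: application/json" \
  -d '{}'
```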
- -Therefore, if you use Tasklist 8.2.0 or 8.2.1, and if you have sensitive data stored in process variables (accessed by user tasks), this data could have been accessed without authentication by anyone who knew the endpoint of the Tasklist instance. - -#### How to determine if the installation is affected - -You are using Tasklist version 8.2.0 or 8.2.1. - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Tasklist 8.2.2](https://github.com/camunda/camunda-platform/releases/tag/8.2.2) - -### Notice 10 - -#### Publication Date: - -November 10th, 2022 - -#### Product affected: - -Tasklist - -#### Impact: - -The Tasklist Docker image contains OpenSSL version 3.0.2, for which the following CVEs have been published: - -- https://nvd.nist.gov/vuln/detail/CVE-2022-3602 -- https://nvd.nist.gov/vuln/detail/CVE-2022-3786 - -At this point, Camunda is not aware of any specific attack vector in Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using Tasklist version (8.0.3 <= version <= 8.0.7) or <= 8.1.2 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Tasklist 8.1.3](https://github.com/camunda/camunda-platform/releases/tag/8.1.3) -- [Tasklist 8.0.8](https://github.com/camunda/camunda-platform/releases/tag/8.0.8) - -### Notice 9 - -#### Publication Date: - -April 11th, 2022 - -#### Product affected: - -Zeebe, Operate, Tasklist, IAM - -#### Impact: - -Zeebe, Operate, Tasklist and IAM are using the Spring framework for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2022-22965 - -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate, Tasklist or IAM allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.11 or <= 1.3.6 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.3.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.3.7) -- [Zeebe, Operate and Tasklist 1.2.12](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.12) - -### Notice 8 - -#### Publication Date: - -December 31st, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44832. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.8 or <= 1.1.9 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.9](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.9) -- [Zeebe, Operate and Tasklist 1.1.10](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.10) - -### Notice 7 - -#### Publication Date: - -December 31st, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44832.
-Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.8 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.9](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.9) - -### Notice 6 - -#### Publication Date: - -December 22th, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45105. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.7 or <= 1.1.8 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.8](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.8) -- [Zeebe, Operate and Tasklist 1.1.9](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.9) - -### Notice 5 - -#### Publication Date: - -December 22th, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45105. -Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -IAM bundles logback libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-42550. -At this point, Camunda is not aware of any specific attack vector in IAM allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.7 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.8](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.8) - -### Notice 4 - -#### Publication Date: - -December 17th, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45046. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. 
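One way to verify which log4j version a given distribution bundles is to list the jars it ships with. The following is a hedged sketch, assuming the official Zeebe Docker image and its default installation path; adjust the image tag and path for your own deployment.

```bash
# List the log4j jars bundled with a specific Zeebe image (the tag is only an example).
# /usr/local/zeebe/lib is assumed to be the install path inside the official image.
docker run --rm --entrypoint sh camunda/zeebe:1.2.6 -c 'ls /usr/local/zeebe/lib | grep -i log4j'
```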
- -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.6 or <= 1.1.7 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.7) -- [Zeebe, Operate and Tasklist 1.1.8](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.8) - -### Notice 3 - -#### Publication Date: - -December 17th, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-45046. -Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -IAM bundles logback libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-42550. -At this point, Camunda is not aware of any specific attack vector in IAM allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.6 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.7) - -### Notice 2 - -#### Publication Date: - -December 14th, 2021 - -#### Product affected: - -Zeebe, Operate, Tasklist - -#### Impact: - -Zeebe, Operate and Tasklist bundle log4j-core for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44228. -At this point, Camunda is not aware of any specific attack vector in Zeebe, Operate or Tasklist allowing attackers to exploit the vulnerability but recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using Zeebe, Operate or Tasklist version <= 1.2.5 or <= 1.1.6 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [Zeebe, Operate and Tasklist 1.2.6](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.6) -- [Zeebe, Operate and Tasklist 1.1.7](https://github.com/camunda-cloud/zeebe/releases/tag/1.1.7) - -Apply the patches mentioned above or set the JVM option `-Dlog4j2.formatMsgNoLookups=true` - -### Notice 1 - -#### Publication Date: - -December 14th, 2021 - -#### Product affected: - -IAM - -#### Impact: - -IAM bundles log4j libraries for which the following CVE has been published: https://nvd.nist.gov/vuln/detail/CVE-2021-44228. -Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bundle the log4j-core library which contains the vulnerability referred to by the CVE. As a result, Camunda does not consider IAM to be affected by the vulnerability. - -Still, Camunda recommends applying fixes as mentioned in the Solution section below. - -#### How to determine if the installation is affected - -You are using IAM version <= 1.2.5 - -#### Solution - -Camunda has provided the following releases which contain a fix - -- [IAM 1.2.6](https://github.com/camunda-cloud/zeebe/releases/tag/1.2.6) - -## Report a vulnerability - -Please report security vulnerabilities to Camunda immediately. 
Please follow the steps on our [Camunda Security page](https://camunda.com/security#report-a-vulnerability) to report a vulnerability. - -## Additional security information - -For more information about security at Camunda, including our security policy, security issue management, and more, see [Camunda.com/security](https://camunda.com/security). diff --git a/versioned_docs/version-8.2/reference/overview.md b/versioned_docs/version-8.2/reference/overview.md deleted file mode 100644 index be9f327cf98..00000000000 --- a/versioned_docs/version-8.2/reference/overview.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -id: overview -title: Overview -sidebar_label: Overview -slug: /reference/ ---- - -This section contains general reference material for Camunda 8. - -## Support - -- [Announcements](announcements.md) - Stay up to date on versioning changes, end of maintenance updates, OpenSearch support, and other updates. -- [Release notes](release-notes.md) - Learn where you can find release notes for components of Camunda 8. -- [Service status](status.md) - Camunda 8 SaaS is a hosted service for the Camunda 8 stack that runs on the Google Cloud Platform (GCP). When availability changes, Camunda provides you with a current service status. -- [Supported environments](supported-environments.md) - Learn more about supported environments across your web browser, Desktop Modeler, clients, Camunda 8 Self-Managed, and the Camunda 7 and Optimize version matrix. -- [Dependencies & third party libraries](dependencies.md) - A complete list of all dependencies and third-party libraries for all the components of Camunda 8, including Self-Managed. -- [Alpha features](alpha-features.md) - Use alpha features to learn about upcoming changes, try them out, and share feedback. - -## Security and license information - -- [Licenses](licenses.md) - Take a closer look at licensing information for all components of Camunda 8. -- [Security notices](notices.md) - Learn about Camunda's security notices after fixes are available. - -:::note Release policy -Learn about the [Camunda release policy](release-policy.md) with some specific clarifications across provisioning in SaaS and Self-Managed. -::: - -## Additional resources - -- [Regions](regions.md) - When you create a cluster in Camunda 8 SaaS, specify a region for that cluster. -- [Usage metrics](usage-metrics.md) - Understand the three main usage metrics that have an impact on Camunda 8 pricing. -- [Glossary](glossary.md) - Have a closer look at common terminology used across Camunda 8 documentation. diff --git a/versioned_docs/version-8.2/reference/regions.md b/versioned_docs/version-8.2/reference/regions.md deleted file mode 100644 index c3104ed7238..00000000000 --- a/versioned_docs/version-8.2/reference/regions.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: regions -title: "Regions" -description: "After creating a cluster, specify a region for that cluster. Read on for details of Google Cloud Platform regions currently supported in Camunda 8 SaaS." ---- - -When you create a cluster in Camunda 8 SaaS, you must specify a region for that cluster. - -Currently, we make these regions available for customers on the Trial, Starter, and Enterprise Plans. Enterprise customers can discuss custom regions with their Customer Success Manager. - -:::note -Our Console and Web Modeler components are currently hosted in the EU. [Contact us](https://camunda.com/contact/) if you have additional questions. -::: - -Below, find a list of regions currently supported in Camunda 8 SaaS. 
- -## Available Google Cloud Platform (GCP) regions - -- Belgium, Europe (europe-west1) -- Iowa, North America (us-central1) -- London, Europe (europe-west2) -- South Carolina, North America (us-east1) -- Sydney, Australia (australia-southeast1) -- Toronto, North America (northamerica-northeast2) - -You can find the locations behind the region codes [on the Google page](https://cloud.google.com/about/locations). - -:::note -Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](https://camunda.com/contact/) as we are able to make additional regions available on request. -::: diff --git a/versioned_docs/version-8.2/reference/release-notes.md b/versioned_docs/version-8.2/reference/release-notes.md deleted file mode 100644 index 2885e38a4fc..00000000000 --- a/versioned_docs/version-8.2/reference/release-notes.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -id: release-notes -title: "Release notes" -description: "Release notes for Camunda 8 and its components." ---- - -Release notes for Camunda 8, including alphas, are available on [GitHub](https://github.com/camunda/camunda-platform/releases). This includes release assets and release notes for Zeebe, Operate, Tasklist, and Identity. - -The current release notes can be found [here](https://github.com/camunda/camunda-platform/releases/latest). - -[Update guides](/guides/update-guide/introduction.md) include links to both release notes and release blogs. - -Most releases and release notes are available through the [Camunda Platform repo](https://github.com/camunda/camunda-platform), however, the following component release notes are available as linked below: - -- [Desktop Modeler](https://github.com/camunda/camunda-modeler/releases) - -Some components have release notes that are not publicly available, including the following: - -- Web Modeler Self-Managed -- Optimize - -These release notes are available in a blog format under the [release notes](https://camunda.com/blog/category/releases/) tag. diff --git a/versioned_docs/version-8.2/reference/release-policy.md b/versioned_docs/version-8.2/reference/release-policy.md deleted file mode 100644 index fc32d16ee92..00000000000 --- a/versioned_docs/version-8.2/reference/release-policy.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -id: release-policy -title: "Release policy" -description: "Learn more about Camunda releases, including alpha features and alpha releases." ---- - -Camunda 8 follows the [Camunda release policy](https://camunda.com/release-policy/) with the following specific clarifications. - -:::info -You can find deprecation and support announcements on the [Announcements](announcements.md) page. -::: - -![Stable and alpha channels when provisioning a cluster](./img/diagram-releases.png) - -## Alpha features and releases - -It is important to understand the different ways the term "alpha" is used in the context of Camunda releases and features. - -### Alpha feature - -Refers to a feature or component released as an alpha version, in an early state for you to test and participate in development by sharing your feedback before the feature reaches [general availability (GA)](alpha-features.md#general-availability-ga). Some alpha features require turning on for your cluster before you can use them. See [alpha features](alpha-features.md). 
- -### Alpha release - -Refers to a release made available between minor versions that allows you to preview an upcoming minor version and the alpha features included (for example, `8.2.0-alpha1`, `8.2.0-alpha2`, and so on). Camunda strives to release this type of release on a monthly basis. To learn more about the alpha features included in each alpha release, see [release notes](release-notes.md). - -:::note - -- An alpha release can also be made available where the entire version is an alpha with [alpha limitations](alpha-features.md#alpha). -- Additionally, "Alpha channel" refers to the channel you can use when provisioning a SaaS cluster. See [alpha channel](#alpha-channel). - -::: - -## SaaS provisioning - -In Camunda 8 SaaS we differentiate between components that are part of a Camunda 8 cluster (cluster components), and components outside the cluster (non-cluster components). - -### Cluster components - -A cluster typically consists of the following components: - -- [Zeebe](/components/zeebe/zeebe-overview.md) -- [Operate](/components/operate/operate-introduction.md) -- [Tasklist](/components/tasklist/introduction-to-tasklist.md) -- [Optimize]($optimize$/components/what-is-optimize) - -You can provision cluster components using one of two channels, following the [Camunda release policy](https://camunda.com/release-policy/). - -![Stable and alpha channels when provisioning a cluster](./img/channels.png) - -#### Stable channel - -You can use the **Stable** channel to access [general availability](alpha-features.md#general-availability-ga) features for cluster components. - -- Provides the latest feature and patch releases ready for most users at minimal risk. -- Releases follow semantic versioning and can be updated to the next minor or patch release without data loss. -- On the stable channel, all supported minor versions are made available for provisioning. - -#### Alpha channel - -You can use the **Alpha** channel to access [alpha features](alpha-features.md) and patch releases for cluster components. - -- Provides alpha releases to preview and prepare for the next stable release. -- Alpha releases provide a short-term stability point to test new features and give feedback before they are released to the stable channel. Use an alpha release to test the upcoming minor release with your infrastructure. -- Alpha releases cannot be updated to a newer release, and so are not suitable for use in production. - -### Non-cluster components - -Non-cluster components include: - -- [Modeler (Web)](/components/modeler/web-modeler/launch-web-modeler.md) -- [Connectors](/components/console/introduction-to-console.md) -- [Console](/components/console/introduction-to-console.md) - -Non-cluster component versions are released continuously. - -- Customers are automatically updated to the latest component version when it is ready for release. -- Admins can [enable alpha features](/components/console/manage-organization/enable-alpha-features.md) for non-cluster components in organization settings. - -### New Camunda 8 versions - -When a new Camunda 8 version is released, we try to provide the new version on our managed service at the same time. - -An **Update available** notification is shown in Console, recommending that you update to the latest version. - -![Console with notice to update the cluster in Camunda 8 SaaS](img/update-console.png) - -#### Generation names - -As of Camunda 8.5.0, the generation naming scheme in Camunda 8 SaaS changed to no longer include the patch version. 
- -- The naming scheme used for the Camunda 8.5 generations is `Camunda .+gen`, where `N` is incremented with every atomic change to the component version set. - -- This decouples the generation name from the particular patch level of the components it contains, as some component versions such as Connectors are decoupled from other components. - -- You can learn about the particular component patch version changes in the update dialogue to the latest generation available. - -#### Update or restart for critical issues - -In our managed service, Camunda reserves the right to force update or restart a cluster immediately and without notice in advance if there is a critical security or stability issue. - -## Self-Managed - -When a new Camunda 8 version is released, Camunda 8 Self-Managed enterprise customers are notified via email. - -:::info -Non-enterprise customers can stay up to date via [release blogs](https://camunda.com/blog/category/releases/), [announcements](announcements.md), or releases on [GitHub](https://github.com/camunda) and [Docker Hub](https://hub.docker.com/u/camunda). -::: - -### Helm chart - -The [Camunda 8 Self-Managed Helm chart](https://artifacthub.io/packages/helm/camunda/camunda-platform) version is coupled with the applications version (e.g., chart version is 8.2.x which follows the applications main version 8.2.x). - -For more details about the applications version included in the Helm chart, review the [full version matrix](https://helm.camunda.io/camunda-platform/version-matrix/). - -### New versions - -If you are running Camunda 8 Self-Managed, follow our [update guide](/guides/update-guide/introduction.md). diff --git a/versioned_docs/version-8.2/reference/status.md b/versioned_docs/version-8.2/reference/status.md deleted file mode 100644 index c8de779c5d2..00000000000 --- a/versioned_docs/version-8.2/reference/status.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: status -title: "Camunda 8 SaaS status" -description: "Camunda 8 SaaS is a hosted service for the Camunda 8 stack that runs on the Google Cloud Platform (GCP)." ---- - -Camunda 8 SaaS is a hosted service for the Camunda 8 stack that runs on the Google Cloud Platform (GCP). Like any service, it might occasionally undergo availability changes. When availability changes, Camunda makes sure to provide you with a current service status. - -To see current and past service availability, visit [Camunda 8 SaaS Status](https://status.camunda.io). - -## Subscribe to updates - -Don’t want to check the service status page manually? Get notified about changes to the service status automatically. - -To receive service status updates: - -1. Go to the [Camunda 8 SaaS Status](https://status.camunda.io) page and click **SUBSCRIBE TO UPDATES**. -1. Select **Atom and RSS feeds**. -1. Add the feed URL to your favourite Atom/RSS reader. -1. After you subscribe to updates, you are notified whenever a service status update is posted. - -## Support - -Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/contact). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). 
diff --git a/versioned_docs/version-8.2/reference/supported-environments.md b/versioned_docs/version-8.2/reference/supported-environments.md deleted file mode 100644 index 3a839317587..00000000000 --- a/versioned_docs/version-8.2/reference/supported-environments.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -id: supported-environments -title: "Supported environments" -description: "Find out where to run Camunda 8 components for SaaS and Self-Managed, including Optimize for both Camunda 8 and Camunda 7." ---- - -:::note -The versions listed on this page are the minimum version required if appended with a `+`. - -Pay attention to where the `+` falls, as most of our dependencies follow [semantic versioning](https://semver.org/) (semver), where `x.y.z` correspond to MAJOR.MINOR.PATCH. Higher or more recent versions will be compatible with Camunda, with respect to semver. - -For example, 1.2+ means support for the minor version 2, and any higher minors (1.3, 1.4, etc.) and patches (1.2.1, 1.2.2, etc.), but not majors, like 2.x. - -::: - -## Web Browser - -- Google Chrome latest [recommended] -- Mozilla Firefox latest -- Microsoft Edge latest - -## Desktop Modeler - -- Windows 7 / 10 -- Mac OS X 10.11 -- Ubuntu LTS (latest) - -## Clients - -- **Zeebe Java Client**: OpenJDK 8+ -- **Zeebe Go Client**: Go 1.13+ -- **zbctl**: Windows, MacOS, and Linux (latest) -- **Helm CLI**: 3.12.x (for the exact version, check the [version matrix](https://helm.camunda.io/camunda-platform/version-matrix/)) - -## Camunda 8 Self-Managed - -We recommend running Camunda 8 Self-Managed in a Kubernetes environment. We provide officially supported [Helm Charts](/self-managed/platform-deployment/helm-kubernetes/overview.md) for this. Please follow the [Installation Guide](/self-managed/platform-deployment/overview.md) to learn more about installation possibilities. - -Requirements for the components can be seen below: - -| Component | Java version | Other requirements | -| ----------- | ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Zeebe | OpenJDK 17+ | Elasticsearch 7.16.x, 7.17.x (only if Elastic exporter is used), 8.5.x, 8.6.x, AWS OpenSearch 1.3.x (only if OpenSearch exporter is used)\* | -| Operate | OpenJDK 17+ | Elasticsearch 7.16.x, 7.17.x, 8.5.x, 8.6.x, AWS OpenSearch 1.3.x\* | -| Tasklist | OpenJDK 17+ | Elasticsearch 7.16.x, 7.17.x, 8.5.x, 8.6.x | -| Identity | OpenJDK 17+ | Keycloak 16.1.x, 18.x, 19.x, 21.x
    PostgreSQL 14.x, 15.x | -| Optimize | OpenJDK 11+ | Elasticsearch 7.16.x, 7.17.x, 8.5.x, 8.6.x, Identity 8.2.x | -| Web Modeler | - | Keycloak 16.1.x, 18.x, 19.x, 21.x
    PostgreSQL 13.x, 14.x, 15.x, 16.x, Amazon Aurora PostgreSQL 13.x, 14.x, 15x., 16.x (other database systems are currently not supported) | - -:::note Elasticsearch support -Camunda 8 works with the [default distribution](https://www.elastic.co/downloads/elasticsearch) of Elasticsearch. -::: - -:::note Amazon OpenSearch 1.3 Support -To use Amazon OpenSearch, the relevant OpenSearch image must be downloaded from [DockerHub](/self-managed/platform-deployment/docker.md#docker-images). -::: - -### Helm chart version matrix - -The core Camunda components have a unified fixed release schedule following the [release policy](./release-policy.md). However, some of the applications have their own schedule. The following compatibility matrix gives an overview of the different versions with respect to the Helm chart versions. - -For more details about the applications version included in the Helm chart, review the [full version matrix](https://helm.camunda.io/camunda-platform/version-matrix/). - -### Applications version matrix - -This overview shows which Zeebe version works with which Modeler, Operate, Tasklist and Optimize: - -| Design | Automate | | Improve | form-js | -| --------------------- | ----------- | ------------------------------------------------------------- | --------------- | ------- | -| Desktop Modeler 4.7+ | Zeebe 1.0.x | Operate 1.0.x Tasklist 1.0.x | - | 0.0.1 | -| Desktop Modeler 4.9+ | Zeebe 1.1.x | Operate 1.1.x Tasklist 1.1.x | - | 0.1.x | -| Desktop Modeler 4.11+ | Zeebe 1.2.x | Operate 1.2.x Tasklist 1.2.x IAM 1.2.x | - | 0.1.x | -| Desktop Modeler 4.12+ | Zeebe 1.3.x | Operate 1.3.x Tasklist 1.3.x IAM 1.3.x | Optimize 3.7.x | 0.1.x | -| Desktop Modeler 5.0+ | Zeebe 8.0.x | Operate 8.0.x Tasklist 8.0.x Identity 8.0.x | Optimize 3.8.x | 0.2.x | -| Desktop Modeler 5.4+ | Zeebe 8.1.x | Operate 8.1.x Tasklist 8.1.x Identity 8.1.x Connectors 0.23.0 | Optimize 3.9.x | 0.8.x | -| Desktop Modeler 5.10+ | Zeebe 8.2.x | Operate 8.2.x Tasklist 8.2.x Identity 8.2.x Connectors 0.23.2 | Optimize 3.10.x | 0.14.x | -| Web Modeler 8.2.x | Zeebe 8.2.x | Operate 8.2.x Tasklist 8.2.x Identity 8.2.x Connectors 0.23.2 | Optimize 3.10.x | 0.14.x | - -:::note -You can also use newer versions of Desktop and Web Modeler with older Zeebe versions. -::: - -## Camunda 7 & Optimize Version Matrix - -See https://docs.camunda.org/enterprise/download/#camunda-optimize. diff --git a/versioned_docs/version-8.2/reference/usage-metrics.md b/versioned_docs/version-8.2/reference/usage-metrics.md deleted file mode 100644 index 9d2fbeecd58..00000000000 --- a/versioned_docs/version-8.2/reference/usage-metrics.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: usage-metrics -title: "Usage metrics" -description: "Learn about the three main usage metrics that impact Camunda 8 pricing." ---- - -There are three main usage metrics that have an impact on Camunda 8 pricing. It is important to understand these definitions, their impact on billing, and how to retrieve them. - -## Definition of metrics - -### Root process instance - -The number of **root process instance** executions started. This is also known as process instances (PI). A **root process instance** has no parent process instance, i.e. it is a top-level execution. - -### Decision instance - -The number of evaluated **decision instances** (DI). A **decision instance** is a [DMN decision table](/components/modeler/dmn/decision-table.md) or a [DMN literal expression](/components/modeler/dmn/decision-literal-expression.md). 
In a Decision Requirements Diagram (DRD) each evaluated decision table or expression is counted separately. - -### Task user - -The number of **task users** (TU) that have served as assignees. - -## Retrieve metrics in SaaS - -On Camunda 8 SaaS an **Owner** or **Admin** of an organization can retrieve the information from the **Billing** page. - -You can access the **Billing** page by selecting **Organization Management** in the Camunda Console navigation bar. - - - -## Retrieve metrics on Self-Managed - -:::caution Important note for Enterprise users - -Some Enterprise agreements require the following indices from Elasticsearch for at least 18 months: - -For Operate and Tasklist, the metrics are stored in the `operate-metric-1.0.0_` and `tasklist-metric-1.0.0_` indices respectively. -::: - -On Camunda 8 Self-Managed, you can get the usage metrics in Operate and Tasklist. - -For **root process instances** and **decision instances**, follow the steps provided in the [Operate guide](/self-managed/operate-deployment/usage-metrics.md). - -For **task users**, follow the steps provided in the [Tasklist guide](/self-managed/tasklist-deployment/usage-metrics.md). - -:::note -If you are not running Tasklist or Operate, there is currently no way to retrieve usage metrics until future releases. Regardless, the metrics still need to be factored into any enterprise agreement and count towards any task user pricing. -::: diff --git a/versioned_docs/version-8.2/self-managed/about-self-managed.md b/versioned_docs/version-8.2/self-managed/about-self-managed.md deleted file mode 100644 index 38a5b97dbc3..00000000000 --- a/versioned_docs/version-8.2/self-managed/about-self-managed.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -id: about-self-managed -title: "Camunda 8 Self-Managed" -description: "Camunda 8 Self-Managed is a self-hosted alternative to using Camunda 8 SaaS." ---- - -import Components from './react-components/components.md' - -:::note - -Camunda 8 Self-Managed is not Camunda 7. [Find Camunda 7 documentation here](https://docs.camunda.org). - -::: - -Camunda 8 Self-Managed is a self-hosted alternative to using Camunda 8 SaaS. Building process automation solutions with Camunda 8 works similarly regardless of hosting and deployment. - -For more information on Camunda 8 SaaS, visit [What is Camunda 8?](../components/concepts/what-is-camunda-8.md) If you are new to Camunda 8, we recommend you start your journey with [Camunda 8 SaaS-based guides](../../guides/). - -The content in this section of the documentation includes: - -- Everything you need to download, configure, and work with each component of Camunda 8 Self-Managed. -- Features specific to Camunda 8 Self-Managed. - -To get started with your Self-Managed installation, see our [installation overview](./platform-deployment/overview.md). - -## Components - - - -## Architecture - -Camunda 8 Self-Managed consists of multiple web applications and gRPC services. The following example architecture diagram uses two [Ingress objects](./platform-deployment/helm-kubernetes/guides/ingress-setup.md#combined-ingress-setup) to access these services externally: an Ingress with HTTP(S) protocol for all web applications using a single domain, and another Ingress with gRPC protocol for the Zeebe workflow engine. 
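The combined setup can be sketched with two standard Kubernetes Ingress resources: one routing the web applications by path, and one dedicated to gRPC traffic for the Zeebe gateway. The manifest below is only an illustration; the host names match the examples in this section, but the service names, ports, and the ingress-nginx annotation are assumptions that depend on your Helm release and Ingress controller (see the linked Ingress setup guide for the supported configuration).

```yaml
# Sketch only: one HTTP Ingress for the web applications and one gRPC Ingress
# for the Zeebe gateway. Service names and ports are assumptions.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: camunda-web
spec:
  rules:
    - host: camunda.example.com
      http:
        paths:
          - path: /operate # additional paths (/identity, /tasklist, ...) follow the same pattern
            pathType: Prefix
            backend:
              service:
                name: camunda-operate # assumed service name
                port:
                  number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: camunda-zeebe-gateway
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "GRPC" # assumes ingress-nginx
spec:
  rules:
    - host: zeebe.camunda.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: camunda-zeebe-gateway # assumed service name
                port:
                  number: 26500
```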
- -![Camunda 8 Self-Managed Architecture Diagram - Combined Ingress](./assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png) - -In this configuration, Camunda 8 Self-Managed can be accessed as follows: - -- Identity, Operate, Optimize, Tasklist, Modeler: `https://camunda.example.com/[identity|operate|optimize|tasklist|modeler]` - - Web Modeler also exposes a WebSocket endpoint on `https://camunda.example.com/modeler-ws`. This is only used by the application itself and not supposed to be accessed by users directly. -- Keycloak authentication: `https://camunda.example.com/auth` -- Zeebe gateway: `grpc://zeebe.camunda.example.com` diff --git a/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png b/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png deleted file mode 100644 index f485d0bec4f..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-architecture-diagram-separated-ingress.png b/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-architecture-diagram-separated-ingress.png deleted file mode 100644 index 28a172341c0..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-architecture-diagram-separated-ingress.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-deployment-diagram-high-availability-mode.png b/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-deployment-diagram-high-availability-mode.png deleted file mode 100644 index 8fa4f15eb13..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/assets/camunda-platform-8-self-managed-deployment-diagram-high-availability-mode.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/backup-restore/backup-and-restore.md b/versioned_docs/version-8.2/self-managed/backup-restore/backup-and-restore.md deleted file mode 100644 index c8b4b0106eb..00000000000 --- a/versioned_docs/version-8.2/self-managed/backup-restore/backup-and-restore.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: backup-and-restore -title: "Backup and restore" -sidebar_label: "Backup and restore" -keywords: ["backup", "backups"] ---- - -:::note -This release introduces breaking changes for [Operate and Tasklist](./operate-tasklist-backup.md), as well as [Optimize](./optimize-backup.md). -::: - -You can use the backup feature of Camunda 8 Self-Managed to regularly back up the state of all of its components (Zeebe, Operate, Tasklist, and Optimize) without any downtime (except Web Modeler, see [the Web Modeler backup and restore documentation](./modeler-backup-and-restore.md)). - -A backup of a Camunda 8 cluster consists of a backup of Zeebe, Operate, Tasklist, Optimize, and exported Zeebe records in Elasticsearch. Since the data of these applications are dependent on each other, it is important that the backup is consistent across all components. The backups of individual components taken independently may not form a consistent recovery point. Therefore, you must take the backup of a Camunda 8 cluster as a whole. To ensure a consistent backup, follow the process described below. 
- -### Configure backup store - -To take backups, you must first configure backup storage. - -Operate, Tasklist, and Optimize use Elasticsearch as backend storage and use the snapshot feature of Elasticsearch for backing up their state. Therefore, you must configure a [snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html) in Elasticsearch. - -Zeebe stores its backup to an external storage and must be configured before the cluster is started. Refer to [Zeebe backup configuration](/self-managed/backup-restore/zeebe-backup-and-restore.md#configuration) for additional information. - -### Backup process - -The backup of each component and the backup of a Camunda 8 cluster is identified by an id. This means a backup `x` of Camunda 8 consists of backup `x` of Zeebe, backup `x` of Optimize, backup `x` of Operate, and backup `x` of Tasklist. The backup id must be an integer and greater than the previous backups. - -:::note -We recommend using the timestamp as the backup id. -::: - -To back up a Camunda 8 cluster, execute the following sequential steps: - -1. Trigger a backup `x` of Optimize. See [how to take an Optimize backup](/self-managed/backup-restore/optimize-backup.md). -2. Trigger a backup `x` of Operate. See [how to take an Operate backup](/self-managed/backup-restore/operate-tasklist-backup.md). -3. Trigger a backup `x` of Tasklist. See [how to take a Tasklist backup](/self-managed/backup-restore/operate-tasklist-backup.md). -4. Wait until the backup `x` of Optimize is complete. See [how to monitor an Optimize backup](/self-managed/backup-restore/optimize-backup.md). -5. Wait until the backup `x` of Operate is complete. See [how to monitor an Operate backup](/self-managed/backup-restore/operate-tasklist-backup.md). -6. Wait until the backup `x` of Tasklist is complete. See [how to monitor a Tasklist backup](/self-managed/backup-restore/operate-tasklist-backup.md). -7. Soft pause exporting in Zeebe (this feature is only available from 8.2.27, otherwise use Pause exporting). See [Zeebe management API](/self-managed/zeebe-deployment/operations/management-api.md). -8. Take a backup `x` of the exported Zeebe records in Elasticsearch using the Elasticsearch Snapshots API. - -``` - -PUT /_snapshot/my_repository/camunda_zeebe_records_backup_x -{ - "indices": "zeebe-record*", - "feature_states": ["none"] -} - -``` - -By default, the indices are prefixed with `zeebe-record`. If you have configured a different prefix when configuring Elasticsearch exporter in Zeebe, use this instead. - -9. Wait until the backup `x` of the exported Zeebe records is complete before proceeding. - Take a backup `x` of Zeebe. See [how to take a Zeebe backup](/self-managed/backup-restore/zeebe-backup-and-restore.md). -10. Wait until the backup `x` of Zeebe is completed before proceeding. See [how to monitor a Zeebe backup](/self-managed/backup-restore/zeebe-backup-and-restore.md). - Resume exporting in Zeebe. See [Zeebe management API](/self-managed/zeebe-deployment/operations/management-api.md). - -:::note -If any of the steps above fail, you may have to restart with a new backup id. Ensure exporting is resumed if the backup process is canceled in the middle of the process. -::: - -### Restore - -To restore a Camunda 8 cluster from a backup, all components must be restored from their backup corresponding to the same backup id: - -1. Start Zeebe, Operate, Tasklist, and Optimize. (To ensure templates/aliases etc. are created) -2. 
Confirm proper configuration (such as shards, replicas count, etc.) -3. Stop Operate, Tasklist, and Optimize. -4. Delete all indices. -5. Restore the state of [Operate](/self-managed/backup-restore/operate-tasklist-backup.md), [Tasklist](/self-managed/backup-restore/operate-tasklist-backup.md), and [Optimize](/self-managed/backup-restore/optimize-backup.md). -6. Restore `zeebe-records*` indices from Elasticsearch snapshot. -7. Restore [Zeebe](self-managed/backup-restore/zeebe-backup-and-restore.md). -8. Start Zeebe, Operate, Tasklist, and Optimize. diff --git a/versioned_docs/version-8.2/self-managed/backup-restore/modeler-backup-and-restore.md b/versioned_docs/version-8.2/self-managed/backup-restore/modeler-backup-and-restore.md deleted file mode 100644 index 2663d4f8429..00000000000 --- a/versioned_docs/version-8.2/self-managed/backup-restore/modeler-backup-and-restore.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: modeler-backup-and-restore -title: Backup and restore Web Modeler data -description: "How to perform a backup and restore of Web Modeler data." -keywords: ["backup", "backups"] ---- - -## Create backup - -To create a backup of Web Modeler data, you must back up the database that Web Modeler uses by following the instructions of the official [PostgreSQL documentation](https://www.postgresql.org/docs/current/backup-dump.html). - -For example, to create a backup of the database using `pg_dumpall`, use the following command: - -```bash -pg_dumpall -U -h -p -f dump.psql --quote-all-identifiers -Password: -``` - -`pg_dumpall` may ask multiple times for the same password. -The database will be dumped into `dump.psql`. - -:::note -Database dumps created with `pg_dumpall`/`pg_dump` can only be restored into a database with the same or later version of PostgreSQL, see [PostgreSQL documentation](https://www.postgresql.org/docs/current/app-pgdump.html#PG-DUMP-NOTES). -::: - -## Restore - -Backups can only be restored with downtime. -To restore the database dump, first ensure that Web Modeler is stopped. -Then, to restore the database use the following command: - -```bash -psql -U -h -p -f dump.psql -``` - -After the database has been restored, you can start Web Modeler again. - -:::warning -When restoring Web Modeler data from a backup, ensure that the ids of the users stored in your OIDC provider (e.g. Keycloak) do not change in between the backup and restore. -Otherwise, users may not be able to log in after the restore (see [Web Modeler's login troubleshooting guide](self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login.md#unique-constraint-violation)). -::: - -:::tip -Some vendors provide tools that help with database backups and restores, such as [AWS Backup](https://aws.amazon.com/getting-started/hands-on/amazon-rds-backup-restore-using-aws-backup/) or [Cloud SQL backups](https://cloud.google.com/sql/docs/postgres/backup-recovery/backups). -::: diff --git a/versioned_docs/version-8.2/self-managed/backup-restore/operate-tasklist-backup.md b/versioned_docs/version-8.2/self-managed/backup-restore/operate-tasklist-backup.md deleted file mode 100644 index d6a74323f86..00000000000 --- a/versioned_docs/version-8.2/self-managed/backup-restore/operate-tasklist-backup.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -id: operate-tasklist-backup -title: Backup and restore Operate and Tasklist data -description: "How to perform a backup and restore of Operate and Tasklist data." 
-keywords: ["backup", "backups"] ---- - -:::note -This release introduces breaking changes, including: - -- The [get backup state API and response codes](#get-backup-state-api). -- The utilized URL has changed. For example, `curl 'http://localhost:8080/actuator/backups'` rather than the previously used `backup`. -- `backupId` must be of integer type now instead of string, which is in sync with Zeebe `backupId` requirements. - -::: - -Operate stores its data over multiple indices in Elasticsearch. Backup of Operate data includes several -Elasticsearch snapshots containing sets of Operate indices. Each backup is identified by `backupId`. For example, a backup with an id of `123` may contain the following Elasticsearch snapshots: - -``` -camunda_operate_123_8.1.0_part_1_of_6 -camunda_operate_123_8.1.0_part_2_of_6 -camunda_operate_123_8.1.0_part_3_of_6 -camunda_operate_123_8.1.0_part_4_of_6 -camunda_operate_123_8.1.0_part_5_of_6 -camunda_operate_123_8.1.0_part_6_of_6 -``` - -Operate provides an API to perform a backup and manage backups (list, check state, delete). Restore a backup using the standard Elasticsearch API. - -:::note -The backup API can be reached via the Actuator management port, which by default is the same as application HTTP port (and in turn defaults to 8080). The port may be reconfigured with the help of `management.server.port` configuration parameter. -::: - -## Prerequisites - -Before you can use the backup and restore feature: - -1. The Elasticsearch repository must be configured. -2. Operate and Tasklist must be configured with the repository name using the following configuration parameters: - -```yaml -for Operate: -camunda.operate: backup.repositoryName= - -for Tasklist: -camunda.tasklist: backup.repositoryName= -``` - -or with environmental variables: - -``` -for Operate: -CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= - -for Tasklist: -CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME= - -``` - -## Create backup API - -During backup creation Operate can continue running. To create the backup, call the following endpoint: - -``` -POST actuator/backups -{ - "backupId": -} -``` - -Response: - -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 200 OK | Backup was successfully started, snapshots will be created asynchronously. List of snapshots is returned in the response body (see example below). This list must be persisted together with the backup id to be able to restore it later. | -| 400 Bad Request | In case something is wrong with `backupId`, e.g. the same backup id already exists. | -| 500 Server Error | All other errors, e.g. ES returned error response when attempting to create a snapshot. | -| 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. 
| - -Example request: - -``` -curl --request POST 'http://localhost:8080/actuator/backups' \ --H 'Content-Type: application/json' \ --d '{ "backupId": 123 }' -``` - -Example response: - -```json -{ - "scheduledSnapshots": [ - "camunda_operate_123_8.2.0_part_1_of_6", - "camunda_operate_123_8.2.0_part_2_of_6", - "camunda_operate_123_8.2.0_part_3_of_6", - "camunda_operate_123_8.2.0_part_4_of_6", - "camunda_operate_123_8.2.0_part_5_of_6", - "camunda_operate_123_8.2.0_part_6_of_6" - ] -} -``` - -## Get backup state API - -As a backup is created asynchronously, call the following endpoint to check the state of the backup: - -``` -GET actuator/backups/{backupId} -``` - -Response: - -| Code | Description | -| ---------------- | --------------------------------------------------------------------------------------- | -| 200 OK | Backup state could be determined and is returned in the response body. | -| 404 Not Found | Backup with given id does not exist. | -| 500 Server Error | All other errors, e.g. ES returned error response when attempting to execute the query. | -| 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. | - -For example, the request could look like this: - -``` -curl 'http://localhost:8080/actuator/backups/123' -``` - -Example response: - -```json -{ - "backupId": 123, - "state": "COMPLETED", - "failureReason": null, - "details": [ - //here goes the list of all Elasticsearch snapshots included in the backup - { - "snapshotName": "camunda_operate_123_8.2.0_part_1_of_6", - "state": "SUCCESS", - "startTime": "2023-01-01T10:10:10.100+0000", - "failures": [] - }, - <..> - ] -} -``` - -Possible **states** of the backup: - -- `COMPLETED`: Backup can be used for restoring the data. -- `IN_PROGRESS`: Wait until the backup completes to use it for restore. -- `FAILED`: Something went wrong when creating this backup. To find out the exact problem, use the [Elasticsearch get snapshot status API](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-status-api.html) for each of the snapshots included in the given backup. -- `INCOMPATIBLE`: Backup is incompatible with the current Elasticsearch version. -- `INCOMPLETE`: Backup is incomplete (e.g. when backup process was interrupted). - -**State** of the individual snapshot is a copy of [Elasticsearch state](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/get-snapshot-api.html#get-snapshot-api-response-state). - -## Get backups list API - -To get the list of existing backups, the following endpoint can be used: - -``` -GET actuator/backups -``` - -Response: - -| Code | Description | -| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -| 200 OK | Backup list could be determined and is returned in the response body. Can be an empty response in case no backups were created yet. | -| 404 Not Found | Backup repository is not configured. | -| 500 Server Error | All other errors, e.g. ES returned error response when attempting to execute the query. | -| 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. | - -For example, the request could look like this: - -``` -curl 'http://localhost:8080/actuator/backups' -``` - -Response will contain JSON with array of objects representing state of each backup (see [get backup state API endpoint](#get-backup-state-api)). 
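-
-For example, assuming the endpoint returns a plain JSON array as described above and `jq` is installed, the list can be narrowed down to the ids of backups that are safe to restore from (a convenience sketch, not part of the Operate API itself):
-
-```bash
-# Print the ids of all COMPLETED backups; assumes the default management port 8080.
-curl -s 'http://localhost:8080/actuator/backups' \
-  | jq '[.[] | select(.state == "COMPLETED") | .backupId]'
-```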
- -## Delete backup API - -To delete all the Elasticsearch snapshots associated with the specific backup id, the following endpoint may be used: - -``` -DELETE actuator/backups/123 -``` - -Response: - -| Code | Description | -| ---------------- | ---------------------------------------------------------------------------------------------------------------------------- | -| 204 No Content | All commands to delete corresponding ELS snapshots were successfully sent to ELS. ELS will continue deletion asynchronously. | -| 404 Not Found | Not a single snapshot corresponding to given ID exist. | -| 500 Server Error | All other errors, e.g. ES returned error response when attempting to execute the query. | -| 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. | - -## Restore backup - -There is no Operate API to preform the backup restore. Instead, use the [Elasticsearch restore snapshot API](https://www.elastic.co/guide/en/elasticsearch/reference/current/restore-snapshot-api.html). - -:::note -Operate must **not** be running while a backup restore is taking place. -::: - -To restore the backup with a known backup id, you must restore all the snapshots this backup contains (check the response of the [create backup API](#create-backup-api)). - -Example of Elasticsearch query: - -``` -curl --request POST `http://localhost:9200/_snapshot/test/camunda_operate_123_8.1.0-snapshot_part_1_of_6/_restore?wait_for_completion=true` -``` - -To summarize, the process may look as follows: - -1. Stop Operate. -2. Ensure there are no Operate indices present in Elasticsearch (otherwise the restore process will fail). -3. Iterate over all Elasticsearch snapshots included in the desired backup and restore them using the Elasticsearch restore snapshot API. -4. Start Operate. - -## Backup and restore of Tasklist data - -Backup and restore of Tasklist may be performed in exactly the same way as [Operate data](#). diff --git a/versioned_docs/version-8.2/self-managed/backup-restore/optimize-backup.md b/versioned_docs/version-8.2/self-managed/backup-restore/optimize-backup.md deleted file mode 100644 index 6fa4f716420..00000000000 --- a/versioned_docs/version-8.2/self-managed/backup-restore/optimize-backup.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -id: optimize-backup -title: Backup and restore Optimize data -description: "How to perform a backup of Optimize data and restore the backup." -keywords: ["backup", "backups"] ---- - -:::note -This release introduces breaking changes, including the utilized URL. - -For example, `curl 'http://localhost:8080/actuator/backups'` rather than the previously used `backup`. -::: - -Optimize stores its data over multiple indices in Elasticsearch. To ensure data integrity across indices, a backup of Optimize data consists of two Elasticsearch snapshots, each containing a different set of Optimize indices. Each backup is identified by a positive integer backup ID. For example, a backup with ID `123456` consists of the following Elasticsearch snapshots: - -``` -camunda_optimize_123456_3.9.0_part_1_of_2 -camunda_optimize_123456_3.9.0_part_2_of_2 -``` - -Optimize provides an API to trigger a backup and retrieve information about a given backup's state. During backup creation Optimize can continue running. The backed up data can later be restored using the standard Elasticsearch snapshot restore API. - -## Prerequisites - -The following prerequisites must be set up before using the backup API: - -1. 
A snapshot repository of your choice must be registered with Elasticsearch. -2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize configuration: - -```yaml -backup: - repositoryName: -``` - -## Create backup API - -Note that the backup API can be reached via the `/actuator` management port, which by default is `8092`. -The following endpoint can be used to trigger the backup process: - -``` -POST actuator/backups -{ - "backupId": -} -``` - -### Response - -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -| 202 Accepted | Backup process was successfully initiated. To determine whether backup process was completed refer to the GET API. | -| 400 Bad Request | Indicates issues with the request, for example when the `backupId` contains invalid characters. | -| 409 Conflict | Indicates that a backup with the same `backupId` already exists. | -| 500 Server Error | All other errors, e.g. issues communicating with Elasticsearch for snapshot creation. Refer to the returned error message for more details. | -| 502 Bad Gateway | Optimize has encountered issues while trying to connect to Elasticsearch. | - -### Example request - -``` -curl --request POST 'http://localhost:8092/actuator/backups' \ --H 'Content-Type: application/json' \ --d '{ "backupId": 123456 }' -``` - -### Example response - -```json -{ - "message": "Backup creation for ID 123456 has been scheduled. Use the GET API to monitor completion of backup process" -} -``` - -## Get backup info API - -Note that the backup API can be reached via the `/actuator` management port, which by default is `8092`. -Information about a specific backup can be retrieved using the following request: - -``` -GET actuator/backups/{backupId} -``` - -Information about all existing Optimize backups can be retrieved by omitting the optional `backupId` parameter: - -``` -GET actuator/backup -``` - -### Response - -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 200 OK | Backup state could be determined and is returned in the response body (see example below). | -| 400 Bad Request | There is an issue with the request, for example the repository name specified in the Optimize configuration does not exist. Refer to returned error message for details. | -| 404 Not Found | If a backup ID was specified, no backup with that ID exists. | -| 500 Server Error | All other errors, e.g. issues communicating with Elasticsearch for snapshot state retrieval. Refer to the returned error message for more details. | -| 502 Bad Gateway | Optimize has encountered issues while trying to connect to Elasticsearch. 
| - -### Example request - -``` -curl ---request GET 'http://localhost:8092/actuator/backups/123456' -``` - -### Example response - -```json - { - "backupId": 123456, - "failureReason": null, - "state": "COMPLETE", - “details”: [ - { - "snapshotName": "camunda_optimize_123456_3.10.0_part_1_of_2", - "state": "SUCCESS", - "startTime": "2022-11-09T10:11:36.978+0100", - "failures": [] - }, - { - "snapshotName": "camunda_optimize_123456_3.10.0_part_2_of_2", - "state": "SUCCESS", - "startTime": "2022-11-09T10:11:37.178+0100", - "failures": [] - } - ] - } -``` - -Note that the endpoint will return a single item when called with a `backupId` and a list of items when called without specifying a `backupId`. - -Possible states of the backup: - -- `COMPLETE`: The backup can be used for restoring data. -- `IN_PROGRESS`: The backup process for this backup ID is still in progress. -- `FAILED`: Something went wrong when creating this backup. To find out the exact problem, use the [Elasticsearch get snapshot status API](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/get-snapshot-status-api.html) for each of the snapshots included in the given backup. -- `INCOMPATIBLE`: The backup is incompatible with the current Elasticsearch version. -- `INCOMPLETE`: The backup is incomplete (this could occur when the backup process was interrupted or individual snapshots were deleted). - -## Delete backup API - -Note that the backup API can be reached via the `/actuator` management port, which by default is `8092`. -An existing backup can be deleted using the below API which deletes all Optimize snapshots associated with the supplied backupID. - -``` -DELETE actuator/backups/{backupId} -``` - -### Response - -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 204 No Content | The delete request for the associated snapshots was submitted to Elasticsearch successfully. | -| 400 Bad Request | There is an issue with the request, for example the repository name specified in the Optimize configuration does not exist. Refer to returned error message for details. | -| 500 Server Error | An error occurred, for example the snapshot repository does not exist. Refer to the returned error message for details. | -| 502 Bad Gateway | Optimize has encountered issues while trying to connect to Elasticsearch. | - -### Example request - -``` -curl ---request DELETE 'http://localhost:8092/actuator/backups/123456' -``` - -## Restore backup - -There is no Optimize API to perform the backup restore. Instead, the standard [Elasticsearch restore snapshot API](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/restore-snapshot-api.html) can be used. Note that the Optimize versions of your backup snapshots must match the currently running version of Optimize. You can identify the version at which the backup was taken by the version tag included in respective snapshot names; for example, a snapshot with the name`camunda_optimize_123456_3.9.0_part_1_of_2` was taken of Optimize version `3.9.0`. - -:::note -Optimize must NOT be running while a backup is being restored. -::: - -To restore an existing backup, all the snapshots this backup contains (as listed in the response of the [create backup API request](#example-response)) must be restored using the Elasticsearch API. - -To restore a given backup, the following steps must be performed: - -1. Stop Optimize. -2. 
Ensure no Optimize indices are present in Elasticsearch (or the restore process will fail). -3. Iterate over all Elasticsearch snapshots included in the desired backup and restore them using the Elasticsearch restore snapshot API. -4. Start Optimize. - -Example Elasticsearch request: - -``` -curl --request POST `http://localhost:9200/_snapshot/repository_name/camunda_optimize_123456_3.9.0_part_1_of_2/_restore?wait_for_completion=true` -``` diff --git a/versioned_docs/version-8.2/self-managed/backup-restore/zeebe-backup-and-restore.md b/versioned_docs/version-8.2/self-managed/backup-restore/zeebe-backup-and-restore.md deleted file mode 100644 index f2c7274f345..00000000000 --- a/versioned_docs/version-8.2/self-managed/backup-restore/zeebe-backup-and-restore.md +++ /dev/null @@ -1,375 +0,0 @@ ---- -id: zeebe-backup-and-restore -title: "Backup and restore Zeebe data" -description: "Create a backup of a running Zeebe cluster comprised of a consistent snapshot of all partitions." -keywords: ["backup", "backups"] ---- - -A backup of a Zeebe cluster is comprised of a consistent snapshot of all partitions. The backup is taken asynchronously in the background while Zeebe is processing. Thus, the backups can be taken with minimal impact on normal processing. The backups can be used to restore a cluster in case of failures that lead to full data loss or data corruption. - -Zeebe provides a REST API to create backups, query, and manage existing backups. -The backup management API is a custom endpoint `backups`, available via [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/2.7.x/reference/htmlsingle/#actuator.endpoints). This is accessible via the management port of the gateway. The API documentation is also available as [OpenApi specification](https://github.com/camunda/camunda/blob/main/dist/src/main/resources/api/backup-management-api.yaml). - -## Configuration - -To use the backup feature in Zeebe, you must choose which external storage system you will use. -Make sure to set the same configuration on all brokers in your cluster. - -Zeebe supports [S3](#s3-backup-store) and [Google Cloud Storage (GCS)](#gcs-backup-store) for external storage. - -:::caution -Backups created with one store are not available or restorable from another store. - -This is especially relevant if you were using GCS through the S3 compatibility mode and want to switch to the new built-in support for GCS now. -Even when the underlying storage bucket is the same, backups from one are not compatible with the other. -::: - -### S3 backup store - -To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket: - -```yaml -zeebe: - broker: - data: - backup: - store: S3 - s3: - bucketName: - basePath: - region: - endpoint: - accessKey: - secretKey: -``` - -Alternatively, you can configure backup store using environment variables: - -- `ZEEBE_BROKER_DATA_BACKUP_STORE` - Set this to `S3` to store backups in S3 buckets. -- `ZEEBE_BROKER_DATA_BACKUP_S3_BUCKETNAME` - The backup is stored in this bucket. **The bucket must already exist**. -- `ZEEBE_BROKER_DATA_BACKUP_S3_BASEPATH` - If the bucket is shared with other Zeebe clusters, a unique basePath must be configured. -- `ZEEBE_BROKER_DATA_BACKUP_S3_ENDPOINT` - If no endpoint is provided, it is determined based on the configured region. 
-- `ZEEBE_BROKER_DATA_BACKUP_S3_REGION` - If no region is provided, it is determined [from the environment](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/region-selection.html#automatically-determine-the-aws-region-from-the-environment). -- `ZEEBE_BROKER_DATA_BACKUP_S3_ACCESSKEY` - If either `accessKey` or `secretKey` is not provided, the credentials are determined [from the environment](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/credentials.html#credentials-chain). -- `ZEEBE_BROKER_DATA_BACKUP_S3_SECRETKEY` - Specify the secret key. - -[AWS S3]: https://aws.amazon.com/s3/ -[MinIO]: https://min.io/ - -#### Backup Encryption - -Zeebe does not support backup encryption natively, but it _can_ use encrypted S3 buckets. For AWS S3, this means [enabling default bucket encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-bucket-encryption.html). - -Using default bucket encryption gives you control over the encryption keys and algorithms while being completely transparent with Zeebe. - -Combined with TLS between Zeebe and the S3 API, backups are fully encrypted in transit and at rest. Other S3 compatible services might have similar features that should work as well. - -#### Backup compression - -Backups can be large depending on your usage of Zeebe. To reduce S3 storage costs and upload times, you can enable backup compression. - -Zeebe compresses backup data immediately before uploading to S3 and buffers the compressed files in a temporary directory. Compression and buffering of compressed files can have a negative effect if Zeebe is heavily resource constrained. - -You can enable compression by specifying a compression algorithm to use. We recommend using [zstd] as it provides a good trade off between compression ratio and resource usage. - -More compression algorithms are available; check [commons-compress] for a full list. - -```yaml -zeebe.broker.data.backup.s3.compression: zstd # or use environment variable ZEEBE_BROKER_DATA_BACKUP_S3_COMPRESSION -``` - -[zstd]: https://github.com/facebook/zstd -[commons-compress]: https://commons.apache.org/proper/commons-compress/ - -### GCS backup store - -To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use: - -```yaml -zeebe: - broker: - data: - backup: - store: GCS - gcs: - bucketName: # or use environment variable ZEEBE_BROKER_DATA_BACKUP_GCS_BUCKETNAME - basePath: # or use environment variable ZEEBE_BROKER_DATA_BACKUP_GCS_BASEPATH -``` - -The bucket specified with `bucketName` **must already exist**, Zeebe will not try to create one for you. -To prevent misconfiguration, Zeebe will check at startup that the specified bucket exists and can be accessed. - -Setting a `basePath` is not required but useful if you want to use the same bucket for multiple Zeebe clusters. -When `basePath` is set, Zeebe will only create and access objects under this path. -This can be any string that is a valid [object name](https://cloud.google.com/storage/docs/objects#naming), for example the name of your cluster. - -Authentication is handled by [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials). -In many cases, these credentials are automatically provided by the runtime environment. -If you need more control, you can customize authentication by [setting environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC) `GOOGLE_APPLICATION_CREDENTIALS`. 
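-
-As with the S3 store, the same settings can be provided as environment variables, which is often more convenient for container-based deployments. A minimal sketch; the bucket name, base path, and credentials file below are placeholders for your own values:
-
-```bash
-# Select the GCS backup store and point Zeebe at an existing bucket.
-export ZEEBE_BROKER_DATA_BACKUP_STORE=GCS
-export ZEEBE_BROKER_DATA_BACKUP_GCS_BUCKETNAME=zeebe-backups-example
-export ZEEBE_BROKER_DATA_BACKUP_GCS_BASEPATH=my-cluster
-# Optional: explicit Application Default Credentials instead of runtime-provided ones.
-export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-key.json
-```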
- -#### Backup encryption - -There are multiple [data encryption options](https://cloud.google.com/storage/docs/encryption), some of which are supported by Zeebe: - -- [Default server-side encryption](https://cloud.google.com/storage/docs/encryption/default-keys) is fully supported. - This is enabled by default for all GCS buckets. -- [Customer-managed encryption keys](https://cloud.google.com/storage/docs/encryption/customer-managed-keys) are supported if they are [set as - the default key](https://cloud.google.com/storage/docs/encryption/using-customer-managed-keys#set-default-key) for your bucket. -- [Customer-supplied encryption keys](https://cloud.google.com/storage/docs/encryption/customer-supplied-keys) are not supported. -- [Client-side encryption keys](https://cloud.google.com/storage/docs/encryption/client-side-keys) are not supported. - -## Create backup API - -The following request can be used to start a backup. - -### Request - -``` -POST actuator/backups -{ - "backupId": -} -``` - -A `backupId` is an integer and must be greater than the id of previous backups that are completed, failed, or deleted. -Zeebe does not take two backups with the same ids. If a backup fails, a new `backupId` must be provided to trigger a new backup. -The `backupId` cannot be reused, even if the backup corresponding to the backup id is deleted. - -
    - Example request - -``` -curl --request POST 'http://localhost:9600/actuator/backups' \ --H 'Content-Type: application/json' \ --d '{ "backupId": "100" }' -``` - -
    - -### Response - -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------ | -| 202 Accepted | A Backup has been successfully scheduled. To determine if the backup process was completed, refer to the GET API. | -| 400 Bad Request | Indicates issues with the request, for example when the `backupId` is not valid or backup is not enabled on the cluster. | -| 409 Conflict | Indicates a backup with the same `backupId` or a higher id already exists. | -| 500 Server Error | All other errors. Refer to the returned error message for more details. | -| 502 Bad Gateway | Zeebe has encountered issues while communicating to different brokers. | -| 504 Timeout | Zeebe failed to process the request within a pre-determined timeout. | - -
    - Example response body with 202 Accepted - -```json -{ - "message": "A backup with id 100 has been scheduled. Use GET actuator/backups/100 to monitor the status." -} -``` - -
    - -## Get backup info API - -Information about a specific backup can be retrieved using the following request: - -### Request - -``` -GET actuator/backups/{backupId} -``` - -
    - Example request - -``` -curl --request GET 'http://localhost:9600/actuator/backups/100' -``` - -
    - -### Response - -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------ | -| 200 OK | Backup state could be determined and is returned in the response body (see example below). | -| 400 Bad Request | There is an issue with the request. Refer to the returned error message for details. | -| 404 Not Found | A backup with that ID does not exist. | -| 500 Server Error | All other errors. Refer to the returned error message for more details. | -| 502 Bad Gateway | Zeebe has encountered issues while communicating to different brokers. | -| 504 Timeout | Zeebe failed to process the request within a pre-determined timeout. | - -When the response is 200 OK, the response body consists of a JSON object describing the state of the backup. - -- `backupId`: Id in the request. -- `state`: Gives the overall status of the backup. The state can be one of the following: - - `COMPLETED` if all partitions have completed the backup. - - `FAILED` if at least one partition has failed. In this case, `failureReason` contains a string describing the reason for failure. - - `INCOMPLETE` if at least one partition's backup does not exist. - - `IN_PROGRESS` if at least one partition's backup is in progress. -- `details`: Gives the state of each partition's backup. -- `failureReason`: The reason for failure if the state is `FAILED`. - -
    - Example response body with 200 OK - -```json -{ - "backupId": 100, - "details": [ - { - "brokerVersion": "8.2.0-SNAPSHOT", - "checkpointPosition": 5, - "createdAt": "2022-12-08T13:00:55.344276672Z", - "lastUpdatedAt": "2022-12-08T13:00:55.805351556Z", - "partitionId": 1, - "snapshotId": "2-1-3-2", - "state": "COMPLETED" - }, - { - "brokerVersion": "8.2.0-SNAPSHOT", - "checkpointPosition": 7, - "createdAt": "2022-12-08T13:00:55.370965069Z", - "lastUpdatedAt": "2022-12-08T13:00:55.84756566Z", - "partitionId": 2, - "snapshotId": "3-1-5-3", - "state": "COMPLETED" - } - ], - "state": "COMPLETED" -} -``` - -
    - -## List backups API - -Information about all backups can be retrieved using the following request: - -### Request - -``` -GET actuator/backups -``` - -
    - Example request - -``` -curl --request GET 'http://localhost:9600/actuator/backups' -``` - -
    - -### Response - -| Code | Description | -| ---------------- | ------------------------------------------------------------------------------------------ | -| 200 OK | Backup state could be determined and is returned in the response body (see example below). | -| 400 Bad Request | There is an issue with the request. Refer to returned error message for details. | -| 500 Server Error | All other errors. Refer to the returned error message for more details. | -| 502 Bad Gateway | Zeebe has encountered issues while communicating to different brokers. | -| 504 Timeout | Zeebe failed to process the request with in a pre-determined timeout. | - -When the response is 200 OK, the response body consists of a JSON object with a list of backup info. -See [get backup info API response](#response-1) for the description of each field. - -
    - Example response body with 200 OK - -```json -[ - { - "backupId": 100, - "details": [ - { - "brokerVersion": "8.2.0-SNAPSHOT", - "createdAt": "2022-12-08T13:00:55.344276672Z", - "partitionId": 1, - "state": "COMPLETED" - }, - { - "brokerVersion": "8.2.0-SNAPSHOT", - "createdAt": "2022-12-08T13:00:55.370965069Z", - "partitionId": 2, - "state": "COMPLETED" - } - ], - "state": "COMPLETED" - }, - { - "backupId": 200, - "details": [ - { - "brokerVersion": "8.2.0-SNAPSHOT", - "createdAt": "2022-12-08T13:01:15.27750375Z", - "partitionId": 1, - "state": "COMPLETED" - }, - { - "brokerVersion": "8.2.0-SNAPSHOT", - "createdAt": "2022-12-08T13:01:15.279995106Z", - "partitionId": 2, - "state": "COMPLETED" - } - ], - "state": "COMPLETED" - } -] -``` - -
    - -## Delete backup API - -A backup can be deleted using the following request: - -### Request - -``` -DELETE actuator/backups/{backupId} -``` - -
    - Example request - -``` -curl --request DELETE 'http://localhost:9600/actuator/backups/100' -``` - -
    - -### Response - -| Code | Description | -| ---------------- | -------------------------------------------------------------------------------- | -| 204 No Content | The backup has been deleted. | -| 400 Bad Request | There is an issue with the request. Refer to returned error message for details. | -| 500 Server Error | All other errors. Refer to the returned error message for more details. | -| 502 Bad Gateway | Zeebe has encountered issues while communicating to different brokers. | -| 504 Timeout | Zeebe failed to process the request with in a pre-determined timeout. | - -## Restore - -A new Zeebe cluster can be created from a specific backup. Camunda provides a standalone app which must be run on each node where a Zeebe broker will be running. This is a Spring Boot application similar to the broker and can run using the binary provided as part of the distribution. The app can be configured the same way a broker is configured - via environment variables or using the configuration file located in `config/application.yaml`. - -To restore a Zeebe cluster, run the following in each node where the broker will be running: - -``` -tar -xzf zeebe-distribution-X.Y.Z.tar.gz -C zeebe/ -./bin/restore --backupId= -``` - -If restore was successful, the app exits with a log message of `Successfully restored broker from backup`. - -Restore fails if: - -- There is no valid backup with the given backupId. -- Backup store is not configured correctly. -- The configured data directory is not empty. -- Any other unexpected errors. - -If the restore fails, you can re-run the application after fixing the root cause. - -:::note -When restoring, provide the same configuration (node id, data directory, cluster size, and replication count) as the broker that will be running in this node. The partition count must be same as in the backup. -::: diff --git a/versioned_docs/version-8.2/self-managed/concepts/access-control/apis.md b/versioned_docs/version-8.2/self-managed/concepts/access-control/apis.md deleted file mode 100644 index 516355a9065..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/access-control/apis.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: apis -title: "APIs" -sidebar_label: "APIs" ---- - -An API refers to a service that provides resources which can control -access via permissions. - -In [Identity](/self-managed/identity/what-is-identity.md), we use APIs to attach [Permissions](/self-managed/concepts/access-control/permissions.md). Once they have been created, the -components in the Camunda 8 stack are able to allow or deny users certain functionality. diff --git a/versioned_docs/version-8.2/self-managed/concepts/access-control/applications.md b/versioned_docs/version-8.2/self-managed/concepts/access-control/applications.md deleted file mode 100644 index 3335e32dc3c..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/access-control/applications.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: applications -title: "Applications" -sidebar_label: "Applications" ---- - -To use [Identity](/self-managed/identity/what-is-identity.md) for authentication, -each deployed component requires an application to be created. - -When an application is created in the Identity UI, a client ID and client secret are generated and can be -used in the component configuration to allow authentication flows to happen. - -:::tip Want to learn how to add an application in [Identity](/self-managed/identity/what-is-identity.md)? 
-See our documentation on [adding an application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for more help. -::: - -## Types of applications - -There are three types of applications in Identity: confidential, machine-to-machine, and public. -A type is selected when [creating the application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) based on -its ability to securely store and use secrets, as well as the mode of authentication it uses. - -| Type | Secret | User login flow | M2M authentication | -| ------------ | ------ | --------------- | ------------------ | -| Confidential | Yes | Yes | Yes | -| M2M | Yes | No | Yes | -| Public | No | Yes | No | - -:::note -See more details on OAuth client types [here](https://oauth.net/2/client-types/), and more information specifically on confidential and public applications [here](https://auth0.com/docs/get-started/applications/confidential-and-public-applications). -::: - -## Permissions - -Access to the components within the stack can be controlled by the permissions assigned to an application. - -:::tip Want to learn how to assign a permission to an application in [Identity](/self-managed/identity/what-is-identity.md)? -See our user guide on [assigning a permission to an application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for more help. -::: diff --git a/versioned_docs/version-8.2/self-managed/concepts/access-control/groups.md b/versioned_docs/version-8.2/self-managed/concepts/access-control/groups.md deleted file mode 100644 index 460f2e14d05..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/access-control/groups.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -id: groups -title: "Groups" -sidebar_label: "Groups" ---- - -Groups are a way to apply a set of [roles](/self-managed/concepts/access-control/roles.md) and authorizations to [users](/self-managed/concepts/access-control/users.md). diff --git a/versioned_docs/version-8.2/self-managed/concepts/access-control/permissions.md b/versioned_docs/version-8.2/self-managed/concepts/access-control/permissions.md deleted file mode 100644 index 852464a1cee..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/access-control/permissions.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: permissions -title: "Permissions" -sidebar_label: "Permissions" -description: "Permissions allow you to control the level of access a user or an application has to a particular component. Typically, this is providing read or write access." ---- - -Permissions allow you to control the level of access a user or an application has to a particular component. Traditionally, this is often described as being able to provide "read" or "write" access. - -Permissions are assigned to [APIs](self-managed/concepts/access-control/apis.md) and can be grouped to form -[roles](/self-managed/concepts/access-control/roles.md). - -:::note -Refer to the [components documentation](../../../components/components-overview.md) to see what permissions are supported or required. 
-::: diff --git a/versioned_docs/version-8.2/self-managed/concepts/access-control/resource-authorizations.md b/versioned_docs/version-8.2/self-managed/concepts/access-control/resource-authorizations.md deleted file mode 100644 index 9b55aa3dd4f..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/access-control/resource-authorizations.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: resource-authorizations -title: "Resource authorizations" -sidebar_label: "Resource authorizations" -description: "Resource authorizations allow you to control the level of access a user, or group, has to a particular resource in the system." ---- - -:::caution -Resource authorizations are disabled by default and can be enabled by the use of environment variables. This feature must be enabled in all required components, see: - -- [Identity feature flags](../../../../self-managed/identity/deployment/configuration-variables/#feature-flags) -- [Operate resource based permissions](../../../../self-managed/operate-deployment/operate-authentication/#resource-based-permissions) -- [Tasklist resource based permissions](../../../../self-managed/tasklist-deployment/tasklist-authentication/#resource-based-permissions) - -::: - -Resource authorizations allow you to control the level of access a [user](self-managed/concepts/access-control/users.md) or -[group](self-managed/concepts/access-control/groups.md) has to a particular resource in the system. - -### Permissions or resource authorizations - -[Permissions](self-managed/concepts/access-control/permissions.md) are designed to control component access for a -[user](self-managed/concepts/access-control/users.md) or [role](self-managed/concepts/access-control/roles.md). Resource -authorizations, as described above, relate to the resources which may be used within a given component, such as a process definition. diff --git a/versioned_docs/version-8.2/self-managed/concepts/access-control/roles.md b/versioned_docs/version-8.2/self-managed/concepts/access-control/roles.md deleted file mode 100644 index d7af779351f..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/access-control/roles.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -id: roles -title: "Roles" -sidebar_label: "Roles" ---- - -Roles are a way to group sets of [permissions](/self-managed/concepts/access-control/permissions.md) which can be -assigned to users using the [Identity](/self-managed/identity/what-is-identity.md) UI. diff --git a/versioned_docs/version-8.2/self-managed/concepts/access-control/users.md b/versioned_docs/version-8.2/self-managed/concepts/access-control/users.md deleted file mode 100644 index 09302242e32..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/access-control/users.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -id: users -title: "Users" -sidebar_label: "Users" ---- - -To use [Identity](/self-managed/identity/what-is-identity.md), an account is required. We refer to this account as a user. - -Access to the components within the stack can be controlled by the roles assigned to a user. - -:::note Want to know more about assigning roles? -See our user guide on [assigning a role](/self-managed/identity/user-guide/roles/add-assign-role.md) for more information. 
-::: diff --git a/versioned_docs/version-8.2/self-managed/concepts/authentication/m2m-tokens.md b/versioned_docs/version-8.2/self-managed/concepts/authentication/m2m-tokens.md deleted file mode 100644 index 60670e3001e..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/authentication/m2m-tokens.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -id: m2m-tokens -title: "Machine-to-machine (M2M) tokens" -sidebar_label: "Machine-to-machine (M2M) tokens" ---- - -A **machine-to-machine (M2M)** token is a token requested by one service so it can -communicate with another service acting as itself. - -In [Identity](/self-managed/identity/what-is-identity.md), we provide the ability to assign permissions to -an application. This functionality allows an application to perform the `client_credentials` flow to -retrieve a JWT token with permissions. - -The token generated can then be used to communicate with other applications in Camunda without -the need for user intervention. - -:::tip Want to learn how to generate an M2M token? -Head to our guide, [generating M2M tokens](/self-managed/identity/user-guide/authorizations/generating-m2m-tokens.md) -to find out more! -::: diff --git a/versioned_docs/version-8.2/self-managed/concepts/elasticsearch-privileges.md b/versioned_docs/version-8.2/self-managed/concepts/elasticsearch-privileges.md deleted file mode 100644 index bd696548dd9..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/elasticsearch-privileges.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -id: elasticsearch-privileges -title: "Elasticsearch privileges" ---- - -If you implement Camunda 8 with Elasticsearch as a service provider, you must configure Elasticsearch with the following [privileges](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html) in mind: - -## Cluster privileges - -- `monitor` - necessary for health check -- `manage_index_templates` to create and manage index schema on start up, if they don't already exist in Elasticsearch. -- _Optional_ `manage_ilm` - required only when ILM is enabled - -To use the [backup feature](/self-managed/backup-restore/backup-and-restore.md), you must have snapshot privileges: - -- `create_snapshot` -- `monitor_snapshot` - -When [updating](/guides/update-guide/introduction.md) to a newer version of Camunda 8 which requires data migration, Operate requires pipelines: - -- `manage_pipeline` - -More information on cluster privileges in Elasticsearch can be found [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-cluster). - -## Indices privileges - -- `create_index` -- `delete_index` -- `read` -- `write` -- `manage` -- _Optional_ `manage_ilm` - required only when ILM is enabled - -More information on indices privileges in Elasticsearch can be found [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-indices). diff --git a/versioned_docs/version-8.2/self-managed/concepts/exporters.md b/versioned_docs/version-8.2/self-managed/concepts/exporters.md deleted file mode 100644 index b8bf44e3ea1..00000000000 --- a/versioned_docs/version-8.2/self-managed/concepts/exporters.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -id: exporters -title: "Exporters" -description: "As Zeebe processes jobs and processes, or performs internal maintenance, it generates an ordered stream of records." ---- - -As Zeebe processes jobs and processes, or performs internal maintenance (e.g. 
raft failover), it generates an ordered stream of records. - -:::note - -Exporters are not available in Camunda 8 Software-as-a-Service (SaaS). - -::: - -![record-stream](img/exporters-stream.png) - -While the clients provide no way to inspect this stream directly, Zeebe can load -and configure user code that can process each record in the form of an exporter. - -An **exporter** provides a single entry point to process every record written on a stream. - -- Persist historical data by pushing it to an external data warehouse. -- Export records to a visualization tool (e.g. [zeebe-simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor)). - -Zeebe only loads exporters configured through the main Zeebe YAML configuration file. - -Once an exporter is configured, the next time Zeebe starts, the exporter -starts receiving records. Note that it is only guaranteed to see records -produced from that point on. - -Find a reference implementation in the form of the Zeebe-maintained -[Elasticsearch exporter](https://github.com/camunda/camunda/tree/stable/8.2/exporters/elasticsearch-exporter). - -The main impact exporters have on a Zeebe cluster is that they remove the burden -of persisting data indefinitely. - -Once data is not needed by Zeebe anymore, it queries its exporters to -know if it can be safely deleted, and if so, permanently erases it, thereby -reducing disk usage. - -:::note -If no exporters are configured, Zeebe automatically erases data when it is not necessary anymore. If you need historical data, you **must** configure an exporter to stream records into your external data warehouse. -::: - -Regardless of how an exporter is loaded (whether through an external JAR or not), -all exporters interact in the same way with the broker, which is defined by the -[exporter interface](https://github.com/camunda/camunda/blob/stable/8.2/exporter-api/src/main/java/io/camunda/zeebe/exporter/api/Exporter.java). - -## Loading - -Once configured, exporters are loaded as part of the broker startup phase, before -any processing is done. - -During the loading phase, the configuration for each exporter is validated, such that the broker will not start if: - -- An exporter ID is not unique -- An exporter points to a non-existent/non-accessible JAR -- An exporter points to a non-existent/non-instantiable class -- An exporter instance throws an exception in its `Exporter#configure` method. - -The last point is there to provide individual exporters to perform lightweight -validation of their configuration (e.g. fail if missing arguments). - -One caveat is that an instance of an exporter is created and immediately thrown away. Therefore, exporters should not perform any computationally -heavy work during instantiation/configuration. - -:::note -Zeebe creates a single isolated class loader for every JAR referenced by exporter configurations. If the same JAR is reused to define different exporters, these will share the same class loader. - -Therefore, different exporters can depend on the same third-party libraries without worrying about versions or class -name collisions. - -Additionally, exporters use the system class loader for system classes, or classes packaged as part of the Zeebe JAR. -::: - -Exporter-specific configuration is handled through the exporter's `[exporters.args]` -nested map. 
This provides a `Map` passed directly -in the form of a [configuration](https://github.com/camunda/camunda/tree/stable/8.2/exporter-api/src/main/java/io/camunda/zeebe/exporter/api/context/Configuration.java) object when the broker calls the `Exporter#configure(Configuration)` method. - -Configuration occurs at two different phases: during the broker startup phase, and -once every time a leader is elected for a partition. - -## Processing - -At any given point, there is exactly one leader node for a given partition. - -Whenever a node becomes the leader for a partition, it runs an instance of an -[exporter stream processor](https://github.com/camunda/camunda/tree/stable/8.2/broker/src/main/java/io/camunda/zeebe/broker/exporter/stream/ExporterDirector.java). - -This stream processor creates exactly one instance of each configured exporter, -and forwards every record written on the stream to each of these in turn. - -:::note -This implies there will be exactly one instance of every exporter for every partition. If you have four partitions, and at least four threads for processing, there are potentially four instances of your exporter exporting simultaneously. -::: - -Zeebe only guarantees at-least-once semantics. That is, a record is seen at least once by an exporter, maybe more. Cases where this may happen -include: - -- During reprocessing after raft failover (i.e. new leader election) -- On error if the position is not yet updated - -To reduce the amount of duplicate records an exporter processes, the stream -processor keeps track of the position of the last successfully exported record -for every single exporter. The position is sufficient since a stream is an ordered -sequence of records whose position is monotonically increasing. This position is -set by the exporter once it can guarantee a record is successfully -updated. - -:::note -Although Zeebe tries to reduce the amount of duplicate records an exporter must handle, it is likely it will have to. Therefore, it is necessary that export operations be idempotent. This can be implemented either in the exporter itself, but if it exports to an external system, it is recommended you perform deduplication there to reduce the load on Zeebe. Refer to the exporter-specific documentation for how this is meant to be achieved. -::: - -### Error handling - -If an error occurs during the `Exporter#open(Context)` phase, the stream -processor fails and is restarted, potentially fixing the error. Worst case -scenario, this means no exporter runs until these errors stop. - -If an error occurs during the `Exporter#close` phase, it is logged, but will -still allow other exporters to gracefully finish their work. - -If an error occurs during processing, we continuously retry the same record until -no error is produced. Worst case scenario, this means a failing exporter could bring -all exporters to a halt. Currently, exporter implementations are expected to -implement their own retry/error handling strategies, though this may change in the -future. - -### Performance impact - -Zeebe naturally incurs a performance impact for each loaded exporter. A slow -exporter slows down all other exporters for a given partition, and in the -worst case, could completely block a thread. - -It's therefore recommended to keep exporters as simple as possible, and perform -any data enrichment or transformation through the external system. 
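-
-Tying the configuration notes above together, registering an exporter from an external JAR might look as follows. This is a sketch using the environment-variable form of the broker configuration; the exporter id `myexporter`, JAR path, class name, and `url` argument are placeholders:
-
-```bash
-# Register a custom exporter; the broker validates the JAR and class at startup.
-export ZEEBE_BROKER_EXPORTERS_MYEXPORTER_JARPATH=/usr/local/zeebe/exporters/my-exporter.jar
-export ZEEBE_BROKER_EXPORTERS_MYEXPORTER_CLASSNAME=com.example.MyExporter
-# Exporter-specific argument, surfaced to the exporter through its args map.
-export ZEEBE_BROKER_EXPORTERS_MYEXPORTER_ARGS_URL=http://warehouse.example.com
-```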
diff --git a/versioned_docs/version-8.2/self-managed/concepts/img/exporters-stream.png b/versioned_docs/version-8.2/self-managed/concepts/img/exporters-stream.png deleted file mode 100644 index 7f8eff0fe36..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/concepts/img/exporters-stream.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/connectors-deployment/connectors-configuration.md b/versioned_docs/version-8.2/self-managed/connectors-deployment/connectors-configuration.md deleted file mode 100644 index e917d0dc6d4..00000000000 --- a/versioned_docs/version-8.2/self-managed/connectors-deployment/connectors-configuration.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -id: connectors-configuration -title: Configuration ---- - -You can configure the Connector runtime environment in the following ways: - -- The Zeebe instance to connect to. -- The Connector functions to run. -- The secrets that should be available to the Connectors. - -## Connecting to Zeebe - -In general, the Connector Runtime will respect all properties known to [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe). - -### SaaS - -To use Camunda 8 SaaS specify the connection properties: - -```bash -ZEEBE_CLIENT_CLOUD_CLUSTER-ID=xxx -ZEEBE_CLIENT_CLOUD_CLIENT-ID=xxx -ZEEBE_CLIENT_CLOUD_CLIENT-SECRET=xxx -ZEEBE_CLIENT_CLOUD_REGION=bru-2 -``` - -You can further configure separate connection properties for Camunda Operate (otherwise it will use the properties configured for Zeebe above): - -```bash -CAMUNDA_OPERATE_CLIENT_CLIENT-ID=xxx -CAMUNDA_OPERATE_CLIENT_CLIENT-SECRET=xxx -``` - -If you are connecting a local Connector runtime to a SaaS cluster, you may want to review our [guide to using Connectors in hybrid mode](/guides/use-connectors-in-hybrid-mode.md). - -### Local installation - -Zeebe: - -```bash -ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=127.0.0.1:26500 -ZEEBE_CLIENT_SECURITY_PLAINTEXT=true -``` - -If the Zeebe Gateway is set up with Camunda Identity-based authorization, [Zeebe client OAuth environment variables](../zeebe-deployment/security/client-authorization.md#environment-variables) must be provided. - -Connect to Operate locally using username and password: - -```bash -CAMUNDA_OPERATE_CLIENT_URL=http://localhost:8081 -CAMUNDA_OPERATE_CLIENT_USERNAME=demo -CAMUNDA_OPERATE_CLIENT_PASSWORD=demo -``` - -When running against a self-managed environment you might also need to configure the Keycloak endpoint to not use Operate username/password authentication: - -```bash -CAMUNDA_OPERATE_CLIENT_KEYCLOAK-URL=http://localhost:18080 -CAMUNDA_OPERATE_CLIENT_KEYCLOAK-REALM=camunda-platform -``` - -### Disable Operate connectivity - -Disabling Operate polling will lead to inability to use inbound (e.g., webhook) capabilities. -However, if you still wish to do so, you need to start your Connector runtime with the following environment variables: - -```bash -CAMUNDA_CONNECTOR_POLLING_ENABLED=false -CAMUNDA_CONNECTOR_WEBHOOK_ENABLED=false -OPERATE_CLIENT_ENABLED=false -``` - -## Manual discovery of Connectors - -By default, the Connector runtime picks up outbound Connectors available on the classpath automatically. 
-To disable this behavior, use the following environment variables to configure Connectors and their configuration explicitly: - -| Environment variable | Purpose | -| :-------------------------------------------- | :------------------------------------------------------------ | -| `CONNECTOR_{NAME}_FUNCTION` (required) | Function to be registered as job worker with the given `NAME` | -| `CONNECTOR_{NAME}_TYPE` (optional) | Job type to register for worker with `NAME` | -| `CONNECTOR_{NAME}_INPUT_VARIABLES` (optional) | Variables to fetch for worker with `NAME` | - -Through that configuration, you define all job workers to run. - -Specifying optional values allow you to override `@OutboundConnector`-provided Connector configuration. - -```bash -CONNECTOR_HTTPJSON_FUNCTION=io.camunda.connector.http.HttpJsonFunction -CONNECTOR_HTTPJSON_TYPE=non-default-httpjson-task-type -``` - -## Secrets - -Providing secrets to the runtime environment can be achieved in different ways, depending on your setup. - -### Secrets in Docker images - -To inject secrets into the [Docker images of the runtime](../platform-deployment/docker.md#connectors), they must be available in the environment of the Docker container. - -For example, you can inject secrets when running a container: - -```bash -docker run --rm --name=connectors -d \ - -v $PWD/connector.jar:/opt/app/ \ # Add a connector jar to the classpath - -e MY_SECRET=secret \ # Set a secret with value - -e SECRET_FROM_SHELL \ # Set a secret from the environment - --env-file secrets.txt \ # Set secrets from a file - camunda/connectors-bundle:latest -``` - -The secret `MY_SECRET` value is specified directly in the `docker run` call, -whereas the `SECRET_FROM_SHELL` is injected based on the value in the -current shell environment when `docker run` is executed. The `--env-file` -option allows using a single file with the format `NAME=VALUE` per line -to inject multiple secrets at once. - -### Secrets in manual installations - -In the [manual setup](../platform-deployment/manual.md#run-connectors), inject secrets during Connector execution by providing -them as environment variables before starting the runtime environment. You can, for example, export them beforehand as follows: - -```bash -export MY_SECRET='foo' -``` - -Reference the secret in the Connector's input in the prefixed style `{{secrets.MY_SECRET}}`. - -### Custom secret provider - -Create your own implementation of the `io.camunda.connector.api.secret.SecretProvider` interface that -[comes with the SDK](https://github.com/camunda/connector-sdk/blob/main/core/src/main/java/io/camunda/connector/api/secret/SecretProvider.java). - -Package this class and all its dependencies as a JAR, e.g. `my-secret-provider-with-dependencies.jar`. This needs to include a file -`META-INF/services/io.camunda.connector.api.secret.SecretProvider` that contains the fully qualified class name of your secret -provider implementation. Add this JAR to the runtime environment, depending on your deployment setup. -Your secret provider will serve secrets as implemented. 
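
As a rough illustration, a provider that resolves secrets from environment variables could look like the sketch below. It assumes the `SecretProvider` interface exposes a single `getSecret(String)` method; the class name, package, and the `CONNECTOR_SECRET_` prefix are made up for the example.

```java
package com.example.secrets;

import io.camunda.connector.api.secret.SecretProvider;

// Illustrative provider: resolves secrets from prefixed environment variables.
public class EnvironmentSecretProvider implements SecretProvider {

  @Override
  public String getSecret(final String name) {
    // For a secret referenced as {{secrets.MY_SECRET}}, look up CONNECTOR_SECRET_MY_SECRET.
    return System.getenv("CONNECTOR_SECRET_" + name);
  }
}
```

In this sketch, the `META-INF/services/io.camunda.connector.api.secret.SecretProvider` file inside the JAR would contain the single line `com.example.secrets.EnvironmentSecretProvider`.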
- -For Docker images, you can add the JAR by using volumes, for example: - -```bash -docker run --rm --name=connectors -d \ - -v $PWD/my-secret-provider-with-dependencies.jar:/opt/app/my-secret-provider-with-dependencies.jar \ # Specify secret provider - -e ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=ip.address.of.zeebe:26500 \ # Specify Zeebe address - -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=true \ # Optional: provide security configs to connect to Zeebe - camunda/connectors:latest -``` - -In manual installations, add the JAR to the `-cp` argument of the Java call: - -```bash -java -cp 'connector-runtime-application-VERSION-with-dependencies.jar:...:my-secret-provider-with-dependencies.jar' \ - io.camunda.connector.runtime.ConnectorRuntimeApplication -``` diff --git a/versioned_docs/version-8.2/self-managed/connectors-deployment/install-and-start.md b/versioned_docs/version-8.2/self-managed/connectors-deployment/install-and-start.md deleted file mode 100644 index a76440a307c..00000000000 --- a/versioned_docs/version-8.2/self-managed/connectors-deployment/install-and-start.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -id: install-and-start -title: Installation -description: "Let's get started with Connectors by installing and running them." ---- - -The concept of a [Connector](/components/connectors/introduction.md) consists of two parts: - -- The business logic is implemented by a [Connector function](/components/connectors/custom-built-connectors/connector-sdk.md#outbound-connector-runtime-logic) - and executed by a [Connector runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#runtime-environments). -- The user interface during modeling is provided using a [Connector template](/components/connectors/custom-built-connectors/connector-templates.md). - -## Connector runtime and function - -The Connector runtime environment can be installed using the supported [deployment options](/self-managed/platform-deployment/overview.md#deployment-options). - -Currently, we support an installation of Connectors with [Docker](/self-managed/platform-deployment/docker.md#connectors), -[Docker Compose](/self-managed/platform-deployment/docker.md#docker-compose), [Helm charts](/self-managed/platform-deployment/helm-kubernetes/overview.md), and the [manual setup](/self-managed/platform-deployment/manual.md#run-connectors). - -:::note -Inbound Connectors require Operate to be deployed as part of your Camunda Self-Managed installation. -If you don't use Operate with your cluster, you can still use Outbound Connectors. -::: - -## Connector templates - -For the modeling interface, you need to [provide Connector templates](/components/connectors/custom-built-connectors/connector-templates.md#providing-and-using-connector-templates). - -For the [out-of-the-box Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) provided by Camunda, -the Connectors Bundle project provides a set of all Connector templates related to one [release version](https://github.com/camunda/connectors-bundle/releases). -If you use the [Docker Compose](/self-managed/platform-deployment/docker.md#docker-compose) installation, you can thus fetch all Connector templates that match the versions of the Connectors used in the backend. 
-
-Alternatively, you can fetch the JSON templates from the respective connector's folder in the [bundle repository](https://github.com/camunda/connectors-bundle)
-at `connectors/{connector name}/element-templates`:
-
-| Connector | License |
-| ------------------------------ | ----------------------------------- |
-| Asana Connector | [Camunda Self-Managed Free Edition] |
-| Automation Anywhere Connector | [Camunda Self-Managed Free Edition] |
-| Amazon SNS Connector | [Camunda Self-Managed Free Edition] |
-| Amazon SQS Connector | [Camunda Self-Managed Free Edition] |
-| AWS Lambda Connector | [Camunda Self-Managed Free Edition] |
-| Camunda Operate Connector | [Camunda Self-Managed Free Edition] |
-| Easy Post Connector | [Camunda Self-Managed Free Edition] |
-| GitHub Connector | [Camunda Self-Managed Free Edition] |
-| GitHub Webhook Connector | [Camunda Self-Managed Free Edition] |
-| GitLab Connector | [Camunda Self-Managed Free Edition] |
-| Google Drive Connector | [Camunda Self-Managed Free Edition] |
-| Google Maps Platform Connector | [Camunda Self-Managed Free Edition] |
-| GraphQL Connector | [Camunda Self-Managed Free Edition] |
-| HTTP Webhook Connector | [Camunda Self-Managed Free Edition] |
-| Kafka Consumer Connector | [Camunda Self-Managed Free Edition] |
-| Kafka Producer Connector | [Camunda Self-Managed Free Edition] |
-| Microsoft Teams Connector | [Camunda Self-Managed Free Edition] |
-| OpenAI Connector | [Camunda Self-Managed Free Edition] |
-| Power Automate Connector | [Camunda Self-Managed Free Edition] |
-| RabbitMQ Connector | [Camunda Self-Managed Free Edition] |
-| REST Connector | [Apache 2.0] |
-| SendGrid Connector | [Camunda Self-Managed Free Edition] |
-| Slack Connector | [Camunda Self-Managed Free Edition] |
-| UiPath Connector | [Camunda Self-Managed Free Edition] |
-
-You can use the Connector templates as provided or modify them to your needs as described in our [Connector templates guide](/components/connectors/custom-built-connectors/connector-templates.md).
-
-Review our [Connectors Awesome List](https://github.com/camunda-community-hub/camunda-8-connectors/tree/main) to find more Connectors. diff --git a/versioned_docs/version-8.2/self-managed/identity/deployment/application-monitoring.md deleted file mode 100644 index d11a6d05b53..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/deployment/application-monitoring.md +++ /dev/null @@ -1,13 +0,0 @@ ----
-id: application-monitoring
-title: "Application monitoring"
-sidebar_label: "Application monitoring"
-description: "Understand how the Identity component operates in exposing the following endpoints."
----
-
-To help understand how the Identity component operates, we expose the following endpoints by default:
-
-| Endpoint | Default port | Purpose |
-| ---------------------- | ------------ | -------------------------------------------------------------------------- |
-| `/actuator/health` | `8082` | Provide the health status of the application, often used in health checks. |
-| `/actuator/prometheus` | `8082` | Provide operational application metrics.
| diff --git a/versioned_docs/version-8.2/self-managed/identity/deployment/configuration-variables.md b/versioned_docs/version-8.2/self-managed/identity/deployment/configuration-variables.md deleted file mode 100644 index 0867feda5d4..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/deployment/configuration-variables.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -id: configuration-variables -title: "Configuration variables" -sidebar_label: "Configuration variables" -description: "Learn more about core configuration, component configuration, database configuration, and feature flags." ---- - -As a Spring Boot application, Identity supports any standard -[Spring configuration](https://docs.spring.io/spring-boot/reference/features/external-config.html) method. - -### Core configuration - -| Environment variable | Description | Default value | -| ------------------------------------ | ----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `IDENTITY_AUTH_PROVIDER_BACKEND_URL` | Used to support container to container communication. | http://localhost:18080/auth/realms/camunda-platform | -| `IDENTITY_AUTH_PROVIDER_ISSUER_URL` | Used to denote the token issuer. | http://localhost:18080/auth/realms/camunda-platform | -| `IDENTITY_BASE_PATH` | Used to configure Identity to run on a subpath (Requires HTTPs for `IDENTITY_URL`). | | -| `IDENTITY_CLIENT_ID` | The client ID for the Identity client. | camunda-identity | -| `IDENTITY_CLIENT_SECRET` | The client secret for the Identity client. | | -| `IDENTITY_LOG_LEVEL` | The level of which to log messages at. | INFO | -| `IDENTITY_LOG_PATTERN` | The pattern to use when logging. | `%clr{%d{yyyy-MM-dd HH:mm:ss.SSS}}{faint} %clr{%5p} %clr{${sys:PID}}{magenta} %clr{---}{faint} %clr{[%15.15t]}{faint} %clr{%-40.40c{1.}}{cyan} %clr{:}{faint} %m%n%xwEx` | -| `IDENTITY_URL` | The URL of the Identity service. | http://localhost:8080 | -| `KEYCLOAK_REALM` | The name of the Keycloak Realm to connect to. | camunda-platform | -| `KEYCLOAK_SETUP_USER` | The username of a user with admin access to Keycloak. | admin | -| `KEYCLOAK_SETUP_PASSWORD` | The password of a user with admin access to Keycloak. | admin | -| `KEYCLOAK_SETUP_REALM` | The realm that the setup user is in. | master | -| `KEYCLOAK_SETUP_CLIENT_ID` | The client to use for authentication during setup of the provided Keycloak. | admin-cli | -| `KEYCLOAK_URL` | The URL of the Keycloak instance to use. | http://localhost:18080/auth | - -### Component configuration - -Identity supports component configuration using preset values. To configure a -component for use within Identity, set two variables: - -| Environment variable | Description | Default value | -| ------------------------------------ | ---------------------------------------------- | ------------- | -| `KEYCLOAK_INIT__SECRET` | The secret used for authentication flows. | No default | -| `KEYCLOAK_INIT__ROOT_URL` | The root URL of where the component is hosted. | No default | - -:::note -Identity supports the following values for the `` placeholder: `OPERATE`, `OPTIMIZE`, `TASKLIST`, -and `WEBMODELER`. - -For the `WEBMODELER` value, only the `KEYCLOAK_INIT__ROOT_URL` variable is required to be set. 
-::: - -### Database configuration - -| Environment variable | Description | -| ---------------------------- | --------------------------------------------------- | -| `IDENTITY_DATABASE_HOST` | The host of the database. | -| `IDENTITY_DATABASE_PORT` | The port of the database. | -| `IDENTITY_DATABASE_NAME` | The name of the database to connect to. | -| `IDENTITY_DATABASE_USERNAME` | The username of a user with access to the database. | -| `IDENTITY_DATABASE_PASSWORD` | The password of a user with access to the database. | - -:::note -There are no default values for the variables above. See -[supported environments](/reference/supported-environments.md#camunda-platform-8-self-managed) for a list of -supported databases. -::: - -### Feature flags - -Identity uses feature flag environment variables to enable and disable features; the supported flags are: - -| Environment variable | Description | Default value | -| ---------------------------- | --------------------------------------------- | ------------- | -| RESOURCE_PERMISSIONS_ENABLED | Controls the resource authorizations feature. | false | - -:::note -Setting the `RESOURCE_PERMISSIONS_ENABLED` flag to `true` requires a database connection. To configure a database -connection, see [database configuration](#database-configuration). -::: diff --git a/versioned_docs/version-8.2/self-managed/identity/deployment/starting-configuration.md b/versioned_docs/version-8.2/self-managed/identity/deployment/starting-configuration.md deleted file mode 100644 index d6b8649eb13..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/deployment/starting-configuration.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -id: starting-configuration-for-identity -title: "Starting configuration for Identity" -sidebar_label: "Starting configuration" -description: "Understand the set of base configurations to operate Identity correctly." ---- - -Identity requires a set of base configurations to operate correctly. When Identity is started, it will -create or update the following entities in Keycloak: - -### Clients - -| Name | Client ID | Service accounts | Created/updated with component | -| -------------------------------- | -------------------------------- | ---------------- | ------------------------------ | -| Identity | camunda-identity | enabled | All | -| Camunda Identity Resource Server | camunda-identity-resource-server | enabled | All | -| Operate | operate | enabled | Operate | -| Operate API | operate-api | enabled | Operate | -| Optimize | optimize | enabled | Optimize | -| Optimize API | optimize-api | enabled | Optimize | -| Tasklist | tasklist | enabled | Tasklist | -| Tasklist API | tasklist-api | enabled | Tasklist | -| Web Modeler | web-modeler | disabled | Web Modeler | -| Web Modeler API | web-modeler-api | enabled | Web Modeler | - -### Roles - -| Name | Created/updated with component | -| ----------- | ------------------------------ | -| Identity | All | -| Operate | Operate | -| Optimize | Optimize | -| Tasklist | Tasklist | -| Web Modeler | Web Modeler | - -### Client scopes - -| Name | Protocol | Description | -| ---------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| camunda-identity | openid-connect | A default client scope that contains mappers to augment the token generated with information required by the components of Camunda. 
Contains the mappers described in the [mappers](#mappers) section. | - -### Mappers - -| Name | Protocol Mapper | Description | -| ---------------- | --------------------------------- | --------------------------------------------------------------------------------------------------------- | -| email | oidc-usermodel-property-mapper | Adds the email user attribute to the `access`, `ID`, and `user info` tokens using the claim name `email`. | -| full name | oidc-full-name-mapper | Adds the user's full name to the `access`, `ID`, and `user info` tokens. | -| permissions | oidc-usermodel-client-role-mapper | Adds the user's client roles to the `access` token with the claim name `permissions.${client_id}`. | -| audience resolve | oidc-audience-resolve-mapper | Adds the audiences the user has access to in the `audience` claim. | diff --git a/versioned_docs/version-8.2/self-managed/identity/getting-started/img/identity-landing-page.png b/versioned_docs/version-8.2/self-managed/identity/getting-started/img/identity-landing-page.png deleted file mode 100644 index 678adf7da76..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/getting-started/img/identity-landing-page.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/getting-started/img/identity-login-page.png b/versioned_docs/version-8.2/self-managed/identity/getting-started/img/identity-login-page.png deleted file mode 100644 index 339aba6f9d8..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/getting-started/img/identity-login-page.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/getting-started/install-identity.md b/versioned_docs/version-8.2/self-managed/identity/getting-started/install-identity.md deleted file mode 100644 index 6cd65c792e9..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/getting-started/install-identity.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: install-identity -title: "Installation and first steps" -sidebar_label: "Installation and first steps" -description: "Learn more about installing Identity, accessing the UI, default users, the home screen, and more." ---- - -To use Identity, install it locally via Docker or Kubernetes. - -Follow the [installation guide](/self-managed/platform-deployment/overview.md) for more details on this process. - -## Accessing the UI - -As soon as Identity is started, you can access the login page and log in to the Identity application. - -Navigate to [localhost:8080](http://localhost:8080) to see the UI exposed by Identity. - -![identity-login-page](./img/identity-login-page.png) - -## Default user - -The configuration in this guide creates an example user during installation; use this account to log in: - -```text -Username: demo -Password: demo -``` - -:::note Want to create more users? -Creating a user in Identity is not currently supported. To create a user, see -[Keycloak's documentation on creating a user](https://www.keycloak.org/docs/19.0.3/server_admin/#proc-creating-user_server_administration_guide). -::: - -## Home screen - -You are directed to the home page once logged in successfully. 
-
-![identity-landing-page](./img/identity-landing-page.png) diff --git a/versioned_docs/version-8.2/self-managed/identity/troubleshooting/common-problems.md deleted file mode 100644 index d206f1a15bd..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/troubleshooting/common-problems.md +++ /dev/null @@ -1,80 +0,0 @@ ----
-id: common-problems
-title: "Common problems"
-sidebar_label: "Common problems"
-description: "Learn about solutions to common issues in Identity, such as complications connecting to Keycloak."
----
-
-## Problem: Identity is unable to connect to Keycloak
-
-If you are seeing an error message like the one below in your Identity service logs, there is an issue with the connection
-Identity is trying to make to the Keycloak service:
-
-```
-2022-07-04 15:52:04.250 ERROR 1 --- [main] i.c.i.i.k.config.KeycloakConfiguration : Failure #1. Unable to connect to Keycloak.
-```
-
-This can be caused by:
-
-- The Keycloak service has not started/is not ready.
-- Identity making requests from an external IP address.
-
-See details on resolving these issues below.
-
-### Solution 1: The Keycloak service has not started/is not ready
-
-The Keycloak service can take time to start due to the supporting systems.
-
-Keycloak is ready to accept connections when the following log lines are visible:
-
-```
-15:24:24,094 INFO [org.jboss.as] (Controller Boot Thread) WFLYSRV0025: Keycloak 16.1.1 (WildFly Core 18.0.4.Final) started in 33171ms - Started 718 of 1020 services (699 services are lazy, passive or on-demand)
-15:24:24,098 INFO [org.jboss.as] (Controller Boot Thread) WFLYSRV0060: Http management interface listening on http://127.0.0.1:9990/management
-15:24:24,100 INFO [org.jboss.as] (Controller Boot Thread) WFLYSRV0051: Admin console listening on http://127.0.0.1:9990
-```
-
-When the Keycloak service is ready for connections, start (or restart) the Identity pod.
-
-### Solution 2: Identity making requests from an external IP address
-
-By default, Keycloak requires TLS on requests that originate from what it considers to be an external source. The Keycloak
-documentation for [setting up SSL](https://www.keycloak.org/docs/19.0.3/server_installation/#_setting_up_ssl) maintains
-a list of what it considers to be an external IP address under the `external requests` section.
-
-The solution to this issue will depend largely on your environment. However, as a starting point, we suggest you consider
-these options:
-
-1. Configure the communication between the services (for example in a cluster) to use IP ranges that fall within the
-   ranges that Keycloak expects.
-2. If configuring the IP ranges is not an option, it is possible to disable the SSL requirement in Keycloak itself by completing the following steps:
-   1. In the `master` realm, set `Require SSL` to `none` by following the steps in [SSL modes](https://www.keycloak.org/docs/19.0.3/server_admin/#_ssl_modes).
-   2. Restart the Identity service.
-   3. In the `camunda-platform` realm, set `Require SSL` to `none` by following the steps in [SSL modes](https://www.keycloak.org/docs/19.0.3/server_admin/#_ssl_modes).
-   4. Restart the Identity service again. Identity should now start successfully.
-
-:::warning
-We recommend disabling the SSL requirement only in a development environment.
-::: - -## Problem: There is an issue in Keycloak where Identity crashloops - -There currently exists a [known issue in Keycloak](https://github.com/keycloak/keycloak/issues/12484) where Identity experiences a crashloop. You may see something similar to the following: - -``` -2023-12-08 04:35:17,142 ERROR [org.keycloak.services.error.KeycloakErrorHandler] (executor-thread-39) Uncaught server error: java.lang.IllegalStateException: Duplicate key openid-connect%Client ID (attempted merging values org.keycloak.models.ProtocolMapperModel@aa0f0e69 and org.keycloak.models.ProtocolMapperModel@9d856c11) - at java.base/java.util.stream.Collectors.duplicateKeyException(Collectors.java:135) - at java.base/java.util.stream.Collectors.lambda$uniqKeysMapAccumulator$1(Collectors.java:182) - at java.base/java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) - at java.base/java.util.stream.DistinctOps$1$2.accept(DistinctOps.java:174) -``` - -### Solution - -As a workaround, take the following steps for **all** Camunda clients: - -1. Log in to Keycloak as an administrator. -2. Go to the `camunda-platform` realm. -3. Click **Clients > Zeebe**. -4. Click **Client Scopes > zeebe-dedicated**. -5. You will likely see duplicates of `Client ID`, `Client IP Address`, and `Client Host`. Delete these until there is only one of each remaining. -6. Restart your Identity pod to ensure connection. diff --git a/versioned_docs/version-8.2/self-managed/identity/troubleshooting/troubleshoot-identity.md b/versioned_docs/version-8.2/self-managed/identity/troubleshooting/troubleshoot-identity.md deleted file mode 100644 index 625d48ef441..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/troubleshooting/troubleshoot-identity.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -id: troubleshoot-identity -title: "Troubleshooting Identity" -sidebar_label: "Overview" -description: "Learn how to resolve issues if the Identity pod crashloops or continually restarts, and more." ---- - -## Issue: Identity pod crashloops/continually restarts - -If the Identity pod crash loops, or the pod continually restarts, it is likely that there is an issue with the connection to Keycloak. - -1. Find the name of the Identity pod by running: - - ``` - kubectl get pods - ``` - - The output should look similar to: - - ``` - NAME READY STATUS RESTARTS AGE - c8-local-identity-6fd96d59c4-8lzxv 1/1 Running 2 (24s ago) 85s - c8-local-keycloak-0 1/1 Running 0 30m - c8-local-operate-69b765f7bb-hjcts 1/1 Running 0 30m - c8-local-postgresql-0 1/1 Running 0 30m - c8-local-zeebe-0 1/1 Running 0 30m - c8-local-zeebe-gateway-678f4c7bfb-w8ght 1/1 Running 0 30m - elasticsearch-master-0 1/1 Running 0 30m - ``` - -2. Using the pod name from the output above, view the logs: - ``` - kubectl logs - ``` -3. Observe the most recent logs for an error message or stacktrace, for example: - - ``` - 2022-07-04 15:52:04.250 ERROR 1 --- [main] i.c.i.i.k.config.KeycloakConfiguration : Failure #1. Unable to connect to Keycloak. - 2022-07-04 15:52:09.252 WARN 1 --- [main] i.c.i.i.k.config.KeycloakConfiguration : Retrying... - ``` - -:::note -If you are not able to resolve the problem after completing the steps in the sections above, take a look at the [common problems](/self-managed/identity/troubleshooting/common-problems.md) related -to the Identity component. 
-::: diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/additional-features/adding-an-api.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/additional-features/adding-an-api.md deleted file mode 100644 index 4fd1447011d..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/additional-features/adding-an-api.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -id: adding-an-api -title: "Adding an API" -sidebar_label: "Adding an API" -description: "In this guide we will show you how to use Identity to create an API." ---- - -In this guide we will show you how to use Identity to create an API. - -:::tip Want to learn more about APIs? -Head over to our documentation on [APIs](/self-managed/concepts/access-control/apis.md) to find out more. -::: - -:::caution Write access needed -To add an API, you need to have write access to Identity. -Read our [guide on managing user access](/self-managed/identity/user-guide/authorizations/managing-user-access.md) to learn more. -::: - -1. Log in to the Identity UI and navigate to the **API** tab: - -![add-api-tab](../img/add-api-tab.png) - -2. Click the **Add API** button located on the top right of the table and a modal will open. - -3. We are now able to fill out the details of the API. For this guide, we will use a set of example values. When you have inserted the details, click **Add**: - -![add-api-modal-2](../img/add-api-modal-2.png) - -On confirmation, the modal will close, the table will update, and your new API will be shown. Click on your new API to view the details. - -![add-api-refreshed-table](../img/add-api-refreshed-table.png) diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/additional-features/incorporate-applications.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/additional-features/incorporate-applications.md deleted file mode 100644 index 846b767cc6a..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/additional-features/incorporate-applications.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: incorporate-applications -title: "Incorporate applications" -sidebar_label: "Incorporate applications" -description: "Use Identity to create an application and assign a permission to an application." ---- - -In this guide we will show you how to use Identity to create an application and assign a permission to an application. - -:::tip Want to learn more about applications? -Head over to our documentation on [applications](/self-managed/concepts/access-control/applications.md) to find out more. -::: - -:::caution Write access needed -To add an application and assign a permission to an application, you need to have write access to Identity. -Read our [guide on managing user access](/self-managed/identity/user-guide/authorizations/managing-user-access.md) to learn more. -::: - -## Add an application - -1. Log in to the Identity UI and navigate to the **Applications** tab: - -![add-application-tab](../img/add-application-tab.png) - -2. Click the **Add application** button located on the top right of the table and a modal will open. - -3. Fill in a name for your application. For this guide we will use a set of example values. - Select the type of your application based on our [guide](/self-managed/concepts/access-control/applications.md#types-of-applications). - Depending on the selected type, you might need to enter at least one redirect URI. 
When you have inserted the required
-   details, click **Add**:
-
-![add-application-modal-2](../img/add-application-modal-2.png)
-
-On confirmation, the modal will close, the list will update, and your new application will be shown. Click on your new application to view the details. This includes your generated client ID
-and client secret, depending on the selected [type](/self-managed/concepts/access-control/applications.md#types-of-applications).
-
-![add-application-refreshed-table](../img/add-application-refreshed-table.png)
-
-## Assigning a permission to an application
-
-To assign a permission to an application using Identity, take the following steps:
-
-1. Log in to the Identity UI, navigate to the **Applications** page, and click on an application. Then, select the **Access to APIs** tab and click **Assign Permissions**:
-
-![assign-a-permission-application-tab](../img/assign-a-permission-application-tab.png)
-
-2. Select the API which contains the permission you want to assign.
-
-3. Select the permissions you would like to assign and click **Add**.
-
-On confirmation, the modal will close, the table will update, and your assigned permissions will be shown:
-
-![assign-a-permission-application-refreshed-table](../img/assign-a-permission-application-refreshed-table.png) diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/authorizations/generating-m2m-tokens.md deleted file mode 100644 index 6433f13f89c..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/authorizations/generating-m2m-tokens.md +++ /dev/null @@ -1,36 +0,0 @@ ----
-id: generating-m2m-tokens
-title: "Generating machine-to-machine tokens"
-sidebar_label: "Generating machine-to-machine (M2M) tokens"
-description: "In this guide, we'll show you how to generate your own machine-to-machine (M2M) tokens."
----
-
-In this guide, we'll show you how to generate your own **machine-to-machine (M2M)** tokens.
-
-:::tip Want to learn more about M2M tokens?
-Head over to our documentation on [M2M tokens](/self-managed/concepts/authentication/m2m-tokens.md) to find out more.
-:::
-
-### Prerequisites
-
-- A running [Identity](/self-managed/identity/what-is-identity.md) service
-- An [application](/self-managed/concepts/access-control/applications.md) for your service
-- The client ID of your application
-- The client secret of your application
-- A REST client of your choice
-
-### Generate token
-
-In our example, the Keycloak instance that supports Identity can be found via `http://localhost:18080`.
-This may be different for you, so adjust the host name (and port if required) as appropriate.
- -To request a token, use the following cURL command replacing the placeholders with your applications -details: - -``` -curl --location --request POST 'http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token' \ ---header 'Content-Type: application/x-www-form-urlencoded' \ ---data-urlencode 'client_id=[CLIENT_ID]' \ ---data-urlencode 'client_secret=[CLIENT_SECRET]' \ ---data-urlencode 'grant_type=client_credentials' -``` diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/authorizations/managing-resource-authorizations.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/authorizations/managing-resource-authorizations.md deleted file mode 100644 index 8f7dbd9e365..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/authorizations/managing-resource-authorizations.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -id: managing-resource-authorizations -title: "Managing resource authorizations" -sidebar_label: "Managing resource authorizations" -description: "Learn about the methods to control resource access within the Identity application." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -In this guide you will learn about the methods to control resource access within the Identity application. - -### Creating resource authorizations - -Resource authorizations can be configured for an individual user or a group. Below we show you how to create authorizations -for both: - - - - -1. Log in to the Identity UI and navigate to the **Groups** tab. Select the group you would like to create an authorization for from the table, and click on the **Authorizations** tab: - -![create-authorization-for-group-tab](../img/create-authorization-for-group-tab.png) - -2. Click **Create resource authorization** and a modal will open. Select the type of resource you are creating an authorization for, and click **Next**: - -![create-authorization-for-group-modal-1](../img/create-authorization-for-group-modal-1.png) - -3. Input the ID of the resource you would like to create an authorization for, select the resource from the list, and click **Next**: - -![create-authorization-for-group-modal-2](../img/create-authorization-for-group-modal-2.png) - -:::tip -Want to apply an authorization to a wide range of resources? We support a wildcard character `*` to match any resource. - -Partial matching, for example `my-resource*`, is not supported. -::: - -4. Select the permissions you would like to assign, and click **Create**: - -![create-authorization-for-group-modal-3](../img/create-authorization-for-group-modal-3.png) - -On confirmation, the modal closes, the table updates, and your authorization is shown: - -![create-authorization-for-group-refreshed-modal](../img/create-authorization-for-group-refreshed-table.png) - - - - -1. Log in to the Identity UI and navigate to the **Users** tab. Select the user you would like to create an authorization for from the table, and click on the **Authorizations** tab: - -![create-authorization-for-user-tab](../img/create-authorization-for-user-tab.png) - -2. Click **Create resource authorization** and a modal will open. Select the type of resource you are creating an authorization for, and click **Next**: - -![create-authorization-for-user-modal-1](../img/create-authorization-for-user-modal-1.png) - -3. 
Input the ID of the resource you would like to create an authorization for, select the resource from the list, and click **Next**:
-
-![create-authorization-for-user-modal-2](../img/create-authorization-for-user-modal-2.png)
-
-:::tip
-Want to apply an authorization to a wide range of resources? We support a wildcard character `*` to match any resource.
-
-Partial matching, for example `my-resource*`, is not supported.
-:::
-
-4. Select the permissions you would like to assign, and click **Create**:
-
-![create-authorization-for-user-modal-3](../img/create-authorization-for-user-modal-3.png)
-
-On confirmation, the modal closes, the table updates, and your authorization is shown:
-
-![create-authorization-for-user-refreshed-modal](../img/create-authorization-for-user-refreshed-table.png) - - diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/authorizations/managing-user-access.md deleted file mode 100644 index 565a6d476c1..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/authorizations/managing-user-access.md +++ /dev/null @@ -1,38 +0,0 @@ ----
-id: managing-user-access
-title: "Managing user access to Identity"
-sidebar_label: "Managing user access"
-description: "Learn about the different access levels users can receive within the Identity application."
----
-
-In this guide, you will learn about the different access levels users can receive within the Identity application.
-
-## Permissions supported by Identity
-
-Identity implements the following permissions:
-
-- `read`: Users can access all pages in Identity. They _cannot_ create, modify, or delete any data.
-- `read:users`: Users can access only the **Users** page and related subpages.
-- `write`: Users have access to all pages. They can create, modify, and delete data.
-
-You can [assign the above permissions to users as part of a role](/self-managed/identity/user-guide/roles/add-assign-permission.md).
-This gives the user access to the Identity application.
-
-## Assign Identity permissions to a user
-
-Users are always able to use Identity to log in to the components.
-However, they are unable to access the Identity UI without at least one of the permissions listed above.
-
-To grant a user access to the UI, assign at least one Identity permission as part of a role to the user. This can be achieved in one of the ways described below.
-
-### Use our component presets
-
-When you start Identity with our pre-configured Keycloak container, Identity creates the `Identity` role automatically.
-The role contains the necessary permissions to give a user full read and write access to Identity.
-
-[Assign the `Identity` role to a user](/self-managed/identity/user-guide/roles/add-assign-role.md) to enable the user to access the Identity UI.
-
-### Create a custom role
-
-When using an existing Keycloak instance, or if you want to create your own set of permissions, follow our guides to
-[create a new role and assign it to users](/self-managed/identity/user-guide/roles/add-assign-role.md).
diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider.md deleted file mode 100644 index 335a9673bcb..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/configure-external-identity-provider.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -id: configure-external-identity-provider -title: "Configuring an external identity provider" -sidebar_label: "Configuring an external identity provider" -description: "Learn how to configure an external identity provider like OpenID Connect, SAML, LDAP, or Active Directory." ---- - -:::info -The Identity UI does not offer support for configuring external identity providers. You can configure an external -identity provider directly in Keycloak Administrator Console. -::: - -To configure an external identity provider like OpenID Connect, SAML, LDAP, or Active Directory, take the following steps: - -1. Log in to the Keycloak Administrator Console. Open the URL you have configured for Keycloak in your browser. - :::tip - When using the example - [Docker Compose](/self-managed/platform-deployment/docker.md#docker-compose) setup, Keycloak - is available at [http://localhost:18080/](http://localhost:18080/). - ::: -2. Click **Administrator Console** and log in using the Keycloak administrator credentials. The default administrator username is `admin`. When deploying Camunda 8 with [Helm charts](/self-managed/platform-deployment/helm-kubernetes/overview.md), - you can extract the password as described in - [secrets extraction](/self-managed/platform-deployment/helm-kubernetes/upgrade.md#secrets-extraction). - Using the example [Docker Compose](/self-managed/platform-deployment/docker.md#docker-compose) - setup, the password is set via `KEYCLOAK_ADMIN_PASSWORD` environment variable and is `admin` per default. -3. Select the realm you are using with Camunda 8. By default, this is **Camunda-platform**. - ![keycloak-realm-select](../img/keycloak-realm-select.png) -4. Add an identity provider using one of the following methods: - 1. To add an OpenID Connect or SAML provider, select **Identity Providers** in the main menu, click **Add provider...**, and fill in all required configuration settings. - ![keycloak-add-identity-provider](../img/keycloak-add-identity-provider.png) - 2. To connect to your LDAP, Active Directory, or Kerberos server, select **User Federation** in the main menu, click **Add provider...**, and fill in all required configuration settings. - ![keycloak-add-user-federation](../img/keycloak-add-user-federation.png) - -:::tip -Keycloak supports a wide variety of authentication options, such as mapping external user groups, roles, or scopes to internal roles, and configuring the login screen and flow when multiple providers are added. - -Visit the Keycloak documentation for details on [adding a provider](https://www.keycloak.org/docs/19.0.3/server_admin/index.html#adding-a-provider), -[configuring authentication](https://www.keycloak.org/docs/19.0.3/server_admin/index.html#configuring-authentication), and -[integrating identity providers](https://www.keycloak.org/docs/19.0.3/server_admin/index.html#_identity_broker). 
-::: diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/configure-logging.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/configure-logging.md deleted file mode 100644 index 5a69f966384..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/configure-logging.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -id: configure-logging -title: "Configure logging" -sidebar_label: "Configure logging" -description: "Learn how to configure logging in Identity." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -## Configuring logging - -The Identity component uses the [Log4j2](https://logging.apache.org/log4j/2.x/) framework to control -the log level and log format. - -The logging configuration that is included in the Identity image is: - -```xml - - - - %clr{%d{yyyy-MM-dd HH:mm:ss.SSS}}{faint} %clr{%5p} %clr{${sys:PID}}{magenta} - %clr{---}{faint} %clr{[%15.15t]}{faint} %clr{%-40.40c{1.}}{cyan} %clr{:}{faint} %m%n%xwEx - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{1.} %enc{%msg}%n - - logs/identity.%d{yyyy-MM-dd-mm-ss}.log - - - - - - - - - - - - - - - - - - - - - - - -``` - -### General configuration options - -Identity provides support for configuring the log level: - -| Environment variable | Accepted values | -| -------------------- | ------------------------------------------------ | -| `IDENTITY_LOG_LEVEL` | OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL | - -### Supported logging outputs - -As part of configuration Identity provides multiple appenders for outputting logs, to configure which logging appender -is -used, set the `IDENTITY_LOG_APPENDER` environment variable to one of the following `Console`, `Stackdriver`, or `File`: - - - - -Console logging produces messages to standard output and is the default log appender. The Console log appender offers -additional -configuration options, these are: - -| Environment variable | Accepted values | -| ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -| `IDENTITY_LOG_PATTERN` | _See the [Log4j2 pattern layout docs](https://logging.apache.org/log4j/2.x/manual/layouts.html#PatternLayout) for possible placeholders._ | - - - - -The Stackdriver log appender produces messages to standard output in a format that is compatible with the GCP cloud -platform. - -This appender uses -the [GCP layout](https://github.com/apache/logging-log4j2/blob/2.x/log4j-layout-template-json/src/main/resources/GcpLayout.json) -provided by the [Log4j2](https://logging.apache.org/log4j/2.x/manual/) library. - - - - -The File log appender produces messages to a rotating log file. 
The File log appender offers additional configuration
-options:
-
-| Environment variable | Accepted values |
-| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `IDENTITY_LOG_FILE_PATTERN` | _See the [Log4j2 pattern layout docs](https://logging.apache.org/log4j/2.x/manual/layouts.html#PatternLayout) for possible placeholders._ |
-| `IDENTITY_LOG_FILE_ROTATION_DAYS` | _See the [Log4j2 time-based triggering policy -> interval](https://logging.apache.org/log4j/2.x/manual/appenders.html#timebased-triggering-policy) for possible values._ |
-| `IDENTITY_LOG_FILE_ROTATION_SIZE` | _See the [Log4j2 size-based triggering policy](https://logging.apache.org/log4j/2.x/manual/appenders.html#sizebased-triggering-policy) for possible values._ |
-
-
-
-
-### Providing your own logging configuration
-
-You can provide your own configuration by mounting a configuration file to the Identity container and setting the path to the file using the following variable:
-
-| Environment variable | Purpose |
-| -------------------- | --------------------------------------------------------------------------------------------------------------- |
-| `LOGGING_CONFIG` | The path to your [Log4j2 config XML](https://logging.apache.org/log4j/2.x/manual/configuration.html#XML) file |
-
-:::note
-To write logs to a file in a containerized environment, the mounted directory containing the log file has to be writable under the user running Identity.
-::: diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md deleted file mode 100644 index ea784278a7f..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md +++ /dev/null @@ -1,54 +0,0 @@ ----
-id: connect-to-an-existing-keycloak
-title: "Connect to an existing Keycloak instance"
-sidebar_label: "Connect to an existing Keycloak instance"
-description: "Learn how to connect Identity to your existing Keycloak instance."
----
-
-In this guide, we'll demonstrate how to connect Identity to your existing Keycloak instance.
-
-### Prerequisites
-
-- Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/19.0.3/server_admin/#using-the-admin-console)
-- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/19.0.3/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak.
-
-### Steps
-
-To connect Identity to an existing Keycloak instance, take the following steps:
-
-1. Log in to your Keycloak Admin Console.
-2. Select the realm you would like to connect Identity to. In our example, this is **Test Realm**.
-   ![keycloak-admin-realm-select](../img/keycloak-admin-realm-select.png)
-3. Select **Clients** in the navigation menu, and click the **Create** button to create a new client.
-4. Enter the client ID and the URL of where your Identity instance will be hosted, and click **Save**.
-   :::note What client ID should I use?
-   By default, Identity uses the Client ID `camunda-identity`, so we recommend using this too. If you choose a different client ID, this will need to be set in the Identity application [environment variables](/docs/self-managed/identity/deployment/configuration-variables.md).
- ::: - ![keycloak-admin-client-add](../img/keycloak-admin-client-add.png) -5. On the page for the created client, set the **Access Type** to `confidential`, **Service Accounts Enabled** to `ON`, and save your changes by clicking the **Save** button. - ![keycloak-admin-update-client-1](../img/keycloak-admin-update-client-1.png) -6. Navigate to the **Service Account Roles** tab in the top navigation. - ![keycloak-admin-update-client-2](../img/keycloak-admin-update-client-2.png) -7. Select the `realm-management` client from the **Client Roles** dropdown. -8. Assign the `manage-clients`, `manage-realm`, and `manage-users` role from the **Available Roles** list. - ![keycloak-admin-update-client-4](../img/keycloak-admin-update-client-4.png) - :::note Why does Identity need these roles? - Identity is designed to allow users to manage the various entities related to Camunda. To achieve this, it requires specific access to the realm. - ::: -9. Navigate to the **Credentials** tab and copy the client secret. -10. Set the `IDENTITY_CLIENT_SECRET` [environment variable](/docs/self-managed/identity/deployment/configuration-variables.md) with the value from **Step 9**. -11. Set the `KEYCLOAK_REALM` [environment variable](/docs/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. - :::tip - If you are using a specific realm, you need to set additional variables to use the intended realm. - See the [environment variables](/docs/self-managed/identity/deployment/configuration-variables.md) page for details of Keycloak-specific variables to consider. - ::: -12. Start Identity. - -:::note What does Identity create when starting? -Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/docs/self-managed/identity/deployment/starting-configuration.md). -::: - -### Considerations - -When connecting Identity to a shared realm, accurately determining what clients should and should not be displayed in the Identity UI is not possible. Therefore, the clients in the realm you connect Identity to will be shown in the Identity UI and can -have their secrets viewed and updated. Users with access to Identity should be considered as having administrator-level access to the system. diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/making-identity-production-ready.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/making-identity-production-ready.md deleted file mode 100644 index 3cb4e92c9f4..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/configuration/making-identity-production-ready.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: making-identity-production-ready -title: "Making Identity production ready" -sidebar_label: "Making Identity production ready" -description: "Consider the following topics when moving Identity into a production environment." ---- - -We recommend considering the following topics when moving Identity into a production environment. - -## Keycloak dependency - -As Keycloak is an external-based dependency of Identity, we recommend looking at -[Keycloak's documentation on production configuration](https://www.keycloak.org/server/configuration-production) to -ensure your Keycloak instance is production-ready. - -### Backing up - -To ensure recovery is possible, we recommend regularly backing up the database supporting Keycloak. 
- -#### Helm deployment - -If you deployed Camunda 8 using our [Helm charts](../../../platform-deployment/helm-kubernetes/overview.md), -by default there will be a Postgres database deployed with it. In this instance, we recommend reading the -[Postgres documentation](https://www.postgresql.org/docs/current/backup.html) for guidance on backing up. - -#### Alternative deployment - -If your Keycloak service uses a different database provider than Postgres, we recommend -referencing the backup section of the documentation for your chosen provider and version. - -## Enabling TLS - -A safe and healthy exchange of secure data requires Transport Layer Security (TLS). - -TLS support for Identity can be enabled by setting configuration values. -Visit [Spring - Configure SSL](https://docs.spring.io/spring-boot/docs/current/reference/html/howto.html#howto.webserver.configure-ssl) -for more information. - -To enable TLS alongside Keycloak, visit the Keycloak documentation regarding [TLS enablement](https://www.keycloak.org/server/enabletls). - -## Setting Identity URL - -To ensure the authentication flows are successful, the `IDENTITY_URL` should be set to the URL of the Identity service. diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/groups/assign-users-roles-to-group.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/groups/assign-users-roles-to-group.md deleted file mode 100644 index c5cd2c84ff6..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/groups/assign-users-roles-to-group.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -id: assign-users-roles-to-group -title: "Assign users and roles to a group" -description: "Groups are a way to apply a set of roles and authorizations to users. Use Identity to assign users and roles to a group." ---- - -In this guide we will show you how to use Identity to assign users and roles to a group. - -:::tip Want to learn more about groups? -Head over to our documentation on [groups](/self-managed/concepts/access-control/groups.md) to learn more. -::: - -## Assign users to a group - -:::caution Write access needed -To assign a user to a group, you must have write access to Identity. -Read our [guide on managing user access](/self-managed/identity/user-guide/authorizations/managing-user-access.md) to learn more. -::: - -1. Log in to the Identity UI and navigate to the **Groups** tab. Select the group you would like to assign a user to from the table: - -![assign-user-to-group-tab](../img/assign-user-to-group-tab.png) - -2. Click **Assign Members** and a modal will open. - -3. Search and select the users to assign to the group. After selecting the users, click **Assign**. - -On confirmation, the modal closes, the table updates, and your assigned members are shown: - -![assign-user-to-group-refreshed-table](../img/assign-user-to-group-refreshed-table.png) - -## Assign roles to a group - -:::caution Write access needed -To assign a role to a group, you must have write access to Identity. -Read our [guide on managing user access](/self-managed/identity/user-guide/authorizations/managing-user-access.md) to learn more. -::: - -1. Log in to the Identity UI and navigate to the **Groups** tab. Select the group you would like to assign a role to from the table, and click on the **Roles** tab: - -![assign-role-to-group-tab](../img/assign-role-to-group-tab.png) - -2. Click **Assign Role** and a modal will open. - -3. Select the roles to assign to the group. When you have selected the roles, click **Add**. 
- -On confirmation, the modal closes, the table updates, and your assigned roles are shown: - -![assign-role-to-group-refreshed-table](../img/assign-role-to-group-refreshed-table.png) diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/groups/create-group.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/groups/create-group.md deleted file mode 100644 index 5e52c549a8e..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/groups/create-group.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: create-group -title: "Create a group" -description: "Groups are a way to apply a set of roles and authorizations to users. Use Identity to create a group." ---- - -In this guide we will show you how to use Identity to create a group. - -:::tip Want to learn more about groups? -Head over to our documentation on [groups](/self-managed/concepts/access-control/groups.md) to learn more. -::: - -## Create a group - -To create a group using Identity, take the following steps: - -1. Log in to the Identity UI and navigate to the **Groups** tab: - -![create-group-tab](../img/create-group-tab.png) - -2. Click the **Add Group** button located on the top right of the table and a modal will open. - -3. Fill in the name of the group. For this guide, we use an example value. After inserting the name, click **Add**: - -![create-group-modal-2](../img/create-group-modal-2.png) - -On confirmation, the modal closes, the table updates, and your new group is shown: - -![create-group-refreshed-table](../img/create-group-refreshed-table.png) diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-modal-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-modal-2.png deleted file mode 100644 index ff1cd11d074..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-modal-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-refreshed-table.png deleted file mode 100644 index ac132ab53c4..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-tab.png deleted file mode 100644 index 20f83a92d11..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-api-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-details.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-details.png deleted file mode 100644 index fec2a12ab4c..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-details.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-modal-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-modal-2.png deleted file mode 100644 index 78c3bb7b2df..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-modal-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-refreshed-table.png 
b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-refreshed-table.png deleted file mode 100644 index deed1fd15aa..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-tab.png deleted file mode 100644 index 9a89e771c84..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-application-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-modal-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-modal-2.png deleted file mode 100644 index aad89cb6e6f..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-modal-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-refreshed-table.png deleted file mode 100644 index 811b9296b43..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-tab.png deleted file mode 100644 index 9a656985333..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-permission-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-details.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-details.png deleted file mode 100644 index df1453b812d..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-details.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-modal-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-modal-2.png deleted file mode 100644 index 5c50b58c2d6..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-modal-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-tab.png deleted file mode 100644 index 86aeddfb7ab..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/add-role-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-application-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-application-refreshed-table.png deleted file mode 100644 index f4b5811dc32..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-application-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-application-tab.png 
b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-application-tab.png deleted file mode 100644 index 34b0bbd5307..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-application-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-modal-1.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-modal-1.png deleted file mode 100644 index a6b8f50acd6..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-modal-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-refreshed-table.png deleted file mode 100644 index 92f01b9a4f0..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-tab.png deleted file mode 100644 index d5b376ee99c..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-permission-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-role-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-role-refreshed-table.png deleted file mode 100644 index c8b1dad9e67..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-role-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-role-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-role-tab.png deleted file mode 100644 index a3da9a91c91..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-a-role-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-role-to-group-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-role-to-group-refreshed-table.png deleted file mode 100644 index 38c674da14c..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-role-to-group-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-role-to-group-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-role-to-group-tab.png deleted file mode 100644 index 5877674177c..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-role-to-group-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-user-to-group-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-user-to-group-refreshed-table.png deleted file mode 100644 index d35488fc00b..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-user-to-group-refreshed-table.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-user-to-group-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-user-to-group-tab.png deleted file mode 100644 index 28d45d84f72..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/assign-user-to-group-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-1.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-1.png deleted file mode 100644 index 40723aa4c04..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-2.png deleted file mode 100644 index f817f9022c4..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-3.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-3.png deleted file mode 100644 index bc796f783a5..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-modal-3.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-refreshed-table.png deleted file mode 100644 index c85147458cd..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-tab.png deleted file mode 100644 index 3dc96562cbd..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-group-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-1.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-1.png deleted file mode 100644 index 06fcaa03df2..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-2.png deleted file mode 100644 index bd3fea442b5..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-3.png 
b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-3.png deleted file mode 100644 index 6dfd386eb77..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-3.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-4.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-4.png deleted file mode 100644 index e4d376211cf..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-modal-4.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-refreshed-table.png deleted file mode 100644 index 519feeb40b2..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-tab.png deleted file mode 100644 index 9c4700a54a5..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-authorization-for-user-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-modal-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-modal-2.png deleted file mode 100644 index 5caa7e6bd61..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-modal-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-refreshed-table.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-refreshed-table.png deleted file mode 100644 index 951729b073c..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-refreshed-table.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-tab.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-tab.png deleted file mode 100644 index 2445da98f4d..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/create-group-tab.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-add-identity-provider.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-add-identity-provider.png deleted file mode 100644 index 6358856c118..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-add-identity-provider.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-add-user-federation.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-add-user-federation.png deleted file mode 100644 index bf2baa6bb9b..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-add-user-federation.png and /dev/null differ diff --git 
a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-client-add.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-client-add.png deleted file mode 100644 index 808ab9e2e20..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-client-add.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-realm-select.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-realm-select.png deleted file mode 100644 index 59aa89e06da..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-realm-select.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-1.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-1.png deleted file mode 100644 index 3c7742f090b..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-1.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-2.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-2.png deleted file mode 100644 index 5d28563e6e4..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-2.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-4.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-4.png deleted file mode 100644 index 79ca8020dd0..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-admin-update-client-4.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-realm-select.png b/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-realm-select.png deleted file mode 100644 index 1bb6f1c6aa9..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/identity/user-guide/img/keycloak-realm-select.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/roles/add-assign-permission.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/roles/add-assign-permission.md deleted file mode 100644 index 00012e99bb9..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/roles/add-assign-permission.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -id: add-assign-permission -title: Add and assign a permission to a role -sidebar_label: "Add and assign a permission" -description: "Use Identity to add and assign a permission to a role." ---- - -In this guide, we will show you how to use Identity to add and assign a permission to a role. - -:::tip Want to learn more about roles? -Head over to [our documentation on roles](/self-managed/concepts/access-control/roles.md) to learn more. -::: - -:::caution Write access needed -To assign a permission to a role and assign a role to a user, you need to have write access to Identity. -Read our [guide on managing user access](/self-managed/identity/user-guide/authorizations/managing-user-access.md) to learn more. 
-::: - -## Add and assign a permission to a role - -### Add a permission - -:::tip Want to learn more about permissions? -Head over to our documentation on [permissions](/self-managed/concepts/access-control/apis.md) to find out more. -::: - -To create a permission using Identity, take the following steps: - -1. Log in to the Identity UI and navigate to the **API** tab: - -![add-permission-api-tab](../img/add-api-tab.png) - -2. Click the API you would like to create a permission for. This will open the details page. - -3. Click the **Permissions** tab beneath the API name. - -4. Click **Add Permission** located on the top right of the table and a modal will open. - -5. We are now able to fill out the details of the permission. For this guide, we will use a set of example values. When you have inserted the details, click **Add**: - -![add-permission-modal-2](../img/add-permission-modal-2.png) - -On confirmation, the modal will close, the table will update, and your new permission will be shown: - -![add-permission-refreshed-table](../img/add-permission-refreshed-table.png) - -### Assign a permission to a role - -To assign a permission to a role using Identity, take the following steps: - -1. Log in to the Identity UI and navigate to the **Roles** tab, select **Permissions > Assign Permission**: - -![assign-a-permission-tab](../img/assign-a-permission-tab.png) - -2. Select the API which contains the permission you want to assign. - -3. Select the permission you would like to assign and click **Add**. - -On confirmation, the modal will close, the table will update, and your assigned permission will be shown: - -![assign-a-permission-refreshed-table](../img/assign-a-permission-refreshed-table.png) diff --git a/versioned_docs/version-8.2/self-managed/identity/user-guide/roles/add-assign-role.md b/versioned_docs/version-8.2/self-managed/identity/user-guide/roles/add-assign-role.md deleted file mode 100644 index 569ec30132b..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/user-guide/roles/add-assign-role.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: add-assign-role -title: Add and assign a role to a user -sidebar_label: "Add and assign a role" -description: "Use Identity to create a role and assign a role to a user." ---- - -In this guide we will show you how to use Identity to create a role and assign a role to a user. - -:::tip Want to learn more about roles? -Head over to [our documentation on roles](/self-managed/concepts/access-control/roles.md) to find out more. -::: - -:::caution Write access needed -To add and assign a role to a user, you need to have write access to Identity. -Read our [guide on managing user access](/self-managed/identity/user-guide/authorizations/managing-user-access.md) to learn more. -::: - -## Add a role - -1. Log in to the Identity UI and navigate to the **Roles** tab: - -![add-role-tab](../img/add-role-tab.png) - -2. Click the **Add Role** button located on the top right of the table and a modal will open. - -3. We are now able to fill out the details of the role. For this guide, we will use a set of example values. When you have inserted the details, click **Add**: - -![add-role-modal-2](../img/add-role-modal-2.png) - -On confirmation, the modal will close, the table will update, and your new role will be shown. Click on your new role to view the details: - -![add-role-details](../img/add-role-details.png) - -## Assign a role to a user - -To assign a role to a user using Identity, take the following steps: - -1. 
Log in to the Identity UI and navigate to the **Users** tab: - -![assign-a-role-tab](../img/assign-a-role-tab.png) - -2. Click on the user you want to assign a role to to view their details. - -3. Click on **Assigned Roles** to view the roles currently assigned to the user. - -4. Click the **Assign Role** button located on the top right of the table and a modal will open. - -5. Select the role you want to assign to the user and click **Add**. - -On confirmation, the modal will close, the table will update, and the newly assigned role will be shown: - -![assign-a-role-refreshed-table](../img/assign-a-role-refreshed-table.png) diff --git a/versioned_docs/version-8.2/self-managed/identity/what-is-identity.md b/versioned_docs/version-8.2/self-managed/identity/what-is-identity.md deleted file mode 100644 index 268d1d49348..00000000000 --- a/versioned_docs/version-8.2/self-managed/identity/what-is-identity.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -id: what-is-identity -title: "What is Identity?" -sidebar_label: "What is Identity?" -description: "Identity is the component within the Camunda 8 stack responsible for authentication and authorization." ---- - -Identity is the component within the Camunda 8 stack responsible for authentication and authorization. It allows you to manage: - -- Applications -- APIs -- Permissions -- Roles - -### Next steps - -If you're new to Identity, we suggest reviewing our [getting started guide](./getting-started/install-identity.md). diff --git a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md b/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md deleted file mode 100644 index b3f34c6f005..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: deploy-to-self-managed -title: Deploy diagram -description: "Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed." ---- - -Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed. Follow the steps below to deploy a diagram: - -1. Click the rocket-shaped deployment icon: - -![deployment icon](./img/deploy-icon.png) - -2. Click **Camunda 8 Self-Managed**: - -![deployment configuration](./img/deploy-empty.png) - -3. Input the `Cluster endpoint`: - -![deployment via Camunda 8](./img/deploy-endpoint.png) - -:::note -You can connect to Camunda 8 both securely and insecurely through the `https` and `http` protocols. - -Secured connections to a remote endpoint will only be established if the remote server certificate is trusted by the app. Ensure that signing trusted roots and intermediate certificates [are known to the app](/components/modeler/desktop-modeler/flags/flags.md#zeebe-ssl-certificate). -::: - -4. Select **Basic**, and input your username and password in case your gateway requires basic authentication: - -![basic auth configuration](./img/deploy-with-basic-auth.png) - -5. Select **OAuth**, and input the credentials in case your gateway requires authentication with OAuth: - -:::note -The OAuth URL needs to contain the full path to the token endpoint, i.e. `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. -::: - -![oauth configuration](./img/deploy-with-oauth.png) - -6. Select the **Remember** checkbox if you want to locally store the connection information. - -7. Click **Deploy** to perform the deployment. 
- -![deployment successful](./img/deploy-success.png) diff --git a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-empty.png b/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-empty.png deleted file mode 100644 index 4f23e6dcc57..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-empty.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-endpoint.png b/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-endpoint.png deleted file mode 100644 index 11e386fece8..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-endpoint.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-icon.png b/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-icon.png deleted file mode 100644 index 426760838fe..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-icon.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-success.png b/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-success.png deleted file mode 100644 index f1d771c8789..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-success.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-with-basic-auth.png b/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-with-basic-auth.png deleted file mode 100644 index a4919ccdb3b..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-with-basic-auth.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-with-oauth.png b/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-with-oauth.png deleted file mode 100644 index 35599858466..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/modeler/desktop-modeler/img/deploy-with-oauth.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/configuration.md b/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/configuration.md deleted file mode 100644 index 72c6635298f..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/configuration.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -id: configuration -title: "Configuration" -sidebar_label: "Overview" -description: "Read details on the configuration variables of Web Modeler Self-Managed, including components such as REST API, Identity, Keycloak, webapp, and WebSocket." ---- - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../../reference/licenses.md#web-modeler) only. -::: - -The different components of Web Modeler Self-Managed can be configured using environment variables. Each component's variables are described below. - -- For a working example configuration showing how the components are correctly wired together, see the [Docker Compose file for Web Modeler](../../../platform-deployment/docker#web-modeler-1). 
-- If you are using the Camunda 8 [Helm chart](../../../platform-deployment/helm-kubernetes/deploy.md) to set up Web Modeler, read more about the different configuration options in the chart's [values docs](https://artifacthub.io/packages/helm/camunda/camunda-platform#webmodeler-parameters). - -## Configuration of the `restapi` component - -### Database - -Web Modeler requires a PostgreSQL database as persistent data storage (other database systems are currently not supported). - -| Environment variable | Description | Example value | -| ------------------------------------- | ----------------------------------------------------- | -------------------------------------------------------- | -| `SPRING_DATASOURCE_URL` | JDBC URL of the database | `jdbc:postgresql://postgres.example.com:5432/modeler-db` | -| `SPRING_DATASOURCE_USERNAME` | Database user name | `modeler-user` | -| `SPRING_DATASOURCE_PASSWORD` | Database user password | \*\*\* | -| `SPRING_DATASOURCE_DRIVER_CLASS_NAME` | [optional]
    Java class name of the database driver | `software.amazon.jdbc.Driver` | - -Refer to the [Advanced Database Configuration Guide](./database.md) for additional details on how to configure Web Modeler's database connection. - -### SMTP / email - -Web Modeler requires an SMTP server to send notification emails to users. - -| Environment variable | Description | Example value | Default value | -| --------------------------- | ------------------------------------------------------------------------------------------------------ | ----------------------------- | ------------- | -| `RESTAPI_MAIL_HOST` | SMTP server host name | `smtp.example.com` | - | -| `RESTAPI_MAIL_PORT` | SMTP server port | `587` | - | -| `RESTAPI_MAIL_USER` | [optional]
    SMTP user name | `modeler-user` | - | -| `RESTAPI_MAIL_PASSWORD` | [optional]
    SMTP user password | \*\*\* | - | -| `RESTAPI_MAIL_ENABLE_TLS` | Enforce TLS encryption for SMTP connections (using STARTTLS). | `true` | `true` | -| `RESTAPI_MAIL_FROM_ADDRESS` | Email address used as the sender of emails sent by Web Modeler. | `noreply@example.com` | - | -| `RESTAPI_MAIL_FROM_NAME` | [optional]
    Name displayed as the sender of emails sent by Web Modeler. | `Camunda` | `Camunda` | -| `RESTAPI_SERVER_URL` | URL at which users access Web Modeler in the browser (used to construct links in notification emails). | `https://modeler.example.com` | - | - -### WebSocket - -The `restapi` component sends certain events (e.g. "file updated", "comment added") to the [WebSocket](#configuration-of-the-websocket-component) server. - -| Environment variable | Description | Example value | -| ----------------------- | -------------------------------------------------------------------------------------- | -------------------- | -| `RESTAPI_PUSHER_HOST` | [Internal](#notes-on-host-names-and-port-numbers) host name of the WebSocket server. | `modeler-websockets` | -| `RESTAPI_PUSHER_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number of the WebSocket server. | `8060` | -| `RESTAPI_PUSHER_APP_ID` | _must be the same as_ [`PUSHER_APP_ID`](#configuration-of-the-websocket-component) | `web-modeler` | -| `RESTAPI_PUSHER_KEY` | _must be the same as_ [`PUSHER_APP_KEY`](#configuration-of-the-websocket-component) | \*\*\* | -| `RESTAPI_PUSHER_SECRET` | _must be the same as_ [`PUSHER_APP_SECRET`](#configuration-of-the-websocket-component) | \*\*\* | - -### Identity / Keycloak - -Web Modeler integrates with Identity and Keycloak for authentication and authorization (using OAuth 2.0 + OpenID Connect) as well as user management. - -| Environment variable | Description | Example value | -| ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | -| `RESTAPI_OAUTH2_TOKEN_ISSUER` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | -| `RESTAPI_OAUTH2_TOKEN_ISSUER_BACKEND_URL` | [optional]
    [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's [OpenID Provider Configuration](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig); if not set, `RESTAPI_OAUTH2_TOKEN_ISSUER` is used. | `http://keycloak:8080/auth/realms/camunda-platform` | -| `RESTAPI_IDENTITY_BASE_URL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | - -### Logging - -| Environment variable | Description | Example value | -| -------------------- | --------------------------------------------------- | ---------------------------------------------- | -| `LOGGING_CONFIG` | [optional]
    Path to custom logback configuration | `file:/full/path/to/custom-logback-config.xml` | - -Refer to the [Advanced Logging Configuration Guide](./logging.md#logging-configuration-for-the-restapi-component) for additional details on how to customize the `restapi` logging output. - -## Configuration of the `webapp` component - -### General - -| Environment variable | Description | Example value | Default value | -| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | ------------- | -| `SERVER_URL` | URL at which users access Web Modeler in the browser.
    _Note_: To use a sub path for Web Modeler, just include the path in the URL. | `https://modeler.example.com`,
    `https://example.com/modeler` | - | -| `SERVER_HTTPS_ONLY` | Enforce the usage of HTTPS when users access Web Modeler (by redirecting from `http://` to `https://`). | `true` | `false` | -| `RESTAPI_HOST` | [Internal](#notes-on-host-names-and-port-numbers) host name of the `restapi` application. | `modeler-restapi` | - | -| `RESTAPI_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the regular API endpoints. | `8081` | `8081` | -| `RESTAPI_MANAGEMENT_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the management API endpoints. | `8091` | `8091` | -| `PLAY_ENABLED` | [optional]
    Enables the [**Play** mode](../../../../components/modeler/web-modeler/play-your-process.md) in the BPMN editor, allowing users to test processes in a playground environment. | `true` | `false` | -| `ZEEBE_BPMN_DEPLOYMENT_ENABLED` | [optional]
    Enables the [**Deploy** and **Run**](../../../../components/modeler/web-modeler/run-or-publish-your-process.md) actions in the BPMN editor.
    When disabled, it prevents users from deploying and starting instances of processes via the UI. The entire functionality to hide **Deploy** and **Run** actions is available as of `8.2.5`. | `false` | `true` | -| `ZEEBE_DMN_DEPLOYMENT_ENABLED` | [optional]
    Enables the [**Deploy**](../../../../components/modeler/web-modeler/run-or-publish-your-process.md) action in the DMN editor.
    When disabled, it prevents users from deploying decisions via the UI. The entire functionality to hide **Deploy** and **Run** actions is available as of `8.2.5`. | `false` | `true` | - -### Identity / Keycloak - -| Environment variable | Description | Example value | Default value | -| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ------------- | -| `OAUTH2_CLIENT_ID` | Client ID of the Web Modeler application configured in Identity;
    _must be set to_ `web-modeler`. | `web-modeler` | - | -| `OAUTH2_TOKEN_AUDIENCE` | Expected token audience (used for JWT validation);
    _must be set to_ `web-modeler`. | `web-modeler` | - | -| `OAUTH2_TOKEN_ISSUER` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | - | -| `KEYCLOAK_BASE_URL` | Base URL of the Keycloak instance. | `https://keycloak.example.com` | - | -| `KEYCLOAK_CONTEXT_PATH` | [optional]
    Context path Keycloak serves resources on.
    _Note_: With Keycloak 17, the default path was changed from `/auth` to `/`. | `/` | `/auth` | -| `KEYCLOAK_REALM` | Keycloak realm used for Camunda. | `camunda-platform` | - | -| `KEYCLOAK_JWKS_URL` | [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's JSON Web Key Set (for JWT verification). | `http://keycloak:8080/auth/realms/camunda-platform/protocol/openid-connect/certs` | - | -| `IDENTITY_BASE_URL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | - | - -### WebSocket - -The `webapp` component sends certain events (e.g. "user opened diagram", "user left diagram") to the [WebSocket server](#configuration-of-the-websocket-component) and can also react to such events (e.g. show a notification in the UI that a user left the diagram). - -| Environment variable | Description | Example value | Default value | -| ------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------------- | -| `PUSHER_HOST` | [Internal](#notes-on-host-names-and-port-numbers) host name of the WebSocket server. | `modeler-websockets` | - | -| `PUSHER_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number of the WebSocket server. | `8060` | - | -| `PUSHER_APP_ID` | _must be the same as_ [`PUSHER_APP_ID`](#configuration-of-the-websocket-component) | `web-modeler` | - | -| `PUSHER_KEY` | _must be the same as_ [`PUSHER_APP_KEY`](#configuration-of-the-websocket-component) | \*\*\* | - | -| `PUSHER_SECRET` | _must be the same as_ [`PUSHER_APP_SECRET`](#configuration-of-the-websocket-component) | \*\*\* | - | -| `CLIENT_PUSHER_HOST` | [External](#notes-on-host-names-and-port-numbers) host name on which the Web Modeler client accesses the WebSocket server from the browser. | `ws.example.com` | - | -| `CLIENT_PUSHER_PORT` | [External](#notes-on-host-names-and-port-numbers) port number on which the Web Modeler client accesses the WebSocket server from the browser. | `443` | - | -| `CLIENT_PUSHER_PATH` | [optional]
    _must be the same as_ [`PUSHER_APP_PATH`](#configuration-of-the-websocket-component) | `/modeler-ws` | `/` | -| `CLIENT_PUSHER_KEY` | _must be the same as_ [`PUSHER_APP_KEY`](#configuration-of-the-websocket-component) | \*\*\* | - | -| `CLIENT_PUSHER_FORCE_TLS` | Enable TLS encryption for WebSocket connections initiated by the browser. | `true` | `false` | - -### Zeebe Client - -Web Modeler uses the [Zeebe Java client](/versioned_docs/version-8.2/apis-tools/java-client/index.md) to connect to Zeebe. -To customize the client configuration, you can provide optional environment variables. - -| Environment variable | Description | Example value | Default Value | -| ----------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------- | ---------------------------- | -| `ZEEBE_CA_CERTIFICATE_PATH` | [optional]
    Path to a root CA certificate to be used instead of the certificate in the default store. | `/path/to/certificate` | - | -| `ZEEBE_CLIENT_CONFIG_PATH` | [optional]
    Path to the client's OAuth credential cache. | `/path/to/credentials/cache.txt` | `$HOME/.camunda/credentials` | -| `ZEEBE_CLIENT_REQUESTTIMEOUT` | [optional]
    The request timeout used when communicating with a target Zeebe cluster. | `60000` | `10000` | -| `ZEEBE_AUTH_CONNECT_TIMEOUT` | [optional]
    The connection timeout for requests to the OAuth server. | `30000` | `5000` | -| `ZEEBE_AUTH_READ_TIMEOUT` | [optional]
    The data read timeout for requests to the OAuth server. | `30000` | `5000` | - -For more details, [see the Zeebe connection troubleshooting section](/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md). - -### Logging - -| Environment variable | Description | Example value | -| -------------------- | -------------------------------------- | ---------------------------- | -| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | - -Refer to the [Advanced Logging Configuration Guide](./logging.md#logging-configuration-for-the-webapp-component) for additional details on how to customize the `webapp` logging output. - -## Configuration of the `websocket` component - -The [WebSocket](https://en.wikipedia.org/wiki/WebSocket) server shipped with Web Modeler Self-Managed is based on the [laravel-websockets](https://laravel.com/docs/10.x/broadcasting#open-source-alternatives-php) open source package and implements the [Pusher Channels Protocol](https://pusher.com/docs/channels/library_auth_reference/pusher-websockets-protocol/). - -| Environment variable | Description | Example value | Default value | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | ------------- | -| `PUSHER_APP_ID` | ID of the single application/tenant configured for Web Modeler. | `web-modeler` | - | -| `PUSHER_APP_KEY` | A unique key used for authentication. Provide a random alphanumeric string of at least 20 characters. | \*\*\* | - | -| `PUSHER_APP_SECRET` | A unique secret used for authentication. Provide a random alphanumeric string of at least 20 characters. | \*\*\* | - | -| `PUSHER_APP_PATH` | [optional]
    Base path of the WebSocket endpoint. Can be used to expose the endpoint on a sub path instead of the domain root (e.g. `https://example.com/modeler-ws`). | `/modeler-ws` | `/` | - -### Logging - -| Environment variable | Description | Example value | Default Value | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------- | ------------- | -| `LOG_CHANNEL` | [optional]
    Log channel driver, see [Laravel documentation](https://laravel.com/docs/10.x/logging#available-channel-drivers) | `single` | `stack` | - -Refer to the [Advanced Logging Configuration Guide](./logging.md#logging-configuration-for-the-websocket-component) for additional details on how to customize the `websocket` logging output. - -## Notes on host names and port numbers - -- _Internal_ refers to host names and port numbers that are only used inside a Docker Compose network or Kubernetes cluster for backend-to-backend communication. -- _External_ refers to host names and port numbers that are exposed to the outside and can be reached from a web browser. diff --git a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/database.md b/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/database.md deleted file mode 100644 index 2243a5f89bd..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/database.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -id: database -title: "Database" -description: "Read details on how to connect Web Modeler with a database." ---- - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../../reference/licenses.md#web-modeler) only. -::: - -This page describes advanced database connection configuration for Web Modeler. For a general guide on how to set up Web Modeler's database connection, visit [the configuration overview](configuration.md#database). - -## Configuring SSL for the database connection - -The generic way to configure an SSL connection between Web Modeler and the database is as follows: - -- Modify the JDBC URL `SPRING_DATASOURCE_URL` and customize connection parameters. -- Provide SSL certificates and keys to the `restapi` component, if required. - -Consult the [PostgreSQL documentation](https://jdbc.postgresql.org/documentation/ssl/) for a description -of the different SSL modes and the security provided. - -For a full list of all available connection parameters, visit the [PostgreSQL documentation](https://jdbc.postgresql.org/documentation/use/#connection-parameters/). - -Below are examples for common scenarios, increasing in the level of security they provide. - -### SSL mode "require" - -In this mode, an SSL connection is established between Web Modeler and the database. It is still prone to -person-in-the-middle attacks. - -To enable this mode, modify the JDBC URL as follows: `jdbc:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?sslmode=require` - -No certificates are needed in Web Modeler for this mode. - -### SSL mode "verify-full" - -In this mode, Web Modeler requests a certificate from the database server to verify its identity. It is not -prone to person-in-the-middle attacks. - -To enable this mode, mount the root certificate with which the server certificate was signed and follow these steps: - -1. Provide the root certificate at this location: `myCA.crt -> ~/.postgresql/root.crt`. -2. Modify the JDBC URL: `jdbc:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?ssl=true`. - -### SSL mode "verify-full" with client certificates - -In this mode, Web Modeler requests a certificate from the database server to verify the server's identity, and -the server requests a certificate from the client to verify the client's identity. - -To enable this mode, mount the client certificates and follow these steps: - -1. Provide client certificates at these locations: - 1. `myClientCertificate.pk8 -> ~/.postgresql/postgresql.pk8` - 2. 
`myClientCertificate.crt -> ~/.postgresql/postgresql.crt` -2. Provide the root certificate at this location: `myCA.crt -> ~/.postgresql/root.crt`. -3. Modify the JDBC URL: `jdbc:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?ssl=true`. - -Furthermore, configure the database server to verify client certificates: -[PostgreSQL documentation](https://www.postgresql.org/docs/current/ssl-tcp.html). - -## Running Web Modeler on Amazon Aurora PostgreSQL - -Web Modeler supports running on Amazon Aurora PostgreSQL. -To connect Web Modeler with your Amazon Aurora PostgreSQL instance, make the following configuration adjustments: - -1. Modify the `SPRING_DATASOURCE_URL` environment variable: `jdbc:aws-wrapper:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]`. -2. Add the environment variable `SPRING_DATASOURCE_DRIVER_CLASS_NAME` with the value `software.amazon.jdbc.Driver`. - -For a full list of available driver parameters visit the [AWS JDBC Driver documentation](https://github.com/awslabs/aws-advanced-jdbc-wrapper/wiki/UsingTheJdbcDriver#aws-advanced-jdbc-driver-parameters). - -### AWS IAM authentication - -To use AWS Identity and Access Management (IAM) database authentication with your Amazon Aurora PostgreSQL -instance, in addition to the adjustments described [above](#running-web-modeler-on-amazon-aurora-postgresql), follow these steps: - -1. Modify the `SPRING_DATASOURCE_URL` environment variable as follows: `jdbc:aws-wrapper:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?wrapperPlugins=iam`. -2. Modify the `SPRING_DATASOURCE_USERNAME` environment variable to match the database user you configured for AWS IAM authentication as described in the [Amazon Aurora documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html#UsingWithRDS.IAMDBAuth.DBAccounts.PostgreSQL). -3. Remove the `SPRING_DATASOURCE_PASSWORD` environment variable. diff --git a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/logging.md b/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/logging.md deleted file mode 100644 index be8db0bd16d..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/configuration/logging.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: logging -title: "Logging" -description: "Read details on additional logging configuration for Web Modeler." ---- - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../../reference/licenses.md#web-modeler) only. -::: - -## Logging configuration for the `restapi` component - -Web Modeler's `restapi` component uses the [logback framework](https://logback.qos.ch/) for logging. By default, the -`restapi` component logs to the Docker container's standard output. To change the default logging behavior, create a -custom configuration file and let the `restapi` know of it by specifying the following environment variable: - -``` -LOGGING_CONFIG=file:/full/path/to/custom-logback-config.xml -``` - -Refer to [Spring Boot's logging documentation](https://docs.spring.io/spring-boot/docs/current/reference/html/howto.html#howto.logging.logback) -for more information on how to customize the logback configuration for specific use cases like logging to a file. - -Enabling `DEBUG` logging for the `restapi` component can be useful for troubleshooting purposes, e.g. for -[debugging Zeebe connection issues](../troubleshooting/troubleshoot-zeebe-connection.md#how-can-i-debug-log-grpc--zeebe-communication). 
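As a rough illustration (the file path and log level below are placeholder values, not requirements), the custom logback file and a raised log level for troubleshooting can both be passed to `modeler-restapi` as environment variables; the log-level variable is the same one referenced in the Zeebe connection troubleshooting guide:

```shell
# Placeholder values - adjust the path and level to your environment.
# Point the restapi at a custom logback configuration (the file must be mounted into the container):
LOGGING_CONFIG=file:/config/custom-logback-config.xml
# Temporarily raise the Web Modeler log level while troubleshooting:
LOGGING_LEVEL_IO_CAMUNDA_MODELER=DEBUG
```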
- -By default, Web Modeler's `restapi` component logs in JSON. For a more readable logging format, activate the Spring profile using the following: - -```properties -SPRING_PROFILES_INCLUDE=default-logging -``` - -## Logging configuration for the `webapp` component - -By default, the `webapp` component logs to the Docker container's standard output. - -### Logging to a file - -To enable additional log output to a file, adjust the following environment variable: - -``` -LOG_FILE_PATH=/full/path/to/log/file.log -``` - -## Logging configuration for the `websocket` component - -By default, the `websocket` component logs to the Docker container's standard output. - -### Logging to a file - -To enable additional log output to a file, follow these steps: - -1. Mount a volume to the directory `/var/www/html/storage/logs`. The logs will be written to a file named `laravel.log` located inside this directory. -2. Adjust the following environment variable: - ``` - LOG_CHANNEL=single - ``` diff --git a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/installation.md b/versioned_docs/version-8.2/self-managed/modeler/web-modeler/installation.md deleted file mode 100644 index d949841e47a..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/installation.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: installation -title: Installation -description: "Details on installation of Web Modeler Self-Managed." ---- - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../reference/licenses.md#web-modeler) only. -::: - -Refer to the [Installation Guide](../../platform-deployment/overview.md) for details on how to install Web Modeler. diff --git a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection.md b/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection.md deleted file mode 100644 index a0d7a4a7f6d..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: troubleshoot-database-connection -title: "Troubleshooting database connection issues" -sidebar_label: "Database connection" ---- - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../../reference/licenses.md#web-modeler) only. -::: - -You try to start Web Modeler, and encounter issues with the database connection. - -## Using a non-empty schema - -As Web Modeler uses [Flyway](https://www.red-gate.com/products/flyway/community/) to manage schema updates, the schema should not be shared. - -Before the first initialization, ensure no tables or functions are present in your schema. - -If your database setup requires mandatory tables or functions, Flyway may throw an exception like `Found non-empty schema(s) "" without schema history table!` - -To overcome this issue, add the property `spring.flyway.baselineOnMigrate: true` to your Web Modeler configuration and remove it after the schema has been initialized. - -## Secure connection to standard PostgreSQL - -Refer to the [database configuration guide](../configuration/database.md#configuring-ssl-for-the-database-connection) -for details on how to configure a secure connection to PostgreSQL. - -## Secure connection to Amazon Aurora fails - -You configured a custom SSL certificate in your remote Amazon Aurora PostgreSQL instance and want Web Modeler to accept -that certificate. 
- -### Add Amazon Root CA to trust store - -By default, the Java version used by `modeler-restapi` ships with the Amazon Root CA. - -If you passed a custom trust store to `modeler-restapi`'s JVM process (e.g. via `JAVA_TOOL_OPTIONS` as described in -[the Zeebe connection troubleshooting guide](./troubleshoot-zeebe-connection.md#provide-the-certificate-to-the-jvm-trust-store)), -ensure the Amazon Trust Services CA are in `modeler-restapi`'s trust store (see the -[Amazon Aurora documentation](https://aws.amazon.com/blogs/security/how-to-prepare-for-aws-move-to-its-own-certificate-authority/)). - -## IAM authentication against Amazon Aurora fails - -You switched from standard username/password authentication to IAM authentication and Web Modeler can't obtain a connection to the database. - -### Ensure the IAM account has all privileges to the Web Modeler database - -After switching from standard username/password authentication to IAM authentication, privileges to Web Modeler's -database might still be associated with the old username. -Ensure the IAM account has all privileges to the Web Modeler database. diff --git a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login.md b/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login.md deleted file mode 100644 index 40c1a7a85f1..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -id: troubleshoot-login -title: "Troubleshooting login issues" -sidebar_label: "Login issues" ---- - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../../reference/licenses.md#web-modeler) only. -::: - -Logging in to Web Modeler doesn't work as expected and shows an error or a blank page when accessing the application. - -To further debug this issue, check the [log output](docs/self-managed/modeler/web-modeler/configuration/logging.md) of the `modeler-restapi` and `modeler-webapp` services for errors and warnings. - -## Unique constraint violation - -When you try to log in to Web Modeler using Keycloak as an OIDC provider, you see an error message in the `modeler-restapi` logs similar to this: - -``` -org.postgresql.util.PSQLException: ERROR: duplicate key value violates unique constraint "users_email_key" - Detail: Key (email)=(***************) already exists. -``` - -### Ensure the Keycloak-managed user id didn't change - -Web Modeler uses the value of the `sub` (subject) claim in the JSON Web Token (JWT) issued by the configured OIDC provider (by default Keycloak) to identify users and correlate them with their data created in Web Modeler. -It is important that this value doesn't change over time, for example when the user is deleted and re-created in Keycloak or re-imported from an external user directory, or when reinstalling/updating/switching Keycloak instances. - -If the `sub` claim value changes for an existing user, Web Modeler will try to create a new user record in its database for the user, which will lead to the error above when the user tries to log in. - -As a workaround, you can manually update the user ID in the Web Modeler database: - -1. Export the users from the **Keycloak database** to a CSV file. The following query can be used to select the relevant data: - ```sql - SELECT id, email - FROM user_entity - WHERE realm_id = 'camunda-platform' AND email IS NOT NULL; - ``` -2. 
Create a new table in the **Web Modeler database**: - ```sql - CREATE TABLE keycloak_users ( - id varchar(255), - email varchar(255) - ); - ``` -3. Import the CSV file from **Step 1** into the new `keycloak_users` table. -4. Update the user IDs by running the following query in the **Web Modeler database**: - ```sql - UPDATE users u - SET iam_id = k.id - FROM keycloak_users k - WHERE k.email = u.email; - ``` -5. Verify that the login is working again. -6. Delete the `keycloak_users` table: - ```sql - DROP TABLE keycloak_users; - ``` diff --git a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md b/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md deleted file mode 100644 index 783dec1741d..00000000000 --- a/versioned_docs/version-8.2/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: troubleshoot-zeebe-connection -title: "Troubleshooting Zeebe connection issues" -sidebar_label: "Zeebe connection" ---- - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../../reference/licenses.md#web-modeler) only. -::: - -You try to connect (i.e., to deploy) to a remote Zeebe cluster and Web Modeler reports an error. - -To resolve this issue, check if you can connect to Zeebe through another client, i.e., [`zbctl`](/versioned_docs/version-8.2/apis-tools/cli-client/index.md). -If that doesn't work, resolve the general connection issue first (see [the platform deployment troubleshooting section](/versioned_docs/version-8.2/self-managed/platform-deployment/troubleshooting.md), for example.) - -If that works, further debug your Zeebe connection with the help of the information stated below. Enabling [debug logging in `modeler-restapi`](#how-can-i-debug-log-grpc--zeebe-communication) may also help to understand the issue. - -## Zeebe connection times out - -### Increase the Zeebe client timeout - -Web Modeler uses the [Zeebe Java client](/versioned_docs/version-8.2/apis-tools/java-client/index.md) to connect to Zeebe. -Depending on your infrastructure, the default timeouts configured may be too short. - -You can pass custom timeouts in milliseconds for Web Modeler's Zeebe client to `modeler-restapi` via three individual environment variables: - -```shell -ZEEBE_CLIENT_REQUESTTIMEOUT=30000 # limit the time to wait for a response from the Zeebe gateway -ZEEBE_AUTH_CONNECT_TIMEOUT=60000 # limit the time to wait for a connection to the OAuth server -ZEEBE_AUTH_READ_TIMEOUT=60000 # limits the time to wait for a response from the OAuth server -``` - -## Secure connection to Zeebe fails - -If you provide a cluster URL starting with `https`, Web Modeler will try to establish a secure connection to -the Zeebe instance. -In the process, it strictly validates the server's Application-Layer Protocol Negotiation (ALPN) support and its certificates -presented against well-known certificate authorities. -Failure to connect may have several reasons: - -### Configure the gateway to accept secure connections - -Ensure you properly configure the remote cluster URL to accept secure connections. -Refer to the [Zeebe Gateway configuration documentation](../../../zeebe-deployment/security/secure-client-communication.md#gateway) -for additional information. 
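As an illustration only (the authoritative option names are in the linked gateway documentation), enabling TLS on a standalone Zeebe gateway typically comes down to settings along the following lines, shown here as environment variables with placeholder certificate paths:

```shell
# Sketch of gateway-side TLS settings; verify the exact variable names against the
# Zeebe gateway security documentation before relying on them.
ZEEBE_GATEWAY_SECURITY_ENABLED=true
ZEEBE_GATEWAY_SECURITY_CERTIFICATECHAINPATH=/usr/local/zeebe/config/tls.crt
ZEEBE_GATEWAY_SECURITY_PRIVATEKEYPATH=/usr/local/zeebe/config/tls.key
```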
- -### Configure the gateway to support ALPN - -[Inspect the connection](#how-can-i-get-details-about-a-secure-remote-connection) to understand if ALPN is supported -by the server. - -Secure connections to Zeebe require an Ingress controller that supports HTTP/2 over TLS with protocol negotiation via ALPN. -Ensure you properly [configured your Zeebe ingress to support ALPN](../../../platform-deployment/troubleshooting.md#zeebe-ingress-grpc). - -### Configure `modeler-restapi` to trust a custom Zeebe SSL certificate - -[Inspect the connection](#how-can-i-get-details-about-a-secure-remote-connection) to understand which certificates are -being returned by the server and ensure you configure Web Modeler for [custom SSL certificates](#how-can-i-provide-a-custom-zeebe-ssl-certificate). - -If intermediate signing authorities sign the server certificate, ensure the remote endpoint [serves both server and -intermediate certificates](https://nginx.org/en/docs/http/configuring_https_servers.html#chains) to Web Modeler. - -### Make the OAuth token cache location writeable for the `modeler-restapi` process - -When using the `OAuth` authentication method for deploying to Zeebe, Web Modeler caches OAuth tokens in a file-based -cache. -By default, the cache location is writeable by the `modeler-restapi` process. -If you run `modeler-restapi` as a non-root user (e.g. via Kubernetes' `securityContext.runAsUser` option), -you must ensure to provide a writeable cache file location to `modeler-restapi` via the `ZEEBE_CLIENT_CONFIG_PATH` -environment variable: - -```shell -ZEEBE_CLIENT_CONFIG_PATH=/path/to/credentials/cache.txt -``` - -## How can I provide a custom Zeebe SSL certificate? - -You configured a custom SSL certificate in your (remote) Zeebe endpoint and want Web Modeler to accept that certificate. -Web Modeler strictly validates the remote server certificate trust chain. -If you use a custom SSL server certificate, you must make the signing CA certificate known to Web Modeler, not the -server certificate itself. - -### Provide the certificate via an environment variable - -`modeler-restapi` reads a trusted certificate from the environment variable `ZEEBE_CA_CERTIFICATE_PATH`. -This solution is recommended for most users: - -```shell -ZEEBE_CA_CERTIFICATE_PATH=/path/to/certificate -``` - -The provided path has to be accessible from the `modeler-restapi` container (e.g. via a mounted volume). - -### Provide the certificate to the JVM trust store - -Alternatively, you may pass a custom trust store to `modeler-restapi` via the environment variable `JAVA_TOOL_OPTIONS`: - -```shell -JAVA_TOOL_OPTIONS="-Djavax.net.ssl.trustStore=/path/to/truststore.jks -Djavax.net.ssl.trustStorePassword=changeit" -``` - -Analogous to above, the provided path has to be accessible from the `modeler-restapi` container (e.g. via a mounted volume). - -:::caution -Be aware that passing a custom trust store via `JAVA_TOOL_OPTIONS` overrides the default JVM trust store. -This means that all certificates shipped by default with the Java runtime (e.g. the Amazon Trust Services CA for -connecting to a secured Aurora instance) are no longer trusted unless explicitly added. -::: - -## How can I get details about a secure remote connection? - -You can use the following command to retrieve information about HTTP/2 over TLS support (ALPN) and certificates provided -by a remote endpoint: - -```shell -> openssl s_client -alpn h2 -connect google.com:443 -servername google.com -[...] 
---- -Certificate chain - 0 s:/CN=*.google.com - i:/C=US/O=Google Trust Services LLC/CN=GTS CA 1C3 - 1 s:/C=US/O=Google Trust Services LLC/CN=GTS CA 1C3 - i:/C=US/O=Google Trust Services LLC/CN=GTS Root R1 - 2 s:/C=US/O=Google Trust Services LLC/CN=GTS Root R1 - i:/C=BE/O=GlobalSign nv-sa/OU=Root CA/CN=GlobalSign Root CA ---- -[...] ---- -New, TLSv1/SSLv3, Cipher is AEAD-CHACHA20-POLY1305-SHA256 -Server public key is 256 bit -Secure Renegotiation IS NOT supported -Compression: NONE -Expansion: NONE -ALPN protocol: h2 -SSL-Session: - Protocol : TLSv1.3 - Cipher : AEAD-CHACHA20-POLY1305-SHA256 - Session-ID: - Session-ID-ctx: - Master-Key: - Start Time: 1687516295 - Timeout : 7200 (sec) - Verify return code: 0 (ok) ---- -``` - -## How can I debug log gRPC / Zeebe communication? - -You can also start `modeler-restapi` with gRPC debug logging turned on to get detailed [logging -output](../configuration/logging.md) on communication to Zeebe: - -```shell -LOGGING_LEVEL_IO_GRPC=TRACE -LOGGING_LEVEL_IO_CAMUNDA_MODELER=DEBUG -``` diff --git a/versioned_docs/version-8.2/self-managed/operate-deployment/data-retention.md b/versioned_docs/version-8.2/self-managed/operate-deployment/data-retention.md deleted file mode 100644 index f6fb1d90155..00000000000 --- a/versioned_docs/version-8.2/self-managed/operate-deployment/data-retention.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: data-retention -title: Data retention -description: "Let's take a closer look at how Operate stores and archives data." ---- - -## How the data is stored and archived - -Operate imports data from Zeebe and stores it in Elasticsearch indices with a defined prefix (default: `operate`). Specifically, this includes the following: - -- Deployed processes, including the diagrams -- The state of process instances, including variables and flow nodes, activated within instance execution, incidents, etc. - -It additionally stores some Operate-specific data: - -- Operations performed by the user -- List of users -- Technical data, like the state of Zeebe import, etc. - -The data representing process instance state becomes immutable after the process instance is finished. Currently, the data may be archived, meaning it is moved to a dated index, e.g. `operate_variables_2020-01-01`, where date represents the date on which the given process instance was finished. The same is valid for user operations; after they are finished, the related data is moved to dated indices. - -:::note -All Operate data present in Elasticsearch (from both **main** and **dated** indices) are visible from the UI. -::: - -## Archive period - -The default time between a process instance finishing and being moved to a dated index is one hour. This can be modified by setting the [waitPeriodBeforeArchiving](importer-and-archiver.md#archive-period) configuration parameter. - -## Data cleanup - -In case of intensive Zeebe usage, the amount of data can grow significantly overtime. Therefore, you should consider the data cleanup strategy. - -Dated indices may be safely removed from Elasticsearch. "Safely" means only finished process instances are deleted together with all related data, and the rest of the data stays consistent. You can use Elasticsearch Curator or other tools/scripts to delete old data. - -Users updating from Elasticsearch 7 to Elasticsearch 8 will encounter issues with the Elasticsearch Curator. 
To resolve this, Operate allows configuring an Index Lifecycle Management (ILM) Policy using the `archiver` configuration options: - -### A snippet from application.yml - -```yaml -camunda.operate: - archiver: - ilmEnabled: true - ilmMinAgeForDeleteArchivedIndices: 30d -``` - -`ilmMinAgeForDeleteArchivedIndices` defines the duration for which archived data will be stored before deletion. The values use [Elasticsearch TimeUnit format](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units). - -This ILM Policy works on Elasticsearch 7 as well, and can function as a replacement of the Elasticsearch Curator. - -:::note -Only indices containing dates in their suffix may be deleted. -::: - -### OpenSearch - -OpenSearch does not support the above Index Lifecycle Management (ILM) Policy, but rather uses Index State Management. - -Refer to the [AWS documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ism.html) for configuration guidance. diff --git a/versioned_docs/version-8.2/self-managed/operate-deployment/importer-and-archiver.md b/versioned_docs/version-8.2/self-managed/operate-deployment/importer-and-archiver.md deleted file mode 100644 index 04cad62c780..00000000000 --- a/versioned_docs/version-8.2/self-managed/operate-deployment/importer-and-archiver.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -id: importer-and-archiver -title: Importer and archiver -description: "Let's analyze how Operate is organized by modules to import and archive data." ---- - -Operate consists of three modules: - -- **Webapp**: Contains the UI and operation executor functionality. -- **Importer**: Responsible for importing data from Zeebe. -- **Archiver**: Responsible for archiving "old" data (finished process instances and user operations.) See [data retention](data-retention.md). - -## Configuration - -Modules can be run together or separately in any combination and can be scaled. When you run an Operate instance, by default, all modules are enabled. To disable them, use the following configuration parameters: - -| Configuration parameter | Description | Default value | -| ------------------------------- | -------------------------------------- | ------------- | -| camunda.operate.importerEnabled | When true, Importer module is enabled. | true | -| camunda.operate.archiverEnabled | When true, Archiver module is enabled. | true | -| camunda.operate.webappEnabled | When true, Webapp module is enabled. | true | - -## Scaling - -Additionally, you can have several importer and archiver nodes to increase throughput. Internally, they will spread their work based on Zeebe partitions. - -For example, if your Zeebe runs 10 partitions and you configure two importer nodes, they will import data from five partitions each. - -Each single importer/archiver node must be configured using the following configuration parameters: - -| Configuration parameter | Description | Default value | -| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------- | -| camunda.operate.clusterNode.partitionIds | Array of Zeebe partition ids this Importer (or Archiver) node must be responsible for. | Empty array, meaning all partitions data is loaded. | -| camunda.operate.clusterNode.nodeCount | Total amount of Importer (or Archiver) nodes in the cluster. 
| 1 | -| camunda.operate.clusterNode.currentNodeId | Id of current Importer (or Archiver) node, starting from 0. | 0 | - -It's enough to configure either `partitionIds` or a pair of `nodeCount` and `currentNodeId`. If you provide `nodeCount` and `currentNodeId`, each node will automatically guess the Zeebe partitions they're responsible for. - -:::note -`nodeCount` always represents the number of nodes of one specific type. -::: - -For example, the configuration of a cluster with one Webapp node, two Importer nodes, and one Archiver node could look like the following: - -``` -Webapp node - -camunda.operate: - archiverEnabled: false - importerEnabled: false - #other configuration... - -Importer node #1 - -camunda.operate: - archiverEnabled: false - webappEnabled: false - clusterNode: - nodeCount: 2 - currentNodeId: 0 - #other configuration... - -Importer node #2 - -camunda.operate: - archiverEnabled: false - webappEnabled: false - clusterNode: - nodeCount: 2 - currentNodeId: 1 - #other configuration... - -Archiver node - -camunda.operate: - webappEnabled: false - importerEnabled: false - -``` - -You can further parallelize archiver and/or importer within one node using the following configuration parameters: - -| Configuration parameter | Description | Default value | -| ------------------------------------- | ------------------------------------------------- | ------------- | -| camunda.operate.archiver.threadsCount | Number of threads in which data will be archived. | 1 | -| camunda.operate.importer.threadsCount | Number of threads in which data will be imported. | 3 | - -:::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. -::: - -## Archive period - -The time between a process instance finishing and being archived can be set using the following configuration parameter: - -| Configuration parameter | Description | Default value | -| -------------------------------------------------- | -------------------------------------------- | ------------- | -| camunda.operate.archiver.waitPeriodBeforeArchiving | Amount of time before data will be archived. | 1h | - -By default, the archive period is set to "1h" (one hour). This means the data for the finished process instances will be kept in the "main" index for one hour after the process instance has finished, and then it will be moved to a "dated" index. - -The syntax for the parameter uses Elasticsearch date math. See the table below for reference: - -| Value | Description | -| ----- | ----------- | -| y | Years | -| M | Months | -| w | Weeks | -| d | Days | -| h | Hours | -| m | Minutes | -| s | Seconds | diff --git a/versioned_docs/version-8.2/self-managed/operate-deployment/install-and-start.md b/versioned_docs/version-8.2/self-managed/operate-deployment/install-and-start.md deleted file mode 100644 index d5709aaa0d9..00000000000 --- a/versioned_docs/version-8.2/self-managed/operate-deployment/install-and-start.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -id: install-and-start -title: Installation -description: "Let's get started with Operate with these simple installation steps." ---- - -Please refer to the [Installation Guide](/self-managed/platform-deployment/overview.md) for details on how to install Operate. 
diff --git a/versioned_docs/version-8.2/self-managed/operate-deployment/operate-authentication.md b/versioned_docs/version-8.2/self-managed/operate-deployment/operate-authentication.md deleted file mode 100644 index b5ede97f269..00000000000 --- a/versioned_docs/version-8.2/self-managed/operate-deployment/operate-authentication.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -id: operate-authentication -title: Authentication and authorization -description: "Let's take a closer look at how Operate authenticates for use." ---- - -Operate provides three ways to authenticate: - -1. User information stored in [Elasticsearch](#user-in-elasticsearch) -2. [Lightweight Directory Access Protocol (LDAP)](#ldap) -3. [Identity Authentication and Authorization](#identity) - -By default, user storage in Elasticsearch is enabled. - -## User in Elasticsearch - -In this mode, the user authenticates with a username and password stored in Elasticsearch. - -The **Userid**, **displayName**, **password**, and **roles** for one user may be set in `application.yml`: - -``` -camunda.operate: - userId: anUserId - displayName: nameShownInWebpage - password: aPassword - roles: - - OWNER - - USER -``` - -Currently, `OPERATOR`, `OWNER`, and `USER` roles are available. - -### Roles for users - -| Name | Description | -| -------- | --------------------- | -| OWNER | Full access | -| OPERATOR | Read and write access | -| USER | Read only access | - -On startup of Operate, the user is created if they did not exist before. - -By default, three users are created: - -- Role `OWNER` with **userId**/**displayName**/**password** `demo`/`demo`/`demo`. -- Role `OPERATOR` with **userId**/**displayName**/**password** `act`/`act`/`act`. -- Role `USER` with **userId**/**displayName**/**password** `view`/`view`/`view`. - -Add more users directly to Elasticsearch via the index `operate-user-_`. The password must be encoded with a strong `bcrypt` hashing function. - -## LDAP - -### Enable LDAP - -LDAP can only be enabled by setting the [Spring profile](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-profiles): `ldap-auth`. - -See the following example for setting the Spring profile as an environmental variable: - -``` -export SPRING_PROFILES_ACTIVE=ldap-auth -``` - -### Configuration of LDAP - -A user can authenticate via LDAP. - -The following parameters for connection to an LDAP server should be given: - -| Parameter name | Description | Example | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | ------------------ | -| camunda.operate.ldap.url | URL to an LDAP Server | ldaps://camunda.com/ | Yes | -| camunda.operate.ldap.baseDn | Base domain name | dc=camunda,dc=com | Yes | -| camunda.operate.ldap.managerDn | Manager domain used by Operate to log into LDAP server to retrieve user information. | cn=admin,dc=camunda,dc=com | Yes | -| camunda.operate.ldap.managerPassword | Password for manager | | Yes | -| camunda.operate.ldap.userSearchFilter | Filter to retrieve user info. The pattern '{0}' is replaced by the given username in the login form. | {0} | No, default is {0} | -| camunda.operate.ldap.userSearchBase | The starting point for search. | ou=Support,dc=camunda,dc=com | No | -| camunda.operate.ldap.userIdAttrName | LDAP attribute used to extract user id. 
| userPrincipalName | No | -| camunda.operate.ldap.displayNameAttrName | LDAP attribute used to extract username; the name the UI will show. | userName | No | -| camunda.operate.ldap.userDnPatterns | Pattern for retrieving user info, similar to userSearchFilter. The pattern '{0}' is replaced by the given username in the login form. | uid={0},ou=people | No | - -Example for standard LDAP server: - -```shell -CAMUNDA_OPERATE_LDAP_BASEDN=dc=planetexpress,dc=com -CAMUNDA_OPERATE_LDAP_URL=ldap://localhost:10389 -CAMUNDA_OPERATE_LDAP_MANAGERDN=cn=admin,dc=planetexpress,dc=com -CAMUNDA_OPERATE_LDAP_MANAGERPASSWORD=GoodNewsEveryone -CAMUNDA_OPERATE_LDAP_USERSEARCHFILTER=uid={0} -``` - -### Configuration of active directory-based LDAP - -For an **active directory**-based LDAP server, an **additional** parameter should be given: - -| Parameter name | Description | Required | -| ------------------------------------- | -------------------------------------- | -------- | -| camunda.operate.ldap.url | URL to an active directory LDAP server | Yes | -| camunda.operate.ldap.domain | Domain | Yes | -| camunda.operate.ldap.baseDn | Root domain name | No | -| camunda.operate.ldap.userSearchFilter | Used as a search filter | No | - -:::note -The active directory configuration will only be applied when `camunda.operate.ldap.domain` is given. -::: - -Example for active directory: - -```shell -CAMUNDA_OPERATE_LDAP_BASEDN=dc=dev,dc=camunda,dc=com -CAMUNDA_OPERATE_LDAP_URL=ldaps://ldap.dev.camunda.com/ -CAMUNDA_OPERATE_LDAP_MANAGERDN=CN=Der Admin,OU=AADDC Users,DC=dev,DC=camunda,DC=com -CAMUNDA_OPERATE_LDAP_MANAGERPASSWORD= -CAMUNDA_OPERATE_LDAP_USERSEARCHFILTER= -CAMUNDA_OPERATE_LDAP_DOMAIN=dev.camunda.com -CAMUNDA_OPERATE_LDAP_USERIDATTRNAME=userPrincipalName -``` - -:::note -`userSearchFilter` can be empty, and active directory default implementation would get `(&(objectClass=user)(userPrincipalName={0}))`. -::: - -## Identity - -[Identity](../../identity/what-is-identity/) provides authentication and authorization functionality along with user management. - -### Enable Identity - -Identity can only be enabled by setting the [Spring profile](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-profiles): `identity-auth`. 
- -See the following example: - -``` -export SPRING_PROFILES_ACTIVE=identity-auth -``` - -### Configure Identity - -Identity requires the following parameters: - -| Parameter name | Description | Example value | -| --------------------------------------------------- | -------------------------------------------------- | --------------------------------------------------------------------------------- | -| camunda.operate.identity.issuerUrl | URL of issuer (Identity) | http://localhost:18080/auth/realms/camunda-platform | -| camunda.operate.identity.issuerBackendUrl | Backend URL of issuer (Identity) | http://localhost:18080/auth/realms/camunda-platform | -| camunda.operate.identity.clientId | Similar to a username for the application | operate | -| camunda.operate.identity.clientSecret | Similar to a password for the application | XALaRPl...s7dL7 | -| camunda.operate.identity.audience | Audience for Operate | operate-api | -| spring.security.oauth2.resourceserver.jwt.issueruri | Token issuer URI | http://localhost:18080/auth/realms/camunda-platform | -| spring.security.oauth2.resourceserver.jwt.jwkseturi | Complete URI to get public keys for JWT validation | http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/certs | - -### Use Identity JWT token to access Operate API - -Operate provides a [REST API](/apis-tools/operate-api/overview.md) under the endpoint `/v1`. Clients can access this API using a JWT access token in an authorization header `Authorization: Bearer `. - -**Example:** - -1. [Add an application in Identity](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). -2. [Add permissions to an application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for Operate API. -3. Obtain a token to access the REST API. - You will need: - - `client_id` and `client_secret` from Identity application you created. - - URL of the authorization server will look like: `http://:/auth/realms/camunda-platform/protocol/openid-connect/token`, where host and port reference Keycloak URL (e.g. `localhost:18080`). - -```shell -curl --location --request POST 'http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token' \ ---header 'Content-Type: application/x-www-form-urlencoded' \ ---data-urlencode 'client_id=' \ ---data-urlencode 'client_secret=' \ ---data-urlencode 'grant_type=client_credentials' -``` - -You will get something like the following: - -```json -{ - "access_token": "eyJhbG...", - "expires_in": 300, - "refresh_expires_in": 0, - "token_type": "Bearer", - "not-before-policy": 0 -} -``` - -Take the `access_token` value from the response object and store it as your token. - -2. Send the token as an authorization header in each request. In this case, request all process definitions. - -```shell -curl -X POST 'http://localhost:8080/v1/process-definitions/search' -H 'Content-Type: application/json' -H 'Authorization: Bearer eyJhb...' -d '{}' -``` - -### Resource-based permissions - -By default, when using Operate with Identity, one can assign a user "read" and/or "write" permissions for Operate. "Read" allows read-only access to Operate. "Write" permission allows the user to perform all types of operations modifying data (e.g. update the variables, resolve the incidents or cancel instances). - -More detailed permissions may be enabled: - -1. Resource authorizations must be [enabled in Identity](/self-managed/identity/user-guide/authorizations/managing-resource-authorizations.md). -2. 
Operate must be configured to use resource authorizations: - -```yaml -camunda.operate.identity.resourcePermissionsEnabled: true -``` - -Resource-based permissions are defined per process definition or decision definition. Process definition is defined by Process ID, which is present in BPMN XML. Decision definition is defined by Decision ID, which is present in DMN XML. - -The user or user group can be assigned the following types of permissions: - -| Permission name | Resource type(s) | Allowed action(s) in Operate | -| ----------------------- | --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- | -| READ | process-definition, decision-definition | User can see the data related to defined process or decision definition. | -| UPDATE_PROCESS_INSTANCE | process-definition | User can retry the incident, add/update variable, cancel, or modify process instance related to defined process definition. | -| DELETE_PROCESS_INSTANCE | process-definition | User can delete process instance related to defined process definition. | - -For more information, visit the [Identity documentation](../../concepts/access-control/resource-authorizations/). - -## Zeebe client credentials - -If the Zeebe Gateway is set up with Camunda Identity-based authorization, [Zeebe client OAuth environment variables](../zeebe-deployment/security/client-authorization.md#environment-variables) must be provided. diff --git a/versioned_docs/version-8.2/self-managed/operate-deployment/operate-configuration.md b/versioned_docs/version-8.2/self-managed/operate-deployment/operate-configuration.md deleted file mode 100644 index b7c9c346732..00000000000 --- a/versioned_docs/version-8.2/self-managed/operate-deployment/operate-configuration.md +++ /dev/null @@ -1,340 +0,0 @@ ---- -id: operate-configuration -title: Configuration ---- - -As a Spring Boot application, Operate supports any standard -[Spring configuration](https://docs.spring.io/spring-boot/reference/features/external-config.html) method. - -By default, the configuration for Operate is stored in a YAML file (`application.yml`). All Operate-related settings are prefixed with `camunda.operate`. - -:::note -Configuration properties can be defined as environment variables using [Spring Boot conventions](https://docs.spring.io/spring-boot/reference/features/external-config.html#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. - -For example, the property `camunda.operate.elasticsearch.clustername` is represented by the environment variable `CAMUNDA_OPERATE_ELASTICSEARCH_CLUSTERNAME`. 
-::: - -The following parts are configurable: - -- [Webserver](#webserver) - - [Security](#security) -- [Elasticsearch or OpenSearch](#elasticsearch-or-opensearch) - - [Settings to connect](#settings-to-connect) - - [Settings to connect to a secured Elasticsearch or OpenSearch instance](#settings-to-connect-to-a-secured-elasticsearch-or-opensearch-instance) - - [Settings for shards and replicas](#settings-for-shards-and-replicas) - - [A snippet from application.yml](#a-snippet-from-applicationyml) -- [Zeebe broker connection](#zeebe-broker-connection) - - [Settings to connect](#settings-to-connect-1) - - [A snippet from application.yml](#a-snippet-from-applicationyml-1) -- [Zeebe Elasticsearch or OpenSearch exporter](#zeebe-elasticsearch-or-opensearch-exporter) - - [Settings to connect and import](#settings-to-connect-and-import) - - [A snippet from application.yml:](#a-snippet-from-applicationyml-2) -- [Operation executor](#operation-executor) - - [A snippet from application.yml](#a-snippet-from-applicationyml-3) -- [Monitoring Operate](#monitoring-operate) - - [Versions before 0.25.0](#versions-before-0250) -- [Logging](#logging) - - [JSON logging configuration](#json-logging-configuration) - - [Change logging level at runtime](#change-logging-level-at-runtime) - - [Set all Operate loggers to DEBUG](#set-all-operate-loggers-to-debug) -- [An example of application.yml file](#an-example-of-applicationyml-file) - -## Webserver - -Operate supports customizing the **context-path** using default Spring configuration. - -Example for `application.yml`: -`server.servlet.context-path: /operate` - -Example for environment variable: -`SERVER_SERVLET_CONTEXT_PATH=/operate` - -The default context-path is `/`. - -### Security - -To change the values for http header for security reasons, you can use the configuration parameters: - -| Name | Description | Default value | -| ------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| camunda.operate.websecurity.contentSecurityPolicy | See [Spring description](https://docs.spring.io/spring-security/site/docs/5.2.0.RELEASE/reference/html/default-security-headers-2.html#webflux-headers-csp) | base-uri 'self'; default-src 'self' 'unsafe-inline' 'unsafe-eval' cdn.jsdelivr.net;img-src \* data:; block-all-mixed-content; form-action 'self'; frame-ancestors 'none'; object-src 'none'; font-src 'self' fonts.camunda.io cdn.jsdelivr.net; sandbox allow-forms allow-scripts allow-same-origin allow-popups | -| camunda.operate.websecurity.httpStrictTransportSecurityMaxAgeInSeconds | See [Spring description](https://docs.spring.io/spring-security/site/docs/5.2.0.RELEASE/reference/html/default-security-headers-2.html#webflux-headers-hsts) | 63,072,000 (two years) | -| camunda.operate.websecurity.httpStrictTransportSecurityIncludeSubDomains | See [Spring description](https://docs.spring.io/spring-security/site/docs/5.2.0.RELEASE/reference/html/default-security-headers-2.html#webflux-headers-hsts) | true | - -## Elasticsearch or OpenSearch - -Operate stores and reads data from Elasticsearch or 
OpenSearch. - -### Settings to connect - -Operate supports [basic authentication](https://www.elastic.co/guide/en/elasticsearch/reference/7.12/setting-up-authentication.html) for Elasticsearch. - -Set the appropriate username/password combination in the configuration to use it. - -#### Settings to connect to a secured Elasticsearch or OpenSearch instance - -To connect to a secured (https) Elasticsearch instance, you normally need to only set the URL protocol -part to `https` instead of `http`. A secured Elasticsearch instance also needs `username` and `password`. -The other SSL settings should only be used in case of connection problems; for example, in disabling -host verification. - -:::note -You may need to import the certificate into JVM runtime. -::: - -Either set `host` and `port` (deprecated), or `url` (recommended). - -| Name | Description | Default value | -| ------------------------------------------------- | ----------------------------------------- | --------------------- | -| camunda.operate.elasticsearch.indexPrefix | Prefix for index names | operate | -| camunda.operate.elasticsearch.clusterName | Cluster name of Elasticsearch | elasticsearch | -| camunda.operate.elasticsearch.url | URL of Elasticsearch REST API | http://localhost:9200 | -| camunda.operate.elasticsearch.username | Username to access Elasticsearch REST API | - | -| camunda.operate.elasticsearch.password | Password to access Elasticsearch REST API | - | -| camunda.operate.elasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - | -| camunda.operate.elasticsearch.ssl.selfSigned | Certificate was self-signed | false | -| camunda.operate.elasticsearch.ssl.verifyHostname | Should the hostname be validated | false | - -### Settings for shards and replicas - -Operate creates the template with index settings named `operate-_template` that Elasticsearch uses for all Operate indices. These settings can be changed. - -The following configuration parameters define the settings: - -| Name | Description | Default value | -| ---------------------------------------------- | ------------------------------------------------------------ | ------------- | -| camunda.operate.elasticsearch.numberOfShards | How many shards Elasticsearch uses for all Operate indices | 1 | -| camunda.operate.elasticsearch.numberOfReplicas | How many replicas Elasticsearch uses for all Operate indices | 0 | - -These values are applied only on first startup of Operate or during version update. After the Operate -schema is created, settings may be adjusted directly in the Elasticsearch template, and the new settings are applied -to indices created after adjustment. - -### A snippet from application.yml - -```yaml -camunda.operate: - elasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - ssl: - selfSigned: true -``` - -## Zeebe broker connection - -Operate needs a connection to the Zeebe broker to start the import and execute user operations. - -### Settings to connect - -| Name | Description | Default value | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | --------------- | -| camunda.operate.zeebe.gatewayAddress | Gateway address that points to Zeebe as hostname and port. | localhost:26500 | -| camunda.operate.zeebe.secure | Connection should be secure via Transport Layer Security (TLS). 
| false | -| camunda.operate.zeebe.certificatePath | Path to certificate used by Zeebe. This is necessary when the certificate isn't registered in the operating system | - | - -Additionally, visit [Zeebe Secure Client Communication](/docs/self-managed/zeebe-deployment/security/secure-client-communication/) for more details. - -### A snippet from application.yml - -```yaml -camunda.operate: - zeebe: - # Gateway host and port - gatewayAddress: localhost:26500 -``` - -## Zeebe Elasticsearch or OpenSearch exporter - -For Elasticsearch, Operate imports data from indices created and filled in by the [Zeebe Elasticsearch exporter](../zeebe-deployment/exporters/elasticsearch-exporter.md). - -For OpenSearch, Operate imports data from indices created and filled in by the [Zeebe OpenSearch exporter](../zeebe-deployment/exporters/opensearch-exporter.md). - -Therefore, settings for this Elasticsearch or OpenSearch connection must be defined and must correspond to the settings on the Zeebe side. - -### Settings to connect and import - -See also [settings to connect to a secured Elasticsearch or OpenSearch instance](#settings-to-connect-to-a-secured-elasticsearch-or-opensearch-instance). - -:::note -You may need to import the certificate keystore into the JVM runtime. - -```yaml -# Kubernetes example: -zeebe: - … - javaOpts: >- - … - -Djavax.net.ssl.trustStore=/path/to/certificates/elasticsearch.jks -``` - -::: - -| Name | Description | Default value | -| ------------------------------------------------------ | ---------------------------------------------------------- | --------------------- | -| camunda.operate.zeebeElasticsearch.clusterName | Cluster name of Elasticsearch | elasticsearch | -| camunda.operate.zeebeElasticsearch.url | URL of Zeebe Elasticsearch REST API | http://localhost:9200 | -| camunda.operate.zeebeElasticsearch.prefix | Index prefix as configured in Zeebe Elasticsearch exporter | zeebe-record | -| camunda.operate.zeebeElasticsearch.username | Username to access Elasticsearch REST API | - | -| camunda.operate.zeebeElasticsearch.password | Password to access Elasticsearch REST API | - | -| camunda.operate.zeebeElasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - | -| camunda.operate.zeebeElasticsearch.ssl.selfSigned | Certificate was self-signed | false | -| camunda.operate.zeebeElasticsearch.ssl.verifyHostname | Should the hostname be validated | false | - -### A snippet from application.yml: - -```yaml -camunda.operate: - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` - -## Operation executor - -Operations are user operations, like cancellation of process instance(s) or updating the variable value. - -Operations are executed in a multi-threaded manner. - -| Name | Description | Default value | -| ---------------------------------------------- | -------------------------------- | ------------- | -| camunda.operate.operationExecutor.threadsCount | How many threads should be used. | 3 | - -### A snippet from application.yml - -```yaml -camunda.operate: - operationExecutor: - threadsCount: 3 -``` - -## Monitoring Operate - -Operate includes [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready) inside. This provides the number of monitoring possibilities. 
- -Operate uses the following Actuator configuration by default: - -```yaml -# Disable default health indicators -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-health-indicators -management.health.defaults.enabled: false -# enable Kubernetes health groups: -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-kubernetes-probes -management.health.probes.enabled: true -# enable several Actuator endpoints -management.endpoints.web.exposure.include: health, prometheus, loggers, usage-metrics, backup -``` - -With this configuration, the following endpoints are available for use out of the box: - -`:8080/actuator/prometheus` Prometheus metrics - -`:8080/actuator/health/liveness` Liveness probe - -`:8080/actuator/health/readiness` Readiness probe - -This configuration may be overwritten by changing the corresponding configuration parameters values. - -### Versions before 0.25.0 - -In versions before 0.25.0, management endpoints look different. Therefore, we recommend reconfiguring for next versions. - -| Name | Before 0.25.0 | Starting with 0.25.0 | -| --------- | ---------------- | -------------------------- | -| Readiness | /api/check | /actuator/health/readiness | -| Liveness | /actuator/health | /actuator/health/liveness | - -## Logging - -Operate uses the Log4j2 framework for logging. In the distribution archive, as well as inside a Docker image, `config/log4j2.xml` logging configuration files are included and can be further adjusted to your needs: - -```xml - - - - %clr{%d{yyyy-MM-dd HH:mm:ss.SSS}}{faint} %clr{%5p} %clr{${sys:PID}}{magenta} %clr{---}{faint} %clr{[%15.15t]}{faint} %clr{%-40.40c{1.}}{cyan} %clr{:}{faint} %m%n%xwEx - - - - - - - - - - - - - - - - -``` - -By default, `ConsoleAppender` is used. - -#### JSON logging configuration - -You can choose to output logs in JSON format (Stackdriver compatible). To enable it, define -the environment variable `OPERATE_LOG_APPENDER` like this: - -```sh -OPERATE_LOG_APPENDER=Stackdriver -``` - -### Change logging level at runtime - -Operate supports the default scheme for changing logging levels as provided by [Spring Boot](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers). - -The log level for Operate can be changed by following the [Setting a Log Level](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers-setting-level) section. - -#### Set all Operate loggers to DEBUG - -```shell -curl 'http://localhost:8080/actuator/loggers/io.camunda.operate' -i -X POST \ --H 'Content-Type: application/json' \ --d '{"configuredLevel":"debug"}' -``` - -## An example of application.yml file - -The following snippet represents the default Operate configuration, which is shipped with the distribution. This can be found inside the `config` folder (`config/application.yml`) and can be used to adjust Operate to your needs. - -```yaml -# Operate configuration file - -camunda.operate: - # Set operate userId, displayName and password. - # If user with does not exists it will be created. 
- # Default: demo/demo/demo - userId: anUserId - displayName: nameShownInWebpage - password: aPassword - roles: - - OWNER - - USER - # ELS instance to store Operate data - elasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: http://localhost:9200 - # Zeebe instance - zeebe: - # Gateway address to zeebe - gatewayAddress: localhost:26500 - # ELS instance to export Zeebe data to - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # url - url: http://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` diff --git a/versioned_docs/version-8.2/self-managed/operate-deployment/schema-and-migration.md b/versioned_docs/version-8.2/self-managed/operate-deployment/schema-and-migration.md deleted file mode 100644 index ed2068d3df3..00000000000 --- a/versioned_docs/version-8.2/self-managed/operate-deployment/schema-and-migration.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -id: schema-and-migration -title: Schema and migration ---- - -Operate stores data in Elasticsearch. On first start, Operate creates all required indices and templates. - -- [Schema](#schema) -- [Data migration](#data-migration) - - [Concept](#concept) - - [How to migrate](#how-to-migrate) - - [Migrate by using standalone application](#migrate-by-using-standalone-application) - - [Migrate by using built-in automatic update](#migrate-by-using-built-in-automatic-update) - - [Further notes](#further-notes) - - [Configure migration](#configure-migration) - - [Example for migration in Kubernetes](#example-for-migration-in-kubernetes) - -## Schema - -Operate uses several Elasticsearch indices that are mostly created using templates. - -Each index has its own version of schema. This means the version reflected in the index name is _not_ the version of Operate. - -Index names follow the defined pattern below: - -``` -{operate-index-prefix}-{datatype}-{schemaversion}_[{date}] - -``` - -Here, `operate-index-prefix` defines the prefix for index name (default `operate`), `datatype` defines which data is stored in the index (e.g. `user`, `variable` etc.,) `schemaversion` represents the index schema version, and `date` represents the finished date of the archived data. See [data retention](data-retention.md). - -## Data migration - -The version of Operate is reflected in Elasticsearch object names (e.g. `operate-user-1.0.0_` index contains the user data for Operate 1.0.0). When updating from one version of Operate to another, migration of data must be performed. Operate distribution provides an application to perform data migration from older versions. - -### Concept - -The migration uses Elasticsearch [ingest pipelines](https://www.elastic.co/guide/en/elasticsearch/reference/7.16/ingest.html) to reindex the data. -Please ensure that your Elasticsearch cluster has at least one node with the ingest role, as described [here](https://www.elastic.co/guide/en/elasticsearch/reference/7.16/ingest.html#ingest-prerequisites). - -Each version of Operate delivers a set of migration steps which need to be applied for a corresponding version of Operate. - -When updating from one version to another, necessary migration steps constitute the so-called migration plan. -All known migration steps (both applied and not) are persisted in the dedicated Elasticsearch index: `operate-migration-steps-repository`. - -### How to migrate - -#### Migrate by using standalone application - -Ensure Elasticsearch contains the data Operate is running. 
The migration script will connect to a specified connection in Operate configuration (`/config/application.yml`). - -Execute `/bin/migrate` (or `/bin/migrate.bat` for Windows). - -What is expected to happen: - -- New Elasticsearch indices are created if they don't exist. -- If an older version for some or all indices exists, the migration plan is built. -- For each index with an older version, the migration plan is executed. -- Older indices are deleted. - -All known migration steps with metadata are stored in the `operate-migration-steps-repository` index. - -:::note -The old indices are deleted _only_ after successful migration. This might require more disk space during the migration process. - -Take care of data backup before performing migration. -::: - -#### Migrate by using built-in automatic update - -When running a newer version of Operate against an older schema, it performs data migration on a startup. -The migration happens for every index, for which it detects exactly **one** older version. Migration fails if it detects more than one older version of some index. - -#### Further notes - -- If migration fails, you can retry it. All applied steps are stored and only those steps are applied that haven't been executed yet. -- Operate should not be running while migration is happening. -- In the case version update is performed in the cluster with several Operate nodes, only one node ([Webapp module](importer-and-archiver.md)) must execute data migration. The others must be stopped and started only after migration is fully finished. - -#### Configure migration - -Automatic migration is enabled by default. It can be disabled by setting the configuration key: - -`camunda.operate.migration.migrationEnabled = false` - -The following migration settings may affect the duration of the migration process: - -1. You can set the batch size for reindex of the documents. This can reduce the time needed to reindex the data. - Small document size means big batch size, while big document size means small batch size. - -`camunda.operate.migration.reindexBatchSize = 5000` (Between 1 and 10.000, Default: 5.000) - -2. In how many slices should the reindex be divided. For each shard used by the index, you normally use a slice. - Elasticsearch decides how many slices are used if the value is set to 0 (automatic). - -`camunda.operate.migration.slices = 0` - Must be positive. Default is 0 (automatic). - -#### Example for migration in Kubernetes - -To ensure the migration is executed _before_ Operate is started, use -the [initContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) feature of Kubernetes. - -This ensures only the "main" container is started if the `initContainer` is successfully executed. - -The following snippet of a pod description for Kubernetes shows the usage of `migrate` script as `initContainers`: - -``` -... - labels: - app: operate -spec: - initContainers: - - name: migration - image: camunda/operate:1.0.0 - command: ['/bin/sh','/usr/local/operate/bin/migrate'] - containers: - - name: operate - image: camunda/operate:1.0.0 - env: -... 
-``` diff --git a/versioned_docs/version-8.2/self-managed/operate-deployment/usage-metrics.md b/versioned_docs/version-8.2/self-managed/operate-deployment/usage-metrics.md deleted file mode 100644 index 523b4489fcb..00000000000 --- a/versioned_docs/version-8.2/self-managed/operate-deployment/usage-metrics.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -id: usage-metrics -title: Usage metrics -description: "Operate provides usage metrics under usage-metrics Actuator endpoint. It is exposed on management port." ---- - -Operate provides usage metrics under `usage-metrics` Actuator endpoint. It is exposed on management port that can be configured via `management.server.port` configuration parameter (default: 8080). - -## Amount of created process instances - -``` -http://:/actuator/usage-metrics/process-instances?startTime={startTime}&endTime={endTime} -``` - -Here, `startTime` and `endTime` are of format `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`, e.g. "1970-11-14T10:50:26.963-0100". - -Sample response: - -```json -{ - "total": 99 -} -``` - -## Amount of executed decision instances - -``` -http://:/actuator/usage-metrics/decision-instances?startTime={startTime}&endTime={endTime} -``` - -Here, `startTime` and `endTime` are of format `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`, e.g. "1970-11-14T10:50:26.963-0100". - -Sample response: - -```json -{ - "total": 80 -} -``` diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-dashboard-no-processes_dark.png b/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-dashboard-no-processes_dark.png deleted file mode 100644 index eff22424f41..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-dashboard-no-processes_dark.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-dashboard-no-processes_light.png b/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-dashboard-no-processes_light.png deleted file mode 100644 index 15dcacd32f2..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-dashboard-no-processes_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-introduction_dark.png b/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-introduction_dark.png deleted file mode 100644 index e4697160bf7..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-introduction_dark.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-introduction_light.png b/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-introduction_light.png deleted file mode 100644 index bbb389a11bb..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-introduction_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-tasklist-dashboard.png b/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-tasklist-dashboard.png deleted file mode 100644 index 796e7a4f058..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-tasklist-dashboard.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-tasklist-login.png 
b/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-tasklist-login.png deleted file mode 100644 index 3050aea1b1b..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/operate-tasklist-login.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/tasklist-start-screen_light.png b/versioned_docs/version-8.2/self-managed/platform-deployment/assets/tasklist-start-screen_light.png deleted file mode 100644 index baad1419f95..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/platform-deployment/assets/tasklist-start-screen_light.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/docker.md b/versioned_docs/version-8.2/self-managed/platform-deployment/docker.md deleted file mode 100644 index df59146472d..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/docker.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -id: docker -title: "Docker" -keywords: ["camunda docker"] ---- - -This page guides you through Camunda 8 Docker images and how to run the platform in a developer setup using Docker Compose. - -## Docker images - -We provide Docker images [via Dockerhub](https://hub.docker.com/u/camunda). All these images are publicly accessible (except for [Web Modeler](#web-modeler)). - -:::info -The provided Docker images are supported for production usage only on Linux systems. Windows or macOS are only supported for development environments. -::: - -| Component | Docker image | Link to configuration options | -| ----------------- | -------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | -| Zeebe | [camunda/zeebe:latest](https://hub.docker.com/r/camunda/zeebe) | [Environment variables](../../zeebe-deployment/configuration/environment-variables/) | -| Operate | [camunda/operate:latest](https://hub.docker.com/r/camunda/operate) | [Operate configuration](../../operate-deployment/operate-configuration) | -| Tasklist | [camunda/tasklist:latest](https://hub.docker.com/r/camunda/tasklist) | [Tasklist configuration](../../tasklist-deployment/tasklist-configuration) | -| Identity | [camunda/identity:latest](https://hub.docker.com/r/camunda/identity) | [Configuration variables](../../identity/deployment/configuration-variables/) | -| Optimize | [camunda/optimize:8-latest](https://hub.docker.com/r/camunda/optimize) | [Environment variables]($optimize$/self-managed/optimize-deployment/install-and-start/#available-environment-variables) | -| Connectors | [camunda/connectors:latest](https://hub.docker.com/r/camunda/connectors) | [Connectors configuration](../../connectors-deployment/connectors-configuration) | -| Connectors Bundle | [camunda/connectors-bundle:latest](https://hub.docker.com/r/camunda/connectors-bundle) | [Connectors configuration](../../connectors-deployment/connectors-configuration) | - -Zeebe is the only component that is often run on its own as a standalone component. 
In this scenario, it does not need anything else, so a simple `docker run` is sufficient: - -```bash -docker run --name zeebe -p 26500-26502:26500-26502 camunda/zeebe:latest -``` - -This will give you a single broker node with the following ports exposed: - -- `26500`: Gateway API (this is the port clients need to use) -- `26501`: Command API (internal, gateway-to-broker) -- `26502`: Internal API (internal, broker-to-broker) - -### Multi-platform support - -With the Camunda 8.2.0 release and onward, all Camunda 8 Docker images are provided as multi-platform images natively supporting the following platforms: - -- `linux/amd64` -- `linux/arm64` - -Your Docker client should automatically pull the image that suits your platform. -We currently only recommend the `linux/amd64` for production usage, as the `linux/arm64` image is provided mainly for development purposes. - -:::note -For Web Modeler, we only provide multi-platform images from the following releases onward: 8.2.8, 8.3.1, 8.4.0-alpha1. -::: - -### Web Modeler - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../reference/licenses.md#web-modeler) only. -::: - -The Docker images for Web Modeler are not publicly accessible, but available to enterprise customers only from -Camunda's private Docker registry. - -| Web Modeler Component | Docker image | -| --------------------- | :---------------------------------------------------------------- | -| Backend (`restapi`) | `registry.camunda.cloud/web-modeler-ee/modeler-restapi:latest` | -| Frontend (`webapp`) | `registry.camunda.cloud/web-modeler-ee/modeler-webapp:latest` | -| WebSocket server | `registry.camunda.cloud/web-modeler-ee/modeler-websockets:latest` | - -To pull the images you first need to log in using the credentials you received from Camunda: - -```bash -$ docker login registry.camunda.cloud -Username: your_username -Password: ****** -Login Succeeded -``` - -See the [instructions below](#web-modeler-1) on how to use the Web Modeler images with Docker Compose. -You can also find more information on the supported [configuration variables](../../modeler/web-modeler/configuration). - -## Docker Compose - -A Docker Compose configuration to run Zeebe, Operate, Tasklist, Optimize, Identity, and Connectors Bundle is available in the [camunda-platform](https://github.com/camunda/camunda-platform/blob/main/docker-compose.yaml) repository. -Follow the instructions in the [README](https://github.com/camunda/camunda-platform#using-docker-compose). - -:::warning -While the Docker images themselves are supported for production usage, the Docker Compose files are designed to be used by developers to run an environment locally; they are not designed to be used in production. We recommend to use [Kubernetes](./helm-kubernetes/overview.md) in production. -::: - -This Docker Compose configuration serves two purposes: - -1. It can be used to start up a development environment locally. -2. It documents how the various components need to be wired together. - -:::note -We recommend to use [Helm + KIND](./helm-kubernetes/guides/local-kubernetes-cluster.md) instead of Docker Compose for local environments, as the Helm configurations are battle-tested and much closer to production systems. -::: - -### Web Modeler - -An additional Docker Compose configuration to run Web Modeler is also available in the -[camunda-platform](https://github.com/camunda/camunda-platform/blob/main/docker-compose-web-modeler.yaml) repository. 
Follow the instructions in the [README](https://github.com/camunda/camunda-platform#web-modeler-self-managed) to utilize this configuration. - -## Configuration hints - -### Zeebe - -#### Volumes - -The default data volume is under `/usr/local/zeebe/data`. It contains -all data which should be persisted. - -#### Configuration - -The Zeebe configuration is located at `/usr/local/zeebe/config/application.yaml`. -The logging configuration is located at `/usr/local/zeebe/config/log4j2.xml`. - -The configuration of the Docker image can also be changed using [environment -variables](../zeebe-deployment/configuration/environment-variables.md). The configuration template file also contains information on the environment -variables to use for each configuration setting. - -Available environment variables: - -- `ZEEBE_LOG_LEVEL` - Sets the log level of the Zeebe Logger (default: `info`). -- `ZEEBE_BROKER_NETWORK_HOST` - Sets the host address to bind to instead of the IP of the container. -- `ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS` - Sets the contact points of other brokers in a cluster setup. - -### Optimize - -Some configuration properties are optional and have default values. See a description of these properties and their default values in the table below: - -| Name | Description | Default value | -| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| SPRING_PROFILES_ACTIVE | Determines the mode Optimize is to be run in. For Self-Managed, set to `ccsm`. | -| CAMUNDA_OPTIMIZE_IDENTITY_ISSUER_URL | The URL at which Identity can be accessed by Optimize. | -| CAMUNDA_OPTIMIZE_IDENTITY_ISSUER_BACKEND_URL | The URL at which the Identity auth provider can be accessed by Optimize. This should match the configured provider in Identity and is to be used for container to container communication. | -| CAMUNDA_OPTIMIZE_IDENTITY_CLIENTID | The Client ID used to register Optimize with Identity. | -| CAMUNDA_OPTIMIZE_IDENTITY_CLIENTSECRET | The secret used when registering Optimize with Identity. | -| CAMUNDA_OPTIMIZE_IDENTITY_AUDIENCE | The audience used when registering Optimize with Identity. | -| OPTIMIZE_ELASTICSEARCH_HOST | The address/hostname under which the Elasticsearch node is available. | localhost | -| OPTIMIZE_ELASTICSEARCH_HTTP_PORT | The port number used by Elasticsearch to accept HTTP connections. | 9200 | -| CAMUNDA_OPTIMIZE_SECURITY_AUTH_COOKIE_SAME_SITE_ENABLED | Determines if `same-site` is enabled for Optimize cookies. This must be set to `false`. | true | -| CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_USERNAME | The username for authentication in environments where a secured Elasticsearch connection is configured. | -| CAMUNDA_OPTIMIZE_ELASTICSEARCH_SECURITY_PASSWORD | The password for authentication in environments where a secured Elasticsearch connection is configured. | -| CAMUNDA_OPTIMIZE_ENTERPRISE | This should only be set to `true` if an Enterprise License has been acquired. | true | -| CAMUNDA_OPTIMIZE_ZEEBE_ENABLED | Enables import of Zeebe data in Optimize. | false | -| CAMUNDA_OPTIMIZE_ZEEBE_NAME | The record prefix for exported Zeebe records. | zeebe-record | -| CAMUNDA_OPTIMIZE_ZEEBE_PARTITION_COUNT | The number of partitions configured in Zeebe. | 1 | -| CAMUNDA_OPTIMIZE_SHARING_ENABLED | Enable/disable the possibility to share reports and dashboards. 
| true | -| SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI | Authentication for the Public REST API using a resource server to validate the JWT token. Complete URI to get public keys for JWT validation. | null | -| OPTIMIZE_API_ACCESS_TOKEN | Authentication for the Public REST API using a static shared token. Will be ignored if SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI is also set. | null | -| CAMUNDA_OPTIMIZE_CONTAINER_ENABLE_SNI_CHECK | Determines whether SNI checking should be enabled. | true | - -Like for example this `docker-compose` configuration: - -``` -optimize: - container_name: optimize - image: camunda/optimize:8-latest - ports: - - 8090:8090 - environment: - - SPRING_PROFILES_ACTIVE=ccsm - - CAMUNDA_OPTIMIZE_IDENTITY_ISSUER_URL=http://localhost:9090 - - CAMUNDA_OPTIMIZE_IDENTITY_ISSUER_BACKEND_URL=http://keycloak:8080/auth/realms/camunda-platform - - CAMUNDA_OPTIMIZE_IDENTITY_CLIENTID=optimize - - CAMUNDA_OPTIMIZE_IDENTITY_CLIENTSECRET=secret - - CAMUNDA_OPTIMIZE_IDENTITY_AUDIENCE=optimize-api - - OPTIMIZE_ELASTICSEARCH_HOST=localhost - - OPTIMIZE_ELASTICSEARCH_HTTP_PORT=9200 - - CAMUNDA_OPTIMIZE_SECURITY_AUTH_COOKIE_SAME_SITE_ENABLED=false - - CAMUNDA_OPTIMIZE_ENTERPRISE=false - - CAMUNDA_OPTIMIZE_ZEEBE_ENABLED=true - - CAMUNDA_OPTIMIZE_ZEEBE_NAME=zeebe-record - - CAMUNDA_OPTIMIZE_ZEEBE_PARTITION_COUNT=1 - - CAMUNDA_OPTIMIZE_SHARING_ENABLED=true - - SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI=https://weblogin.cloud.company.com/.well-known/jwks.json - - OPTIMIZE_API_ACCESS_TOKEN=secret -``` - -Self-Managed Optimize must be able to connect to Elasticsearch to write and read data. In addition, Optimize needs to connect to Identity for authentication purposes. Both of these requirements can be configured with the options described above. - -Optimize must also be configured as a client in Identity, and users will only be granted access to Optimize if they have a role -that has `write:*` permission for Optimize. - -For Optimize to import Zeebe data, Optimize must also be configured to be aware of the record prefix used when the records are exported to Elasticsearch. This can also be configured per the example above. - -### Connectors - -Use the provided [Docker Compose](#docker-compose) files to execute all [out-of-the-box Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) automatically. -This uses the [Connectors Bundle](https://hub.docker.com/r/camunda/connectors-bundle) Docker image. - -Note that some out-of-the-box Connectors are licensed under the -[Camunda Self-Managed Free Edition license](https://camunda.com/legal/terms/cloud-terms-and-conditions/camunda-cloud-self-managed-free-edition-terms/). -Find an overview in the [Connectors Bundle project](https://github.com/camunda/connectors-bundle). - -Refer to the [Connector installation guide](../../connectors-deployment/install-and-start) for details on how to provide the Connector templates for modeling. 
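For example, a minimal sketch of bringing up the full stack (including the Connectors Bundle) from a clone of the [camunda-platform](https://github.com/camunda/camunda-platform) repository; the service name `connectors` is an assumption based on that repository's Compose file and may differ in your checkout:

```bash
# from the root of the cloned camunda-platform repository
docker compose up -d

# follow the Connectors runtime logs to verify the out-of-the-box Connectors are registered
docker compose logs -f connectors
```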
- -#### Running single Connectors container - -```shell -docker run --rm --name=MyConnectorsInstance \ - --network=camunda-platform_camunda-platform \ - -e ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=zeebe:26500 \ - -e ZEEBE_CLIENT_SECURITY_PLAINTEXT=true \ - -e CAMUNDA_CONNECTOR_POLLING_ENABLED=false \ - -e CAMUNDA_CONNECTOR_WEBHOOK_ENABLED=false \ - -e OPERATE_CLIENT_ENABLED=false \ - camunda/connectors-bundle:latest -``` - -#### Custom set of Connectors - -To add custom Connectors, you can build on top of our [Connectors base image](https://hub.docker.com/r/camunda/connectors/) that includes the pre-packaged runtime environment without any Connector. -To use the image, at least one Connector must be added to the `classpath`. We recommend providing JARs with all dependencies bundled. - -:::caution - -As all Connectors share a single `classpath`, different versions of the same dependency can be available and cause conflicts. -To prevent this, common dependencies like `jackson` can be shaded and relocated inside the Connector's JAR. - -::: - -You can add a Connector JAR by extending the base image with a JAR from a public URL: - -```dockerfile -FROM camunda/connectors:x.y.z - -ADD https://repo1.maven.org/maven2/io/camunda/connector/connector-http-json/x.y.z/connector-http-json-x.y.z-with-dependencies.jar /opt/app/ -``` - -You can also add a Connector JAR using volumes: - -```bash -docker run --rm --name=connectors -d -v $PWD/connector.jar:/opt/app/ camunda/connectors:x.y.z -``` diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/deploy.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/deploy.md deleted file mode 100644 index f7d1ef55617..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/deploy.md +++ /dev/null @@ -1,267 +0,0 @@ ---- -id: deploy -title: "Camunda 8 Helm deployment" -sidebar_label: "Deploy" -description: "Camunda provides continuously improved Helm charts which are not cloud provider-specific, so you can choose your Kubernetes provider." ---- - -Camunda provides continuously improved Helm charts which are not cloud provider-specific, so you can choose your Kubernetes provider. The charts are available in the [Camunda Helm repository](https://github.com/camunda/camunda-platform-helm) and we encourage you to [report issues](https://github.com/camunda/camunda-platform-helm/issues) if you find any. - -## What is Helm? - -[Helm](https://helm.sh/) is a package manager for Kubernetes resources. Helm allows us to install a set of components by simply referencing a package name, and allows us to override configurations to adapt these packages to different scenarios. - -Helm also provides dependency management between charts, meaning that charts can depend on other charts. This allows us to aggregate a set of components together that can be installed with a single command. - -## Components installed by the Helm charts - -The following charts will be installed as part of Camunda 8 Self-Managed: - -- **Zeebe**: Deploys a Zeebe Cluster with three brokers using the `camunda/zeebe` Docker image. -- **Zeebe Gateway**: Deploys the standalone Zeebe Gateway with two replicas. -- **Operate**: Deploys Operate, which connects to an existing Elasticsearch. -- **Tasklist**: Deploys the Tasklist component to work with user tasks. -- **Optimize**: Deploys the Optimize component to analyze the historic process executions.
-- **Identity**: Deploys the Identity component responsible for authentication and authorization. -- **Connectors**: Deploys the Connectors component responsible for both inbound and outbound integration with external systems. -- **Elasticsearch**: Deploys an Elasticsearch cluster with two nodes. -- **Web Modeler**: Deploys the Web Modeler component that allows you to model BPMN processes in a collaborative way. - - _Note_: The chart is disabled by default and needs to be [enabled explicitly](#installing-web-modeler) as Web Modeler is only available to enterprise customers. - -![Camunda 8 Self-Managed Architecture Diagram](../../assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png) - -When installing the [camunda-platform](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform) Helm chart, all the components in this picture are installed. - -## Install Camunda 8 using Helm - -### Prerequisites - -Before deploying Camunda using Helm, you need the following: - -- [Kubernetes cluster](./overview.md#kubernetes-environments): either local, cloud platform, or on-premises. -- [Helm](https://helm.sh/docs/intro/install/) binary. -- [Kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) binary. - -### Adding Camunda Helm repository - -You have to add the Camunda Helm chart repository in order to use the charts. Once this is done, Helm is able to fetch and install charts hosted in [https://helm.camunda.io](https://helm.camunda.io): - -```bash -helm repo add camunda https://helm.camunda.io -helm repo update -``` - -Once this is completed, we are ready to install the Helm chart hosted in the official Camunda Helm chart repo. - -### Installing the Camunda Helm chart - -To install the available Camunda 8 components inside a Kubernetes cluster, you can simply run: - -```bash -helm install camunda camunda/camunda-platform -``` - -You can also add the `-n` flag to specify in which Kubernetes namespace the components should be installed (see the example below). - -:::note -The command does not install Web Modeler by default. To enable Web Modeler, refer to the [installation instructions](#installing-web-modeler) below. -::: - -Note that this Kubernetes cluster can have services which are already running; Camunda components are simply installed as another set of services. - -Installing all the components in a cluster requires all Docker images to be downloaded to the remote cluster. Depending on which cloud provider you are using, the time it takes to fetch all the images will vary.
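As mentioned above, the installation can also be scoped to its own namespace; a minimal sketch, where the namespace name `camunda` is an arbitrary choice:

```bash
# install the chart into a dedicated namespace, creating it if it does not exist
helm install camunda camunda/camunda-platform -n camunda --create-namespace
```

If you install into a dedicated namespace, remember to pass the same `-n camunda` flag to the `kubectl` commands in the rest of this guide.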
- -Review the progress of your deployment by checking if the Kubernetes pods are up and running with the following: - -```bash -kubectl get pods -``` - -This will return something similar to the following: - -``` -NAME READY STATUS RESTARTS AGE -camunda-keycloak-0 0/1 Pending 0 4s -camunda-identity-6bb5d864cc-kk6dv 0/1 ContainerCreating 0 4s -camunda-operate-cb597fd76-6vr2x 0/1 ContainerCreating 0 4s -camunda-optimize-676955b547-vxts7 0/1 ContainerCreating 0 4s -camunda-connectors-1bba590ff-a63dc 0/1 ContainerCreating 0 4s -camunda-postgresql-0 0/1 Pending 0 4s -camunda-tasklist-5bf5c56f7b-sdwg7 0/1 ContainerCreating 0 4s -camunda-zeebe-0 0/1 Pending 0 4s -camunda-zeebe-1 0/1 ContainerCreating 0 4s -camunda-zeebe-2 0/1 Pending 0 4s -camunda-zeebe-gateway-657b774f95-bbcx5 0/1 ContainerCreating 0 4s -camunda-zeebe-gateway-657b774f95-gmlbm 0/1 Running 0 4s -elasticsearch-master-0 0/1 Pending 0 4s -elasticsearch-master-1 0/1 Init:0/1 0 4s -``` - -Review the progress of your deployment by checking if the Kubernetes pods are up and running with the following: - -```bash -kubectl get pods -``` - -This will return something similar to the following: - -``` -NAME READY STATUS RESTARTS AGE -elasticsearch-master-0 1/1 Running 0 4m6s -camunda-operate-XXX 1/1 Running 0 4m6s -camunda-connectors-XXX 1/1 Running 0 4m6s -camunda-zeebe-0 1/1 Running 0 4m6s -camunda-tasklist-XXX 1/1 Running 0 4m6s -camunda-zeebe-gateway 1/1 Running 0 4m6s -``` - -### Installing with latest updates for certain Camunda Helm chart - -Although the Camunda 8 Helm chart gets the latest version of [Camunda 8 applications](../../../reference/supported-environments.md), the version is still possible to diverge slightly between the chart and the applications/dependencies due to different releases. - -To have the latest version of the chart and applications/dependencies at any time, install the chart as follows: - -```bash -# This will install the latest Camunda Helm chart with the latest applications/dependencies of it (currently it's v8.2.x). -helm install camunda camunda/camunda-platform \ - --values https://helm.camunda.io/camunda-platform/values/values-latest.yaml -``` - -The same works for previous supported versions as follows: - -```bash -# This will install Camunda Helm chart v8.1.x with the latest applications/dependencies of v8.1.x. -helm install camunda camunda/camunda-platform --version 8.1 \ - --values https://helm.camunda.io/camunda-platform/values/values-v8.1.yaml -``` - -### Connectors - -The **Connector runtime** comes enabled by default. To start using Connectors, install Connector element -templates. Learn more in our documentation for [Web Modeler](/components/connectors/manage-connector-templates.md) -and [Desktop Modeler](/components/modeler/desktop-modeler/element-templates/configuring-templates.md). - -Find all available configurable options at the official Camunda Helm [values docs](https://artifacthub.io/packages/helm/camunda/camunda-platform#connectors-parameters). - -#### Disable Connectors - -To disable Connectors, pass the `connectors.enabled: false` value when deploying Camunda Helm Chart. - -#### Polling authentication mode - -Connectors use the [Operate API](/apis-tools/operate-api/overview.md) to fetch process definitions containing inbound Connectors. Depending on your Camunda architecture, you may want to choose one of the following values for the `inbound.mode`: - -- `disabled` - Polling from Operate is disabled. Connector runtime will support only outbound interactions, such as HTTP REST calls. 
-- `credentials` - Connector runtime will attempt to authenticate to the Operate API with password-based basic HTTP authentication. -- `oauth` - _(Recommended and enabled by default)_ the Connector runtime will attempt to authenticate to the Operate API with an OAuth 2.0 provider. Camunda offers Keycloak as a default OAuth provider. - -### Installing Web Modeler - -:::note -Web Modeler Self-Managed is available to [enterprise customers](../../../reference/licenses.md#web-modeler) only. -::: - -To install the Camunda Helm chart with Web Modeler enabled, follow the steps below. - -#### Create image pull secret - -The Docker images for Web Modeler are not publicly accessible, but available to enterprise customers only from Camunda's private Docker registry. -To enable Kubernetes to pull the images from this registry, first [create an image pull secret](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) using the credentials you received from Camunda: - -```bash -kubectl create secret docker-registry \ - --docker-server=registry.camunda.cloud \ - --docker-username= \ - --docker-password= \ - --docker-email= -``` - -:::note -Replace `` with a name of your choice and ``, ``, and `` with your credentials. - -The secret must be created in the same Kubernetes namespace you will install the Helm chart in. Use the `-n` flag to specify a namespace. -::: - -Alternatively, create an image pull secret [from your Docker configuration file](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). - -#### Configure Web Modeler - -To set up Web Modeler, you need to provide the following required configuration values (all available configuration options are described in more detail in the Helm chart's [README](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform#web-modeler) file): - -- Enable Web Modeler with `webModeler.enabled: true` (it is disabled by default). -- Configure the previously created [image pull secret](#create-image-pull-secret) in `webModeler.image.pullSecrets`. -- Configure your SMTP server by providing the values under `webModeler.restapi.mail`. - - Web Modeler requires an SMTP server to send notification emails to users. -- Configure the database connection - - Web Modeler requires a PostgreSQL database as persistent data storage (other database systems are currently not supported). - - _Option 1_: Set `postgresql.enabled: true`. This will install a new PostgreSQL instance as part of the Helm release (using the [PostgreSQL Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) by Bitnami as a dependency). - - _Option 2_: Set `postgresql.enabled: false` and configure a [connection to an external database](#optional-configure-external-database). - -We recommend specifying these values in a YAML file that you pass to the `helm install` command. 
A minimum configuration file would look as follows: - -```yaml -webModeler: - enabled: true - image: - pullSecrets: - # replace with the name of the previously created secret - - name: - restapi: - mail: - smtpHost: smtp.example.com - smtpPort: 587 - smtpUser: user - smtpPassword: secret - # email address to be displayed as sender of emails from Web Modeler - fromAddress: no-reply@example.com -postgresql: - enabled: true -``` - -#### Optional: Configure external database - -If you don't want to install a new PostgreSQL instance with Helm, but connect Web Modeler to an existing external database, set `postgresql.enabled: false` and provide the values under `webModeler.restapi.externalDatabase`: - -```yaml -webModeler: - restapi: - externalDatabase: - host: postgres.example.com - port: 5432 - database: modeler-db - user: modeler-user - password: secret -postgresql: - # disables the PostgreSQL chart dependency - enabled: false -``` - -#### Install the Helm chart - -Assuming you have saved your configuration in `modeler-values.yaml`, install the Helm chart by running the following: - -``` -helm install --values modeler-values.yaml camunda camunda/camunda-platform -``` - -### Troubleshooting the installation - -Check that each pod is running and ready. If one or more of your pods stay pending, it means that it can not be scheduled onto a node. Usually this happens because there are insufficient resources that prevent it. Use the `kubectl describe ...` command to check on messages from the scheduler: - -``` -kubectl describe pods -``` - -If the output of the `describe` command was not beneficial, tail the logs of these pods by running the following: - -``` -kubectl logs -f -``` - -## Upgrading Camunda Helm chart - -For upgrading Camunda Helm chart from one release to another, perform a [Helm upgrade](upgrade.md). - -## General notes - -- **Zeebe gateway** is deployed as a stateless service. We support [Kubernetes startup and liveness probes](../../zeebe-deployment/configuration/gateway-health-probes.md) for Zeebe. -- **Zeebe broker nodes** need to be deployed as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) to preserve the identity of cluster nodes. StatefulSets require persistent storage, which must be allocated in advance. Depending on your cloud provider, the persistent storage differs as it is provider-specific. diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/accessing-components-without-ingress.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/accessing-components-without-ingress.md deleted file mode 100644 index f3792c31cf8..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/accessing-components-without-ingress.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: accessing-components-without-ingress -title: "Accessing components without Ingress" -description: "Accessing Camunda 8 components externally without Ingress" ---- - -By default, the [Camunda Helm chart](../../helm-kubernetes/deploy.md) does not expose the Camunda services externally. So to interact with the Camunda services inside a Kubernetes cluster without Ingress setup, you can use `kubectl port-forward` to route traffic from your local machine to the cluster. This is useful for quick tests or for development purposes. - -:::note -You need to keep `port-forward` running all the time to communicate with the remote cluster. 
-::: - -## Accessing workflow engine - -To interact with Camunda workflow engine via [Zeebe Gateway](../../../zeebe-deployment/configuration/gateway.md) using [zbctl](../../../../apis-tools/cli-client/index.md) or a local client/worker from outside the Kubernetes cluster, run `kubectl port-forward` to the Zeebe cluster as following: - -``` -kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 -``` - -Now, you can connect and execute operations against your new Zeebe cluster. This allows you to use `zbctl` as a command line interface to read and create resources inside the Zeebe broker. - -:::note -Accessing the Zeebe cluster directly using `kubectl port-forward` is recommended for development purposes. -::: - -## Accessing web applications - -To interact with Camunda web applications like Operate, Tasklist, and Optimize, also `kubectl port-forward` will be used. - -:::note -To use the web applications without Camunda Identity, you can set `global.identity.auth.enabled: false` in the values file to disable the authentication mechanism. -Do _not_ disable it if you like to use Web Modeler, as it requires Camunda Identity and Keycloak. -::: - -First, port-forward for each application service: - -``` -kubectl port-forward svc/camunda-operate 8081:80 - -kubectl port-forward svc/camunda-tasklist 8082:80 - -kubectl port-forward svc/camunda-optimize 8083:80 - -kubectl port-forward svc/camunda-connectors 8088:8080 -``` - -To be able to use Web Modeler, create additional port-forwardings for Web Modeler itself and Keycloak (assuming that Keycloak is installed as part of the Helm release): - -``` -kubectl port-forward svc/camunda-web-modeler-webapp 8084:80 - -kubectl port-forward svc/camunda-web-modeler-websockets 8085:80 - -kubectl port-forward svc/camunda-keycloak 18080:80 -``` - -:::note -The name of the Keycloak service will be truncated after 20 characters if Keycloak 16 is used, for example: `svc/long-release-name-ke` -::: - -Finally, you can access each app pointing your browser at: - -- Operate: [http://localhost:8081](http://localhost:8081) -- Tasklist: [http://localhost:8082](http://localhost:8082) -- Optimize: [http://localhost:8083](http://localhost:8083) -- Web Modeler: [http://localhost:8084](http://localhost:8084) - -Log in to these services using the `demo`/`demo` credentials. - -
-_[Screenshot: Operate and Tasklist Login]_
-_[Screenshot: Operate and Tasklist Dashboard]_
    - -If you deploy process definitions, they will appear in the dashboard. Then, you can drill down to see your active instances. - -You can deploy and create new instances using the Zeebe clients or `zbctl`. - -You can also trigger **Connectors** inbound webhook, given you deployed one. -You can do so with the following example: `curl -X POST -H "Content-Type: application/json" -d '{"myId": 123456, "myMessage": "Hello, world!"}' http://localhost:8088/inbound/`. diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/air-gapped-installation.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/air-gapped-installation.md deleted file mode 100644 index 3b00356b53f..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/air-gapped-installation.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -id: air-gapped-installation -title: "Installing in an air-gapped environment" -description: "Camunda 8 Self-Managed installation in an air-gapped environment" ---- - -The [Camunda Helm chart](../../helm-kubernetes/deploy.md) may assist in an air-gapped environment. By default, the Docker images are fetched via Docker Hub (except for [Web Modeler](../../docker.md#web-modeler)). -With the dependencies in third-party Docker images and Helm charts, additional steps are required to make all charts available as outlined in this resource. - -## Required Docker images - -The following images must be available in your air-gapped environment: - -- [camunda/zeebe](https://hub.docker.com/r/camunda/zeebe) -- [camunda/operate](https://hub.docker.com/r/camunda/operate) -- [camunda/tasklist](https://hub.docker.com/r/camunda/tasklist) -- [camunda/optimize](https://hub.docker.com/r/camunda/optimize) -- [camunda/connectors-bundle](https://hub.docker.com/r/camunda/connectors-bundle) -- [camunda/identity](https://hub.docker.com/r/camunda/identity) -- [postgres](https://hub.docker.com/_/postgres) -- [bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak) -- [elasticsearch](https://hub.docker.com/_/elasticsearch) -- Web Modeler images (only available from [Camunda's private registry](../../docker.md#web-modeler)): - - `web-modeler-ee/modeler-restapi` - - `web-modeler-ee/modeler-webapp` - - `web-modeler-ee/modeler-websockets` - -## Accessing Camunda images from Camunda Docker Registry - -Please note that all the required Docker images, available on DockerHub's Camunda and Bitnami organizations, are also provided publicly via Camunda's Docker registry: `registry.camunda.cloud/camunda/` and `registry.camunda.cloud/bitnami/` - -For example, the Docker image of Zeebe and PostgreSQL can be pulled via DockerHub or via the Camunda's Docker Registry: - -```shell -docker pull camunda/zeebe:latest -docker pull registry.camunda.cloud/camunda/zeebe:latest - -docker pull bitnami/keycloak:latest -docker pull registry.camunda.cloud/bitnami/keycloak:latest -``` - -## Required Helm charts - -The following charts must be available in your air-gapped environment: - -- [Camunda Helm chart](https://github.com/camunda/camunda-platform-helm) -- [Elasticsearch Helm chart](https://github.com/elastic/helm-charts/tree/main/elasticsearch) -- [Keycloak Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/keycloak) -- [Postgres Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) -- [Bitnami Common Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/common) - -## Dependencies explained - 
-Identity utilizes Keycloak and allows you to manage users, roles, and permissions for Camunda 8 components. This third-party dependency is reflected in the Helm chart as follows: - -``` -camunda-platform - |_ elasticsearch - |_ identity - |_ keycloak - |_ postgresql - |_ zeebe - |_ optimize - |_ operate - |_ tasklist - |_ connectors - |_ postgresql -``` - -- Keycloak is a dependency for Camunda Identity and PostgreSQL is a dependency for Keycloak. -- PostgreSQL is a dependency for Web Modeler. - - This dependency is optional as you can either install PostgreSQL with Helm or use an existing [external database](../deploy.md#optional-configure-external-database). -- Elasticsearch is a dependency for Zeebe, Operate, Tasklist, and Optimize. -- Connectors can be stand-alone; however if there's an intention to use inbound capabilities, Operate becomes a dependency. - -The values for the dependencies Keycloak and PostgreSQL can be set in the same hierarchy: - -```yaml -identity: - [identity values] - keycloak: - [keycloak values] - postgresql: - [postgresql values] -postgresql: - [postgresql values] -``` - -## Push Docker images to your repository - -All the [required Docker images](#required-docker-images) need to be pushed to your repository using the following steps: - -1. Tag your image using the following command (replace ``, ``, and `` with the corresponding values.) - -``` -docker tag example.jfrog.io/camunda/: -``` - -2. Push your image using the following command: - -``` -docker push example.jfrog.io/camunda/: -``` - -## Deploy Helm charts to your repository - -You must deploy the [required Helm charts](#required-helm-charts) to your repository. -For details about hosting options, visit the [chart repository guide](https://helm.sh/docs/topics/chart_repository). - -### Add your Helm repositories - -You must add your Helm chart repositories to use the charts: - -``` -helm repo add camunda https://example.jfrog.io/artifactory/api/helm/camunda-platform -helm repo add elastic https://example.jfrog.io/artifactory/api/helm/elastic -helm repo add bitnami https://example.jfrog.io/artifactory/api/helm/bitnami -helm repo update -``` - -### Helm chart values - -In a custom values file, it is possible to override the image repository and the image tag. - -```yaml -zeebe: - image: - repository: example.jfrog.io/camunda/zeebe - # e.g. work with the latest versions in development - tag: latest -zeebe-gateway: - image: - repository: example.jfrog.io/camunda/zeebe - tag: latest -elasticsearch: - image: example.jfrog.io/elastic/elasticsearch - imageTag: 7.16.3 -identity: - image: - repository: example.jfrog.io/camunda/identity - ... - keycloak: - image: - repository: example.jfrog.io/bitnami/keycloak - ... - postgresql: - image: - repository: example.jfrog.io/bitnami/postgres - ... -operate: - image: - repository: example.jfrog.io/camunda/operate - ... -tasklist: - image: - repository: example.jfrog.io/camunda/tasklist - ... -optimize: - image: - repository: example.jfrog.io/camunda/optimize - ... -connectors: - image: - repository: example.jfrog.io/camunda/connectors-bundle - ... -webModeler: - image: - # registry and tag will be used for all three Web Modeler images - registry: example.jfrog.io - tag: latest - restapi: - image: - repository: camunda/modeler-restapi - webapp: - image: - repository: camunda/modeler-webapp - websockets: - image: - repository: camunda/modeler-websockets - ... 
-# only necessary if the PostgreSQL chart dependency is used for Web Modeler -postgresql: - image: - repository: example.jfrog.io/bitnami/postgres -``` - -Afterwards, you can deploy Camunda using Helm and the custom values file. - -``` -helm install my-camunda-platform camunda/camunda-platform -f values.yaml -``` diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/guides.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/guides.md deleted file mode 100644 index da653de9b1b..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/guides.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: guides -title: "Configuration and deployment user guides" -description: "Various use cases configuring and deploying Camunda 8." ---- - -import DocCardList from '@theme/DocCardList'; -import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; - -Camunda 8 Self-Managed is highly customizable and can be deployed in different setups. This section highlights various use cases and scenarios of configuring Camunda 8 beyond the default values. - -Each guide is considered complete and a standalone use case. However, view the [deploying Camunda 8 using Helm charts](../deploy.md) page, as this is the base for all guides. - - diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup.md deleted file mode 100644 index 8d77dafa0fc..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -id: ingress-setup -title: "Combined and separated Ingress setup" -description: "Camunda 8 Self-Managed combined and separated Ingress setup" ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Camunda 8 Self-Managed has multiple web applications and gRPC services. Both can be accessed externally using Ingress. There are two ways to do this: - -1. **Combined setup:** In this setup, there are two Ingress objects: one Ingress object for all Camunda 8 web applications using a single domain. Each application has a sub-path e.g. `camunda.example.com/operate`, and `camunda.example.com/optimize` and another Ingress which uses gRPC protocol for Zeebe Gateway e.g. `zeebe.camunda.example.com`. -2. **Separated setup:** In this setup, each component has its own Ingress/host e.g. `operate.camunda.example.com`, `optimize.camunda.example.com`, `zeebe.camunda.example.com`, etc. - -There are no significant differences between the two setups. Rather, they both offer flexibility for different workflows. - -:::note -Camunda 8 Helm chart doesn't manage or deploy Ingress controllers, it only deploys Ingress resources. Hence, this Ingress setup will not work without an Ingress controller running in your cluster. -::: - -## Preparation - -- An Ingress controller should be deployed in advance. The examples below use the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), but any Ingress controller could be used by setting `ingress.className`. -- TLS configuration is not handled in the examples because it varies between different workflows. It could be configured directly using `ingress.tls` options or via an external tool like [Cert-Manager](https://github.com/cert-manager/cert-manager) using `ingress.annotations`. 
For more details, check available [configuration options](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform#configuration). - -## Configuration - - - - - -In this setup, a single Ingress/domain is used to access Camunda 8 web applications, and another for Zeebe Gateway. By default, all web applications use `/` as a base, so we just need to set the context path, Ingress configuration, and authentication redirect URLs. - -![Camunda 8 Self-Managed Architecture Diagram - Separated Ingress](../../../assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png) - -```yaml -# Chart values for the Camunda 8 Helm chart in combined Ingress setup. - -# This file deliberately contains only the values that differ from the defaults. -# For changes and documentation, use your favorite diff tool to compare it with: -# https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters - -# IMPORTANT: Make sure to change "camunda.example.com" to your domain. - -global: - ingress: - enabled: true - className: nginx - host: "camunda.example.com" - identity: - auth: - publicIssuerUrl: "https://camunda.example.com/auth/realms/camunda-platform" - operate: - redirectUrl: "https://camunda.example.com/operate" - tasklist: - redirectUrl: "https://camunda.example.com/tasklist" - optimize: - redirectUrl: "https://camunda.example.com/optimize" - webModeler: - redirectUrl: "https://camunda.example.com/modeler" - -identity: - contextPath: "/identity" - fullURL: "https://camunda.example.com/identity" - -operate: - contextPath: "/operate" - -optimize: - contextPath: "/optimize" - -tasklist: - contextPath: "/tasklist" - -webModeler: - # The context path is used for the web application that will be accessed by users in the browser. - # In addition, a WebSocket endpoint will be exposed on "[contextPath]-ws", e.g. "/modeler-ws". - contextPath: "/modeler" - -zeebe-gateway: - ingress: - enabled: true - className: nginx - host: "zeebe.camunda.example.com" -``` - -:::note Web Modeler -The configuration above only contains the Ingress-related values under `webModeler`. Note the additional [installation instructions and configuration hints](../../helm-kubernetes/deploy.md#installing-web-modeler). -::: - -Using the custom values file, [deploy Camunda 8 as usual](../../helm-kubernetes/deploy.md): - -```shell -helm install demo camunda/camunda-platform -f values-combined-ingress.yaml -``` - -Once deployed, you can access the Camunda 8 components on: - -- **Web applications:** `https://camunda.example.com/[identity|operate|optimize|tasklist|modeler]` - - _Note_: Web Modeler also exposes a WebSocket endpoint on `https://camunda.example.com/modeler-ws`. This is only used by the application itself and not supposed to be accessed by users directly. -- **Keycloak authentication:** `https://camunda.example.com/auth` -- **Zeebe Gateway:** `grpc://zeebe.camunda.example.com` - - - - - -In this setup, each Camunda 8 component has its own Ingress/domain. There is no need to set the context since `/` is used as a default base. Here, we just need to set the Ingress configuration and authentication redirect URLs. - -![Camunda 8 Self-Managed Architecture Diagram - Separated Ingress](../../../assets/camunda-platform-8-self-managed-architecture-diagram-separated-ingress.png) - -```yaml -# Chart values for the Camunda 8 Helm chart in combined Ingress setup. - -# This file deliberately contains only the values that differ from the defaults. 
-# For changes and documentation, use your favorite diff tool to compare it with: -# https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters - -# IMPORTANT: Make sure to change "camunda.example.com" to your domain. - -global: - identity: - auth: - publicIssuerUrl: "https://keycloak.camunda.example.com/auth/realms/camunda-platform" - operate: - redirectUrl: "https://operate.camunda.example.com" - tasklist: - redirectUrl: "https://tasklist.camunda.example.com" - optimize: - redirectUrl: "https://optimize.camunda.example.com" - webModeler: - redirectUrl: "https://modeler.camunda.example.com" - -identity: - ingress: - enabled: true - className: nginx - host: "identity.camunda.example.com" - fullURL: "https://identity.camunda.example.com" - - keycloak: - ingress: - enabled: true - ingressClassName: nginx - hostname: "keycloak.camunda.example.com" - -operate: - ingress: - enabled: true - className: nginx - host: "operate.camunda.example.com" - -optimize: - ingress: - enabled: true - className: nginx - host: "optimize.camunda.example.com" - -tasklist: - ingress: - enabled: true - className: nginx - host: "tasklist.camunda.example.com" - -zeebe-gateway: - ingress: - enabled: true - className: nginx - host: "zeebe.camunda.example.com" - -webModeler: - ingress: - enabled: true - className: nginx - webapp: - host: "modeler.camunda.example.com" - websockets: - host: "modeler-ws.camunda.example.com" -``` - -:::note Web Modeler -The configuration above only contains the Ingress-related values under `webModeler`. Note the additional [installation instructions and configuration hints](../../helm-kubernetes/deploy.md#installing-web-modeler). -::: - -Using the custom values file, [deploy Camunda 8 as usual](../../helm-kubernetes/deploy.md): - -```shell -helm install demo camunda/camunda-platform -f values-separated-ingress.yaml -``` - -Once deployed, you can access the Camunda 8 components on: - -- **Web applications:** `https://[identity|operate|optimize|tasklist|modeler].camunda.example.com` -- **Keycloak authentication:** `https://keycloak.camunda.example.com` -- **Zeebe Gateway:** `grpc://zeebe.camunda.example.com` - - - - - -## Ingress controllers - -Ingress resources require the cluster to have an [Ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) running. There are many options for configuring your Ingress controller. If you are using a cloud provider such as AWS or GCP, we recommend you follow their Ingress setup guides if an Ingress controller is not already pre-installed. - -### Example local configuration - -An Ingress controller is also required when working on a local Camunda 8 installation. 
Take a look at an Ingress controller configuration using the [ingress-nginx controller](https://kubernetes.github.io/ingress-nginx/deploy/#bare-metal-clusters/): - -```yaml -# ingress_nginx_values.yml -controller: - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - service: - type: NodePort - - publishService: - enabled: false -``` - -To install this [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) to your local cluster, execute the following command: - -```shell -helm install -f ingress_nginx_values.yml \ -ingress-nginx ingress-nginx \ ---repo https://kubernetes.github.io/ingress-nginx \ ---version "4.9.0" \ ---namespace ingress-nginx \ ---create-namespace -``` - -## Troubleshooting - -If something is not working as expected, check the guide for [general deployment troubleshooting](../../troubleshooting.md). diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster.md deleted file mode 100644 index db300df1b50..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -id: local-kubernetes-cluster -title: "Local Kubernetes Cluster" -description: "Deploy Camunda 8 Self-Managed on Kubernetes local cluster for development purposes using KIND." ---- - -You can deploy Camunda 8 Self-Managed on Kubernetes local cluster for development purposes using [KIND](https://kind.sigs.k8s.io/). - -In this guide, we will use `KIND`. However, the concept is the same for any other tool like `K3s`, `Minikube`, or `MicroK8s`. The goal in this guide is to reduce the resources required by Camunda components so they can work on a personal machine. - -## Preparation - -Based on your system, install the CLI tools used in this guide if you don't already have them: - -- [kind](https://kind.sigs.k8s.io/docs/user/quick-start) -- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) -- [helm](https://helm.sh/docs/intro/install/) - -## Create a local Kubernetes cluster - -If you have not already, create a local Kubernetes cluster with the following command: - -```sh -kind create cluster --name camunda-platform-local -``` - -Next, switch to the new cluster context using the following command: - -``` -kubectl config use-context kind-camunda-platform-local -``` - -## Deploy - -Now it's time to deploy Camunda 8 on the local Kubernetes cluster. - -First, add the Camunda 8 Helm repository using the following command: - -``` -helm repo add camunda https://helm.camunda.io -helm repo update -``` - -Next, download the Camunda 8 values file for KIND: [camunda-platform-core-kind-values.yaml](https://github.com/camunda/camunda-platform-helm/blob/main/kind/camunda-platform-core-kind-values.yaml). - -Lastly, install Camunda 8 using the custom values file with the following command: - -``` -helm install camunda-platform camunda/camunda-platform \ - -f camunda-platform-core-kind-values.yaml -``` - -This will deploy the same components, but with a set of parameters tailored to a local environment setup. - -Depending on your machine hardware and internet connection speed, the services might take some time to get started since it will download the Docker images of all Camunda 8 components to your local KIND cluster. 
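To follow the startup progress while the images are pulled, a minimal sketch using plain `kubectl` (no extra tooling assumed):

```sh
# watch the pods until all components report Running and Ready
kubectl get pods --watch

# or block until every pod is ready, with a generous timeout for slow image pulls
kubectl wait --for=condition=Ready pod --all --timeout=20m
```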
- -## Clean - -If you don't need the cluster anymore, you can just delete the local KIND cluster: - -:::note -This is a destructive action and will destroy all data of Camunda 8 in the local development cluster. -::: - -```sh -kind delete cluster --name camunda-platform-local -``` - -For more details about deployment options, visit the full [Helm deployment guide](../deploy.md). diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/running-custom-connectors.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/running-custom-connectors.md deleted file mode 100644 index d8aea30eebb..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/running-custom-connectors.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: running-custom-connectors -title: "Running custom Connectors" -description: "Run custom Connectors in your Helm Kubernetes cluster." ---- - -You can deploy your custom **Connector** in your Helm Kubernetes cluster along with Connectors Bundle. - -The default runtime loads Connectors from classpath via SPI. For the custom Connectors, there is a dedicated folder -inside a **Connectors** Docker image `/opt/custom`; any JAR placed here is included in the classpath. - -This page explains how to put your custom Connector into the `/opt/custom`. - -## Prerequisites - -Start with [creating and building](../../../../components/connectors/custom-built-connectors/connector-sdk.md) a 'fat' JAR (JAR with dependencies) of your custom **Connector**. For the purpose of -this guide, let's consider the custom **Connector** name `custom-connector-0.0.1-with-dependencies.jar`. - -Then, place the JAR somewhere accessible by Helm during installation. For the purpose of this guide, -let's consider the path to the **Connector** is `https://my.host:80/dist/custom-connector-0.0.1-with-dependencies.jar`. - -## Modify Connectors config - -Modify the values of the the [Camunda Helm charts](https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters): - -```yaml -connectors: - initContainers: - - name: init-script-downloader - image: appropriate/curl - args: - - "-o" - - "/opt/custom/custom-connector-0.0.1-with-dependencies.jar" - - "https://my.host:80/dist/custom-connector-0.0.1-with-dependencies.jar" - volumeMounts: - - name: init-script - mountPath: /opt/custom - - extraVolumes: - - name: init-script - emptyDir: {} - - extraVolumeMounts: - - mountPath: /opt/custom/custom-connector-0.0.1-with-dependencies.jar - name: init-script - subPath: custom-connector-0.0.1-with-dependencies.jar -``` - -After modification, you can run `helm install ... ` as usual. -These changes copy a custom Connector JAR before the Connector runtime starts. - -The `appropriate/curl` is not the only image option for the `initContainers`. There are other `curl`-based alternatives you can use; for example, `curlimages/curl`. Check `args` configuration with your vendor. - -## Troubleshooting - -If your custom Connector won't start, consider the following troubleshooting steps: - -- Make sure your Connector is present in the `/opt/custom` folder in the pod. -- Make sure the original Connector and the one in `/opt/custom` are the same. Usually, file size check is sufficient, but in some cases you may want to have a checksum comparison. 
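For the checksum comparison, a minimal sketch; the deployment name `camunda-connectors` depends on your Helm release name, and this assumes `sha256sum` is available in the Connectors container image:

```shell
# checksum of the JAR inside the running Connectors pod
kubectl exec deploy/camunda-connectors -- sha256sum /opt/custom/custom-connector-0.0.1-with-dependencies.jar

# checksum of the JAR you originally built
sha256sum custom-connector-0.0.1-with-dependencies.jar
```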
diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/using-existing-keycloak.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/using-existing-keycloak.md deleted file mode 100644 index c1a52a8d893..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/guides/using-existing-keycloak.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -id: using-existing-keycloak -title: "Using Existing Keycloak" -description: "Learn how to use an existing Keycloak instance in Camunda 8 Self-Managed deployment." ---- - -Camunda 8 Self-Managed has two different types of applications: Camunda applications (Operate, Optimize, Tasklist, etc.) and non-Camunda applications (such as Keycloak and Elasticsearch). For more details, review the [architecture](../../../about-self-managed.md#architecture) documentation for more information on the different types of applications. - -This guide steps through using an existing Keycloak instance, which is part of [Camunda Identity](../../../identity/what-is-identity.md). By default, [Helm chart deployment](../deploy.md) creates a new Keycloak instance, but it's possible to use an existing Keycloak instance either inside the same Kubernetes cluster or outside of it. - -## Preparation - -Configure your existing Keycloak realm according to the following guide: [Connect to an existing Keycloak instance](/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md). - -## Values file - -The only change required to use the existing Keycloak is configuring the following values in the Camunda 8 Self-Managed Helm chart: - -```yaml -# File: existing-keycloak-values.yaml -global: - identity: - keycloak: - url: - # This will produce the following URL "https://keycloak.stage.svc.cluster.local:8443". - # Also the host could be outside the Kubernetes cluster like "keycloak.stage.example.com". - protocol: "https" - host: "keycloak.stage.svc.cluster.local" - port: "8443" - auth: - adminUser: "admin" - existingSecret: "stage-keycloak" - existingSecretKey: "admin-password" - -identity: - keycloak: - enabled: false -``` - -Then, use the custom values file to [deploy Camunda 8](../deploy.md) as usual. - -```sh -helm install camunda camunda/camunda-platform -f existing-keycloak-values.yaml -``` diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/overview.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/overview.md deleted file mode 100644 index 225c42424c1..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/overview.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -id: overview -title: "Camunda 8 on Kubernetes" -sidebar_label: "Overview" -description: "An overview of Kubernetes, its environments, and officially supported platforms" ---- - -We strongly recommend using Kubernetes and Helm to deploy and run Camunda 8 in production. - -There are many ways you can provision and configure a Kubernetes cluster, and there are a number of architectural choices you need to make. Will your workers run in the Kubernetes cluster or external to it? You will need to configure your Kubernetes cluster and modify this to suit the architecture you are building. - -## Kubernetes environments - -You can install Camunda 8 on your Kubernetes environment of choice, e.g.: - -- [Stock Kubernetes](https://kubernetes.io/docs/). 
-- [Kubernetes KIND](https://github.com/kubernetes-sigs/kind), Minikube, K3s, and MicroK8s for local development. -- [Red Hat OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift), an enterprise ready solution with a focus on security. -- Cloud service providers like Google GKE, Azure AKS, Amazon EKS, etc. - -## Officially supported platforms - -With the right configuration, Camunda 8 can be deployed on any Kubernetes distribution (Cloud or on-premises). However, we officially test and support a [specific list of platforms](./platforms/platforms.md). - -## Versioning - -Starting from July 2023 (v8.2.8), the Camunda 8 **Helm chart** version follows the same unified schema -and schedule as [Camunda 8 applications](https://github.com/camunda/camunda-platform) (e.g., if the application version is 8.2.8, then chart version is also 8.2.8). - -For more details about the applications version included in the Helm chart, review the [full version matrix](https://helm.camunda.io/camunda-platform/version-matrix/). - -## Use Helm to install on Kubernetes - -There are several alternatives to deploy applications to a Kubernetes cluster, but we recommend using our provided Helm charts to deploy a set of components into your cluster. Helm allows you to choose exactly what chart (set of components) you want to install and how these components need to be configured. - -At [helm.camunda.io](https://helm.camunda.io/), you'll find a Helm chart to configure a three-broker cluster with two Elasticsearch instances, Operate, two Zeebe Gateways and Tasklist. This size is comparable with the Production-S cluster plan in [Camunda 8 SaaS](https://camunda.com/get-started/). It should be sufficient for 80% of use cases. - -Refer to the [documentation on Camunda's Helm charts](./deploy.md) for details. - -To do, you must have the following tools installed in your local environment: - -- `kubectl`: Kubernetes Control CLI tool, installed and connected to your cluster -- `helm`: Kubernetes Helm CLI tool - -## Useful tools related to Camunda - -- **Camunda Desktop Modeler**: to model/modify business processes. [Learn more](/components/modeler/desktop-modeler/index.md). -- **Zeebe CTL (`zbctl`)**: command line tool to interact with a Zeebe cluster (local/remote). You can get the `zbctl` tool from the official [Zeebe release page](https://github.com/camunda-cloud/zeebe/releases). diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks.md deleted file mode 100644 index 99aaf380ec1..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -id: amazon-eks -title: "Amazon EKS" -description: "Deploy Camunda 8 Self-Managed on Amazon EKS" ---- - -Amazon Elastic Kubernetes Service ([Amazon EKS](https://aws.amazon.com/eks/)) is a managed -container service to run and scale Kubernetes applications in the cloud or on-premises. - -Camunda 8 Self-Managed can be deployed on EKS like any Kubernetes cluster using [Helm charts](../deploy.md). However, there are a few pitfalls to avoid as described below. - -## EKS cluster specification - -Generally speaking, the EKS cluster specification depends on your needs and workloads. 
-Here is a recommended start to run Camunda 8: - -- Instance type: `m6i.xlarge` (4 vCPUs, 16 GiB Memory) -- Number of nodes: `4` -- Volume type: `SSD gp3` - -:::caution -To use `SSD gp3` volume type on an EKS cluster, you need to install -[Amazon EBS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html). -If you are on 1.22 or an earlier cluster be sure to install this driver to your cluster before updating the cluster to 1.23 to avoid potential workload interruptions. - -The next step is to create a new -[StorageClass](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html) -that uses the Amazon EBS `gp3` volume type. Then, use it cluster-wide as a default -`StorageClass` or set it in your values file under `zeebe.pvcStorageClassName`. - -If you encounter issues with EBS CSI Driver, follow the instructions in the [helm-profiles repository](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/main/aws/README.md#ebs-csi-driver-addon) maintained by the Camunda Consulting Team. -::: - -## Load Balancer set up - -AWS offers different types of Load Balancers (LB). Those namely being: - -- Classic Load Balancer (CLB) - previous generation, unsupported by Camunda 8 -- Network Load Balancer (NLB) -- Application Load Balancer (ALB) - -Typically the NLB and ALB are used in production setups and the ones we're focusing on as CLB are not endorsed anymore and counted as previous generation LB. - -The Zeebe Gateway requires [gRPC](https://grpc.io/) to work, which in itself requires http2 to be used. Additionally, it's recommended to secure the endpoint with [a TLS certificate](https://aws.amazon.com/what-is/ssl-certificate/). - -Here the choice of LB is important as not every setup will work with every TLS termination. Typically, the NLB has to terminate the TLS within the ingress, while the ALB can terminate TLS within the LB, allowing the usage of the [AWS Certificate Manager (ACM)](https://aws.amazon.com/certificate-manager/). - -The NLB will not work with the AWS Certificate Manager, as the ACM does not allow exporting the private key required to terminate the TLS within the ingress. - -The Camunda 8 Helm chart primarily focuses on the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) due to the usage of controller specific annotations. Using a different ingress controller requires supplying the necessary equivalent annotation options, ensuring http2 is enabled, and gRPC is used for the Zeebe Gateway. - -### Application Load Balancer (ALB) - -To conclude for using the **Application Load Balancer** (ALB) to terminate TLS in the Load Balancer, the following is required: - -- Deploy the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/). -- A [certificate set up](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the AWS Certificate Manager (ACM). -- Follow the [example by AWS](https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/examples/grpc_server.md) to configure the ingress for the Zeebe Gateway. 
To summarize, add the following annotations to the Zeebe Gateway ingress: - ```shell - alb.ingress.kubernetes.io/ssl-redirect: '443' - alb.ingress.kubernetes.io/backend-protocol-version: GRPC - alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' - alb.ingress.kubernetes.io/scheme: internet-facing - alb.ingress.kubernetes.io/target-type: ip - ``` - - This does not require the configuration of the [TLS on the ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) - - If the AWS Load Balancer Controller is correctly set up, it automatically pulls the correct certificate from ACM based on the host name. - -### Network Load Balancer (NLB) - -Alternatively, one can use a **Network Load Balancer** (NLB) to terminate TLS within the ingress. This requires the following: - -- An ingress controller, preferably [ingress-nginx](https://github.com/kubernetes/ingress-nginx) deployed. - - The ingress controller must support gRPC and http2. -- A certificate, preferably created with [Cert-Manager](https://cert-manager.io/). -- [TLS configured](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) on the ingress object. - -## Pitfalls to avoid - -For general deployment pitfalls, visit the [deployment troubleshooting guide](../../troubleshooting.md). - -### Volume performance - -To have proper performance in Camunda 8, the persistent volumes attached to Zeebe should have around 1,000-3,000 IOPS. The `gp3` volumes deliver a consistent baseline IOPS performance -of 3,000 IOPS. The `gp2` volumes could also be used, but `gp2` volume type performance -[varies based on volume size](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/general-purpose.html#gp2-performance). - -It's recommended to use `gp3` volumes, but if only `gp2` type is available, persistent volumes -should use `gp2` volumes of at least 334 GB. diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/google-gke.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/google-gke.md deleted file mode 100644 index 2e5bc601bb7..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/google-gke.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: google-gke -title: "Google GKE" -description: "Deploy Camunda 8 Self-Managed on Google GKE, a managed container service to run and scale Kubernetes applications in the cloud or on-premises." ---- - -Google Kubernetes Engine ([GKE](https://cloud.google.com/kubernetes-engine)) -is a managed container service to run and scale Kubernetes applications in the cloud or on-premises. - -Camunda 8 Self-Managed can be deployed on GKE like any Kubernetes cluster using [Helm charts](../deploy.md). However, there are a few pitfalls to avoid as described below. - -## GKE cluster specification - -Generally speaking, the GKE cluster specification depends on your needs and workloads. -Here is a recommended start to run Camunda 8: - -- Instance type: `n1-standard-4` (4 vCPUs, 15 GB Memory) -- Number of nodes: `4` -- Volume type: `Performance (SSD) persistent disks` - -## Pitfalls to avoid - -For general deployment pitfalls, visit the [deployment troubleshooting guide](../../troubleshooting.md). - -### Volume performance - -To have a proper performance in Camunda 8, the persistent volumes attached to Zeebe should have around 1,000-3,000 IOPS. 
The `Performance (SSD) persistent disks` volumes deliver a consistent baseline IOPS performance, but it [varies based on volume size](https://cloud.google.com/compute/docs/disks/performance#performance_factors).
-
-It's recommended to use the `Performance (SSD) persistent disks` volume type with at least `100 GB` per volume to reach 3,000 IOPS.
-
-### Zeebe Ingress
-
-Zeebe requires an Ingress controller that supports `gRPC`, so if you are using [GKE Ingress](https://cloud.google.com/kubernetes-engine/docs/concepts/ingress) (ingress-gce), not [ingress-nginx](https://github.com/kubernetes/ingress-nginx), you might need to take extra steps, namely using the `cloud.google.com/app-protocols` annotation on the Zeebe Service. For more details, visit the GKE guide [using HTTP/2 for load balancing with Ingress](https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-http2).
diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/microsoft-aks.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/microsoft-aks.md
deleted file mode 100644
index b3e96b095e3..00000000000
--- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/microsoft-aks.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-id: microsoft-aks
-title: "Microsoft AKS"
-description: "Deploy Camunda 8 Self-Managed on Microsoft AKS, a managed container service to run and scale Kubernetes applications in the cloud or on-premises."
----
-
-Microsoft Azure Kubernetes Service ([Microsoft AKS](https://azure.microsoft.com/en-us/products/kubernetes-service/))
-is a managed container service to run and scale Kubernetes applications in the cloud or on-premises.
-
-Camunda 8 Self-Managed can be deployed on AKS like any Kubernetes cluster using [Helm charts](../deploy.md). However, there are a few pitfalls to avoid, as described below.
-
-## AKS cluster specification
-
-Generally speaking, the AKS cluster specification depends on your needs and workloads.
-Here is a recommended start to run Camunda 8:
-
-- Instance type: `Standard_D4as_v4` (4 vCPUs, 16 GiB Memory)
-- Number of nodes: `4`
-- Volume type: `Premium SSD/Premium SSD v2`
-
-## Pitfalls to avoid
-
-For general deployment pitfalls, visit the [deployment troubleshooting guide](../../troubleshooting.md).
-
-### Volume performance
-
-To have proper performance in Camunda 8, the persistent volumes attached to Zeebe should have around 1,000-3,000 IOPS. The `Premium SSD v2` volumes deliver a consistent baseline IOPS performance
-of 3,000 IOPS. However, this volume type has some [limitations](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssd-v2-limitations), including a [lack of support in Azure Backup](https://learn.microsoft.com/en-us/azure/backup/disk-backup-support-matrix#limitations). Therefore, using `Premium SSD` could be the only option in many cases.
-The `Premium SSD` volume could also be used, but its performance
-[varies based on volume size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds).
-
-It's recommended to use the `Premium SSD v2` volume type, but if only the `Premium SSD` type is available, persistent volumes
-should use `Premium SSD` volumes of at least `256 GB` (P15).
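-
-As an illustration, a `StorageClass` backed by `Premium SSD v2` via the Azure Disk CSI driver could look roughly like the following. This is a sketch, not a definitive configuration: the class name, the `PremiumV2_LRS` SKU, and `cachingMode: None` are assumptions to verify against the Azure documentation for your region and cluster version.
-
-```yaml
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: premium-ssd-v2
-provisioner: disk.csi.azure.com
-volumeBindingMode: WaitForFirstConsumer
-parameters:
-  skuName: PremiumV2_LRS
-  # Premium SSD v2 disks do not support host caching
-  cachingMode: None
-```
-
-The class can then be referenced from the Helm values, for example via `zeebe.pvcStorageClassName: premium-ssd-v2`, or made the cluster default.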
-
-### Zeebe Ingress
-
-**Azure Application Gateway Ingress cannot be used as an Ingress for Zeebe/Zeebe Gateway** because Zeebe requires an Ingress controller that supports `gRPC`. Use another Ingress controller that supports `gRPC` instead, such as the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx).
-
-Currently, the Azure Application Gateway Ingress controller doesn't support `gRPC`. For more details, follow the upstream [GitHub issue about gRPC/HTTP2 support](https://github.com/Azure/application-gateway-kubernetes-ingress/issues/1015).
diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/platforms.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/platforms.md
deleted file mode 100644
index faaceccff89..00000000000
--- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/platforms.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-id: platforms
-title: "Supported Kubernetes platforms"
-description: "An overview of Camunda 8 officially-supported Kubernetes platforms."
----
-
-import DocCardList from '@theme/DocCardList';
-import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
-
-[Deploying Camunda 8 using Helm charts](../deploy.md) will work for all platforms and distributions. However, each platform or cloud provider may have special prerequisites or pitfalls to avoid. This section highlights important notes for a smooth Camunda 8 deployment on different Kubernetes platforms.
-
-In addition to Stock Kubernetes (which could be deployed on cloud or on-premises), Camunda only officially tests and supports the following platforms:
-
-<DocCardList items={useCurrentSidebarCategory().items}/>
-
-:::caution Web Modeler
-While it is likely Web Modeler will work on your cloud platform, we do not guarantee functionality and currently offer no dedicated support for these cloud platforms.
-:::
diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift.md
deleted file mode 100644
index 5c104206446..00000000000
--- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift.md
+++ /dev/null
@@ -1,400 +0,0 @@
----
-id: redhat-openshift
-title: "Red Hat OpenShift"
-description: "Deploy Camunda 8 Self-Managed on Red Hat OpenShift"
----
-
-Camunda 8 can be deployed using Helm on Red Hat OpenShift with the proper configuration. The primary difference from the [general Helm deployment guide](../deploy.md) relates to the Security Context Constraints (SCCs) you have in your cluster.
-
-## Compatibility
-
-We test against the following OpenShift versions and guarantee compatibility with:
-
-| OpenShift version | Supported          |
-| ----------------- | ------------------ |
-| 4.10.x            | :white_check_mark: |
-| 4.11.x            | :white_check_mark: |
-| 4.12.x            | :white_check_mark: |
-| 4.13.x            | :white_check_mark: |
-
-Any version not explicitly marked in the table above is not tested, and we cannot guarantee compatibility.
-
-## Pitfalls to avoid
-
-For general deployment pitfalls, visit the [deployment troubleshooting guide](../../troubleshooting.md).
-
-### Security Context Constraints (SCCs)
-
-Much like how roles control the permissions of users, Security Context Constraints (SCCs) are a way to control the permissions of the applications deployed, both at the pod and container level. It's generally recommended to deploy your application with the most restrictive SCCs possible.
If you're not familiar with security context constraints, refer to the [OpenShift documentation](https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html). - -#### Permissive SCCs - -Out of the box, if you deploy Camunda 8 (and related infrastructure) with a permissive SCCs, there is nothing particular for you to configure. Here, a permissive SCCs refers to one where the strategy for `RunAsUser` is defined as `RunAsAny` (including root). - -#### Non-root SCCs - -If you wish to deploy Camunda 8 but restrict the applications from running as root (e.g. the `nonroot` built-in SCCs), you will need to configure each pod and container to run as a non-root user. For example, when deploying Zeebe using a stateful set, you would add the following, replacing `1000` with the user ID you wish to use: - -```yaml -spec: - template: - spec: - securityContext: - runAsUser: 1000 - containers: - securityContext: - runAsUser: 1000 -``` - -:::note -As the container user in OpenShift is always part of the root group, it's not necessary to define a `fsGroup` for any Camunda 8 applications pod security context. -::: - -This is necessary for all Camunda 8 applications, as well as related ones (e.g. Keycloak, PostgreSQL, etc.). This is notably crucial for stateful applications which will write to persistent volumes, but it's also generally a good idea to set. - -#### Restrictive SCCs - -The following is the most restrictive SCCs you can use to deploy Camunda 8. Note that this is, in OpenShift 4.10, equivalent to the built-in `restricted` SCCs (which is the default SCCs). - -```yaml -Allow Privileged: false -Default Add Capabilities: -Required Drop Capabilities: KILL,MKNOD,SYS_CHROOT,SETUID,SETGID -Allowed Capabilities: -Allowed Seccomp Profiles: -Allowed Volume Types: configMap,downwardAPI,emptyDir,persistentVolumeClaim,projected,secret -Allow Host Network: false -Allow Host Ports: false -Allow Host PID: false -Allow Host IPC: false -Read Only Root Filesystem: false -Run As User Strategy: MustRunAsRange -SELinux Context Strategy: MustRunAs -FSGroup Strategy: MustRunAs -Supplemental Groups Strategy: RunAsAny -``` - -When using this, you must take care not to specify _any_ `runAsUser` or `fsGroup` in either the pod or container security context. Instead, let OpenShift assign arbitrary IDs. - -:::note -If you are providing the ID ranges yourself, you can configure the `runAsUser` and `fsGroup` values yourself as well. -::: - -The Camunda Helm chart can be deployed to OpenShift with a few modifications, primarily revolving around your desired security context constraints. You can find out more about this in the next section. - -## Deployment - -As discussed in the previous section, you need to configure the pod and container security contexts based on your desired security context constraints (SCCs). - -The `Elasticsearch`, `Keycloak`, and `PostgreSQL` charts all specify default non-root users for security purposes. To deploy these charts through the Camunda Helm chart, these default values must be removed. Unfortunately, due to a [longstanding bug in Helm](https://github.com/helm/helm/issues/9136) affecting all Helm versions from 3.2.0 and greater, this makes the installation of the chart (when deploying any of these sub-charts) more complex. - -Note that this is only an issue if you are deploying `Elasticsearch`, `Keycloak` (via `Identity`), or `PostgreSQL` (via `Keycloak`). 
If you are not deploying these, or not via the `camunda-platform` chart, or you are using [permissive SCCs](#permissive-sccs), this issue does not affect your deployment. - -:::note -This also affects installations done through the OpenShift console, as it still uses Helm under the hood. -::: - -### Permissive SCCs - -To use permissive SCCs, install the charts as they are. Follow the [general Helm deployment guide](../deploy.md). - -### Restrictive SCCs - -To use more restrictive SCCs, configure the following minimum set of values for the various applications. The recommendations outlined in the sections are relevant here as well. As the Camunda 8 applications do not define a pod or security context, follow these recommendations, or simply omit defining any. - -If you are deploying with SCCs where `RunAsUser` is `MustRunAsRange` (e.g. the default `restricted` SCCs), and are deploying at least one of `Elasticsearch`, `Keycloak`, or `PostgreSQL`, it's necessary to unset the default security context of these charts. If this does not apply to you, you can stop here. - -Now this depends on which Helm version you use: `3.1.3 and lower`, or `3.2.0 and greater` (i.e. one affected by [Helm's nested sub-charts](https://github.com/helm/helm/issues/9136)). Find out your Helm version by running the following: - -```shell -helm version --short - -v3.8.1+g5cb9af4 -``` - -#### Helm 3.1.3 or lower - -If you're running on Helm 3.0.0 up to 3.1.3, you need to add these values to your `values.yaml` file, or save them to a new file locally, e.g. `openshift.yaml`: - -:::note -These values are also available in the [Camunda Helm chart](https://artifacthub.io/packages/helm/camunda/camunda-platform). -::: - -```yaml -# omit this section if elasticsearch.enabled is false -elasticsearch: - securityContext: - runAsUser: null - sysctlInitContainer: - enabled: false - podSecurityContext: - fsGroup: null - runAsUser: null - -# omit this section if identity.enabled is false -identity: - # omit this section if identity.keycloak.enabled is false - keycloak: - containerSecurityContext: - runAsUser: null - podSecurityContext: - fsGroup: null - runAsUser: null - postgresql: - # omit this section if identity.keycloak.postgresql.primary.enabled is false - primary: - containerSecurityContext: - runAsUser: null - podSecurityContext: - fsGroup: null - runAsUser: null - # omit this section if identity.keycloak.postgresql.readReplicas.enabled is false - readReplicas: - containerSecurityContext: - runAsUser: null - podSecurityContext: - fsGroup: null - runAsUser: null - # omit this section if identity.keycloak.postgresql.metrics.enabled is false - metrics: - containerSecurityContext: - runAsUser: null - podSecurityContext: - fsGroup: null - runAsUser: null -``` - -When installing the chart, run the following: - -```shell -helm install camunda camunda/camunda-platform --skip-crds --version "$CHART_VERSION" -f values.yaml -f openshift.yaml -``` - -#### Helm 3.2.0 and greater - -If you must deploy using Helm 3.2.0 or greater, you have two options. One is to use a SCCs which defines the `RunAsUser` strategy to be at least `RunAsAny`. If that's not possible, then you need to make use of [a post-renderer](https://helm.sh/docs/topics/advanced/#post-rendering). - -:::warning -If using a post-renderer, you **must** use the post-renderer whenever you are updating your release, not only during the initial installation. If you do not, the default values will be used again, which will prevent some services from starting. 
-::: - -While you can use your preferred `post-renderer`, we provide one (included in the chart archive) which requires only `bash` and `sed` to be available locally: - -```bash -#!/bin/bash -eu -# Expected usage is as an Helm post renderer. -# Example usage: -# helm install my-release camunda/camunda-platform --post-renderer ./patch.sh -# -# This script is a Helm chart post-renderer for users on Helm 3.2.0 and greater. It allows removing default -# values set in sub-charts/dependencies, something which should be possible but is currently not working. -# See this issue for more: https://github.com/helm/helm/issues/9136 -# -# The result of patching the rendered Helm templates is printed out to STDOUT. Any other logging from the -# script is thus sent to STDERR. -# -# Note to contributors: this post-renderer is used in the integration tests, so make sure that it can be used -# from any working directory. - -set -o pipefail - -# Perform two passes: once for single quotes, once for double quotes, as it's not specified that string values are -# always output with single or double quotes -sed -e "s/'@@null@@'/null/g" -e 's/"@@null@@"/null/g' -``` - -You also need to use a custom values file, where instead of using `null` as a value to unset default values, you use a special marker value which will be removed by the post-renderer. - -Copy these values to your values file or save them as a separate file, e.g. `openshift.yaml`: - -:::note -These values are also available in the [Camunda Helm chart](https://artifacthub.io/packages/helm/camunda/camunda-platform). -::: - -```yaml -# omit this section if elasticsearch.enabled is false -elasticsearch: - securityContext: - runAsUser: "@@null@@" - sysctlInitContainer: - enabled: false - podSecurityContext: - fsGroup: "@@null@@" - runAsUser: "@@null@@" - -# omit this section if identity.enabled is false -identity: - # omit this section if identity.keycloak.enabled is false - keycloak: - containerSecurityContext: - runAsUser: "@@null@@" - podSecurityContext: - fsGroup: "@@null@@" - runAsUser: "@@null@@" - postgresql: - # omit this section if identity.keycloak.postgresql.primary.enabled is false - primary: - containerSecurityContext: - runAsUser: "@@null@@" - podSecurityContext: - fsGroup: "@@null@@" - runAsUser: "@@null@@" - # omit this section if identity.keycloak.postgresql.readReplicas.enabled is false - readReplicas: - containerSecurityContext: - runAsUser: "@@null@@" - podSecurityContext: - fsGroup: "@@null@@" - runAsUser: "@@null@@" - # omit this section if identity.keycloak.postgresql.metrics.enabled is false - metrics: - containerSecurityContext: - runAsUser: "@@null@@" - podSecurityContext: - fsGroup: "@@null@@" - runAsUser: "@@null@@" -``` - -Now, when installing the chart, you can do so by running the following: - -```shell -helm install camunda camunda/camunda-platform --skip-crds --version "$CHART_VERSION" \ - -f values.yaml -f openshift.yaml --post-renderer ./patch.sh -``` - -## Configuring Ingress using routes for Zeebe Gateway - -| OpenShift version | Supported | -| ----------------- | ------------------ | -| 4.10.x | limited | -| 4.11.x | :white_check_mark: | - -The Ingress on OpenShift works slightly different from the Kubernetes default. The mechanism is called [routes](https://docs.openshift.com/container-platform/4.11/networking/routes/route-configuration.html). - -To use these routes for the Zeebe Gateway, configure this through Ingress as well. 
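-
-Because gRPC relies on HTTP/2 end to end, HTTP/2 must be enabled for the OpenShift router before a route-based setup can carry Zeebe Gateway traffic (see the prerequisite section below). As a rough, version-dependent sketch, enabling it cluster-wide on OpenShift 4.x might look like this:
-
-```shell
-# Enable HTTP/2 ingress connectivity for the entire cluster
-oc annotate ingresses.config/cluster ingress.operator.openshift.io/default-enable-http2=true
-```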
- -### Alternatives - -An alternative to using [routes](https://docs.openshift.com/container-platform/4.14/networking/routes/route-configuration.html) is to install and use one of the Kubernetes Ingress controllers instead, for example, the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx). - -:::warning - -Do not confuse the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) with the [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift) that is endorsed by Red Hat for usage with OpenShift. Despite very similar names, they are two different products. - -If you should decide to use the Red Hat endorsed [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift), you would require additional adjustments done to the Camunda 8 ingress objects and the NGINX Ingress Controller itself to make `gRPC` and `HTTP/2` connections work. In that case, please refer to the [example and the prerequisites](https://github.com/nginxinc/kubernetes-ingress/blob/main/examples/ingress-resources/grpc-services/README.md). - -::: - -### Prerequisite - -As the Zeebe Gateway uses `gRPC` (which relies on `HTTP/2`), this [has to be enabled](https://docs.openshift.com/container-platform/4.11/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress). - -### Required steps - -1. Provide [TLS secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) for the Zeebe Gateway, the [Cert Manager](https://docs.openshift.com/container-platform/4.11/security/cert_manager_operator/index.html) might be helpful here: - -- One issued to the Zeebe Gateway Service Name. This must use the [pkcs8 syntax](https://www.openssl.org/docs/man3.1/man1/openssl-pkcs8.html) as Zeebe only supports this, referenced as **Service Certificate Secret** or ``. For more details, review the [OpenShift documentation](https://docs.openshift.com/container-platform/4.11/networking/routes/secured-routes.html#nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate_secured-routes). -- One that is used on the exposed route, referenced as **External URL Certificate Secret** or ``. - -1. Configure your Zeebe Gateway Ingress to create a [re-encrypt route](https://docs.openshift.com/container-platform/4.11/networking/routes/route-configuration.html#nw-ingress-creating-a-route-via-an-ingress_route-configuration): - -```yaml -zeebe-gateway: - ingress: - annotations: - route.openshift.io/termination: reencrypt - route.openshift.io/destination-ca-certificate-secret: # this is not supported on Openshift 4.10 - className: openshift-default - tls: - enabled: true - secretName: -``` - -3. Mount the **Service Certificate Secret** to the Zeebe Gateway Pod: - -```yaml -zeebe-gateway: - env: - - name: ZEEBE_GATEWAY_SECURITY_ENABLED - value: "true" - - name: ZEEBE_GATEWAY_SECURITY_CERTIFICATECHAINPATH - value: /usr/local/zeebe/config/tls.crt - - name: ZEEBE_GATEWAY_SECURITY_PRIVATEKEYPATH - value: /usr/local/zeebe/config/tls.key - extraVolumeMounts: - - name: certificate - mountPath: /usr/local/zeebe/config/tls.crt - subPath: tls.crt - - name: key - mountPath: /usr/local/zeebe/config/tls.key - subPath: tls.key - extraVolumes: - - name: certificate - secret: - secretName: - items: - - key: tls.crt - path: tls.crt - defaultMode: 420 - - name: key - secret: - secretName: - items: - - key: tls.key - path: tls.key - defaultMode: 420 -``` - -4. 
Mount the **Service Certificate Secret** to the Operate and Tasklist pods and configure the secure TLS connection. Here, only the `tls.crt` file is required. - -For Operate: - -```yaml -operate: - env: - - name: CAMUNDA_OPERATE_ZEEBE_SECURE - value: "true" - - name: CAMUNDA_OPERATE_ZEEBE_CERTIFICATEPATH - value: /usr/local/operate/config/tls.crt - extraVolumeMounts: - - name: certificate - mountPath: /usr/local/operate/config/tls.crt - subPath: tls.crt - extraVolumes: - - name: certificate - secret: - secretName: - items: - - key: tls.crt - path: tls.crt - defaultMode: 420 -``` - -The actual configuration properties can be reviewed [in the Operate configuration documentation](docs/self-managed/operate-deployment/operate-configuration.md#zeebe-broker-connection). - -For Tasklist: - -```yaml -tasklist: - env: - - name: CAMUNDA_TASKLIST_ZEEBE_SECURE - value: "true" - - name: CAMUNDA_TASKLIST_ZEEBE_CERTIFICATEPATH - value: /usr/local/tasklist/config/tls.crt - extraVolumeMounts: - - name: certificate - mountPath: /usr/local/tasklist/config/tls.crt - subPath: tls.crt - extraVolumes: - - name: certificate - secret: - secretName: - items: - - key: tls.crt - path: tls.crt - defaultMode: 420 -``` - -The actual configuration properties can be reviewed [in the Tasklist configuration documentation](docs/self-managed/tasklist-deployment/tasklist-configuration.md#zeebe-broker-connection). - -5. Configure all other applications running inside the cluster and connecting to the Zeebe Gateway to also use TLS. diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/upgrade.md b/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/upgrade.md deleted file mode 100644 index 1a88b8f3c17..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/helm-kubernetes/upgrade.md +++ /dev/null @@ -1,324 +0,0 @@ ---- -id: upgrade -title: "Upgrading Camunda 8 Helm deployment" -sidebar_label: "Upgrade" -description: "To upgrade to a more recent version of the Camunda Helm charts, there are certain things you need to keep in mind." ---- - -To upgrade to a more recent version of the Camunda Helm charts, there are certain things you need to keep in mind. - -:::caution - -Ensure to review the [instructions for specific version](#version-update-instructions) before starting the actual upgrade. - -::: - -### Upgrading where Identity disabled - -Normally for a Helm upgrade, you run the [Helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) command. If you have disabled Camunda Identity and the related authentication mechanism, you should be able to do an upgrade as follows: - -```shell -helm upgrade camunda -``` - -However, if Camunda Identity is enabled (which is the default), the upgrade path is a bit more complex than just running `helm upgrade`. Read the next section to familiarize yourself with the upgrade process. - -### Upgrading where Identity enabled - -If you have installed the Camunda 8 Helm charts before with default values, this means Identity and the related authentication mechanism are enabled. For authentication, the Helm charts generate the secrets randomly if not specified on installation for each web application. 
If you run `helm upgrade` to upgrade to a newer chart version, you likely will see the following return: - -```shell -helm upgrade camunda-platform-test camunda/camunda-platform -``` - -You likely will see the following error: - -```shell -Error: UPGRADE FAILED: execution error at (camunda-platform/charts/identity/templates/tasklist-secret.yaml:10:22): -PASSWORDS ERROR: You must provide your current passwords when upgrading the release. - Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims. - Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases - - 'global.identity.auth.tasklist.existingSecret' must not be empty, please add '--set global.identity.auth.tasklist.existingSecret=$TASKLIST_SECRET' to the command. To get the current value: - - export TASKLIST_SECRET=$(kubectl get secret --namespace "camunda" "camunda-platform-test-tasklist-identity-secret" -o jsonpath="{.data.tasklist-secret}" | base64 --decode) -``` - -As mentioned, this output returns because secrets are randomly generated with the first Helm installation by default if not further specified. We use a library chart [provided by Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/common) for this. The generated secrets persist on persistent volume claims (PVCs), which are not maintained by Helm. - -If you remove the Helm chart release or do an upgrade, PVCs are not removed nor recreated. On an upgrade, secrets can be recreated by Helm, and could lead to the regeneration of the secret values. This would mean that newly-generated secrets would no longer match with the persisted secrets. To avoid such an issue, Bitnami blocks the upgrade path and prints the help message as shown above. - -In the error message, Bitnami links to their [troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases). However, to avoid confusion we will step through the troubleshooting process in this guide as well. - -### Secrets extraction - -For a successful upgrade, you first need to extract all secrets which were previously generated. - -:::note -You also need to extract all secrets which were generated for Keycloak, since Keycloak is a dependency of Identity. -::: - -To extract the secrets, use the following code snippet. Make sure to replace `camunda` with your actual Helm release name. 
- -```shell -export TASKLIST_SECRET=$(kubectl get secret "camunda-tasklist-identity-secret" -o jsonpath="{.data.tasklist-secret}" | base64 --decode) -export OPTIMIZE_SECRET=$(kubectl get secret "camunda-optimize-identity-secret" -o jsonpath="{.data.optimize-secret}" | base64 --decode) -export OPERATE_SECRET=$(kubectl get secret "camunda-operate-identity-secret" -o jsonpath="{.data.operate-secret}" | base64 --decode) -export CONNECTORS_SECRET=$(kubectl get secret "camunda-connectors-identity-secret" -o jsonpath="{.data.connectors-secret}" | base64 --decode) -export KEYCLOAK_ADMIN_SECRET=$(kubectl get secret "camunda-keycloak" -o jsonpath="{.data.admin-password}" | base64 --decode) -export ZEEBE_SECRET=$(kubectl get secret "camunda-zeebe-identity-secret" -o jsonpath="{.data.zeebe-secret}" | base64 --decode) -export KEYCLOAK_MANAGEMENT_SECRET=$(kubectl get secret "camunda-keycloak" -o jsonpath="{.data.management-password}" | base64 --decode) -export POSTGRESQL_SECRET=$(kubectl get secret "camunda-postgresql" -o jsonpath="{.data.postgres-password}" | base64 --decode) -``` - -After exporting all secrets into environment variables, run the following upgrade command: - -```shell -helm upgrade camunda camunda/camunda-platform \ - --set global.identity.auth.tasklist.existingSecret=$TASKLIST_SECRET \ - --set global.identity.auth.optimize.existingSecret=$OPTIMIZE_SECRET \ - --set global.identity.auth.operate.existingSecret=$OPERATE_SECRET \ - --set global.identity.auth.connectors.existingSecret=$CONNECTORS_SECRET \ - --set global.identity.auth.zeebe.existingSecret=$ZEEBE_SECRET \ - --set identity.keycloak.auth.adminPassword=$KEYCLOAK_ADMIN_SECRET \ - --set identity.keycloak.auth.managementPassword=$KEYCLOAK_MANAGEMENT_SECRET \ - --set identity.keycloak.postgresql.auth.password=$POSTGRESQL_SECRET -``` - -:::note -If you have specified on the first installation certain values, you have to specify them again on the upgrade either via `--set` or the values file and the `-f` flag. -::: - -For more details on the Keycloak upgrade path, you can also read the [Keycloak Upgrading Guide](https://www.keycloak.org/docs/latest/upgrading/). - -## Helm CLI version - -For a smooth upgrade, always use the same Helm CLI version corresponding with the chart version that shows in the [chart version matrix](https://helm.camunda.io/camunda-platform/version-matrix/). - -## Version update instructions - -### v8.2.29+ - -As of this Helm chart version, the image tags for all components are independent, and do not reference the global image tag. The value of the key `global.image.tag` is `null`, and each component now sets its own version. - -With this change, Camunda applications no longer require a unified patch version. For example, a given installation may use Zeebe version 8.2.1, and Operate version 8.2.2. Note that only the patch version can differ between components. - -### v8.2.9 - -#### Optimize - -For Optimize 3.10.1, a new environment variable introduced redirection URL. However, the change is not compatible with Camunda Helm charts until it is fixed in 3.10.3 (and Helm chart 8.2.9). 
Therefore, those versions are coupled to certain Camunda Helm chart versions:
-
-| Optimize version                  | Camunda Helm chart version |
-| --------------------------------- | -------------------------- |
-| Optimize 3.10.1 & Optimize 3.10.2 | 8.2.0 - 8.2.8              |
-| Optimize 3.10.3                   | 8.2.9+                     |
-
-No action is needed if you use Optimize 3.10.3 (shipped with this Helm chart version by default), but this Optimize version cannot be used out of the box with previous Helm chart versions.
-
-### v8.2.3
-
-#### Zeebe Gateway
-
-:::caution Breaking change
-
-Zeebe Gateway authentication is now enabled by default.
-
-:::
-
-To authenticate:
-
-1. [Create a client credential](/docs/guides/setup-client-connection-credentials.md).
-2. [Assign permissions to the application](/docs/self-managed/identity/user-guide/authorizations/managing-resource-authorizations.md).
-3. Connect with:
-
-- [Desktop Modeler](/docs/components/modeler/desktop-modeler/connect-to-camunda-8.md).
-- [Zeebe client (zbctl)](/docs/self-managed/zeebe-deployment/security/secure-client-communication/#zbctl).
-
-### v8.2
-
-#### Connectors
-
-The Camunda 8 Connectors component is one of our applications; it performs the integration with external systems.
-
-This step is required whether or not you use Connectors v8.2: you need to create the Connectors secret object (more details about this in [camunda-platform-helm/656](https://github.com/camunda/camunda-platform-helm/issues/656)).
-
-First, generate the Connectors secret:
-
-```bash
-helm template camunda camunda/camunda-platform --version 8.2 \
-    --show-only charts/identity/templates/connectors-secret.yaml > identity-connectors-secret.yaml
-```
-
-Then apply it:
-
-```bash
-kubectl apply --namespace <NAMESPACE> -f identity-connectors-secret.yaml
-```
-
-#### Keycloak
-
-Camunda v8.2 uses Keycloak v19, which depends on PostgreSQL v15. That is a major change in the dependencies. Currently, there are two recommended options to upgrade from Camunda 8.1.x to 8.2.x:
-
-1. Use the previous PostgreSQL version (v14) in Camunda v8.2; this is simple and works seamlessly.
-2. Follow the official PostgreSQL upgrade guide: [Upgrading a PostgreSQL Cluster v15](https://www.postgresql.org/docs/15/upgrading.html). However, it requires some manual work and longer downtime to do the database schema upgrade.
-
-**Method 1: Use the previous PostgreSQL version (v14)**
-
-You can set the PostgreSQL image tag as follows:
-
-```yaml
-identity:
-  keycloak:
-    postgresql:
-      image:
-        tag: 14.5.0
-```
-
-Then follow the [typical upgrade steps](#upgrading-where-identity-enabled).
-
-**Method 2: Upgrade the database schema to work with PostgreSQL v15**
-
-The easiest way to upgrade between major versions of PostgreSQL is to start a port-forward,
-and then run `pg_dump` or `pg_restore`. The PostgreSQL client versions are fairly flexible
-with different server versions, but for best results, we recommend using the newest
-client version.
-
-1. In one terminal, start a `port-forward` against the PostgreSQL service:
-
-```bash
-kubectl port-forward svc/camunda-postgresql 5432
-```
-
-Follow the rest of these steps in a different terminal.
-
-2. Get the `postgres` user's password from the PostgreSQL service:
-
-```bash
-kubectl exec -it statefulset/camunda-postgresql -- env | grep "POSTGRES_POSTGRES_PASSWORD="
-```
-
-3. Scale identity down using the following command:
-
-```bash
-kubectl scale --replicas=0 deployment camunda-identity
-```
-
-4.
Perform the database dump: - -```bash -pg_dumpall -U postgres -h localhost -p 5432 | tee dump.psql -Password: -``` - -`pg_dumpall` may ask multiple times for the same password. The database will be dumped into `dump.psql`. - -5. Scale database down using the following command: - -```bash -kubectl scale --replicas=0 statefulset camunda-postgresql -``` - -6. Delete the PVC for the postgresql instance using the following command: - -```bash -kubectl delete pvc data-camunda-postgresql-0 -``` - -7. Update the postgresql version using the following command: - -```bash -kubectl set image statefulset/camunda-postgresql postgresql=docker.io/bitnami/postgresql:15.3.0 -``` - -8. Scale the services back up using the following command: - -```bash -kubectl scale --replicas=1 statefulset camunda-postgresql -``` - -9. Restore the database dump using the following command: - -```bash -psql -U postgres -h localhost -p 5432 -f dump.psql -``` - -10. Scale up identity using the following command: - -```bash -kubectl scale --replicas=1 deployment camunda-identity -``` - -Then follow the [typical upgrade steps](#upgrading-where-identity-enabled). - -### v8.0.13 - -If you installed Camunda 8 using Helm charts before `8.0.13`, you need to apply the following steps to handle the new Elasticsearch labels. - -As a prerequisite, make sure you have the Elasticsearch Helm repository added: - -```shell -helm repo add elastic https://helm.elastic.co -``` - -#### 1. Retain Elasticsearch Persistent Volume - -First get the name of Elasticsearch Persistent Volumes: - -```shell -ES_PV_NAME0=$(kubectl get pvc elasticsearch-master-elasticsearch-master-0 -o jsonpath="{.spec.volumeName}") - -ES_PV_NAME1=$(kubectl get pvc elasticsearch-master-elasticsearch-master-1 -o jsonpath="{.spec.volumeName}") -``` - -Make sure these are the correct Persistent Volumes: - -```shell -kubectl get persistentvolume $ES_PV_NAME0 $ES_PV_NAME1 -``` - -It should show something like the following (note the name of the claim, it's for Elasticsearch): - -``` -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -pvc-80bde37a-3c5b-40f4-87f3-8440e658be75 64Gi RWO Delete Bound camunda/elasticsearch-master-elasticsearch-master-0 standard 20d -pvc-3e9129bc-9415-46c3-a005-00ce3b9b3be9 64Gi RWO Delete Bound camunda/elasticsearch-master-elasticsearch-master-1 standard 20d -``` - -The final step here is to change Persistent Volumes reclaim policy: - -```shell -kubectl patch persistentvolume "${ES_PV_NAME0}" \ - -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' - -kubectl patch persistentvolume "${ES_PV_NAME1}" \ - -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' -``` - -#### 2. Update Elasticsearch PersistentVolumeClaim labels - -```shell -kubectl label persistentvolumeclaim elasticsearch-master-elasticsearch-master-0 \ - release=camunda chart=elasticsearch app=elasticsearch-master - -kubectl label persistentvolumeclaim elasticsearch-master-elasticsearch-master-1 \ - release=camunda chart=elasticsearch app=elasticsearch-master -``` - -#### 3. Delete Elasticsearch StatefulSet - -Note that there will be a **downtime** between this step and the next step. - -```shell -kubectl delete statefulset elasticsearch-master -``` - -#### 4. Apply Elasticsearch StatefulSet chart - -```shell -helm template camunda/camunda-platform camunda --version \ - --show-only charts/elasticsearch/templates/statefulset.yaml -``` - -The `CHART_VERSION` is the version you want to update to (`8.0.13` or later). 
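-
-Rendering the template only prints the manifest; it still has to be applied to the cluster. As a minimal sketch (the release name `camunda` and the `$CHART_VERSION` variable are assumptions carried over from the surrounding examples), applying it could look like:
-
-```shell
-helm template camunda camunda/camunda-platform --version "$CHART_VERSION" \
-  --show-only charts/elasticsearch/templates/statefulset.yaml | kubectl apply -f -
-```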
diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/manual.md b/versioned_docs/version-8.2/self-managed/platform-deployment/manual.md
deleted file mode 100644
index d103051acfb..00000000000
--- a/versioned_docs/version-8.2/self-managed/platform-deployment/manual.md
+++ /dev/null
@@ -1,234 +0,0 @@
----
-id: manual
-title: "Manual installation on local machine"
-sidebar_label: "Manual"
----
-
-This page guides you through the manual installation of Camunda 8 on a local or virtual machine.
-
-## Prerequisites
-
-- Operating system:
-  - Linux
-  - Windows/macOS (development only, not supported for production)
-- Java Virtual Machine, see [supported environments](/docs/reference/supported-environments/) for version details
-- Elasticsearch, see [supported environments](/docs/reference/supported-environments/) for version details
-
-Make sure to configure the web applications to use a port that is available. By default, web applications like Operate and Tasklist both listen on port 8080.
-
-## Download a compatible set of Camunda 8 components
-
-Tasklist, Operate and Zeebe distributions are available for download on the [release page](https://github.com/camunda/camunda-platform/releases). Every release contains a set of compatible versions of the various components; ensure you download and use compatible versions.
-
-All Connector-related resources are available on [Maven Central](https://search.maven.org/search?q=g:io.camunda.connector). Make sure to download `*-jar-with-dependencies.jar` files in order to run Connectors locally, including their necessary dependencies.
-Note that some out-of-the-box Connectors are licensed under the [Camunda Self-Managed Free Edition license](https://camunda.com/legal/terms/cloud-terms-and-conditions/camunda-cloud-self-managed-free-edition-terms/).
-Find an overview in the [Connectors Bundle project](https://github.com/camunda/connectors-bundle).
-
-## Download and run Elasticsearch
-
-Operate, Tasklist, and Optimize use Elasticsearch as their underlying data store. Therefore, you have to download and run Elasticsearch.
-
-Camunda is currently compatible with Elasticsearch 7.16.2 (see [supported environments](/docs/reference/supported-environments/)), which you can [download here](https://www.elastic.co/downloads/past-releases/elasticsearch-7-16-2).
-
-To run Elasticsearch, execute the following commands:
-
-```bash
-cd elasticsearch-*
-bin/elasticsearch
-```
-
-You’ll know Elasticsearch has started successfully when you see a message similar to the following:
-
-```log
-[INFO ][o.e.l.LicenseService ] [-IbqP-o] license [72038058-e8ae-4c71-81a1-e9727f2b81c7] mode [basic] - valid
-```
-
-## Run Zeebe
-
-Once you've downloaded a Zeebe distribution, extract it into a folder of your choice.
-
-To extract the Zeebe distribution and start the broker, **Linux users** can type the following:
-
-```bash
-tar -xzf zeebe-distribution-X.Y.Z.tar.gz -C zeebe/
-./bin/broker
-```
-
-For **Windows users**, take the following steps:
-
-1. Download the `.zip` package.
-2. Extract the package using your preferred unzip tool.
-3. Open the extracted folder.
-4. Navigate to the `bin` folder.
-5. Start the broker by double-clicking on the `broker.bat` file.
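-
-If you prefer to keep the log output visible on Windows, you can also start the broker from a terminal instead of double-clicking it; as a small optional sketch, from the extracted Zeebe folder this might look like:
-
-```shell
-# In PowerShell or cmd, run the same batch file directly
-.\bin\broker.bat
-```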
-
-Once the Zeebe broker has started, it should produce the following output:
-
-```log
-23:39:13.246 [] [main] INFO io.camunda.zeebe.broker.system - Scheduler configuration: Threads{cpu-bound: 2, io-bound: 2}.
-23:39:13.270 [] [main] INFO io.camunda.zeebe.broker.system - Version: X.Y.Z
-23:39:13.273 [] [main] INFO io.camunda.zeebe.broker.system - Starting broker with configuration {
-```
-
-To run Zeebe with the Elasticsearch Exporter, which is needed for Operate, Tasklist, and Optimize to work, execute the following commands:
-
-```bash
-cd camunda-cloud-zeebe-*
-ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_CLASSNAME=io.camunda.zeebe.exporter.ElasticsearchExporter ./bin/broker
-```
-
-You’ll know Zeebe has started successfully when you see a message similar to the following:
-
-```log
-[partition-0] [0.0.0.0:26501-zb-actors-0] INFO io.camunda.zeebe.raft - Joined raft in term 0
-[exporter] [0.0.0.0:26501-zb-actors-1] INFO io.camunda.zeebe.broker.exporter.elasticsearch - Exporter opened
-```
-
-You can test the Zeebe Gateway by asking for the cluster topology with [zbctl](../../apis-tools/cli-client/index.md#usage):
-
-```bash
-./bin/zbctl --insecure status
-```
-
-`zbctl status` should produce an output like this:
-
-```
-Cluster size: 1
-Partitions count: 1
-Replication factor: 1
-Gateway version: 8.1.6
-Brokers:
-  Broker 0 - 0.0.0.0:26501
-    Version: 8.1.6
-    Partition 1 : Leader, Healthy
-```
-
-## Run Operate
-
-To run Operate, execute the following command:
-
-```bash
-cd camunda-cloud-operate-*
-bin/operate
-```
-
-You’ll know Operate has started successfully when you see messages similar to the following:
-
-```log
-DEBUG 1416 --- [ Thread-6] o.c.o.e.w.BatchOperationWriter : 0 operations locked
-DEBUG 1416 --- [ Thread-4] o.c.o.z.ZeebeESImporter : Latest loaded position for alias [zeebe-record-deployment] and partitionId [0]: 0
-INFO 1416 --- [ Thread-4] o.c.o.z.ZeebeESImporter : Elasticsearch index for ValueType DEPLOYMENT was not found, alias zeebe-record-deployment. Skipping.
-```
-
-Now the Operate web interface is available at [http://localhost:8080](http://localhost:8080).
-
-The first screen you'll see is a sign-in page. Use the credentials `demo` / `demo` to sign in.
-
-After you sign in, you'll see an empty dashboard if you haven't yet deployed any processes:
-
-![operate-dash-no-processes](assets/operate-dashboard-no-processes_light.png)
-
-If you _have_ deployed processes or created process instances, you'll see them on your dashboard:
-
-![operate-dash-with-processes](assets/operate-introduction_light.png)
-
-To update Operate versions, visit the [update guide](/guides/update-guide/introduction.md).
-
-## Run Tasklist
-
-To run Tasklist, execute the following commands:
-
-```bash
-cd camunda-cloud-tasklist-*
-./bin/tasklist
-```
-
-You’ll know Tasklist has started successfully when you see messages similar to the following:
-
-```log
-2020-12-09 13:31:41.437 INFO 45899 --- [ main] i.z.t.ImportModuleConfiguration : Starting module: importer
-2020-12-09 13:31:41.438 INFO 45899 --- [ main] i.z.t.ArchiverModuleConfiguration : Starting module: archiver
-2020-12-09 13:31:41.555 INFO 45899 --- [ main] i.z.t.w.StartupBean : Tasklist Version: 1.0.0
-```
-
-The Tasklist web interface is available at [http://localhost:8080](http://localhost:8080). Note that this is the same default port as Operate, so you might have to configure Tasklist (or Operate) to use another port:
-
-```bash
-cd camunda-cloud-tasklist-*
-SERVER_PORT=8081 ./bin/tasklist
-```
-
-The first screen you'll see is a sign-in page. Use the credentials `demo` / `demo` to sign in.
- -If you've already developed user tasks in Zeebe, you can see these on the left panel on the start screen: - -![tasklist-start-screen](assets/tasklist-start-screen_light.png) - -To update Tasklist versions, visit the [update guide](/guides/update-guide/introduction.md). - -## Run Connectors - -### Bundle - -Bundle includes runtime with all available Camunda Connectors. - -The [Connector runtime bundle](https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-bundle/) picks up -outbound Connectors available on the `classpath` automatically. -It uses the default configuration specified by a Connector through its `@OutboundConnector` and `@InboundConnector` annotations. - -Consider the following file structure: - -```shell -/home/user/bundle-with-connector $ -├── connector-runtime-bundle-VERSION-with-dependencies.jar -└── my-custom-connector-0.1.0-SNAPSHOT-with-dependencies.jar -``` - -To start Connectors bundle with all custom Connectors locally, run: - -```bash -java -cp "/home/user/bundle-with-connector/*" "io.camunda.connector.runtime.app.ConnectorRuntimeApplication" -``` - -This starts a Zeebe client, registering the defined Connector as a job worker. By default, it connects to a local Zeebe instance at port `26500`. - -### Runtime-only - -Runtime-only variant is useful when you wish to run only specific Connectors. - -The [Connector runtime bundle](https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-application/) picks up -outbound Connectors available on the `classpath` automatically. -It uses the default configuration specified by a Connector through its `@OutboundConnector` and `@InboundConnector` annotations. - -Consider the following file structure: - -```shell -/home/user/runtime-only-with-connector $ -├── connector-runtime-application-VERSION-with-dependencies.jar -└── my-custom-connector-0.1.0-SNAPSHOT-with-dependencies.jar -``` - -To start Connector runtime with all custom Connectors locally, run: - -```bash -java -cp "/home/user/runtime-only-with-connector/*" "io.camunda.connector.runtime.app.ConnectorRuntimeApplication" -``` - -This starts a Zeebe client, registering the defined Connector as a job worker. By default, it connects to a local Zeebe instance at port `26500`. - -### Configuring runtime - -Visit the [Camunda Connector Runtime GitHub page](https://github.com/camunda/connectors/tree/main/connector-runtime#configuration-options) -to find up-to-date runtime configuration options. - -## Run Identity - -A local setup of Identity in Camunda 8 is not yet supported out-of-the-box, use [Docker](../docker/) instead. - -## Run Optimize - -The installation of Optimize is described in [Optimize Setup]($optimize$/self-managed/optimize-deployment/install-and-start). A local setup in Camunda 8 is not yet supported out-of-the-box, use [Docker](../docker/#optimize) instead. - -## Run Web Modeler - -A local setup of Web Modeler in Camunda 8 is not yet supported out-of-the-box, use [Docker](../docker/#web-modeler) instead. 
diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/overview.md b/versioned_docs/version-8.2/self-managed/platform-deployment/overview.md deleted file mode 100644 index 456b61251c8..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/overview.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -id: overview -title: "Camunda 8 installation overview" -sidebar_label: "Overview" -description: "This chapter contains information for users who want to deploy and run Camunda 8 Self-Managed in their self-controlled cloud or own hardware." -keywords: ["camunda download"] ---- - -import Components from '../react-components/components.md' - -This guide contains information for users who want to install, deploy, and upgrade Camunda 8 Self-Managed, typically in their self-controlled cloud (public or private) or on their own hardware. - -## Components - - - -## Supported environments - -For details on supported environments (e.g. Java or Elasticsearch versions), see [supported environments](/reference/supported-environments.md). - -## Deployment options - -- [**Helm/Kubernetes**](./helm-kubernetes/deploy.md): We recommend using Kubernetes and Helm to deploy and run Camunda 8 Self-Managed in production. With the right configuration, Camunda 8 Self-Managed can be deployed on any Certified Kubernetes distribution (cloud or on-premises). We also officially support a variety of providers like [Red Hat OpenShift](./helm-kubernetes/platforms/redhat-openshift.md) and [Amazon EKS](./helm-kubernetes/platforms/amazon-eks.md). -- [**Docker**](../platform-deployment/docker.md): Component [Docker images](https://hub.docker.com/u/camunda) are available for use in production on Linux systems. Windows or macOS are only supported for development environments. -- [**Manual**](../platform-deployment/manual.md): The Java applications can run on a local or virtual machine if it provides a supported Java Virtual Machine (JVM). This allows you to run Camunda on virtual machines or bare metal and offers a significant amount of flexibility. However, you will need to configure the details for the components to interact correctly yourself. We consider this a last resort. Note that Windows/Mac is **not** supported for production usage of Zeebe. - -A **Docker Compose** configuration file is also provided for local development, and is **not** optimized for production usage. You can find setup instructions in the [camunda-platform](https://github.com/camunda/camunda-platform) repository. - -For more details on deployment, see [sizing your environment](../../components/best-practices/architecture/sizing-your-environment.md#camunda-8-self-managed). diff --git a/versioned_docs/version-8.2/self-managed/platform-deployment/troubleshooting.md b/versioned_docs/version-8.2/self-managed/platform-deployment/troubleshooting.md deleted file mode 100644 index cf7fde275c3..00000000000 --- a/versioned_docs/version-8.2/self-managed/platform-deployment/troubleshooting.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -id: troubleshooting -title: "Troubleshooting" -sidebar_label: "Troubleshooting" -description: "Troubleshooting considerations in deployment." ---- - -## Keycloak requires SSL for requests from external sources - -When deploying Camunda to a provider, it is important to confirm the IP ranges used -for container to container communication align with the IP ranges Keycloak considers "local". 
By default, Keycloak considers all IPs outside those listed in their
-[external requests documentation](https://www.keycloak.org/docs/19.0.3/server_installation/#_setting_up_ssl)
-to be external, and therefore to require SSL.
-
-As the [Camunda Helm Charts](https://github.com/camunda/camunda-platform-helm) currently do
-not provide support for the distribution of the Keycloak TLS key to the other containers, we recommend viewing the solution available in the
-[Identity documentation](/self-managed/identity/troubleshooting/common-problems.md#solution-2-identity-making-requests-from-an-external-ip-address).
-
-## Identity redirect URL
-
-If HTTP to HTTPS redirection is enabled in the load balancer or ingress, make sure to use the HTTPS
-protocol in the values file under `global.identity.auth.[COMPONENT].redirectUrl`.
-Otherwise, you will get a redirection error in Keycloak.
-
-For example:
-
-```yaml
-global:
-  identity:
-    auth:
-      operate:
-        redirectUrl: https://operate.example.com
-```
-
-:::caution
-Changing this property after the initial install will require manual corrections in Keycloak.
-:::
-
-## Zeebe Ingress (gRPC)
-
-Zeebe requires an Ingress controller that supports `gRPC`, which is built on top of the `HTTP/2` transport layer. Therefore, to expose the Zeebe Gateway externally, you need the following:
-
-1. An Ingress controller that supports `gRPC` (the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) supports it out of the box).
-2. TLS (HTTPS) via [Application-Layer Protocol Negotiation (ALPN)](https://www.rfc-editor.org/rfc/rfc7301.html) enabled in the Zeebe Gateway Ingress object.
-
-However, according to the official Kubernetes documentation about [Ingress TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls):
-
-> There is a gap between TLS features supported by various Ingress controllers. Please refer to documentation on nginx, GCE, or any other platform specific Ingress controller to understand how TLS works in your environment.
-
-Therefore, if you are not using the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), ensure you pay attention to the TLS configuration of the Ingress controller of your choice. Find more details about the Zeebe Ingress setup in the [Kubernetes platforms supported by Camunda](./helm-kubernetes/platforms/platforms.md).
-
-## Identity `contextPath`
-
-Camunda 8 Self-Managed can be accessed externally via different methods. One such method is the [combined Ingress setup](./helm-kubernetes/guides/ingress-setup.md#combined-ingress-setup). In that configuration, Camunda Identity is accessed using a specific path, configured by setting the `contextPath` variable, for example `https://camunda.example.com/identity`.
-
-For security reasons, Camunda Identity requires secure access (HTTPS) when a `contextPath` is configured. If you want to use Camunda Identity with HTTP, use a [separate Ingress setup](./helm-kubernetes/guides/ingress-setup.md#separated-ingress-setup) (applications such as Operate, Optimize, etc., can still be accessed in a combined setup).
-
-:::note
-Due to limitations, the Identity `contextPath` approach is unavailable when using a browser in Incognito mode.
-:::
-
-## Web Modeler database schema
-
-The Web Modeler `restapi` component requires a [database connection](../../modeler/web-modeler/configuration#database). This connection should not point to the same database as Keycloak does.
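-
-For illustration only, pointing the `restapi` component at its own PostgreSQL database could look like the following. The variable names follow the linked configuration page; the host, database name, and credentials are placeholders, and the exact wiring depends on whether you deploy with Docker or the Helm chart.
-
-```yaml
-# Hypothetical environment for the Web Modeler restapi container,
-# using a database separate from the one Keycloak uses
-SPRING_DATASOURCE_URL: jdbc:postgresql://modeler-db.example.com:5432/web-modeler
-SPRING_DATASOURCE_USERNAME: web-modeler
-SPRING_DATASOURCE_PASSWORD: change-me
-```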
- -## Gateway timeout on redirect - -A gateway timeout can occur if the headers of a response are too big (for example, if a JWT is returned as `Set-Cookie` header). To avoid this, you can increase the `proxy-buffer-size` of your Ingress controller or Ingress. The setting for **ingress-nginx** can be found [here](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#proxy-buffer-size). diff --git a/versioned_docs/version-8.2/self-managed/react-components/components.md b/versioned_docs/version-8.2/self-managed/react-components/components.md deleted file mode 100644 index 02f01ccb9eb..00000000000 --- a/versioned_docs/version-8.2/self-managed/react-components/components.md +++ /dev/null @@ -1,20 +0,0 @@ ---- ---- - -- Zeebe Broker and Gateway -- Operate -- Tasklist -- Connectors -- Optimize -- Identity -- Web Modeler [Enterprise only](/reference/licenses.md/#web-modeler) - -All components except Web Modeler and Console are single Java applications. Depending on your needs, you might not need all of the above components to successfully use Camunda 8. - -Camunda 8 Self-Managed users may also use [Desktop Modeler](../../components/modeler/desktop-modeler/install-the-modeler.md) as an addition to these components. Desktop Modeler can be used by process developers to build BPMN diagrams, DMN diagrams, or [Camunda Forms](../../guides/utilizing-forms.md) for automation. - -:::note - -To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/contact). - -::: diff --git a/versioned_docs/version-8.2/self-managed/tasklist-deployment/data-retention.md b/versioned_docs/version-8.2/self-managed/tasklist-deployment/data-retention.md deleted file mode 100644 index dc5e3a846ea..00000000000 --- a/versioned_docs/version-8.2/self-managed/tasklist-deployment/data-retention.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: data-retention -title: Data retention -description: "Let's take a closer look at how Tasklist stores and archives data." ---- - -## How the data is stored and archived - -Tasklist imports data from Zeebe and stores it in Elasticsearch indices with a defined prefix (default: `tasklist`). Specifically, this includes the following: - -- Deployed processes, including the diagrams. -- The state of process instances, including variables and flow nodes, activated within instance execution, incidents, etc. - -It additionally stores some Tasklist-specific data: - -- Operations performed by the user -- List of users -- Technical data, like the state of Zeebe import, etc. - -The data representing process instance state becomes immutable after the process instance is finished. Currently, the data may be archived, meaning it is moved to a dated index, e.g. `tasklist_variables_2020-01-01`, where date represents the date on which the given process instance was finished. The same is valid for user operations; after they are finished, the related data is moved to dated indices. - -:::note -All Tasklist data present in Elasticsearch (from both **main** and **dated** indices) are visible from the UI. -::: - -## Archive period - -The default time between a process instance finishing and being moved to a dated index is one hour. This can be modified by setting the [waitPeriodBeforeArchiving](importer-and-archiver.md#archive-period) configuration parameter. - -## Data cleanup - -In case of intensive Zeebe usage, the amount of data can grow significantly overtime. Therefore, you should consider the data cleanup strategy. 
- -Dated indices may be safely removed from Elasticsearch. "Safely" means only finished process instances are deleted together with all related data, and the rest of the data stays consistent. You can use [Elasticsearch Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html) or other tools/scripts to delete old data. - -Users updating from Elasticsearch 7 to Elasticsearch 8 will encounter issues with the Elasticsearch Curator. To resolve this, Tasklist allows configuring an Index Lifecycle Management (ILM) Policy using the `archiver` configuration options: - -### Snippet from application.yml - -```yaml -camunda.tasklist: - archiver: - ilmEnabled: true - ilmMinAgeForDeleteArchivedIndices: 30d -``` - -`ilmMinAgeForDeleteArchivedIndices` defines the duration for which archived data will be stored before deletion. The values use [Elasticsearch TimeUnit format](https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units). - -This ILM Policy works on Elasticsearch 7 as well, and can function as a replacement for the Elasticsearch Curator. - -:::note -Only indices containing dates in their suffix may be deleted. -::: diff --git a/versioned_docs/version-8.2/self-managed/tasklist-deployment/importer-and-archiver.md b/versioned_docs/version-8.2/self-managed/tasklist-deployment/importer-and-archiver.md deleted file mode 100644 index c75e1fe9655..00000000000 --- a/versioned_docs/version-8.2/self-managed/tasklist-deployment/importer-and-archiver.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -id: importer-and-archiver -title: Importer and archiver -description: "Let's analyze how Tasklist is organized by modules to import and archive data." ---- - -Tasklist consists of three modules: - -- **Web app**: Contains the UI and operation executor functionality. -- **Importer**: Responsible for importing data from Zeebe. -- **Archiver**: Responsible for archiving "old" data (finished process instances and user operations.) See [data retention](data-retention.md). - -## Configuration - -Modules can be run together or separately in any combination and can be scaled. When you run a Tasklist instance, by default, all modules are enabled. To disable them, use the following configuration parameters: - -| Configuration parameter | Description | Default value | -| -------------------------------- | -------------------------------------- | ------------- | -| camunda.tasklist.importerEnabled | When true, Importer module is enabled. | true | -| camunda.tasklist.archiverEnabled | When true, Archiver module is enabled. | true | -| camunda.tasklist.webappEnabled | When true, Webapp module is enabled. | true | - -## Scaling - -Additionally, you can have several importer and archiver nodes to increase throughput. Internally, they will spread their work based on Zeebe partitions. - -For example, if your Zeebe runs 10 partitions and you configure two importer nodes, they will import data from five partitions each. - -Each single importer/archiver node must be configured using the following configuration parameters: - -| Configuration parameter | Description | Default value | -| ------------------------------------------ | -------------------------------------------------------------------------------------- | --------------------------------------------------- | -| camunda.tasklist.clusterNode.partitionIds | Array of Zeebe partition ids this importer (or archiver) node must be responsible for. | Empty array, meaning all partitions data is loaded. 
| -| camunda.tasklist.clusterNode.nodeCount | Total amount of Importer (or archiver) nodes in the cluster. | 1 | -| camunda.tasklist.clusterNode.currentNodeId | Id of current Importer (or archiver) node, starting from 0. | 0 | - -It's enough to configure either `partitionIds` or a pair of `nodeCount` and `currentNodeId`. If you provide `nodeCount` and `currentNodeId`, each node will automatically guess the Zeebe partitions they're responsible for. - -:::note -`nodeCount` always represents the number of nodes of one specific type. -::: - -For example, the configuration of a cluster with one web app node, two importer nodes, and one archiver node could look like the following: - -``` -Webapp node - -camunda.tasklist: - archiverEnabled: false - importerEnabled: false - #other configuration... - -Importer node #1 - -camunda.tasklist: - archiverEnabled: false - webappEnabled: false - clusterNode: - nodeCount: 2 - currentNodeId: 0 - #other configuration... - -Importer node #2 - -camunda.tasklist: - archiverEnabled: false - webappEnabled: false - clusterNode: - nodeCount: 2 - currentNodeId: 1 - #other configuration... - -Archiver node - -camunda.tasklist: - webappEnabled: false - importerEnabled: false - -``` - -You can further parallelize archiver and/or importer within one node using the following configuration parameters: - -| Configuration parameter | Description | Default value | -| -------------------------------------- | ------------------------------------------------- | ------------- | -| camunda.tasklist.archiver.threadsCount | Number of threads in which data will be archived. | 1 | -| camunda.tasklist.importer.threadsCount | Number of threads in which data will be imported. | 3 | - -:::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. -::: - -## Archive period - -The time between a process instance finishing and being archived can be set using the following configuration parameter: - -| Configuration parameter | Description | Default value | -| --------------------------------------------------- | -------------------------------------------- | ------------- | -| camunda.tasklist.archiver.waitPeriodBeforeArchiving | Amount of time before data will be archived. | 1h | - -By default, the archive period is set to "1h" (one hour). This means the data for the finished process instances will be kept in the "main" index for one hour after the process instance has finished, and then it will be moved to a "dated" index. - -The syntax for the parameter uses Elasticsearch date math. See the table below for reference: - -| Value | Description | -| ----- | ----------- | -| y | Years | -| M | Months | -| w | Weeks | -| d | Days | -| h | Hours | -| m | Minutes | -| s | Seconds | diff --git a/versioned_docs/version-8.2/self-managed/tasklist-deployment/install-and-start.md b/versioned_docs/version-8.2/self-managed/tasklist-deployment/install-and-start.md deleted file mode 100644 index 79ee8ab315e..00000000000 --- a/versioned_docs/version-8.2/self-managed/tasklist-deployment/install-and-start.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -id: install-and-start -title: Installation -description: "Let's get started with Tasklist by installing and running with these simple methods." 
---- - -Please refer to the [Installation Guide](/self-managed/platform-deployment/overview.md) for details on how to install Tasklist. diff --git a/versioned_docs/version-8.2/self-managed/tasklist-deployment/tasklist-authentication.md b/versioned_docs/version-8.2/self-managed/tasklist-deployment/tasklist-authentication.md deleted file mode 100644 index b3a9a8275f9..00000000000 --- a/versioned_docs/version-8.2/self-managed/tasklist-deployment/tasklist-authentication.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -id: tasklist-authentication -title: Authentication -description: "Let's take a closer look at the authentication methods of Tasklist." ---- - -Tasklist provides two ways to authenticate: - -1. User information stored in [Elasticsearch](#user-in-elasticsearch) -2. [Identity Authentication and Authorization](#identity) - -By default, user storage in Elasticsearch is enabled. - -## User in Elasticsearch - -In this mode, the user authenticates with a username and password stored in Elasticsearch. - -The **userId**, **password**, and **roles** for one user may be set in application.yml: - -``` -camunda.tasklist: - userId: aUser - password: aPassword - displayName: aDisplayName - roles: - - OWNER - - OPERATOR -``` - -On Tasklist startup, the user is created if they did not exist before. - -By default, three users are created: - -- Role `OWNER` with **userId**/**displayName**/**password** `demo`/`demo`/`demo`. To change userId, password, displayName or role for user `demo` use the above configuration. - -- Role `USER` with **userId**/**displayName**/**password** `view`/`view`/`view`. To change userId, displayName or password for this user the below configuration can be used: - -``` -camunda.tasklist: - readerUserId: aUser - readerPassword: aPassword - readerDisplayName: aDisplayName -``` - -- Role `OPERATOR` with **userId**/**displayName**/**password** `act`/`act`/`act`/. To change userId, displayName or password for this user the below configuration can be used: - -``` -camunda.tasklist: - operatorUserId: aUser - operatorPassword: aPassword - operatorDisplayName: aDisplayName -``` - -More users can be added directly to Elasticsearch, to the index `tasklist-user-_`. The password must be encoded with a strong BCrypt hashing function. - -## Identity - -[Identity](/self-managed/identity/what-is-identity.md) provides authentication and authorization functionality along with user management. - -### Enable Identity - -Identity can only be enabled by setting the [Spring profile](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-profiles): `identity-auth`. 
- -See the following example: - -``` -export SPRING_PROFILES_ACTIVE=identity-auth -``` - -### Configure Identity - -Identity requires the following parameters: - -| Parameter name | Description | Example value | -| ---------------------------------------------------- | -------------------------------------------------- | --------------------------------------------------------------------------------- | -| camunda.tasklist.identity.issuerUrl | URL of issuer (Identity) | http://localhost:18080/auth/realms/camunda-platform | -| camunda.tasklist.identity.issuerBackendUrl | Backend URL of issuer (Identity) | http://localhost:18080/auth/realms/camunda-platform | -| camunda.tasklist.identity.clientId | Similar to a username for the application | tasklist | -| camunda.tasklist.identity.clientSecret | Similar to a password for the application | XALaRPl...s7dL7 | -| camunda.tasklist.identity.audience | Audience for Tasklist | tasklist-api | -| camunda.tasklist.identity.baseUrl | Base URL for Identity | http://localhost:8084 | -| camunda.tasklist.identity.resourcePermissionsEnabled | Enable/disable Resource Permissions | true | -| spring.security.oauth2.resourceserver.jwt.issueruri | Token issuer URI | http://localhost:18080/auth/realms/camunda-platform | -| spring.security.oauth2.resourceserver.jwt.jwkseturi | Complete URI to get public keys for JWT validation | http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/certs | - -### Resource-based permissions - -1. Resource authorizations must be [enabled in Identity](/self-managed/concepts/access-control/resource-authorizations.md). -2. Tasklist must be configured to use resource authorizations (see above configurations) and `camunda.tasklist.identity.resourcePermissionsEnabled` must be enabled. - -Resource-based permissions are defined per process definition. Process definition is defined by **Process ID**, which is present in BPMN XML. - -The user or user group can be assigned the following permission: - -| Permission name | Resource type(s) | Allowed action(s) in Operate | -| ---------------------- | ------------------ | ----------------------------------------------- | -| START_PROCESS_INSTANCE | process-definition | User can start this process ad hoc on Tasklist. | - -For more information, visit the [Identity documentation](/self-managed/concepts/access-control/resource-authorizations.md). - -### Use Identity JWT token to access Tasklist API - -Tasklist provides a [GraphQL API](/apis-tools/tasklist-api/tasklist-api-overview.md) under the endpoint `/graphql`. Clients can access this API using a JWT access token in an authorization header `Authorization: Bearer `. - -:::note -Be aware a JWT token is intended to be used for M2M communication and is therefore issued for the relevant application, not for the user. -::: - -**Example:** - -1. [Add an application in Identity](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). -2. [Add permissions to an application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for Tasklist API. -3. Obtain a token to access the GraphQL API. - You will need: - - `client_id` and `client_secret` from Identity application you created. - - URL of the authorization server will look like: `http://:/auth/realms/camunda-platform/protocol/openid-connect/token`, where host and port reference Keycloak URL (e.g. `localhost:18080`). 
- -```shell -curl --location --request POST 'http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token' \ ---header 'Content-Type: application/x-www-form-urlencoded' \ ---data-urlencode 'client_id=' \ ---data-urlencode 'client_secret=' \ ---data-urlencode 'grant_type=client_credentials' -``` - -You will get something like the following: - -```json -{ - "access_token": "eyJhbG...", - "expires_in": 300, - "refresh_expires_in": 0, - "token_type": "Bearer", - "not-before-policy": 0 -} -``` - -Take the `access_token` value from the response object and store it as your token. - -2. Send the token as an authorization header in each request. In this case, request all tasks. - -```shell -curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer " -d '{"query": "{tasks(query:{}){id name}}"}' http://localhost:8080/graphql -``` - -## Zeebe client credentials - -If the Zeebe Gateway is set up with Camunda Identity-based authorization, [Zeebe client OAuth environment variables](../zeebe-deployment/security/client-authorization.md#environment-variables) must be provided. diff --git a/versioned_docs/version-8.2/self-managed/tasklist-deployment/tasklist-configuration.md b/versioned_docs/version-8.2/self-managed/tasklist-deployment/tasklist-configuration.md deleted file mode 100644 index 844632fdcd6..00000000000 --- a/versioned_docs/version-8.2/self-managed/tasklist-deployment/tasklist-configuration.md +++ /dev/null @@ -1,319 +0,0 @@ ---- -id: tasklist-configuration -title: Configuration ---- - -As a Spring Boot application, Tasklist supports any standard -[Spring configuration](https://docs.spring.io/spring-boot/reference/features/external-config.html) method. - -By default, the configuration for Tasklist is stored in a YAML file `application.yml`. All Tasklist-related settings are prefixed with `camunda.tasklist`. - -:::note -Configuration properties can be defined as environment variables using [Spring Boot conventions](https://docs.spring.io/spring-boot/reference/features/external-config.html#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables). To define an environment variable, convert the configuration property to uppercase, remove any dashes, and replace any delimiters (`.`) with `_`. - -For example, the property `server.servlet.context-path` is represented by the environment variable `SERVER_SERVLET_CONTEXT_PATH`. 
-::: - -The following components are configurable: - -- [Webserver](#webserver) -- [Elasticsearch](#elasticsearch) - - [Settings to connect](#settings-to-connect) - - [Settings to connect to a secured Elasticsearch instance](#settings-to-connect-to-a-secured-elasticsearch-instance) - - [Settings for shards and replicas](#settings-for-shards-and-replicas) - - [A snippet from application.yml](#a-snippet-from-applicationyml) -- [Zeebe broker connection](#zeebe-broker-connection) - - [Settings to connect](#settings-to-connect-1) - - [A snippet from application.yml](#a-snippet-from-applicationyml-1) -- [Zeebe Elasticsearch exporter](#zeebe-elasticsearch-exporter) - - [Settings to connect and import](#settings-to-connect-and-import) - - [A snippet from application.yml](#a-snippet-from-applicationyml-2) -- [Monitoring and health probes](#monitoring-and-health-probes) - - [Example snippets to use Tasklist probes in Kubernetes](#example-snippets-to-use-tasklist-probes-in-kubernetes) - - [Readiness probe as yaml config](#readiness-probe-as-yaml-config) - - [Liveness probe as yaml config](#liveness-probe-as-yaml-config) -- [Logging](#logging) - - [JSON logging configuration](#json-logging-configuration) - - [Change logging level at runtime](#change-logging-level-at-runtime) - - [Set all Tasklist loggers to DEBUG](#set-all-tasklist-loggers-to-debug) -- [An example of application.yml file](#an-example-of-applicationyml-file) - -## Webserver - -Tasklist supports customizing the **context-path** using the default Spring configuration. - -Example for `application.yml`: -`server.servlet.context-path: /tasklist` - -Example for environment variable: -`SERVER_SERVLET_CONTEXT_PATH=/tasklist` - -Default context-path is `/`. - -## Elasticsearch - -Tasklist stores and reads data in/from Elasticsearch. - -### Settings to connect - -Tasklist supports [basic authentication](https://www.elastic.co/guide/en/elasticsearch/reference/7.12/setting-up-authentication.html) for Elasticsearch. Set the appropriate username/password combination in the configuration to use it. - -#### Settings to connect to a secured Elasticsearch instance - -To connect to a secured (https) Elasticsearch instance you need normally only set the URL protocol -part to `https` instead of `http`. A secured Elasticsearch instance needs also `username` and `password`. -The other SSL settings should only be used in case of connection problems, for example disable -host verification. - -:::note -You may need to import the certificate into JVM runtime. 
-::: - -| Name | Description | Default value | -| -------------------------------------------------- | ----------------------------------------- | --------------------- | -| camunda.tasklist.elasticsearch.indexPrefix | Prefix for index names | tasklist | -| camunda.tasklist.elasticsearch.clusterName | Clustername of Elasticsearch | elasticsearch | -| camunda.tasklist.elasticsearch.url | URL of Elasticsearch REST API | http://localhost:9200 | -| camunda.tasklist.elasticsearch.username | Username to access Elasticsearch REST API | - | -| camunda.tasklist.elasticsearch.password | Password to access Elasticsearch REST API | - | -| camunda.tasklist.elasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - | -| camunda.tasklist.elasticsearch.ssl.selfSigned | Certificate was self signed | false | -| camunda.tasklist.elasticsearch.ssl.verifyHostname | Should the hostname be validated | false | - -### Settings for shards and replicas - -Tasklist creates the template with index settings named `tasklist-_template` that Elasticsearch uses for all Tasklist indices. These settings can be changed. - -The following configuration parameters define the settings: - -| Name | Description | Default value | -| ----------------------------------------------- | -------------------------------------------------------------- | ------------- | -| camunda.tasklist.elasticsearch.numberOfShards | How many shards Elasticsearch uses for all Tasklist indices. | 1 | -| camunda.tasklist.elasticsearch.numberOfReplicas | How many replicas Elasticsearch uses for all Tasklist indices. | 0 | - -These values are applied only on first startup of Tasklist or during version update. After the Tasklist -ELS schema is created, settings may be adjusted directly in the ELS template, and the new settings are applied -to indices created after adjustment. - -### A snippet from application.yml - -```yaml -camunda.tasklist: - elasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - ssl: - selfSigned: true -``` - -## Zeebe broker connection - -Tasklist needs a connection to Zeebe broker to start the import. - -### Settings to connect - -| Name | Description | Default value | -| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | --------------- | -| camunda.tasklist.zeebe.gatewayAddress | Gateway address that points to Zeebe as hostname and port. | localhost:26500 | -| camunda.tasklist.zeebe.secure | Connection should be secure via Transport Layer Security (TLS). | false | -| camunda.tasklist.zeebe.certificatePath | Path to certificate used by Zeebe. This is necessary when the certificate isn't registered in the operating system. | - | - -Additionally, visit [Zeebe Secure Client Communication](/docs/self-managed/zeebe-deployment/security/secure-client-communication/) for more details. - -### A snippet from application.yml - -```yaml -camunda.tasklist: - zeebe: - # Gateway host and port - gatewayAddress: localhost:26500 -``` - -` - -## Zeebe Elasticsearch exporter - -Tasklist imports data from Elasticsearch indices created and filled in by [Zeebe Elasticsearch Exporter](https://github.com/camunda/camunda/tree/stable/8.2/exporters/elasticsearch-exporter). - -Therefore, settings for this Elasticsearch connection must be defined and correspond to the settings on the Zeebe side. 
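For reference, a minimal sketch of what the corresponding exporter configuration on the Zeebe broker side could look like; the URL and index prefix shown here are placeholders and must match the `camunda.tasklist.zeebeElasticsearch` settings described below:

```yaml
# Zeebe broker side: Elasticsearch exporter writing the records that Tasklist imports.
zeebe:
  broker:
    exporters:
      elasticsearch:
        className: io.camunda.zeebe.exporter.ElasticsearchExporter
        args:
          url: http://localhost:9200 # must match camunda.tasklist.zeebeElasticsearch.url
          index:
            prefix: zeebe-record # must match camunda.tasklist.zeebeElasticsearch.prefix
```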
- -### Settings to connect and import - -See also [settings to connect to a secured Elasticsearch instance](#settings-to-connect-to-a-secured-elasticsearch-instance). - -| Name | Description | Default value | -| ------------------------------------------------------- | ---------------------------------------------------------- | --------------------- | -| camunda.tasklist.zeebeElasticsearch.clusterName | Cluster name of Elasticsearch | elasticsearch | -| camunda.tasklist.zeebeElasticsearch.url | URL of Elasticsearch REST API | http://localhost:9200 | -| camunda.tasklist.zeebeElasticsearch.prefix | Index prefix as configured in Zeebe Elasticsearch exporter | zeebe-record | -| camunda.tasklist.zeebeElasticsearch.username | Username to access Elasticsearch REST API | - | -| camunda.tasklist.zeebeElasticsearch.password | Password to access Elasticsearch REST API | - | -| camunda.tasklist.zeebeElasticsearch.ssl.certificatePath | Path to certificate used by Elasticsearch | - | -| camunda.tasklist.zeebeElasticsearch.ssl.selfSigned | Certificate was self signed | false | -| camunda.tasklist.zeebeElasticsearch.ssl.verifyHostname | Should the hostname be validated | false | - -### A snippet from application.yml - -```yaml -camunda.tasklist: - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # Url - url: https://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` - -## Monitoring and health probes - -Tasklist includes the [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready) inside, which -provides the number of monitoring possibilities (e.g. health check (http://localhost:8080/actuator/health) and metrics (http://localhost:8080/actuator/prometheus) endpoints). - -Tasklist uses the following Actuator configuration by default: - -```yaml -# disable default health indicators: -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-health-indicators -management.health.defaults.enabled: false - -# enable Kubernetes health groups: -# https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-kubernetes-probes -management.endpoint.health.probes.enabled: true - -# enable health check and metrics endpoints -management.endpoints.web.exposure.include: health, prometheus, loggers, usage-metrics, backups -``` - -With this configuration, the following endpoints are available for use out of the box: - -`:8080/actuator/prometheus` Prometheus metrics - -`:8080/actuator/health/liveness` Liveness probe - -`:8080/actuator/health/readiness` Readiness probe - -### Example snippets to use Tasklist probes in Kubernetes - -For details to set Kubernetes probes parameters, see [Kubernetes configure probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). - -#### Readiness probe as yaml config - -```yaml -readinessProbe: - httpGet: - path: /actuator/health/readiness - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 30 -``` - -#### Liveness probe as yaml config - -```yaml -livenessProbe: - httpGet: - path: /actuator/health/liveness - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 30 -``` - -## Logging - -Tasklist uses Log4j2 framework for logging. 
In the distribution archive and inside a Docker image `config/log4j2.xml`, logging configuration files are included and can be further adjusted to your needs: - -```xml - - - - %clr{%d{yyyy-MM-dd HH:mm:ss.SSS}}{faint} %clr{%5p} %clr{${sys:PID}}{magenta} %clr{---}{faint} %clr{[%15.15t]}{faint} %clr{%-40.40c{1.}}{cyan} %clr{:}{faint} %m%n%xwEx - ${env:TASKLIST_LOG_STACKDRIVER_SERVICENAME:-tasklist} - ${env:TASKLIST_LOG_STACKDRIVER_SERVICEVERSION:-} - - - - - - - - - - - - - - - - -``` - -By default, Console Appender is used. - -### JSON logging configuration - -You can choose to output logs in JSON format (Stackdriver compatible). To enable it, define -the environment variable `TASKLIST_LOG_APPENDER` like the following: - -```sh -TASKLIST_LOG_APPENDER=Stackdriver -``` - -### Change logging level at runtime - -Tasklist supports the default scheme for changing logging levels as provided by [Spring Boot](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers). - -The log level for Tasklist can be changed by following the [Setting a Log Level](https://docs.spring.io/spring-boot/docs/2.4.3/actuator-api/htmlsingle/#loggers-setting-level) section. - -#### Set all Tasklist loggers to DEBUG - -```shell -curl 'http://localhost:8080/actuator/loggers/io.camunda.tasklist' -i -X POST \ --H 'Content-Type: application/json' \ --d '{"configuredLevel":"debug"}' -``` - -## An example of application.yml file - -The following snippet represents the default Tasklist configuration, which is shipped with the distribution at the following location: - -- For versions `8.2.20` and later: `/usr/local/tasklist/config/application.yml`. -- For versions `[8.2.0, 8.2.20)`: `/app/resources/application.yml`. - -This configuration file can be used to adjust Tasklist to your needs. - -```yaml -# Tasklist configuration file - -camunda.tasklist: - # Set Tasklist username and password. - # If user with does not exists it will be created. - # Default: demo/demo - #username: - #password: - #roles: - # - OWNER - # - OPERATOR - - # ELS instance to store Tasklist data - elasticsearch: - # Cluster name - clusterName: elasticsearch - # url - url: http://localhost:9200 - # Zeebe instance - zeebe: - # Gateway address - gatewayAddress: localhost:26500 - # ELS instance to export Zeebe data to - zeebeElasticsearch: - # Cluster name - clusterName: elasticsearch - # url - url: http://localhost:9200 - # Index prefix, configured in Zeebe Elasticsearch exporter - prefix: zeebe-record -``` diff --git a/versioned_docs/version-8.2/self-managed/tasklist-deployment/usage-metrics.md b/versioned_docs/version-8.2/self-managed/tasklist-deployment/usage-metrics.md deleted file mode 100644 index f2f5d3b87ac..00000000000 --- a/versioned_docs/version-8.2/self-managed/tasklist-deployment/usage-metrics.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: usage-metrics -title: Usage metrics -description: "Tasklist provides usage metrics under usage-metrics Actuator endpoint. It is exposed on management port." ---- - -Tasklist provides usage metrics under `usage-metrics` Actuator endpoint. It is exposed on management port, which can be configured via `management.server.port` configuration parameter (default: 8080). - -## Number of active users - -This endpoint returns the number of unique users assigned to tasks in a given period and each of the unique `usernames`. - -This also returns the `usernames` so we can reconcile in the case of multiple instances. 
- -Endpoint: - -``` -http://:/actuator/usage-metrics/assignees?startTime={startTime}&endTime={endTime} -``` - -Here, `startTime` and `endTime` are of format `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`, e.g. "1970-11-14T10:50:26.963-0100". - -Sample response: - -```json -{ - "total": 2, - "assignees": ["john.lennon", "oprah.winfrey"] -} -``` diff --git a/versioned_docs/version-8.2/self-managed/troubleshooting/log-levels.md b/versioned_docs/version-8.2/self-managed/troubleshooting/log-levels.md deleted file mode 100644 index 72edba1e9bb..00000000000 --- a/versioned_docs/version-8.2/self-managed/troubleshooting/log-levels.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: log-levels -title: "Log levels" -description: "Learn about what information you can expect in various log levels and how to handle them" ---- - -When working with Camunda 8, you may see various messages in your logs. Not all messages require action. - -## Understanding log levels - -Camunda 8 uses the following log levels: - -- TRACE: Information which is helpful only if you want to trace the execution of a particular component. -- DEBUG: Information which can provide helpful context when debugging. You may see a DEBUG message right after an INFO message to provide more context. -- INFO: Information about the system which is useful for the user (in the case of the broker, the user here is the user deploying it). For example, leader changes, a new node added to or removed from the membership, etc. -- WARN: Expected errors (e.g. connection timeouts, the remote node is unavailable, etc.) which may indicate that parts of the system are not working, and would require attention if they persist, but may resolve by themselves. These should be monitored, but may not require a support ticket. -- ERROR: Errors which require a person to look into them, e.g. log corruption, inconsistent log, anything which could shut down a partition, etc. - -## Enable logging - -Enable logging for each component of Camunda 8 using the following instructions: - -- [Zeebe](../zeebe-deployment/configuration/logging.md) -- [Operate](../operate-deployment/operate-configuration.md#logging) -- [Tasklist](../tasklist-deployment/tasklist-configuration.md#logging) -- [Web Modeler](../modeler/web-modeler/configuration/logging.md) diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/broker.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/broker.md deleted file mode 100644 index 82c1a320871..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/broker.md +++ /dev/null @@ -1,692 +0,0 @@ ---- -id: broker-config -title: "Broker configuration" -sidebar_label: "Broker configuration" -description: "Let's analyze how to configure the Zeebe broker" ---- - -A complete broker configuration template is available in the [Zeebe repo](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template). - -## Conventions - -Take the following conventions into consideration when working with the broker configuration. - -### Byte sizes - -Buffers and data values referencing sizing must be specified as strings and follow the following format: "10U" where U (unit) must be replaced with KB = Kilobytes, MB = Megabytes or GB = Gigabytes. If unit is omitted then the default unit is simply bytes. 
- -Example: -`sendBufferSize = "16MB"` (creates a buffer of 16 Megabytes) - -### Time units - -Timeouts, intervals, and the likes, must be specified either in the standard ISO-8601 format used by java.time.Duration, or as strings with the following format: "VU", where: - -- V is a numerical value (e.g. 1, 5, 10, etc.) -- U is the unit, one of: ms = Millis, s = Seconds, m = Minutes, or h = Hours - -### Paths - -Relative paths are resolved relative to the installation directory of the broker. - -## Configuration - -We provide tables with environment variables, application properties, a description, and corresponding default values in the following sections. We also describe a few use cases for each type of configuration. - -Configuration names are noted as the **header** of each documented section, while the **field** values represent properties to set the configuration. - -### zeebe.broker.gateway - -To configure the embedded gateway, see [Gateway config docs](/self-managed/zeebe-deployment/configuration/gateway.md). - -| Field | Description | Example value | -| ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| enable | Enable the embedded gateway to start on broker startup. This setting can also be overridden using the environment variable `ZEEBE_BROKER_GATEWAY_ENABLE`. | false | - -#### YAML snippet - -```yaml -broker: - gateway: - enable: false -``` - -### zeebe.broker.network - -This section contains the network configuration. Particularly, it allows to configure the hosts and ports the broker should bind to. The broker exposes two sockets: - -1. command: the socket which is used for gateway-to-broker communication -2. internal: the socket which is used for broker-to-broker communication - -| Field | Description | Example Value | -| -------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| host | Controls the default host the broker should bind to. Can be overwritten on a per binding basis for client, management and replication. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_HOST`. | 0.0.0.0 | -| advertisedHost | Controls the advertised host (the contact point advertised to other brokers); if omitted defaults to the host. This is particularly useful if your broker stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_ADVERTISEDHOST`. | 0.0.0.0 | -| portOffset | If a port offset is set it will be added to all ports specified in the config or the default values. This is a shortcut to not always specifying every port. The offset will be added to the second last position of the port, as Zeebe requires multiple ports. As example a portOffset of 5 will increment all ports by 50, i.e. 26500 will become 26550 and so on. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_PORTOFFSET`. | 0 | -| maxMessageSize | Sets the maximum size of the incoming and outgoing messages (i.e. commands and events). 
This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_MAXMESSAGESIZE`. | 4MB | - -#### YAML snippet - -```yaml -network: - host: 0.0.0.0 - advertisedHost: 0.0.0.0 - portOffset: 0 - maxMessageSize: 4MB -``` - -### zeebe.broker.network.security - -| Field | Description | Example Value | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| enabled | Enables TLS authentication between this gateway and other nodes in the cluster. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_SECURITY_ENABLED`. | false | -| certificateChainPath | Sets the path to the certificate chain file. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_SECURITY_CERTIFICATECHAINPATH`. | | -| privateKeyPath | Sets the path to the private key file location. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_SECURITY_PRIVATEKEYPATH`. | | - -#### YAML snippet - -```yaml -security: - enabled: false - certificateChainPath: - privateKeyPath: -``` - -### zeebe.broker.network.commandApi - -| Field | Description | Example Value | -| -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| host | Overrides the host used for gateway-to-broker communication. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_COMMANDAPI_HOST`. | 0.0.0.0 | -| port | Sets the port used for gateway-to-broker communication. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_COMMANDAPI_PORT`. | 26501 | -| advertisedHost | Controls the advertised host; if omitted defaults to the host. This is particularly useful if your broker stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_COMMANDAPI_ADVERTISEDHOST`. | 0.0.0.0 | -| advertisedPort | Controls the advertised port; if omitted defaults to the port. This is particularly useful if your broker stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_COMMANDAPI_ADVERTISEDPORT`. | 25601 | - -#### YAML snippet - -```yaml -commandApi: - host: 0.0.0.0 - port: 26501 - advertisedHost: 0.0.0.0 - advertisedPort: 25601 -``` - -### zeebe.broker.network.internalApi - -| Field | Description | Example Value | -| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| host | Overrides the host used for internal broker-to-broker communication. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_INTERNALAPI_HOST`. | 0.0.0.0 | -| port | Sets the port used for internal broker-to-broker communication. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_INTERNALAPI_PORT`. | 26502 | -| advertisedHost | Controls the advertised host; if omitted defaults to the host. 
This is particularly useful if your broker stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_INTERNALAPI_ADVERTISEDHOST`. | 0.0.0.0 | -| advertisedPort | Controls the advertised port; if omitted defaults to the port. This is particularly useful if your broker stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_BROKER_NETWORK_INTERNALAPI_ADVERTISEDPORT`. | 25602 | - -#### YAML snippet - -```yaml -internalApi: - host: 0.0.0.0 - port: 26502 - advertisedHost: 0.0.0.0 - advertisedPort: 25602 -``` - -### zeebe.broker.data - -This section allows to configure Zeebe's data storage. Data is stored in "partition folders". A partition folder has the following structure: - -``` -partitions -└── 1 (root partition folder) - ├── 1.log - ├── 2.log - └── snapshots - └── - └── xx.sst - └── runtime - └── yy.sst -``` - -| Field | Description | Example Value | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| directory | Specify the directory in which data is stored. This setting can also be overridden using the environment variable ZEEBE_BROKER_DATA_DIRECTORY. | data | -| runtimeDirectory | Specify the directory in which runtime is stored. By default runtime is stored in `directory` for data. If runtimeDirectory is configured, then the configured directory will be used. It will have a subdirectory for each partition to store its runtime. There is no need to store runtime in a persistent storage. This configuration allows to split runtime to another disk to optimize for performance and disk usage. Note: If runtime is another disk than the data directory, files need to be copied to data directory while taking snapshot. This may impact disk i/o or performance during snapshotting. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_RUNTIMEDIRECTORY`. | null | -| logSegmentSize | The size of data log segment files. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_LOGSEGMENTSIZE`. | 128MB | -| snapshotPeriod | How often we take snapshots of streams (time unit). This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_SNAPSHOTPERIOD`. 
| 5m | - -#### YAML snippet - -```yaml -data: - directory: data - runtimeDirectory: null - logSegmentSize: 128MB - snapshotPeriod: 5m -``` - -### zeebe.broker.data.disk - -| Field | Description | Example Value | -| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| enableMonitoring | Configure disk monitoring to prevent getting into a non-recoverable state due to out of disk space. When monitoring is enabled, the broker rejects commands and pause replication when the required freeSpace is not available. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_DISK_ENABLEMONITORING` | true | -| monitoringInterval | Sets the interval at which the disk usage is monitored. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_DISK_MONITORINGINTERVAL` | 1s | - -#### YAML snippet - -```yaml -disk: - enableMonitoring: true - monitoringInterval: 1s -``` - -### zeebe.broker.data.disk.freeSpace - -| Field | Description | Example Value | -| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| processing | When the free space available is less than this value, this broker rejects all client commands and pause processing. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_DISK_FREESPACE_PROCESSING` | 2GB | -| replication | When the free space available is less than this value, broker stops receiving replicated events. This value must be less than freeSpace.processing. It is recommended to configure free space large enough for at least one log segment and one snapshot. This is because a partition needs enough space to take a new snapshot to be able to compact the log segments to make disk space available again. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_DISK_FREESPACE_REPLICATION` | 1GB | - -#### YAML snippet - -```yaml -disk: - freeSpace: - processing: 2GB - replication: 1GB -``` - -### zeebe.broker.data.backup - -Configure backup store. - -:::note - -Use the same configuration on all brokers of this cluster. - -::: - -| Field | Description | Example Value | -| ----- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| store | Set the backup store type. Supported values are [NONE, S3, GCS]. Default value is NONE. 
When NONE, no backup store is configured and no backup will be taken. Use S3 to use any [S3 compatible storage](https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_Reference.html). Use GCS to use [Google Cloud Storage](https://cloud.google.com/storage/). This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_STORE`. | NONE | - -#### YAML snippet - -```yaml -backup: - store: NONE -``` - -### zeebe.broker.data.backup.s3 - -Configure the following if store is set to s3. - -| Field | Description | Example Value | -| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| bucketName | Name of the bucket where the backup will be stored. The bucket must be already created. The bucket must not be shared with other zeebe clusters. bucketName must not be empty. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_BUCKETNAME`. | | -| endpoint | Configure URL endpoint for the store. If no endpoint is provided, it will be determined based on the configured region. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_ENDPOINT`. | | -| region | Configure AWS region. If no region is provided it will be determined as [documented](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/region-selection.html#automatically-determine-the-aws-region-from-the-environment). This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_REGION` | | -| accessKey | Configure access credentials. If either accessKey or secretKey is not provided, the credentials will be determined as [documented](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/credentials.html#credentials-chain). This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_ACCESSKEY` | | -| secretKey | This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_SECRETKEY`. | | -| apiCallTimeout | Configure a maximum duration for all S3 client API calls. Lower values will ensure that failed or slow API calls don't block other backups but may increase the risk that backups can't be stored if uploading parts of the backup takes longer than the configured timeout. See https://github.com/aws/aws-sdk-java-v2/blob/master/docs/BestPractices.md#utilize-timeout-configurations. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_APICALLTIMEOUT`. | PT180S | -| forcePathStyleAccess | When enabled, forces the s3 client to use path-style access. By default, the client will automatically choose between path-style and virtual-hosted-style. Should only be enabled if the s3 compatible storage cannot support virtual-hosted-style. See https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_FORCEPATHSTYLEACCESS`. 
| false | -| compression | When set to an algorithm such as 'zstd', enables compression of backup contents. When not set or set to 'none', backup content is not compressed. Enabling compression reduces the required storage space for backups in S3 but also increases the impact on CPU and disk utilization while taking a backup. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_COMPRESSION` | none | -| basePath | When set, all objects in the bucket will use this prefix. Must be non-empty and not start or end with '/'. Useful for using the same bucket for multiple Zeebe clusters. In this case, basePath must be unique. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_S3_BASEPATH`. | - -#### YAML snippet - -```yaml -backup: - store: s3 - s3: - bucketName: null - endpoint: null - region: null - secretKey: null - apiCallTimeout: PT180S - forcePathStyleAccess: false - compression: none - basePath: null -``` - -### zeebe.broker.data.backup.gcs - -Configure the following if store is set to GCS. - -| Field | Description | Example Value | -| ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| bucketName | Name of the bucket where the backup will be stored. The bucket must already exist. The bucket must not be shared with other Zeebe clusters unless basePath is also set. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_GCS_BUCKETNAME`. | | -| basePath | When set, all blobs in the bucket will use this prefix. Useful for using the same bucket for multiple Zeebe clusters. In this case, basePath must be unique. Should not start or end with '/' character. Must be non-empty and not consist of only '/' characters. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_GCS_BASEPATH`. | | -| host | When set, this overrides the host that the GCS client connects to. By default, this is not set because the client can automatically discover the correct host to connect to. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_GCS_HOST` | | -| auth | Configures which authentication method is used for connecting to GCS. Can be either 'auto' or 'none'. Choosing 'auto' means that the GCS client uses application default credentials which automatically discovers appropriate credentials from the runtime environment: https://cloud.google.com/docs/authentication/application-default-credentials. Choosing 'none' means that no authentication is attempted which is only applicable for testing with emulated GCS. This setting can also be overridden using the environment variable `ZEEBE_BROKER_DATA_BACKUP_GCS_AUTH`. | auto | - -#### YAML snippet - -```yaml -backup: - store: gcs - gcs: - bucketName: null - basePath: null - host: null - auth: auto -``` - -### zeebe.broker.cluster - -This section contains all cluster related configurations, to setup a zeebe cluster. 
- -| Field | Description | Example Value | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | -| nodeId | Specifies the unique id of this broker node in a cluster. The id should be between 0 and number of nodes in the cluster (exclusive). This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_NODEID`. | 0 | -| partitionsCount | Controls the number of partitions, which should exist in the cluster. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT`. | 1 | -| replicationFactor | Controls the replication factor, which defines the count of replicas per partition. The replication factor cannot be greater than the number of nodes in the cluster. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR`. | 1 | -| clusterSize | Specifies the zeebe cluster size. This value is used to determine which broker is responsible for which partition. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_CLUSTERSIZE`. | 1 | -| initialContactPoints | Allows to specify a list of known other nodes to connect to on startup. The contact points of the internal network configuration must be specified. The format is [HOST:PORT]. To guarantee the cluster can survive network partitions, all nodes must be specified as initial contact points. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS` specifying a comma-separated list of contact points. Default is empty list. | [ 192.168.1.22:26502, 192.168.1.32:26502 ] | -| clusterName | Allows to specify a name for the cluster. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_CLUSTERNAME`. | zeebe-cluster | -| heartbeatInterval | Configure heartbeatInterval. The leader sends a heartbeat to a follower every heartbeatInterval. Note: This is an advanced setting. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_HEARTBEATINTERVAL`. | 250ms | -| electionTimeout | Configure electionTimeout. If a follower does not receive a heartbeat from the leader with in an election timeout, it can start a new leader election. electionTimeout should be greater than configured heartbeatInterval. When the electionTimeout is large, there will be delay in detecting a leader failure. When the electionTimeout is small, it can lead to false positives when detecting leader failures and thus leading to unnecessary leader changes. If the network latency between the nodes is high, it is recommended to have a higher election latency. Note: This is an advanced setting. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_ELECTIONTIMEOUT`. 
| 2500ms | - -#### YAML snippet - -```yaml -cluster: - nodeId: 0 - partitionsCount: 1 - replicationFactor: 1 - clusterSize: 1 - initialContactPoints: [] - clusterName: zeebe-cluster - heartbeatInterval: 250ms - electionTimeout: 2500ms -``` - -### zeebe.broker.cluster.raft - -| Field | Description | Example Value | -| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| raft | Configure raft properties. | | -| enablePriorityElection | When this flag is enabled, the leader election algorithm attempts to elect the leaders based on a pre-defined priority. As a result, it tries to distributed the leaders uniformly across the brokers. Note that it is only a best-effort strategy. It is not guaranteed to be a strictly uniform distribution. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION`. | true | - -#### YAML snippet - -```yaml -cluster: - raft: enablePriorityElection = true -``` - -### zeebe.broker.cluster.flush - -| Field | Description | Example Value | -| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| flush | Configures how often data is explicitly flushed to disk. By default, for a given partition, data is flushed on every leader commit, and every follower append. This is to ensure consistency across all replicas. Disabling this can cause inconsistencies, and at worst, data corruption or data loss scenarios. The default behavior is optimized for safety, and flushing occurs on every leader commit and follower append in a synchronous fashion. You can introduce a delay to reduce the performance penalty of flushing via `delayTime`. | | -| enabled | If false, explicit flushing of the Raft log is disabled, and flushing only occurs right before a snapshot is taken. You should only disable explicit flushing if you are willing to accept potential data loss at the expense of performance. Before disabling it, try the delayed options, which provide a trade-off between safety and performance. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_RAFT_FLUSH_ENABLED`. | true | -| delayTime | If the delay is > 0, then flush requests are delayed by at least the given period. It is recommended that you find the smallest delay here with which you achieve your performance goals. It's also likely that anything above 30s is not useful, as this is the typical default flush interval for the Linux OS. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_RAFT_FLUSH_DELAYTIME`. 
| 0s | - -#### YAML snippet - -```yaml -cluster: - flush: - enabled: true - delayTime: 0s -``` - -### zeebe.broker.cluster.membership - -Configures parameters for the SWIM protocol, which is used to propagate cluster membership information among brokers and gateways. - -| Field | Description | Example Value | -| ----------------- | ----------- | ------------- | -| broadcastUpdates | Configure whether to broadcast member updates to all members. If set to false, updates will be gossiped among the members. If set to true, the network traffic may increase, but the time to detect membership changes is reduced. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_BROADCASTUPDATES`. | false | -| broadcastDisputes | Configure whether to broadcast disputes to all members. If set to true, the network traffic may increase, but the time to detect membership changes is reduced. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_BROADCASTDISPUTES`. | true | -| notifySuspect | Configure whether to notify a suspect node on state changes. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_NOTIFYSUSPECT`. | false | -| gossipInterval | Sets the interval at which the membership updates are sent to a random member. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_GOSSIPINTERVAL`. | 250ms | -| gossipFanout | Sets the number of members to which membership updates are sent at each gossip interval. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_GOSSIPFANOUT`. | 2 | -| probeInterval | Sets the interval at which to probe a random member. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_PROBEINTERVAL`. | 1s | -| probeTimeout | Sets the timeout for a probe response. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_PROBETIMEOUT`. | 100ms | -| suspectProbes | Sets the number of failed probes before a member is declared suspect. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_SUSPECTPROBES`. | 3 | -| failureTimeout | Sets the timeout after which a suspect member is declared dead. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_FAILURETIMEOUT`. | 10s | -| syncInterval | Sets the interval at which this member synchronizes its membership information with a random member. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MEMBERSHIP_SYNCINTERVAL`. | 10s | - -#### YAML snippet - -```yaml -membership: - broadcastUpdates: false - broadcastDisputes: true - notifySuspect: false - gossipInterval: 250ms - gossipFanout: 2 - probeInterval: 1s - probeTimeout: 100ms - suspectProbes: 3 - failureTimeout: 10s - syncInterval: 10s -``` - -### zeebe.broker.cluster.messageCompression - -This feature is useful when the network latency between the nodes is very high (for example, when nodes are deployed in different data centers). 
- -When latency is high, the network bandwidth is severely reduced. Hence, enabling compression helps to improve throughput. - -:::caution -When there is no latency, enabling this may have a performance impact. -::: - -:::note -When this flag is enabled, you must also enable compression in the standalone gateway configuration. -::: - -| Field | Description | Example Value | -| ------------------ | ----------- | ------------- | -| messageCompression | Configures the compression algorithm for all messages sent between the gateway and the brokers. Available options are NONE, GZIP, and SNAPPY. This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_MESSAGECOMPRESSION`. | NONE | - -#### YAML snippet - -```yaml -messageCompression: NONE -``` - -### zeebe.broker.threads - -| Field | Description | Example Value | -| -------------- | ----------- | ------------- | -| cpuThreadCount | Controls the number of non-blocking CPU threads to be used. WARNING: You should never specify a value that is larger than the number of physical cores available. Good practice is to leave 1-2 cores for ioThreads and the operating system (it has to run somewhere). For example, when running Zeebe on a machine which has 4 cores, a good value would be 2. This setting can also be overridden using the environment variable `ZEEBE_BROKER_THREADS_CPUTHREADCOUNT`. | 2 | -| ioThreadCount | Controls the number of I/O threads to be used. These threads are used for workloads that write data to disk. While writing, these threads are blocked, which means they yield the CPU. This setting can also be overridden using the environment variable `ZEEBE_BROKER_THREADS_IOTHREADCOUNT`. | 2 | - -#### YAML snippet - -```yaml -threads: - cpuThreadCount: 2 - ioThreadCount: 2 -``` - -### zeebe.broker.backpressure - -| Field | Description | Example Value | -| ----------- | ----------- | ------------- | -| enabled | Set this to enable or disable backpressure. When enabled, the broker rejects user requests when the number of inflight requests is greater than the "limit". The value of the "limit" is determined based on the configured algorithm. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_ENABLED`. | true | -| useWindowed | If enabled, the average latency over a window is used as the current latency to update the limit. It is not recommended to enable this when the algorithm is aimd. This setting is not applicable to the fixed limit algorithm. 
This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_USEWINDOWED`. | true | -| algorithm | The algorithm configures which algorithm to use for the backpressure. It should be one of vegas, aimd, fixed, gradient, or gradient2. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_ALGORITHM`. | aimd | - -#### YAML snippet - -```yaml -backpressure: - enabled: true - useWindowed: true - algorithm: aimd -``` - -### zeebe.broker.backpressure.aimd - -| Field | Description | Example Value | -| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| requestTimeout | The limit will be reduced if the observed latency is greater than the requestTimeout. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_AIMD_REQUESTTIMEOUT`. | 200ms | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_AIMD_INITIALLIMIT`. | 100 | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_AIMD_MINLIMIT`. | 1 | -| maxLimit | The maximum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_AIMD_MAXLIMIT`. | 1000 | -| backoffRatio | The backoffRatio is a double value x such that x is between 0 and 1. It determines the factor by which the limit is decreased. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_AIMD_BACKOFFRATIO`. | 0.9 | - -#### YAML snippet - -```yaml -backpressure: - algorithm: aimd - aimd: - requestTimeout: 200ms - initialLimit: 100 - minLimit: 1 - maxLimit: 1000 - backoffRatio: 0.9 -``` - -### zeebe.broker.backpressure.fixed - -| Field | Description | Example Value | -| ----- | ------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| limit | Set a fixed limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_FIXED_LIMIT`. | 20 | - -#### YAML snippet - -```yaml -backpressure: - algorithm: fixed - fixed: - limit: 20 -``` - -### zeebe.broker.backpressure.vegas - -| Field | Description | Example Value | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_VEGAS_INITIALLIMIT`. | 20 | -| alpha | The limit is increased if the queue size is less than this value. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_VEGAS_ALPHA`. | 3 | -| beta | The limit is decreased if the queue size is greater than this value. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_VEGAS_BETA`. 
| 6 | - -#### YAML snippet - -```yaml -backpressure: - algorithm: vegas - vegas: - initialLimit: 20 - alpha: 3 - beta: 6 -``` - -### zeebe.broker.backpressure.gradient - -| Field | Description | Example Value | -| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | - -#### YAML snippet - -```yaml -backpressure: - algorithm: gradient - gradient: - minLimit: 10 - initialLimit: 20 - rttTolerance: 2.0 -``` - -### zeebe.broker.backpressure.gradient2 - -| Field | Description | Example Value | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. 
| 600 | - -#### YAML snippet - -```yaml -backpressure: - algorithm: gradient2 - gradient2: - minLimit: 10 - initialLimit: 20 - rttTolerance: 2.0 - longWindow: 600 -``` - -### zeebe.broker.exporters - -Each exporter should be configured following this template: - -| Field | Description | Example Value | -| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| exporters | Configure exporters below | | -| jarPath | path to the JAR file containing the exporter class. JARs are only loaded once, so you can define two exporters that point to the same JAR, with the same class or a different one, and use args to parametrize its instantiation. | | -| className | entry point of the exporter, a class which _must_ extend the io.camunda.zeebe.exporter.Exporter interface. A nested table as "args:" will allow you to inject arbitrary arguments into your class through the use of annotations. These setting can also be overridden using the environment variables "`ZEEBE_BROKER_EXPORTERS_`[exporter name]\_..." | | - -#### YAML snippet - -```yaml -exporters: - jarPath: - className: -``` - -### zeebe.broker.exporters.elasticsearch - -An example configuration for the Elasticsearch exporter is below. - -These setting can also be overridden using the environment variables `ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_*` - -#### YAML snippet - -```yaml -exporters: - elasticsearch: - className: io.camunda.zeebe.exporter.ElasticsearchExporter - - args: - url: http://localhost:9200 - - bulk: - delay: 5 - size: 1000 - memoryLimit: 10485760 - - authentication: - username: elastic - password: changeme - - index: - prefix: zeebe-record - createTemplate: true - - numberOfShards: 3 - numberOfReplicas: 0 - - command: false - event: true - rejection: false - - commandDistribution: true - decisionRequirements: true - decision: true - decisionEvaluation: true - deployment: true - deploymentDistribution: true - error: true - escalation: true - incident: true - job: true - jobBatch: false - message: true - messageStartSubscription: true - messageSubscription: true - process: true - processEvent: false - processInstance: true - processInstanceCreation: true - processInstanceModification: true - processMessageSubscription: true - resourceDeletion: true - signal: true - signalSubscription: true - timer: true - variable: true - variableDocument: true - - retention: - enabled: false - minimumAge: 30d - policyName: zeebe-record-retention-policy -``` - -### zeebe.broker.exporters.opensearch (OpenSearch Exporter) - -An example configuration for the OpenSearch exporter. 
- -These settings can also be overridden using the environment variables `ZEEBE_BROKER_EXPORTERS_OPENSEARCH_*`. - -#### YAML snippet - -```yaml -exporters: - opensearch: - className: io.camunda.zeebe.exporter.opensearch.OpensearchExporter - - args: - url: http://localhost:9200 - requestTimeoutMs: 1000 - - bulk: - delay: 5 - size: 1000 - memoryLimit: 10485760 - - authentication: - username: opensearch - password: changeme - - aws: - enabled: true - serviceName: es - region: eu-west-1 - - index: - prefix: zeebe-record - createTemplate: true - - numberOfShards: 3 - numberOfReplicas: 0 - - command: false - event: true - rejection: false - - commandDistribution: true - decisionRequirements: true - decision: true - decisionEvaluation: true - deployment: true - deploymentDistribution: true - error: true - escalation: true - incident: true - job: true - jobBatch: false - message: true - messageStartSubscription: true - messageSubscription: true - process: true - processEvent: false - processInstance: true - processInstanceCreation: true - processInstanceModification: true - processMessageSubscription: true - resourceDeletion: true - signal: true - signalSubscription: true - timer: true - variable: true - variableDocument: true -``` - -### zeebe.broker.processing - -| Field | Description | Example Value | -| ------------------ | ----------- | ------------- | -| maxCommandsInBatch | Sets the maximum number of commands that are processed within one batch. The processor will process until no more follow-up commands are created by the initial command or the configured limit is reached. By default, up to 100 commands are processed in one batch. Can be set to 1 to disable batch processing. Must be a positive integer number. Note that the resulting batch size will contain more entries than this limit because it includes follow-up events. When the resulting batch size is too large (see maxMessageSize), processing will be rolled back and retried with a smaller maximum batch size. Lowering the command limit can reduce the frequency of rollback and retry. This setting can also be overridden using the environment variable `ZEEBE_BROKER_PROCESSING_MAXCOMMANDSINBATCH`. | 100 | - -#### YAML snippet - -```yaml -processing: - maxCommandsInBatch: 100 -``` - -### Experimental configuration - -See the experimental section of the [broker.yaml.template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template#L883). - -Be aware that all configurations which are part of the experimental section are subject to change and can be dropped at any time. 
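-
-For orientation, the non-experimental subsections documented above nest under `zeebe.broker` in `application.yaml` roughly as follows. This is only a sketch that reuses the example values from the tables above; it is not a complete or recommended configuration:
-
-```yaml
-zeebe:
-  broker:
-    cluster:
-      nodeId: 0
-      partitionsCount: 1
-      replicationFactor: 1
-      clusterSize: 1
-      clusterName: zeebe-cluster
-      messageCompression: NONE
-      raft:
-        enablePriorityElection: true
-    threads:
-      cpuThreadCount: 2
-      ioThreadCount: 2
-    backpressure:
-      enabled: true
-      algorithm: aimd
-    processing:
-      maxCommandsInBatch: 100
-```
-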
diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/configuration.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/configuration.md deleted file mode 100644 index 1975595de93..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/configuration.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -id: configuration -title: "Configuration" -sidebar_label: "Overview" -description: "Let's analyze how to configure Zeebe." ---- - -Zeebe can be configured through the following: - -- Configuration files -- Environment variables -- A mix of both - -If both configuration files and environment variables are present, environment variables overwrite settings in configuration files. - -To make small changes to the configuration, we recommend using environment variables. - -To make big changes to the configuration, we recommend using a configuration file. - -The configuration is applied during startup of Zeebe. It is not possible to change the configuration at runtime. - -## Default configuration - -The default configuration is located in `config/application.yaml`. This configuration contains the most common configuration settings for a standalone broker. It also lists the corresponding environment variable for each setting. - -:::note -The default configuration is not suitable for a standalone gateway node. To run a standalone gateway node, take a look at [the gateway configuration](gateway.md) or `/config/gateway.yaml.template`. -::: - -## Configuration file templates - -We provide templates that contain all possible configuration settings, along with explanations for each setting: - -- [`config/application.yaml` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/blob/main/dist/src/main/config/application.yaml) - Default configuration containing only the most common configuration settings. Use this as the basis for a single broker deployment for test or development. -- [`config/broker.standalone.yaml.template` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) - Complete configuration template for a standalone broker with embedded gateway. Use this as the basis for a single broker deployment for test or development. -- [`config/broker.yaml.template` Broker Node (without embedded gateway)](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) - Complete configuration template for a broker node without embedded gateway. Use this as the basis for deploying multiple broker nodes as part of a cluster. -- [`config/gateway.yaml.template`](https://github.com/camunda/camunda/blob/main/dist/src/main/config/gateway.yaml.template) - Complete configuration template for a standalone gateway. - -:::note -These templates also include the corresponding environment variables to use for every setting. -::: - -You may find it easier to search through our [broker](broker.md) and [gateway](gateway.md) configuration documentation to adjust the templates. - -## Editing the configuration - -You can either start from scratch or start from the configuration templates listed above. 
- -If you use a configuration template and want to uncomment certain lines, make sure to also uncomment their parent elements: - -```yaml -Valid Configuration - - zeebe: - gateway: - network: - # host: 0.0.0.0 - port: 26500 - -Invalid configuration - - # zeebe: - # gateway: - # network: - # host: 0.0.0.0 - port: 26500 -``` - -Uncommenting individual lines is a bit finicky, because YAML is sensitive to indentation. The best way to do it is to position the cursor before the `#` character and delete two characters (the `#` and the space). Doing this will consistently give you a valid YAML file. - -When it comes to editing individual settings, two data types are worth mentioning: - -- Data size (e.g. `logSegmentSize`) - - Human-friendly format: `500MB` (or `KB, GB`) - - Machine-friendly format: size in bytes as long -- Timeouts/intervals (e.g. `requestTimeout`) - - Human-friendly format: `15s` (or `m, h`) - - Machine-friendly format: either duration in milliseconds as long, or [ISO-8601 duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) format (e.g. `PT15S`) - -## Passing configuration files to Zeebe - -Rename the configuration file to `application.yaml` and place it in the following location: - -```shell script -./config/application.yaml -``` - -### Other ways to specify the configuration file - -Zeebe uses Spring Boot for its configuration parsing. All other ways to [configure a Spring Boot application](https://docs.spring.io/spring-boot/reference/features/external-config.html) should also work. In particular, you can use: - -- `SPRING_CONFIG_ADDITIONAL_LOCATION` to specify an additional configuration file. -- `SPRING_APPLICATION_JSON` to specify settings in JSON format. - -Details can be found in the Spring documentation. - -:::note -We recommend not using `SPRING_CONFIG_LOCATION`, as this will replace all existing configuration defaults. When used inappropriately, some features will be disabled or will not be configured properly. -::: - -If you specify `SPRING_CONFIG_LOCATION`, specify it like this: - -```shell script -export SPRING_CONFIG_LOCATION='classpath:/,file:./[path to config file]' -``` - -This will ensure the defaults defined in the classpath resources will be used (unless explicitly overwritten by the configuration file you provide). If you omit the defaults defined in the classpath, some features may be disabled or will not be configured properly. - -## Verifying configuration - -To verify the configuration was applied, start Zeebe and look at the log. - -If the configuration could be read, Zeebe logs the effective configuration during startup: - -``` -17:13:13.120 [] [main] INFO io.camunda.zeebe.broker.system - Starting broker 0 with configuration { - "network": { - "host": "0.0.0.0", - "portOffset": 0, - "maxMessageSize": { - "bytes": 4194304 - }, - "commandApi": { - "defaultPort": 26501, - "host": "0.0.0.0", - "port": 26501, -... -``` - -In some cases of invalid configuration, Zeebe will fail to start with a warning that explains which configuration setting could not be read. 
- -``` -17:17:38.796 [] [main] ERROR org.springframework.boot.diagnostics.LoggingFailureAnalysisReporter - - -*************************** -APPLICATION FAILED TO START -*************************** - -Description: - -Failed to bind properties under 'zeebe.broker.network.port-offset' to int: - - Property: zeebe.broker.network.port-offset - Value: false - Origin: System Environment Property "ZEEBE_BROKER_NETWORK_PORTOFFSET" - Reason: failed to convert java.lang.String to int - -Action: - -Update your application's configuration -``` - -## Logging - -Zeebe uses Log4j2 framework for logging. In the distribution and the Docker image, find the default log configuration file in `config/log4j2.xml`. - -### Google Stackdriver (JSON) logging - -To enable Google Stackdriver compatible JSON logging, set the environment variable `ZEEBE_LOG_APPENDER=Stackdriver` before starting Zeebe. - -### Change log level dynamically - -Zeebe brokers expose a [Spring Boot Actuators web endpoint](https://docs.spring.io/spring-boot/docs/current/actuator-api/html/#loggers) -for configuring loggers dynamically. -To change the log level of a logger, make a `POST` request to the `/actuator/loggers/{logger.name}` endpoint as shown in the example below. -Change `io.camunda.zeebe` to the required logger name and `debug` to required log level. - -``` -curl 'http://localhost:9600/actuator/loggers/io.camunda.zeebe' -i -X POST -H 'Content-Type: application/json' -d '{"configuredLevel":"debug"}' -``` - -## Health probes - -Health probes are set to sensible defaults which cover common use cases. - -For specific use cases, it might be necessary to customize health probes: - -- [Gateway health probes](gateway-health-probes.md) - -## Experimental configuration options - -You may have already noticed a special section of Zeebe's configuration templates titled `experimental`. -This section refers to settings which are potentially not backwards compatible. In other words, any configuration setting found there may or may not be dropped in any minor version. - -These settings are there primarily for incubating features and/or very advanced settings for which the team has not found -a good general default configuration. Once one is found, or the incubating feature is promoted, the setting(s) may be moved -into a different section. Only at that point do they fall under the same backwards compatibility guarantees as the rest of -the project. We may choose to drop support for specific experimental configurations in any minor version update. - -Most users should not have to change anything in this section for a good experience. However, if you have a unique set up, or simply wish to try out new experimental features, it can be worth investigating these (ideally with the guidance of the Zeebe community). diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/environment-variables.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/environment-variables.md deleted file mode 100644 index 46a78bbe95c..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/environment-variables.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: environment-variables -title: "Environment variables" -description: "Let's take a closer look at the environment variables for configuration, operators, and developers." 
---- - -## Environment variables for configuration - -As a Spring Boot application, Zeebe supports any standard -[Spring configuration](https://docs.spring.io/spring-boot/reference/features/external-config.html) method. This configuration can be provided as a configuration file, through environment variables, or both. When both sources are used, environment variables have precedence over the configuration file. - -All available environment variables are documented in the [configuration file templates](configuration.md#configuration-file-templates). - -## Environment variables for operators - -The following environment variables are intended for operators: - -- `ZEEBE_LOG_LEVEL`: Sets the log level of the Zeebe Logger (default: `info`). -- `ZEEBE_LOG_APPENDER`: Sets the console log appender (default: `Console`). We recommend using `Stackdriver` if Zeebe runs on Google Cloud Platform to output JSON formatted log messages. - -## Environment variables for developers - -The following environment variables are intended for developers: - -- `SPRING_PROFILES_ACTIVE=dev`: If this is set, the broker starts in a temporary folder and all data is cleaned up upon exit. -- `ZEEBE_DEBUG=true/false`: Activates a `DebugLogExporter` with default settings. The value of the environment variable toggles pretty printing. - -:::note -It is not recommended to use these settings in production. -::: diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/fixed-partitioning.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/fixed-partitioning.md deleted file mode 100644 index 5ca71d90f4a..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/fixed-partitioning.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -id: fixed-partitioning -title: "Fixed partitioning" -description: "Manually configure which partitions belong to which brokers." ---- - -Starting with 1.2.0, there is a new experimental configuration option which lets you specify a fixed partitioning scheme; this means you can manually configure which partitions belong to which brokers. - -The partitioning scheme is controlled via a new configuration option under `zeebe.broker.experimental.partitioning`, -more specifically `zeebe.broker.experimental.partitioning.scheme`. This option currently takes the following values: - -- `ROUND_ROBIN`: When set, this applies the round-robin partition distribution, which corresponds to the distribution explained above on this page. _This is the default option, and requires no extra configuration if you want to use it._ -- `FIXED`: When set, this applies a manually configured partition distribution, configured separately. - -To use the `FIXED` partitioning scheme, _you must provide an exhaustive map of all partitions to a set of brokers_. This is achieved via the `zeebe.broker.experimental.partitioning.fixed` configuration option. The example below outlines a cluster of `5` brokers, `3` partitions, and a replication factor of `3`. 
- -```yaml -partitioning: - scheme: FIXED - fixed: - - partitionId: 1 - nodes: - - nodeId: 0 - - nodeId: 2 - - nodeId: 4 - - partitionId: 2 - nodes: - - nodeId: 1 - - nodeId: 3 - - nodeId: 4 - - partitionId: 3 - nodes: - - nodeId: 0 - - nodeId: 2 - - nodeId: 3 -``` - -This configuration will produce the following distribution: - -| | Node 0 | Node 1 | Node 2 | Node 3 | Node 4 | -| ----------: | :----: | :----: | :----: | :----: | :----: | -| Partition 1 | X | | X | | X | -| Partition 2 | | X | | X | X | -| Partition 3 | X | | X | X | | - -## Validation - -Each broker performs reasonable checks on the `FIXED` configuration provided. Namely, the configuration must uphold the following conditions: - -- All partitions _must be explicitly configured_. -- All partitions configured must have valid IDs, i.e. between 1 and `zeebe.broker.cluster.partitionsCount`. -- All partitions must configure exactly the replicas count, i.e. `zeebe.broker.cluster.replicationFactor`. -- All nodes configured for a partition have a valid node ID, i.e. between 0 and `zeebe.broker.cluster.clusterSize - 1`. -- If priority election is enabled, all priorities configured for a partition are different. - -The broker will fail to start if any of these conditions are not met. - -## Priority election - -If you're using the priority election feature, you must also specify the priorities of each broker. In fact, the broker will fail to start if the nodes do not have different priorities, as otherwise you may encounter lengthy election loops. - -Here is the same example configuration as above, but this time with priorities configured: - -```yaml -partitioning: - scheme: FIXED - fixed: - - partitionId: 1 - nodes: - - nodeId: 0 - priority: 1 - - nodeId: 2 - priority: 2 - - nodeId: 4 - priority: 3 - - partitionId: 2 - nodes: - - nodeId: 1 - priority: 1 - - nodeId: 3 - priority: 3 - - nodeId: 4 - priority: 2 - - partitionId: 3 - nodes: - - nodeId: 0 - priority: 3 - - nodeId: 2 - priority: 2 - - nodeId: 3 - priority: 1 -``` - -:::note -The only condition is that the priorities for the nodes of a given partition must be different from one another. We recommend, however, that you use a simple monotonic increase from 1 to the replica count, as shown above. -::: diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/gateway-health-probes.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/gateway-health-probes.md deleted file mode 100644 index 30b30d86615..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/gateway-health-probes.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -id: gateway-health-probes -title: "Gateway health probes" -description: "This section outlines health status, probes, and responsiveness." ---- - -The health status for a standalone gateway is available at `{zeebe-gateway}:8080/actuator/health`. - -The following health indicators are enabled by default: - -- **Gateway Started** - Checks if the gateway is running (i.e. not currently starting and not yet shut down). -- **Gateway Responsive** - Checks if the gateway can handle a request within a given timeout. -- **Gateway Cluster Awareness** - Checks if the gateway is aware of other nodes in the cluster. -- **Gateway Partition Leader Awareness** - Checks if the gateway is aware of partition leaders in the cluster. -- **Disk Space** - Checks that the free disk space is greater than 10 MB. -- **Memory** - Checks that at least 10% of max memory (heap) is still available. 
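-
-The examples above show only the `partitioning` subtree. In a full broker configuration, this subtree sits under `zeebe.broker.experimental`, next to the matching cluster settings. The following is only a sketch using the 5-broker example above (remaining partitions omitted for brevity; see the full example earlier on this page):
-
-```yaml
-zeebe:
-  broker:
-    cluster:
-      clusterSize: 5
-      partitionsCount: 3
-      replicationFactor: 3
-    experimental:
-      partitioning:
-        scheme: FIXED
-        fixed:
-          - partitionId: 1
-            nodes:
-              - nodeId: 0
-              - nodeId: 2
-              - nodeId: 4
-```
-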
- -Health indicators are set to sensible defaults. For specific use cases, it might be necessary to customize health probes. - -## Startup probe - -The started probe is available at `{zeebe-gateway}:8080/actuator/health/startup`. - -In the default configuration this is merely an alias for the **Gateway Started** health indicator. Other configurations are possible (see below). - -## Liveness probe - -The liveness probe is available at `{zeebe-gateway}:8080/actuator/health/liveness`. - -It is based on the health indicators mentioned above. - -In the default configuration, the liveness probe is comprised of the following health indicators: - -- **Gateway Started** - Checks if the gateway is running (i.e. not currently starting and not yet shut down). -- **Liveness Gateway Responsive** - Checks if the gateway can handle a request within an ample timeout, but will only report a `DOWN` health status after the underlying health indicator is down for more than 10 minutes. -- **Liveness Gateway Cluster Awareness** - Based on gateway cluster awareness, but will only report a `DOWN` health status after the underlying health indicator is down for more than five minutes. -- **Liveness Gateway Partition Leader Awareness** - Based on gateway partition leader awareness, but will only report a `DOWN` health status after the underlying health indicator is down for more than five minutes. -- **Liveness Disk Space** - Checks that the free disk space is greater than 1 MB. -- **Liveness Memory** - Checks that at least 1% of max memory (heap) is still available. - -:::note -Health indicators with the _liveness_ prefix are intended to be customized for the liveness probe. This allows defining tighter thresholds (e.g. for free memory 1% for liveness vs. 10% for health), as well as adding tolerance for short downtimes (e.g. gateway has no awareness of other nodes in the cluster for more than five minutes). -::: - -## Customizing health probes - -Global settings for all health indicators: - -- `management.health.defaults.enabled=true` - Enables (default) or disables all health indicators. -- `management.endpoint.health.show-details=always/never` - Toggles whether a summary or details (default) of the health indicators will be returned. - -### Startup probe - -Settings for started probe: - -- `management.endpoint.health.group.startup.show-details=never` - Toggles whether a summary (default) or details of the startup probe will be returned. -- `management.endpoint.health.group.startup.include=gatewayStarted` - Defines which health indicators are included in the startup probe. - -### Liveness probe - -Settings for liveness probe: - -- `management.endpoint.health.group.liveness.show-details=never` - Toggles whether a summary (default) or details of the liveness probe will be returned. -- `management.endpoint.health.group.liveness.include=gatewayStarted,livenessGatewayResponsive,livenessGatewayClusterAwareness,livenessGatewayPartitionLeaderAwareness,livenessDiskSpace,livenessMemory` - Defines which health indicators are included in the liveness probe. - -:::note -The individual contributing health indicators of the liveness probe can be configured as well (see below). -::: - -### Gateway started - -Settings for gateway started health indicator: - -- `management.health.gateway-started.enabled=true` - Enables (default) or disables this health indicator. 
- -### Gateway responsive - -Settings for the gateway responsiveness health indicator: - -- `management.health.gateway-responsive.enabled=true` - Enables (default) or disables this health indicator. -- `management.health.gateway-responsive.requestTimeout=500ms` - Defines the timeout for the request; if the test completes before the timeout, the health status is `UP`, otherwise it is `DOWN`. -- `management.health.liveness.gateway-responsive.requestTimeout=5s` - Defines the timeout for the request for the liveness probe; if the request completes before the timeout, the health status is `UP`. -- `management.health.liveness.gateway-responsive.maxdowntime=10m` - Defines the maximum downtime before the liveness health indicator for responsiveness will flip. - -### Gateway cluster awareness - -Settings for the gateway cluster awareness health indicator: - -- `management.health.gateway-clusterawareness.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.liveness.gateway-clusterawareness.maxdowntime=5m` - Defines the maximum downtime before the liveness health indicator for cluster awareness will flip. In other words, this health indicator will report `DOWN` after the gateway was unaware of other members in the cluster for more than five minutes. - -### Gateway partition leader awareness - -Settings for the gateway partition leader awareness health indicator: - -- `management.health.gateway-partitionleaderawareness.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.liveness.gateway-partitionleaderawareness.maxdowntime=5m` - Defines the maximum downtime before the liveness health indicator for partition leader awareness will flip. In other words, this health indicator will report `DOWN` after the gateway was unaware of partition leaders for more than five minutes. - -### Disk space - -This is arguably the least critical health indicator given the standalone gateway does not write to disk. The only exception may be the writing of log files, which depends on the log configuration. - -Settings for the disk space health indicator: - -- `management.health.diskspace.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.diskspace.threshold=10MB` - Defines the threshold for the required free disk space. -- `management.health.diskspace.path=.` - Defines the path for which the free disk space is examined. -- `management.health.liveness.diskspace.threshold=1MB` - Defines the threshold for the required free disk space for liveness. -- `management.health.liveness.diskspace.path=.` - Defines the path for which the free disk space for liveness is examined. - -### Memory - -This health indicator examines free memory (heap). - -Settings for the memory health indicator: - -- `management.health.memory.enabled=true` - Enables (default) or disables this health indicator (and its liveness counterpart). -- `management.health.memory.threshold=0.1` - Defines the threshold for the required free memory. The default is 0.1, which is interpreted as 10% of max memory. -- `management.health.liveness.memory.threshold=0.01` - Defines the threshold for the required free memory for liveness. The default is 0.01, which is interpreted as 1% of max memory. 
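-
-These `management.*` keys are regular Spring Boot properties, so they can typically be supplied either as environment variables or in the gateway's `application.yaml`. The following is only a minimal sketch, assuming the property names bind the same way in YAML form, with the default values discussed above:
-
-```yaml
-management:
-  endpoint:
-    health:
-      show-details: always
-  health:
-    memory:
-      enabled: true
-      threshold: 0.1
-    liveness:
-      memory:
-        threshold: 0.01
-      diskspace:
-        threshold: 1MB
-```
-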
diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/gateway.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/gateway.md deleted file mode 100644 index 39711f1f6f4..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/gateway.md +++ /dev/null @@ -1,322 +0,0 @@ ---- -id: gateway-config -title: "Gateway configuration" -sidebar_label: "Gateway configuration" -description: "Analyze how to configure the Zeebe gateway, including byte sizes, time units, paths, and sample YAML snippets." ---- - -The Zeebe Gateway can be configured similarly to the broker via the `application.yaml` file or environment variables. A complete gateway configuration template is available in the [Zeebe repository](https://github.com/camunda/camunda/blob/main/dist/src/main/config/gateway.yaml.template). - -:::info Configure an embedded gateway -If you're configuring a gateway that is embedded inside of a broker (i.e. you've set [`zeebe.broker.gateway.enable`](./broker.md#zeebebrokergateway)), then you must use `zeebe.broker.gateway.*` instead of `zeebe.gateway.*` for any of the configuration options below. For environment variables this means you must use `ZEEBE_BROKER_GATEWAY_*` instead of `ZEEBE_GATEWAY_*`. -::: - -## Conventions - -Take the following conventions into consideration when working with the gateway configuration. - -### Byte sizes - -Buffers and data values referencing sizing must be specified as strings and follow the following format: "10U" where U (unit) must be replaced with KB = Kilobytes, MB = Megabytes or GB = Gigabytes. If unit is omitted then the default unit is simply bytes. - -For example, `sendBufferSize = "16MB"` creates a buffer of 16 Megabytes. - -### Time units - -Timeouts and intervals must be specified either in the standard ISO-8601 format used by `java.time.Duration`, or as strings with the following format: "VU", where: - -- V is a numerical value (e.g. 1, 5, 10, etc.) -- U is the unit, one of: ms = Millis, s = Seconds, m = Minutes, or h = Hours - -### Paths - -Relative paths are resolved relative to the installation directory of the broker. - -## Configuration - -We provide tables with environment variables, application properties, a description, and corresponding default values in the following sections. We also describe a few use cases for each type of configuration. - -Configuration names are noted as the **header** of each documented section, while the **field** values represent properties to set the configuration. - -For deploying purposes, it is easier to use environment variables. The following sections outline usage of these variables. As Helm is the recommended way to deploy Camunda 8, we will explain some configuration options here as well. Find more information about possible Zeebe Gateway Helm chart [configurations](https://artifacthub.io/packages/helm/camunda/camunda-platform#zeebe-gateway-parameters). - -### zeebe.gateway.network - -The network configuration allows configuration of the host and port details for the gateway. 
- -| Field | Description | Example value | -| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| host | Sets the host the gateway binds to. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_NETWORK_HOST`. | 0.0.0.0 | -| port | Sets the port the gateway binds to. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_NETWORK_PORT`. | 26500 | -| minKeepAliveInterval | Sets the minimum keep alive interval. This setting specifies the minimum accepted interval between keep alive pings. This value must be specified as a positive integer followed by 's' for seconds, 'm' for minutes, or 'h' for hours. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_NETWORK_MINKEEPALIVEINTERVAL`. | 30s | -| maxMessageSize | Sets the maximum size of the incoming and outgoing messages (i.e. commands and events). Apply the same setting on the broker too, see `ZEEBE_BROKER_NETWORK_MAXMESSAGESIZE`. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_NETWORK_MAXMESSAGESIZE`. | 4MB | - -#### YAML snippet - -```yaml -network: - host: 0.0.0.0 - port: 26500 - minKeepAliveInterval: 30s - maxMessageSize: 4MB -``` - -### zeebe.gateway.cluster - -As mentioned, the gateway needs to connect to the Zeebe brokers. - -It is important to configure the cluster's initial contact point to the Zeebe brokers. You may set only one of the Zeebe brokers, but keep in mind that resiliency will be lower than using all the Zeebe brokers available. The corresponding environment variable is called `ZEEBE_GATEWAY_CLUSTER_INITIALCONTACTPOINTS`. - -It is necessary to use the same cluster name for the broker and gateway. Otherwise, a connection will not be possible. The related configuration property is `zeebe.gateway.cluster.clusterName` and as an environment variable, it is called `ZEEBE_GATEWAY_CLUSTER_CLUSTERNAME`. - -If you use the Helm charts, both properties are configured for you already. - -| Field | Description | Example value | -| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | -| initialContactPoints | Sets initial contact points (brokers), which the gateway should contact. The contact points of the internal network configuration must be specified. The format is [HOST:PORT]. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_INITIALCONTACTPOINTS` specifying a comma-separated list of contact points. | [ 192.168.1.22:26502, 192.168.1.32:26502 ] | -| contactPoint | WARNING: This setting is deprecated! Use initialContactPoints instead. Sets the broker the gateway should initial contact. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_CONTACTPOINT`. | 127.0.0.1:26502 | -| requestTimeout | Sets the timeout of requests sent to the broker cluster. 
This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_REQUESTTIMEOUT`. | 15s | -| clusterName | Sets name of the Zeebe cluster to connect to. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_CLUSTERNAME`. | zeebe-cluster | -| memberId | Sets the member id of the gateway in the cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERID`. | gateway | -| host | Sets the host the gateway node binds to for internal cluster communication. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_HOST`. | 0.0.0.0 | -| port | Sets the port the gateway node binds to for internal cluster communication. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_PORT`. | 26502 | -| advertisedHost | Controls the advertised host; if omitted defaults to the host. This is particularly useful if your gateway stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_ADVERTISEDHOST`. | 0.0.0.0 | -| advertisedPort | Controls the advertised port; if omitted defaults to the port. This is particularly useful if your gateway stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_ADVERTISEDPORT`. | 25602 | - -#### YAML snippet - -```yaml -cluster: - initialContactPoints: 127.0.0.1:26502 - contactPoint: 127.0.0.1:26502 - requestTimeout: 15s - clusterName: zeebe-cluster - memberId: gateway - host: 0.0.0.0 - port: 26502 - advertisedHost: 0.0.0.0 - advertisedPort: 25602 -``` - -### zeebe.gateway.cluster.membership - -To configure how the gateway connects and distributes information with other nodes (brokers or gateways) via SWIM, the following properties can be used. It might be useful to increase timeouts for setups that encounter a high latency between nodes. - -| Field | Description | Example value | -| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| broadcastUpdates | Configure whether to broadcast member updates to all members. If set to `false`, updates will be gossiped among the members. If set to `true`, the network traffic may increase but reduce the time to detect membership changes. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_BROADCASTUPDATES`. | false | -| broadcastDisputes | Configure whether to broadcast disputes to all members. If set to `true`, the network traffic may increase but reduce the time to detect membership changes. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_BROADCASTDISPUTES`. | true | -| notifySuspect | Configure whether to notify a suspect node on state changes. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_NOTIFYSUSPECT`. | false | -| gossipInterval | Sets the interval at which the membership updates are sent to a random member. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_GOSSIPINTERVAL`. 
| 250ms | -| gossipFanout | Sets the number of members to which membership updates are sent at each gossip interval. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_GOSSIPFANOUT`. | 2 | -| probeInterval | Sets the interval at which to probe a random member. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_PROBEINTERVAL`. | 1s | -| probeTimeout | Sets the timeout for a probe response. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_PROBETIMEOUT`. | 100ms | -| suspectProbes | Sets the number of probes failed before declaring a member is suspect. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_SUSPECTPROBES`. | 3 | -| failureTimeout | Sets the timeout for a suspect member is declared dead. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_FAILURETIMEOUT`. | 10s | -| syncInterval | Sets the interval at which this member synchronizes its membership information with a random member. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERSHIP_SYNCINTERVAL`. | 10s | - -#### YAML snippet - -```yaml -membership: - broadcastUpdates: false - broadcastDisputes: true - notifySuspect: false - gossipInterval: 250ms - gossipFanout: 2 - probeInterval: 1s - probeTimeout: 100ms - suspectProbes: 3 - failureTimeout: 10s - syncInterval: 10s -``` - -### zeebe.gateway.cluster.security - -The cluster security configuration options allow securing communication between the gateway and other nodes in the cluster. - -:::note - -You can read more about intra-cluster security on [its dedicated page](../security/secure-cluster-communication.md). - -::: - -| Field | Description | Example value | -| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| enabled | Enables TLS authentication between this gateway and other nodes in the cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_SECURITY_ENABLED`. | false | -| certificateChainPath | Sets the path to the certificate chain file. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_SECURITY_CERTIFICATECHAINPATH`. | | -| privateKeyPath | Sets the path to the private key file location. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_SECURITY_PRIVATEKEYPATH`. | | - -#### YAML snippet - -```yaml -security: - enabled: false - certificateChainPath: null - privateKeyPath: null -``` - -### zeebe.gateway.cluster.security.authentication - -| Field | Description | Example value | -| ----- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| mode | Controls which authentication mode is active; supported modes are `none` and `identity`. 
If `identity` is set, authentication will be done using [camunda-identity](/self-managed/identity/what-is-identity.md), which needs to be configured in the corresponding subsection. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_SECURITY_AUTHENTICATION_MODE`. | none | - -#### YAML snippet - -```yaml -security: - authentication: - mode: none -``` - -### zeebe.gateway.cluster.security.authentication.identity - -| Field | Description | Example value | -| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | -| issuerBackendUrl | The URL to the auth provider backend, used to validate tokens. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_SECURITY_AUTHENTICATION_IDENTITY_ISSUERBACKENDURL`. | http://keycloak:8080/auth/realms/camunda-platform | -| audience | The required audience of the auth token. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_SECURITY_AUTHENTICATION_IDENTITY_AUDIENCE`. | zeebe-api | -| type | The identity auth type to apply, one of `keycloak` or `auth0`. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_SECURITY_AUTHENTICATION_IDENTITY_TYPE`. | keycloak | - -#### YAML snippet - -```yaml -security: - authentication: - mode: identity - identity: - issuerBackendUrl: http://keycloak:8080/auth/realms/camunda-platform - audience: zeebe-api - type: keycloak -``` - -### zeebe.gateway.cluster.messageCompression - -To configure the compression algorithm for all messages sent between the gateway and -the brokers, the following property can be set. Available options are NONE, GZIP, and SNAPPY. -This feature is useful when the network latency between the nodes is very high (for example, when nodes are deployed in different data centers). - -When latency is high, the network bandwidth is severely reduced. Therefore, enabling compression helps improve the throughput. You need to decide between reducing bandwidth or reducing resources required for compression. - -:::caution -When this flag is enabled, you must also enable compression in the standalone broker configuration. When there is no latency enabling, this may have a performance impact. -::: - -| Field | Description | Example value | -| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| messageCompression | Configure compression algorithm for all messages sent between the gateway and the brokers. Available options are NONE, GZIP, and SNAPPY. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MESSAGECOMPRESSION`. | NONE | - -#### YAML snippet - -```yaml -messageCompression: NONE -``` - -### zeebe.gateway.threads - -To handle many concurrent incoming requests, the user can do two things: scale the deployed gateways (if the standalone mode is in use), or increase the used resources and threads. - -The Zeebe Gateway uses one thread by default, but this should be set to a higher number if the gateway doesn’t exhaust its available resources and doesn’t keep up with the load. 
The corresponding environment variables look like this: `ZEEBE_GATEWAY_THREADS_MANAGEMENTTHREADS`. -During benchmarking and when increasing the thread count, it may also make sense to increase the given resources, which are quite small in the Helm chart. - -For high availability and redundancy, two Zeebe Gateways are deployed by default with the Helm charts. To change that amount, set `zeebe-gateway.replicas` to a different number. Increasing the number of gateway replicas to more than one enables quick failover: if one gateway dies, the remaining gateway(s) can handle the traffic. - -To explore how the gateway behaves and what it does, consume its metrics. By default, the gateway exports Prometheus metrics, which can be scraped under `:9600/actuator/prometheus`. - -| Field | Description | Example value | -| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| managementThreads | Sets the number of threads the gateway will use to communicate with the broker cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_THREADS_MANAGEMENTTHREADS`. | 1 | - -#### YAML snippet - -```yaml -threads: - managementThreads: 1 -``` - -### zeebe.gateway.security - -The client security configuration options allow securing the communication between a gateway and clients. - -:::note - -You can read more about client-gateway security on [its dedicated page](../security/secure-client-communication.md). - -::: - -| Field | Description | Example value | -| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| enabled | Enables TLS authentication between clients and the gateway. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_SECURITY_ENABLED`. | false | -| certificateChainPath | Sets the path to the certificate chain file. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_SECURITY_CERTIFICATECHAINPATH`. | | -| privateKeyPath | Sets the path to the private key file location. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_SECURITY_PRIVATEKEYPATH`. | | - -#### YAML snippet - -```yaml -security: - enabled: false - certificateChainPath: - privateKeyPath: -``` - -### zeebe.gateway.longPolling - -It's possible to configure gateway long-polling behavior. Read more on long-polling behavior [here](../../../components/concepts/job-workers.md#long-polling). - -| Field | Description | Example value | -| ------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | -| enabled | Enables long polling for available jobs. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_LONGPOLLING_ENABLED`. | true | - -#### YAML snippet - -```yaml -longPolling: - enabled: true -``` - -### zeebe.gateway.interceptors - -It is possible to intercept requests in the gateway, which can be configured via environment variables or the `application.yaml` file. For more details, read about [interceptors](/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md). 
- -Each interceptor should be configured with the values described below: -
-| Field | Description | Example value |
-| --------- | ----------- | ------------- |
-| id | Identifier for this interceptor. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_INTERCEPTORS_0_ID`. | |
-| jarPath | Path (relative or absolute) to the JAR file containing the interceptor class and its dependencies. All classes must be compiled for the same language version as Zeebe or lower. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_INTERCEPTORS_0_JARPATH`. | |
-| className | Entry point of the interceptor, a class which must implement `io.grpc.ServerInterceptor`, have public visibility, and have a public default constructor (i.e. a no-arg constructor). This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_INTERCEPTORS_0_CLASSNAME`. | |
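For illustration only: the indexed environment variables (`ZEEBE_GATEWAY_INTERCEPTORS_0_...`) suggest that interceptors are declared as a list, so a single, purely hypothetical entry might look like the following sketch. The id, JAR path, and class name are placeholders, not real artifacts:

```yaml
interceptors:
  # ZEEBE_GATEWAY_INTERCEPTORS_0_ID
  - id: example-interceptor
    # ZEEBE_GATEWAY_INTERCEPTORS_0_JARPATH
    jarPath: /usr/local/zeebe/lib/example-interceptor.jar
    # ZEEBE_GATEWAY_INTERCEPTORS_0_CLASSNAME
    className: com.example.ExampleInterceptor
```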
    - -#### YAML snippet - -```yaml -interceptors: - id: null - jarPath: null - className: null -``` diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/logging.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/logging.md deleted file mode 100644 index 86f1bb38643..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/logging.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -id: logging -title: "Logging" ---- - -Zeebe uses Log4j2 framework for logging. In the distribution and the Docker image, find the default log configuration file in `config/log4j2.xml`. - -## Google Stackdriver (JSON) logging - -To enable Google Stackdriver compatible JSON logging, set the environment variable `ZEEBE_LOG_APPENDER=Stackdriver` before starting Zeebe. - -## Default logging configuration - -- `config/log4j2.xml` (applied by default) - -```xml - - - - - ${sys:app.home}/logs - %d{yyyy-MM-dd HH:mm:ss.SSS} [%X{actor-name}] [%t] %-5level %logger{36} - %msg%n - ${env:ZEEBE_LOG_STACKDRIVER_SERVICENAME:-} - ${env:ZEEBE_LOG_STACKDRIVER_SERVICEVERSION:-} - - - - - - - - - - - - - - ${log.pattern} - - - - - - - - - - - - - - - - - - - - - - -``` - -## Change log level dynamically - -Zeebe brokers expose a [Spring Boot Actuators web endpoint](https://docs.spring.io/spring-boot/docs/current/actuator-api/html/#loggers) for configuring loggers dynamically. -To change the log level of a logger, make a `POST` request to the `/actuator/loggers/{logger.name}` endpoint as shown in the example below. -Change `io.camunda.zeebe` to the required logger name and `debug` to required log level. - -``` -curl 'http://localhost:9600/actuator/loggers/io.camunda.zeebe' -i -X POST -H 'Content-Type: application/json' -d '{"configuredLevel":"debug"}' -``` diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/priority-election.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/priority-election.md deleted file mode 100644 index 4e466a0640a..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/configuration/priority-election.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -id: priority-election -title: "Priority election" -description: "An alternative to the default raft leader election." ---- - -Priority election is an alternative to the default raft leader election, where leader election is implemented by a random timer-based algorithm. - -It aims to achieve a more uniform leader distribution by assigning each node a priority per partition and modifying the election algorithm to ensure nodes with higher priority have a higher chance of becoming leader. - -## Configuration - -Enable priority election by setting `zeebe.broker.raft.enablePriorityElection = "true"` in your config or -by setting the equivalent environment variable `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`. - -If you are using the fixed partitioning scheme (experimental), you may need [additional configuration](fixed-partitioning.md#priority-election). - -## Limitations - -With priority election enabled, election latency and thus failover time increases. - -The result of leader election is not deterministic and priority election can only increase the chance of having a -uniform leader distribution, not guarantee it. - -Factors such as high load can prevent high priority nodes from becoming the leader. 
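As a minimal sketch, the priority election flag described in the configuration section above could be set in a broker `application.yaml` as follows; only the key path `zeebe.broker.raft.enablePriorityElection` is taken from the text, the rest is standard YAML nesting:

```yaml
zeebe:
  broker:
    raft:
      # Equivalent to ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"
      enablePriorityElection: true
```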
diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md deleted file mode 100644 index 032c9f22dac..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md +++ /dev/null @@ -1,215 +0,0 @@ ---- -id: elasticsearch-exporter -title: "Elasticsearch exporter" -sidebar_label: "Elasticsearch" -description: "The Zeebe Elasticsearch exporter acts as a bridge between Zeebe and Elasticsearch." ---- - -The Zeebe Elasticsearch exporter acts as a bridge between -[Zeebe](https://zeebe.io/) and [Elasticsearch](https://www.elastic.co/products/elasticsearch) by -exporting records written to Zeebe streams as documents into several indices. - -## Concept - -The exporter operates on the idea that it should perform as little as possible on the Zeebe side of -things. In other words, you can think of the indexes into which the records are exported as a -staging data warehouse. Any enrichment or transformation on the exported data should be performed by -your own ETL jobs. - -When configured to do so, the exporter will automatically create an index per record value type (see the value type in the Zeebe protocol). Each of these indexes has a -corresponding pre-defined mapping to facilitate data ingestion for your own ETL jobs. You can find those as templates in [the resources folder of the exporter's source code](https://github.com/camunda/camunda/tree/8.2.0/exporters/elasticsearch-exporter/src/main/resources). - -:::note -The indexes are created as required, and will not be created twice if they already exist. However, once disabled, they will not be deleted (that is up to the administrator.) Similarly, data is never deleted by the exporter, and must be deleted by the administrator when it is safe to do so. -A [retention](#retention) policy can be configured to automatically delete data after a certain number of days. -::: - -## Configuration - -:::note -As the exporter is packaged with Zeebe, it is not necessary to specify a `jarPath`. -::: - -The exporter can be enabled by configuring it with the `classpath` in the broker settings. - -For example: - -```yaml -exporters: - elasticsearch: - className: io.camunda.zeebe.exporter.ElasticsearchExporter - args: - # Refer to the table below for the available args options -``` - -The exporter can be configured by providing `args`. The table below explains all the different -options, and the default values for these options: - -| Option | Description | Default | -| ---------------- | ---------------------------------------------------------------------------------------- | ----------------------- | -| url | Valid URLs as comma-separated string | `http://localhost:9200` | -| requestTimeoutMs | Request timeout (in ms) for the Elasticsearch. client | `30000` | -| index | Refer to [Index](#index) for the index configuration options. | | -| bulk | Refer to [Bulk](#bulk) for the bulk configuration options. | | -| retention | Refer to [Retention](#retention) for the retention configuration options | | -| authentication | Refer to [Authentication](#authentication) for the authentication configuration options. | | - -### Index - -In most cases, you will not be interested in exporting every single record produced by a Zeebe -cluster, but rather only a subset of them. This can also be configured to limit the kinds of records -being exported (e.g. 
only events, no commands), and the value type of these records (e.g. only job -and process values). - -| Option | Description | Default | -| ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | -| prefix | This prefix will be appended to every index created by the exporter; must not contain `_` (underscore). | zeebe-record | -| createTemplate | If `true` missing indexes will be created automatically. | `true` | -| numberOfShards | The number of [shards](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#_static_index_settings) used for each new record index created. | 3 | -| numberOfReplicas | The number of shard [replicas](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html#dynamic-index-settings) used for each new record index created. | 0 | -| command | If `true` command records will be exported | `false` | -| event | If `true` event records will be exported | `true` | -| rejection | If `true` rejection records will be exported | `false` | -| checkpoint | If `true` records related to checkpoints will be exported | `false` | -| commandDistribution | If `true` records related to command distributions will be exported | `true` | -| decision | If `true` records related to decisions will be exported | `true` | -| decisionEvaluation | If `true` records related to decision evaluations will be exported | `true` | -| decisionRequirements | If `true` records related to decisionRequirements will be exported | `true` | -| deployment | If `true` records related to deployments will be exported | `true` | -| deploymentDistribution | If `true` records related to deployment distributions will be exported | `true` | -| error | If `true` records related to errors will be exported | `true` | -| escalation | If `true` records related to escalations will be exported | `true` | -| incident | If `true` records related to incidents will be exported | `true` | -| job | If `true` records related to jobs will be exported | `true` | -| jobBatch | If `true` records related to job batches will be exported | `false` | -| message | If `true` records related to messages will be exported | `true` | -| messageSubscription | If `true` records related to message subscriptions will be exported | `true` | -| messageStartEventSubscription | If `true` records related to message start event subscriptions will be exported | `true` | -| process | If `true` records related to processes will be exported | `true` | -| processEvent | If `true` records related to process events will be exported | `false` | -| processInstance | If `true` records related to process instances will be exported | `true` | -| processInstanceBatch | If `true` records related to process instances batches will be exported | `false` | -| processInstanceCreation | If `true` records related to process instance creations will be exported | `true` | -| processInstanceModification | If `true` records related to process instance modifications will be exported | `true` | -| processMessageSubscription | If `true` records related to process message subscriptions will be exported | `true` | -| resourceDeletion | If `true` records related to resource deletions will be exported | `true` | -| signal | If `true` records related to signals will be exported | `true` | -| signalSubscription | If `true` records related to signal subscriptions will 
be exported | `true` | -| timer | If `true` records related to timers will be exported | `true` | -| variable | If `true` records related to variables will be exported | `true` | -| variableDocument | If `true` records related to variable documents will be exported | `true` | - -### Bulk - -To avoid too many expensive requests to the Elasticsearch cluster, the exporter performs batch -updates by default. The size of the batch, along with how often it should be flushed (regardless of -size) can be controlled by configuration. - -| Option | Description | Default | -| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| delay | Delay, in seconds, before force flush of the current batch. This ensures that even when we have low traffic of records, we still export every once in a while. | `5` | -| size | The amount of records a batch should have before we flush the batch | `1000` | -| memoryLimit | The size of the batch, in bytes, before we flush the batch | `10485760` (10 MB) | - -With the default configuration, the exporter will aggregate records and flush them to Elasticsearch: - -1. When it has aggregated 1000 records. -2. When the batch memory size exceeds 10 MB. -3. Five seconds have elapsed since the last flush (regardless of how many records were aggregated). - -### Retention - -A retention policy can be set up to delete old data. -When enabled, this creates an Index Lifecycle Management (ILM) Policy that deletes the data after the specified `minimumAge`. -All index templates created by this exporter apply the created ILM Policy. - -| Option | Description | Default | -| ---------- | ---------------------------------------------------------------------------- | ------------------------------- | -| enabled | If `true` the ILM Policy is created and applied to the index templates | `false` | -| minimumAge | Specifies how old the data must be, before the data is deleted as a duration | `30d` | -| policyName | The name of the created and applied ILM policy | `zeebe-record-retention-policy` | - -:::note -The duration can be specified in days `d`, hours `h`, minutes `m`, seconds `s`, milliseconds `ms`, and/or nanoseconds `nanos`. -::: - -### Authentication - -Providing these authentication options will enable Basic Authentication on the exporter. - -| Option | Description | Default | -| -------- | ----------------------------- | ------- | -| username | Username used to authenticate | N/A | -| password | Password used to authenticate | N/A | - -## Example - -Here is an example configuration of the exporter: - -```yaml ---- -exporters: - elasticsearch: - # Elasticsearch Exporter ---------- - # An example configuration for the elasticsearch exporter: - # - # These setting can also be overridden using the environment variables "ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_..." - # - - className: io.camunda.zeebe.exporter.ElasticsearchExporter - - args: - # A comma separated list of URLs pointing to the Elasticsearch instances you wish to export to. 
- # For example, if you want to connect to multiple nodes for redundancy: - # url: http://localhost:9200,http://localhost:9201 - url: http://localhost:9200 - - bulk: - delay: 5 - size: 1000 - memoryLimit: 10485760 - - retention: - enabled: true - minimumAge: 30d - policyName: zeebe-records-retention-policy - - authentication: - username: elastic - password: changeme - - index: - prefix: zeebe-record - createTemplate: true - - command: false - event: true - rejection: false - - commandDistribution: true - decisionRequirements: true - decision: true - decisionEvaluation: true - deployment: true - deploymentDistribution: true - error: true - escalation: true - incident: true - job: true - jobBatch: false - message: true - messageStartSubscription: true - messageSubscription: true - process: true - processEvent: false - processInstance: true - processInstanceCreation: true - processInstanceModification: true - processMessageSubscription: true - resourceDeletion: true - signal: true - signalSubscription: true - timer: true - variable: true - variableDocument: true -``` diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/exporters.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/exporters.md deleted file mode 100644 index 1ac12a4c1f1..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/exporters.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -id: exporters -title: "Exporters" -sidebar_label: "Overview" ---- - -Zeebe comes packaged with two exporters: - -- [Elasticsearch](elasticsearch-exporter.md) -- [OpenSearch](opensearch-exporter.md) - -This section of the docs explains how these exporters can be [installed](install-zeebe-exporters.md) and configured. - -For a general overview on the exporters concept, refer to our [exporters concept](../../concepts/exporters.md) page. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md deleted file mode 100644 index 203c3a698b1..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -id: install-zeebe-exporters -title: "Install Zeebe exporters" -description: "Add dynamic exporters to Zeebe brokers in Camunda 8 Self-Managed deployment." ---- - -Camunda 8 Self-Managed Helm chart supports the addition of Zeebe exporters by using `initContainer`. - -The following is an example to install Zeebe [Hazelcast](https://github.com/camunda-community-hub/zeebe-hazelcast-exporter) and [Kafka](https://github.com/camunda-community-hub/zeebe-kafka-exporter) exporters. 
- -```yaml -extraInitContainers: - - name: init-exporters-hazelcast - image: busybox:1.35 - command: ["/bin/sh", "-c"] - args: - [ - "wget --no-check-certificate https://repo1.maven.org/maven2/io/zeebe/hazelcast/zeebe-hazelcast-exporter/0.8.0-alpha1/zeebe-hazelcast-exporter-0.8.0-alpha1-jar-with-dependencies.jar -O /exporters/zeebe-hazelcast-exporter.jar; ls -al /exporters", - ] - volumeMounts: - - name: exporters - mountPath: /exporters/ - - name: init-exporters-kafka - image: busybox:1.35 - command: ["/bin/sh", "-c"] - args: - [ - "wget --no-check-certificate https://github.com/zeebe-io/zeebe-kafka-exporter/releases/download/1.1.0/zeebe-kafka-exporter-1.1.0-uber.jar -O /exporters/zeebe-kafka-exporter.jar; ls -al /exporters", - ] - volumeMounts: - - name: exporters - mountPath: /exporters/ -env: - - name: ZEEBE_BROKER_EXPORTERS_HAZELCAST_JARPATH - value: /exporters/zeebe-hazelcast-exporter.jar - - name: ZEEBE_BROKER_EXPORTERS_HAZELCAST_CLASSNAME - value: io.zeebe.hazelcast.exporter.HazelcastExporter - - name: ZEEBE_HAZELCAST_REMOTE_ADDRESS - value: "{{ .Release.Name }}-hazelcast" -``` - -This example is downloading the exporters' JAR from a URL and adding the JAR to the `exporters` directory, -which will be scanned for JARs and added to the Zeebe broker classpath. Then, with `environment variables`, -you can configure the exporter parameters. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/opensearch-exporter.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/opensearch-exporter.md deleted file mode 100644 index bc8ba19f07d..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/exporters/opensearch-exporter.md +++ /dev/null @@ -1,215 +0,0 @@ ---- -id: opensearch-exporter -title: "OpenSearch exporter" -sidebar_label: "OpenSearch" -description: "The Zeebe OpenSearch exporter acts as a bridge between Zeebe and OpenSearch." ---- - -The Zeebe OpenSearch Exporter acts as a bridge between -[Zeebe](https://camunda.com/platform/zeebe/) and [OpenSearch](https://opensearch.org) by -exporting records written to Zeebe streams as documents into several indices. - -## Concept - -The exporter operates on the idea that it should perform as little as possible on the Zeebe side of -things. In other words, you can think of the indexes into which the records are exported as a -staging data warehouse. Any enrichment or transformation on the exported data should be performed by -your own ETL jobs. - -When configured to do so, the exporter will automatically create an index per -record value type (see the value type in the Zeebe protocol). Each of these indexes has a -corresponding pre-defined mapping to facilitate data ingestion for your own ETL jobs. You can find -those as templates in this module's resources folder. - -:::note -The indexes are created as required, and will not be created twice if they already exist. However, once disabled, they will not be deleted (that is up to the administrator). Similarly, data is never deleted by the exporter, and must be deleted by the administrator when it is safe to do so. -::: - -## Configuration - -:::note -As the exporter is packaged with Zeebe, it is not necessary to specify a `jarPath`. -::: - -The exporter can be enabled by configuring it with the classpath in the broker settings. 
- -For example: - -```yaml -exporters: - opensearch: - className: io.camunda.zeebe.exporter.opensearch.OpensearchExporter - args: - # Refer to the table below for the available args options -``` - -The exporter can be configured by providing `args`. The table below explains all the different -options, and the default values for these options: - -| Option | Description | Default | -| ---------------- | ---------------------------------------------------------------------------------------- | ----------------------- | -| url | Valid URLs as comma-separated string | `http://localhost:9200` | -| requestTimeoutMs | Request timeout (in ms) for the OpenSearch client. | `30000` | -| index | Refer to [Index](#index) for the index configuration options. | | -| bulk | Refer to [Bulk](#bulk) for the bulk configuration options. | | -| authentication | Refer to [Authentication](#authentication) for the authentication configuration options. | | -| aws | Refer to [AWS](#aws) for the AWS configuration options. | | - -### Index - -In most cases, you will not be interested in exporting every single record produced by a Zeebe -cluster, but rather only a subset of them. This can also be configured to limit the kinds of records exported (e.g. only events, no commands), and the value type of these records (e.g. only job -and process values). - -| Option | Description | Default | -| ----------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | -| prefix | This prefix will be appended to every index created by the exporter; must not contain `_` (underscore). | zeebe-record | -| createTemplate | If `true` missing indexes will be created automatically. | `true` | -| numberOfShards | The number of [shards](https://opensearch.org/docs/latest/install-and-configure/configuring-opensearch/index-settings/#static-index-level-index-settings) used for each new record index created. | 3 | -| numberOfReplicas | The number of shard [replicas](https://opensearch.org/docs/latest/install-and-configure/configuring-opensearch/index-settings/#dynamic-index-level-index-settings) used for each new record index created. 
| 0 | -| command | If `true` command records will be exported | `false` | -| event | If `true` event records will be exported | `true` | -| rejection | If `true` rejection records will be exported | `false` | -| checkpoint | If `true` records related to checkpoints will be exported | `false` | -| commandDistribution | If `true` records related to command distributions will be exported | `true` | -| decision | If `true` records related to decisions will be exported | `true` | -| decisionEvaluation | If `true` records related to decision evaluations will be exported | `true` | -| decisionRequirements | If `true` records related to decisionRequirements will be exported | `true` | -| deployment | If `true` records related to deployments will be exported | `true` | -| deploymentDistribution | If `true` records related to deployment distributions will be exported | `true` | -| error | If `true` records related to errors will be exported | `true` | -| escalation | If `true` records related to escalations will be exported | `true` | -| incident | If `true` records related to incidents will be exported | `true` | -| job | If `true` records related to jobs will be exported | `true` | -| jobBatch | If `true` records related to job batches will be exported | `false` | -| message | If `true` records related to messages will be exported | `true` | -| messageSubscription | If `true` records related to message subscriptions will be exported | `true` | -| messageStartEventSubscription | If `true` records related to message start event subscriptions will be exported | `true` | -| process | If `true` records related to processes will be exported | `true` | -| processEvent | If `true` records related to process events will be exported | `false` | -| processInstance | If `true` records related to process instances will be exported | `true` | -| processInstanceBatch | If `true` records related to process instances batches will be exported | `false` | -| processInstanceCreation | If `true` records related to process instance creations will be exported | `true` | -| processInstanceModification | If `true` records related to process instance modifications will be exported | `true` | -| processMessageSubscription | If `true` records related to process message subscriptions will be exported | `true` | -| resourceDeletion | If `true` records related to resource deletions will be exported | `true` | -| signal | If `true` records related to signals will be exported | `true` | -| signalSubscription | If `true` records related to signal subscriptions will be exported | `true` | -| timer | If `true` records related to timers will be exported | `true` | -| variable | If `true` records related to variables will be exported | `true` | -| variableDocument | If `true` records related to variable documents will be exported | `true` | - -### Bulk - -To avoid too many expensive requests to the OpenSearch cluster, the exporter performs batch -updates by default. The size of the batch, along with how often it should be flushed (regardless of -size) can be controlled by configuration. - -| Option | Description | Default | -| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| delay | Delay, in seconds, before force flush of the current batch. This ensures that even when we have low traffic of records, we still export every once in a while. 
| `5` | -| size | The amount of records a batch should have before we flush the batch. | `1000` | -| memoryLimit | The size of the batch, in bytes, before we flush the batch. | `10485760` (10 MB) | - -With the default configuration, the exporter would aggregate records and flush them to OpenSearch -either: - -1. When it has aggregated 1000 records. -2. When the batch memory size exceeds 10 MB. -3. Five seconds have elapsed since the last flush (regardless of how many records were aggregated). - -### Authentication - -Providing these authentication options will enable Basic Authentication on the exporter. - -| Option | Description | Default | -| -------- | ----------------------------- | ------- | -| username | Username used to authenticate | N/A | -| password | Password used to authenticate | N/A | - -### AWS - -When running OpenSearch in AWS, you may require requests to be signed. By enabling AWS in the -configurations, a request interceptor will be added to the exporter. This interceptor will take care -of signing the requests. - -Signing requests requires credentials. These credentials are not directly configurable in the -exporter. Instead, they are resolved by following the -[Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). - -| Option | Description | Default | -| ----------- | --------------------------------------------------------------------------------------- | -------------------------------------------------- | -| enabled | Enables AWS request signing | `false` | -| serviceName | AWS' name of the service to where requests are made. For OpenSearch this should be `es` | `es` | -| region | The region this exporter is running in | The value of the `AWS_REGION` environment variable | - -## Example - -Here is an example configuration of the exporter: - -```yaml ---- -exporters: - opensearch: - # Opensearch Exporter ---------- - # An example configuration for the opensearch exporter: - # - # These setting can also be overridden using the environment variables "ZEEBE_BROKER_EXPORTERS_OPENSEARCH_..." - - className: io.camunda.zeebe.exporter.opensearch.OpensearchExporter - - args: - # A comma separated list of URLs pointing to the Opensearch instances you wish to export to. 
- # For example, if you want to connect to multiple nodes for redundancy: - # url: http://localhost:9200,http://localhost:9201 - url: http://localhost:9200 - - bulk: - delay: 5 - size: 1000 - memoryLimit: 10485760 - - authentication: - username: opensearch - password: changeme - - aws: - enabled: true - serviceName: es - region: eu-west-1 - - index: - prefix: zeebe-record - createTemplate: true - - command: false - event: true - rejection: false - - commandDistribution: true - decisionRequirements: true - decision: true - decisionEvaluation: true - deployment: true - deploymentDistribution: true - error: true - escalation: true - incident: true - job: true - jobBatch: false - message: true - messageStartSubscription: true - messageSubscription: true - process: true - processEvent: false - processInstance: true - processInstanceCreation: true - processInstanceModification: true - processMessageSubscription: true - resourceDeletion: true - signal: true - signalSubscription: true - timer: true - variable: true - variableDocument: true -``` diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/assets/example-setup-cluster.png b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/assets/example-setup-cluster.png deleted file mode 100644 index ba8f550286b..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/assets/example-setup-cluster.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/assets/grafana-preview.png b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/assets/grafana-preview.png deleted file mode 100644 index dd611ebc1fc..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/assets/grafana-preview.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/backpressure.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/backpressure.md deleted file mode 100644 index e2a582b394a..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/backpressure.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -id: backpressure -title: "Backpressure" -description: "This document outlines an overview of backpressure and its accompanying assets." -keywords: [back-pressure, backpressure, back pressure] ---- - -When a broker receives a client request, it is written to the **event stream** first (see section [internal processing](/components/zeebe/technical-concepts/internal-processing.md) for details), and processed later by the stream processor. - -If the processing is slow or if there are many client requests in the stream, it might take too long for the processor to start processing the command. -If the broker keeps accepting new requests from the client, the backlog increases and the processing latency can grow beyond an acceptable time. - -To avoid such problems, Zeebe employs a backpressure mechanism. When the broker receives more requests than it can process with an acceptable latency, it rejects some requests (see [technical error handling](/apis-tools/grpc.md#technical-error-handling)). - -### Terminology - -- **RTT** - Round-Trip Time, known as the time between when the request is accepted by the broker and when the response to the request is sent back to the gateway. -- **Inflight count** - The number of requests accepted by the broker but the response is not yet sent. 
-- **Limit** - Maximum number of flight requests. When the inflight count is above the limit, any new incoming request is rejected. - -:::note -The limit and inflight count are calculated per partition. -::: - -### Backpressure algorithms - -Zeebe uses adaptive algorithms from [concurrency-limits](https://github.com/Netflix/concurrency-limits) to dynamically calculate the limit. -Configure Zeebe with one of the backpressure algorithms in the following sections. - -The default values can be found in the [Zeebe broker standalone configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) or in the [Zeebe broker configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) in the `# backpressure` section. - -#### Fixed limit - -With **fixed limit**, one can configure a fixed value of the limit. -Zeebe operators are recommended to evaluate the latencies observed with different values for limit. -Note that with different cluster configurations, you may have to choose different limit values. - -#### AIMD - -**Additive increase/multiplicative decrease (AIMD)** calculates the limit based on the configured _requestTimeout_. -When the RTT for a request is shorter than _requestTimeout_, the limit is increased by 1. -When the RTT is longer than _requestTimeout_, -the limit will be reduced according to the configured _backoffRatio_. - -#### Vegas - -Vegas is an adaptive limit algorithm based on TCP Vegas congestion control algorithm. -Vegas estimates a base latency as the minimum observed latency. -This base RTT is the expected latency when there is no load. -Whenever the RTT deviates from the base RTT, a new limit is calculated based on the Vegas algorithm. -Vegas allows you to configure two parameters - _alpha_ and _beta_. -The values correspond to a queue size estimated by the Vegas algorithm based on the observed RTT, base RTT, and current limit. -When the queue size is below _alpha_, the limit is increased. -When the queue size is above _beta_, the limit is decreased. - -#### Gradient - -Gradient is an adaptive limit algorithm that dynamically calculates the limit based on observed RTT. -In the gradient algorithm, the limit is adjusted based on the gradient of observed RTT and an observed minimum RTT. -If gradient is less than 1, the limit is decreased. Otherwise, the limit is increased. - -#### Gradient2 - -Gradient2 is similar to Gradient, but instead of using observed minimum RTT as the base, it uses an exponentially smoothed average RTT. - -## Backpressure tuning - -The goal of backpressure is to keep the processing latency low. -The processing latency is calculated as the time between the command is written to the event stream until it is processed. -To see how backpressure behaves, run a benchmark on your cluster and observe the following metrics: - -- `zeebe_stream_processor_latency_bucket` -- `zeebe_dropped_request_count_total` -- `zeebe_received_request_count_total` -- `zeebe_backpressure_requests_limit` - -You may want to run the benchmark with different loads: - -1. With low load - Where the number of requests sent per second is low. -2. With high load - Where the number of requests sent per second is above what Zeebe can process within a reasonable latency. - -If the value of the limit is small, the processing latency will be small, but the number of rejected requests may be high. 
-If the value of the limit is large, fewer requests may be rejected (depending on the request rate), -but the processing latency may increase. - -When using **fixed limit**, you can run the benchmark with different values for the limit. -You can then determine a suitable value for a limit for which the processing latency (`zeebe_stream_processor_latency_bucket`) is within the desired latency. - -When using **AIMD**, you can configure a `requestTimeout` which corresponds to a desired latency. -Note that during high load, AIMD can lead to a processing latency two times more than the configured `requestTimeout`. -It is also recommended to configure a `minLimit` to prevent the limit from aggressively dropping during constant high load. - -When using **Vegas**, you cannot configure the backpressure to a desired latency. -Instead, Vegas tries to keep the RTT as low as possible based on the observed minimum RTT. - -Similar to Vegas, you cannot configure the desired latency in Gradient and Gradient2. -They calculated the limit based on the gradient of observed RTT from the expected RTT. -The higher the value of _rttTolerance_, the higher deviations are tolerated that results in higher values for limit. - -If a lot of requests are rejected due to backpressure, it might indicate that the processing capacity of the cluster is not enough to handle the expected throughput. -If this is the expected workload, you might consider a different configuration for the cluster, such as provisioning more resources and increasing the number of nodes and partitions. - -## Potential issues - -The rate limiter used by Zeebe to implement backpressure may use `System.nanoTime()` to measure the RTT of requests. In some systems, we've observed consecutive calls to this method can return equal or even decreasing values. [Low clock resolution](https://shipilev.net/blog/2014/nanotrusting-nanotime) and [monotonicity](https://bugs.openjdk.java.net/browse/JDK-6458294) [issues](https://stackoverflow.com/questions/3657289/linux-clock-gettimeclock-monotonic-strange-non-monotonic-behavior) are some of the most likely culprits of this. If this happens, it's recommended to configure the backpressure to use the **fixed** algorithm. Without a clock with sufficient resolution, adaptive backpressure algorithms are not useful. - -## Next steps - -Looking for more information on backpressure? Visit our documentation on [internal processing and backpressure](/components/zeebe/technical-concepts/internal-processing.md#handling-backpressure). diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/backups.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/backups.md deleted file mode 100644 index b5f00e20a72..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/backups.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -id: backups -title: "Backups" -description: "A guide to creating and installing Zeebe backups." -keywords: ["backup", "backups"] ---- - -:::note -Refer to our documentation on [backup and restore](/self-managed/backup-restore/backup-and-restore.md) to learn how to take backups without downtime. -::: - -As Zeebe fully manages the state of your process instances, consider taking backups of Zeebe data; this is crucial to prevent data loss, roll back application-level errors, and more. - -Zeebe is fault-tolerant and replicates state internally. Backups are only necessary if you'd like to protect against the loss of entire replica sets or data corruption bugs. 
- -State of other components, such as Operate and Tasklist, is not managed by Zeebe and must be backed up separately. - -Taking backups is a manual process that is highly dependent on your infrastructure and deployment. Camunda does not provide an automated backup mechanism or tool. However, we do offer the following guidance to create and execute a successful backup. - -## Cold backups - -Cold backups, also called offline backups, require **downtime**. - -During the downtime, processes don't make progress and clients can't communicate with Zeebe. -To make sure that the downtime doesn't cause issues for your clients, you should test how your clients behave during the downtime, or shut them down as well. - -### Shutting down all brokers in the cluster - -To take a consistent backup, all brokers must be shut down first. - -As soon as brokers shut down, partitions become unhealthy and clients lose connections to Zeebe or experience full backpressure. -To prevent unnecessary failovers during the shutdown process, we recommend shutting down all brokers at the same time instead of a gradual shutdown. - -Wait for all brokers to fully shut down before proceeding to the next step. - -### Creating the backup - -:::note -The `data` folder contains symbolic and hard links which may require special attention when copying, depending on your environment. -::: - -To create the backup, take the following steps: - -1. Each broker has a data folder where all state is persisted. The location of the data folder is [configured](../configuration/configuration.md) via `zeebe.broker.data.directory`. Create a copy of the data folder and store it in a safe location. - -If you have direct access to the broker, for example in a bare-metal setup, you can do this by creating a tarball like this: `tar caf backup.tar.gz data/`. - -You may also use filesystem snapshots or [Kubernetes volume snapshots](https://kubernetes.io/docs/concepts/storage/volume-snapshots/) if that fits your environment better - -2. Double-check that your tool of choice supports symbolic and hard links. -3. Do not merge or otherwise modify data folders as this might result in data loss and unrestorable backups. -4. Save the broker configuration to ensure the replacement cluster can process the backed-up data. - -See the following example on how a backup may look: - -```bash -$ tree zeebe-backup-* -zeebe-backup-2021-01-31 -├── zeebe-broker-0-config.yml -├── zeebe-broker-0-data.tar.gz -├── zeebe-broker-1-config.yml -├── zeebe-broker-1-data.tar.gz -├── zeebe-broker-2-config.yml -└── zeebe-broker-2-data.tar.gz -``` - -### Resuming - -After taking the backup, brokers can be started again and will automatically resume with processing. - -## Restore from backup - -### Prepare replacement cluster - -:::note Caution -Always use the same or the next minor version of Zeebe that you were using when taking the backup. -Using a different version may result in data corruption or data loss. -See the [update guide](/guides/update-guide/introduction.md) for more details. -::: - -Ensure your replacement cluster has the same number of brokers as the old cluster and uses the [same node IDs](setting-up-a-cluster.md#configuration). - -### Shutting down all brokers in the replacement cluster - -Before installing the backup, ensure all brokers are fully shut down. - -### Installing the backup - -To install the backup, take the following steps: - -1. Delete the existing data folder on each broker of your replacement cluster. -2. 
For each broker, copy over the configuration and the data folder. -3. You may need to slightly adjust the configuration for your replacement cluster, for example to update IP addresses. - -### Starting the Zeebe cluster - -After replacing the data folders, brokers can be started again and will automatically resume with processing. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/disk-space.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/disk-space.md deleted file mode 100644 index 12cdff52389..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/disk-space.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: disk-space -title: "Disk space" -description: "Understand how Zeebe uses the local disk for storage of its persistent data, and configuring Zeebe settings for the disk usage watermarks." ---- - -Zeebe uses the local disk for storage of its persistent data. Therefore, if the Zeebe broker runs out of disk space, the system is in an invalid state as the broker cannot update its state. - -To prevent the system from reaching an unrecoverable state, Zeebe expects a minimum size of free disk space available. If this limit is violated, the broker rejects new requests to allow the operations team to free more disk space, and allows the broker to continue to update its state. - -Zeebe can be configured with the following settings for the disk usage: - -- **zeebe.broker.data.disk.enablemonitoring**: Configure if disk usage should be monitored (default: true) -- **zeebe.broker.data.disk.monitoringInterval**: The interval in which the disk space usage is checked (default: 1 second) -- **zeebe.broker.data.disk.freeSpace.replication**: When the free space available is less than this value, Zeebe pauses receiving replicated events. (default: 1GB) - - For **production** use cases, we recommend to increase this value and set it approximately to `number of partitions x logSegmentSize + 1GB`. -- **zeebe.broker.data.disk.freeSpace.processing**: When the free space available is less than this value, Zeebe rejects all user commands and pauses processing. (default: 2GB) - - This must be greater than `freeSpace.replication`. - - For **production** use cases, we recommend increasing this value and setting it at a minimum of `number of partitions x 2 x logSegmentSize + 1GB`. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/health.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/health.md deleted file mode 100644 index 4cfa1698f05..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/health.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -id: health -title: "Health status" -description: "This document analyzes health status checks and responses." ---- - -## Broker - -Zeebe broker exposes three HTTP endpoints to query its health status: - -- Startup check -- Ready check -- Health check - -### Startup check - -Startup check endpoint is exposed via `http://{zeebe-broker-host}:9600/startup`. -This endpoint returns an empty 204 response. If it is not ready, it will return a 503 error. - -A broker has successfully started when: - -- The broker has found other brokers in the cluster. -- All partitions owned by this broker have started and participate in replication. -- Other necessary services have started. - -A successful startup does not mean the broker is ready to process requests. -The broker is ready only after startup has successfully completed. 
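As a sketch, the startup endpoint can back a Kubernetes `startupProbe` on a broker container. This assumes the default monitoring port `9600`; the timing values are illustrative only:

```yaml
# Hypothetical probe definition for a Zeebe broker container
startupProbe:
  httpGet:
    path: /startup
    port: 9600
  periodSeconds: 10
  failureThreshold: 30
```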
- -### Ready check - -Ready check endpoint is exposed via `http://{zeebe-broker-host}:9600/ready`. -This endpoint returns an empty 204 response. If it is not ready, it will return a 503 error. - -A broker is ready when it installs all necessary services to start processing in all partitions. -If a broker is ready, it doesn't mean it's the leader for the partitions. -It means it is participating in the replication and can be either a leader or a follower of all the partitions that are assigned to it. -Once it is ready, it never becomes unready again. - -A ready check is useful, for example, to use as a `readinessProbe` in a Kubernetes configuration to control when a pod can be restarted for rolling update. -Depending on the cluster configuration, restarting one pod before the previous one is ready might make the system unavailable because the quorum of replicas is not available. -By configuring a `readinessProbe` that uses the ready check endpoint, we can inform Kubernetes when it is safe to proceed with the rolling update. - -### Health check - -Health check endpoint is exposed via `http://{zeebe-broker-host}:9600/health`. -This endpoint returns an empty 204 response if the broker is healthy. If it is not healthy, it will return a 503 error. -A broker is never healthy before it is ready. -Unlike ready check, a broker can become unhealthy after it is healthy. -Hence, it gives a better status of a running broker. - -A broker is healthy when it can process processes, accept commands, and perform all its expected tasks. -If it is unhealthy, it may mean three things: - -- **It is only temporarily unhealthy**: For example, due to environmental circumstances such as temporary I/O issues. -- **It is partially unhealthy**: One or more partitions could be unhealthy, while the rest of them are able to process processes. -- **It is completely dead** - -[Metrics](metrics.md) give more insight into which partition is healthy or unhealthy. -When a broker becomes unhealthy, it's recommended to check the logs to see what went wrong. - -(The default broker port can be configured using environment variables - respectively `SERVER_PORT` and `SERVER_ADDRESS` - or system properties - respectively `-Dserver.port=` or `-Dserver.address=` - to configure them) - -## Gateway - -Zeebe gateway exposes three HTTP endpoints to query its health status: - -- Health status - `http://{zeebe-gateway}:9600/health` -- Startup probe - `http://{zeebe-gateway}:9600/actuator/health/startup` -- Liveness probe - `http://{zeebe-gateway}:9600/actuator/health/liveness` - -(The default port can be changed in the configuration: `{zeebe.gateway.monitoring.port}`) - -### Health status - -The gateway is **healthy** if it: - -- Started successfully -- Has sufficient free memory and disk space to work with -- Is able to respond to requests within a defined timeout -- Is aware of other nodes in the cluster -- Is aware of leaders for partitions -- All its partitions are healthy - -The gateway is **degraded** if it also meets the **healthy** standards above, with the exception that at least **one** partition is healthy instead of **all** partitions. - -### Startup probe - -The gateway starts if it finished its boot sequence successfully and is ready to receive requests. It no longer starts when it initiates the shutdown sequence. - -The started probe can be used as Kubernetes startup probe. 
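Similar to the broker startup check above, a sketch of a Kubernetes `startupProbe` for the gateway container, again assuming the default monitoring port `9600` and illustrative timing values:

```yaml
# Hypothetical probe definition for a Zeebe gateway container
startupProbe:
  httpGet:
    path: /actuator/health/startup
    port: 9600
  periodSeconds: 10
  failureThreshold: 30
```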
- -### Liveness probe - -The gateway is live if it: - -- Started successfully -- Has a minimal amount of free memory and disk space to work with -- Is able to respond to requests within a defined timeout, or misses the timeout for less than 10 minutes -- Is aware of other nodes in the cluster, or lost awareness of other nodes for less than five minutes -- Is aware of leaders for partitions, or lost awareness of partition leaders for less than five minutes - -The liveness probe can be used as Kubernetes liveness probe. - -### Status responses - -Each endpoint returns a status which can be one of the following: - -- `UNKNOWN` (HTTP status code 200) -- `UP` (HTTP status code 200) -- `DOWN` (HTTP status code 503) -- `OUT_OF_SERVICE` (HTTP status code 503) - -If details are enabled (default), the response will also contain additional details. - -### Customization - -Health indicators are set to sensible defaults. For specific use cases, it might be necessary to [customize health indicators](../configuration/gateway-health-probes.md). diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/management-api.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/management-api.md deleted file mode 100644 index fcd7d9983e9..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/management-api.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -id: management-api -title: "Management API" -description: "Zeebe Gateway also exposes an HTTP endpoint for cluster management operations." ---- - -Besides the [gRPC API](/apis-tools/grpc.md) for process instance execution, Zeebe Gateway also exposes an HTTP endpoint for cluster management operations. This API is not expected to be used by a typical user, but by a privileged user such as a cluster administrator. It is exposed via a different port and configured using configuration `server.port` (or via environment variable SERVER_PORT). By default, this is set to `9600`. - -The API is a custom endpoint available via [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/2.0.x/reference/html/production-ready-endpoints.html). For additional configurations such as security, refer to the Spring Boot documentation. - -The following operations are currently available: - -- [Rebalancing](/self-managed/zeebe-deployment/operations/rebalancing.md) -- [Pause and resume exporting](#exporting-api) - -## Exporting API - -Exporting API is used: - -- As a debugging tool. -- When taking a backup of Camunda 8 (see [backup and restore](/self-managed/backup-restore/backup-and-restore.md)). - -### Pause exporting - -To pause exporting on all partitions, send the following request to the gateway's management endpoint. - -``` -POST actuator/exporting/pause -``` - -When all partitions pause exporting, a successful response is received. If the request fails, some partitions may have paused exporting. Therefore, it is important to either retry until success or revert the partial pause by resuming exporting. - -### Soft pause exporting - -The soft pause feature can be used when you want to continue exporting records, but don't want to delete those records (log compaction) from Zeebe. This is particularly useful during hot backups. Learn more about [using this feature for hot backups](/self-managed/backup-restore/backup-and-restore.md). - -``` -POST actuator/exporting/pause?soft=true -``` - -When all partitions soft pause exporting, a successful response is received. 
If the request fails, some partitions may have soft paused exporting. Therefore, either retry until success or revert the partial soft pause by resuming the export. - -The soft pause feature is only available from 8.2.27 onwards. - -### Resume exporting - -After exporting is paused, it must eventually be resumed. Otherwise, the cluster could become unavailable. To resume exporting, send the following request to the gateway's management endpoint: - -``` -POST actuator/exporting/resume -``` - -When all partitions have resumed exporting, a successful response is received. If the request fails, only some partitions may have resumed exporting. Therefore, it is important to retry until successful. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/metrics.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/metrics.md deleted file mode 100644 index 5c322a883e8..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/metrics.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -id: metrics -title: "Metrics" -keywords: ["backpressure", "back-pressure", "back pressure"] ---- - -When operating a distributed system like Zeebe, it is important to put proper monitoring in place. To facilitate this, Zeebe exposes an extensive set of metrics over an embedded HTTP server. - -## Types of metrics - -- **Counters**: A time series that records a growing count of some unit. Examples: number of bytes transmitted over the network, number of process instances started. -- **Gauges**: A time series that records the current size of some unit. Examples: number of currently open client connections, current number of partitions. - -## Metrics format - -Zeebe exposes metrics directly in the [Prometheus text format][prom-format]. - -**Example:** - -``` -# HELP zeebe_stream_processor_records_total Number of events processed by stream processor -# TYPE zeebe_stream_processor_records_total counter -zeebe_stream_processor_records_total{action="written",partition="1",} 20320.0 -zeebe_stream_processor_records_total{action="processed",partition="1",} 20320.0 -zeebe_stream_processor_records_total{action="skipped",partition="1",} 2153.0 -``` - -## Enable additional metrics - -Metrics are exported by default. To enable execution metrics, set the `ZEEBE_BROKER_EXECUTION_METRICS_EXPORTER_ENABLED` environment variable to `true` in your Zeebe [configuration file](../configuration/configuration.md). - -## Connect Prometheus - -Zeebe exposes the metrics over an HTTP server. The default port is `9600`. - -Add the following entry to your `prometheus.yml`: - -``` -- job_name: zeebe - scrape_interval: 15s - metrics_path: /metrics - scheme: http - static_configs: - - targets: - - localhost: 9600 -``` - -## Available metrics - -All Zeebe-related metrics have a `zeebe_`-prefix. - -Most metrics have the following common label: - -- `partition`: Cluster-unique id of the partition - -:::note -Both brokers and gateways expose their respective metrics. The brokers have an optional metrics exporter that can be enabled for maximum insight. -::: - -**Metrics related to process processing:** - -- `zeebe_stream_processor_records_total`: The number of events processed by the stream processor. - The `action` label separates processed, skipped, and written events. -- `zeebe_exporter_events_total`: The number of events processed by the exporter processor. - The `action` label separates exported and skipped events. 
-- `zeebe_element_instance_events_total`: The number of occurred process element instance events. - The `action` label separates the number of activated, completed, and terminated elements. - The `type` label separates different BPMN element types. -- `zeebe_job_events_total`: The number of job events. The `action` label separates the number of - created, activated, timed out, completed, failed, and canceled jobs. -- `zeebe_incident_events_total`: The number of incident events. The `action` label separates the number - of created and resolved incident events. -- `zeebe_pending_incidents_total`: The number of currently pending incident, i.e. not resolved. - -**Metrics related to performance:** - -Zeebe has a backpressure mechanism by which it rejects requests when it receives more requests than it can handle without incurring high processing latency. - -Monitor backpressure and processing latency of the commands using the following metrics: - -- `zeebe_dropped_request_count_total`: The number of user requests rejected by the broker due to backpressure. -- `zeebe_backpressure_requests_limit`: The limit for the number of inflight requests used for backpressure. -- `zeebe_stream_processor_latency_bucket`: The processing latency for commands and event. - -**Metrics related to health:** - -The health of partitions in a broker can be monitored by the metric `zeebe_health`. - -[prom-format]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details - -## Grafana - -Zeebe comes with a pre-built dashboard, available in the repository: -[monitor/grafana/zeebe.json](https://github.com/camunda/camunda/blob/stable/8.2/monitor/grafana/zeebe.json). - -[Import](https://grafana.com/docs/grafana/latest/reference/export_import/#importing-a-dashboard) it into your Grafana instance and select the correct Prometheus data source (important if you have more than one). You will then be greeted with the following dashboard, which displays a healthy cluster topology, general throughput metrics, handled requests, exported events per second, disk and memory usage, and more. - -![Grafana dashboard](assets/grafana-preview.png) - -You can also try out an [interactive version](https://snapshots.raintank.io/dashboard/snapshot/Vbu3EHQMTI5Onh5RKuiS5J7QSMd7Sp5V), where you can explore help messages for every panel and get a feel for what data is available. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/network-ports.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/network-ports.md deleted file mode 100644 index 03f5c610127..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/network-ports.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: network-ports -title: "Network ports" ---- - -The broker cluster sits behind the gRPC Gateway, which handles all requests from clients/workers and forwards events to brokers. - -## Gateway - -The gateway needs to receive communication via `zeebe.gateway.network.port: 26500` from clients/workers, and `zeebe.gateway.cluster.initialContactPoints: [127.0.0.1:26502]` from brokers. - -:::note -You can use all broker connections instead of one to make the startup process of the Zeebe gateway more resilient. 
-::: - -The relevant [configuration](../configuration/configuration.md) settings are: - -``` -Config file - zeebe: - gateway: - network: - port: 26500 - cluster: - initialContactPoints: [127.0.0.1:26502] - - -Environment Variables - ZEEBE_GATEWAY_CLUSTER_NETWORK_PORT = 26500 - ZEEBE_GATEWAY_CLUSTER_INITIALCONTACTPOINTS = 127.0.0.1:26502 -``` - -## Broker - -The broker needs to receive communication from the gateway and from other brokers. It also exposes a port for monitoring. - -- `zeebe.broker.network.commandApi.port: 26501`: Gateway-to-broker communication, using an internal SBE (Simple Binary Encoding) protocol. This is the Command API port. This should be exposed to the gateway. -- `zeebe.broker.network.internalApi.port: 26502`: Inter-broker clustering using the Gossip and Raft protocols for partition replication, broker elections, topology sharing, and message subscriptions. This should be exposed to other brokers and the gateway. -- `zeebe.broker.network.monitoringApi.port: 9600`: Metrics and Readiness Probe. Prometheus metrics are exported on the route `/metrics`. There is a readiness probe on `/ready`. - -The relevant [configuration](../configuration/configuration.md) settings are: - -``` -Config file - zeebe: - broker: - network: - commandAPI: - port: 26501 - internalAPI: - port: 26502 - monitoringApi - port: 9600 - -Environment Variables - ZEEBE_BROKER_NETWORK_COMMANDAPI_PORT = 26501 - ZEEBE_BROKER_NETWORK_INTERNALAPI_PORT = 26501 - ZEEBE_BROKER_NETWORK_MONITORINGAPI_PORT = 26501 -``` diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/rebalancing.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/rebalancing.md deleted file mode 100644 index a45d586f22c..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/rebalancing.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -id: rebalancing -title: "Rebalancing" -description: "Step through manual rebalancing, limitations, priority election with round-robin distribution, priority election with fixed distribution, and more." ---- - -Rebalancing is re-electing partition leaders so they are evenly distributed across all brokers. An even leader distribution is beneficial as all brokers share the work of being partition leaders. - -Zeebe will, by default, prefer an even leader distribution when electing new leaders, but will not trigger a re-election unless a leader becomes unavailable. - -When a Zeebe cluster uses an uneven leader distribution, caused by losing a leader and thus electing a suboptimal broker as new leader for example, manually requesting rebalancing can restore the cluster to an even leader distribution. - -## Manual rebalancing - -The gateway exposes an HTTP API to request rebalancing. You can use it by `POST`ing to the `/actuator/rebalance` endpoint on the monitoring port of the gateway: - -```bash -curl -X POST https://{zeebe-gateway}:9600/actuator/rebalance -``` - -The result of this operation is always `200 OK` with no body, even when rebalancing is [not supported](#limitations) by the current configuration or when not all leaders have been contacted. - -Track the rebalancing progress by observing [metrics](./metrics.md). -During the rebalancing, partitions might become unhealthy and can't make progress until a new leader is elected. - -### Limitations - -Manual rebalancing is done on a best-effort basis. - -Due to the nature of distributed systems, Zeebe can never guarantee a particular distribution and rebalancing cannot avoid that. 
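-
-Because the outcome is best-effort, it can help to verify the actual leader distribution after requesting a rebalance. A minimal sketch using `zbctl` (the gateway address is a placeholder; drop `--insecure` if TLS is enabled between client and gateway):
-
-```bash
-# The topology lists the current role of every broker per partition, so an even
-# spread of leaders across brokers indicates the rebalance had the desired effect.
-zbctl status --address {zeebe-gateway}:26500 --insecure
-```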
- -There are two configurations where manual rebalancing is supported: - -- **Priority election** with **round-robin distribution** - - - Priority election and round-robin distribution are enabled by default. - - As long as you have not manually disabled priority election or set a fixed distribution, rebalancing is supported. - - Brokers are automatically assigned as primary partition leaders during startup, based on cluster size and replication factor. - -- **Priority election** with **fixed distribution** - - Fixed distribution is an experimental configuration that is disabled by default. - - Brokers are assigned as primary partition leaders based on the configuration. - - Only configurations where a partition designates a single broker as primary partition leader are supported. - -**Priority election** is controlled by the `zeebe.broker.cluster.raft.enablePriorityElection` config and is enabled by default. - -Learn more about [priority election](../configuration/priority-election.md). - -**Partition distribution** is controlled by the `zeebe.broker.experimental.partitioning` config options. -The default scheme is `ROUND_ROBIN`. - -All other configurations are not supported and a manual rebalancing will silently fail. -The rebalancing request is successfully completed by the gateway, but leaders will ignore the request and no re-election is triggered. - -Even when a rebalancing request is handled successfully by all leaders, the result of the re-election process is not guaranteed. -Followers that are not fully caught up with the leader cannot be elected as leader. -This becomes more likely under high load or with increased network latency between leader and follower. - -We recommend requesting rebalancing only under low load. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/resource-planning.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/resource-planning.md deleted file mode 100644 index bb1acdefb1a..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/resource-planning.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -id: resource-planning -title: "Resource planning" -keywords: ["backpressure", "back-pressure", "back pressure"] ---- - -The short answer to “_what resources and configuration will I need to take Zeebe to production?_” is: it depends. - -While we cannot tell you exactly what you need, we can explain what depends, what it depends on, and how it depends on it. - -## Disk space - -All brokers in a partition use disk space to store the following: - -- The event log for each partition they participate in. By default, this is a minimum of _128MB_ for each partition, incrementing in 128MB segments. The event log is truncated on a given broker when data has been processed and successfully exported by all loaded exporters. -- One periodic snapshot of the running state (in-flight data) of each partition (unbounded, based on in-flight work). - -Additionally, the leader of a partition also uses disk space to store a projection of the running state of the partition in RocksDB (unbounded, based on in-flight work). 
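-
-Before estimating, it can also be useful to measure what a running broker actually consumes. A rough sketch, assuming the default data directory layout of the official distribution (adjust the paths to your installation):
-
-```bash
-# Overall footprint of the broker's data directory
-du -sh /usr/local/zeebe/data
-
-# Per-partition breakdown (log segments, runtime state, and snapshots)
-du -sh /usr/local/zeebe/data/raft-partition/partitions/*
-```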
- -To calculate the required amount of disk space, the following "back of the envelope" formula can be used as a starting point: - -``` -neededDiskSpace = replicatedState + localState - -replicatedState = totalEventLogSize + totalSnapshotSize - -totalEventLogSize = followerPartitionsPerNode * eventLogSize * reserveForPartialSystemFailure - -totalSnapshotSize = partitionsPerNode * singleSnapshotSize * 2 -// singleSnapshotSize * 2: -// the last snapshot (already replicated) + -// the next snapshot (in transit, while it is being replicated) - -partitionsPerNode = leaderPartitionsPerNde + followerPartitionsPerNode - -leaderPartitionsPerNode = partitionsCount / numberOfNodes -followerPartitionsPerNode = partitionsCount * replicationFactor / numberOfNodes - -clusterSize = [number of broker nodes] -partitionsCount = [number of partitions] -replicationFactor = [number of replicas per partition] -reserveForPartialSystemFailure = [factor to account for partial system failure] -singleSnapshotSize = [size of a single rocks DB snapshot] -eventLogSize = [event log size for duration of snapshotPeriod] -``` - -Some observations on the scaling of the factors above: - -- `eventLogSize`: This factor scales with the throughput of your system. -- `totalSnapshotSize`: This factor scales with the number of in-flight processes. -- `reserveForPartialSystemFailure`: This factor is supposed to be a reserve to account for partial system failure (e.g. loss of quorum inside Zeebe cluster, or loss of connection to external system). See the remainder of this document for a further discussion on the effects of partial system failure on Zeebe cluster and disk space provisioning. - -Many of the factors influencing the above formula can be fine-tuned in the [configuration](../configuration/configuration.md). The relevant configuration settings are: - -```yaml -Config file - zeebe: - broker: - data: - logSegmentSize: 128MB - snapshotPeriod: 5m - cluster: - partitionsCount: 1 - replicationFactor: 1 - clusterSize: 1 - -Environment Variables - ZEEBE_BROKER_DATA_LOGSEGMENTSIZE = 128MB - ZEEBE_BROKER_DATA_SNAPSHOTPERIOD = 5m - ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT = 1 - ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR = 1 - ZEEBE_BROKER_CLUSTER_CLUSTERSIZE = 1 -``` - -Other factors can be observed in a production-like system with representative throughput. - -By default, this data is stored in the following: - -- `segments` - The data of the log split into segments. The log is only appended, and its data can be deleted when it becomes part of a new snapshot. -- `state` - The active state. Deployed processes, active process instances, etc. Completed process instances or jobs are removed. -- `snapshot` - A state at a certain point in time. - -> **Pitfalls** -> -> To avoid exceeding your disk space, here are a few pitfalls to avoid: -> -> - Do not configure an exporter which does not advance its record position (such as the Debug Exporter). - -If you do configure an exporter, ensure you monitor its availability and health, as well as the availability and health the exporter depends on. -This is the Achilles' heel of the cluster. If data cannot be exported, it cannot be removed from the cluster and will accumulate on disk. See _effect of exporters and external system failure_ further on in this document for an explanation and possible buffering strategies. - -### Event log - -The event log for each partition is segmented. By default, the segment size is 128MB. 
- -The event log grows over time, unless and until individual event log segments are deleted. - -An event log segment can be deleted once: - -- All the events it contains have been processed by exporters. -- All the events it contains have been replicated to other brokers. -- All the events it contains have been processed. - -The following conditions inhibit the automatic deletion of event log segments: - -- A cluster loses its quorum. In this case, events are queued but not processed. Once a quorum is reestablished, events are replicated and eventually event log segments are deleted. -- An exporter does not advance its read position in the event log. In this case, the event log grows ad infinitum. - -An event log segment is not deleted until all the events in it are exported by all configured exporters. This means exporters that rely on side effects, perform intensive computation, or experience backpressure from external storage will cause disk usage to grow, as they delay the deletion of event log segments. - -Exporting is only performed on the partition leader, but the followers of the partition do not delete segments in their replica of the partition until the leader marks all events in it as unneeded by exporters. - -We make sure that event log segments are not deleted too early. No event log segment is deleted until a snapshot is taken that includes that segment. When a snapshot is taken, the event log is only deleted up to that point. - -### Snapshots - -The running state of the partition is captured periodically on the leader in a snapshot. By default, this period is every five minutes. This can be changed in the [configuration](../configuration/configuration.md). - -A snapshot is a projection of all events that represent the current running state of the processes running on the partition. It contains all active data, for example, deployed processes, active process instances, and not yet completed jobs. - -When the broker writes a new snapshot, it deletes all data on the log which was written before the latest snapshot. - -:::note -We tested the snapshot interval via a Zeebe Chaos experiment. Learn more about this experiment and snapshot intervals in our [Zeebe Chaos blog](https://zeebe-io.github.io/zeebe-chaos/2022/02/01/High-Snapshot-Frequency/#snapshot-interval). -::: - -### RocksDB - -On the lead broker of a partition, the current running state is kept in memory and on disk in RocksDB. In our experience, this grows to 2GB under a heavy load of long-running processes. The snapshots replicated to followers are snapshots of RocksDB. - -### Effect of exporters and external system failure - -If an external system relied on by an exporter fails (for example, if you are exporting data to Elasticsearch and the connection to the Elasticsearch cluster fails), the exporter will not advance its position in the event log, and brokers cannot truncate their logs. The broker event log grows until the exporter is able to reestablish the connection and export the data. - -To ensure your brokers are resilient in the event of external system failure, give them sufficient disk space to continue operating without truncating the event log until the connection to the external system is restored. - -### Effect on exporters of node failure - -Only the leader of a partition exports events. Only committed events (events that have been replicated) are passed to exporters. The exporter then updates its read position. The exporter read position is only replicated between brokers in the snapshot. 
It is not itself written to the event log. This means _an exporter’s current position cannot be reconstructed from the replicated event log, only from a snapshot_. - -When a partition fails over to a new leader, the new leader is able to construct the current partition state by projecting the event log from the point of the last snapshot. The position of exporters cannot be reconstructed from the event log, so it is set to the last snapshot. This means an exporter can see the same events twice in the event of a fail-over. - -You should assign idempotent ids to events in your exporter if this is an issue for your system. The combination of record position and partition id is reliable as a unique id for an event. - -### Effect of quorum loss - -If a partition goes under quorum (for example, if two nodes in a 3-node cluster go down), the leader of the partition continues to accept requests, but these requests are not replicated and are not marked as committed. In this case, they cannot be truncated. This causes the event log to grow. The amount of disk space needed to continue operating in this scenario is a function of the broker throughput and the amount of time to quorum being restored. You should ensure your nodes have sufficient disk space to handle this failure mode. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/setting-up-a-cluster.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/setting-up-a-cluster.md deleted file mode 100644 index 1337df66a15..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/setting-up-a-cluster.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -id: setting-up-a-cluster -title: "Setting up a Zeebe cluster" ---- - -To set up a cluster, you need to adjust the `cluster` section in the Zeebe configuration file. - -Below is a snippet of the default Zeebe configuration file: - -```yaml ---- -cluster: - # This section contains all cluster related configurations, to setup a zeebe cluster - - # Specifies the unique id of this broker node in a cluster. - # The id should be between 0 and number of nodes in the cluster (exclusive). - # - # This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_NODEID. - nodeId: 0 - - # Controls the number of partitions, which should exist in the cluster. - # - # This can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT. - partitionsCount: 1 - - # Controls the replication factor, which defines the count of replicas per partition. - # The replication factor cannot be greater than the number of nodes in the cluster. - # - # This can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR. - replicationFactor: 1 - - # Specifies the zeebe cluster size. This value is used to determine which broker - # is responsible for which partition. - # - # This can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_CLUSTERSIZE. - clusterSize: 1 - - # Allows to specify a list of known other nodes to connect to on startup - # The contact points of the internal network configuration must be specified. - # The format is [HOST:PORT] - # Example: - # initialContactPoints : [ 192.168.1.22:26502, 192.168.1.32:26502 ] - # - # To guarantee the cluster can survive network partitions, all nodes must be specified - # as initial contact points. 
- # - # This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS - # specifying a comma-separated list of contact points. - # Default is empty list: - initialContactPoints: [] - - # Allows to specify a name for the cluster - # This setting can also be overridden using the environment variable ZEEBE_BROKER_CLUSTER_CLUSTERNAME. - # Example: - clusterName: zeebe-cluster -``` - -## Example - -In this example, we will set up a Zeebe cluster with five brokers. Each broker needs to get a unique node id. - -To scale well, we will bootstrap five partitions with a replication factor of three. For more information about this, take a look into the [clustering](/components/zeebe/technical-concepts/clustering.md) section. - -The clustering setup will look like this: - -![cluster](assets/example-setup-cluster.png) - -## Configuration - -The configuration of the first broker could look like this: - -```yaml ---- -cluster: - nodeId: 0 - partitionsCount: 5 - replicationFactor: 3 - clusterSize: 5 - initialContactPoints: - [ - ADDRESS_AND_PORT_OF_NODE_0, - ADDRESS_AND_PORT_OF_NODE_1, - ADDRESS_AND_PORT_OF_NODE_2, - ADDRESS_AND_PORT_OF_NODE_3, - ADDRESS_AND_PORT_OF_NODE_4, - ] -``` - -For the other brokers, the configuration will slightly change: - -```yaml ---- -cluster: - nodeId: NODE_ID - partitionsCount: 5 - replicationFactor: 3 - clusterSize: 5 - initialContactPoints: - [ - ADDRESS_AND_PORT_OF_NODE_0, - ADDRESS_AND_PORT_OF_NODE_1, - ADDRESS_AND_PORT_OF_NODE_2, - ADDRESS_AND_PORT_OF_NODE_3, - ADDRESS_AND_PORT_OF_NODE_4, - ] -``` - -Each broker needs a unique node id. The ids should be in the range of zero and `clusterSize - 1`. You need to replace the `NODE_ID` placeholder with an appropriate value. - -Additionally, the brokers need an initial contact point to start their gossip conversation. Make sure you use the address and **management port** of another broker. You need to replace the `ADDRESS_AND_PORT_OF_NODE_0` placeholder. - -To guarantee a cluster can properly recover from network partitions, it is currently required that all nodes be specified as initial contact points. It is not necessary for a broker to list itself as an initial contact point, but it is safe to do so, and likely simpler -to maintain. - -## Partitions bootstrapping - -On bootstrap, each node will create a partition matrix. - -This matrix depends on the partitions count, replication factor and the cluster size. If you completed the configuration correctly and used the same values for `partitionsCount`, `replicationFactor`, and `clusterSize` on each node, all nodes will generate the same partition matrix. - -For the current example, the matrix will look like the following: - -| | Node 0 | Node 1 | Node 2 | Node 3 | Node 4 | -| ----------- | -------- | -------- | -------- | -------- | -------- | -| Partition 0 | Leader | Follower | Follower | - | - | -| Partition 1 | - | Leader | Follower | Follower | - | -| Partition 2 | - | - | Leader | Follower | Follower | -| Partition 3 | Follower | - | - | Leader | Follower | -| Partition 4 | Follower | Follower | - | - | Leader | - -The matrix ensures the partitions are well distributed between the different nodes. Furthermore, it guarantees each node knows exactly which partitions it has to bootstrap and for which it will become the leader at first (this could change later, if the node needs to step down for example.) - -## Keep alive intervals - -It's possible to specify how often Zeebe clients should send keep alive pings. 
By default, the official Zeebe clients (Java and Go) send keep alive pings every 45 seconds. This interval can be configured through the clients' APIs and through the `ZEEBE_KEEP_ALIVE` environment variable. When configuring the clients with the environment variable, the time interval must be expressed a positive amount of milliseconds (e.g., 45000). - -It's also possible to specify the minimum interval allowed by the gateway before it terminates the connection. By default, gateways terminate connections if they receive more than two pings with an interval less than 30 seconds. This minimum interval can be modified by editing the network section in the respective configuration file or by setting the `ZEEBE_GATEWAY_NETWORK_MINKEEPALIVEINTERVAL` environment variable. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/update-zeebe.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/update-zeebe.md deleted file mode 100644 index 7e26a237e9c..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/update-zeebe.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -id: update-zeebe -title: "Update Zeebe" -description: "This section describes how to update Zeebe to a new version." ---- - -## Update - -See the [update guide](/guides/update-guide/introduction.md) for specific instructions per Zeebe version. - -To update a Zeebe cluster, take the following steps: - -1. Shut down all Zeebe brokers and other components of the system. -1. Take a [backup](./backups.md) of your Zeebe brokers and Elasticsearch `data` folder if used. -1. Update all Zeebe brokers and gateways to the new version. -1. Restart the system components. - -## Partitions admin endpoint - -This endpoint allows querying the status of the partitions and performing operations to prepare an update. - -The endpoint is available under `http://{zeebe-broker}:{zeebe.broker.network.monitoringApi.port}/actuator/partitions` (default port: `9600`). - -It is enabled by default. It can be disabled in the configuration by setting: - -``` -management.endpoint.partitions.enabled=false -``` - -### Query the partition status - -The status of the partitions can be queried with a `GET` request: - -``` -/actuator/partitions -``` - -The response contains all partitions of the broker mapped to the partition-id. - -
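-
-For example, with the default monitoring port on a local broker:
-
-```bash
-curl http://localhost:9600/actuator/partitions
-```
-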
-<details>
-  <summary>Full Response</summary>
-
-```
-{
-  "1":{
-    "role":"LEADER",
-    "snapshotId":"399-1-1601275126554-490-490",
-    "processedPosition":490,
-    "processedPositionInSnapshot":490,
-    "streamProcessorPhase":"PROCESSING"
-  }
-}
-```
-
-</details>
    diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/zeebe-in-production.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/zeebe-in-production.md deleted file mode 100644 index 29b2a6d7d0d..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/operations/zeebe-in-production.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: zeebe-in-production -title: "Operating Zeebe in production" -sidebar_label: "Overview" -keywords: ["backpressure", "back-pressure", "back pressure"] ---- - -This chapter covers topics relevant to anyone who wants to operate Zeebe in production. - -- [Resource planning](resource-planning.md) - Gives an introduction for calculating how many resources need to be provisioned. -- [Network ports](network-ports.md) - Discusses which ports are needed to run Zeebe. -- [Setting up a Zeebe cluster](setting-up-a-cluster.md) - Quick guide on how to set up a cluster with multiple brokers. -- [Metrics](metrics.md) - Lists options to monitor Zeebe. -- [Health status](health.md) - Lists available high-level health and liveness probes. -- [Backpressure](backpressure.md) - Discusses the backpressure mechanism used by Zeebe brokers. -- [Disk space](disk-space.md) - Explains how to set limits for the amount of free disk space. Once these limits are undercut, Zeebe degrades gracefully to allow the operations team to provide more disk space. -- [Update Zeebe](update-zeebe.md) - Contains information on how to perform a shutdown update. -- [Rebalancing](rebalancing.md) - Describes how to rebalance a cluster. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/client-authorization.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/client-authorization.md deleted file mode 100644 index 0c4806b4326..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/client-authorization.md +++ /dev/null @@ -1,264 +0,0 @@ ---- -id: client-authorization -title: "Client authorization" -description: "Learn how the Zeebe Gateway supports Camunda Identity-based auth token validation." ---- - -## Gateway - -The Zeebe Gateway supports [Camunda Identity](../../identity/what-is-identity.md)-based auth token validation. - -In the Camunda 8 Self-Managed Helm chart, authentication is enabled by default via Camunda Identity. - -### Camunda Identity authorization - -[Camunda Identity](../../identity/what-is-identity.md)-based OAuth token validation can be enabled by setting `security.authentication.mode` to `identity` and providing the corresponding `security.authentication.identity.*` properties. You can find more details about these in the [Gateway config documentation](../configuration/gateway.md#zeebegatewayclustersecurityauthenticationidentity). - -The Camunda 8 Self-Managed Helm chart is already fully preconfigured by default. - -#### YAML snippet - -```yaml -security: - authentication: - mode: identity - identity: - issuerBackendUrl: http://keycloak:8080/auth/realms/camunda-platform - audience: zeebe-api - type: keycloak -``` - -With authentication enabled, every gRPC request to the Gateway requires a valid auth token in the `Authorization` header, granting access to the configured `security.authentication.identity.audience`, issued by the configured `security.authentication.identity.issuerBackendUrl`. The `zeebe-api` audience is already pre-configured in Camunda Identity. 
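-
-As an illustration only (the client ID, secret, and realm depend on your Identity and Keycloak setup), a token can be obtained from the configured issuer with a client-credentials request and then passed as a bearer token:
-
-```bash
-# Request an access token from the issuer configured above (Keycloak in this sketch);
-# the client ID and secret are placeholders for credentials configured in Identity.
-curl -s -X POST \
-  "http://keycloak:8080/auth/realms/camunda-platform/protocol/openid-connect/token" \
-  -d "grant_type=client_credentials" \
-  -d "client_id=<client-id>" \
-  -d "client_secret=<client-secret>"
-
-# The returned access_token is then sent with each gRPC request as
-# `Authorization: Bearer <access_token>`.
-```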
- -The authentication could be disabled by setting `security.authentication.mode: none` in the Gateway configuration file or via `ZEEBE_GATEWAY_SECURITY_AUTHENTICATION_MODE=none` as environment variable. - -## Client - -Zeebe clients also provide a way for users to modify gRPC call headers, namely to contain access tokens. - -Users can modify gRPC headers using Zeebe's built-in `OAuthCredentialsProvider`, which uses user-specified credentials to contact an OAuth authorization server. The authorization server should return an access token that is then appended to each gRPC request. - -Although, by default `OAuthCredentialsProvider` is configured with to use a Camunda 8 authorization server, it can be configured to use any user-defined server. Users can also write a custom [CredentialsProvider](https://github.com/camunda/camunda/blob/stable/8.2/clients/java/src/main/java/io/camunda/zeebe/client/CredentialsProvider.java). In the following sections, we'll describe the usage of the default `OAuthCredentialsProvider` as well as the `CredentialsProvider` interface that can be extended for implementing a custom provider. - -### OAuthCredentialsProvider - -The `OAuthCredentialsProvider` requires the specification of a client ID and a client secret. These are then used to request an access token from an OAuth 2.0 authorization server through a [client credentials flow](https://tools.ietf.org/html/rfc6749#section-4.4). - -By default, the authorization server is the one used by Camunda 8, but any other can be used. Using the access token returned by the authorization server, the `OAuthCredentialsProvider` adds it to the gRPC headers of each request as a bearer token. Requests which fail with an `UNAUTHENTICATED` gRPC code are seamlessly retried only if a new access token can be obtained. - -#### Java - -To use the Zeebe client with Camunda 8, first an `OAuthCredentialsProvider` must be created and configured with the appropriate client credentials. The `audience` should be equivalent to the cluster endpoint without a port number. - -```java -public class AuthorizedClient { - public void main(String[] args) { - final OAuthCredentialsProvider provider = - new OAuthCredentialsProviderBuilder() - .clientId("clientId") - .clientSecret("clientSecret") - .audience("cluster.endpoint.com") - .build(); - - final ZeebeClient client = - new ZeebeClientBuilderImpl() - .gatewayAddress("cluster.endpoint.com:443") - .credentialsProvider(provider) - .build(); - - System.out.println(client.newTopologyRequest().send().join().toString()); - } -} -``` - -For security reasons, client secrets should not be hard coded. Therefore, it's recommended to use environment variables to pass client secrets into Zeebe. Although several variables are supported, the ones required to set up a minimal client are `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET`. After setting these variables to the correct values, the following would be equivalent to the previous code: - -```java -public class AuthorizedClient { - public void main(final String[] args) { - final ZeebeClient client = - new ZeebeClientBuilderImpl() - .gatewayAddress("cluster.endpoint.com:443") - .build(); - - System.out.println(client.newTopologyRequest().send().join().toString()); - } -} -``` - -The client creates an `OAuthCredentialProvider` with the credentials specified through the environment variables and the audience is extracted from the address specified through the `ZeebeClientBuilder`. 
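-
-For example, a minimal way to run such a client with credentials supplied via the environment (the JAR name is a placeholder for your own application):
-
-```bash
-export ZEEBE_CLIENT_ID=<client-id>
-export ZEEBE_CLIENT_SECRET=<client-secret>
-
-# The client builder picks up both variables and creates the credentials provider itself.
-java -jar my-zeebe-client.jar
-```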
- -:::note -Zeebe's Java client will not prevent you from adding credentials to gRPC calls while using an insecure connection, but you should be aware that doing so will expose your access token by transmitting it in plaintext. -::: - -#### Go - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" -) - -func main() { - credsProvider, err := zbc.NewOAuthCredentialsProvider(&zbc.OAuthProviderConfig{ - ClientID: "clientId", - ClientSecret: "clientSecret", - Audience: "cluster.endpoint.com", - }) - if err != nil { - panic(err) - } - - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: "cluster.endpoint.com:443", - CredentialsProvider: credsProvider, - }) - if err != nil { - panic(err) - } - - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` - -As was the case with the Java client, it's possible to make use of the `ZEEBE_CLIENT_ID` and `ZEEBE_CLIENT_SECRET` environment variables to simplify the client configuration: - -```go -package main - -import ( - "context" - "fmt" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" -) - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - GatewayAddress: "cluster.endpoint.com:443", - }) - if err != nil { - panic(err) - } - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` - -:::note -Like the Java client, the Go client will not prevent you from adding credentials to gRPC calls while using an insecure connection, but doing so will expose your access token. -::: - -#### Environment variables - -Since there are several environment variables that can be used to configure an `OAuthCredentialsProvider`, we list them here along with their uses: - -- `ZEEBE_CLIENT_ID` - The client ID used to request an access token from the authorization server -- `ZEEBE_CLIENT_SECRET` - The client secret used to request an access token from the authorization server -- `ZEEBE_TOKEN_AUDIENCE` - The audience for which the token should be valid -- `ZEEBE_AUTHORIZATION_SERVER_URL` - The URL of the authorization server from which the access token will be requested (by default, configured for Camunda 8) -- `ZEEBE_CLIENT_CONFIG_PATH` - The path to a cache file where the access tokens will be stored (by default, it's `$HOME/.camunda/credentials`) - -### Custom Credentials provider - -As previously mentioned, the `CredentialProvider`'s purpose is to modify the gRPC headers with an authorization method so a reverse proxy sitting in front of the gateway can validate them. - -The interface consists of an `applyCredentials` method and a `shouldRetryRequest` method. The first method is called for each gRPC call and takes a map of headers to which it should add credentials. The second method is called whenever a gRPC call fails and takes in the error that caused the failure which is then used to decide if the request should be retried. - -The following sections implement custom provider in Java and Go: - -#### Java - -```java -public class MyCredentialsProvider implements CredentialsProvider { - /** - * Adds a token to the Authorization header of a gRPC call. 
- */ - @Override - public void applyCredentials(final Metadata headers) { - final Key authHeaderkey = Key.of("Authorization", Metadata.ASCII_STRING_MARSHALLER); - headers.put(authHeaderKey, "Bearer someToken"); - } - - /** - * Retries request if it failed with a timeout. - */ - @Override - public boolean shouldRetryRequest(final Throwable throwable) { - return ((StatusRuntimeException) throwable).getStatus() == Status.DEADLINE_EXCEEDED; - } -} -``` - -After implementing the `CredentialsProvider`, we can provide it when building a client: - -```java -public class SecureClient { - public static void main(final String[] args) { - final ZeebeClient client = ZeebeClient.newClientBuilder().credentialsProvider(new MyCredentialsProvider()).build(); - - // continue... - } -} -``` - -#### Go - -```go -package main - -import ( - "context" - "fmt" - "google.golang.org/grpc/status" - "google.golang.org/grpc/codes" - "github.com/camunda-cloud/zeebe/clients/go/pkg/zbc" -) - -type MyCredentialsProvider struct { -} - -// ApplyCredentials adds a token to the Authorization header of a gRPC call. -func (p *MyCredentialsProvider) ApplyCredentials(ctx context.Context, headers map[string]string) error { - headers["Authorization"] = "someToken" - return nil -} - -// ShouldRetryRequest returns true if the call failed with a deadline exceed error. -func (p *MyCredentialsProvider) ShouldRetryRequest(ctx context.Context, err error) bool { - return status.Code(err) == codes.DeadlineExceeded -} - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - CredentialsProvider: &MyCredentialsProvider{}, - }) - if err != nil { - panic(err) - } - - ctx := context.Background() - response, err := client.NewTopologyCommand().Send(ctx) - if err != nil { - panic(err) - } - - fmt.Println(response.String()) -} -``` diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/secure-client-communication.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/secure-client-communication.md deleted file mode 100644 index d12c8725591..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/secure-client-communication.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -id: secure-client-communication -title: "Secure client communication" -description: "Zeebe supports TLS between the gateway and all the officially supported clients. In this section, we will review how to configure these components." ---- - -Zeebe supports transport layer security (TLS v1.3) between the gateway and all the officially supported clients. In this section, we will review how to configure these components. - -## Gateway - -TLS in the gateway is disabled by default. This means that if you are just experimenting with Zeebe or in development, there is no configuration needed. However, if you want to enable authentication you can configure Zeebe in the `security` section of the configuration files. The following configurations are present in both `gateway.yaml.template` and `broker.standalone.yaml.template`, the file you should edit depends on whether you are using a standalone gateway or an embedded gateway. - -```yaml ---- -security: - # Enables TLS authentication between clients and the gateway - enabled: false - - # Sets the path to the certificate chain file - certificateChainPath: - - # Sets the path to the private key file location - privateKeyPath: -``` - -`enabled` should be either `true` or `false`, where true will enable TLS authentication between client and gateway, and false will disable it. 
`certificateChainPath` and `privateKeyPath` are used to configure the certificate with which the server will authenticate itself. `certificateChainPath` should be a file path pointing to a certificate chain in PEM format representing the server's certificate, and `privateKeyPath` is a file path pointing to the certificate's PKCS8 private key, also in PEM format. - -Additionally, as you can see in the configuration file, each value can also be configured through an environment variable. The environment variable to use again depends on whether you are using a standalone gateway or an embedded gateway. - -## Clients - -Unlike the gateway, TLS is enabled by default in all of Zeebe's supported clients. The following sections show how to disable or properly configure each client. - -:::note -Disabling TLS should only be done for testing or development. During production deployments, clients and gateways should be properly configured to establish secure connections. -::: - -### Java - -Without any configuration, the client looks in the system's certificate store for a CA certificate with which to validate the gateway's certificate chain. If you wish to use TLS without having to install a certificate in client's system, you can specify a CA certificate: - -```java -public class SecureClient { - public static void main(final String[] args) { - final ZeebeClient client = ZeebeClient.newClientBuilder().caCertificatePath("path/to/certificate").build(); - - // ... - } -} -``` - -Alternatively, use the `ZEEBE_CA_CERTIFICATE_PATH` environment variable to override the code configuration. - -To disable TLS in a Java client, use the `.usePlaintext()` option: - -```java -public class InsecureClient { - public static void main(final String[] args) { - final ZeebeClient client = ZeebeClient.newClientBuilder().usePlaintext().build(); - - // ... - } -} -``` - -Alternatively, use the `ZEEBE_INSECURE_CONNECTION` environment variable to override the code configuration. To enable an insecure connection, set it to **true**. To use a secure connection, set it to any non-empty value other than **true**. Setting the environment variable to an empty string is equivalent to unsetting it. - -### Go - -Similarly to the Java client, if no CA certificate is specified, the client will look in the default location for a CA certificate with which to validate the gateway's certificate chain. It's also possible to specify a path to a CA certificate in the Go client: - -```go -package test - -import ( - "github.com/camunda-cloud/zeebe/clients/go/zbc" -) - - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - CaCertificatePath: "path/to/certificate", - }) - - // ... -} -``` - -To disable TLS, execute the following: - -```go -package test - -import ( - "github.com/camunda-cloud/zeebe/clients/go/zbc" -) - - -func main() { - client, err := zbc.NewClient(&zbc.ClientConfig{ - UsePlaintextConnection: true, - }) - - // ... -} -``` - -As in the Java client, you can use the `ZEEBE_INSECURE_CONNECTION` and `ZEEBE_CA_CERTIFICATE_PATH` to override these configurations. - -### zbctl - -To configure `zbctl` to use a path to a CA certificate: - -``` -./zbctl --certPath /my/certificate/location [arguments] -``` - -To configure `zbctl` to disable TLS: - -``` -./zbctl --insecure [arguments] -``` - -Since `zbctl` is based on the Go client, setting the appropriate environment variables will override these parameters. 
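-
-For example, the flags shown above can also be supplied through the environment:
-
-```bash
-# Equivalent to `./zbctl --certPath /my/certificate/location status`
-export ZEEBE_CA_CERTIFICATE_PATH=/my/certificate/location
-./zbctl status
-
-# Equivalent to `./zbctl --insecure status`
-ZEEBE_INSECURE_CONNECTION=true ./zbctl status
-```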
- -## Self signed certificates - -It may be useful, for testing or development purposes, to use TLS between the client and the gateway; to simplify things, we can use self-signed certificates for this. - -### Testing & example - -To generate your own self-signed certificates for testing/development, you will need `openssl` install on your local machine. Then you can run: - -```sh -openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 --nodes -addext 'subjectAltName=IP:127.0.0.1' -``` - -This will generate a new certificate, `cert.pem`, and a new passwordless key, `key.pem`. - -:::warning -Do not use these in production! Again, this is for development and testing purposes only. -::: - -Then start up your gateway with the certificate and key specified above. For example, if we run a broker with an embedded gateway directly using Docker: - -```sh -docker run -p 26500:26500 -e ZEEBE_BROKER_NETWORK_HOST=0.0.0.0 -e ZEEBE_BROKER_GATEWAY_SECURITY_ENABLED=true -e ZEEBE_BROKER_GATEWAY_SECURITY_CERTIFICATECHAINPATH=/usr/local/zeebe/cert.pem -e ZEEBE_BROKER_GATEWAY_SECURITY_PRIVATEKEYPATH=/usr/local/zeebe/key.pem --mount type=bind,source="$(pwd)"/cert.pem,target=/usr/local/zeebe/cert.pem --mount type=bind,source="$(pwd)"/key.pem,target=/usr/local/zeebe/key.pem camunda/zeebe -``` - -There is one caveat: in order for the client to accept this self-signed certificate, you will need to trust it. The simplest way is to specify it as part of the client's configuration. For example, if you're using `zbctl`, you can then do `zbctl --certPath cert.pem status`. Refer to the documentation above on how to configure your clients. - -## Troubleshooting authentication issues - -Here we will describe a few ways the clients and gateway could be misconfigured and what those errors look like. Hopefully, this will help you recognize these situations and provide an easy fix. - -### TLS is enabled in `zbctl` but disabled in the gateway - -The client will fail with the following error: - -``` -Error: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: authentication handshake failed: tls: first record does not look like a TLS handshake" -``` - -The following error will be logged by Netty in the gateway: - -``` -Aug 06, 2019 4:23:22 PM io.grpc.netty.NettyServerTransport notifyTerminated -INFO: Transport failed -io.netty.handler.codec.http2.Http2Exception: HTTP/2 client preface string missing or corrupt. 
Hex dump for received bytes: 1603010096010000920303d06091559c43ec48a18b50c028 - at io.netty.handler.codec.http2.Http2Exception.connectionError(Http2Exception.java:103) - at io.netty.handler.codec.http2.Http2ConnectionHandler$PrefaceDecoder.readClientPrefaceString(Http2ConnectionHandler.java:306) - at io.netty.handler.codec.http2.Http2ConnectionHandler$PrefaceDecoder.decode(Http2ConnectionHandler.java:239) - at io.netty.handler.codec.http2.Http2ConnectionHandler.decode(Http2ConnectionHandler.java:438) - at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:505) - at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:444) - at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:283) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) - at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:352) - at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1421) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:374) - at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:360) - at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:930) - at io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:794) - at io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:424) - at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:326) - at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:918) - at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) - at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) - at java.lang.Thread.run(Thread.java:748) -``` - -**Solution:** Either enable TLS in the gateway as well or specify the `--insecure` flag when using `zbctl`. - -### TLS is disabled in `zbctl` but enabled for the gateway - -`zbctl` will fail with the following error: - -``` -Error: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection closed -``` - -**Solution:** Either enable TLS in the client by specifying a path to a certificate or disable it in the gateway by editing the appropriate configuration file. - -### TLS is enabled for both client and gateway but the CA certificate can't be found - -`zbctl` will fail with the following error: - -``` -Error: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: authentication handshake failed: x509: certificate signed by unknown authority -``` - -**Solution:** Either install the CA certificate in the appropriate location for the system or specify a path to certificate using the methods described above. 
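-
-When it is unclear which side is misconfigured, inspecting the gateway's client-facing port directly can also help. A sketch using `openssl` (host and certificate path are placeholders):
-
-```bash
-# A successful handshake prints the certificate chain presented by the gateway;
-# a failure here while the gateway is reachable suggests TLS is disabled or misconfigured.
-openssl s_client -connect {zeebe-gateway}:26500 -CAfile path/to/certificate </dev/null
-```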
diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/secure-cluster-communication.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/secure-cluster-communication.md deleted file mode 100644 index 7afb26023e8..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/secure-cluster-communication.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -id: secure-cluster-communication -title: "Secure cluster communication" ---- - -:::note - -TLS between nodes in the same cluster is disabled by default. - -::: - -Zeebe supports transport layer security (TLS v1.3) between all nodes in a Zeebe cluster. This means it's possible to encrypt all TCP traffic between all nodes of a given cluster. - -Enabling TLS for cluster communication is an all or nothing feature: either all nodes are configured to use TLS, or none are. It's not currently possible to only configure some nodes to enable TLS. - -Additionally, a small portion of Zeebe traffic is done over UDP, which is left unencrypted. This is purely used for the nodes to gossip topology information amongst themselves, and no sensitive or user-given data is transmitted this way. - -## Configuration - -If you wish to enable TLS for cluster communication, you need to provide two things: a certificate file, and its private key. - -The certificate chain file is expected to be a PEM public certificate file, which should contain a x509 public certificate, and may additionally contain an entire certificate chain. If it does include the chain, it should simply be concatenated after the node's certificate. - -For example, a simple certificate file with only a single certificate: - -``` ------BEGIN CERTIFICATE----- -... ------END CERTIFICATE----- -``` - -If you wanted to include its signing authority, for example, you would append the contents of the authority's public certificate to the end of the certificate chain file: - -``` ------BEGIN CERTIFICATE----- -... ------END CERTIFICATE----- ------BEGIN TRUSTED CERTIFICATE----- -... ------END TRUSTED CERTIFICATE----- -``` - -While each node uses the default Java trust store to verify incoming certificates (configurable via `javax.net.ssl.trustStore`), which by default uses the system's root certificates, it's recommended to include the complete certificate chain in the file. These will also be used by each node to verify the other nodes' certificates. - -:::note -More specifically, the certificate chain will be part of the trust store of the node, and will be used to verify other node's certificates. -::: - -This will allow you to configure each node with a different leaf certificate sharing the same root certificate (or at least an intermediate authority), as long as they're contained in the chain. If all nodes use the same certificate, or if you're certain the certificate is trusted by the root certificates available on each node, it's sufficient for the file to only contain the leaf certificate. - -The private key file should be a PEM private key file, and should be the one during generation of the node's public certificate. Algorithms supported for the private keys are RSA, DSA, and EC. The private key must be generated using [PKCS8](https://datatracker.ietf.org/doc/html/rfc5208) or [PKCS #1](https://datatracker.ietf.org/doc/html/rfc2437); any other format will not work with Zeebe. 
If you're unsure what format your private key is, you can quickly run it through the `openssl` utility to convert it to PKCS8: - -```shell -> openssl pkcs8 -topk8 -nocrypt -in my_private_key -out my_private_pkcs8_key.pem -``` - -Remove the `-nocrypt` parameter if your private key has a password. If your certificate is already in the right format, it will simply do nothing. See the [OpenSSL manpages](https://www.openssl.org/docs/man1.1.1/man1/openssl-pkcs8.html) for more options. - -:::caution - -Note that currently, Zeebe does not support password protected private keys. Since storing the certificates and private keys unencrypted on disk is a security risk, we recommend you use a secret management solution like Vault to inject your certificates in memory at runtime. - -::: - -## Broker - -To configure secure communication for a broker, configure its `zeebe.broker.network.security` section, which looks like this: - -```yaml -security: - # Enables TLS authentication between this gateway and other nodes in the cluster - # This setting can also be overridden using the environment variable ZEEBE_BROKER_NETWORK_SECURITY_ENABLED. - enabled: false - - # Sets the path to the certificate chain file. - # This setting can also be overridden using the environment variable ZEEBE_BROKER_NETWORK_SECURITY_CERTIFICATECHAINPATH. - certificateChainPath: - - # Sets the path to the private key file location - # This setting can also be overridden using the environment variable ZEEBE_BROKER_NETWORK_SECURITY_PRIVATEKEYPATH. - privateKeyPath: -``` - -> The `certificateChainPath` and the `privateKeyPath` can be relative to your broker's working directory, or can be absolute paths. - -## Gateway - -To configure secure communication for a standalone gateway with the rest of the cluster, configure its `zeebe.gateway.cluster.security` section, which looks like this: - -```yaml -security: - # Enables TLS authentication between this gateway and other nodes in the cluster - # This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_SECURITY_ENABLED. - enabled: false - - # Sets the path to the certificate chain file. - # This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_SECURITY_CERTIFICATECHAINPATH. - certificateChainPath: - - # Sets the path to the private key file location - # This setting can also be overridden using the environment variable ZEEBE_GATEWAY_CLUSTER_SECURITY_PRIVATEKEYPATH. - privateKeyPath: -``` - -:::note - -The `certificateChainPath` and the `privateKeyPath` can be relative to the gateway's working directory, or can be absolute paths. - -::: - -## How it works - -When enabled for each node, communication over TCP between these is securely encrypted using the provided certificates in a client-server model. - -For example, let's take two nodes (`A` and `B`). When `A` (the client) sends a request to `B` (the server), they perform a TLS handshake, wherein `B`'s certificate is exchanged and verified by `A`. Afterwards, the request is encrypted such that only a node with `B`'s private key may decrypt it (i.e. in this instance, `B`). - -When the roles are reversed (e.g. `B` sends a request to `A`), the same handshake occurs, but the other way around. As`B` is now the client, and `A` the server, `A`'s certificate is exchanged and verified by `B`. Afterwards, all communication is encrypted and can only be decrypted with `A`'s private key. 
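-
-To check which certificate a node actually presents to its peers, you can probe its internal API port (26502 by default). A sketch with placeholder host and CA file:
-
-```bash
-# Prints the subject and issuer of the certificate served on the broker's internal API port.
-openssl s_client -connect {broker-host}:26502 -CAfile ca.pem </dev/null 2>/dev/null \
-  | openssl x509 -noout -subject -issuer
-```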
- -:::note - -In this model, only the client verifies the identity of the server, as opposed to mTLS, in which both client and server exchange and verify one another's identities. If you need mTLS, it's currently recommended to explore a solution which provides this transparently like a service mesh (e.g. Linkerd or Istio). - -::: - -## Self signed certificates - -If you wish to use self-signed certificates for testing or development purposes, the simplest way is to have all nodes share the same certificate. As aforementioned, the certificate chain configured on a node is also part of its trust store. As such, if all nodes share the same certificate, they will have no trouble verifying the identity of the other nodes. - -You can still configure a different self-signed certificate for each node, _provided they can be verified by the other nodes' certificate chain_. - -For example, let's say you have your own root certificate authority you use to sign your own certificates, and one certificate for each node that you signed with that authority. For each node, you can then create a certificate chain file which would consist of the node's public certificate, followed by the root certificate authority's public certificate. Though each node would have a different leaf certificate it uses to identify itself, the other nodes could verify its identity since their certificate chain contains an authority used to sign it. - -### Testing & example - -To generate your own self-signed certificates for testing, you must first create a certificate authority. - -:::note -For this example, whenever you are asked for input, feel free to just press enter and leave the defaults there. -::: - -```shell -openssl req -config <(printf "[req]\ndistinguished_name=dn\n[dn]\n[ext]\nbasicConstraints=CA:TRUE,pathlen:0") -new -newkey rsa:2048 -nodes -subj "/C=DE/O=Test/OU=Test/ST=BE/CN=cluster.local" -x509 -extensions ext -keyout ca.key -out ca.pem -``` - -Once we have our certificate authority, we can now generate certificates for each node. Let's say we have a cluster of three nodes, `A`, `B`, and `C`. - -Take the following steps: - -1. Generate a private key for each node: - -```shell -openssl genpkey -out nodeA.key -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -openssl genpkey -out nodeB.key -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -openssl genpkey -out nodeC.key -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -``` - -:::note - -Generating a private key using RSA and `openssl` will generate a PKCS8 private key by default. - -::: - -2. Create a certificate signing request (CSR) for each as well: - -```shell -openssl req -new -key nodeA.key -out nodeA.csr -openssl req -new -key nodeB.key -out nodeB.csr -openssl req -new -key nodeC.key -out nodeC.csr -``` - -3. Create the final certificates for each node: - -```shell -openssl x509 -req -days 365 -in nodeA.csr -CA ca.pem -CAkey ca.key -set_serial 01 -extfile <(printf "subjectAltName = IP.1:127.0.0.1") -out nodeA.pem -openssl x509 -req -days 365 -in nodeB.csr -CA ca.pem -CAkey ca.key -set_serial 01 -extfile <(printf "subjectAltName = IP.1:127.0.0.1") -out nodeB.pem -openssl x509 -req -days 365 -in nodeC.csr -CA ca.pem -CAkey ca.key -set_serial 01 -extfile <(printf "subjectAltName = IP.1:127.0.0.1") -out nodeC.pem -``` - -Make sure to replace `IP.1:127.0.0.1` with the advertised host of the broker. If it's an IP address, then keep the `IP.1` prefix. If it's a hostname/DNS entry, then you can write it out as `DNS.1:advertisedHost`. 
To be flexible, you can also use a wildcard host. For example, if you're deploying in Kubernetes, you could use `subjectAltName = DNS.1:*.cluster.local"`. You can also omit the whole `-extfile` parameter if you do not wish to use hostname verification at all. - -4. Create the certificate chain so that each node is able to verify the identity of the others: - -```shell -cat nodeA.pem ca.pem > chainNodeA.pem -cat nodeB.pem ca.pem > chainNodeB.pem -cat nodeC.pem ca.pem > chainNodeC.pem -``` - -5. You can now configure each node using its respective final `chainNode*.pem` file and `node*.key` file. For example, if node `A` was a broker: - -```yaml -security: - enabled: true - certificateChainPath: chainNodeA.pem - privateKeyPath: nodeA.key -``` diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/security.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/security.md deleted file mode 100644 index d9d275518f9..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/security/security.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -id: security -title: "Security" -sidebar_label: "Overview" -description: "This document analyzes Zeebe's security features." ---- - -Zeebe supports the following security features: - -- **[Client-gateway authorization](client-authorization.md)** - allows you set up authorization for the client and the gateway. -- **[Secure client-gateway communication](secure-client-communication.md)** - allows you to secure communication between clients and gateways. -- **[Secure cluster communication](secure-cluster-communication.md)** - allows you to secure communication between all nodes in a cluster. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/assets/zeebe-gateway-overview.png b/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/assets/zeebe-gateway-overview.png deleted file mode 100644 index d8517259a05..00000000000 Binary files a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/assets/zeebe-gateway-overview.png and /dev/null differ diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md deleted file mode 100644 index fedf4b2db1a..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -id: interceptors -title: "Interceptors" -sidebar_label: "Interceptors" ---- - -All communication from a client to a broker must first pass through a gateway. -There they can be intercepted before being dispatched. Zeebe provides a way to -load arbitrary interceptors into the gateway. Some typical examples of what you -can accomplish with this include: - -- Enforcing custom authorization rules on incoming calls -- Monitoring and logging of incoming calls (e.g. - https://github.com/grpc-ecosystem/java-grpc-prometheus) -- Distributed tracing (e.g. - https://github.com/open-telemetry/opentelemetry-java-instrumentation) - -## Implementing an interceptor - -For the communication between client and gateway, Zeebe uses the gRPC -[protocol](components/zeebe/technical-concepts/protocols.md). An interceptor is -thus implemented as a gRPC -[ServerInterceptor](https://grpc.github.io/grpc-java/javadoc/io/grpc/ServerInterceptor.html). 
- -An implementation must adhere to the following requirements: - -- It implements [ServerInterceptor](https://grpc.github.io/grpc-java/javadoc/io/grpc/ServerInterceptor.html) -- It has public visibility -- It has a public default constructor (i.e. no-arg constructor) - -Let's consider an interceptor that provides logging of incoming calls as an -example. Other ServerInterceptor examples can be found in the official grpc-java -[examples](https://github.com/grpc/grpc-java/tree/v1.41.0/examples). - -```java -package io.camunda.zeebe.example; - -import io.grpc.ForwardingServerCallListener.SimpleForwardingServerCallListener; -import io.grpc.Metadata; -import io.grpc.ServerCall; -import io.grpc.ServerCall.Listener; -import io.grpc.ServerCallHandler; -import io.grpc.ServerInterceptor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A simple interceptor that logs each incoming call. The class must be public - * since we will load it via JAR into the gateway. - */ -public final class LoggingInterceptor implements ServerInterceptor { - private static final Logger LOGGER = - LoggerFactory.getLogger("LoggingInterceptor"); - - @Override - public Listener interceptCall( - final ServerCall call, - final Metadata headers, - final ServerCallHandler next) { - final var listener = next.startCall(call, headers); - return new SimpleForwardingServerCallListener<>(listener) { - @Override - public void onMessage(final ReqT message) { - LOGGER.trace("intercepted a call"); - super.onMessage(message); - } - }; - } -} -``` - -This example interceptor will log `"intercepted a call"` at `TRACE` level for -each incoming call it intercepted. This specific interceptor always dispatches -all incoming calls to the target broker, but it would also be possible to stop -the message from interception by other interceptors and even to block it from -dispatch to the broker. - -## Compiling your interceptor - -Our source code for the interceptor class can now be compiled. There are many -ways to do this, but for simplicity we'll use `javac` directly. - -When compiling your class, you need to make sure all compile-time dependencies -are provided. In the example above, that means we need the `grpc-api` and -`slf4j-api` libraries available when compiling. - -Since the interceptor will be running inside the Zeebe gateway, the language -level of the compiled code must be the same as Zeebe's (i.e. currently JDK 11) or lower. This example thus assumes you're using version 11 of `javac`. - -```sh -# to compile LoggingInterceptor.java, we'll need to provide the api libraries -javac -classpath .:lib/grpc-api.jar:lib/slf4j-api.jar ./LoggingInterceptor.java -``` - -## Packaging an interceptor - -Next, you need to package the interceptor class into a fat JAR. Such a JAR must -contain all classes (i.e. including all classes your own classes depend upon at -runtime). - -Like compiling there are many ways to do this, but for simplicity we'll use -`jar` directly. Note, that means we have to define a java manifest file by hand, -in order to place the libraries' classes on the classpath. - -Similar to your interceptor class, any libraries you package must be compiled -for the same language level as Zeebe's (i.e. currently JDK 11) or lower. 
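The `MANIFEST.MF` passed to `jar` below is written by hand. A minimal sketch for this example might look as follows; the exact `Class-Path` entries are an assumption and depend on which libraries you actually bundle:

```
Manifest-Version: 1.0
Class-Path: lib/grpc-api.jar lib/slf4j-api.jar
```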
- -```sh -# both runtime libraries and the manifest must be packaged together with the compiled classes -jar cvfm LoggingInterceptor.jar ./MANIFEST.MF ./*.class ./lib - -# let's verify the contents of the JAR -jar tf ./LoggingInterceptor.jar -# META-INF/ -# META-INF/MANIFEST.MF -# LoggingInterceptor.java -# LoggingInterceptor$1.class -# lib/ -# lib/grpc-api.jar -# lib/grpc.jar -# lib/slf4j-api.jar -# lib/slf4j.jar -``` - -## Loading an interceptor into a gateway - -An interceptor can be loaded into your gateway as a fat JAR. For each -interceptor, you need to provide your gateway with: - -- An interception order index -- An identifier to identify this specific interceptor -- Where to find the JAR with the interceptor class -- The [fully qualified name](https://docs.oracle.com/javase/specs/jls/se17/html/jls-6.html#jls-6.7) - of the interceptor class, e.g. `com.acme.ExampleInterceptor` - -Let's continue with the LoggingInterceptor example. We can provide these -[configurations](/self-managed/zeebe-deployment/configuration/configuration.md) -using a gateway config file, environment variables or a mix of both. We'll be -using a config file here. - -The following gateway config file configures our LoggingInterceptor so it can be -loaded into the gateway at start-up. - -```yaml -zeebe: - gateway: - ... - - # allows specifying multiple interceptors - interceptors: - - - # identifier, can be used for debugging - id: logging-interceptor - - # name of our ServerInterceptor implementation - # this must be the fully qualified name of the class - className: io.camunda.zeebe.example.LoggingInterceptor - - # path to the fat JAR, can be absolute or relative - jarPath: /tmp/LoggingInterceptor.jar - - # you can add additional interceptors by listing them - - id: ... - className: ... - jarPath: ... -``` - -Note that multiple interceptors can be configured (i.e. -`zeebe.gateway.interceptors` expects a list of interceptor configurations). The -listing order determines the order in which a call is intercepted by the -different interceptors. The first interceptor in the list wraps the second, etc. -The first interceptor is thus the outermost interceptor. In other words, calls -are intercepted first by the first listed interceptor, followed by the second -listed interceptor, etc. - -This configuration can also be provided using environment variables. You'll need -to provide an index for the interceptor in the variable name, to distinguish the -ordering of the different interceptors. For example, to configure the -`className` of the first interceptor use: -`zeebe_gateway_interceptors_0_className`. Likewise, a second interceptor's -`jarPath` can be configured using `zeebe_gateway_interceptors_1_jarPath`. - -## About class loading - -[Previously](#packaging-an-interceptor), we stated that you need to package the -interceptor class into a fat JAR. Although good general advice, this is not -entirely true. To understand why, let's discuss how the class loading of your -interceptor works. - -When your JAR is loaded into the gateway, Zeebe provides a special class loader -for it. This class loader isolates your interceptor from the rest of Zeebe, but -it also exposes our own code to your interceptor. When loading classes for your -interceptor, it will always first look in this special class loader and only if -it is not available it will look in Zeebe's main class loader. In other words, -you can access any classes from Zeebe's main class loader when they are not -provided by your JAR. 
For internal class loading, Zeebe will still only look in -its main class loader. - -This means you can reduce your JAR size by leaving out libraries that are -already provided by Zeebe's class loader. In addition, if your interceptor -depends on a different version of a class than the one provided by Zeebe, then -you can provide your own version without having to worry about breaking Zeebe. - -## Troubleshooting - -Here we describe a few common errors. Hopefully, this will help you recognize -these situations and provide an easy fix. Generally, the gateway will not be -able to start up with a misconfigured interceptor. - -Note that environment variables can overwrite your gateway configuration file. -The gateway logs the configuration it uses during start-up. Please use that to -verify your configuration. - -**java.lang.ClassNotFoundException** Your ServerInterceptor implementation could -not be found. Make sure you've configured the `className` correctly in the -[gateway configuration](#loading-an-interceptor-into-a-gateway) and that your -[JAR contains your class](#packaging-an-interceptor). - -**io.camunda.zeebe.gateway.interceptors.impl.InterceptorLoadException** -Something went wrong trying to load your interceptor. Make sure your [JAR is -packaged](#packaging-an-interceptor) correctly, i.e. it contains all runtime -dependencies and specifies them in the manifest file's classpath. The exception -should provide a clear description, but generally we distinguish the following -common cases: - -- Unable to instantiate your class: make sure your class adheres to the - [requirements described above](#implementing-an-interceptor). -- The JAR could not be loaded: make sure you've configured your interceptor - correctly in the [gateway configuration](#loading-an-interceptor-into-a-gateway). - -**io.camunda.zeebe.util.jar.ExternalJarLoadException**: the JAR could not be -loaded: make sure you've configured your interceptor correctly in the [gateway -configuration](#loading-an-interceptor-into-a-gateway). - -**java.lang.UnsupportedClassVersionError** Your interceptor has been compiled by -a more recent version of the Java Runtime. Make sure your [class is -compiled](#packaging-an-interceptor) with JDK 11. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md deleted file mode 100644 index 88b7949bb8a..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -id: overview -title: "Zeebe Gateway" -sidebar_label: "Overview" -description: "Learn about this component and contact point of the Zeebe cluster which allows Zeebe clients to communicate with Zeebe brokers inside a Zeebe cluster." ---- - -The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster which allows Zeebe clients to communicate with Zeebe brokers inside a Zeebe cluster. For more information about the Zeebe broker, visit our [additional documentation](../../../components/zeebe/technical-concepts/architecture.md#brokers). - -To summarize, the Zeebe broker is the main part of the Zeebe cluster, which does all the heavy work like processing, replicating, exporting, and everything based on partitions. The Zeebe Gateway acts as a load balancer and router between Zeebe’s processing partitions. 
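For orientation, this is roughly what a client talking to the gateway looks like when using the Zeebe Java client. It is a minimal sketch in which the gateway address and the plaintext (non-TLS) connection are assumptions for a local setup:

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.Topology;

public class GatewayConnectionSketch {
  public static void main(String[] args) {
    // The gateway is the single contact point for clients; 26500 is its default gRPC port.
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500") // assumption: locally running gateway
            .usePlaintext() // assumption: TLS is not enabled
            .build()) {
      // Ask the gateway for the cluster topology it maintains via the gossip protocol.
      Topology topology = client.newTopologyRequest().send().join();
      topology.getBrokers().forEach(broker -> System.out.println(broker.getAddress()));
    }
  }
}
```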
- -![Zeebe gateway overview](assets/zeebe-gateway-overview.png) - -To interact with the Zeebe cluster, the Zeebe client sends a command as a gRPC message to the Zeebe Gateway (to port `26500` by default). Given the gateway supports gRPC, the user can use several clients in different languages to interact with the Zeebe cluster. For more information, read our [overview](../../../apis-tools/working-with-apis-tools.md). - -:::note -Be aware Zeebe brokers divide data into partitions (shards), and use RAFT for replication. Read more on RAFT [here](../../../components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol). -::: - -When the Zeebe Gateway receives a valid gRPC message, it is translated to an internal binary format and forwarded to one of the partition leaders inside the Zeebe cluster. The command type and values can determine to which partition the command is forwarded. - -For example, creating a new process instance is sent in a round-robin fashion to the different partitions. If the command relates to an existing process instance, the command must be sent to the same partition where it was first created (determined by the key). - -To determine the current leader for the corresponding partition, the gateway must maintain the topology of the Zeebe cluster. The gateway(s) and broker(s) form a cluster using gossip protocol to distribute information. - -## Why do we have the Zeebe Gateway and what problems does it solve? - -The Zeebe Gateway protects the brokers from external sources. It allows the creation of a demilitarized zone ([DMZ]()) and the Zeebe Gateway is the only contact point. - -The Zeebe Gateway also allows you to easily create clients in your language of choice while keeping the client implementation as thin as possible. The clients can be kept thin, since the gateway takes care of the cluster topology and forwards the requests to the right partitions. There are already several client implementations available, officially-supported, and community-maintained. Check the list [here](../../../apis-tools/working-with-apis-tools.md). - -The gateway can be run and scaled independently of the brokers, which means it translates the messages, distributes them to the correct partition leaders, and separates the concerns of the applications. For example, if your system encounters a spike of incoming requests, and you have set up enough partitions on the broker side up front, but not enough gateways to handle the load, you can easily scale them up. - -## Embedded versus standalone - -The Zeebe Gateway can be run in two different ways: embedded and standalone. - -### Embedded - -Running the gateway in embedded mode means it will run as part of the Zeebe broker. The broker will accept gRPC client messages via the embedded gateway and distribute the translated requests inside the cluster. This means the request accepted by the embedded gateway does not necessarily go to the same broker, where the embedded gateway is running. - -The embedded gateway is useful for development and testing purposes, and to reduce the burden of deploying and running multiple applications. For example, in [zeebe-process-test](https://github.com/camunda/zeebe-process-test) an embedded gateway is used to accept the client commands and write directly to the engine. - -:::note Be aware -If the gateway is running in the embedded mode, it will consume resources from the broker, which might impact the performance of the system. 
-::: - -### Standalone - -Running the gateway in standalone mode means the gateway will be executed as its own application. This is the recommended way for production use cases, and it is the default (and only option) in the Helm charts. As mentioned, this allows separation of concerns, especially as the gateway can be scaled independently of the broker based on the current workload. diff --git a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-installation.md b/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-installation.md deleted file mode 100644 index fd7a4c327a1..00000000000 --- a/versioned_docs/version-8.2/self-managed/zeebe-deployment/zeebe-installation.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: zeebe-installation -title: "Overview" -sidebar_label: "Overview" ---- - -:::warning -Zeebe does not support network file systems (NFS) other types of network storage volumes at this time. Usage of NFS may cause data corruption. -::: - -Please refer to the [Installation Guide](/self-managed/platform-deployment/overview.md) for details on how to install Zeebe in a private cloud or on your own hardware. - -Within this section you will find detailed information about: - -- [Zeebe Gateway](zeebe-gateway/zeebe-gateway-overview.md) - The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster which allows Zeebe clients to communicate with Zeebe brokers inside a Zeebe cluster. -- [Configuration](configuration/configuration.md) - Explains the configuration options. These configuration options apply to both environments, but not to Camunda 8. In Camunda 8, the configuration is provided for you. -- [Security](security/security.md) - Discusses the security aspects of running Zeebe and how to use them. -- [Operation](operations/zeebe-in-production.md) - Outlines topics that become relevant when you want to operate Zeebe in production. -- [Exporters](exporters/exporters.md) - Zeebe comes packaged with two exporters: [Elasticsearch](exporters/elasticsearch-exporter.md) and [OpenSearch](exporters/opensearch-exporter.md). This section of the docs explains how these exporters can be configured. For a general overview on the exporters concept, refer to our [exporters concept](/self-managed/concepts/exporters.md) page. - -:::note -New to BPMN and want to learn more before moving forward? [Visit our Get Started Guides](/docs/guides/getting-started/) to learn about BPMN and orchestration. -::: - -## Additional resources - -If you have questions or feedback, we encourage you to visit the [GitHub issue tracker](https://github.com/camunda/camunda/issues) and [contact us](/contact). diff --git a/versioned_docs/version-8.3/apis-tools/operate-api/overview.md b/versioned_docs/version-8.3/apis-tools/operate-api/overview.md index ec3c24e09d6..266e67ac3f2 100644 --- a/versioned_docs/version-8.3/apis-tools/operate-api/overview.md +++ b/versioned_docs/version-8.3/apis-tools/operate-api/overview.md @@ -117,8 +117,8 @@ curl -b cookie.txt -X POST 'http://localhost:8080/v1/process-definitions/search' | `GET /v1/process-instances/{key}/statistics` | Get flow node statistic by process instance key | New endpoint | | `GET /v1/process-instances/{key}/sequence-flows` | Get sequence flows of process instance by key | New endpoint | | **Incidents** | | | -| `POST /v1/incidents/search` | Search for incidents | New field added: `jobKey`

    **Warning**
    1. New fields could break deserialization, so ignore fields not used.

    | +| `GET /v1/incidents/{key}` | Get incident by key | New field added: `jobKey`

    **Warning**
    1. New fields could break deserialization, so ignore fields not used.
    | | **Flownode instances** | | | | `POST /v1/flownode-instances/search` | Search for flow node instances | New fields added:
    `flowNodeId`
    `flowNodeName`
    `processDefinitionKey`

    **Warning**
    1. New fields could break deserialization, so ignore fields not used.
    2. The `processDefinitionKey` field will only contain data from version 8.1.8 onward
    3. The field `flowNodeName` is only returned if set in the BPMN diagram, so no flowNodeName is returned for flow nodes that do not have it set in the diagram. | | `GET /v1/flownode-instances/{key}` | Get flow node instance by key | New fields added:
    `flowNodeId`
    `flowNodeName`
    `processDefinitionKey`

    **Warning**
    1. New fields could break deserialization, so ignore fields not used.
    2. The `processDefinitionKey` field will only contain data from version 8.1.8 onward
    3. The field `flowNodeName` is only returned if set in the BPMN diagram, so no flowNodeName is returned for flow nodes that do not have it set in the diagram. | diff --git a/versioned_docs/version-8.3/apis-tools/tasklist-api/tasklist-api-tutorial.md b/versioned_docs/version-8.3/apis-tools/tasklist-api/tasklist-api-tutorial.md index 21c5449ffcb..d78fc5907d8 100644 --- a/versioned_docs/version-8.3/apis-tools/tasklist-api/tasklist-api-tutorial.md +++ b/versioned_docs/version-8.3/apis-tools/tasklist-api/tasklist-api-tutorial.md @@ -250,9 +250,8 @@ export class TasklistModule implements OnModuleInit { logger.log("Tasklist credentials fetched"); axiosRef.defaults.baseURL = config.get("TASKLIST_API_ADDRESS"); - axiosRef.defaults.headers[ - "Authorization" - ] = `Bearer ${credentials.access_token}`; + axiosRef.defaults.headers["Authorization"] = + `Bearer ${credentials.access_token}`; axiosRef.defaults.headers["Content-Type"] = "application/json"; setTimeout(this.onModuleInit.bind(this), credentials.expires_in * 1000); // we need convert minutes to milliseconds } diff --git a/versioned_docs/version-8.3/components/best-practices/development/invoking-services-from-the-process-c7.md b/versioned_docs/version-8.3/components/best-practices/development/invoking-services-from-the-process-c7.md index 4e05898330d..c01fa3eb17a 100644 --- a/versioned_docs/version-8.3/components/best-practices/development/invoking-services-from-the-process-c7.md +++ b/versioned_docs/version-8.3/components/best-practices/development/invoking-services-from-the-process-c7.md @@ -160,7 +160,8 @@ Only if the increased latency does not work for your use case, for example, beca
    -

    Call a named bean or java class implementing the +

    + Call a named bean or java class implementing the JavaDelegate interface.

    -

    Use a configurable Connector +

    + Use a configurable Connector
    (REST or SOAP services provided out-of-the-box).

    -

    Pull a service task into an external worker thread and inform process engine of -completion.

    +

    + Pull a service task into an external worker thread and inform process engine of + completion. +

    Execute a script inside the engine.

    @@ -183,7 +187,8 @@ completion.

    -

    Use with +

    + Use with
    BPMN elements.

    @@ -252,7 +257,8 @@ completion.

    -

    Implement +

    + Implement
    via

    @@ -261,8 +267,10 @@ completion.

    Java (in same JVM)

    -

    Expression Language -(can reference Java code)

    +

    + Expression Language + (can reference Java code) +

    BPMN configuration

    @@ -377,9 +385,11 @@ completion.

    Configure via

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -390,9 +400,11 @@ completion.

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -401,9 +413,11 @@ completion.

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -412,9 +426,11 @@ completion.

    -

    BPMN Ext. Element+ +

    + BPMN Ext. Element+ - serviceTask + + serviceTask
    camunda:
    @@ -423,9 +439,11 @@ completion.

    -

    BPMN Attributes +

    + BPMN Attributes
    - serviceTask + + serviceTask
    camunda:
    @@ -438,13 +456,15 @@ completion.

    -

    BPMN Element +

    + BPMN Element
    script or
    BPMN Attribute
    - scriptTask + + scriptTask
    camunda:
    diff --git a/versioned_docs/version-8.3/components/best-practices/development/service-integration-patterns.md b/versioned_docs/version-8.3/components/best-practices/development/service-integration-patterns.md index 5c5e68ce69e..3b20d54d945 100644 --- a/versioned_docs/version-8.3/components/best-practices/development/service-integration-patterns.md +++ b/versioned_docs/version-8.3/components/best-practices/development/service-integration-patterns.md @@ -120,7 +120,7 @@ You can leverage [message buffering](/docs/components/concepts/messages#message- Retries are not built-in, so if you need to model a loop to retry the initial service call if no response is received. And (at least in the current Zeebe version), there is no possibility to trigger error events for a receive task, which means you need to model error messages as response payload or separate message types — both are discussed later in this post. -A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/contact) to discuss such a scenario in more depth. +A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/reference/contact.md) to discuss such a scenario in more depth. **Summary And recommendations** diff --git a/versioned_docs/version-8.3/components/best-practices/development/understanding-transaction-handling-c7.md b/versioned_docs/version-8.3/components/best-practices/development/understanding-transaction-handling-c7.md index 3a39004b8b6..5ceeef56cc5 100644 --- a/versioned_docs/version-8.3/components/best-practices/development/understanding-transaction-handling-c7.md +++ b/versioned_docs/version-8.3/components/best-practices/development/understanding-transaction-handling-c7.md @@ -90,29 +90,29 @@ Aside a general strategy to mark service tasks as being save points you will oft **Do** configure a savepoint **after** -- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. +- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. 
when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. -- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. +- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. **Do** configure a savepoint **before** -- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. +- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. 
-- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. +- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. -- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. +- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. The Camunda JobExecutor works (by default) with [exclusive jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs), meaning that just one exclusive job per process instance may be executed at once. Hence, job executor threads will by default not cause optimistic locking exceptions at parallel joins "just by themselves", but other threads using the Camunda API might cause them - either for themselves or also for the job executor. **Don't** configure save points **before** -- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. 
+- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. -- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. +- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. ### Adding save points automatically to every model diff --git a/versioned_docs/version-8.3/components/concepts/clusters.md b/versioned_docs/version-8.3/components/concepts/clusters.md index 00012b19e2b..7bd3ce5deea 100644 --- a/versioned_docs/version-8.3/components/concepts/clusters.md +++ b/versioned_docs/version-8.3/components/concepts/clusters.md @@ -32,21 +32,18 @@ When your Free Trial plan expires, you are automatically transferred to the Free ### Auto-pause -Free Trial `dev` (or untagged) clusters are automatically paused eight hours after a cluster is created or resumed from a paused state. Auto-pause occurs regardless of cluster usage. +Free Trial clusters are automatically paused after a period of inactivity. Auto-pause occurs regardless of cluster usage. You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. -- Clusters tagged as `test`, `stage`, or `prod` do not auto-pause. -- Paused clusters are automatically deleted after 30 consecutive paused days. You can change the tag to avoid cluster deletion. -- No data is lost while a cluster is paused. All execution and configuration is saved, but cluster components such as Zeebe and Operate are temporarily disabled until you resume the cluster. +- Clusters tagged as `dev` (or untagged) auto-pause eight hours after the cluster is created or resumed from a paused state. +- Clusters tagged as `test`, `stage`, or `prod` auto-pause if there is no cluster activity for 48 hours. +- Cluster disk space is cleared when a trial cluster is paused. + - You will need to redeploy processes to the cluster once it is resumed from a paused state. + - Cluster configuration settings (for example, API Clients, Connector secrets, and IP allowlists) are saved so you can easily resume a cluster. :::tip - -To prevent auto-pause, you can: - -- Tag the cluster as `test`, `stage`, or `prod` instead of `dev`. -- [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter, Professional, or Enterprise plan. - +To prevent auto-pause, [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter or Enterprise plan. 
::: ## Development clusters diff --git a/versioned_docs/version-8.3/components/concepts/data-retention.md b/versioned_docs/version-8.3/components/concepts/data-retention.md index bd13352187d..c55b3adcd3a 100644 --- a/versioned_docs/version-8.3/components/concepts/data-retention.md +++ b/versioned_docs/version-8.3/components/concepts/data-retention.md @@ -15,7 +15,7 @@ The following time-to-live settings are configured in SaaS for each application. - **Tasklist**: 30 days - **Zeebe**: 7 days -If there are specific requirements for your use-case, [reach out to us](/contact/) to discuss your data retention needs under an Enterprise plan. +If there are specific requirements for your use-case, [reach out to us](/reference/contact.md) to discuss your data retention needs under an Enterprise plan. For more information on development clusters in the Starter or Professional plans, refer to our [fair usage limits of those plans](https://camunda.com/legal/fair-usage-limits-for-starter-plan/). ## Additional information diff --git a/versioned_docs/version-8.3/components/concepts/messages.md b/versioned_docs/version-8.3/components/concepts/messages.md index c1236ca8d93..9a2c691947a 100644 --- a/versioned_docs/version-8.3/components/concepts/messages.md +++ b/versioned_docs/version-8.3/components/concepts/messages.md @@ -118,6 +118,10 @@ The first message creates a new process instance. The following messages are cor When the instance ends and messages with the same correlation key are not correlated yet, a new process instance is created. +:::note +You may also use TTL to wait for messages that may arrive earlier when combining [start events and intermediate catch events](/docs/components/modeler/bpmn/events.md). +::: + ### Single instance **Problem**: Create exactly one instance of a process diff --git a/versioned_docs/version-8.3/components/concepts/process-instance-creation.md b/versioned_docs/version-8.3/components/concepts/process-instance-creation.md index ea1a7a6b3d7..fc315f7431f 100644 --- a/versioned_docs/version-8.3/components/concepts/process-instance-creation.md +++ b/versioned_docs/version-8.3/components/concepts/process-instance-creation.md @@ -26,9 +26,10 @@ This command creates a new process instance and immediately responds with the pr ![create-process](assets/create-process.png) -

    - Code example -

    Create a process instance: +

    + Code example +

    +Create a process instance: ``` zbctl create instance "order-process" @@ -38,16 +39,16 @@ Response: ``` { - "processKey": 2251799813685249, - "bpmnProcessId": "order-process", - "version": 1, - "processInstanceKey": 2251799813686019 + "processKey": 2251799813685249, + "bpmnProcessId": "order-process", + "version": 1, + "processInstanceKey": 2251799813686019 } ``` -

    -
    +

    +
    ### Create and await results @@ -67,7 +68,8 @@ When the client resends the command, it creates a new process instance.
    Code example -

    Create a process instance and await results: +

    +Create a process instance and await results: ``` zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}' @@ -123,7 +125,7 @@ Start instructions are supported for both `CreateProcessInstance` commands.

    Code example

    - Create a process instance starting before the 'ship_parcel' element: +Create a process instance starting before the 'ship_parcel' element: ```java client.newCreateInstanceCommand() diff --git a/versioned_docs/version-8.3/components/concepts/what-is-camunda-8.md b/versioned_docs/version-8.3/components/concepts/what-is-camunda-8.md index f225270c009..de0aaa8458e 100644 --- a/versioned_docs/version-8.3/components/concepts/what-is-camunda-8.md +++ b/versioned_docs/version-8.3/components/concepts/what-is-camunda-8.md @@ -105,7 +105,7 @@ The platform and tools are usable in your environment right away, with full publ ## Next steps -- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/contact/) page. +- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/reference/contact.md) page. - [Introduction to Camunda 8](/guides/introduction-to-camunda-8.md) - [Create a Camunda 8 account](/guides/create-account.md) - [Migrate from Camunda 7 to Camunda 8](/guides/migrating-from-camunda-7/index.md) diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/connector-sdk.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/connector-sdk.md index b60b3468f99..1f03f03c11a 100644 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/connector-sdk.md +++ b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/connector-sdk.md @@ -1025,8 +1025,9 @@ For example, you can spin up a custom client with the [Zeebe Java client](/apis-tools/java-client/index.md) as follows: ```java -import io.camunda.connector.MyConnectorFunction -import io.camunda.connector.runtime.jobworker.outbound.ConnectorJobHandler; +import io.camunda.connector.MyConnectorFunction; +import io.camunda.connector.runtime.core.outbound.ConnectorJobHandler; +import io.camunda.connector.validation.impl.DefaultValidationProvider; import io.camunda.zeebe.client.ZeebeClient; public class Main { @@ -1037,7 +1038,7 @@ public class Main { zeebeClient.newWorker() .jobType("io.camunda:template:1") - .handler(new ConnectorJobHandler(new MyConnectorFunction())) + .handler(new ConnectorJobHandler(new MyConnectorFunction(), new DefaultValidationProvider())) .name("MESSAGE") .fetchVariables("authentication", "message") .open(); @@ -1052,5 +1053,5 @@ it with your job handler implementation that handles invoking the Connector func Your custom job handler needs to create a `OutboundConnectorContext` that the Connector function can use to handle variables, secrets, and Connector results. You can extend the -provided `io.camunda.connector.impl.outbound.AbstractConnectorContext` to quickly gain access +provided `io.camunda.connector.runtime.core.AbstractConnectorContext` to quickly gain access to most of the common context operations. diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/010-to-020.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/010-to-020.md deleted file mode 100644 index 12d336c91d8..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/010-to-020.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -id: 010-to-020 -title: Update 0.1 to 0.2 -description: "Review which adjustments must be made to migrate from Connector SDK 0.1.x to 0.2.0." 
---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.1.x to 0.2.0. - -:::caution - -Be aware that the update from 0.1 to 0.2 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.2.0, we introduce the following structural changes: - -- Input validation and secret replacement move from writing imperative code to declaratively using annotations. -- The Outbound aspect of APIs is more explicit. Classes have been moved to more explicit packages and have been renamed. -- New required annotation for outbound Connectors. - -### Declarative validation and secrets - -Input objects previously had to implement the `ConnectorInput` interface to participate in validation and secret replacement -initiated from the `ConnectorContext` using its `validate` and `replaceSecrets` methods respectively. - -With version 0.2.0, we remove the imperative approach for validation and secret replacement from the SDK. -Instead, you can use annotations to describe the constraints of input attributes and mark those that can contain -secrets. - -These are two input objects written with the SDK version 0.1.x: - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class MyConnectorRequest implements ConnectorInput { - - private String message; - private Authentication authentication; - - @Override - public void validateWith(final Validator validator) { - validator.require(message, "message"); - validator.require(authentication, "authentication"); - validateIfNotNull(authentication, validator); - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - replaceSecretsIfNotNull(authentication, secretStore); - } -} -``` - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class Authentication implements ConnectorInput { - - private String user; - private String token; - - @Override - public void validateWith(final Validator validator) { - validator.require(user, "user"); - validator.require(token, "token"); - if (token != null && !(token.startsWith("xobx") || token.startsWith("secrets."))) { - validator.addErrorMessage("Token must start with \"xobx\" or be a secret"); - } - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - token = secretStore.replaceSecret(token); - } -} -``` - -You can express the same input objects with SDK version 0.2.0 as follows: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.NotNull; - -public class MyConnectorRequest { - - @NotEmpty - private String message; - - @NotNull - @Valid - @Secret - private Authentication authentication; -} -``` - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.Pattern; - -public class Authentication { - - @NotEmpty - private String user; - - @NotEmpty - @Pattern("^(xobx-|secret).+") - @Secret - private String token; -} -``` - -As a result, you have to remove the `ConnectorInput` interface implementation and the imperative code 
that comes with `validateWith` -and `replaceSecrets`. You can now concisely describe the constraints of attributes rather then expressing them in imperative code. - -In order to use annoation-based validation out of the box, you can include the new artifact `connector-validation` that -comes with the SDK. - - - - - -```xml - - io.camunda.connector - connector-validation - 0.2.0 - -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-validation:0.2.0' -``` - - - - -You can read more about validation and secret replacement in our -[SDK Guide](/components/connectors/custom-built-connectors/connector-sdk.md). - -### Explicit Outbound aspect - -With version 0.2.0 of the SDK, we make the Outbound aspect of those components specific to oubound connectivity -more visible. This separates those SDK components that are tightly coupled to Outbound from those that -will be reusable for Inbound. - -With this change, the names of the following classes need to be adjusted: - -- Rename `io.camunda.connector.api.ConnectorContext` to `io.camunda.connector.api.outbound.OutboundConnectorContext` -- Rename `io.camunda.connector.api.ConnectorFunction` to `io.camunda.connector.api.outbound.OutboundConnectorFunction` -- Rename `io.camunda.connector.api.SecretProvider` to `io.camunda.connector.api.secret.SecretProvider` -- Rename `io.camunda.connector.api.SecretStore` to `io.camunda.connector.api.secret.SecretStore` -- Rename `io.camunda.connector.test.ConnectorContextBuilder` to `io.camunda.connector.test.outbound.OutboundConnectorContextBuilder` - -As a result, you must replace all occurrences of the old class names and imports with the new ones. This includes the -SPI for the connector function itself. Therefore, rename the file `META-INF/services/io.camunda.connector.api.ConnectorFunction` to -`META-INF/services/io.camunda.connector.api.outbound.OutboundConnectorFunction`. - -### `@OutboundConnector` annotation - -For best interoperability, Connectors provide default meta-data (`name`, `type`, `inputVariables`) via the `@OutboundConnector` annotation: - -```java -@OutboundConnector( - name = "PING", - inputVariables = {"caller"}, - type = "io.camunda.example.PingConnector:1" -) -public class PingConnector implements OutboundConnectorFunction { - ... -} -``` - -## Connector runtime environment - -If using the -[pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that comes with the SDK does not fit your use case, you can create a custom runtime environment. - -With version 0.2.0 of the [job worker runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#connector-job-handler), you need to make the following changes: - -- Rename `io.camunda.connector.runtime.jobworker.ConnectorJobHandler` to `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` -- Rename connector related env variables from `ZEEBE_` to `CONNECTOR_`. Zeebe configuration properties remain unchanged - -As a general change in behavior the module will now pick up connectors from classpath unless it is explicitly configured via environment variables. - -Also take the name changes in the [SDK core](#explicit-outbound-aspect) into account. - -Implementing your own Connector wrapper you need to provide a Connector context specific to -your environment. 
Consider extending the `io.camunda.connector.impl.outbound.AbstractConnectorContext` -instead of implementing the `io.camunda.connector.api.ConnectorContext` yourself. Most of the commonly needed functionality -is already provided in there. diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md deleted file mode 100644 index c5ad171835f..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: 0100-to-0110 -title: Update 0.10 to 0.11 -description: "Review which adjustments must be made to migrate from Connector SDK 0.10.x to 0.11.0." ---- - -Beginner - -:::note -Migrate directly to version 0.11.2 of the SDK. This contains a fix for several issues in the 0.11.0 release. -::: - -This SDK release is not backwards-compatible. We are moving towards a stable Connectors release and continue to improve the experience of developing custom Connectors. - -In this SDK version, we changed the `OutboundConnectorContext` and `InboundConnectorContext interfaces significantly.` You can no longer use the `getVariablesAsType` or `getPropertiesAsType` methods in outbound and inbound Connectors, respectively. -Use the new `bindVariables` method instead, as it takes care of secret replacement, payload validation, and deserialization automatically. - -We are moving away from a mandatory `@Secret` annotation. -From this release onwards, secrets are automatically replaced in all input variables/properties without the need to explicitly declare an annotation. - -To migrate your Connector implementations, complete the following: - -1. If you used the `OutboundConnectorContext::getVariablesAsType` method in you outbound Connector functions, replace it with `OutboundConnectorContext::bindVariables`. -2. If you used the `InboundConnectorContext::getPropertiesAsType` method in you inbound Connector executables, replace it with `InboundConnectorContext::bindProperties`. -3. Remove calls to `OutboundConnectorContext::replaceSecrets` and `InboundConnectorContext::replaceSecrets` methods. The secrets are now replaced automatically. -4. Remove calls to `OutboundConnectorContext::validate` and `InboundConnectorContext::validate` methods. The validation is now performed automatically. -5. If you used the `@Secret` annotation in your Connector implementations, you can safely remove it, as it has no effect. diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/020-to-030.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/020-to-030.md deleted file mode 100644 index 248d7e7cc31..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/020-to-030.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: 020-to-030 -title: Update 0.2 to 0.3 -description: "Review which adjustments must be made to migrate from Connector SDK 0.2.x to 0.3.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.2.x to 0.3.0. - -:::caution - -Be aware that the update from 0.2 to 0.3 requires manual migration steps as described below. 
- -::: - -## Connector function - -With SDK version 0.3.0, we introduce the following structural changes: - -- Input validation moves from Jakarta Bean Validation API version 3.0 to 2.0. -- SDK artifacts have to be in scope `provided`. - -### Update to Validation API 2.0 - -To better integrate in the current Java ecosystem and widely used frameworks like Spring 5 and Spring Boot 2, the `connector-validation` module -now operates on Jakarta Bean Validation API version 2.0 instead of version 3.0. Adjust your Connector input objects using validation as follows: - -Replace all class imports starting with `jakarta.validation` by `javax.validation`. A Connector input class on SDK 0.2.x with the following imports: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -changes to the following: - -```java -import io.camunda.connector.api.annotation.Secret; -import javax.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -This way, the Connector runtime environments are able to pick up your validations correctly. - -### Provided SDK artifacts - -The Connector runtime environments can execute multiple Connectors at once. The environments also provide the base SDK artifacts and their classes -to any Connector they execute. This comprises runtime-specific classes related to the Connector context as well as the Connector core and the validation -classes. To minimize the possibility of incompatible classes being on the same classpath, Connectors are required to depend on `connector-core` and -`connector-validation` in Maven's dependency scope `provided`. Other dependency management frameworks like Gradle offer similar scopes. - -As a result, you need to include the SDK artifacts as follows in Maven: - -```xml - - io.camunda.connector - connector-core - provided - - - io.camunda.connector - connector-validation - provided - -``` - -## Connector runtime environment - -The SDK provides a [pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that you can start manually. With version 0.3.0, this runtime moves from the [SDK repository](https://github.com/camunda/connector-sdk/tree/stable/0.2/runtime-job-worker) -to [Connector Runtime](https://github.com/camunda/connectors/blob/main/connector-runtime/README.md). This also means that the provided runtime now is -a Spring Boot application, based on Spring Zeebe. Thus, it offers all out-of-the-box capabilities Spring Zeebe provides. - -The Connector runtime JAR for manual installation can now be fetched from https://repo1.maven.org/maven2/io/camunda/spring-zeebe-connector-runtime/ -(starting with version `8.1.3`) instead of https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-job-worker/. You can start the runtime -environment with the following command: - -```bash -java -cp 'spring-zeebe-connector-runtime-VERSION-with-dependencies.jar:connector-http-json-VERSION-with-dependencies.jar' \ - io.camunda.connector.runtime.ConnectorRuntimeApplication -``` - -The Docker image is still accessible at https://hub.docker.com/r/camunda/connectors/tags. - -### Custom runtime environments - -If you are building a custom runtime environment, note the following adjustments: - -- The `runtime-util` artifact replaces the `runtime-job-worker` artifact. 
-- The `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` has moved to `io.camunda.connector.runtime.util.outbound.ConnectorJobHandler`. -- The `io.camunda.connector.impl.outbound.AbstractOutboundConnectorContext` has moved to `io.camunda.connector.impl.context.AbstractConnectorContext`. -- To build your own context class, we recommend using the following signature: - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext {} -``` - -- The `SecretStore` class has been removed. Initialize your context class with a `super(SecretProvider)` call. Remove the `getSecretStore` method if you used it. - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext { - - public MyContext(final SecretProvider provider) { - super(provider); - ... - } -} -``` diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/030-to-040.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/030-to-040.md deleted file mode 100644 index 8eaec7c1a8b..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/030-to-040.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 030-to-040 -title: Update 0.3 to 0.4 -description: "Review which adjustments must be made to migrate from Connector SDK 0.3.x to 0.4.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.3.x to 0.4.0. - -:::caution - -Be aware that the update from 0.3 to 0.4 requires manual migration steps as described below. - -::: - -With SDK version 0.4.0, we introduce several structural changes: - -- Switching the default Connector Runtime to Spring Boot/Spring Zeebe for outbound Connectors. -- Introducing the webhook inbound Connector. -- Moving out-of-the-box Connectors to the mono-repo at https://github.com/camunda/connectors-bundle/tree/main/connectors to ease dependency management and conflict resolution. -- Building the Connector bundle artifact and Docker image with Maven by default (done by adding various fat JARs to one Docker image). -- Adding the GCP Secret Provider used in Camunda SaaS. - -### Inbound webhook - -The Spring Zeebe runtime with SDK version `0.4.0` introduces support for inbound webhook capabilities. -See the [list of available inbound Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md). - -To function properly, the Spring Zeebe runtime requires a connection to the [Operate API](/apis-tools/operate-api/overview.md). Read more on [how to connect to Operate or disable it completely](/self-managed/connectors-deployment/connectors-configuration.md#local-installation). - -### What happens if I don't properly configure the connection to the Operate API? - -If you don't properly configure the connection to the Operate API, it will not be possible to poll process definitions from Operate, and therefore the webhook functionality won't work. -In addition, you may observe repeated exceptions in your log file every 5 seconds complaining about the inability to connect to Operate. -Overall, this is not critical; given there are no other issues, the Connector runtime should function properly.
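As a companion to the custom runtime environment notes in the 0.2-to-0.3 guide above, the following sketch shows one way to wire the relocated `ConnectorJobHandler` as a plain Zeebe job worker. It is a hedged illustration rather than an official setup: the gateway address and job type are placeholders, the lambda stands in for a real Connector function, and it assumes the handler's constructor still accepts an `OutboundConnectorFunction`, as in earlier SDK releases.

```java
import io.camunda.connector.runtime.util.outbound.ConnectorJobHandler;
import io.camunda.zeebe.client.ZeebeClient;

public class ManualConnectorWorker {

  public static void main(String[] args) {
    // Connect to a local gateway; adjust the address and security settings for your environment.
    ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .gatewayAddress("localhost:26500")
            .usePlaintext()
            .build();

    // Register a Connector function as a job worker using the relocated handler class.
    // The lambda is a stand-in for a real OutboundConnectorFunction implementation.
    client
        .newWorker()
        .jobType("io.camunda:my-connector:1") // placeholder job type
        .handler(new ConnectorJobHandler(context -> "hello from my Connector"))
        .open();

    // The worker keeps polling for jobs until the client is closed.
  }
}
```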
diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/040-to-050.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/040-to-050.md deleted file mode 100644 index 637cabc8899..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/040-to-050.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: 040-to-050 -title: Update 0.4 to 0.5 -description: "Review which adjustments must be made to migrate from Connector SDK 0.4.x to 0.5.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.4.x to 0.5.0. - -With SDK version 0.5.0, we introduced minor changes: - -- Removing Spring Zeebe dependency management -- Managing the GCP Secret Provider module version diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/050-to-060.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/050-to-060.md deleted file mode 100644 index 5e2bf64a928..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/050-to-060.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: 050-to-060 -title: Update 0.5 to 0.6 -description: "Review which adjustments must be made to migrate from Connector SDK 0.5.x to 0.6.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.5.x to 0.6.0. - -With SDK version 0.6.0, we introduced the following changes: - -- Replacing secrets in parent classes -- Supporting intermediate inbound events -- Defining interfaces for inbound connectors -- Fixing failing datetime serialization diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/060-to-070.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/060-to-070.md deleted file mode 100644 index fbbb78bf02c..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/060-to-070.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: 060-to-070 -title: Update 0.6 to 0.7 -description: "Review which adjustments must be made to migrate from Connector SDK 0.6.x to 0.7.0." ---- - -Beginner - -Beginner - -With the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), we made -breaking changes to the inbound Connectors. Please update -[HTTP Webhook](https://github.com/camunda/connectors/tree/main/connectors/webhook/element-templates) -and [GitHub Webhook](https://github.com/camunda/connectors/tree/main/connectors/github/element-templates) -element templates to the latest versions. - -If you have used inbound webhook Connectors with Connector Runtime 0.6.x, you need to **manually** -apply the new element template version to your diagrams: - -1. Download the new element template from the [GitHub release page](https://github.com/camunda/connectors-bundle/releases/tag/0.17.0). -2. Follow the [installation guide](/components/modeler/desktop-modeler/element-templates/configuring-templates.md) to reinstall the element template. 
diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/070-to-080.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/070-to-080.md deleted file mode 100644 index 1145b3450fb..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/070-to-080.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 070-to-080 -title: Update 0.7 to 0.8 -description: "Review which adjustments must be made to migrate from Connector SDK 0.7.x to 0.8.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.7.x to 0.8.0. diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/080-to-090.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/080-to-090.md deleted file mode 100644 index 51055c0aefc..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/080-to-090.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 080-to-090 -title: Update 0.8 to 0.9 -description: "Review which adjustments must be made to migrate from Connector SDK 0.8.x to 0.9.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.8.x to 0.9.0. diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/090-to-0100.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/090-to-0100.md deleted file mode 100644 index 1e6172bb692..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/090-to-0100.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 090-to-0100 -title: Update 0.9 to 0.10 -description: "Review which adjustments must be made to migrate from Connector SDK 0.9.x to 0.10.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.9.x to 0.10.0. diff --git a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/introduction.md b/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/introduction.md deleted file mode 100644 index 034d80e7388..00000000000 --- a/versioned_docs/version-8.3/components/connectors/custom-built-connectors/update-guide/introduction.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: introduction -title: Connector SDK updates ---- - -These documents guide you through the process of updating your Camunda 8 -Connector runtimes to a newer version of the -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). 
- -There is a dedicated update guide for each version: - -### [Connector SDK 0.10.x to 0.11](../0100-to-0110) - -Update from 0.10.x to 0.11.2 - -### [Connector SDK 0.9 to 0.10](../090-to-0100) - -Update from 0.9.x to 0.10.0 - -### [Connector SDK 0.8 to 0.9](../080-to-090) - -Update from 0.8.x to 0.9.0 - -### [Connector SDK 0.7 to 0.8](../070-to-080) - -Update from 0.7.x to 0.8.0 - -### [Connector SDK 0.6 to 0.7](../060-to-070) - -Update from 0.6.x to 0.7.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.7.0) - -### [Connector SDK 0.5 to 0.6](../050-to-060) - -Update from 0.5.x to 0.6.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.6.0) - -### [Connector SDK 0.4 to 0.5](../040-to-050) - -Update from 0.4.x to 0.5.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.5.0) - -### [Connector SDK 0.3 to 0.4](../030-to-040) - -Update from 0.3.x to 0.4.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.4.0) - -### [Connector SDK 0.2 to 0.3](../020-to-030) - -Update from 0.2.x to 0.3.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.3.0) - -### [Connector SDK 0.1 to 0.2](../010-to-020) - -Update from 0.1.x to 0.2.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.2.0) diff --git a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md index b993f28802a..cead64b6577 100644 --- a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md +++ b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md @@ -103,10 +103,6 @@ There are two options to authenticate the Connector with AWS: The **Amazon EventBridge Webhook Connector** is an inbound Connector enabling you to start a BPMN process instance triggered by an event from [Amazon EventBridge](https://aws.amazon.com/eventbridge/). -:::note -If you have used the **Amazon EventBridge Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an Amazon EventBridge Webhook Connector task 1. Start building your BPMN diagram. You can use the **Amazon EventBridge Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/github.md b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/github.md index dddc9dd5592..adc5dbcf22e 100644 --- a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/github.md +++ b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/github.md @@ -226,12 +226,6 @@ handling response is still applicable [as described](/components/connectors/prot The **GitHub Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). 
-:::note -If you have used the GitHub Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. -Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a GitHub Webhook Connector task 1. Start building your BPMN diagram. You can use GitHub Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. diff --git a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/slack.md b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/slack.md index aac3fb98076..6e70552e4e8 100644 --- a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/slack.md +++ b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/slack.md @@ -31,7 +31,7 @@ To make the **Slack Connector** executable, fill out the mandatory fields highli ### Authentication -Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, '{{secrets.SLACK_OAUTH_TOKEN}}'. +Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, `{{secrets.SLACK_OAUTH_TOKEN}}`. ### Create channel diff --git a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/twilio.md b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/twilio.md index e75e7a6707c..75a7c8af987 100644 --- a/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/twilio.md +++ b/versioned_docs/version-8.3/components/connectors/out-of-the-box-connectors/twilio.md @@ -177,10 +177,6 @@ To learn more about implementing retry and error handling logic in your BPMN dia The **Twilio Webhook Connector** is an inbound Connector that enables you to start a BPMN process instance triggered by a [Twilio event](https://www.twilio.com/docs/usage/webhooks). -:::note -If you have used the **Twilio Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a Twilio Webhook Connector task 1. Start building your BPMN diagram. You can use the **Twilio Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.3/components/connectors/protocol/http-webhook.md b/versioned_docs/version-8.3/components/connectors/protocol/http-webhook.md index 4328e5ff9ba..c9a4e1aca99 100644 --- a/versioned_docs/version-8.3/components/connectors/protocol/http-webhook.md +++ b/versioned_docs/version-8.3/components/connectors/protocol/http-webhook.md @@ -7,12 +7,6 @@ description: Start a process instance with your custom webhook configuration, tr The **HTTP Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by external HTTP call. -:::note -If you have used the HTTP Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. 
-Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an HTTP Webhook Connector event 1. Start building your BPMN diagram. You can use HTTP Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. @@ -83,7 +77,7 @@ Please refer to the [update guide](/components/connectors/custom-built-connector - Set the **API Key** property to the expected value of the API key. - Set the **API Key locator** property that will be evaluated against the incoming request to extract the API key. [See the example](#how-to-configure-api-key-authorization). -- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer {JWT_TOKEN}. +- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer `{JWT_TOKEN}`. - Set JWK URL which is used as a well-known public URL to fetch the [JWKs](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets). - Set JWT role property expression which will be evaluated against the content of the JWT to extract the list of roles. See more details on extracting roles from JWT data [here](#how-to-extract-roles-from-jwt-data). diff --git a/versioned_docs/version-8.3/components/connectors/protocol/rest.md b/versioned_docs/version-8.3/components/connectors/protocol/rest.md index 5683db47a4f..a5be89a1fcb 100644 --- a/versioned_docs/version-8.3/components/connectors/protocol/rest.md +++ b/versioned_docs/version-8.3/components/connectors/protocol/rest.md @@ -55,7 +55,7 @@ Select the **REST Connector** and fill out the following properties under the ** - **Headers**: The API key will be included in the request headers. 3. Specify your API key details: - **API key name**: Enter the parameter name expected by the API (e.g., apiKey). - - **API key value**: Reference the secret you created for your API key (e.g., {{secrets.REST_API_KEY_SECRET}}). + - **API key value**: Reference the secret you created for your API key (e.g., `{{secrets.REST_API_KEY_SECRET}}`). ### REST Connector (Basic) diff --git a/versioned_docs/version-8.3/components/console/manage-plan/migrate-from-prof-to-starter.md b/versioned_docs/version-8.3/components/console/manage-plan/migrate-from-prof-to-starter.md index 1fdcd43ec98..8f3d548da9b 100644 --- a/versioned_docs/version-8.3/components/console/manage-plan/migrate-from-prof-to-starter.md +++ b/versioned_docs/version-8.3/components/console/manage-plan/migrate-from-prof-to-starter.md @@ -11,7 +11,7 @@ Here are a few important remarks to consider before completing the migration ste - Since the two plans have different types of clusters included and fees for those, we recommend comparing the [Professional plan](https://camunda.com/blog/2023/05/camunda-professional-edition-accelerate-projects/) with the [Starter plan](https://camunda.com/blog/2023/09/camunda-starter/) to [understand your monthly costs](https://camunda.com/pricing/starter-plan-price-calculator/) before the migration. - General users and development/production cluster reservations in the Professional plan are migrated “as is” to the Starter plan, which may result in overage costs (e.g. production clusters in Professional will be transferred to production clusters in the Starter plan). 
If you are not using your production cluster in the Professional plan, we recommend you delete it beforehand and create a new development cluster in the Starter plan afterward. - Once you have edited the plan below, the changes will take effect on the first day of your next subscription period. -- If you have any questions, do not hesitate to [contact us](https://camunda.com/contact/). +- If you have any questions, do not hesitate to [contact us](/reference/contact.md). ::: diff --git a/versioned_docs/version-8.3/components/modeler/bpmn/call-activities/call-activities.md b/versioned_docs/version-8.3/components/modeler/bpmn/call-activities/call-activities.md index fa612a2eab7..0cdba794a12 100644 --- a/versioned_docs/version-8.3/components/modeler/bpmn/call-activities/call-activities.md +++ b/versioned_docs/version-8.3/components/modeler/bpmn/call-activities/call-activities.md @@ -32,10 +32,6 @@ When a non-interrupting boundary event is triggered, the created process instanc ## Variable mappings -By default, all variables of the call activity scope are copied to the created process instance. This can be limited to copying only the local variables of the call activity, by setting the attribute `propagateAllParentVariables` to `false`. - -By disabling this attribute, variables existing at higher scopes are no longer copied. If the attribute `propagateAllParentVariables` is set (default: `true`), all variables are propagated to the child process instance. - Input mappings can be used to create new local variables in the scope of the call activity. These variables are also copied to the created process instance. If the attribute `propagateAllChildVariables` is set (default: `true`), all variables of the created process instance are propagated to the call activity. This behavior can be customized by defining output mappings at the call activity. The output mappings are applied on completing the call activity and only those variables that are defined in the output mappings are propagated. diff --git a/versioned_docs/version-8.3/components/modeler/desktop-modeler/telemetry/telemetry.md b/versioned_docs/version-8.3/components/modeler/desktop-modeler/telemetry/telemetry.md index 36a5446325b..382a2e2f72d 100644 --- a/versioned_docs/version-8.3/components/modeler/desktop-modeler/telemetry/telemetry.md +++ b/versioned_docs/version-8.3/components/modeler/desktop-modeler/telemetry/telemetry.md @@ -54,8 +54,8 @@ These events include the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> ### Deployment and start instance events @@ -68,8 +68,8 @@ The `Deployment Event` and `Start Instance` have the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the event of an unsuccessful deployment, an `error` property will be present in the payload containing an error code. 
diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-button.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-button.md index 577b3c0615e..e3614b67505 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-button.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-button.md @@ -6,7 +6,7 @@ description: A form element to trigger form actions A button allowing the user to trigger form actions. -![Form Button Symbol](/img/form-icons/form-button.svg) +Form Button Symbol ### Configurable properties @@ -15,4 +15,4 @@ A button allowing the user to trigger form actions. - **Submit**: Submit the form (given there are no validation errors). - **Reset**: Reset the form, all user inputs will be lost. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the button. -- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checkbox.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checkbox.md index 834a18cfa63..9546284e2d4 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checkbox.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checkbox.md @@ -6,7 +6,7 @@ description: A form element to read and edit boolean data A checkbox allowing the user to read and edit boolean data. -![Form Checkbox Symbol](/img/form-icons/form-checkbox.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A checkbox allowing the user to read and edit boolean data. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
### Datatypes diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checklist.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checklist.md index 836f3f61e28..80082d7fc28 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checklist.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-checklist.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A set of checkboxes providing data multi-selection for small datasets. -![Form Checklist Symbol](/img/form-icons/form-checklist.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A set of checkboxes providing data multi-selection for small datasets. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checklist. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checklist must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Datatypes diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-datetime.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-datetime.md index aeb127128dc..c02c2912925 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-datetime.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-datetime.md @@ -6,7 +6,7 @@ description: Learn about the datetime form element to read and edit date and tim A component allowing the user to read and edit date and time data. -![Form Datetime Symbol](/img/form-icons/form-datetime.svg) +Form Datetime Symbol ## Configurable properties @@ -19,7 +19,7 @@ A component allowing the user to read and edit date and time data. - **Read only**: Makes the datetime component read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the datetime component, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the datetime component. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Time format**: Defines the time data format. This can either be **UTC offset**, **UTC normalized**, or **No timezone**. - **Time interval**: Defines the steps of time that can be selected in the time input field. - **Disallow past dates**: Enables the restriction to not allow past dates. diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-group.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-group.md index be0ae7f87b0..5bdd41712a5 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-group.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-group.md @@ -6,7 +6,7 @@ description: Learn about the group form element to group multiple form elements The group element serves as a container to group various form elements together. It allows for nesting of fields and assists in organizing complex forms. -![Form Group Symbol](/img/form-icons/form-group.svg) +Form Group Symbol ### Configurable properties @@ -14,7 +14,7 @@ The group element serves as a container to group various form elements together. - **Path**: Assigns a path that maps its children into a data object, may be left empty, defined as a variable name or a dot separated variable accessor. See the [data binding docs](../configuration/forms-config-data-binding.md) for more details. - **Show outline**: Can be toggled on and off to display a separating outline around the group - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Usage diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-image.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-image.md index 61e461f7213..33404c3eed3 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-image.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-image.md @@ -6,11 +6,11 @@ description: Learn about the image view form element to display an image. An element allowing the user to display images. -![Form Image Symbol](/img/form-icons/form-image.svg) +Form Image Symbol ## Configurable properties - **Image source**: Specifies the image source via [expression](../../feel/language-guide/feel-expressions-introduction.md), [templating syntax](../configuration/forms-config-templating-syntax.md) or [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (hyperlink or data URI). - **Alternative text**: Provides an alternative text to the image in case it cannot be displayed. 
Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the image. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-number.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-number.md index 6e9d108ca19..902253e3642 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-number.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-number.md @@ -6,7 +6,7 @@ description: A form element to read and edit numeric data A number field allowing the user to read and edit numeric data. -![Form Number Symbol](/img/form-icons/form-number.svg) +Form Number Symbol ### Configurable properties @@ -19,7 +19,7 @@ A number field allowing the user to read and edit numeric data. - **Read only**: Makes the number field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the number field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the number. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Serialize to string**: Configures the output format of the datetime value. This enables unlimited precision digits. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Number field must contain a value. 
diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-radio.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-radio.md index ec4630789ce..bdb2c430ef9 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-radio.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-radio.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A radio button allowing the user to select one of multiple data option for small datasets. -![Form Radio Symbol](/img/form-icons/form-radio.svg) +Form Radio Symbol ### Configurable properties @@ -18,7 +18,7 @@ A radio button allowing the user to select one of multiple data option for small - **Disabled**: Disables the radio component, for use during development. - **Options source**: Radio components can be configured with an options source defining the individual choices the component provides, refer to [options source docs](../configuration/forms-config-options.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the radio. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One radio option must be selected. diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-select.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-select.md index 17ae7dd2ce0..102a401ff77 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-select.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-select.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A Select dropdown allowing the user to select one of multiple data option from larger datasets. -![Form Select Symbol](/img/form-icons/form-select.svg) +Form Select Symbol ### Configurable properties @@ -18,7 +18,7 @@ A Select dropdown allowing the user to select one of multiple data option from l - **Read only**: Makes the select read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the select, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the select. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). 
+- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Options source**: Selects can be configured with an options source defining the individual choices the select provides, refer to [options source docs](../configuration/forms-config-options.md). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One select entry must be selected. diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-spacer.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-spacer.md index 22043da492b..a0ee765451d 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-spacer.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-spacer.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add vertical space between eleme A **spacer** element is used to create a defined amount of vertical space between two elements. -![Form Spacer Symbol](/img/form-icons/form-spacer.svg) +Form Separator Symbol ## Configurable properties diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-taglist.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-taglist.md index 97411ce8db5..e4dd64059ea 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-taglist.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-taglist.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A complex and searchable tag based component providing multi-selection for large datasets. -![Form Taglist Symbol](/img/form-icons/form-taglist.svg) +Form Taglist Symbol ### Configurable properties @@ -14,7 +14,7 @@ A complex and searchable tag based component providing multi-selection for large - **Field description**: Description provided below the taglist. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Key**: Binds the field to a form variable, refer to [data binding docs](../configuration/forms-config-data-binding.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the taglist. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. 
Otherwise, a validation error will be displayed. - **Required**: Taglist must contain a value. - **Options source**: Taglists can be configured with an options source defining the individual choices your user can make, refer to [options source docs](../configuration/forms-config-options.md). diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-text.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-text.md index 4da6171d127..bcad868c71e 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-text.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-text.md @@ -6,13 +6,13 @@ description: A form element to display static information. A text component allowing to display static information to the user. -![Form Text Symbol](/img/form-icons/form-text.svg) +Form Text Symbol ## Configurable properties - **Text**: Either an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). After evaluation, the result is processed using a Markdown renderer that supports basic HTML and [GitHub-flavored Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). To ensure safety and prevent cross-site scripting in Camunda Forms, potentially harmful HTML elements will not be rendered. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Example text configurations diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textarea.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textarea.md index d851a970a92..2e66e4d0712 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textarea.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textarea.md @@ -6,7 +6,7 @@ description: Learn about the text area form element to read and edit multiline t A text area allowing the user to read and edit multiline textual data. -![Form Textarea Symbol](/img/form-icons/form-textArea.svg) +Form Textarea Symbol ## Configurable properties @@ -17,7 +17,7 @@ A text area allowing the user to read and edit multiline textual data. - **Read only**: Makes the text area read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text area; for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text area. -- **Columns**: Space the field will use inside its row. 
**Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text area must contain a value. - **Minimum length**: Text area must have at least `n` characters. diff --git a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textfield.md b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textfield.md index 1aafa0e824f..da45e37b3c9 100644 --- a/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textfield.md +++ b/versioned_docs/version-8.3/components/modeler/forms/form-element-library/forms-element-library-textfield.md @@ -6,7 +6,7 @@ description: A form element to read and edit textual data A text field allowing the user to read and edit textual data. -![Form Text Field Symbol](/img/form-icons/form-textField.svg) +Form Text Field Symbol ### Configurable properties @@ -17,7 +17,7 @@ A text field allowing the user to read and edit textual data. - **Read only**: Makes the text field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text field. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text field must contain a value. - **Regular expression validation**: Use predefined validation patterns. Available options are: `Email`, `Phone`, and `Custom`. diff --git a/versioned_docs/version-8.3/components/modeler/web-modeler/camunda-marketplace.md b/versioned_docs/version-8.3/components/modeler/web-modeler/camunda-marketplace.md index 9b026b9d57a..5fee9c721d8 100644 --- a/versioned_docs/version-8.3/components/modeler/web-modeler/camunda-marketplace.md +++ b/versioned_docs/version-8.3/components/modeler/web-modeler/camunda-marketplace.md @@ -14,6 +14,10 @@ The Camunda Marketplace can be accessed via your [browser](https://marketplace.c ## Visit the Camunda Marketplace +:::note +Connectors created by partners or the community are not part of the commercial Camunda product. 
Camunda does not support these Connectors as part of its commercial services to enterprise customers. Please evaluate each client to make sure it meets your requirements before using. +::: + To navigate to the Camunda Marketplace, take the following steps: 1. Log in to your Camunda account, and navigate to Web Modeler using the **Camunda components** icon in the top left corner of your console. Click **Modeler**. diff --git a/versioned_docs/version-8.3/components/zeebe/zeebe-overview.md b/versioned_docs/version-8.3/components/zeebe/zeebe-overview.md index 85bbfa8deb7..d6a2492fed5 100644 --- a/versioned_docs/version-8.3/components/zeebe/zeebe-overview.md +++ b/versioned_docs/version-8.3/components/zeebe/zeebe-overview.md @@ -20,12 +20,6 @@ With Zeebe you can: For documentation on deploying Zeebe as part of Camunda 8 Self-Managed, refer to the [deployment guide](../../self-managed/zeebe-deployment/zeebe-installation.md). -## Enterprise support for Zeebe - -Paid support for Zeebe is available via either Camunda 8 Starter or Camunda 8 Enterprise. Customers can choose either plan based on their process automation requirements. Camunda 8 Enterprise customers also have the option of on-premises or private cloud deployment. - -Additionally, regardless of how you are working with Zeebe and Camunda 8, you can always find support through the [community](/contact/). - ## Next steps - Get familiar with [technical concepts](technical-concepts/technical-concepts-overview.md). diff --git a/versioned_docs/version-8.3/guides/getting-started-orchestrate-microservices.md b/versioned_docs/version-8.3/guides/getting-started-orchestrate-microservices.md index 9c4ca395f39..6164a6eddc5 100644 --- a/versioned_docs/version-8.3/guides/getting-started-orchestrate-microservices.md +++ b/versioned_docs/version-8.3/guides/getting-started-orchestrate-microservices.md @@ -19,7 +19,7 @@ While this guide uses code snippets in Java, you do not need to be a Java develo ## Prerequisites - Ensure you have a valid [Camunda 8 account](create-account.md), or sign up if you still need one. -- Java >= 8 +- Java ≥ 8 - Maven - IDE (IntelliJ, VSCode, or similar) - Download and unzip or clone the [repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java` diff --git a/versioned_docs/version-8.3/guides/migrating-from-camunda-7/index.md b/versioned_docs/version-8.3/guides/migrating-from-camunda-7/index.md index 000e09dd12f..39764a6da25 100644 --- a/versioned_docs/version-8.3/guides/migrating-from-camunda-7/index.md +++ b/versioned_docs/version-8.3/guides/migrating-from-camunda-7/index.md @@ -53,4 +53,4 @@ As described earlier in this guide, migration is an ongoing topic and this guide - Discuss workload migrations (operations) - Eventual consistency -[Reach out to us](/contact/) to discuss your specific migration use case. +[Reach out to us](/reference/contact.md) to discuss your specific migration use case. diff --git a/versioned_docs/version-8.3/reference/alpha-features.md b/versioned_docs/version-8.3/reference/alpha-features.md index 8a65e9ee0fa..c8229dff48c 100644 --- a/versioned_docs/version-8.3/reference/alpha-features.md +++ b/versioned_docs/version-8.3/reference/alpha-features.md @@ -22,7 +22,7 @@ Limitations of alpha features and components include: - Not necessarily feature-complete. - Might lack full documentation. - No guaranteed updates to newer releases. 
-- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://docs.camunda.org/enterprise/support/). +- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://camunda.com/services/enterprise-support-guide/). - No maintenance service. - (SaaS) No availability targets. - Released outside the standard [release policy](release-policy.md). @@ -32,7 +32,7 @@ To learn more about using alpha features, see [enabling alpha features](/compone :::note - Alpha features can also be included in a minor version (stable) release. -- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/contact). +- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/reference/contact.md). ::: @@ -43,7 +43,7 @@ Once features and components are released and considered stable, they become gen Stable features and components are: - Ready for production use for most users with minimal risk. -- Supported by [L1 Priority-level support](https://docs.camunda.org/enterprise/support/#priority-level) for production use. +- Supported by [L1 Priority-level support](https://camunda.com/services/enterprise-support-guide/) for production use. - Fully documented. A release or component is considered stable if it has passed all verification and test stages and can be released to production. diff --git a/versioned_docs/version-8.3/reference/contact.md b/versioned_docs/version-8.3/reference/contact.md new file mode 100644 index 00000000000..4e6c7fc3892 --- /dev/null +++ b/versioned_docs/version-8.3/reference/contact.md @@ -0,0 +1,45 @@ +--- +id: contact +title: Contact +description: Contact Camunda, submit feedback, find support using the Camunda community forum, note bug reports and feature requests, and review security notices. +keywords: + [ + support, + contact-us, + get-support, + help, + need-help, + bug, + bug-report, + feature-request, + issue, + enterprise-support, + ] +--- + +There are a few different channels you can reach us based on your needs: + +- We encourage everyone to participate in our **community** via the [Camunda community forum](https://forum.camunda.io/), where you can exchange ideas with other Camunda users, as well as Camunda employees. For all other Camunda community programs and resources, visit our [Camunda Developer Hub](https://camunda.com/developers). + +- We welcome your **bug** reports and **feature requests** through our community channels mentioned above. + +- For **security-related issues**, review our [security notices](/reference/notices.md) for the most up-to-date information on known issues and steps to report a vulnerability so we can solve the problem as quickly as possible. Do not use GitHub for security-related issues. + +- **Feedback and support** can be submitted or requested via JIRA by following our [Enterprise support process](https://camunda.com/services/enterprise-support-guide/). 
All users can also find feedback and support options in the Help Center or [Camunda community forum](https://forum.camunda.io/). + +- For sales inquiries, information about Camunda 8 performance and benchmarking, or anything not listed above, use our [Contact Us](https://camunda.com/contact/) form. + +## Locating Camunda 8 credentials + +Need assistance locating your Camunda 8 credentials? You can obtain these credentials from Camunda by submitting a **Help Request**. To do this, take the following steps: + +1. Log in to [Jira](https://jira.camunda.com/secure/Dashboard.jspa). +2. Click **Create** in the navigation bar at the top of the page. This launches a **Create Issue** pop-up. +3. In the **Issue Type** field, select **Help Request**. +4. In the **Help Request Type** field, click the option that reads **I need the credentials for downloading Camunda**. +5. In the **Summary** and **Description** fields, **I need the credentials for downloading Camunda** will populate by default. + ![completed help request example](./img/create-issue-request.png) +6. (Optional) Add more details, such as the priority level or authorized support contacts. +7. Click **Create** at the bottom of the pop-up **Create Issue** box. + +After completing these steps, your request is generated. Find additional details on submitting a self-service help request in the [Enterprise Support Guide](https://camunda.com/services/enterprise-support-guide/). diff --git a/versioned_docs/version-8.3/reference/img/create-issue-request.png b/versioned_docs/version-8.3/reference/img/create-issue-request.png new file mode 100644 index 00000000000..374fdfece6f Binary files /dev/null and b/versioned_docs/version-8.3/reference/img/create-issue-request.png differ diff --git a/versioned_docs/version-8.3/reference/notices.md b/versioned_docs/version-8.3/reference/notices.md index ccec31dc756..71fd93e0cfc 100644 --- a/versioned_docs/version-8.3/reference/notices.md +++ b/versioned_docs/version-8.3/reference/notices.md @@ -74,11 +74,11 @@ Tasklist The REST API functionality of Tasklist 8.2.0 and 8.2.1 allows unauthenticated access to the following methods/URLs: -- GET /v1/tasks/{taskId} +- GET /v1/tasks/\{taskId} - POST /v1/tasks/search -- POST /v1/tasks/{taskId}/variables/search -- POST /v1/forms/{formId} -- POST /v1/variables/{variableId} +- POST /v1/tasks/\{taskId}/variables/search +- POST /v1/forms/\{formId} +- POST /v1/variables/\{variableId} Find more information about the methods in our [Tasklist REST API documentation](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md). @@ -115,7 +115,7 @@ At this point, Camunda is not aware of any specific attack vector in Tasklist al #### How to determine if the installation is affected -You are Tasklist version (8.0.3 >= version <= 8.0.7) or <= 8.1.2 +You are using Tasklist version (8.0.3 ≤ version ≤ 8.0.7) or ≤ 8.1.2 #### Solution @@ -142,7 +142,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.11 or <= 1.3.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.11 or ≤ 1.3.6 #### Solution @@ -168,7 +168,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.8 or <= 1.1.9 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.8 or ≤ 1.1.9 #### Solution @@ -194,7 +194,7 @@ Specifically, IAM bundles log4j-api and log4j-to-slf4j.
However, IAM does not bu #### How to determine if the installation is affected -You are using IAM version <= 1.2.8 +You are using IAM version ≤ 1.2.8 #### Solution @@ -219,7 +219,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.7 or <= 1.1.8 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.7 or ≤ 1.1.8 #### Solution @@ -248,7 +248,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.7 +You are using IAM version ≤ 1.2.7 #### Solution @@ -273,7 +273,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.6 or <= 1.1.7 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.6 or ≤ 1.1.7 #### Solution @@ -302,7 +302,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.6 +You are using IAM version ≤ 1.2.6 #### Solution @@ -327,7 +327,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.5 or <= 1.1.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.5 or ≤ 1.1.6 #### Solution @@ -357,7 +357,7 @@ Still, Camunda recommends applying fixes as mentioned in the Solution section be #### How to determine if the installation is affected -You are using IAM version <= 1.2.5 +You are using IAM version ≤ 1.2.5 #### Solution diff --git a/versioned_docs/version-8.3/reference/regions.md b/versioned_docs/version-8.3/reference/regions.md index c3104ed7238..6d373f5a793 100644 --- a/versioned_docs/version-8.3/reference/regions.md +++ b/versioned_docs/version-8.3/reference/regions.md @@ -9,7 +9,7 @@ When you create a cluster in Camunda 8 SaaS, you must specify a region for that Currently, we make these regions available for customers on the Trial, Starter, and Enterprise Plans. Enterprise customers can discuss custom regions with their Customer Success Manager. :::note -Our Console and Web Modeler components are currently hosted in the EU. [Contact us](https://camunda.com/contact/) if you have additional questions. +Our Console and Web Modeler components are currently hosted in the EU. [Contact us](/reference/contact.md) if you have additional questions. ::: Below, find a list of regions currently supported in Camunda 8 SaaS. @@ -19,6 +19,7 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. - Belgium, Europe (europe-west1) - Iowa, North America (us-central1) - London, Europe (europe-west2) +- Singapore, Asia (asia-southeast1) - South Carolina, North America (us-east1) - Sydney, Australia (australia-southeast1) - Toronto, North America (northamerica-northeast2) @@ -26,5 +27,5 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. You can find the locations behind the region codes [on the Google page](https://cloud.google.com/about/locations). :::note -Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](https://camunda.com/contact/) as we are able to make additional regions available on request. 
+Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](/reference/contact.md) as we are able to make additional regions available on request. ::: diff --git a/versioned_docs/version-8.3/reference/status.md b/versioned_docs/version-8.3/reference/status.md index c8de779c5d2..ecb840a23ab 100644 --- a/versioned_docs/version-8.3/reference/status.md +++ b/versioned_docs/version-8.3/reference/status.md @@ -21,4 +21,4 @@ To receive service status updates: ## Support -Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/contact). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). +Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/reference/contact.md). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). diff --git a/versioned_docs/version-8.3/reference/supported-environments.md b/versioned_docs/version-8.3/reference/supported-environments.md index 73486e67243..e6094772fa8 100644 --- a/versioned_docs/version-8.3/reference/supported-environments.md +++ b/versioned_docs/version-8.3/reference/supported-environments.md @@ -8,7 +8,7 @@ The supported environments page lists browsers, operating systems, clients, depl **If the particular technology is not listed, we cannot resolve issues caused by the usage of that unlisted technology.** -You may [raise a feature request](/contact) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/contact) to work with Consulting services. +You may [raise a feature request](/reference/contact.md) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/reference/contact.md) to work with Consulting services. Recommendations are denoted with [recommended], however, other options are supported as well. diff --git a/versioned_docs/version-8.3/self-managed/concepts/multi-tenancy.md b/versioned_docs/version-8.3/self-managed/concepts/multi-tenancy.md index 89f040b73ee..d50a7f08c5e 100644 --- a/versioned_docs/version-8.3/self-managed/concepts/multi-tenancy.md +++ b/versioned_docs/version-8.3/self-managed/concepts/multi-tenancy.md @@ -21,7 +21,7 @@ Multi-tenancy is currently only available for Camunda 8 Self-Managed with authen ## Multi-tenancy in Camunda 8 -Multi-tenancy in the context of Camunda 8 refers to the ability of the Camunda 8 platform to serve multiple distinct +Multi-tenancy in the context of Camunda 8 refers to the ability of the Camunda platform to serve multiple distinct [tenants](/self-managed/identity/user-guide/tenants/managing-tenants.md) or clients within a single installation. Multi-tenancy in Camunda 8 extends these capabilities to cater to the needs of different departments, teams, or even external clients, all within a shared Camunda environment. 
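Before looking more closely at the concept, note that in a Self-Managed installation deployed with the Camunda Helm chart, multi-tenancy is typically switched on through a single chart flag (authentication via Identity and Keycloak must be enabled, as noted above). The values snippet below is only a minimal sketch and assumes the chart exposes a `global.multitenancy.enabled` flag; refer to the installation and Identity documentation for the authoritative settings.

```yaml
# Hedged sketch: enabling multi-tenancy via Camunda Helm chart values.
# Assumes the chart exposes a global.multitenancy.enabled flag; Identity
# with Keycloak must also be enabled for tenants to be managed.
global:
  multitenancy:
    enabled: true
```

With the flag in place, tenants themselves are created and assigned through Identity, as described in the tenants guide linked above.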
Here's a closer look at what multi-tenancy is in Camunda 8: diff --git a/versioned_docs/version-8.3/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md b/versioned_docs/version-8.3/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md index a7ea08ab9fb..513a05bc808 100644 --- a/versioned_docs/version-8.3/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md +++ b/versioned_docs/version-8.3/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md @@ -5,16 +5,27 @@ sidebar_label: "Connect to an existing Keycloak instance" description: "Learn how to connect Identity to your existing Keycloak instance." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + In this guide, we'll demonstrate how to connect Identity to your existing Keycloak instance. ## Prerequisites -- Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/22.0.1/server_admin/#using-the-admin-console) -- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/22.0.1/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak. +- Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/23.0.1/server_admin/#using-the-admin-console) +- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/latest/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak ## Steps -To connect Identity to an existing Keycloak instance, take the following steps: +To connect Identity to an existing Keycloak instance, take the following steps for your Camunda installation: + + + + 1. Log in to your Keycloak Admin Console. 2. Select the realm you would like to connect Identity to. In our example, this is **camunda-platform**. @@ -22,7 +33,7 @@ To connect Identity to an existing Keycloak instance, take the following steps: 3. Select **Clients** in the navigation menu, and click the **Create** button to create a new client. 4. Enter a client ID and click **Next**. :::note What client ID should I use? - By default, Identity uses the Client ID `camunda-identity`, so we recommend using this too. If you choose a different client ID, this will need to be set in the Identity application [environment variables](/docs/self-managed/identity/deployment/configuration-variables.md). + By default, Identity uses the Client ID `camunda-identity`, so we recommend using this too. If you choose a different client ID, this will need to be set in the Identity application [environment variables](/self-managed/identity/deployment/configuration-variables.md). ::: ![keycloak-admin-client-add-1](../img/keycloak-admin-client-add-1.png) 5. Toggle **Client authentication** to `on`, select **Service accounts roles** and click **Next**. @@ -38,16 +49,28 @@ To connect Identity to an existing Keycloak instance, take the following steps: Identity is designed to allow users to manage the various entities related to Camunda. To achieve this, it requires specific access to the realm. ::: 10. Navigate to the **Credentials** tab and copy the client secret. -11. Set the `IDENTITY_CLIENT_SECRET` [environment variable](/docs/self-managed/identity/deployment/configuration-variables.md) with the value from **Step 9**. -12. Set the `KEYCLOAK_REALM` [environment variable](/docs/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. +11. 
Set the `IDENTITY_CLIENT_SECRET` [environment variable](/self-managed/identity/deployment/configuration-variables.md) with the value from **Step 9**. +12. Set the `KEYCLOAK_REALM` [environment variable](/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. :::tip If you are using a specific realm, you need to set additional variables to use the intended realm. - See the [environment variables](/docs/self-managed/identity/deployment/configuration-variables.md) page for details of Keycloak-specific variables to consider. + See the [environment variables](/self-managed/identity/deployment/configuration-variables.md) page for details of Keycloak-specific variables to consider. ::: 13. Start Identity. + + + +1. Log in to your Keycloak Admin Console. +2. Verify the name of the realm you would like to connect Identity to. In our example, this is **camunda-platform**. + ![keycloak-admin-realm-select](../img/keycloak-admin-realm-select.png) +3. Set the `KEYCLOAK_REALM` [environment variable](/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. +4. Start Identity. + + + + :::note What does Identity create when starting? -Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/docs/self-managed/identity/deployment/starting-configuration.md). +Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/self-managed/identity/deployment/starting-configuration.md). ::: ## Considerations diff --git a/versioned_docs/version-8.3/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md b/versioned_docs/version-8.3/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md index f39ada6f300..4d2c0a73a62 100644 --- a/versioned_docs/version-8.3/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md +++ b/versioned_docs/version-8.3/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md @@ -4,6 +4,9 @@ title: Deploy diagram description: "Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed. Follow the steps below to deploy a diagram: 1. Click the rocket-shaped deployment icon: @@ -30,20 +33,40 @@ Multi-tenancy is only available with authentication enabled [through Identity](/ ![deployment via Camunda 8](./img/deploy-endpoint.png) -4. Select **Basic**, and input your username and password in case your gateway requires basic authentication: +4. Select your authentication method, and input the required credentials: + + + + + +For **basic authentication**, input your username and password: ![basic auth configuration](./img/deploy-with-basic-auth.png) -5. Select **OAuth**, and input the credentials in case your gateway requires authentication with OAuth: + -:::note -The OAuth URL needs to contain the full path to the token endpoint, i.e. `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. -::: + + +For **OAuth**, input the credentials for your OAuth provider. 
These are configured as part of the default [Helm installation](/self-managed/platform-deployment/helm-kubernetes/deploy.md) and can be discovered in [Identity](/self-managed/identity/what-is-identity.md), or are set by Zeebe [environment variables](/self-managed/zeebe-deployment/security/client-authorization.md#environment-variables). ![oauth configuration](./img/deploy-with-oauth.png) -6. Select the **Remember** checkbox if you want to locally store the connection information. +| Name | Description | Example value | +| --------------- | ------------------------------------ | ----------------------------------------------------------------------------------------- | +| Client ID | The name of your Zeebe client. | `zeebe` | +| Client secret | The password of your Zeebe client. | `zecret` | +| OAuth token url | The full path to the token endpoint. | `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. | +| OAuth audience | The permission name for Zeebe. | `zeebe-api` | + + + + +5. Select the **Remember** checkbox if you want to locally store the connection information. -7. Click **Deploy** to perform the deployment. +6. Click **Deploy** to perform the deployment. ![deployment successful](./img/deploy-success.png) diff --git a/versioned_docs/version-8.3/self-managed/operate-deployment/importer-and-archiver.md b/versioned_docs/version-8.3/self-managed/operate-deployment/importer-and-archiver.md index 04cad62c780..60e561d9c62 100644 --- a/versioned_docs/version-8.3/self-managed/operate-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.3/self-managed/operate-deployment/importer-and-archiver.md @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.operate.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md b/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md index bae5f4f7250..e2a9e2d3bb2 100644 --- a/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md +++ b/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md @@ -5,6 +5,9 @@ description: "How to perform a backup and restore of Operate and Tasklist data." keywords: ["backup", "backups"] --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + :::note This release introduces breaking changes, including: @@ -37,33 +40,68 @@ The backup API can be reached via the Actuator management port, which by default Before you can use the backup and restore feature: 1. The [Elasticsearch snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html) must be configured. -2. 
Operate and Tasklist must be configured with the repository name using the following configuration parameters: +2. Operate and Tasklist must be configured with the repository name using one of the following configuration options: + + + + + +#### Operate ```yaml -for Operate: camunda: operate: backup: repositoryName: +``` + + + + + +#### Operate + +``` +CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= +``` + + + + +#### Tasklist -for Tasklist: + + + + +```yaml camunda: tasklist: backup: repositoryName: ``` -or with environmental variables: + -``` -for Operate: -CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= + -for Tasklist: +``` CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME= - ``` + + + ## Create backup API During backup creation Operate can continue running. To create the backup, call the following endpoint: diff --git a/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/optimize-backup.md b/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/optimize-backup.md index c694ff53341..823de9ef51a 100644 --- a/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/optimize-backup.md +++ b/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/optimize-backup.md @@ -25,7 +25,7 @@ Optimize provides an API to trigger a backup and retrieve information about a gi The following prerequisites must be set up before using the backup API: 1. A snapshot repository of your choice must be registered with Elasticsearch. -2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize configuration: +2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize [`environment-config.yaml`]($optimize$/self-managed/optimize-deployment/configuration/system-configuration/): ```yaml backup: @@ -58,7 +58,7 @@ POST actuator/backups ### Example request -``` +```shell curl --request POST 'http://localhost:8092/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": 123456 }' @@ -101,8 +101,8 @@ GET actuator/backup ### Example request -``` -curl ---request GET 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request GET 'http://localhost:8092/actuator/backups/123456' ``` ### Example response @@ -161,8 +161,8 @@ DELETE actuator/backups/{backupId} ### Example request -``` -curl ---request DELETE 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request DELETE 'http://localhost:8092/actuator/backups/123456' ``` ## Restore backup @@ -184,6 +184,6 @@ To restore a given backup, the following steps must be performed: Example Elasticsearch request: -``` +```shell curl --request POST `http://localhost:9200/_snapshot/repository_name/camunda_optimize_123456_3.9.0_part_1_of_2/_restore?wait_for_completion=true` ``` diff --git a/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md b/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md index 49f49fa2799..de878ae6559 100644 --- a/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md +++ b/versioned_docs/version-8.3/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md @@ -26,7 +26,7 @@ Even when the underlying storage bucket is the same, backups from one are not co ### S3 backup store -To store your backups in any S3 compatible storage system such 
as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket: +To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -83,7 +83,7 @@ zeebe.broker.data.backup.s3.compression: zstd # or use environment variable ZEEB ### GCS backup store -To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use: +To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -138,7 +138,7 @@ The `backupId` cannot be reused, even if the backup corresponding to the backup

    Example request -``` +```shell curl --request POST 'http://localhost:9600/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": "100" }' @@ -181,7 +181,7 @@ GET actuator/backups/{backupId}
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups/100' ``` @@ -254,7 +254,7 @@ GET actuator/backups
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups' ``` @@ -332,7 +332,7 @@ DELETE actuator/backups/{backupId}
    Example request -``` +```shell curl --request DELETE 'http://localhost:9600/actuator/backups/100' ``` diff --git a/versioned_docs/version-8.3/self-managed/operational-guides/troubleshooting/log-levels.md b/versioned_docs/version-8.3/self-managed/operational-guides/troubleshooting/log-levels.md index f5423bb4a8b..365aa71fd33 100644 --- a/versioned_docs/version-8.3/self-managed/operational-guides/troubleshooting/log-levels.md +++ b/versioned_docs/version-8.3/self-managed/operational-guides/troubleshooting/log-levels.md @@ -24,3 +24,4 @@ Enable logging for each component of Camunda 8 using the following instructions: - [Operate](/self-managed/operate-deployment/operate-configuration.md#logging) - [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#logging) - [Web Modeler](/self-managed/modeler/web-modeler/configuration/logging.md) +- [Identity](/self-managed/identity/user-guide/configuration/configure-logging.md) diff --git a/versioned_docs/version-8.3/self-managed/operational-guides/update-guide/introduction.md b/versioned_docs/version-8.3/self-managed/operational-guides/update-guide/introduction.md index 842f5075b66..3c3af19dd8f 100644 --- a/versioned_docs/version-8.3/self-managed/operational-guides/update-guide/introduction.md +++ b/versioned_docs/version-8.3/self-managed/operational-guides/update-guide/introduction.md @@ -12,10 +12,6 @@ When updating from one minor version to the next, you do not need to update to e Depending on your amount of data, run a minor version for at least 24 hours before updating to the next version. -:::note -Versions prior to Camunda 8 are listed below and identified as Camunda Cloud versions. -::: - There is a dedicated update guide for each version: ### [Camunda 8.2 to Camunda 8.3](../820-to-830) diff --git a/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md b/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md index af2a54e44c9..b2e80ec1fdc 100644 --- a/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md +++ b/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md @@ -235,7 +235,7 @@ Don't forget to set the `serviceAccountName` of the deployment/statefulset to th ### Web Modeler -Since Web Modeler RestAPI uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. +As the Web Modeler REST API uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. Web Modeler already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. 
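To make the wrapper concrete before the Kubernetes-specific settings below, here is a minimal, hedged sketch of what the `restapi` datasource can look like when the wrapper's IAM plugin handles authentication. It assumes the container accepts standard Spring Boot datasource environment variables; the endpoint, database, and user values are placeholders, and the Web Modeler database configuration page linked above remains the authoritative reference.

```yaml
# Hedged sketch: pointing the Web Modeler restapi at Aurora PostgreSQL through
# the bundled aws-advanced-jdbc-wrapper with its IAM authentication plugin.
# Replace the <...> placeholders with your own values.
- name: SPRING_DATASOURCE_DRIVER_CLASS_NAME
  value: software.amazon.jdbc.Driver
- name: SPRING_DATASOURCE_URL
  value: jdbc:aws-wrapper:postgresql://<aurora-endpoint>:5432/<database>?wrapperPlugins=iam
- name: SPRING_DATASOURCE_USERNAME
  value: <iam-enabled-db-user>
```

The `wrapperPlugins=iam` parameter is what makes the wrapper fetch short-lived IAM tokens instead of relying on a static password, which is the point of combining it with IRSA.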
#### Kubernetes configuration diff --git a/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/upgrade.md b/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/upgrade.md index aa66e2ba3bd..0ebb66ba68d 100644 --- a/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/upgrade.md +++ b/versioned_docs/version-8.3/self-managed/platform-deployment/helm-kubernetes/upgrade.md @@ -5,14 +5,12 @@ sidebar_label: "Upgrade" description: "To upgrade to a more recent version of the Camunda Helm charts, there are certain things you need to keep in mind." --- -To upgrade to a more recent version of the Camunda Helm charts, there are certain things you need to keep in mind. - -:::caution - -Ensure to review the [instructions for specific version](#version-update-instructions) before starting the actual upgrade. - +:::note +When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the next **minor** version of the chart. ::: +To upgrade to a more recent version of the Camunda Helm charts, review the [instructions for a specific version](#version-update-instructions). + ### Upgrading where Identity disabled Normally for a Helm upgrade, you run the [Helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) command. If you have disabled Camunda Identity and the related authentication mechanism, you should be able to do an upgrade as follows: diff --git a/versioned_docs/version-8.3/self-managed/react-components/components.md b/versioned_docs/version-8.3/self-managed/react-components/components.md index 02f01ccb9eb..aba76ce736d 100644 --- a/versioned_docs/version-8.3/self-managed/react-components/components.md +++ b/versioned_docs/version-8.3/self-managed/react-components/components.md @@ -15,6 +15,6 @@ Camunda 8 Self-Managed users may also use [Desktop Modeler](../../components/mod :::note -To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/contact). +To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/reference/contact.md). ::: diff --git a/versioned_docs/version-8.3/self-managed/tasklist-deployment/importer-and-archiver.md b/versioned_docs/version-8.3/self-managed/tasklist-deployment/importer-and-archiver.md index c75e1fe9655..54d26a1fe59 100644 --- a/versioned_docs/version-8.3/self-managed/tasklist-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.3/self-managed/tasklist-deployment/importer-and-archiver.md @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.tasklist.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. 
::: ## Archive period diff --git a/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/broker.md b/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/broker.md index 76dca019af5..59e7c1dcbd7 100644 --- a/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/broker.md +++ b/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/broker.md @@ -484,11 +484,11 @@ backpressure: ### zeebe.broker.backpressure.gradient -| Field | Description | Example Value | -| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | +| Field | Description | Example Value | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | #### YAML snippet @@ -503,12 +503,12 @@ backpressure: ### zeebe.broker.backpressure.gradient2 -| Field | Description | Example Value | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. 
| 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | +| Field | Description | Example Value | +| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | +| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | #### YAML snippet diff --git a/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/gateway.md b/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/gateway.md index 9c2e6f2e223..ba7bbe1097d 100644 --- a/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/gateway.md +++ b/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/gateway.md @@ -302,7 +302,8 @@ Each interceptor should be configured with the values described below:
    classNameEntry point of the interceptor, a class which must: + + Entry point of the interceptor, a class which must:
  • implement io.grpc.ServerInterceptor
  • have public visibility
  • have a public default constructor (i.e. no-arg constructor)
  • diff --git a/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/priority-election.md b/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/priority-election.md index 4e466a0640a..d7fc45c0413 100644 --- a/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/priority-election.md +++ b/versioned_docs/version-8.3/self-managed/zeebe-deployment/configuration/priority-election.md @@ -10,8 +10,8 @@ It aims to achieve a more uniform leader distribution by assigning each node a p ## Configuration -Enable priority election by setting `zeebe.broker.raft.enablePriorityElection = "true"` in your config or -by setting the equivalent environment variable `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`. +Enable priority election by setting `zeebe.broker.cluster.raft.enablePriorityElection=true` in your config or +by setting the equivalent environment variable `ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION=true`. If you are using the fixed partitioning scheme (experimental), you may need [additional configuration](fixed-partitioning.md#priority-election). @@ -19,7 +19,7 @@ If you are using the fixed partitioning scheme (experimental), you may need [add With priority election enabled, election latency and thus failover time increases. -The result of leader election is not deterministic and priority election can only increase the chance of having a +The result of a leader election is not deterministic, and priority election can only increase the chance of having a uniform leader distribution, not guarantee it. -Factors such as high load can prevent high priority nodes from becoming the leader. +Factors such as high load can prevent high-priority nodes from becoming the leader. diff --git a/versioned_docs/version-8.4/apis-tools/tasklist-api/tasklist-api-tutorial.md b/versioned_docs/version-8.4/apis-tools/tasklist-api/tasklist-api-tutorial.md index 3dad8bb79cd..5ecdd5ba456 100644 --- a/versioned_docs/version-8.4/apis-tools/tasklist-api/tasklist-api-tutorial.md +++ b/versioned_docs/version-8.4/apis-tools/tasklist-api/tasklist-api-tutorial.md @@ -250,9 +250,8 @@ export class TasklistModule implements OnModuleInit { logger.log("Tasklist credentials fetched"); axiosRef.defaults.baseURL = config.get("TASKLIST_API_ADDRESS"); - axiosRef.defaults.headers[ - "Authorization" - ] = `Bearer ${credentials.access_token}`; + axiosRef.defaults.headers["Authorization"] = + `Bearer ${credentials.access_token}`; axiosRef.defaults.headers["Content-Type"] = "application/json"; setTimeout(this.onModuleInit.bind(this), credentials.expires_in * 1000); // we need convert minutes to milliseconds } diff --git a/versioned_docs/version-8.4/components/best-practices/architecture/extending-human-task-management-c7.md b/versioned_docs/version-8.4/components/best-practices/architecture/extending-human-task-management-c7.md index 93400dc74c7..0c2a9eacd7d 100644 --- a/versioned_docs/version-8.4/components/best-practices/architecture/extending-human-task-management-c7.md +++ b/versioned_docs/version-8.4/components/best-practices/architecture/extending-human-task-management-c7.md @@ -268,6 +268,6 @@ If you target a _TaskInfoEntity_: If you target a _ProcessInstanceInfoEntity_: -- Create a new instance by an _ExecutionListener_ on the process instance start event. The process instance id might not yet be known at this time. 
So either you create your own id and set it as a process variable (to SQL "join" on this later), or you can add a safe point before the listener triggers to make sure the process instance was committed to the database. +- Create a new instance by an _ExecutionListener_ on the process instance start event. The process instance ID might not yet be known at this time. So either you create your own ID and set it as a process variable (to SQL "join" on this later), or you can add a safe point before the listener triggers to make sure the process instance was committed to the database. - Decide when you have to update information in the entity, this depends on various factors (like amount of data, frequency of changes, way of changing data, ...). diff --git a/versioned_docs/version-8.4/components/best-practices/development/invoking-services-from-the-process-c7.md b/versioned_docs/version-8.4/components/best-practices/development/invoking-services-from-the-process-c7.md index 4e05898330d..6e93a22044d 100644 --- a/versioned_docs/version-8.4/components/best-practices/development/invoking-services-from-the-process-c7.md +++ b/versioned_docs/version-8.4/components/best-practices/development/invoking-services-from-the-process-c7.md @@ -160,7 +160,8 @@ Only if the increased latency does not work for your use case, for example, beca
    -

    Call a named bean or java class implementing the +

    + Call a named bean or java class implementing the JavaDelegate interface.

    -

    Use a configurable Connector +

    + Use a configurable Connector
    (REST or SOAP services provided out-of-the-box).

    -

    Pull a service task into an external worker thread and inform process engine of -completion.

    +

    + Pull a service task into an external worker thread and inform process engine of + completion. +

    Execute a script inside the engine.

    @@ -183,7 +187,8 @@ completion.

    -

    Use with +

    + Use with
    BPMN elements.

    @@ -252,7 +257,8 @@ completion.

    -

    Implement +

    + Implement
    via

    @@ -261,8 +267,10 @@ completion.

    Java (in same JVM)

    -

    Expression Language -(can reference Java code)

    +

    + Expression Language + (can reference Java code) +

    BPMN configuration

    @@ -377,9 +385,11 @@ completion.

    Configure via

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -390,9 +400,11 @@ completion.

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -401,9 +413,11 @@ completion.

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -412,9 +426,10 @@ completion.

    -

    BPMN Ext. Element+ - - serviceTask +

    + BPMN Ext. Element+ + + serviceTask
    camunda:
    @@ -423,9 +438,11 @@ completion.

    -

    BPMN Attributes +

    + BPMN Attributes
    - serviceTask + + serviceTask
    camunda:
    @@ -438,13 +455,15 @@ completion.

    -

    BPMN Element +

    + BPMN Element
    script or
    BPMN Attribute
    - scriptTask + + scriptTask
    camunda:
    diff --git a/versioned_docs/version-8.4/components/best-practices/development/service-integration-patterns.md b/versioned_docs/version-8.4/components/best-practices/development/service-integration-patterns.md index 5c5e68ce69e..3b20d54d945 100644 --- a/versioned_docs/version-8.4/components/best-practices/development/service-integration-patterns.md +++ b/versioned_docs/version-8.4/components/best-practices/development/service-integration-patterns.md @@ -120,7 +120,7 @@ You can leverage [message buffering](/docs/components/concepts/messages#message- Retries are not built-in, so if you need to model a loop to retry the initial service call if no response is received. And (at least in the current Zeebe version), there is no possibility to trigger error events for a receive task, which means you need to model error messages as response payload or separate message types — both are discussed later in this post. -A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/contact) to discuss such a scenario in more depth. +A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/reference/contact.md) to discuss such a scenario in more depth. **Summary And recommendations** diff --git a/versioned_docs/version-8.4/components/best-practices/development/understanding-transaction-handling-c7.md b/versioned_docs/version-8.4/components/best-practices/development/understanding-transaction-handling-c7.md index d2465ecc39f..3f39f09aafb 100644 --- a/versioned_docs/version-8.4/components/best-practices/development/understanding-transaction-handling-c7.md +++ b/versioned_docs/version-8.4/components/best-practices/development/understanding-transaction-handling-c7.md @@ -90,29 +90,29 @@ Aside a general strategy to mark service tasks as being save points you will oft **Do** configure a savepoint **after** -- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. +- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. 
when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. -- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. +- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. **Do** configure a savepoint **before** -- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. +- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. 
-- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. +- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. -- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. +- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. The Camunda JobExecutor works (by default) with [exclusive jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs), meaning that just one exclusive job per process instance may be executed at once. Hence, job executor threads will by default not cause optimistic locking exceptions at parallel joins "just by themselves", but other threads using the Camunda API might cause them - either for themselves or also for the job executor. **Don't** configure save points **before** -- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. 
+- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. -- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. +- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. ### Adding save points automatically to every model diff --git a/versioned_docs/version-8.4/components/concepts/clusters.md b/versioned_docs/version-8.4/components/concepts/clusters.md index bba3d0b8af3..d4922663f20 100644 --- a/versioned_docs/version-8.4/components/concepts/clusters.md +++ b/versioned_docs/version-8.4/components/concepts/clusters.md @@ -32,21 +32,18 @@ When your Free Trial plan expires, you are automatically transferred to the Free ### Auto-pause -Free Trial `dev` (or untagged) clusters are automatically paused eight hours after a cluster is created or resumed from a paused state. Auto-pause occurs regardless of cluster usage. +Free Trial clusters are automatically paused after a period of inactivity. Auto-pause occurs regardless of cluster usage. -You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume your cluster](/components/console/manage-clusters/resume-cluster.md/). +You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume your cluster](/components/console/manage-clusters/resume-cluster.md). -- Clusters tagged as `test`, `stage`, or `prod` do not auto-pause. -- Paused clusters are automatically deleted after 30 consecutive paused days. You can change the tag to avoid cluster deletion. -- No data is lost while a cluster is paused. All execution and configuration is saved, but cluster components such as Zeebe and Operate are temporarily disabled until you resume the cluster. +- Clusters tagged as `dev` (or untagged) auto-pause eight hours after the cluster is created or resumed from a paused state. +- Clusters tagged as `test`, `stage`, or `prod` auto-pause if there is no cluster activity for 48 hours. +- Cluster disk space is cleared when a trial cluster is paused. + - You will need to redeploy processes to the cluster once it is resumed from a paused state. + - Cluster configuration settings (for example, API Clients, Connector secrets, and IP allowlists) are saved so you can easily resume a cluster. :::tip - -To prevent auto-pause, you can: - -- Tag the cluster as `test`, `stage`, or `prod` instead of `dev`. -- [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter, Professional, or Enterprise plan. - +To prevent auto-pause, [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter or Enterprise plan. 
::: ## Development clusters @@ -59,7 +56,7 @@ The way this type of cluster works varies depending on if you are using it in th Enterprise plan users can purchase development clusters as part of their Enterprise subscription agreement. Deployment and execution of models (process instances, decision instances, and task users) are included at no extra cost for this type of cluster. Additionally, this type of cluster in the Enterprise plan follows the [standard data retention policy](/docs/components/concepts/data-retention.md) and does not auto-pause when not in use. -Please [contact us](https://camunda.com/contact/) if you are an existing customer and would like to purchase a development cluster. +Please [contact us](/reference/contact.md) if you are an existing customer and would like to purchase a development cluster. ### Development clusters in the Starter plan diff --git a/versioned_docs/version-8.4/components/concepts/data-retention.md b/versioned_docs/version-8.4/components/concepts/data-retention.md index aa10e6a970c..55dc9678644 100644 --- a/versioned_docs/version-8.4/components/concepts/data-retention.md +++ b/versioned_docs/version-8.4/components/concepts/data-retention.md @@ -15,7 +15,7 @@ The following time-to-live settings are configured in SaaS for each application. - **Tasklist**: 30 days - **Zeebe**: 7 days -If there are specific requirements for your use-case, [reach out to us](/contact/) to discuss your data retention needs under an Enterprise plan. +If there are specific requirements for your use-case, [reach out to us](/reference/contact.md) to discuss your data retention needs under an Enterprise plan. For more information on development clusters in the Starter or Professional plans, refer to our [fair usage limits of those plans](https://camunda.com/legal/fair-usage-limits-for-starter-plan/). ## Additional information diff --git a/versioned_docs/version-8.4/components/concepts/messages.md b/versioned_docs/version-8.4/components/concepts/messages.md index c1236ca8d93..9a2c691947a 100644 --- a/versioned_docs/version-8.4/components/concepts/messages.md +++ b/versioned_docs/version-8.4/components/concepts/messages.md @@ -118,6 +118,10 @@ The first message creates a new process instance. The following messages are cor When the instance ends and messages with the same correlation key are not correlated yet, a new process instance is created. +:::note +You may also use TTL to wait for messages that may arrive earlier when combining [start events and intermediate catch events](/docs/components/modeler/bpmn/events.md). +::: + ### Single instance **Problem**: Create exactly one instance of a process diff --git a/versioned_docs/version-8.4/components/concepts/process-instance-creation.md b/versioned_docs/version-8.4/components/concepts/process-instance-creation.md index 6a43c5143a0..b2bb0491f8e 100644 --- a/versioned_docs/version-8.4/components/concepts/process-instance-creation.md +++ b/versioned_docs/version-8.4/components/concepts/process-instance-creation.md @@ -26,9 +26,10 @@ This command creates a new process instance and immediately responds with the pr ![create-process](assets/create-process.png) -

-<details>
-   <summary>Code example</summary>
-   <p>Create a process instance:
+<details>
+   <summary>Code example</summary>
+   <p>

+Create a process instance:

```
zbctl create instance "order-process"
@@ -38,16 +39,16 @@ Response:
```
{
- "processKey": 2251799813685249,
- "bpmnProcessId": "order-process",
- "version": 1,
- "processInstanceKey": 2251799813686019
+ "processKey": 2251799813685249,
+ "bpmnProcessId": "order-process",
+ "version": 1,
+ "processInstanceKey": 2251799813686019
}
```
-   </p>
-</details>
+   </p>
+</details>
    ### Create and await results @@ -67,7 +68,8 @@ When the client resends the command, it creates a new process instance.
   <summary>Code example</summary>
-   <p>Create a process instance and await results:
+   <p>

+Create a process instance and await results:

```
zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}'
@@ -123,7 +125,7 @@ Start instructions are supported for both `CreateProcessInstance` commands.

   <summary>Code example</summary>
    - Create a process instance starting before the 'ship_parcel' element: +Create a process instance starting before the 'ship_parcel' element: ```java client.newCreateInstanceCommand() diff --git a/versioned_docs/version-8.4/components/concepts/what-is-camunda-8.md b/versioned_docs/version-8.4/components/concepts/what-is-camunda-8.md index f225270c009..de0aaa8458e 100644 --- a/versioned_docs/version-8.4/components/concepts/what-is-camunda-8.md +++ b/versioned_docs/version-8.4/components/concepts/what-is-camunda-8.md @@ -105,7 +105,7 @@ The platform and tools are usable in your environment right away, with full publ ## Next steps -- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/contact/) page. +- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/reference/contact.md) page. - [Introduction to Camunda 8](/guides/introduction-to-camunda-8.md) - [Create a Camunda 8 account](/guides/create-account.md) - [Migrate from Camunda 7 to Camunda 8](/guides/migrating-from-camunda-7/index.md) diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/connector-sdk.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/connector-sdk.md index 2a9fe172e35..f445784ad61 100644 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/connector-sdk.md +++ b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/connector-sdk.md @@ -1021,8 +1021,9 @@ For example, you can spin up a custom client with the [Zeebe Java client](/apis-tools/java-client/index.md) as follows: ```java -import io.camunda.connector.MyConnectorFunction -import io.camunda.connector.runtime.jobworker.outbound.ConnectorJobHandler; +import io.camunda.connector.MyConnectorFunction; +import io.camunda.connector.runtime.core.outbound.ConnectorJobHandler; +import io.camunda.connector.validation.impl.DefaultValidationProvider; import io.camunda.zeebe.client.ZeebeClient; public class Main { @@ -1033,7 +1034,7 @@ public class Main { zeebeClient.newWorker() .jobType("io.camunda:template:1") - .handler(new ConnectorJobHandler(new MyConnectorFunction())) + .handler(new ConnectorJobHandler(new MyConnectorFunction(), new DefaultValidationProvider())) .name("MESSAGE") .fetchVariables("authentication", "message") .open(); @@ -1048,5 +1049,5 @@ it with your job handler implementation that handles invoking the Connector func Your custom job handler needs to create a `OutboundConnectorContext` that the Connector function can use to handle variables, secrets, and Connector results. You can extend the -provided `io.camunda.connector.impl.outbound.AbstractConnectorContext` to quickly gain access +provided `io.camunda.connector.runtime.core.AbstractConnectorContext` to quickly gain access to most of the common context operations. diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/010-to-020.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/010-to-020.md deleted file mode 100644 index e71a7316283..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/010-to-020.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -id: 010-to-020 -title: Update 0.1 to 0.2 -description: "Review which adjustments must be made to migrate from Connector SDK 0.1.x to 0.2.0." 
---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.1.x to 0.2.0. - -:::caution - -Be aware that the update from 0.1 to 0.2 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.2.0, we introduce the following structural changes: - -- Input validation and secret replacement move from writing imperative code to declaratively using annotations. -- The outbound aspect of APIs is more explicit. Classes have been moved to more explicit packages and have been renamed. -- New required annotation for outbound Connectors. - -### Declarative validation and secrets - -Input objects previously had to implement the `ConnectorInput` interface to participate in validation and secret replacement -initiated from the `ConnectorContext` using its `validate` and `replaceSecrets` methods respectively. - -With version 0.2.0, we remove the imperative approach for validation and secret replacement from the SDK. -Instead, you can use annotations to describe the constraints of input attributes and mark those that can contain -secrets. - -These are two input objects written with the SDK version 0.1.x: - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class MyConnectorRequest implements ConnectorInput { - - private String message; - private Authentication authentication; - - @Override - public void validateWith(final Validator validator) { - validator.require(message, "message"); - validator.require(authentication, "authentication"); - validateIfNotNull(authentication, validator); - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - replaceSecretsIfNotNull(authentication, secretStore); - } -} -``` - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class Authentication implements ConnectorInput { - - private String user; - private String token; - - @Override - public void validateWith(final Validator validator) { - validator.require(user, "user"); - validator.require(token, "token"); - if (token != null && !(token.startsWith("xobx") || token.startsWith("secrets."))) { - validator.addErrorMessage("Token must start with \"xobx\" or be a secret"); - } - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - token = secretStore.replaceSecret(token); - } -} -``` - -You can express the same input objects with SDK version 0.2.0 as follows: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.NotNull; - -public class MyConnectorRequest { - - @NotEmpty - private String message; - - @NotNull - @Valid - @Secret - private Authentication authentication; -} -``` - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.Pattern; - -public class Authentication { - - @NotEmpty - private String user; - - @NotEmpty - @Pattern("^(xobx-|secret).+") - @Secret - private String token; -} -``` - -As a result, you have to remove the `ConnectorInput` interface implementation and the imperative code 
that comes with `validateWith` -and `replaceSecrets`. You can now concisely describe the constraints of attributes rather than express them in imperative code. - -To use annotaion-based validation out of the box, you can include the new artifact `connector-validation` that -comes with the SDK. - - - - - -```xml - - io.camunda.connector - connector-validation - 0.2.0 - -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-validation:0.2.0' -``` - - - - -You can read more about validation and secret replacement in our -[SDK guide](/components/connectors/custom-built-connectors/connector-sdk.md). - -### Explicit outbound aspect - -With version 0.2.0 of the SDK, we make the outbound aspect of those components specific to outbound connectivity -more visible. This separates those SDK components that are tightly coupled to outbound from those that -will be reusable for inbound. - -With this change, the names of the following classes need to be adjusted: - -- Rename `io.camunda.connector.api.ConnectorContext` to `io.camunda.connector.api.outbound.OutboundConnectorContext`. -- Rename `io.camunda.connector.api.ConnectorFunction` to `io.camunda.connector.api.outbound.OutboundConnectorFunction`. -- Rename `io.camunda.connector.api.SecretProvider` to `io.camunda.connector.api.secret.SecretProvider`. -- Rename `io.camunda.connector.api.SecretStore` to `io.camunda.connector.api.secret.SecretStore`. -- Rename `io.camunda.connector.test.ConnectorContextBuilder` to `io.camunda.connector.test.outbound.OutboundConnectorContextBuilder`. - -As a result, you must replace all occurrences of the old class names and imports with the new ones. This includes the -SPI for the Connector function itself. Therefore, rename the file `META-INF/services/io.camunda.connector.api.ConnectorFunction` to -`META-INF/services/io.camunda.connector.api.outbound.OutboundConnectorFunction`. - -### `@OutboundConnector` annotation - -For best interoperability, Connectors provide default meta-data (`name`, `type`, `inputVariables`) via the `@OutboundConnector` annotation: - -```java -@OutboundConnector( - name = "PING", - inputVariables = {"caller"}, - type = "io.camunda.example.PingConnector:1" -) -public class PingConnector implements OutboundConnectorFunction { - ... -} -``` - -## Connector runtime environment - -If using the -[pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that comes with the SDK does not fit your use case, you can create a custom runtime environment. - -With version 0.2.0 of the [job worker runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#connector-job-handler), you need to make the following changes: - -- Rename `io.camunda.connector.runtime.jobworker.ConnectorJobHandler` to `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler`. -- Rename Connector-related env variables from `ZEEBE_` to `CONNECTOR_`. Zeebe configuration properties remain unchanged. - -As a general change in behavior the module will now pick up Connectors from classpath unless it is explicitly configured via environment variables. - -Also, take the name changes in the [SDK core](#explicit-outbound-aspect) into account. - -Implementing your own Connector wrapper you need to provide a Connector context specific to -your environment. 
Consider extending the `io.camunda.connector.impl.outbound.AbstractConnectorContext` -instead of implementing the `io.camunda.connector.api.ConnectorContext` yourself. Most of the commonly needed functionality -is already provided in there. diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md deleted file mode 100644 index 61142a93750..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: 0100-to-0110 -title: Update 0.10 to 0.11 -description: "Review which adjustments must be made to migrate from Connector SDK 0.10.x to 0.11.0." ---- - -Beginner - -:::note -Migrate directly to version 0.11.2 of the SDK. This contains a fix for several issues in the 0.11.0 release. -::: - -This SDK release is not backwards-compatible. We are moving towards a stable Connectors release and continue to improve the experience of developing custom Connectors. - -In this SDK version, we changed the `OutboundConnectorContext` and `InboundConnectorContext interfaces significantly.` You can no longer use the `getVariablesAsType` or `getPropertiesAsType` methods in outbound and inbound Connectors, respectively. -Use the new `bindVariables` method instead, as it takes care of secret replacement, payload validation, and deserialization automatically. - -We are moving away from a mandatory `@Secret` annotation. -From this release onwards, secrets are automatically replaced in all input variables/properties without the need to explicitly declare an annotation. - -To migrate your Connector implementations, complete the following: - -1. If you used the `OutboundConnectorContext::getVariablesAsType` method in you outbound Connector functions, replace it with `OutboundConnectorContext::bindVariables`. -2. If you used the `InboundConnectorContext::getPropertiesAsType` method in you inbound Connector executables, replace it with `InboundConnectorContext::bindProperties`. -3. Remove calls to `OutboundConnectorContext::replaceSecrets` and `InboundConnectorContext::replaceSecrets` methods. The secrets are now replaced automatically. -4. Remove calls to `OutboundConnectorContext::validate` and `InboundConnectorContext::validate` methods. The validation is now performed automatically. -5. If you used the `@Secret` annotation in your Connector implementations, you can safely remove it as it has no effect. diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/020-to-030.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/020-to-030.md deleted file mode 100644 index 248d7e7cc31..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/020-to-030.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: 020-to-030 -title: Update 0.2 to 0.3 -description: "Review which adjustments must be made to migrate from Connector SDK 0.2.x to 0.3.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.2.x to 0.3.0. - -:::caution - -Be aware that the update from 0.2 to 0.3 requires manual migration steps as described below. 
- -::: - -## Connector function - -With SDK version 0.3.0, we introduce the following structural changes: - -- Input validation moves from Jakarta Bean Validation API version 3.0 to 2.0. -- SDK artifacts have to be in scope `provided`. - -### Update to Validation API 2.0 - -To better integrate in the current Java ecosystem and widely used frameworks like Spring 5 and Spring Boot 2, the `connector-validation` module -now operates on Jakarta Bean Validation API version 2.0 instead of version 3.0. Adjust your Connector input objects using validation as follows: - -Replace all class imports starting with `jakarta.validation` by `javax.validation`. A Connector input class on SDK 0.2.x with the following imports: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -changes to the following: - -```java -import io.camunda.connector.api.annotation.Secret; -import javax.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -This way, the Connector runtime environments are able to pick up your validations correctly. - -### Provided SDK artifacts - -The Connector runtime environments can execute multiple Connectors at once. The environments also provide the base SDK artifacts and their classes -to any Connector they execute. This comprises runtime-specific classes related to the Connector context as well as the Connector core and the validation -classes. To minimize the possibility of incompatible classes being on the same classpath, Connectors are required to depend on `connector-core` and -`connector-validation` in Maven's dependency scope `provided`. Other dependency management frameworks like Gradle offer similar scopes. - -As a result, you need to include the SDK artifacts as follows in Maven: - -```xml - - io.camunda.connector - connector-core - provided - - - io.camunda.connector - connector-validation - provided - -``` - -## Connector runtime environment - -The SDK provides a [pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that you can start manually. With version 0.3.0, this runtime moves from the [SDK repository](https://github.com/camunda/connector-sdk/tree/stable/0.2/runtime-job-worker) -to [Connector Runtime](https://github.com/camunda/connectors/blob/main/connector-runtime/README.md). This also means that the provided runtime now is -a Spring Boot application, based on Spring Zeebe. Thus, it offers all out-of-the-box capabilities Spring Zeebe provides. - -The Connector runtime JAR for manual installation can now be fetched from https://repo1.maven.org/maven2/io/camunda/spring-zeebe-connector-runtime/ -(starting with version `8.1.3`) instead of https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-job-worker/. You can start the runtime -environment with the following command: - -```bash -java -cp 'spring-zeebe-connector-runtime-VERSION-with-dependencies.jar:connector-http-json-VERSION-with-dependencies.jar' \ - io.camunda.connector.runtime.ConnectorRuntimeApplication -``` - -The Docker image is still accessible at https://hub.docker.com/r/camunda/connectors/tags. - -### Custom runtime environments - -If you are building a custom runtime environment, note the following adjustments: - -- The `runtime-util` artifact replaces the `runtime-job-worker` artifact. 
-- The `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` has moved to `import io.camunda.connector.runtime.util.outbound.ConnectorJobHandler`. -- The `io.camunda.connector.impl.outbound.AbstractOutboundConnectorContext` has moved to `io.camunda.connector.impl.context.AbstractConnectorContext`. -- To build your own context class, we recommend using the following signature: - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext {} -``` - -- The `SecretStore` class has been removed. Initialize your context class with a `super(SecretProvider)` call. Remove the `getSecretStore` method if you used it. - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext { - - public MyContext(final SecretProvider provider) { - super(provider); - ... - } -} -``` diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/030-to-040.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/030-to-040.md deleted file mode 100644 index 43916603f22..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/030-to-040.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 030-to-040 -title: Update 0.3 to 0.4 -description: "Review which adjustments must be made to migrate from Connector SDK 0.3.x to 0.4.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.3.x to 0.4.0. - -:::caution - -Be aware that the update from 0.3 to 0.4 requires manual migration steps as described below. - -::: - -With SDK version 0.4.0, we introduce many basic structural changes: - -- Switching default Connector Runtime to Spring Boot/Spring Zeebe for outbound Connectors. -- Introducing webhook inbound Connector. -- Moved out-of-the-box connectors to mono-repo at https://github.com/camunda/connectors-bundle/tree/main/connectors to ease dependency management and conflict resolution. -- Build Connector bundle artifact and Docker image by Maven as default (done by adding various fat jars to one Docker image). -- Adding GCP Secret Provider used in Camunda SaaS. - -### Inbound webhook - -Spring Zeebe runtime with version `0.4.0` SDK introduces support of inbound webhook capabilities. -See the [list of available inbound Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md). - -To function properly, Spring Zeebe runtime requires connection to [Operate API](/apis-tools/operate-api/overview.md). Read more on [how to connect to Operate or disable it completely](/self-managed/connectors-deployment/connectors-configuration.md#local-installation). - -### What happens if I don't properly configure connection to Operate API? - -If you don't configure properly connection to Operate API, it will be not possible to poll process definitions from Operate. Therefore, the webhook functionality won't work. -Additionally, you may observe exception spam in your log file every 5 seconds complaining of inability to connect to Operate. -Overall, this is not critical and given there are no other issues, the Connector runtime will function properly. 
diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/040-to-050.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/040-to-050.md deleted file mode 100644 index 637cabc8899..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/040-to-050.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: 040-to-050 -title: Update 0.4 to 0.5 -description: "Review which adjustments must be made to migrate from Connector SDK 0.4.x to 0.5.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.4.x to 0.5.0. - -With SDK version 0.5.0, we introduced minor changes: - -- Removing Spring Zeebe dependency management -- Managing the GCP Secret Provider module version diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/050-to-060.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/050-to-060.md deleted file mode 100644 index 46124442521..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/050-to-060.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: 050-to-060 -title: Update 0.5 to 0.6 -description: "Review which adjustments must be made to migrate from Connector SDK 0.5.x to 0.6.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.5.x to 0.6.0. - -With SDK version 0.6.0, we introduced the following changes: - -- Replacing secrets in parent classes -- Supporting intermediate inbound events -- Defining interfaces for inbound Connectors -- Fixing failing datetime serialization diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/060-to-070.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/060-to-070.md deleted file mode 100644 index bc84e1e1940..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/060-to-070.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: 060-to-070 -title: Update 0.6 to 0.7 -description: "Review which adjustments must be made to migrate from Connector SDK 0.6.x to 0.7.0." ---- - -Beginner - -Beginner - -With the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), we made -breaking changes to the inbound Connectors. Update -[HTTP Webhook](https://github.com/camunda/connectors/tree/main/connectors/webhook/element-templates) -and [GitHub Webhook](https://github.com/camunda/connectors/tree/main/connectors/github/element-templates) -element templates to the latest versions. - -If you have used inbound webhook Connectors with Connector Runtime 0.6.x, you need to **manually** -apply the new element template version to your diagrams: - -1. Download the new element template from the [GitHub release page](https://github.com/camunda/connectors-bundle/releases/tag/0.17.0). -2. Follow the [installation guide](/components/modeler/desktop-modeler/element-templates/configuring-templates.md) to reinstall the element template. 
diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/070-to-080.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/070-to-080.md deleted file mode 100644 index 1145b3450fb..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/070-to-080.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 070-to-080 -title: Update 0.7 to 0.8 -description: "Review which adjustments must be made to migrate from Connector SDK 0.7.x to 0.8.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.7.x to 0.8.0. diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/080-to-090.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/080-to-090.md deleted file mode 100644 index 51055c0aefc..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/080-to-090.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 080-to-090 -title: Update 0.8 to 0.9 -description: "Review which adjustments must be made to migrate from Connector SDK 0.8.x to 0.9.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.8.x to 0.9.0. diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/090-to-0100.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/090-to-0100.md deleted file mode 100644 index 1e6172bb692..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/090-to-0100.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 090-to-0100 -title: Update 0.9 to 0.10 -description: "Review which adjustments must be made to migrate from Connector SDK 0.9.x to 0.10.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.9.x to 0.10.0. diff --git a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/introduction.md b/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/introduction.md deleted file mode 100644 index 034d80e7388..00000000000 --- a/versioned_docs/version-8.4/components/connectors/custom-built-connectors/update-guide/introduction.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: introduction -title: Connector SDK updates ---- - -These documents guide you through the process of updating your Camunda 8 -Connector runtimes to a newer version of the -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). 
- -There is a dedicated update guide for each version: - -### [Connector SDK 0.10.x to 0.11](../0100-to-0110) - -Update from 0.10.x to 0.11.2 - -### [Connector SDK 0.9 to 0.10](../090-to-0100) - -Update from 0.9.x to 0.10.0 - -### [Connector SDK 0.8 to 0.9](../080-to-090) - -Update from 0.8.x to 0.9.0 - -### [Connector SDK 0.7 to 0.8](../070-to-080) - -Update from 0.7.x to 0.8.0 - -### [Connector SDK 0.6 to 0.7](../060-to-070) - -Update from 0.6.x to 0.7.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.7.0) - -### [Connector SDK 0.5 to 0.6](../050-to-060) - -Update from 0.5.x to 0.6.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.6.0) - -### [Connector SDK 0.4 to 0.5](../040-to-050) - -Update from 0.4.x to 0.5.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.5.0) - -### [Connector SDK 0.3 to 0.4](../030-to-040) - -Update from 0.3.x to 0.4.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.4.0) - -### [Connector SDK 0.2 to 0.3](../020-to-030) - -Update from 0.2.x to 0.3.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.3.0) - -### [Connector SDK 0.1 to 0.2](../010-to-020) - -Update from 0.1.x to 0.2.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.2.0) diff --git a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md index a7f72d5bc1a..49079553a1d 100644 --- a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md +++ b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md @@ -103,10 +103,6 @@ There are two options to authenticate the Connector with AWS: The **Amazon EventBridge Webhook Connector** is an inbound Connector enabling you to start a BPMN process instance triggered by an event from [Amazon EventBridge](https://aws.amazon.com/eventbridge/). -:::note -If you have used the **Amazon EventBridge Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an Amazon EventBridge Webhook Connector task 1. Start building your BPMN diagram. You can use the **Amazon EventBridge Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/github.md b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/github.md index f02677c8266..7ab7863ec28 100644 --- a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/github.md +++ b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/github.md @@ -237,12 +237,6 @@ handling response is still applicable [as described](/components/connectors/prot The **GitHub Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). 
-:::note -If you have used the GitHub Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. -Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a GitHub Webhook Connector task 1. Start building your BPMN diagram. You can use GitHub Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. diff --git a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/slack.md b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/slack.md index 03d36ff3aaa..bd5892fde10 100644 --- a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/slack.md +++ b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/slack.md @@ -31,7 +31,7 @@ To make the **Slack Connector** executable, fill out the mandatory fields highli ### Authentication -Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, '{{secrets.SLACK_OAUTH_TOKEN}}'. +Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, `{{secrets.SLACK_OAUTH_TOKEN}}`. ### Create channel diff --git a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/twilio.md b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/twilio.md index aab4a0887e3..d744bf5ac5f 100644 --- a/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/twilio.md +++ b/versioned_docs/version-8.4/components/connectors/out-of-the-box-connectors/twilio.md @@ -177,10 +177,6 @@ To learn more about implementing retry and error handling logic in your BPMN dia The **Twilio Webhook Connector** is an inbound Connector that enables you to start a BPMN process instance triggered by a [Twilio event](https://www.twilio.com/docs/usage/webhooks). -:::note -If you have used the **Twilio Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a Twilio Webhook Connector task 1. Start building your BPMN diagram. You can use the **Twilio Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.4/components/connectors/protocol/http-webhook.md b/versioned_docs/version-8.4/components/connectors/protocol/http-webhook.md index 0fb040ead87..e6dee47ce4c 100644 --- a/versioned_docs/version-8.4/components/connectors/protocol/http-webhook.md +++ b/versioned_docs/version-8.4/components/connectors/protocol/http-webhook.md @@ -7,12 +7,6 @@ description: Start a process instance with your custom webhook configuration, tr The **HTTP Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by external HTTP call. -:::note -If you have used the HTTP Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. 
-Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an HTTP Webhook Connector event 1. Start building your BPMN diagram. You can use HTTP Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. @@ -83,7 +77,7 @@ Please refer to the [update guide](/components/connectors/custom-built-connector - Set the **API Key** property to the expected value of the API key. - Set the **API Key locator** property that will be evaluated against the incoming request to extract the API key. [See the example](#how-to-configure-api-key-authorization). -- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer {JWT_TOKEN}. +- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer `{JWT_TOKEN}`. - Set JWK URL which is used as a well-known public URL to fetch the [JWKs](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets). - Set JWT role property expression which will be evaluated against the content of the JWT to extract the list of roles. See more details on extracting roles from JWT data [here](#how-to-extract-roles-from-jwt-data). diff --git a/versioned_docs/version-8.4/components/connectors/protocol/rest.md b/versioned_docs/version-8.4/components/connectors/protocol/rest.md index 5683db47a4f..a5be89a1fcb 100644 --- a/versioned_docs/version-8.4/components/connectors/protocol/rest.md +++ b/versioned_docs/version-8.4/components/connectors/protocol/rest.md @@ -55,7 +55,7 @@ Select the **REST Connector** and fill out the following properties under the ** - **Headers**: The API key will be included in the request headers. 3. Specify your API key details: - **API key name**: Enter the parameter name expected by the API (e.g., apiKey). - - **API key value**: Reference the secret you created for your API key (e.g., {{secrets.REST_API_KEY_SECRET}}). + - **API key value**: Reference the secret you created for your API key (e.g., `{{secrets.REST_API_KEY_SECRET}}`). ### REST Connector (Basic) diff --git a/versioned_docs/version-8.4/components/console/manage-plan/migrate-from-prof-to-starter.md b/versioned_docs/version-8.4/components/console/manage-plan/migrate-from-prof-to-starter.md index 1fdcd43ec98..8f3d548da9b 100644 --- a/versioned_docs/version-8.4/components/console/manage-plan/migrate-from-prof-to-starter.md +++ b/versioned_docs/version-8.4/components/console/manage-plan/migrate-from-prof-to-starter.md @@ -11,7 +11,7 @@ Here are a few important remarks to consider before completing the migration ste - Since the two plans have different types of clusters included and fees for those, we recommend comparing the [Professional plan](https://camunda.com/blog/2023/05/camunda-professional-edition-accelerate-projects/) with the [Starter plan](https://camunda.com/blog/2023/09/camunda-starter/) to [understand your monthly costs](https://camunda.com/pricing/starter-plan-price-calculator/) before the migration. - General users and development/production cluster reservations in the Professional plan are migrated “as is” to the Starter plan, which may result in overage costs (e.g. production clusters in Professional will be transferred to production clusters in the Starter plan). 
If you are not using your production cluster in the Professional plan, we recommend you delete it beforehand and create a new development cluster in the Starter plan afterward. - Once you have edited the plan below, the changes will take effect on the first day of your next subscription period. -- If you have any questions, do not hesitate to [contact us](https://camunda.com/contact/). +- If you have any questions, do not hesitate to [contact us](/reference/contact.md). ::: diff --git a/versioned_docs/version-8.4/components/modeler/bpmn/call-activities/call-activities.md b/versioned_docs/version-8.4/components/modeler/bpmn/call-activities/call-activities.md index fa612a2eab7..0cdba794a12 100644 --- a/versioned_docs/version-8.4/components/modeler/bpmn/call-activities/call-activities.md +++ b/versioned_docs/version-8.4/components/modeler/bpmn/call-activities/call-activities.md @@ -32,10 +32,6 @@ When a non-interrupting boundary event is triggered, the created process instanc ## Variable mappings -By default, all variables of the call activity scope are copied to the created process instance. This can be limited to copying only the local variables of the call activity, by setting the attribute `propagateAllParentVariables` to `false`. - -By disabling this attribute, variables existing at higher scopes are no longer copied. If the attribute `propagateAllParentVariables` is set (default: `true`), all variables are propagated to the child process instance. - Input mappings can be used to create new local variables in the scope of the call activity. These variables are also copied to the created process instance. If the attribute `propagateAllChildVariables` is set (default: `true`), all variables of the created process instance are propagated to the call activity. This behavior can be customized by defining output mappings at the call activity. The output mappings are applied on completing the call activity and only those variables that are defined in the output mappings are propagated. diff --git a/versioned_docs/version-8.4/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md b/versioned_docs/version-8.4/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md index 56bee823e46..5a1b961b484 100644 --- a/versioned_docs/version-8.4/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md +++ b/versioned_docs/version-8.4/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md @@ -14,6 +14,8 @@ When an embedded subprocess is entered, the start event is activated. The subpro Embedded subprocesses are often used together with **boundary events**. One or more boundary events can be attached to a subprocess. When an interrupting boundary event is triggered, the entire subprocess (including all active elements) is terminated. +When adding an embedded subprocess to your model, you can either add a collapsed or expanded subprocess. You cannot collapse an existing expanded subprocess in your model. + ## Collapsed subprocesses :::caution @@ -22,7 +24,7 @@ Collapsed subprocesses are currently only partially supported by Optimize. While All other Camunda components fully support collapsed subprocesses. ::: -A subprocess can be collapsed to conceal its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. 
+A collapsed subprocess conceals its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. Collapsed subprocesses serve purely display purposes. For the creation of reusable processes, it is recommended to utilize [call activities](../call-activities/call-activities.md). diff --git a/versioned_docs/version-8.4/components/modeler/desktop-modeler/telemetry/telemetry.md b/versioned_docs/version-8.4/components/modeler/desktop-modeler/telemetry/telemetry.md index 8db915f21f0..33fe66c6901 100644 --- a/versioned_docs/version-8.4/components/modeler/desktop-modeler/telemetry/telemetry.md +++ b/versioned_docs/version-8.4/components/modeler/desktop-modeler/telemetry/telemetry.md @@ -54,8 +54,8 @@ These events include the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the case of a form, the payload also includes the `formFieldTypes`: @@ -78,8 +78,8 @@ The `Deployment Event` and `Start Instance` have the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the event of an unsuccessful deployment, an `error` property will be present in the payload containing an error code. diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-button.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-button.md index 577b3c0615e..e3614b67505 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-button.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-button.md @@ -6,7 +6,7 @@ description: A form element to trigger form actions A button allowing the user to trigger form actions. -![Form Button Symbol](/img/form-icons/form-button.svg) +Form Button Symbol ### Configurable properties @@ -15,4 +15,4 @@ A button allowing the user to trigger form actions. - **Submit**: Submit the form (given there are no validation errors). - **Reset**: Reset the form, all user inputs will be lost. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the button. -- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
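To illustrate how the button properties listed above end up in a form definition, here is a minimal sketch of a Camunda Form (form-js) JSON schema containing a single button. The form and field IDs, the hide expression, and the column count are invented example values, and `conditional.hide` and `layout.columns` are assumed to be the schema keys behind the **Hide if** and **Columns** settings:

```json
{
  "type": "default",
  "id": "Form_orderReview",
  "components": [
    {
      "type": "button",
      "id": "Button_submit",
      "label": "Submit order",
      "action": "submit",
      "conditional": {
        "hide": "=readOnly"
      },
      "layout": {
        "columns": 4
      }
    }
  ]
}
```

Setting `action` to `reset` instead turns the same component into a reset button that clears all user input.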
diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md index b99bcbe81bb..950f8454678 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md @@ -6,7 +6,7 @@ description: A form element to select one or multiple values from set options A set of checkbox options providing data multi-selection for small datasets. -![Form Checklist Symbol](/img/form-icons/form-checklist.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A set of checkbox options providing data multi-selection for small datasets. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox group. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox group must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Datatypes diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox.md index 834a18cfa63..9546284e2d4 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-checkbox.md @@ -6,7 +6,7 @@ description: A form element to read and edit boolean data A checkbox allowing the user to read and edit boolean data. -![Form Checkbox Symbol](/img/form-icons/form-checkbox.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A checkbox allowing the user to read and edit boolean data. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
### Datatypes diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-datetime.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-datetime.md index aeb127128dc..c02c2912925 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-datetime.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-datetime.md @@ -6,7 +6,7 @@ description: Learn about the datetime form element to read and edit date and tim A component allowing the user to read and edit date and time data. -![Form Datetime Symbol](/img/form-icons/form-datetime.svg) +Form Datetime Symbol ## Configurable properties @@ -19,7 +19,7 @@ A component allowing the user to read and edit date and time data. - **Read only**: Makes the datetime component read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the datetime component, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the datetime component. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Time format**: Defines the time data format. This can either be **UTC offset**, **UTC normalized**, or **No timezone**. - **Time interval**: Defines the steps of time that can be selected in the time input field. - **Disallow past dates**: Enables the restriction to not allow past dates. diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md index 4af25ce51cc..1a24ae1440e 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md @@ -6,7 +6,7 @@ description: Learn about the dynamic list form element to dynamically manage a l The **dynamic list** element is designed to dynamically manage a list of form elements. It enables users to add or remove items from the list and is particularly useful in scenarios where the number of items in a list is not fixed. 
-![Dynamic List Symbol](/img/form-icons/form-dynamiclist.svg) +Dynamic List Symbol ## Configurable properties diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-group.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-group.md index a90f822b54a..4353743a7b1 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-group.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-group.md @@ -6,7 +6,7 @@ description: Learn about the group form element to group multiple form elements The group element serves as a container to group various form elements together. It allows for nesting of fields and assists in organizing complex forms. -![Form Group Symbol](/img/form-icons/form-group.svg) +Form Group Symbol ### Configurable properties @@ -15,7 +15,7 @@ The group element serves as a container to group various form elements together. - **Show outline**: Can be toggled on and off to display a separating outline around the group - **Vertical alignment**: Determines the alignment of items in the list. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Usage diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-iframe.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-iframe.md index a2d17621500..46ea0889d3d 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-iframe.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-iframe.md @@ -12,7 +12,7 @@ Every iFrame component is a sandbox. This means that the content of the iFrame i ::: -![Form iFrame Symbol](/img/form-icons/form-iframe.svg) +Form iframe Symbol ## Configurable properties @@ -20,4 +20,4 @@ Every iFrame component is a sandbox. This means that the content of the iFrame i - **URL**: Enter an HTTPS URL to a source. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). Ensure the URL is safe as it might impose security risks. Not all external sources can be displayed in the iFrame. Read more about it in [the X-FRAME-OPTIONS documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options). - **Height**: Defines the height of the iFrame. Defined as number of pixels. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the iFrame. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-image.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-image.md index 61e461f7213..33404c3eed3 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-image.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-image.md @@ -6,11 +6,11 @@ description: Learn about the image view form element to display an image. An element allowing the user to display images. -![Form Image Symbol](/img/form-icons/form-image.svg) +Form Image Symbol ## Configurable properties - **Image source**: Specifies the image source via [expression](../../feel/language-guide/feel-expressions-introduction.md), [templating syntax](../configuration/forms-config-templating-syntax.md) or [static value](/docs/components/concepts/expressions.md#expressions-vs-static-values) (hyperlink or data URI). - **Alternative text**: Provides an alternative text to the image in case it cannot be displayed. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the image. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-number.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-number.md index 6e9d108ca19..902253e3642 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-number.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-number.md @@ -6,7 +6,7 @@ description: A form element to read and edit numeric data A number field allowing the user to read and edit numeric data. -![Form Number Symbol](/img/form-icons/form-number.svg) +Form Number Symbol ### Configurable properties @@ -19,7 +19,7 @@ A number field allowing the user to read and edit numeric data. - **Read only**: Makes the number field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the number field, for use during development. 
- **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the number. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Serialize to string**: Configures the output format of the datetime value. This enables unlimited precision digits. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Number field must contain a value. diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-radio.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-radio.md index 6e3bb58a254..2a65429a750 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-radio.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-radio.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A radio group allowing the user to select one of multiple data options for small datasets. -![Form Radio Symbol](/img/form-icons/form-radio.svg) +Form Radio Symbol ### Configurable properties @@ -18,7 +18,7 @@ A radio group allowing the user to select one of multiple data options for small - **Disabled**: Disables the radio group, for use during development. - **Options source**: Radio group components can be configured with an options source defining the individual choices the component provides, refer to [options source docs](../configuration/forms-config-options.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the radio group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One radio group option must be selected. 
diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-select.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-select.md index 17ae7dd2ce0..102a401ff77 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-select.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-select.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A Select dropdown allowing the user to select one of multiple data option from larger datasets. -![Form Select Symbol](/img/form-icons/form-select.svg) +Form Select Symbol ### Configurable properties @@ -18,7 +18,7 @@ A Select dropdown allowing the user to select one of multiple data option from l - **Read only**: Makes the select read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the select, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the select. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Options source**: Selects can be configured with an options source defining the individual choices the select provides, refer to [options source docs](../configuration/forms-config-options.md). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One select entry must be selected. diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-separator.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-separator.md index a83fc9ed785..392d1418cb4 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-separator.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-separator.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add a visual separation between A **separator** element is used to create a visual separation between two elements. 
-![Form Spacer Symbol](/img/form-icons/form-separator.svg) +Form Separator Symbol ## Usage diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-spacer.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-spacer.md index 22043da492b..7284c15dfd3 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-spacer.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-spacer.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add vertical space between eleme A **spacer** element is used to create a defined amount of vertical space between two elements. -![Form Spacer Symbol](/img/form-icons/form-spacer.svg) +Form Spacer Symbol ## Configurable properties diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-table.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-table.md index 947b4f5e505..5ff8cb77e7d 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-table.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-table.md @@ -6,7 +6,7 @@ description: Learn about the table form element to render tabular data. This is an element allowing the user to render tabular data. -![Form table Symbol](/img/form-icons/form-table.svg) +Form Table Symbol ## Configurable properties @@ -16,4 +16,4 @@ This is an element allowing the user to render tabular data. - **Number of rows per page**: The size of each page. Used only if pagination is enabled. Must be greater than zero. - **Headers source**: Defines which headers will be used in the table. This can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md) or a list of static headers. Review [table data binding](../configuration/forms-config-table-data-binding.md) for the required header structure. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the table. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-taglist.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-taglist.md index 97411ce8db5..e4dd64059ea 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-taglist.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-taglist.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A complex and searchable tag based component providing multi-selection for large datasets. 
-![Form Taglist Symbol](/img/form-icons/form-taglist.svg) +Form Taglist Symbol ### Configurable properties @@ -14,7 +14,7 @@ A complex and searchable tag based component providing multi-selection for large - **Field description**: Description provided below the taglist. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Key**: Binds the field to a form variable, refer to [data binding docs](../configuration/forms-config-data-binding.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the taglist. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Taglist must contain a value. - **Options source**: Taglists can be configured with an options source defining the individual choices your user can make, refer to [options source docs](../configuration/forms-config-options.md). diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-text.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-text.md index 4da6171d127..bcad868c71e 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-text.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-text.md @@ -6,13 +6,13 @@ description: A form element to display static information. A text component allowing to display static information to the user. -![Form Text Symbol](/img/form-icons/form-text.svg) +Form Text Symbol ## Configurable properties - **Text**: Either an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). After evaluation, the result is processed using a Markdown renderer that supports basic HTML and [GitHub-flavored Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). To ensure safety and prevent cross-site scripting in Camunda Forms, potentially harmful HTML elements will not be rendered. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). 
## Example text configurations diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textarea.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textarea.md index d851a970a92..2e66e4d0712 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textarea.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textarea.md @@ -6,7 +6,7 @@ description: Learn about the text area form element to read and edit multiline t A text area allowing the user to read and edit multiline textual data. -![Form Textarea Symbol](/img/form-icons/form-textArea.svg) +Form Textarea Symbol ## Configurable properties @@ -17,7 +17,7 @@ A text area allowing the user to read and edit multiline textual data. - **Read only**: Makes the text area read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text area; for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text area. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text area must contain a value. - **Minimum length**: Text area must have at least `n` characters. diff --git a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textfield.md b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textfield.md index 1aafa0e824f..da45e37b3c9 100644 --- a/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textfield.md +++ b/versioned_docs/version-8.4/components/modeler/forms/form-element-library/forms-element-library-textfield.md @@ -6,7 +6,7 @@ description: A form element to read and edit textual data A text field allowing the user to read and edit textual data. -![Form Text Field Symbol](/img/form-icons/form-textField.svg) +Form Text Field Symbol ### Configurable properties @@ -17,7 +17,7 @@ A text field allowing the user to read and edit textual data. - **Read only**: Makes the text field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text field. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text field must contain a value. - **Regular expression validation**: Use predefined validation patterns. Available options are: `Email`, `Phone`, and `Custom`. diff --git a/versioned_docs/version-8.4/components/modeler/web-modeler/camunda-marketplace.md b/versioned_docs/version-8.4/components/modeler/web-modeler/camunda-marketplace.md index 9b026b9d57a..5fee9c721d8 100644 --- a/versioned_docs/version-8.4/components/modeler/web-modeler/camunda-marketplace.md +++ b/versioned_docs/version-8.4/components/modeler/web-modeler/camunda-marketplace.md @@ -14,6 +14,10 @@ The Camunda Marketplace can be accessed via your [browser](https://marketplace.c ## Visit the Camunda Marketplace +:::note +Connectors created by partners or the community are not part of the commercial Camunda product. Camunda does not support these Connectors as part of its commercial services to enterprise customers. Please evaluate each Connector to make sure it meets your requirements before use. +::: + To navigate to the Camunda Marketplace, take the following steps: 1. Log in to your Camunda account, and navigate to Web Modeler using the **Camunda components** icon in the top left corner of your console. Click **Modeler**. diff --git a/versioned_docs/version-8.4/components/zeebe/zeebe-overview.md b/versioned_docs/version-8.4/components/zeebe/zeebe-overview.md index 0bb3d7f4e9b..7ab27b959b8 100644 --- a/versioned_docs/version-8.4/components/zeebe/zeebe-overview.md +++ b/versioned_docs/version-8.4/components/zeebe/zeebe-overview.md @@ -20,12 +20,6 @@ With Zeebe you can: For documentation on deploying Zeebe as part of Camunda 8 Self-Managed, refer to the [deployment guide](../../self-managed/zeebe-deployment/zeebe-installation.md). -## Enterprise support for Zeebe - -Paid support for Zeebe is available via either Camunda 8 Starter or Camunda 8 Enterprise plans. Customers can choose either plan based on their process automation requirements. Camunda 8 Enterprise customers also have the option of on-premises or private cloud deployment. - -Additionally, regardless of how you are working with Zeebe and Camunda 8, you can always find support through the [community](/contact/). - ## Next steps - Get familiar with [technical concepts](technical-concepts/technical-concepts-overview.md). diff --git a/versioned_docs/version-8.4/guides/getting-started-orchestrate-microservices.md b/versioned_docs/version-8.4/guides/getting-started-orchestrate-microservices.md index 9c4ca395f39..6164a6eddc5 100644 --- a/versioned_docs/version-8.4/guides/getting-started-orchestrate-microservices.md +++ b/versioned_docs/version-8.4/guides/getting-started-orchestrate-microservices.md @@ -19,7 +19,7 @@ While this guide uses code snippets in Java, you do not need to be a Java develo ## Prerequisites - Ensure you have a valid [Camunda 8 account](create-account.md), or sign up if you still need one. 
-- Java >= 8 +- Java ≥ 8 - Maven - IDE (IntelliJ, VSCode, or similar) - Download and unzip or clone the [repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java` diff --git a/versioned_docs/version-8.4/guides/migrating-from-camunda-7/index.md b/versioned_docs/version-8.4/guides/migrating-from-camunda-7/index.md index 000e09dd12f..39764a6da25 100644 --- a/versioned_docs/version-8.4/guides/migrating-from-camunda-7/index.md +++ b/versioned_docs/version-8.4/guides/migrating-from-camunda-7/index.md @@ -53,4 +53,4 @@ As described earlier in this guide, migration is an ongoing topic and this guide - Discuss workload migrations (operations) - Eventual consistency -[Reach out to us](/contact/) to discuss your specific migration use case. +[Reach out to us](/reference/contact.md) to discuss your specific migration use case. diff --git a/versioned_docs/version-8.4/reference/alpha-features.md b/versioned_docs/version-8.4/reference/alpha-features.md index 8a65e9ee0fa..c8229dff48c 100644 --- a/versioned_docs/version-8.4/reference/alpha-features.md +++ b/versioned_docs/version-8.4/reference/alpha-features.md @@ -22,7 +22,7 @@ Limitations of alpha features and components include: - Not necessarily feature-complete. - Might lack full documentation. - No guaranteed updates to newer releases. -- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://docs.camunda.org/enterprise/support/). +- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://camunda.com/services/enterprise-support-guide/). - No maintenance service. - (SaaS) No availability targets. - Released outside the standard [release policy](release-policy.md). @@ -32,7 +32,7 @@ To learn more about using alpha features, see [enabling alpha features](/compone :::note - Alpha features can also be included in a minor version (stable) release. -- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/contact). +- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/reference/contact.md). ::: @@ -43,7 +43,7 @@ Once features and components are released and considered stable, they become gen Stable features and components are: - Ready for production use for most users with minimal risk. -- Supported by [L1 Priority-level support](https://docs.camunda.org/enterprise/support/#priority-level) for production use. +- Supported by [L1 Priority-level support](https://camunda.com/services/enterprise-support-guide/) for production use. - Fully documented. A release or component is considered stable if it has passed all verification and test stages and can be released to production. 
diff --git a/versioned_docs/version-8.4/reference/contact.md b/versioned_docs/version-8.4/reference/contact.md new file mode 100644 index 00000000000..4e6c7fc3892 --- /dev/null +++ b/versioned_docs/version-8.4/reference/contact.md @@ -0,0 +1,45 @@ +--- +id: contact +title: Contact +description: Contact Camunda, submit feedback, find support using the Camunda community forum, note bug reports and feature requests, and review security notices. +keywords: + [ + support, + contact-us, + get-support, + help, + need-help, + bug, + bug-report, + feature-request, + issue, + enterprise-support, + ] +--- + +There are a few different channels you can reach us based on your needs: + +- We encourage everyone to participate in our **community** via the [Camunda community forum](https://forum.camunda.io/), where you can exchange ideas with other Camunda users, as well as Camunda employees. For all other Camunda community programs and resources, visit our [Camunda Developer Hub](https://camunda.com/developers). + +- We welcome your **bug** reports and **feature requests** through our community channels mentioned above. + +- For **security-related issues**, review our [security notices](/reference/notices.md) for the most up-to-date information on known issues and steps to report a vulnerability so we can solve the problem as quickly as possible. Do not use GitHub for security-related issues. + +- **Feedback and support** can be submitted or requested via JIRA by following our [Enterprise support process](https://camunda.com/services/enterprise-support-guide/). All users can also find feedback and support options in the Help Center or [Camunda community forum](https://forum.camunda.io/). + +- For sales inquiries, information about Camunda 8 performance and benchmarking, or anything not listed above, use our [Contact Us](https://camunda.com/contact/) form. + +## Locating Camunda 8 credentials + +Need assistance locating your Camunda 8 credentials? You can obtain these credentials from Camunda by submitting a **Help Request**. To do this, take the following steps: + +1. Log in to [Jira](https://jira.camunda.com/secure/Dashboard.jspa). +2. Click **Create** in the navigation bar at the top of the page. This launches a **Create Issue** pop-up. +3. In the **Issue Type** field, select **Help Request**. +4. In the **Help Request Type** field, click the option that reads **I need the credentials for downloading Camunda**. +5. In the **Summary** and **Description** fields, **I need the credentials for downloading Camunda** will populate by default. + ![completed help request example](./img/create-issue-request.png) +6. (Optional) Add more details, such as the priority level or authorized support contacts. +7. Click **Create** at the bottom of the pop-up **Create Issue** box. + +After completing these steps, your request is generated. Find additional details on submitting a self-service help request [here](https://camunda.com/services/enterprise-support-guide/). 
diff --git a/versioned_docs/version-8.4/reference/img/create-issue-request.png b/versioned_docs/version-8.4/reference/img/create-issue-request.png new file mode 100644 index 00000000000..374fdfece6f Binary files /dev/null and b/versioned_docs/version-8.4/reference/img/create-issue-request.png differ diff --git a/versioned_docs/version-8.4/reference/notices.md b/versioned_docs/version-8.4/reference/notices.md index 54571a1a741..495e6468732 100644 --- a/versioned_docs/version-8.4/reference/notices.md +++ b/versioned_docs/version-8.4/reference/notices.md @@ -74,11 +74,11 @@ Tasklist The REST API functionality of Tasklist 8.2.0 and 8.2.1 allows unauthenticated access to the following methods/URLs: -- GET /v1/tasks/{taskId} +- GET /v1/tasks/\{taskId} - POST /v1/tasks/search -- POST /v1/tasks/{taskId}/variables/search -- POST /v1/forms/{formId} -- POST /v1/variables/{variableId} +- POST /v1/tasks/\{taskId}/variables/search +- POST /v1/forms/\{formId} +- POST /v1/variables/\{variableId} Find more information about the methods in our [Tasklist REST API documentation](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md). @@ -115,7 +115,7 @@ At this point, Camunda is not aware of any specific attack vector in Tasklist al #### How to determine if the installation is affected -You are Tasklist version (8.0.3 >= version <= 8.0.7) or <= 8.1.2 +You are using Tasklist version (8.0.3 ≤ version ≤ 8.0.7) or ≤ 8.1.2 #### Solution @@ -142,7 +142,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.11 or <= 1.3.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.11 or ≤ 1.3.6 #### Solution @@ -168,7 +168,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.8 or <= 1.1.9 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.8 or ≤ 1.1.9 #### Solution @@ -194,7 +194,7 @@ Specifically, IAM bundles log4j-api and log4j-to-slf4j. 
However, IAM does not bu #### How to determine if the installation is affected -You are using IAM version <= 1.2.8 +You are using IAM version ≤ 1.2.8 #### Solution @@ -219,7 +219,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.7 or <= 1.1.8 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.7 or ≤ 1.1.8 #### Solution @@ -248,7 +248,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.7 +You are using IAM version ≤ 1.2.7 #### Solution @@ -273,7 +273,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.6 or <= 1.1.7 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.6 or ≤ 1.1.7 #### Solution @@ -302,7 +302,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.6 +You are using IAM version ≤ 1.2.6 #### Solution @@ -327,7 +327,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.5 or <= 1.1.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.5 or ≤ 1.1.6 #### Solution @@ -357,7 +357,7 @@ Still, Camunda recommends applying fixes as mentioned in the Solution section be #### How to determine if the installation is affected -You are using IAM version <= 1.2.5 +You are using IAM version ≤ 1.2.5 #### Solution diff --git a/versioned_docs/version-8.4/reference/regions.md b/versioned_docs/version-8.4/reference/regions.md index c3104ed7238..6d373f5a793 100644 --- a/versioned_docs/version-8.4/reference/regions.md +++ b/versioned_docs/version-8.4/reference/regions.md @@ -9,7 +9,7 @@ When you create a cluster in Camunda 8 SaaS, you must specify a region for that Currently, we make these regions available for customers on the Trial, Starter, and Enterprise Plans. Enterprise customers can discuss custom regions with their Customer Success Manager. :::note -Our Console and Web Modeler components are currently hosted in the EU. [Contact us](https://camunda.com/contact/) if you have additional questions. +Our Console and Web Modeler components are currently hosted in the EU. [Contact us](/reference/contact.md) if you have additional questions. ::: Below, find a list of regions currently supported in Camunda 8 SaaS. @@ -19,6 +19,7 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. - Belgium, Europe (europe-west1) - Iowa, North America (us-central1) - London, Europe (europe-west2) +- Singapore, Asia (asia-southeast1) - South Carolina, North America (us-east1) - Sydney, Australia (australia-southeast1) - Toronto, North America (northamerica-northeast2) @@ -26,5 +27,5 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. You can find the locations behind the region codes [on the Google page](https://cloud.google.com/about/locations). :::note -Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](https://camunda.com/contact/) as we are able to make additional regions available on request. 
+Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](/reference/contact.md) as we are able to make additional regions available on request. ::: diff --git a/versioned_docs/version-8.4/reference/status.md b/versioned_docs/version-8.4/reference/status.md index c8de779c5d2..ecb840a23ab 100644 --- a/versioned_docs/version-8.4/reference/status.md +++ b/versioned_docs/version-8.4/reference/status.md @@ -21,4 +21,4 @@ To receive service status updates: ## Support -Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/contact). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). +Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/reference/contact.md). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). diff --git a/versioned_docs/version-8.4/reference/supported-environments.md b/versioned_docs/version-8.4/reference/supported-environments.md index 6567c2f037c..55e060b5ed8 100644 --- a/versioned_docs/version-8.4/reference/supported-environments.md +++ b/versioned_docs/version-8.4/reference/supported-environments.md @@ -8,7 +8,7 @@ The supported environments page lists browsers, operating systems, clients, depl **If the particular technology is not listed, we cannot resolve issues caused by the usage of that unlisted technology.** -You may [raise a feature request](/contact) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/contact) to work with Consulting services. +You may [raise a feature request](/reference/contact.md) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/reference/contact.md) to work with Consulting services. Recommendations are denoted with [recommended], however, other options are supported as well. diff --git a/versioned_docs/version-8.4/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md b/versioned_docs/version-8.4/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md index dbe8c95860f..513a05bc808 100644 --- a/versioned_docs/version-8.4/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md +++ b/versioned_docs/version-8.4/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md @@ -5,16 +5,27 @@ sidebar_label: "Connect to an existing Keycloak instance" description: "Learn how to connect Identity to your existing Keycloak instance." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + In this guide, we'll demonstrate how to connect Identity to your existing Keycloak instance. 
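Before working through the steps, it can help to see where this guide ends up: Identity ultimately only needs to know which realm to use and which client credentials to authenticate with. The sketch below shows the kind of environment variables you will have set by the end; `KEYCLOAK_URL` and `IDENTITY_CLIENT_ID` are assumed names shown for illustration, while `KEYCLOAK_REALM` and `IDENTITY_CLIENT_SECRET` are the variables referenced in the steps that follow.

```shell
# Illustrative sketch only — the concrete values come from the steps below.
export KEYCLOAK_URL=https://keycloak.example.com   # assumed variable name; your Keycloak base URL
export KEYCLOAK_REALM=camunda-platform             # the realm you select in the Keycloak Admin Console
export IDENTITY_CLIENT_ID=camunda-identity         # assumed variable name; the recommended default client ID
export IDENTITY_CLIENT_SECRET="<client secret copied from the client's Credentials tab>"
```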
-### Prerequisites +## Prerequisites + +- Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/23.0.1/server_admin/#using-the-admin-console) +- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/latest/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak -- Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/22.0.1/server_admin/#using-the-admin-console) -- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/22.0.1/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak. +## Steps -### Steps +To connect Identity to an existing Keycloak instance, take the following steps for your Camunda installation: -To connect Identity to an existing Keycloak instance, take the following steps: + + + 1. Log in to your Keycloak Admin Console. 2. Select the realm you would like to connect Identity to. In our example, this is **camunda-platform**. @@ -22,7 +33,7 @@ To connect Identity to an existing Keycloak instance, take the following steps: 3. Select **Clients** in the navigation menu, and click the **Create** button to create a new client. 4. Enter a client ID and click **Next**. :::note What client ID should I use? - By default, Identity uses the Client ID `camunda-identity`, so we recommend using this too. If you choose a different client ID, this will need to be set in the Identity application [environment variables](/docs/self-managed/identity/deployment/configuration-variables.md). + By default, Identity uses the Client ID `camunda-identity`, so we recommend using this too. If you choose a different client ID, this will need to be set in the Identity application [environment variables](/self-managed/identity/deployment/configuration-variables.md). ::: ![keycloak-admin-client-add-1](../img/keycloak-admin-client-add-1.png) 5. Toggle **Client authentication** to `on`, select **Service accounts roles** and click **Next**. @@ -38,19 +49,31 @@ To connect Identity to an existing Keycloak instance, take the following steps: Identity is designed to allow users to manage the various entities related to Camunda. To achieve this, it requires specific access to the realm. ::: 10. Navigate to the **Credentials** tab and copy the client secret. -11. Set the `IDENTITY_CLIENT_SECRET` [environment variable](/docs/self-managed/identity/deployment/configuration-variables.md) with the value from **Step 9**. -12. Set the `KEYCLOAK_REALM` [environment variable](/docs/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. +11. Set the `IDENTITY_CLIENT_SECRET` [environment variable](/self-managed/identity/deployment/configuration-variables.md) with the value from **Step 9**. +12. Set the `KEYCLOAK_REALM` [environment variable](/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. :::tip If you are using a specific realm, you need to set additional variables to use the intended realm. - See the [environment variables](/docs/self-managed/identity/deployment/configuration-variables.md) page for details of Keycloak-specific variables to consider. + See the [environment variables](/self-managed/identity/deployment/configuration-variables.md) page for details of Keycloak-specific variables to consider. ::: 13. Start Identity. + + + +1. Log in to your Keycloak Admin Console. +2. Verify the name of the realm you would like to connect Identity to. 
In our example, this is **camunda-platform**. + ![keycloak-admin-realm-select](../img/keycloak-admin-realm-select.png) +3. Set the `KEYCLOAK_REALM` [environment variable](/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. +4. Start Identity. + + + + :::note What does Identity create when starting? -Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/docs/self-managed/identity/deployment/starting-configuration.md). +Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/self-managed/identity/deployment/starting-configuration.md). ::: -### Considerations +## Considerations When connecting Identity to a shared realm, accurately determining what clients should and should not be displayed in the Identity UI is not possible. Therefore, the clients in the realm you connect Identity to will be shown in the Identity UI and can have their secrets viewed and updated. Users with access to Identity should be considered as having administrator-level access to the system. diff --git a/versioned_docs/version-8.4/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md b/versioned_docs/version-8.4/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md index f39ada6f300..4d2c0a73a62 100644 --- a/versioned_docs/version-8.4/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md +++ b/versioned_docs/version-8.4/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md @@ -4,6 +4,9 @@ title: Deploy diagram description: "Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed. Follow the steps below to deploy a diagram: 1. Click the rocket-shaped deployment icon: @@ -30,20 +33,40 @@ Multi-tenancy is only available with authentication enabled [through Identity](/ ![deployment via Camunda 8](./img/deploy-endpoint.png) -4. Select **Basic**, and input your username and password in case your gateway requires basic authentication: +4. Select your authentication method, and input the required credentials: + + + + + +For **basic authentication**, input your username and password: ![basic auth configuration](./img/deploy-with-basic-auth.png) -5. Select **OAuth**, and input the credentials in case your gateway requires authentication with OAuth: + -:::note -The OAuth URL needs to contain the full path to the token endpoint, i.e. `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. -::: + + +For **OAuth**, input the credentials for your OAuth provider. These are configured as part of the default [Helm installation](/self-managed/platform-deployment/helm-kubernetes/deploy.md) and can be discovered in [Identity](/self-managed/identity/what-is-identity.md), or are set by Zeebe [environment variables](/self-managed/zeebe-deployment/security/client-authorization.md#environment-variables). ![oauth configuration](./img/deploy-with-oauth.png) -6. Select the **Remember** checkbox if you want to locally store the connection information. 
+| Name | Description | Example value | +| --------------- | ------------------------------------ | ----------------------------------------------------------------------------------------- | +| Client ID | The name of your Zeebe client. | `zeebe` | +| Client secret | The password of your Zeebe client. | `zecret` | +| OAuth token url | The full path to the token endpoint. | `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. | +| OAuth audience | The permission name for Zeebe. | `zeebe-api` | + + + + +5. Select the **Remember** checkbox if you want to locally store the connection information. -7. Click **Deploy** to perform the deployment. +6. Click **Deploy** to perform the deployment. ![deployment successful](./img/deploy-success.png) diff --git a/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/configuration.md b/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/configuration.md index 1f374a2abcf..53b595d0f8f 100644 --- a/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/configuration.md +++ b/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/configuration.md @@ -144,10 +144,13 @@ The `webapp` component sends certain events (e.g. "user opened diagram", "user l ### Logging -| Environment variable | Description | Example value | -| -------------------- | -------------------------------------- | ---------------------------- | -| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| Environment variable | Description | Example value | +| -------------------- | ----------------------------------------------- | ---------------------------- | +| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| `LOG_LEVEL_CLIENT` | [optional]
    Log level for the client | `DEBUG` | +| `LOG_LEVEL_WEBAPP` | [optional]
    Log level for the Node.js server | `DEBUG` | +The `LOG_LEVEL_*` options can be found [here](../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). Refer to the [Advanced Logging Configuration Guide](./logging.md#logging-configuration-for-the-webapp-component) for additional details on how to customize the `webapp` logging output. ## Configuration of the `websocket` component diff --git a/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/logging.md b/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/logging.md index be8db0bd16d..7b5047b908b 100644 --- a/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/logging.md +++ b/versioned_docs/version-8.4/self-managed/modeler/web-modeler/configuration/logging.md @@ -42,6 +42,16 @@ To enable additional log output to a file, adjust the following environment vari LOG_FILE_PATH=/full/path/to/log/file.log ``` +### Configuring log levels + +To control the verbosity of the logs, adjust the environment variables `LOG_LEVEL_CLIENT` (browser client) and `LOG_LEVEL_WEBAPP` (Node.js server). + +```properties +LOG_LEVEL_CLIENT=DEBUG +``` + +The `LOG_LEVEL_*` options can be found [here](../../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). + ## Logging configuration for the `websocket` component By default, the `websocket` component logs to the Docker container's standard output. diff --git a/versioned_docs/version-8.4/self-managed/operate-deployment/importer-and-archiver.md b/versioned_docs/version-8.4/self-managed/operate-deployment/importer-and-archiver.md index 04cad62c780..60e561d9c62 100644 --- a/versioned_docs/version-8.4/self-managed/operate-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.4/self-managed/operate-deployment/importer-and-archiver.md @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.operate.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md b/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md index bae5f4f7250..e2a9e2d3bb2 100644 --- a/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md +++ b/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md @@ -5,6 +5,9 @@ description: "How to perform a backup and restore of Operate and Tasklist data." 
keywords: ["backup", "backups"] --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + :::note This release introduces breaking changes, including: @@ -37,33 +40,68 @@ The backup API can be reached via the Actuator management port, which by default Before you can use the backup and restore feature: 1. The [Elasticsearch snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html) must be configured. -2. Operate and Tasklist must be configured with the repository name using the following configuration parameters: +2. Operate and Tasklist must be configured with the repository name using one of the following configuration options: + + + + + +#### Operate ```yaml -for Operate: camunda: operate: backup: repositoryName: +``` + + + + + +#### Operate + +``` +CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= +``` + + + + +#### Tasklist -for Tasklist: + + + + +```yaml camunda: tasklist: backup: repositoryName: ``` -or with environmental variables: + -``` -for Operate: -CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= + -for Tasklist: +``` CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME= - ``` + + + ## Create backup API During backup creation Operate can continue running. To create the backup, call the following endpoint: diff --git a/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/optimize-backup.md b/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/optimize-backup.md index c694ff53341..823de9ef51a 100644 --- a/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/optimize-backup.md +++ b/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/optimize-backup.md @@ -25,7 +25,7 @@ Optimize provides an API to trigger a backup and retrieve information about a gi The following prerequisites must be set up before using the backup API: 1. A snapshot repository of your choice must be registered with Elasticsearch. -2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize configuration: +2. 
The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize [`environment-config.yaml`]($optimize$/self-managed/optimize-deployment/configuration/system-configuration/): ```yaml backup: @@ -58,7 +58,7 @@ POST actuator/backups ### Example request -``` +```shell curl --request POST 'http://localhost:8092/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": 123456 }' @@ -101,8 +101,8 @@ GET actuator/backup ### Example request -``` -curl ---request GET 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request GET 'http://localhost:8092/actuator/backups/123456' ``` ### Example response @@ -161,8 +161,8 @@ DELETE actuator/backups/{backupId} ### Example request -``` -curl ---request DELETE 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request DELETE 'http://localhost:8092/actuator/backups/123456' ``` ## Restore backup @@ -184,6 +184,6 @@ To restore a given backup, the following steps must be performed: Example Elasticsearch request: -``` +```shell curl --request POST `http://localhost:9200/_snapshot/repository_name/camunda_optimize_123456_3.9.0_part_1_of_2/_restore?wait_for_completion=true` ``` diff --git a/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md b/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md index cc72a9ae2af..ef01f03c5ec 100644 --- a/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md +++ b/versioned_docs/version-8.4/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md @@ -26,7 +26,7 @@ Even when the underlying storage bucket is the same, backups from one are not co ### S3 backup store -To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket: +To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -87,7 +87,7 @@ zeebe.broker.data.backup.s3.compression: zstd # or use environment variable ZEEB The GCS backup strategy utilizes the [Google Cloud Storage REST API](https://cloud.google.com/storage/docs/request-endpoints). ::: -To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use: +To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -142,7 +142,7 @@ The `backupId` cannot be reused, even if the backup corresponding to the backup

    Example request -``` +```shell curl --request POST 'http://localhost:9600/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": "100" }' @@ -185,7 +185,7 @@ GET actuator/backups/{backupId}
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups/100' ``` @@ -258,7 +258,7 @@ GET actuator/backups
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups' ``` @@ -336,7 +336,7 @@ DELETE actuator/backups/{backupId}
    Example request -``` +```shell curl --request DELETE 'http://localhost:9600/actuator/backups/100' ``` diff --git a/versioned_docs/version-8.4/self-managed/operational-guides/troubleshooting/log-levels.md b/versioned_docs/version-8.4/self-managed/operational-guides/troubleshooting/log-levels.md index f5423bb4a8b..365aa71fd33 100644 --- a/versioned_docs/version-8.4/self-managed/operational-guides/troubleshooting/log-levels.md +++ b/versioned_docs/version-8.4/self-managed/operational-guides/troubleshooting/log-levels.md @@ -24,3 +24,4 @@ Enable logging for each component of Camunda 8 using the following instructions: - [Operate](/self-managed/operate-deployment/operate-configuration.md#logging) - [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#logging) - [Web Modeler](/self-managed/modeler/web-modeler/configuration/logging.md) +- [Identity](/self-managed/identity/user-guide/configuration/configure-logging.md) diff --git a/versioned_docs/version-8.4/self-managed/operational-guides/update-guide/introduction.md b/versioned_docs/version-8.4/self-managed/operational-guides/update-guide/introduction.md index e6282cf6955..9ddfd169e5c 100644 --- a/versioned_docs/version-8.4/self-managed/operational-guides/update-guide/introduction.md +++ b/versioned_docs/version-8.4/self-managed/operational-guides/update-guide/introduction.md @@ -12,10 +12,6 @@ When updating from one minor version to the next, you do not need to update to e Depending on your amount of data, run a minor version for at least 24 hours before updating to the next version. -:::note -Versions prior to Camunda 8 are listed below and identified as Camunda Cloud versions. -::: - There is a dedicated update guide for each version: ### [Camunda 8.3 to Camunda 8.4](../830-to-840) diff --git a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/deploy.md b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/deploy.md index 9e1a60da01e..1b38862c885 100644 --- a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/deploy.md +++ b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/deploy.md @@ -215,7 +215,7 @@ To set up Web Modeler, you need to provide the following required configuration - Configure the database connection - Web Modeler requires a PostgreSQL database as persistent data storage (other database systems are currently not supported). - _Option 1_: Set `postgresql.enabled: true`. This will install a new PostgreSQL instance as part of the Helm release (using the [PostgreSQL Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) by Bitnami as a dependency). - - _Option 2_: Set `postgresql.enabled: false` and configure a [connection to an external database](#optional-configure-external-database). + - _Option 2_: Set `postgresql.enabled: false` and configure a connection to an external database (see the second example below). We recommend specifying these values in a YAML file that you pass to the `helm install` command. 
A minimum configuration file would look as follows: diff --git a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/guides/connect-to-an-oidc-provider.md b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/guides/connect-to-an-oidc-provider.md index ba313c89090..01bb0340760 100644 --- a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/guides/connect-to-an-oidc-provider.md +++ b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/guides/connect-to-an-oidc-provider.md @@ -38,11 +38,14 @@ UI is not available for this version.

    Steps

    1. In your OIDC provider, create an application for each of the components you want to connect. The expected redirect URI of the component you are configuring an app for can be found in [component-specific configuration](#component-specific-configuration). -2. Make a note of the following values for each application you create: +2. For all Components, ensure the appropriate application type is used: + - **Operate, Tasklist, Optimize, Identity:** Web applications requiring confidential access/a confidential client + - **Web Modeler:** A single-page application requiring public access/a public client +3. Make a note of the following values for each application you create: - Client ID - Client secret - Audience -3. Set the following environment variables for the component you are configuring an app for: +4. Set the following environment variables for the component you are configuring an app for: @@ -124,8 +127,8 @@ Ensure you register a new application for each component. ``` CAMUNDA_IDENTITY_TYPE=MICROSOFT CAMUNDA_IDENTITY_BASE_URL= - CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 - CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 CAMUNDA_IDENTITY_CLIENT_ID= CAMUNDA_IDENTITY_CLIENT_SECRET= CAMUNDA_IDENTITY_AUDIENCE= @@ -139,13 +142,13 @@ Ensure you register a new application for each component. global: identity: auth: - issuer: https://login.microsoftonline.com//v2.0 + issuer: https://login.microsoftonline.com//v2.0 # this is used for container to container communication - issuerBackendUrl: https://login.microsoftonline.com//v2.0 - tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token - jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys + issuerBackendUrl: https://login.microsoftonline.com//v2.0 + tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token + jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys type: "MICROSOFT" - publicIssuerUrl: https://login.microsoftonline.com//v2.0 + publicIssuerUrl: https://login.microsoftonline.com//v2.0 operate: clientId: audience: diff --git a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/eks-helm.md b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/eks-helm.md index 1b06e41c899..7a459720abb 100644 --- a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/eks-helm.md +++ b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/eks-helm.md @@ -12,7 +12,7 @@ Lastly you'll verify that the connection to your Self-Managed Camunda 8 environm ## Prerequisites -- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [terraform](./terraform-setup.md) guide. +- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [Terraform](./terraform-setup.md) guide. - [Helm (3.16+)](https://helm.sh/docs/intro/install/) - [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. - (optional) Domain name/[hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html) in Route53. This allows you to expose Camunda 8 and connect via [zbctl](../../../../../../apis-tools/cli-client/) or [Camunda Modeler](https://camunda.com/download/modeler/). 
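The EKS prerequisites above pin minimum tool versions (Helm 3.16+, kubectl 1.30+). As a quick, non-authoritative sanity check before starting the deployment — these are standard Helm/kubectl commands, not part of the changed guide — something like the following can confirm the local tooling meets those minimums:

```shell
# Print the installed Helm version (should be 3.16 or newer)
helm version --short

# Print the installed kubectl client version (should be 1.30 or newer)
kubectl version --client

# Confirm kubectl can reach the target EKS cluster
kubectl cluster-info
```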
diff --git a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md index cabd1eca81d..1b889e08eb0 100644 --- a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md +++ b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/irsa.md @@ -184,7 +184,7 @@ For additional details, refer to the [Camunda 8 Helm deployment documentation](. ### Web Modeler -Since Web Modeler RestAPI uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. +As the Web Modeler REST API uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. Web Modeler already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. #### Kubernetes configuration diff --git a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/terraform-setup.md b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/terraform-setup.md index 28c2429c2ee..d0cf2f1da05 100644 --- a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/terraform-setup.md +++ b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks/terraform-setup.md @@ -75,7 +75,7 @@ provider "aws" { :::note -It's recommended to use a different backend than `local`. More information can be found in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/settings/backends/configuration). +It's recommended to use a different backend than `local`. More information can be found in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/backend). ::: diff --git a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/upgrade.md b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/upgrade.md index a3cb64cea6f..5e2de975c4d 100644 --- a/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/upgrade.md +++ b/versioned_docs/version-8.4/self-managed/platform-deployment/helm-kubernetes/upgrade.md @@ -5,14 +5,14 @@ sidebar_label: "Upgrade" description: "To upgrade to a more recent version of the Camunda Helm charts, there are certain things you need to keep in mind." --- -To upgrade to a more recent version of the Camunda Helm charts, there are certain things you need to keep in mind. - -:::caution - -Ensure to review the [instructions for a specific version](#version-update-instructions) before starting the actual upgrade. +:::note +When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the next **major** version of the chart. +For example, if the current Helm chart version is 10.x.x, and the latest next major version is 11.0.1, the recommended upgrade is to 11.0.1 (not 11.0.0). 
::: +To upgrade to a more recent version of the Camunda Helm charts, review the [instructions for a specific version](#version-update-instructions). + ### Upgrading where Identity disabled Normally for a Helm upgrade, you run the [Helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) command. If you have disabled Camunda Identity and the related authentication mechanism, you should be able to do an upgrade as follows: diff --git a/versioned_docs/version-8.4/self-managed/react-components/components.md b/versioned_docs/version-8.4/self-managed/react-components/components.md index 4b5196c4e1b..fd5bcbced8b 100644 --- a/versioned_docs/version-8.4/self-managed/react-components/components.md +++ b/versioned_docs/version-8.4/self-managed/react-components/components.md @@ -19,6 +19,6 @@ Camunda 8 Self-Managed users may also use [Desktop Modeler](../../components/mod :::note -To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/contact). +To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/reference/contact.md). ::: diff --git a/versioned_docs/version-8.4/self-managed/tasklist-deployment/importer-and-archiver.md b/versioned_docs/version-8.4/self-managed/tasklist-deployment/importer-and-archiver.md index c75e1fe9655..54d26a1fe59 100644 --- a/versioned_docs/version-8.4/self-managed/tasklist-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.4/self-managed/tasklist-deployment/importer-and-archiver.md @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.tasklist.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/broker.md b/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/broker.md index 3c2bb8cfe46..109f6e805f4 100644 --- a/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/broker.md +++ b/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/broker.md @@ -484,11 +484,11 @@ backpressure: ### zeebe.broker.backpressure.gradient -| Field | Description | Example Value | -| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. 
This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | +| Field | Description | Example Value | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | #### YAML snippet @@ -503,12 +503,12 @@ backpressure: ### zeebe.broker.backpressure.gradient2 -| Field | Description | Example Value | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. 
| 600 | +| Field | Description | Example Value | +| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | +| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | #### YAML snippet diff --git a/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/gateway.md b/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/gateway.md index cc5e66dd6f6..2f861de6932 100644 --- a/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/gateway.md +++ b/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/gateway.md @@ -307,7 +307,8 @@ Each interceptor should be configured with the values described below:
    classNameEntry point of the interceptor, a class which must: + + Entry point of the interceptor, a class which must:
  • implement io.grpc.ServerInterceptor
  • have public visibility
  • have a public default constructor (i.e. no-arg constructor)
  • @@ -354,3 +355,9 @@ If you are using an embedded gateway, refer to the [broker configuration guide]( multiTenancy: enabled: true ``` + +### Experimental configuration + +See the experimental section of the [gateway.yaml.template](https://github.com/camunda/camunda/blob/stable/8.4/dist/src/main/config/gateway.yaml.template#L298). + +Be aware that all configuration properties which are part of the experimental section are subject to change and can be dropped at any time. diff --git a/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/priority-election.md b/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/priority-election.md index 4e466a0640a..d7fc45c0413 100644 --- a/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/priority-election.md +++ b/versioned_docs/version-8.4/self-managed/zeebe-deployment/configuration/priority-election.md @@ -10,8 +10,8 @@ It aims to achieve a more uniform leader distribution by assigning each node a p ## Configuration -Enable priority election by setting `zeebe.broker.raft.enablePriorityElection = "true"` in your config or -by setting the equivalent environment variable `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`. +Enable priority election by setting `zeebe.broker.cluster.raft.enablePriorityElection=true` in your config or +by setting the equivalent environment variable `ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION=true`. If you are using the fixed partitioning scheme (experimental), you may need [additional configuration](fixed-partitioning.md#priority-election). @@ -19,7 +19,7 @@ If you are using the fixed partitioning scheme (experimental), you may need [add With priority election enabled, election latency and thus failover time increases. -The result of leader election is not deterministic and priority election can only increase the chance of having a +The result of a leader election is not deterministic, and priority election can only increase the chance of having a uniform leader distribution, not guarantee it. -Factors such as high load can prevent high priority nodes from becoming the leader. +Factors such as high load can prevent high-priority nodes from becoming the leader. diff --git a/versioned_docs/version-8.4/self-managed/zeebe-deployment/operations/backpressure.md b/versioned_docs/version-8.4/self-managed/zeebe-deployment/operations/backpressure.md index 3cb675dca0b..3ffc609825e 100644 --- a/versioned_docs/version-8.4/self-managed/zeebe-deployment/operations/backpressure.md +++ b/versioned_docs/version-8.4/self-managed/zeebe-deployment/operations/backpressure.md @@ -12,6 +12,13 @@ If the broker keeps accepting new requests from the client, the backlog increase To avoid such problems, Zeebe employs a backpressure mechanism. When the broker receives more requests than it can process with an acceptable latency, it rejects some requests (see [technical error handling](/apis-tools/zeebe-api/technical-error-handling.md)). +:::note +When [multi-tenancy](./../../concepts/multi-tenancy.md) is enabled in Camunda 8, a large number of concurrent requests +may also lead to issues with Camunda Identity. In such cases, it is recommended to enable and configure the management of +Identity requests in the Zeebe Gateway. This allows Zeebe to employ a backpressure mechanism against these requests. +For more information, see the Zeebe Gateway [experimental configuration documentation](./../configuration/gateway.md#experimental-configuration). 
+::: + ### Terminology - **RTT** - Round-Trip Time, known as the time between when the request is accepted by the broker and when the response to the request is sent back to the gateway. diff --git a/versioned_docs/version-8.5/apis-tools/administration-api/tutorial.md b/versioned_docs/version-8.5/apis-tools/administration-api/tutorial.md index ab4093fca6f..062ff1fca58 100644 --- a/versioned_docs/version-8.5/apis-tools/administration-api/tutorial.md +++ b/versioned_docs/version-8.5/apis-tools/administration-api/tutorial.md @@ -33,6 +33,10 @@ To set up your credentials, create an `.env` file which will be protected by the These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CONSOLE_CLIENT_ID`, `CAMUNDA_CONSOLE_CLIENT_SECRET`, `CAMUNDA_CONSOLE_OAUTH_AUDIENCE`, and `CAMUNDA_CONSOLE_BASE_URL`. Locate your `CLUSTER_ID` in Console by navigating to **Clusters**. Scroll down and copy your **Cluster Id** under **Cluster Details**. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/versioned_docs/version-8.5/apis-tools/community-clients/spring.md b/versioned_docs/version-8.5/apis-tools/community-clients/spring.md index 11665bc5f19..b2d4cb0e7e0 100644 --- a/versioned_docs/version-8.5/apis-tools/community-clients/spring.md +++ b/versioned_docs/version-8.5/apis-tools/community-clients/spring.md @@ -4,7 +4,7 @@ title: "Spring" --- :::note -This is a community offering. For our officially-supported offering, review Camunda's [Spring Zeebe SDK](/apis-tools/spring-zeebe-sdk/getting-started.md) to leverage Zeebe APIs ([gRPC](docs/apis-tools/zeebe-api/grpc.md) and [REST](docs/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md)) in your Spring Boot project. +This is a community offering. For our officially-supported offering, review Camunda's [Spring Zeebe SDK](/apis-tools/spring-zeebe-sdk/getting-started.md) to leverage Zeebe APIs ([gRPC](docs/apis-tools/zeebe-api/grpc.md) and [REST](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md)) in your Spring Boot project. ::: The Spring integration is a community extension that allows you to easily leverage Zeebe within your Spring or Spring Boot environment. diff --git a/versioned_docs/version-8.5/apis-tools/node-js-sdk.md b/versioned_docs/version-8.5/apis-tools/node-js-sdk.md index 3186f048f21..ffe800bbc5f 100644 --- a/versioned_docs/version-8.5/apis-tools/node-js-sdk.md +++ b/versioned_docs/version-8.5/apis-tools/node-js-sdk.md @@ -1,7 +1,7 @@ --- id: node-js-sdk title: Node.js -description: Get started with the official Camunda 8 JavaScript SDK for Node.js, available via npm. +description: Get started with the official Camunda 8 JavaScript SDK for Node.js. --- As of 8.5.0, the official [Camunda 8 JavaScript SDK for Node.js](https://github.com/camunda/camunda-8-js-sdk) is available via [npm](https://www.npmjs.com/package/@camunda8/sdk). 
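The Node.js page above only notes that the SDK ships on npm as `@camunda8/sdk`. As an illustrative example (standard npm usage, not taken from the changed page), adding it to an existing Node.js project looks like:

```shell
# Add the official Camunda 8 JavaScript SDK to the current project
npm install @camunda8/sdk
```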
diff --git a/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/configuration.md b/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/configuration.md index 4e39d88af54..e13a7d70b99 100644 --- a/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/configuration.md +++ b/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/configuration.md @@ -166,6 +166,29 @@ public void handleJobFoo(final JobClient client, final ActivatedJob job) { } ``` +You can also control auto-completion in your configuration. + +**Globally:** + +```yaml +camunda: + client: + zeebe: + defaults: + auto-complete: false +``` + +**Per worker:** + +```yaml +camunda: + client: + zeebe: + override: + foo: + auto-complete: false +``` + Ideally, you **don't** use blocking behavior like `send().join()`, as this is a blocking call to wait for the issued command to be executed on the workflow engine. While this is very straightforward to use and produces easy-to-read code, blocking code is limited in terms of scalability. This is why the worker above showed a different pattern (using `exceptionally`). Often, you might also want to use the `whenComplete` callback: diff --git a/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/getting-started.md b/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/getting-started.md index 2d8485dc6a6..7553a5bdd6f 100644 --- a/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/getting-started.md +++ b/versioned_docs/version-8.5/apis-tools/spring-zeebe-sdk/getting-started.md @@ -8,35 +8,19 @@ This project allows you to leverage Zeebe APIs ([gRPC](/apis-tools/zeebe-api/grp ## Version compatibility -| Camunda Spring SDK version | JDK | Camunda version | Bundled Spring Boot version | -| -------------------------- | ------ | --------------- | --------------------------- | -| 8.5.x | \>= 17 | 8.5.x | 3.2.x | +| Camunda Spring SDK version | JDK | Camunda version | Bundled Spring Boot version | +| -------------------------- | ---- | --------------- | --------------------------- | +| 8.5.x | ≥ 17 | 8.5.x | 3.2.x | ## Add the Spring Zeebe SDK to your project -Add the following repository and Maven dependency to your Spring Boot Starter project: - -```xml - - - - true - - - false - - identity - Camunda Identity - https://artifacts.camunda.com/artifactory/camunda-identity/ - - -``` +Add the following Maven dependency to your Spring Boot Starter project, replacing `x` with the latest patch level available: ```xml - io.camunda - spring-boot-starter-camunda-sdk - 8.5.0 + io.camunda + spring-boot-starter-camunda-sdk + 8.5.x ``` @@ -124,6 +108,13 @@ ZEEBE_CLIENT_ID=xxx ZEEBE_CLIENT_SECRET=xxx ``` +Example environment variables to be set to configure gRPC and REST connection: + +```properties +ZEEBE_GRPC_ADDRESS=http://127.0.0.1:26500/ +ZEEBE_REST_ADDRESS=http://127.0.0.1:8080/ +``` + Properties to be set using this approach: ```properties @@ -138,6 +129,14 @@ zeebe.client.connection-mode=CLOUD zeebe.client.connection-mode=ADDRESS ``` +### Configuring OAuth Scope (Optional) + +The OAuth scope can be configured via the following [client environment variable](self-managed/zeebe-deployment/security/client-authorization.md#environment-variables) only: + +``` +ZEEBE_TOKEN_SCOPE=xxx +``` + ## Obtain the Zeebe client You can inject the Zeebe client and work with it to create new workflow instances, for example: diff --git a/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md b/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md 
index b4f17bacb49..74c98363912 100644 --- a/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md +++ b/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md @@ -12,8 +12,8 @@ import TableTextSmall from "./assets/react-components/TableTextSmall"; import userTaskMigrationDecisionHelperForm from "./assets/forms/userTaskMigrationDecisionHelperForm.js"; import "./assets/css/condensedTable.module.css"; import styles from "./assets/css/cleanImages.module.css"; -import APIArchitectureImg from './assets/img/api-architecture.png'; import ZeebeTaskSelectionImg from './assets/img/zeebe-user-task-selection.png'; +import APIArchitectureImg from './assets/img/api-architecture.png'; Camunda 8.5 introduces a new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type: Zeebe user tasks. Zeebe user tasks have several benefits, including: @@ -373,7 +373,7 @@ docId:"apis-tools/tasklist-api-rest/tasklist-api-rest-overview" }, { type:"link", -href:"/docs/next/apis-tools/zeebe-api-rest/zeebe-api-rest-overview/", +href:"/docs/8.5/apis-tools/zeebe-api-rest/zeebe-api-rest-overview/", label: "Zeebe API (REST)", docId:"apis-tools/zeebe-api-rest/zeebe-api-rest-overview" } diff --git a/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md b/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md index 51b479a2a4a..e0203e390e3 100644 --- a/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md +++ b/versioned_docs/version-8.5/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md @@ -5,6 +5,12 @@ sidebar_position: 1 description: "Build applications for human-centered processes by querying human tasks, assigning users, and completing tasks with the Tasklist API." --- +:::note +Camunda introduced [Zeebe user tasks](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md) with `8.5` to build more advanced functionalities. If you use Zeebe user tasks with `8.5`, task management endpoints in the Tasklist API will not work. + +To manage Zeebe user tasks Camunda has introduced the [Zeebe REST API](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md), though you can still query Zeebe user tasks with the Tasklist API. +::: + ## Introduction The Tasklist API is a REST API designed to build task applications for human-centered processes. The API allows you to query user tasks, assign users to these tasks, and complete these tasks. 
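The Tasklist API overview above describes querying, assigning, and completing user tasks over REST. A minimal query sketch follows; the `/v1/tasks/search` path, the localhost host, and the bearer token are assumptions for illustration and are not part of the diff:

```shell
# Search for up to 10 open (CREATED) user tasks via the Tasklist REST API
curl --request POST 'http://localhost:8080/v1/tasks/search' \
  -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <access_token>' \
  -d '{ "state": "CREATED", "pageSize": 10 }'
```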
diff --git a/versioned_docs/version-8.5/apis-tools/tasklist-api/tasklist-api-tutorial.md b/versioned_docs/version-8.5/apis-tools/tasklist-api/tasklist-api-tutorial.md index 3dad8bb79cd..5ecdd5ba456 100644 --- a/versioned_docs/version-8.5/apis-tools/tasklist-api/tasklist-api-tutorial.md +++ b/versioned_docs/version-8.5/apis-tools/tasklist-api/tasklist-api-tutorial.md @@ -250,9 +250,8 @@ export class TasklistModule implements OnModuleInit { logger.log("Tasklist credentials fetched"); axiosRef.defaults.baseURL = config.get("TASKLIST_API_ADDRESS"); - axiosRef.defaults.headers[ - "Authorization" - ] = `Bearer ${credentials.access_token}`; + axiosRef.defaults.headers["Authorization"] = + `Bearer ${credentials.access_token}`; axiosRef.defaults.headers["Content-Type"] = "application/json"; setTimeout(this.onModuleInit.bind(this), credentials.expires_in * 1000); // we need convert minutes to milliseconds } diff --git a/versioned_docs/version-8.5/apis-tools/web-modeler-api/tutorial.md b/versioned_docs/version-8.5/apis-tools/web-modeler-api/tutorial.md index 946f47749aa..0aa8148bf94 100644 --- a/versioned_docs/version-8.5/apis-tools/web-modeler-api/tutorial.md +++ b/versioned_docs/version-8.5/apis-tools/web-modeler-api/tutorial.md @@ -32,6 +32,10 @@ To set up your credentials, create an `.env` file which will be protected by the These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CONSOLE_CLIENT_ID`, `CAMUNDA_CONSOLE_CLIENT_SECRET`, and `CAMUNDA_CONSOLE_OAUTH_AUDIENCE`. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md b/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md index 9ee4f2daecb..57e5bbf0d50 100644 --- a/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md +++ b/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md @@ -84,6 +84,9 @@ Additionally, visit our documentation on [Operate](../self-managed/operate-deplo ### SDKs ### Postman diff --git a/versioned_docs/version-8.5/components/best-practices/development/invoking-services-from-the-process-c7.md b/versioned_docs/version-8.5/components/best-practices/development/invoking-services-from-the-process-c7.md index d46abdcd87d..c99604bbc20 100644 --- a/versioned_docs/version-8.5/components/best-practices/development/invoking-services-from-the-process-c7.md +++ b/versioned_docs/version-8.5/components/best-practices/development/invoking-services-from-the-process-c7.md @@ -160,7 +160,8 @@ Only if the increased latency does not work for your use case, for example, beca
    -

    Call a named bean or java class implementing the +

    + Call a named bean or java class implementing the JavaDelegate interface.

    -

    Use a configurable Connector +

    + Use a configurable Connector
    (REST or SOAP services provided out-of-the-box).

    -

    Pull a service task into an external worker thread and inform process engine of -completion.

    +

    + Pull a service task into an external worker thread and inform process engine of +completion. +

    Execute a script inside the engine.

    @@ -183,7 +187,8 @@ completion.

    -

    Use with +

    + Use with
    BPMN elements.

    @@ -252,7 +257,8 @@ completion.

    -

    Implement +

    + Implement
    via

    @@ -261,8 +267,10 @@ completion.

    Java (in same JVM)

    -

    Expression Language -(can reference Java code)

    +

    + Expression Language + (can reference Java code) +

    BPMN configuration

    @@ -377,9 +385,11 @@ completion.

    Configure via

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -390,9 +400,11 @@ completion.

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -401,9 +413,11 @@ completion.

    -

    BPMN Attribute +

    + BPMN Attribute
    - serviceTask + + serviceTask
    camunda:
    @@ -412,9 +426,10 @@ completion.

    -

    BPMN Ext. Element+ - - serviceTask +

    + BPMN Ext. Element+ + + serviceTask
    camunda:
    @@ -423,9 +438,11 @@ completion.

    -

    BPMN Attributes +

    + BPMN Attributes
    - serviceTask + + serviceTask
    camunda:
    @@ -438,13 +455,15 @@ completion.

    -

    BPMN Element +

    + BPMN Element
    script or
    BPMN Attribute
    - scriptTask + + scriptTask
    camunda:
    diff --git a/versioned_docs/version-8.5/components/best-practices/development/service-integration-patterns.md b/versioned_docs/version-8.5/components/best-practices/development/service-integration-patterns.md index 60b84274266..79ce5c6f532 100644 --- a/versioned_docs/version-8.5/components/best-practices/development/service-integration-patterns.md +++ b/versioned_docs/version-8.5/components/best-practices/development/service-integration-patterns.md @@ -120,7 +120,7 @@ You can leverage [message buffering](/components/concepts/messages.md#message-bu Retries are not built-in, so if you need to model a loop to retry the initial service call if no response is received. And (at least in the current Zeebe version), there is no possibility to trigger error events for a receive task, which means you need to model error messages as response payload or separate message types — both are discussed later in this post. -A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/contact) to discuss such a scenario in more depth. +A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/reference/contact.md) to discuss such a scenario in more depth. **Summary And recommendations** diff --git a/versioned_docs/version-8.5/components/best-practices/development/understanding-transaction-handling-c7.md b/versioned_docs/version-8.5/components/best-practices/development/understanding-transaction-handling-c7.md index d2465ecc39f..3f39f09aafb 100644 --- a/versioned_docs/version-8.5/components/best-practices/development/understanding-transaction-handling-c7.md +++ b/versioned_docs/version-8.5/components/best-practices/development/understanding-transaction-handling-c7.md @@ -90,29 +90,29 @@ Aside a general strategy to mark service tasks as being save points you will oft **Do** configure a savepoint **after** -- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. +- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. 
when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. -- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. +- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. **Do** configure a savepoint **before** -- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. +- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. 
-- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. +- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. -- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. +- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. The Camunda JobExecutor works (by default) with [exclusive jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs), meaning that just one exclusive job per process instance may be executed at once. Hence, job executor threads will by default not cause optimistic locking exceptions at parallel joins "just by themselves", but other threads using the Camunda API might cause them - either for themselves or also for the job executor. **Don't** configure save points **before** -- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. 
+- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. -- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. +- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. ### Adding save points automatically to every model diff --git a/versioned_docs/version-8.5/components/concepts/clusters.md b/versioned_docs/version-8.5/components/concepts/clusters.md index e756c994c95..8b831e9c432 100644 --- a/versioned_docs/version-8.5/components/concepts/clusters.md +++ b/versioned_docs/version-8.5/components/concepts/clusters.md @@ -32,21 +32,18 @@ When your Free Trial plan expires, you are automatically transferred to the Free ### Auto-pause -Free Trial `dev` (or untagged) clusters are automatically paused eight hours after a cluster is created or resumed from a paused state. Auto-pause occurs regardless of cluster usage. +Free Trial clusters are automatically paused after a period of inactivity. Auto-pause occurs regardless of cluster usage. -You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume your cluster](/components/console/manage-clusters/manage-cluster.md#resume-a-cluster). +You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume a cluster](/components/console/manage-clusters/manage-cluster.md#resume-a-cluster). -- Clusters tagged as `test`, `stage`, or `prod` do not auto-pause. -- Paused clusters are automatically deleted after 30 consecutive paused days. You can change the tag to avoid cluster deletion. -- No data is lost while a cluster is paused. All execution and configuration is saved, but cluster components such as Zeebe and Operate are temporarily disabled until you resume the cluster. +- Clusters tagged as `dev` (or untagged) auto-pause eight hours after the cluster is created or resumed from a paused state. +- Clusters tagged as `test`, `stage`, or `prod` auto-pause if there is no cluster activity for 48 hours. +- Cluster disk space is cleared when a trial cluster is paused. + - You will need to redeploy processes to the cluster once it is resumed from a paused state. + - Cluster configuration settings (for example, API Clients, Connector secrets, and IP allowlists) are saved so you can easily resume a cluster. :::tip - -To prevent auto-pause, you can: - -- Tag the cluster as `test`, `stage`, or `prod` instead of `dev`. -- [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter, Professional, or Enterprise plan. 
- +To prevent auto-pause, [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter or Enterprise plan. ::: ## Development clusters @@ -59,7 +56,7 @@ The way this type of cluster works varies depending on if you are using it in th Enterprise plan users can purchase development clusters as part of their Enterprise subscription agreement. Deployment and execution of models (process instances, decision instances, and task users) are included at no extra cost for this type of cluster. Additionally, this type of cluster in the Enterprise plan follows the [standard data retention policy](/components/concepts/data-retention.md) and does not auto-pause when not in use. -Please [contact us](https://camunda.com/contact/) if you are an existing customer and would like to purchase a development cluster. +Please [contact us](/reference/contact.md) if you are an existing customer and would like to purchase a development cluster. ### Development clusters in the Starter plan diff --git a/versioned_docs/version-8.5/components/concepts/data-retention.md b/versioned_docs/version-8.5/components/concepts/data-retention.md index aa10e6a970c..55dc9678644 100644 --- a/versioned_docs/version-8.5/components/concepts/data-retention.md +++ b/versioned_docs/version-8.5/components/concepts/data-retention.md @@ -15,7 +15,7 @@ The following time-to-live settings are configured in SaaS for each application. - **Tasklist**: 30 days - **Zeebe**: 7 days -If there are specific requirements for your use-case, [reach out to us](/contact/) to discuss your data retention needs under an Enterprise plan. +If there are specific requirements for your use-case, [reach out to us](/reference/contact.md) to discuss your data retention needs under an Enterprise plan. For more information on development clusters in the Starter or Professional plans, refer to our [fair usage limits of those plans](https://camunda.com/legal/fair-usage-limits-for-starter-plan/). ## Additional information diff --git a/versioned_docs/version-8.5/components/concepts/messages.md b/versioned_docs/version-8.5/components/concepts/messages.md index c1236ca8d93..f6fbf1d347d 100644 --- a/versioned_docs/version-8.5/components/concepts/messages.md +++ b/versioned_docs/version-8.5/components/concepts/messages.md @@ -4,7 +4,7 @@ title: "Messages" description: "Learn how process instances can respond to incoming messages." --- -Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called **[message correlation](/guides/message-correlation.md)**. +Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called [message correlation](/components/modeler/bpmn/message-events/message-events.md#message-correlation). ## Message subscriptions @@ -118,6 +118,10 @@ The first message creates a new process instance. The following messages are cor When the instance ends and messages with the same correlation key are not correlated yet, a new process instance is created. +:::note +You may also use TTL to wait for messages that may arrive earlier when combining [start events and intermediate catch events](/docs/components/modeler/bpmn/events.md). 
+::: + ### Single instance **Problem**: Create exactly one instance of a process diff --git a/versioned_docs/version-8.5/components/concepts/process-instance-creation.md b/versioned_docs/version-8.5/components/concepts/process-instance-creation.md index 6a43c5143a0..b2bb0491f8e 100644 --- a/versioned_docs/version-8.5/components/concepts/process-instance-creation.md +++ b/versioned_docs/version-8.5/components/concepts/process-instance-creation.md @@ -26,9 +26,10 @@ This command creates a new process instance and immediately responds with the pr ![create-process](assets/create-process.png) -

    - Code example -

    Create a process instance: +

    + Code example +

    +Create a process instance: ``` zbctl create instance "order-process" @@ -38,16 +39,16 @@ Response: ``` { - "processKey": 2251799813685249, - "bpmnProcessId": "order-process", - "version": 1, - "processInstanceKey": 2251799813686019 + "processKey": 2251799813685249, + "bpmnProcessId": "order-process", + "version": 1, + "processInstanceKey": 2251799813686019 } ``` -

    -
    +

    +
    ### Create and await results @@ -67,7 +68,8 @@ When the client resends the command, it creates a new process instance.
    Code example -

    Create a process instance and await results: +

    +Create a process instance and await results: ``` zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}' @@ -123,7 +125,7 @@ Start instructions are supported for both `CreateProcessInstance` commands.

    Code example

    - Create a process instance starting before the 'ship_parcel' element: +Create a process instance starting before the 'ship_parcel' element: ```java client.newCreateInstanceCommand() diff --git a/versioned_docs/version-8.5/components/concepts/what-is-camunda-8.md b/versioned_docs/version-8.5/components/concepts/what-is-camunda-8.md index 1374cbd8d1f..3a65f8d66ee 100644 --- a/versioned_docs/version-8.5/components/concepts/what-is-camunda-8.md +++ b/versioned_docs/version-8.5/components/concepts/what-is-camunda-8.md @@ -105,7 +105,7 @@ The platform and tools are usable in your environment right away, with full publ ## Next steps -- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/contact/) page. +- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/reference/contact.md) page. - [Introduction to Camunda 8](/guides/introduction-to-camunda-8.md) - [Create a Camunda 8 account](/guides/create-account.md) - [Migrate from Camunda 7 to Camunda 8](/guides/migrating-from-camunda-7/index.md) diff --git a/versioned_docs/version-8.5/components/concepts/workflow-patterns.md b/versioned_docs/version-8.5/components/concepts/workflow-patterns.md index ace1b2cbd69..8d148ee15e5 100644 --- a/versioned_docs/version-8.5/components/concepts/workflow-patterns.md +++ b/versioned_docs/version-8.5/components/concepts/workflow-patterns.md @@ -276,10 +276,6 @@ An important problem to solve is how to roll back a business transaction in case In BPMN, you can use [compensation events](/components/modeler/bpmn/bpmn-coverage.md) to easily implement compensations in your processes. -:::note -The compensation event is supported in Camunda 7, but not yet in Camunda 8. It is on the roadmap and will eventually be available in Camunda 8. -::: -

    1 diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/connector-sdk.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/connector-sdk.md index 53d83a96be5..71ece7ccebe 100644 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/connector-sdk.md +++ b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/connector-sdk.md @@ -1049,8 +1049,9 @@ For example, you can spin up a custom client with the [Zeebe Java client](/apis-tools/java-client/index.md) as follows: ```java -import io.camunda.connector.MyConnectorFunction -import io.camunda.connector.runtime.jobworker.outbound.ConnectorJobHandler; +import io.camunda.connector.MyConnectorFunction; +import io.camunda.connector.runtime.core.outbound.ConnectorJobHandler; +import io.camunda.connector.validation.impl.DefaultValidationProvider; import io.camunda.zeebe.client.ZeebeClient; public class Main { @@ -1061,7 +1062,7 @@ public class Main { zeebeClient.newWorker() .jobType("io.camunda:template:1") - .handler(new ConnectorJobHandler(new MyConnectorFunction())) + .handler(new ConnectorJobHandler(new MyConnectorFunction(), new DefaultValidationProvider())) .name("MESSAGE") .fetchVariables("authentication", "message") .open(); @@ -1076,5 +1077,5 @@ it with your job handler implementation that handles invoking the Connector func Your custom job handler needs to create a `OutboundConnectorContext` that the Connector function can use to handle variables, secrets, and Connector results. You can extend the -provided `io.camunda.connector.impl.outbound.AbstractConnectorContext` to quickly gain access +provided `io.camunda.connector.runtime.core.AbstractConnectorContext` to quickly gain access to most of the common context operations. diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/010-to-020.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/010-to-020.md deleted file mode 100644 index e71a7316283..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/010-to-020.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -id: 010-to-020 -title: Update 0.1 to 0.2 -description: "Review which adjustments must be made to migrate from Connector SDK 0.1.x to 0.2.0." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.1.x to 0.2.0. - -:::caution - -Be aware that the update from 0.1 to 0.2 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.2.0, we introduce the following structural changes: - -- Input validation and secret replacement move from writing imperative code to declaratively using annotations. -- The outbound aspect of APIs is more explicit. Classes have been moved to more explicit packages and have been renamed. -- New required annotation for outbound Connectors. - -### Declarative validation and secrets - -Input objects previously had to implement the `ConnectorInput` interface to participate in validation and secret replacement -initiated from the `ConnectorContext` using its `validate` and `replaceSecrets` methods respectively. - -With version 0.2.0, we remove the imperative approach for validation and secret replacement from the SDK. 
-Instead, you can use annotations to describe the constraints of input attributes and mark those that can contain -secrets. - -These are two input objects written with the SDK version 0.1.x: - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class MyConnectorRequest implements ConnectorInput { - - private String message; - private Authentication authentication; - - @Override - public void validateWith(final Validator validator) { - validator.require(message, "message"); - validator.require(authentication, "authentication"); - validateIfNotNull(authentication, validator); - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - replaceSecretsIfNotNull(authentication, secretStore); - } -} -``` - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class Authentication implements ConnectorInput { - - private String user; - private String token; - - @Override - public void validateWith(final Validator validator) { - validator.require(user, "user"); - validator.require(token, "token"); - if (token != null && !(token.startsWith("xobx") || token.startsWith("secrets."))) { - validator.addErrorMessage("Token must start with \"xobx\" or be a secret"); - } - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - token = secretStore.replaceSecret(token); - } -} -``` - -You can express the same input objects with SDK version 0.2.0 as follows: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.NotNull; - -public class MyConnectorRequest { - - @NotEmpty - private String message; - - @NotNull - @Valid - @Secret - private Authentication authentication; -} -``` - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.Pattern; - -public class Authentication { - - @NotEmpty - private String user; - - @NotEmpty - @Pattern("^(xobx-|secret).+") - @Secret - private String token; -} -``` - -As a result, you have to remove the `ConnectorInput` interface implementation and the imperative code that comes with `validateWith` -and `replaceSecrets`. You can now concisely describe the constraints of attributes rather than express them in imperative code. - -To use annotaion-based validation out of the box, you can include the new artifact `connector-validation` that -comes with the SDK. - - - - - -```xml - - io.camunda.connector - connector-validation - 0.2.0 - -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-validation:0.2.0' -``` - - - - -You can read more about validation and secret replacement in our -[SDK guide](/components/connectors/custom-built-connectors/connector-sdk.md). - -### Explicit outbound aspect - -With version 0.2.0 of the SDK, we make the outbound aspect of those components specific to outbound connectivity -more visible. This separates those SDK components that are tightly coupled to outbound from those that -will be reusable for inbound. - -With this change, the names of the following classes need to be adjusted: - -- Rename `io.camunda.connector.api.ConnectorContext` to `io.camunda.connector.api.outbound.OutboundConnectorContext`. 
-- Rename `io.camunda.connector.api.ConnectorFunction` to `io.camunda.connector.api.outbound.OutboundConnectorFunction`. -- Rename `io.camunda.connector.api.SecretProvider` to `io.camunda.connector.api.secret.SecretProvider`. -- Rename `io.camunda.connector.api.SecretStore` to `io.camunda.connector.api.secret.SecretStore`. -- Rename `io.camunda.connector.test.ConnectorContextBuilder` to `io.camunda.connector.test.outbound.OutboundConnectorContextBuilder`. - -As a result, you must replace all occurrences of the old class names and imports with the new ones. This includes the -SPI for the Connector function itself. Therefore, rename the file `META-INF/services/io.camunda.connector.api.ConnectorFunction` to -`META-INF/services/io.camunda.connector.api.outbound.OutboundConnectorFunction`. - -### `@OutboundConnector` annotation - -For best interoperability, Connectors provide default meta-data (`name`, `type`, `inputVariables`) via the `@OutboundConnector` annotation: - -```java -@OutboundConnector( - name = "PING", - inputVariables = {"caller"}, - type = "io.camunda.example.PingConnector:1" -) -public class PingConnector implements OutboundConnectorFunction { - ... -} -``` - -## Connector runtime environment - -If using the -[pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that comes with the SDK does not fit your use case, you can create a custom runtime environment. - -With version 0.2.0 of the [job worker runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#connector-job-handler), you need to make the following changes: - -- Rename `io.camunda.connector.runtime.jobworker.ConnectorJobHandler` to `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler`. -- Rename Connector-related env variables from `ZEEBE_` to `CONNECTOR_`. Zeebe configuration properties remain unchanged. - -As a general change in behavior the module will now pick up Connectors from classpath unless it is explicitly configured via environment variables. - -Also, take the name changes in the [SDK core](#explicit-outbound-aspect) into account. - -Implementing your own Connector wrapper you need to provide a Connector context specific to -your environment. Consider extending the `io.camunda.connector.impl.outbound.AbstractConnectorContext` -instead of implementing the `io.camunda.connector.api.ConnectorContext` yourself. Most of the commonly needed functionality -is already provided in there. diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md deleted file mode 100644 index 61142a93750..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: 0100-to-0110 -title: Update 0.10 to 0.11 -description: "Review which adjustments must be made to migrate from Connector SDK 0.10.x to 0.11.0." ---- - -Beginner - -:::note -Migrate directly to version 0.11.2 of the SDK. This contains a fix for several issues in the 0.11.0 release. -::: - -This SDK release is not backwards-compatible. We are moving towards a stable Connectors release and continue to improve the experience of developing custom Connectors. 
- -In this SDK version, we changed the `OutboundConnectorContext` and `InboundConnectorContext interfaces significantly.` You can no longer use the `getVariablesAsType` or `getPropertiesAsType` methods in outbound and inbound Connectors, respectively. -Use the new `bindVariables` method instead, as it takes care of secret replacement, payload validation, and deserialization automatically. - -We are moving away from a mandatory `@Secret` annotation. -From this release onwards, secrets are automatically replaced in all input variables/properties without the need to explicitly declare an annotation. - -To migrate your Connector implementations, complete the following: - -1. If you used the `OutboundConnectorContext::getVariablesAsType` method in you outbound Connector functions, replace it with `OutboundConnectorContext::bindVariables`. -2. If you used the `InboundConnectorContext::getPropertiesAsType` method in you inbound Connector executables, replace it with `InboundConnectorContext::bindProperties`. -3. Remove calls to `OutboundConnectorContext::replaceSecrets` and `InboundConnectorContext::replaceSecrets` methods. The secrets are now replaced automatically. -4. Remove calls to `OutboundConnectorContext::validate` and `InboundConnectorContext::validate` methods. The validation is now performed automatically. -5. If you used the `@Secret` annotation in your Connector implementations, you can safely remove it as it has no effect. diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/020-to-030.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/020-to-030.md deleted file mode 100644 index 248d7e7cc31..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/020-to-030.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: 020-to-030 -title: Update 0.2 to 0.3 -description: "Review which adjustments must be made to migrate from Connector SDK 0.2.x to 0.3.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.2.x to 0.3.0. - -:::caution - -Be aware that the update from 0.2 to 0.3 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.3.0, we introduce the following structural changes: - -- Input validation moves from Jakarta Bean Validation API version 3.0 to 2.0. -- SDK artifacts have to be in scope `provided`. - -### Update to Validation API 2.0 - -To better integrate in the current Java ecosystem and widely used frameworks like Spring 5 and Spring Boot 2, the `connector-validation` module -now operates on Jakarta Bean Validation API version 2.0 instead of version 3.0. Adjust your Connector input objects using validation as follows: - -Replace all class imports starting with `jakarta.validation` by `javax.validation`. A Connector input class on SDK 0.2.x with the following imports: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -changes to the following: - -```java -import io.camunda.connector.api.annotation.Secret; -import javax.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -This way, the Connector runtime environments are able to pick up your validations correctly. 
- -### Provided SDK artifacts - -The Connector runtime environments can execute multiple Connectors at once. The environments also provide the base SDK artifacts and their classes -to any Connector they execute. This comprises runtime-specific classes related to the Connector context as well as the Connector core and the validation -classes. To minimize the possibility of incompatible classes being on the same classpath, Connectors are required to depend on `connector-core` and -`connector-validation` in Maven's dependency scope `provided`. Other dependency management frameworks like Gradle offer similar scopes. - -As a result, you need to include the SDK artifacts as follows in Maven: - -```xml - - io.camunda.connector - connector-core - provided - - - io.camunda.connector - connector-validation - provided - -``` - -## Connector runtime environment - -The SDK provides a [pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that you can start manually. With version 0.3.0, this runtime moves from the [SDK repository](https://github.com/camunda/connector-sdk/tree/stable/0.2/runtime-job-worker) -to [Connector Runtime](https://github.com/camunda/connectors/blob/main/connector-runtime/README.md). This also means that the provided runtime now is -a Spring Boot application, based on Spring Zeebe. Thus, it offers all out-of-the-box capabilities Spring Zeebe provides. - -The Connector runtime JAR for manual installation can now be fetched from https://repo1.maven.org/maven2/io/camunda/spring-zeebe-connector-runtime/ -(starting with version `8.1.3`) instead of https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-job-worker/. You can start the runtime -environment with the following command: - -```bash -java -cp 'spring-zeebe-connector-runtime-VERSION-with-dependencies.jar:connector-http-json-VERSION-with-dependencies.jar' \ - io.camunda.connector.runtime.ConnectorRuntimeApplication -``` - -The Docker image is still accessible at https://hub.docker.com/r/camunda/connectors/tags. - -### Custom runtime environments - -If you are building a custom runtime environment, note the following adjustments: - -- The `runtime-util` artifact replaces the `runtime-job-worker` artifact. -- The `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` has moved to `import io.camunda.connector.runtime.util.outbound.ConnectorJobHandler`. -- The `io.camunda.connector.impl.outbound.AbstractOutboundConnectorContext` has moved to `io.camunda.connector.impl.context.AbstractConnectorContext`. -- To build your own context class, we recommend using the following signature: - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext {} -``` - -- The `SecretStore` class has been removed. Initialize your context class with a `super(SecretProvider)` call. Remove the `getSecretStore` method if you used it. - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext { - - public MyContext(final SecretProvider provider) { - super(provider); - ... 
- } -} -``` diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/030-to-040.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/030-to-040.md deleted file mode 100644 index 43916603f22..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/030-to-040.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 030-to-040 -title: Update 0.3 to 0.4 -description: "Review which adjustments must be made to migrate from Connector SDK 0.3.x to 0.4.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.3.x to 0.4.0. - -:::caution - -Be aware that the update from 0.3 to 0.4 requires manual migration steps as described below. - -::: - -With SDK version 0.4.0, we introduce many basic structural changes: - -- Switching default Connector Runtime to Spring Boot/Spring Zeebe for outbound Connectors. -- Introducing webhook inbound Connector. -- Moved out-of-the-box connectors to mono-repo at https://github.com/camunda/connectors-bundle/tree/main/connectors to ease dependency management and conflict resolution. -- Build Connector bundle artifact and Docker image by Maven as default (done by adding various fat jars to one Docker image). -- Adding GCP Secret Provider used in Camunda SaaS. - -### Inbound webhook - -Spring Zeebe runtime with version `0.4.0` SDK introduces support of inbound webhook capabilities. -See the [list of available inbound Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md). - -To function properly, Spring Zeebe runtime requires connection to [Operate API](/apis-tools/operate-api/overview.md). Read more on [how to connect to Operate or disable it completely](/self-managed/connectors-deployment/connectors-configuration.md#local-installation). - -### What happens if I don't properly configure connection to Operate API? - -If you don't configure properly connection to Operate API, it will be not possible to poll process definitions from Operate. Therefore, the webhook functionality won't work. -Additionally, you may observe exception spam in your log file every 5 seconds complaining of inability to connect to Operate. -Overall, this is not critical and given there are no other issues, the Connector runtime will function properly. diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/040-to-050.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/040-to-050.md deleted file mode 100644 index 637cabc8899..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/040-to-050.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: 040-to-050 -title: Update 0.4 to 0.5 -description: "Review which adjustments must be made to migrate from Connector SDK 0.4.x to 0.5.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.4.x to 0.5.0. 
- -With SDK version 0.5.0, we introduced minor changes: - -- Removing Spring Zeebe dependency management -- Managing the GCP Secret Provider module version diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/050-to-060.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/050-to-060.md deleted file mode 100644 index 46124442521..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/050-to-060.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: 050-to-060 -title: Update 0.5 to 0.6 -description: "Review which adjustments must be made to migrate from Connector SDK 0.5.x to 0.6.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.5.x to 0.6.0. - -With SDK version 0.6.0, we introduced the following changes: - -- Replacing secrets in parent classes -- Supporting intermediate inbound events -- Defining interfaces for inbound Connectors -- Fixing failing datetime serialization diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/060-to-070.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/060-to-070.md deleted file mode 100644 index bc84e1e1940..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/060-to-070.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: 060-to-070 -title: Update 0.6 to 0.7 -description: "Review which adjustments must be made to migrate from Connector SDK 0.6.x to 0.7.0." ---- - -Beginner - -Beginner - -With the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), we made -breaking changes to the inbound Connectors. Update -[HTTP Webhook](https://github.com/camunda/connectors/tree/main/connectors/webhook/element-templates) -and [GitHub Webhook](https://github.com/camunda/connectors/tree/main/connectors/github/element-templates) -element templates to the latest versions. - -If you have used inbound webhook Connectors with Connector Runtime 0.6.x, you need to **manually** -apply the new element template version to your diagrams: - -1. Download the new element template from the [GitHub release page](https://github.com/camunda/connectors-bundle/releases/tag/0.17.0). -2. Follow the [installation guide](/components/modeler/desktop-modeler/element-templates/configuring-templates.md) to reinstall the element template. diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/070-to-080.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/070-to-080.md deleted file mode 100644 index 1145b3450fb..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/070-to-080.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 070-to-080 -title: Update 0.7 to 0.8 -description: "Review which adjustments must be made to migrate from Connector SDK 0.7.x to 0.8.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.7.x to 0.8.0. 
diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/080-to-090.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/080-to-090.md deleted file mode 100644 index 51055c0aefc..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/080-to-090.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 080-to-090 -title: Update 0.8 to 0.9 -description: "Review which adjustments must be made to migrate from Connector SDK 0.8.x to 0.9.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.8.x to 0.9.0. diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/090-to-0100.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/090-to-0100.md deleted file mode 100644 index 1e6172bb692..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/090-to-0100.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 090-to-0100 -title: Update 0.9 to 0.10 -description: "Review which adjustments must be made to migrate from Connector SDK 0.9.x to 0.10.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.9.x to 0.10.0. diff --git a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/introduction.md b/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/introduction.md deleted file mode 100644 index 034d80e7388..00000000000 --- a/versioned_docs/version-8.5/components/connectors/custom-built-connectors/update-guide/introduction.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: introduction -title: Connector SDK updates ---- - -These documents guide you through the process of updating your Camunda 8 -Connector runtimes to a newer version of the -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). 
- -There is a dedicated update guide for each version: - -### [Connector SDK 0.10.x to 0.11](../0100-to-0110) - -Update from 0.10.x to 0.11.2 - -### [Connector SDK 0.9 to 0.10](../090-to-0100) - -Update from 0.9.x to 0.10.0 - -### [Connector SDK 0.8 to 0.9](../080-to-090) - -Update from 0.8.x to 0.9.0 - -### [Connector SDK 0.7 to 0.8](../070-to-080) - -Update from 0.7.x to 0.8.0 - -### [Connector SDK 0.6 to 0.7](../060-to-070) - -Update from 0.6.x to 0.7.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.7.0) - -### [Connector SDK 0.5 to 0.6](../050-to-060) - -Update from 0.5.x to 0.6.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.6.0) - -### [Connector SDK 0.4 to 0.5](../040-to-050) - -Update from 0.4.x to 0.5.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.5.0) - -### [Connector SDK 0.3 to 0.4](../030-to-040) - -Update from 0.3.x to 0.4.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.4.0) - -### [Connector SDK 0.2 to 0.3](../020-to-030) - -Update from 0.2.x to 0.3.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.3.0) - -### [Connector SDK 0.1 to 0.2](../010-to-020) - -Update from 0.1.x to 0.2.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.2.0) diff --git a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md index 9bc8ca591fc..3c9906d6023 100644 --- a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md +++ b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md @@ -105,10 +105,6 @@ There are two options to authenticate the Connector with AWS: The **Amazon EventBridge Webhook Connector** is an inbound Connector enabling you to start a BPMN process instance triggered by an event from [Amazon EventBridge](https://aws.amazon.com/eventbridge/). -:::note -If you have used the **Amazon EventBridge Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an Amazon EventBridge Webhook Connector task 1. Start building your BPMN diagram. You can use the **Amazon EventBridge Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/github.md b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/github.md index e6734c5eca9..0aa050cff72 100644 --- a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/github.md +++ b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/github.md @@ -280,12 +280,6 @@ handling response is still applicable [as described](/components/connectors/prot The **GitHub Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). 
-:::note -If you have used the GitHub Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. -Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a GitHub Webhook Connector task 1. Start building your BPMN diagram. You can use GitHub Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. diff --git a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/slack.md b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/slack.md index dc53e84c5d3..58727c2f8be 100644 --- a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/slack.md +++ b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/slack.md @@ -33,7 +33,7 @@ To make the **Slack Connector** executable, fill out the mandatory fields highli ### Authentication -Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, '{{secrets.SLACK_OAUTH_TOKEN}}'. +Set **Authentication** to your Slack OAuth token, which is stored as a secret. For example, `{{secrets.SLACK_OAUTH_TOKEN}}`. ### Create channel diff --git a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/twilio.md b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/twilio.md index 902e739f9e8..adfc8d164fc 100644 --- a/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/twilio.md +++ b/versioned_docs/version-8.5/components/connectors/out-of-the-box-connectors/twilio.md @@ -179,10 +179,6 @@ To learn more about implementing retry and error handling logic in your BPMN dia The **Twilio Webhook Connector** is an inbound Connector that enables you to start a BPMN process instance triggered by a [Twilio event](https://www.twilio.com/docs/usage/webhooks). -:::note -If you have used the **Twilio Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a Twilio Webhook Connector task 1. Start building your BPMN diagram. You can use the **Twilio Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.5/components/connectors/protocol/http-webhook.md b/versioned_docs/version-8.5/components/connectors/protocol/http-webhook.md index 0fb040ead87..e6dee47ce4c 100644 --- a/versioned_docs/version-8.5/components/connectors/protocol/http-webhook.md +++ b/versioned_docs/version-8.5/components/connectors/protocol/http-webhook.md @@ -7,12 +7,6 @@ description: Start a process instance with your custom webhook configuration, tr The **HTTP Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by external HTTP call. -:::note -If you have used the HTTP Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. 
-Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an HTTP Webhook Connector event 1. Start building your BPMN diagram. You can use HTTP Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. @@ -83,7 +77,7 @@ Please refer to the [update guide](/components/connectors/custom-built-connector - Set the **API Key** property to the expected value of the API key. - Set the **API Key locator** property that will be evaluated against the incoming request to extract the API key. [See the example](#how-to-configure-api-key-authorization). -- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer {JWT_TOKEN}. +- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer `{JWT_TOKEN}`. - Set JWK URL which is used as a well-known public URL to fetch the [JWKs](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets). - Set JWT role property expression which will be evaluated against the content of the JWT to extract the list of roles. See more details on extracting roles from JWT data [here](#how-to-extract-roles-from-jwt-data). diff --git a/versioned_docs/version-8.5/components/connectors/protocol/rest.md b/versioned_docs/version-8.5/components/connectors/protocol/rest.md index 4a2b159f3bd..be7b1bf804e 100644 --- a/versioned_docs/version-8.5/components/connectors/protocol/rest.md +++ b/versioned_docs/version-8.5/components/connectors/protocol/rest.md @@ -57,7 +57,7 @@ Select the **REST Connector** and fill out the following properties under the ** - **Headers**: The API key will be included in the request headers. 3. Specify your API key details: - **API key name**: Enter the parameter name expected by the API (e.g., apiKey). - - **API key value**: Reference the secret you created for your API key (e.g., {{secrets.REST_API_KEY_SECRET}}). + - **API key value**: Reference the secret you created for your API key (e.g., `{{secrets.REST_API_KEY_SECRET}}`). ### REST Connector (Basic) diff --git a/versioned_docs/version-8.5/components/console/manage-plan/migrate-from-prof-to-starter.md b/versioned_docs/version-8.5/components/console/manage-plan/migrate-from-prof-to-starter.md index 1fdcd43ec98..8f3d548da9b 100644 --- a/versioned_docs/version-8.5/components/console/manage-plan/migrate-from-prof-to-starter.md +++ b/versioned_docs/version-8.5/components/console/manage-plan/migrate-from-prof-to-starter.md @@ -11,7 +11,7 @@ Here are a few important remarks to consider before completing the migration ste - Since the two plans have different types of clusters included and fees for those, we recommend comparing the [Professional plan](https://camunda.com/blog/2023/05/camunda-professional-edition-accelerate-projects/) with the [Starter plan](https://camunda.com/blog/2023/09/camunda-starter/) to [understand your monthly costs](https://camunda.com/pricing/starter-plan-price-calculator/) before the migration. - General users and development/production cluster reservations in the Professional plan are migrated “as is” to the Starter plan, which may result in overage costs (e.g. production clusters in Professional will be transferred to production clusters in the Starter plan). 
If you are not using your production cluster in the Professional plan, we recommend you delete it beforehand and create a new development cluster in the Starter plan afterward. - Once you have edited the plan below, the changes will take effect on the first day of your next subscription period. -- If you have any questions, do not hesitate to [contact us](https://camunda.com/contact/). +- If you have any questions, do not hesitate to [contact us](/reference/contact.md). ::: diff --git a/versioned_docs/version-8.5/components/modeler/bpmn/call-activities/call-activities.md b/versioned_docs/version-8.5/components/modeler/bpmn/call-activities/call-activities.md index e3c522b341e..154e43211fa 100644 --- a/versioned_docs/version-8.5/components/modeler/bpmn/call-activities/call-activities.md +++ b/versioned_docs/version-8.5/components/modeler/bpmn/call-activities/call-activities.md @@ -32,10 +32,6 @@ When a non-interrupting boundary event is triggered, the created process instanc ## Variable mappings -By default, all variables of the call activity scope are copied to the created process instance. This can be limited to copying only the local variables of the call activity, by setting the attribute `propagateAllParentVariables` to `false`. - -By disabling this attribute, variables existing at higher scopes are no longer copied. If the attribute `propagateAllParentVariables` is set (default: `true`), all variables are propagated to the child process instance. - Input mappings can be used to create new local variables in the scope of the call activity. These variables are also copied to the created process instance. If the attribute `propagateAllChildVariables` is set (default: `true`), all variables of the created process instance are propagated to the call activity. This behavior can be customized by defining output mappings at the call activity. The output mappings are applied on completing the call activity and only those variables that are defined in the output mappings are propagated. diff --git a/versioned_docs/version-8.5/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md b/versioned_docs/version-8.5/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md index 56bee823e46..5a1b961b484 100644 --- a/versioned_docs/version-8.5/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md +++ b/versioned_docs/version-8.5/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md @@ -14,6 +14,8 @@ When an embedded subprocess is entered, the start event is activated. The subpro Embedded subprocesses are often used together with **boundary events**. One or more boundary events can be attached to a subprocess. When an interrupting boundary event is triggered, the entire subprocess (including all active elements) is terminated. +When adding an embedded subprocess to your model, you can either add a collapsed or expanded subprocess. You cannot collapse an existing expanded subprocess in your model. + ## Collapsed subprocesses :::caution @@ -22,7 +24,7 @@ Collapsed subprocesses are currently only partially supported by Optimize. While All other Camunda components fully support collapsed subprocesses. ::: -A subprocess can be collapsed to conceal its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. 
+A collapsed subprocess conceals its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. Collapsed subprocesses serve purely display purposes. For the creation of reusable processes, it is recommended to utilize [call activities](../call-activities/call-activities.md). diff --git a/versioned_docs/version-8.5/components/modeler/desktop-modeler/telemetry/telemetry.md b/versioned_docs/version-8.5/components/modeler/desktop-modeler/telemetry/telemetry.md index 8db915f21f0..33fe66c6901 100644 --- a/versioned_docs/version-8.5/components/modeler/desktop-modeler/telemetry/telemetry.md +++ b/versioned_docs/version-8.5/components/modeler/desktop-modeler/telemetry/telemetry.md @@ -54,8 +54,8 @@ These events include the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the case of a form, the payload also includes the `formFieldTypes`: @@ -78,8 +78,8 @@ The `Deployment Event` and `Start Instance` have the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the event of an unsuccessful deployment, an `error` property will be present in the payload containing an error code. diff --git a/versioned_docs/version-8.5/components/modeler/desktop-modeler/troubleshooting.md b/versioned_docs/version-8.5/components/modeler/desktop-modeler/troubleshooting.md index e738a5aaef7..8082f978170 100644 --- a/versioned_docs/version-8.5/components/modeler/desktop-modeler/troubleshooting.md +++ b/versioned_docs/version-8.5/components/modeler/desktop-modeler/troubleshooting.md @@ -171,6 +171,18 @@ DEBUG=* ZEEBE_NODE_LOG_LEVEL=DEBUG GRPC_VERBOSITY=DEBUG GRPC_TRACE=all camunda-m +## Desktop Modeler does not start on Ubuntu 24 / modern Linux + +Modern Linux operating systems introduce restrictions on user namespaces, a sandboxing (isolation) mechanism Modeler uses. You may see an error message when you start the application: + +```sh +$ ./camunda-modeler +[46193:1114/170934.837319:FATAL:setuid_sandbox_host.cc(163)] The SUID sandbox helper binary was found, but is not configured correctly. Rather than run without sandboxing I'm aborting now. You need to make sure that [...]/camunda-modeler-[...]-linux-x64/chrome-sandbox is owned by root and has mode 4755. +zsh: trace trap (core dumped) ./camunda-modeler +``` + +To remedy this, configure your system to allow sandboxing by [creating an AppArmor profile](https://github.com/camunda/camunda-modeler/issues/4695#issuecomment-2478458250), or review [this issue](https://github.com/camunda/camunda-modeler/issues/4695#issuecomment-2478581677) for an in-depth explanation of available options. If you don't have the necessary permissions to permit sandboxing, you may choose to disable the sandbox, though this is not recommended. + ## Other questions? Head over to the [Modeler category on the forum](https://forum.camunda.io/c/modeler/6) to receive help from the community. 
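The new troubleshooting entry above recommends allowing sandboxing via an AppArmor profile. As a rough, hedged illustration of the fallback options it mentions — these commands are assumptions based on common Ubuntu 24.04 guidance, not taken from the linked issue, and disabling the sandbox remains discouraged:

```sh
# Not recommended: launch the Electron app without the Chromium sandbox
./camunda-modeler --no-sandbox

# Alternative (system-wide, requires root): relax the user-namespace restriction
sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
```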
diff --git a/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md b/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md index 921b5c0f693..6a0f28ee837 100644 --- a/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md +++ b/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md @@ -12,7 +12,7 @@ Desktop Modeler automatically fetches and updates [element templates](./element- ## Automatic Connector template fetching -Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. +Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. The fetch is triggered whenever you start the application, or every 24 hours if the application is not closed. After an update check has concluded, a notification indicates if the templates are up to date or have been updated: diff --git a/versioned_docs/version-8.5/components/modeler/dmn/dmn.md b/versioned_docs/version-8.5/components/modeler/dmn/dmn.md index f8a894fe113..d19f0a38f3f 100644 --- a/versioned_docs/version-8.5/components/modeler/dmn/dmn.md +++ b/versioned_docs/version-8.5/components/modeler/dmn/dmn.md @@ -53,6 +53,10 @@ You can also edit literal expressions. Just as with decision tables, in the deci ## Business knowledge models +:::caution +Viewing the result of BKM evaluation is currently not supported in Operate. +::: + A _business knowledge model_ (BKM) is a reusable function containing a piece of decision logic. Typically, a BKM instantiates business logic that is required in multiple decisions, such as a common computation. For example, an amortization formula might be used in different loan application processes. You can make BKM elements executable using literal expressions written in FEEL, in almost the same way you would create a decision using a literal expression. A BKM literal expression can optionally accept parameters to be used as inputs to the FEEL expression, and it returns a single result whose name is the same as the BKM element name. Once you’ve created a BKM, it appears in autosuggestions when you’re using literal expressions to create decision logic. diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-button.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-button.md index 577b3c0615e..e3614b67505 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-button.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-button.md @@ -6,7 +6,7 @@ description: A form element to trigger form actions A button allowing the user to trigger form actions. -![Form Button Symbol](/img/form-icons/form-button.svg) +Form Button Symbol ### Configurable properties @@ -15,4 +15,4 @@ A button allowing the user to trigger form actions. - **Submit**: Submit the form (given there are no validation errors). - **Reset**: Reset the form, all user inputs will be lost. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the button. -- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). 
+- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md index f3812384600..3b60e61f43b 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A set of checkbox options providing data multi-selection for small datasets. -![Form Checklist Symbol](/img/form-icons/form-checklist.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A set of checkbox options providing data multi-selection for small datasets. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox group. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox group must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Datatypes diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox.md index 834a18cfa63..9546284e2d4 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-checkbox.md @@ -6,7 +6,7 @@ description: A form element to read and edit boolean data A checkbox allowing the user to read and edit boolean data. -![Form Checkbox Symbol](/img/form-icons/form-checkbox.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A checkbox allowing the user to read and edit boolean data. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). 
+- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Datatypes diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-datetime.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-datetime.md index aeb127128dc..c02c2912925 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-datetime.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-datetime.md @@ -6,7 +6,7 @@ description: Learn about the datetime form element to read and edit date and tim A component allowing the user to read and edit date and time data. -![Form Datetime Symbol](/img/form-icons/form-datetime.svg) +Form Datetime Symbol ## Configurable properties @@ -19,7 +19,7 @@ A component allowing the user to read and edit date and time data. - **Read only**: Makes the datetime component read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the datetime component, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the datetime component. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Time format**: Defines the time data format. This can either be **UTC offset**, **UTC normalized**, or **No timezone**. - **Time interval**: Defines the steps of time that can be selected in the time input field. - **Disallow past dates**: Enables the restriction to not allow past dates. diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md index 4af25ce51cc..1a24ae1440e 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md @@ -6,7 +6,7 @@ description: Learn about the dynamic list form element to dynamically manage a l The **dynamic list** element is designed to dynamically manage a list of form elements. It enables users to add or remove items from the list and is particularly useful in scenarios where the number of items in a list is not fixed. 
-![Dynamic List Symbol](/img/form-icons/form-dynamiclist.svg) +Dynamic List Symbol ## Configurable properties diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-expression.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-expression.md index f4f24988c5e..27133f2d8de 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-expression.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-expression.md @@ -6,7 +6,7 @@ description: A form element to compute form state An expression field allowing the user to compute new data based on form state. -![Form Expression Field Symbol](/img/form-icons/form-expression.svg) +Form Expression Field Symbol ### Configurable properties @@ -14,7 +14,7 @@ An expression field allowing the user to compute new data based on form state. - **Target value**: Defines an [expression](../../feel/language-guide/feel-expressions-introduction.md) to evaluate. - **Compute on**: Defines when the expression should be evaluated. Either whenever the result changes, or only on form submission. - **Deactivate if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to disable the expression. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). :::info diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-group.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-group.md index a90f822b54a..4353743a7b1 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-group.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-group.md @@ -6,7 +6,7 @@ description: Learn about the group form element to group multiple form elements The group element serves as a container to group various form elements together. It allows for nesting of fields and assists in organizing complex forms. -![Form Group Symbol](/img/form-icons/form-group.svg) +Form Group Symbol ### Configurable properties @@ -15,7 +15,7 @@ The group element serves as a container to group various form elements together. - **Show outline**: Can be toggled on and off to display a separating outline around the group - **Vertical alignment**: Determines the alignment of items in the list. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Usage diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-html.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-html.md index bc8e56f454f..bad7f9a9483 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-html.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-html.md @@ -6,13 +6,13 @@ description: A form element to display HTML content. A flexible display component designed to quickly render HTML content for the user. -![Form HTML Symbol](/img/form-icons/form-html.svg) +Form HTML Symbol ## Configurable properties - **Content**: This property accepts HTML content. Define it using [templating syntax](../configuration/forms-config-templating-syntax.md) or as plaintext HTML. The rendered content is sanitized for security reasons, see below for details. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to conditionally hide the HTML content. -- **Columns**: Space the field will use inside its row. The **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. The **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Our security and sanitation strategy diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-iframe.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-iframe.md index f928bf92197..782621b9c84 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-iframe.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-iframe.md @@ -12,7 +12,7 @@ Every iframe component is a sandbox. This means that the content of the iframe i ::: -![Form iframe Symbol](/img/form-icons/form-iframe.svg) +Form iframe Symbol ## Configurable properties @@ -30,7 +30,7 @@ Every iframe component is a sandbox. This means that the content of the iframe i - **Top level navigation**: Gives the iframe permission to change the URL of the parent page, navigating away entirely from it. - **Storage access by user**: Controls access of local storage based on user interactions, may be expected in addition to allow same origin on certain browsers for functionality depending on storage. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the iframe. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Security advisory diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-image.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-image.md index 3becca2d45e..ca0c674345e 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-image.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-image.md @@ -6,11 +6,11 @@ description: Learn about the image view form element to display an image. An element allowing the user to display images. -![Form Image Symbol](/img/form-icons/form-image.svg) +Form Image Symbol ## Configurable properties - **Image source**: Specifies the image source via [expression](../../feel/language-guide/feel-expressions-introduction.md), [templating syntax](../configuration/forms-config-templating-syntax.md) or [static value](/components/concepts/expressions.md#expressions-vs-static-values) (hyperlink or data URI). - **Alternative text**: Provides an alternative text to the image in case it cannot be displayed. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the image. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-number.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-number.md index 6e9d108ca19..902253e3642 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-number.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-number.md @@ -6,7 +6,7 @@ description: A form element to read and edit numeric data A number field allowing the user to read and edit numeric data. -![Form Number Symbol](/img/form-icons/form-number.svg) +Form Number Symbol ### Configurable properties @@ -19,7 +19,7 @@ A number field allowing the user to read and edit numeric data. - **Read only**: Makes the number field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the number field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the number. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Serialize to string**: Configures the output format of the datetime value. This enables unlimited precision digits. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Number field must contain a value. diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-radio.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-radio.md index 285fc2b6272..3d9c8928bcc 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-radio.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-radio.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A radio group allowing the user to select one of multiple data options for small datasets. -![Form Radio Symbol](/img/form-icons/form-radio.svg) +Form Radio Symbol ### Configurable properties @@ -18,7 +18,7 @@ A radio group allowing the user to select one of multiple data options for small - **Disabled**: Disables the radio group, for use during development. - **Options source**: Radio group components can be configured with an options source defining the individual choices the component provides, refer to [options source docs](../configuration/forms-config-options.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the radio group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One radio option must be selected. diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-select.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-select.md index 17ae7dd2ce0..102a401ff77 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-select.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-select.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A Select dropdown allowing the user to select one of multiple data option from larger datasets. 
-![Form Select Symbol](/img/form-icons/form-select.svg) +Form Select Symbol ### Configurable properties @@ -18,7 +18,7 @@ A Select dropdown allowing the user to select one of multiple data option from l - **Read only**: Makes the select read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the select, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the select. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Options source**: Selects can be configured with an options source defining the individual choices the select provides, refer to [options source docs](../configuration/forms-config-options.md). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One select entry must be selected. diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-separator.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-separator.md index a83fc9ed785..392d1418cb4 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-separator.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-separator.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add a visual separation between A **separator** element is used to create a visual separation between two elements. -![Form Spacer Symbol](/img/form-icons/form-separator.svg) +Form Separator Symbol ## Usage diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-spacer.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-spacer.md index 22043da492b..7284c15dfd3 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-spacer.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-spacer.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add vertical space between eleme A **spacer** element is used to create a defined amount of vertical space between two elements. 
-![Form Spacer Symbol](/img/form-icons/form-spacer.svg) +Form Spacer Symbol ## Configurable properties diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-table.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-table.md index 947b4f5e505..5ff8cb77e7d 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-table.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-table.md @@ -6,7 +6,7 @@ description: Learn about the table form element to render tabular data. This is an element allowing the user to render tabular data. -![Form table Symbol](/img/form-icons/form-table.svg) +Form Table Symbol ## Configurable properties @@ -16,4 +16,4 @@ This is an element allowing the user to render tabular data. - **Number of rows per page**: The size of each page. Used only if pagination is enabled. Must be greater than zero. - **Headers source**: Defines which headers will be used in the table. This can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md) or a list of static headers. Review [table data binding](../configuration/forms-config-table-data-binding.md) for the required header structure. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the table. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-taglist.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-taglist.md index 97411ce8db5..e4dd64059ea 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-taglist.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-taglist.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A complex and searchable tag based component providing multi-selection for large datasets. -![Form Taglist Symbol](/img/form-icons/form-taglist.svg) +Form Taglist Symbol ### Configurable properties @@ -14,7 +14,7 @@ A complex and searchable tag based component providing multi-selection for large - **Field description**: Description provided below the taglist. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Key**: Binds the field to a form variable, refer to [data binding docs](../configuration/forms-config-data-binding.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the taglist. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Taglist must contain a value. - **Options source**: Taglists can be configured with an options source defining the individual choices your user can make, refer to [options source docs](../configuration/forms-config-options.md). diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-text.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-text.md index c2b043cacee..a3fe68e537e 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-text.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-text.md @@ -6,13 +6,13 @@ description: A form element to display simple Markdown-powered text. A Markdown-powered text component allowing to display simple information to the user. -![Form Text Symbol](/img/form-icons/form-text.svg) +Form Text Symbol ## Configurable properties - **Text**: Either an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). After evaluation, the result is processed using a Markdown renderer that supports basic HTML and [GitHub-flavored Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). To ensure safety and prevent cross-site scripting in Camunda Forms, potentially harmful HTML elements will not be rendered. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Example text configurations diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textarea.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textarea.md index d851a970a92..2e66e4d0712 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textarea.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textarea.md @@ -6,7 +6,7 @@ description: Learn about the text area form element to read and edit multiline t A text area allowing the user to read and edit multiline textual data. 
-![Form Textarea Symbol](/img/form-icons/form-textArea.svg) +Form Textarea Symbol ## Configurable properties @@ -17,7 +17,7 @@ A text area allowing the user to read and edit multiline textual data. - **Read only**: Makes the text area read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text area; for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text area. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text area must contain a value. - **Minimum length**: Text area must have at least `n` characters. diff --git a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textfield.md b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textfield.md index 1aafa0e824f..da45e37b3c9 100644 --- a/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textfield.md +++ b/versioned_docs/version-8.5/components/modeler/forms/form-element-library/forms-element-library-textfield.md @@ -6,7 +6,7 @@ description: A form element to read and edit textual data A text field allowing the user to read and edit textual data. -![Form Text Field Symbol](/img/form-icons/form-textField.svg) +Form Text Field Symbol ### Configurable properties @@ -17,7 +17,7 @@ A text field allowing the user to read and edit textual data. - **Read only**: Makes the text field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text field. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text field must contain a value. - **Regular expression validation**: Use predefined validation patterns. Available options are: `Email`, `Phone`, and `Custom`. 
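As a hedged illustration of how the text field's **Custom** regular expression validation and **Columns** settings come together, the sketch below builds a minimal form schema and renders it with the open-source form-js viewer that powers Camunda Forms. The container element, the `INV-` pattern, and the exact schema property names (`validate.pattern`, `layout.columns`) are illustrative assumptions rather than values taken from this documentation.

```typescript
import { Form } from "@bpmn-io/form-js-viewer";

// Minimal sketch of a form schema: one textfield bound to `invoiceNumber`,
// required, validated against a custom pattern, and spanning 8 grid columns.
const schema = {
  type: "default",
  components: [
    {
      type: "textfield",
      key: "invoiceNumber",
      label: "Invoice number",
      validate: {
        required: true,
        pattern: "^INV-[0-9]{4}$", // assumption: example pattern for illustration
      },
      layout: { columns: 8 }, // assumption: half of the 16-column Carbon grid
    },
  ],
};

async function renderForm(): Promise<void> {
  // Assumes an element <div id="form"></div> exists on the page.
  const container = document.querySelector("#form");
  if (!container) {
    throw new Error("missing #form container");
  }

  const form = new Form({ container });
  await form.importSchema(schema, { invoiceNumber: "INV-0001" });

  form.on("submit", (event: { data: unknown; errors: unknown }) => {
    // `errors` is non-empty when validation (for example the pattern) fails.
    console.log("submitted", event.data, event.errors);
  });
}

renderForm().catch(console.error);
```

If the pattern does not match on submit, the viewer reports a validation error for the field instead of producing output data, which mirrors the behavior described for the **Validation** properties above.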
diff --git a/versioned_docs/version-8.5/components/modeler/web-modeler/camunda-marketplace.md b/versioned_docs/version-8.5/components/modeler/web-modeler/camunda-marketplace.md index aeaf0878b65..464df80d09c 100644 --- a/versioned_docs/version-8.5/components/modeler/web-modeler/camunda-marketplace.md +++ b/versioned_docs/version-8.5/components/modeler/web-modeler/camunda-marketplace.md @@ -14,6 +14,10 @@ The Camunda Marketplace can be accessed via your [browser](https://marketplace.c ## Visit the Camunda Marketplace +:::note +Connectors created by partners or the community are not part of the commercial Camunda product. Camunda does not support these Connectors as part of its commercial services to enterprise customers. Please evaluate each client to make sure it meets your requirements before using. +::: + To navigate to the Camunda Marketplace, take the following steps: 1. Log in to your Camunda account, and navigate to Web Modeler using the **Camunda components** icon in the top left corner of your console. Click **Modeler**. diff --git a/versioned_docs/version-8.5/components/modeler/web-modeler/img/web-modeler-blueprint.png b/versioned_docs/version-8.5/components/modeler/web-modeler/img/web-modeler-blueprint.png index 97034bf53f3..6a8d34fb60d 100644 Binary files a/versioned_docs/version-8.5/components/modeler/web-modeler/img/web-modeler-blueprint.png and b/versioned_docs/version-8.5/components/modeler/web-modeler/img/web-modeler-blueprint.png differ diff --git a/versioned_docs/version-8.5/components/modeler/web-modeler/launch-web-modeler.md b/versioned_docs/version-8.5/components/modeler/web-modeler/launch-web-modeler.md index 818f394ed8d..ccf7c515719 100644 --- a/versioned_docs/version-8.5/components/modeler/web-modeler/launch-web-modeler.md +++ b/versioned_docs/version-8.5/components/modeler/web-modeler/launch-web-modeler.md @@ -14,8 +14,8 @@ To launch Web Modeler, follow the steps below: 2. Select **Create new project** to create a new project and store diagrams. ![web modeler empty home](img/web-modeler-new-user-home.png) 3. Name your diagram. You can go back and change the name any time by clicking on the project name and **Edit name**. -4. Select **Browse blueprints** to view blueprints for various use cases as a starting point for your first diagram. Open these blueprints by selecting **Use Blueprint**. Alternatively, click **Create new > BPMN diagram** to create a blank BPMN diagram. +4. Select **Browse blueprints** to open the blueprints dialog and browse blueprints for various use cases as a starting point for your first diagram. ![web modeler blueprint browsing](img/web-modeler-blueprint.png) -5. While browsing blueprints, you can also open the details of a specific blueprint by selecting **More details**. This opens a new tab in the [Camunda Marketplace](/components/modeler/web-modeler/camunda-marketplace.md). Here, you can have a closer look at the diagram, and open it in SaaS or Self-Managed. - -![Camunda marketplace example](img/camunda-marketplace-example.png) +5. While browsing blueprints, open the details of a specific blueprint by selecting **More details**. This opens a new tab in [Camunda Marketplace](/components/modeler/web-modeler/camunda-marketplace.md). Here, have a closer look at the diagram, and open it in SaaS or Self-Managed. + ![Camunda marketplace example](img/camunda-marketplace-example.png) +6. Open a blueprint by selecting **Use blueprint**, which downloads the blueprint into the project and opens it in the diagram screen. 
Alternatively, click **Create new > BPMN diagram** to create a blank BPMN diagram. diff --git a/versioned_docs/version-8.5/components/zeebe/zeebe-overview.md b/versioned_docs/version-8.5/components/zeebe/zeebe-overview.md index c1fda640f43..0e80316cde3 100644 --- a/versioned_docs/version-8.5/components/zeebe/zeebe-overview.md +++ b/versioned_docs/version-8.5/components/zeebe/zeebe-overview.md @@ -20,12 +20,6 @@ With Zeebe you can: For documentation on deploying Zeebe as part of Camunda 8 Self-Managed, refer to the [deployment guide](../../self-managed/zeebe-deployment/zeebe-installation.md). -## Enterprise support for Zeebe - -Paid support for Zeebe is available via either Camunda 8 Starter or Camunda 8 Enterprise plans. Customers can choose either plan based on their process automation requirements. Camunda 8 Enterprise customers also have the option of on-premises or private cloud deployment. - -Additionally, regardless of how you are working with Zeebe and Camunda 8, you can always find support through the [community](/contact/). - ## Next steps - Get familiar with [technical concepts](technical-concepts/technical-concepts-overview.md). diff --git a/versioned_docs/version-8.5/guides/getting-started-java-spring.md b/versioned_docs/version-8.5/guides/getting-started-java-spring.md index 7f28e44c769..f27acb4c3ba 100644 --- a/versioned_docs/version-8.5/guides/getting-started-java-spring.md +++ b/versioned_docs/version-8.5/guides/getting-started-java-spring.md @@ -116,33 +116,13 @@ To implement a service task, take the following steps: ### Configure Spring Boot Starter -See our documentation on [adding the Spring Zeebe SDK to your project](/apis-tools/spring-zeebe-sdk/getting-started.md#add-the-spring-zeebe-sdk-to-your-project) for more details, also described below: - -1. Copy the following code snippet into the `pom.xml` file of your Spring project, below properties and above dependencies: - -```xml - - - - true - - - false - - identity - Camunda Identity - https://artifacts.camunda.com/artifactory/camunda-identity/ - - -``` - -2. Add the following dependency to your `pom.xml` file, as a child of the `` element: +Add the following Maven dependency to your Spring Boot Starter project, replacing `x` with the latest patch level available: ```xml - io.camunda - spring-boot-starter-camunda-sdk - 8.5.0 + io.camunda + spring-boot-starter-camunda-sdk + 8.5.x ``` diff --git a/versioned_docs/version-8.5/guides/getting-started-orchestrate-microservices.md b/versioned_docs/version-8.5/guides/getting-started-orchestrate-microservices.md index ce7e39406c0..0d2a32b9ec8 100644 --- a/versioned_docs/version-8.5/guides/getting-started-orchestrate-microservices.md +++ b/versioned_docs/version-8.5/guides/getting-started-orchestrate-microservices.md @@ -19,7 +19,7 @@ While this guide uses code snippets in Java, you do not need to be a Java develo ## Prerequisites - Ensure you have a valid [Camunda 8 account](create-account.md), or sign up if you still need one. 
-- Java >= 8 +- Java ≥ 8 - Maven - IDE (IntelliJ, VSCode, or similar) - Download and unzip or clone the [repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java` diff --git a/versioned_docs/version-8.5/guides/introduction-to-camunda-8.md b/versioned_docs/version-8.5/guides/introduction-to-camunda-8.md index bcf3c0d7b1c..74ef3f25773 100644 --- a/versioned_docs/version-8.5/guides/introduction-to-camunda-8.md +++ b/versioned_docs/version-8.5/guides/introduction-to-camunda-8.md @@ -36,7 +36,7 @@ type:"link", href:"/docs/next/guides/getting-started-java-spring/", label: "Get } ]}/> -With these guides, start working with [Web Modeler](/components/modeler/about-modeler.md) to get familiar with BMPN and model a business process, or as a Java developer, step through using Spring Boot and the Spring Zeebe SDK with Desktop Modeler to interact with a local Self-Managed Camunda 8 installation. +With these guides, start working with [Web Modeler](/components/modeler/about-modeler.md) to get familiar with BPMN and model a business process, or as a Java developer, step through using Spring Boot and the Spring Zeebe SDK with Desktop Modeler to interact with a local Self-Managed Camunda 8 installation. ### Use cases diff --git a/versioned_docs/version-8.5/guides/message-correlation.md b/versioned_docs/version-8.5/guides/message-correlation.md deleted file mode 100644 index 7c0dc0ef853..00000000000 --- a/versioned_docs/version-8.5/guides/message-correlation.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: message-correlation -title: Message correlation -description: "Message correlation allows you to target a running workflow with a state update from an external system asynchronously." ---- - -Intermediate -Time estimate: 20 minutes - -## Prerequisites - -- [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js) -- [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) -- [Desktop Modeler](https://camunda.com/download/modeler/) - -## Message correlation - -Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously. - -This tutorial uses the [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js), but it serves to illustrate message correlation concepts that are applicable to all language clients. - -We will use [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production. However, it is useful during development. - -## Workflow - -Here is a basic example from [the Camunda 8 documentation](/components/concepts/messages.md): - -![message correlation workflow](img/message-correlation-workflow.png) - -Use [Desktop Modeler](https://camunda.com/download/modeler/) to open the [test-messaging](https://github.com/jwulf/zeebe-message-correlation/blob/master/bpmn/test-messaging.bpmn) file in [this GitHub project](https://github.com/jwulf/zeebe-message-correlation). - -Click on the intermediate message catch event to see how it is configured: - -![message properties](img/message-correlation-message-properties.png) - -A crucial piece here is the **Subscription Correlation Key**. 
In a running instance of this workflow, an incoming **Money Collected** message will have a `correlationKey` property: - -```typescript - zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid" - }); -``` - -The concrete value of the message `correlationKey` is matched against running workflow instances by comparing the supplied value against the `orderId` variable of running instances subscribed to this message. This is the relationship established by setting the `correlationKey` to `orderId` in the message catch event in the BPMN. - -## Running the demonstration - -To run the demonstration, take the following steps: - -1. Clone this repository. -2. Install dependencies: - :::note - This guide requires `npm` version 6. - ::: - `npm i && npm i -g ts-node typescript` -3. In another terminal, start the Zeebe Broker in addition to [simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor). -4. Deploy the workflow and start an instance: - `ts-node start-workflow.ts` - This starts a workflow instance with the `orderId` set to 345: - -```typescript -await zbc.createProcessInstance("test-messaging", { - orderId: "345", - customerId: "110110", - paymentStatus: "unpaid", -}); -``` - -5. Open Simple Monitor at [http://localhost:8082](http://localhost:8082). -6. Click on the workflow instance. You will see the current state of the workflow: - ![workflow state](img/message-correlation-workflow-state.png) - The numbers above the BPMN symbols indicate that no tokens are waiting at the start event, and one has passed through. One token is waiting at the **Collect Money** task, and none have passed through. -7. Take a look at the **Variables** tab at the bottom of the screen. (If you don't see it, you are probably looking at the workflow, rather than the instance. In that case, drill down into the instance): - ![message correlation variables](img/message-correlation-variables.png) - You can see that this workflow instance has the variable `orderId` set to the value 345. -8. Start the workers: - `ts-node workers.ts` -9. Refresh Simple Monitor to see the current state of the workflow: - ![message correlation wait on message](img/message-correlation-wait-on-message.png) - Now, the token is at the message catch event, waiting for a message to be correlated. -10. Take a look at the **Message Subscriptions** tab: - ![message subscriptions](img/message-correlation-message-subscriptions.png) - You can see the broker has opened a message subscription for this workflow instance with the concrete value of the `orderId` 345. This was created when the token entered the message catch event. -11. Send the message in another terminal: - `ts-node send-message.ts` -12. Refresh Simple Monitor, and note that the message has been correlated and the workflow has run to completion: - -![message correlation completed](img/message-correlation-completed.png) - -The **Message Subscriptions** tab now reports that the message was correlated: - -![message correlation correlated](img/message-correlation-correlated.png) - -## Message buffering - -Messages are buffered on the broker, so your external systems can emit messages before your process arrives at the catch event. The amount of time a message is buffered is configured when publishing the message from the client library. 
- -For example, to send a message buffered for 10 minutes with the JavaScript client: - -```typescript -zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid", - }, - timeToLive: 600000, -}); -``` - -To see it in action, take the following steps: - -1. Keep the workers running. -2. Publish the message: - -```typescript -ts-node send-message.ts -``` - -3. Click on **Messages** at the top of the Simple Monitor page. You will see the message buffered on the broker: - -![message buffered on broker](img/message-correlation-buffered.png) - -4. Start another instance of the workflow: - -```typescript -ts-node start-workflow.ts -``` - -Note that the message is correlated to the workflow instance, even though it arrived before the workflow instance was started. - -## Common mistakes - -A couple of common gotchas: - -- The `correlationKey` in the BPMN message definition is the name of the workflow variable to match against. The `correlationKey` in the message is the concrete value to match against that variable in the workflow instance. - -- The message subscription _is not updated after it is opened_. That is not an issue in the case of a message catch event. However, for boundary message events (both interrupting and non-interrupting,) the subscription is opened _as soon as the token enters the bounding subprocess_. If any service task modifies the `orderId` value inside the subprocess, the subscription is not updated. - -For example, the interrupting boundary message event in the following example will not be correlated on the updated value, because the subscription is opened when the token enters the subprocess, using the value at that time: - -![not correlating](img/message-correlation-not-like-this.png) - -If you need a boundary message event correlated on a value modified somewhere in your process, put the boundary message event in a subprocess after the task that sets the variable. The message subscription for the boundary message event will open when the token enters the subprocess, with the current variable value. - -![correlating](img/message-correlation-like-this.png) - -## Summary - -Message Correlation is a powerful feature in Camunda 8. Knowing how messages are correlated, and how and when the message subscription is created is important to design systems that perform as expected. - -Simple Monitor is a useful tool for inspecting the behavior of a local Camunda 8 system to figure out what is happening during development. diff --git a/versioned_docs/version-8.5/guides/migrating-from-camunda-7/index.md b/versioned_docs/version-8.5/guides/migrating-from-camunda-7/index.md index 000e09dd12f..39764a6da25 100644 --- a/versioned_docs/version-8.5/guides/migrating-from-camunda-7/index.md +++ b/versioned_docs/version-8.5/guides/migrating-from-camunda-7/index.md @@ -53,4 +53,4 @@ As described earlier in this guide, migration is an ongoing topic and this guide - Discuss workload migrations (operations) - Eventual consistency -[Reach out to us](/contact/) to discuss your specific migration use case. +[Reach out to us](/reference/contact.md) to discuss your specific migration use case. 
diff --git a/versioned_docs/version-8.5/reference/alpha-features.md b/versioned_docs/version-8.5/reference/alpha-features.md index 8a65e9ee0fa..c8229dff48c 100644 --- a/versioned_docs/version-8.5/reference/alpha-features.md +++ b/versioned_docs/version-8.5/reference/alpha-features.md @@ -22,7 +22,7 @@ Limitations of alpha features and components include: - Not necessarily feature-complete. - Might lack full documentation. - No guaranteed updates to newer releases. -- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://docs.camunda.org/enterprise/support/). +- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://camunda.com/services/enterprise-support-guide/). - No maintenance service. - (SaaS) No availability targets. - Released outside the standard [release policy](release-policy.md). @@ -32,7 +32,7 @@ To learn more about using alpha features, see [enabling alpha features](/compone :::note - Alpha features can also be included in a minor version (stable) release. -- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/contact). +- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/reference/contact.md). ::: @@ -43,7 +43,7 @@ Once features and components are released and considered stable, they become gen Stable features and components are: - Ready for production use for most users with minimal risk. -- Supported by [L1 Priority-level support](https://docs.camunda.org/enterprise/support/#priority-level) for production use. +- Supported by [L1 Priority-level support](https://camunda.com/services/enterprise-support-guide/) for production use. - Fully documented. A release or component is considered stable if it has passed all verification and test stages and can be released to production. diff --git a/versioned_docs/version-8.5/reference/contact.md b/versioned_docs/version-8.5/reference/contact.md new file mode 100644 index 00000000000..4e6c7fc3892 --- /dev/null +++ b/versioned_docs/version-8.5/reference/contact.md @@ -0,0 +1,45 @@ +--- +id: contact +title: Contact +description: Contact Camunda, submit feedback, find support using the Camunda community forum, note bug reports and feature requests, and review security notices. +keywords: + [ + support, + contact-us, + get-support, + help, + need-help, + bug, + bug-report, + feature-request, + issue, + enterprise-support, + ] +--- + +There are a few different channels you can reach us based on your needs: + +- We encourage everyone to participate in our **community** via the [Camunda community forum](https://forum.camunda.io/), where you can exchange ideas with other Camunda users, as well as Camunda employees. For all other Camunda community programs and resources, visit our [Camunda Developer Hub](https://camunda.com/developers). 
+ +- We welcome your **bug** reports and **feature requests** through our community channels mentioned above. + +- For **security-related issues**, review our [security notices](/reference/notices.md) for the most up-to-date information on known issues and steps to report a vulnerability so we can solve the problem as quickly as possible. Do not use GitHub for security-related issues. + +- **Feedback and support** can be submitted or requested via JIRA by following our [Enterprise support process](https://camunda.com/services/enterprise-support-guide/). All users can also find feedback and support options in the Help Center or [Camunda community forum](https://forum.camunda.io/). + +- For sales inquiries, information about Camunda 8 performance and benchmarking, or anything not listed above, use our [Contact Us](https://camunda.com/contact/) form. + +## Locating Camunda 8 credentials + +Need assistance locating your Camunda 8 credentials? You can obtain these credentials from Camunda by submitting a **Help Request**. To do this, take the following steps: + +1. Log in to [Jira](https://jira.camunda.com/secure/Dashboard.jspa). +2. Click **Create** in the navigation bar at the top of the page. This launches a **Create Issue** pop-up. +3. In the **Issue Type** field, select **Help Request**. +4. In the **Help Request Type** field, click the option that reads **I need the credentials for downloading Camunda**. +5. In the **Summary** and **Description** fields, **I need the credentials for downloading Camunda** will populate by default. + ![completed help request example](./img/create-issue-request.png) +6. (Optional) Add more details, such as the priority level or authorized support contacts. +7. Click **Create** at the bottom of the pop-up **Create Issue** box. + +After completing these steps, your request is generated. Find additional details on submitting a self-service help request [here](https://camunda.com/services/enterprise-support-guide/). diff --git a/versioned_docs/version-8.5/reference/img/create-issue-request.png b/versioned_docs/version-8.5/reference/img/create-issue-request.png new file mode 100644 index 00000000000..374fdfece6f Binary files /dev/null and b/versioned_docs/version-8.5/reference/img/create-issue-request.png differ diff --git a/versioned_docs/version-8.5/reference/notices.md b/versioned_docs/version-8.5/reference/notices.md index 1773709bb43..60b0a1322ba 100644 --- a/versioned_docs/version-8.5/reference/notices.md +++ b/versioned_docs/version-8.5/reference/notices.md @@ -74,11 +74,11 @@ Tasklist The REST API functionality of Tasklist 8.2.0 and 8.2.1 allows unauthenticated access to the following methods/URLs: -- GET /v1/tasks/{taskId} +- GET /v1/tasks/\{taskId} - POST /v1/tasks/search -- POST /v1/tasks/{taskId}/variables/search -- POST /v1/forms/{formId} -- POST /v1/variables/{variableId} +- POST /v1/tasks/\{taskId}/variables/search +- POST /v1/forms/\{formId} +- POST /v1/variables/\{variableId} Find more information about the methods in our [Tasklist REST API documentation](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md). 
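To make the impact of this notice concrete, the following sketch probes two of the listed endpoints without credentials; on an upgraded installation these requests are expected to be rejected (for example with HTTP 401/403), whereas Tasklist 8.2.0/8.2.1 answered them without authentication. The base URL and the placeholder task ID are assumptions for illustration only, and the snippet relies on the global `fetch` available in Node.js 18+.

```typescript
// Hedged sketch: verify that the affected Tasklist v1 endpoints now require authentication.
const TASKLIST_BASE_URL = "http://localhost:8080"; // assumption: adjust to your installation

const endpoints = [
  { method: "POST", path: "/v1/tasks/search" },
  { method: "GET", path: "/v1/tasks/2251799813685732" }, // placeholder task ID
] as const;

async function probe(method: string, path: string): Promise<void> {
  const response = await fetch(`${TASKLIST_BASE_URL}${path}`, {
    method,
    headers: { "Content-Type": "application/json" },
    body: method === "POST" ? JSON.stringify({}) : undefined,
  });
  // 401/403 indicates authentication is enforced; 200 suggests the vulnerable behavior.
  console.log(`${method} ${path} -> HTTP ${response.status}`);
}

(async () => {
  for (const { method, path } of endpoints) {
    await probe(method, path);
  }
})().catch(console.error);
```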
@@ -115,7 +115,7 @@ At this point, Camunda is not aware of any specific attack vector in Tasklist al #### How to determine if the installation is affected -You are Tasklist version (8.0.3 >= version <= 8.0.7) or <= 8.1.2 +You are Tasklist version (8.0.3 ≥ version ≤ 8.0.7) or ≤ 8.1.2 #### Solution @@ -142,7 +142,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.11 or <= 1.3.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.11 or ≤ 1.3.6 #### Solution @@ -168,7 +168,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.8 or <= 1.1.9 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.8 or ≤ 1.1.9 #### Solution @@ -194,7 +194,7 @@ Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bu #### How to determine if the installation is affected -You are using IAM version <= 1.2.8 +You are using IAM version ≤ 1.2.8 #### Solution @@ -219,7 +219,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.7 or <= 1.1.8 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.7 or ≤ 1.1.8 #### Solution @@ -248,7 +248,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.7 +You are using IAM version ≤ 1.2.7 #### Solution @@ -273,7 +273,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.6 or <= 1.1.7 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.6 or ≤ 1.1.7 #### Solution @@ -302,7 +302,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.6 +You are using IAM version ≤ 1.2.6 #### Solution @@ -327,7 +327,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.5 or <= 1.1.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.5 or ≤ 1.1.6 #### Solution @@ -357,7 +357,7 @@ Still, Camunda recommends applying fixes as mentioned in the Solution section be #### How to determine if the installation is affected -You are using IAM version <= 1.2.5 +You are using IAM version ≤ 1.2.5 #### Solution diff --git a/versioned_docs/version-8.5/reference/regions.md b/versioned_docs/version-8.5/reference/regions.md index 2ea6139f85b..41ed59eaf2c 100644 --- a/versioned_docs/version-8.5/reference/regions.md +++ b/versioned_docs/version-8.5/reference/regions.md @@ -9,7 +9,7 @@ When you create a cluster in Camunda 8 SaaS, you must specify a region for that Currently, we make these regions available for customers on the Trial, Starter, and Enterprise Plans. Enterprise customers can discuss custom regions with their Customer Success Manager. :::note -Our Console and Web Modeler components are currently hosted in the EU. [Contact us](https://camunda.com/contact/) if you have additional questions. 
+Our Console and Web Modeler components are currently hosted in the EU. [Contact us](/reference/contact.md) if you have additional questions. ::: Below, find a list of regions currently supported in Camunda 8 SaaS. @@ -21,6 +21,7 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. | Belgium, Europe (europe-west1) | Germany, Europe (europe-west3) | | Iowa, North America (us-central1) | Salt Lake City, North America (us-west1) | | London, Europe (europe-west2) | _Not available_ | +| Singapore, Asia (asia-southeast1) | Changhua County, Taiwan (asia-east1) | | South Carolina, North America (us-east1) | Iowa, North America (us-central1) | | Sydney, Australia (australia-southeast1) | Melbourne, Australia (australia-southeast2) | | Toronto, North America (northamerica-northeast2) | Montréal, North America (northamerica-northeast1) | @@ -28,5 +29,5 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. You can find the locations behind the region codes [on the Google page](https://cloud.google.com/about/locations). :::note -Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](https://camunda.com/contact/) as we are able to make additional regions available on request. +Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](/reference/contact.md) as we are able to make additional regions available on request. ::: diff --git a/versioned_docs/version-8.5/reference/status.md b/versioned_docs/version-8.5/reference/status.md index c8de779c5d2..ecb840a23ab 100644 --- a/versioned_docs/version-8.5/reference/status.md +++ b/versioned_docs/version-8.5/reference/status.md @@ -21,4 +21,4 @@ To receive service status updates: ## Support -Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/contact). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). +Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/reference/contact.md). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). diff --git a/versioned_docs/version-8.5/reference/supported-environments.md b/versioned_docs/version-8.5/reference/supported-environments.md index 920519bca10..32c0f9f5ae7 100644 --- a/versioned_docs/version-8.5/reference/supported-environments.md +++ b/versioned_docs/version-8.5/reference/supported-environments.md @@ -8,7 +8,7 @@ The supported environments page lists browsers, operating systems, clients, depl **If the particular technology is not listed, we cannot resolve issues caused by the usage of that unlisted technology.** -You may [raise a feature request](/contact) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/contact) to work with Consulting services. +You may [raise a feature request](/reference/contact.md) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/reference/contact.md) to work with Consulting services. Recommendations are denoted with [recommended], however, other listed options are supported as well. 
@@ -30,7 +30,7 @@ For example, 1.2+ means support for the minor version 2, and any higher minors ( ## Desktop Modeler - Windows 10 / 11 -- Mac OS 12 / 13 / 14 +- Mac OS 12 / 13 / 14 / 15 - Ubuntu LTS (latest) ## Clients diff --git a/versioned_docs/version-8.5/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md b/versioned_docs/version-8.5/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md index 835be739ecc..2e61c28b921 100644 --- a/versioned_docs/version-8.5/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md +++ b/versioned_docs/version-8.5/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md @@ -5,12 +5,15 @@ sidebar_label: "Connect to an existing Keycloak instance" description: "Learn how to connect Identity to your existing Keycloak instance." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + In this guide, we'll demonstrate how to connect Identity to your existing Keycloak instance. ## Prerequisites - Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/23.0.1/server_admin/#using-the-admin-console) -- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/23.0.1/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak. +- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/latest/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak :::note Clients in Camunda 8 SaaS and applications in Camunda 8 Self-Managed provide a similar purpose. One key difference is that for Camunda 8 SaaS, you can set up specific [client connection credentials](/guides/setup-client-connection-credentials.md), whereas in Identity, an application is created with credentials automatically assigned. @@ -24,7 +27,15 @@ As of the 8.5.3 release, Identity uses the Keycloak frontend URL instead of the To avoid connectivity issues, ensure your Keycloak frontend URL is accessible by adjusting your network, firewall, or security settings as needed. This adjustment is crucial to maintain the integration with Keycloak and ensure compatibility. ::: -To connect Identity to an existing Keycloak instance, take the following steps: +To connect Identity to an existing Keycloak instance, take the following steps for your Camunda installation: + + + + 1. Log in to your Keycloak Admin Console. 2. Select the realm you would like to connect Identity to. In our example, this is **camunda-platform**. @@ -56,6 +67,18 @@ To connect Identity to an existing Keycloak instance, take the following steps: ::: 13. Start Identity. + + + +1. Log in to your Keycloak Admin Console. +2. Verify the name of the realm you would like to connect Identity to. In our example, this is **camunda-platform**. + ![keycloak-admin-realm-select](../img/keycloak-admin-realm-select.png) +3. Set the `KEYCLOAK_REALM` [environment variable](/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. +4. Start Identity. + + + + :::note What does Identity create when starting? Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/self-managed/identity/deployment/starting-configuration.md). 
::: diff --git a/versioned_docs/version-8.5/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md b/versioned_docs/version-8.5/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md index f39ada6f300..2f6d9bd6f54 100644 --- a/versioned_docs/version-8.5/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md +++ b/versioned_docs/version-8.5/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md @@ -4,46 +4,69 @@ title: Deploy diagram description: "Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed. Follow the steps below to deploy a diagram: 1. Click the rocket-shaped deployment icon: -![deployment icon](./img/deploy-icon.png) + ![deployment icon](./img/deploy-icon.png) 2. Click **Camunda 8 Self-Managed**: -![deployment configuration](./img/deploy-empty.png) + ![deployment configuration](./img/deploy-empty.png) 3. Input the `Cluster endpoint`: -:::note -You can connect to Camunda 8 both securely and insecurely through the `https` and `http` protocols. + :::note + You can connect to Camunda 8 both securely and insecurely through the `https` and `http` protocols. + + Secured connections to a remote endpoint will only be established if the remote server certificate is trusted by the app. Ensure that signing trusted roots and intermediate certificates [are known to the app](/components/modeler/desktop-modeler/flags/flags.md#zeebe-ssl-certificate). + ::: + + :::caution + + Multi-tenancy is only available with authentication enabled [through Identity](/self-managed/identity/what-is-identity.md), and [enabled in all required components](/self-managed/concepts/multi-tenancy.md). + + ::: + + ![deployment via Camunda 8](./img/deploy-endpoint.png) + +4. Select your authentication method, and input the required credentials: -Secured connections to a remote endpoint will only be established if the remote server certificate is trusted by the app. Ensure that signing trusted roots and intermediate certificates [are known to the app](/components/modeler/desktop-modeler/flags/flags.md#zeebe-ssl-certificate). -::: + -:::caution + -Multi-tenancy is only available with authentication enabled [through Identity](/self-managed/identity/what-is-identity.md), and [enabled in all required components](/self-managed/concepts/multi-tenancy.md). + For **basic authentication**, input your username and password: -::: + ![basic auth configuration](./img/deploy-with-basic-auth.png) -![deployment via Camunda 8](./img/deploy-endpoint.png) + -4. Select **Basic**, and input your username and password in case your gateway requires basic authentication: + -![basic auth configuration](./img/deploy-with-basic-auth.png) + For **OAuth**, input the credentials for your OAuth provider. These are configured as part of the default [Helm installation](/self-managed/setup/install.md) and can be discovered in [Identity](/self-managed/identity/what-is-identity.md), or are set by Zeebe [environment variables](/self-managed/zeebe-deployment/security/client-authorization.md#environment-variables). -5. Select **OAuth**, and input the credentials in case your gateway requires authentication with OAuth: + ![oauth configuration](./img/deploy-with-oauth.png) -:::note -The OAuth URL needs to contain the full path to the token endpoint, i.e. 
`https:///auth/realms/camunda-platform/protocol/openid-connect/token`. -::: + | Name | Description | Example value | + | --------------- | ------------------------------------ | ----------------------------------------------------------------------------------------- | + | Client ID | The name of your Zeebe client. | `zeebe` | + | Client secret | The password of your Zeebe client. | `zecret` | + | OAuth token url | The full path to the token endpoint. | `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. | + | OAuth audience | The permission name for Zeebe. | `zeebe-api` | -![oauth configuration](./img/deploy-with-oauth.png) + + -6. Select the **Remember** checkbox if you want to locally store the connection information. +5. Select the **Remember** checkbox if you want to locally store the connection information. -7. Click **Deploy** to perform the deployment. +6. Click **Deploy** to perform the deployment. ![deployment successful](./img/deploy-success.png) diff --git a/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/configuration.md b/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/configuration.md index 906cf98a7a4..30e364a1ac7 100644 --- a/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/configuration.md +++ b/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/configuration.md @@ -60,11 +60,13 @@ The `restapi` component sends certain events (e.g. "file updated", "comment adde Web Modeler integrates with Identity and Keycloak for authentication and authorization (using OAuth 2.0 + OpenID Connect) as well as user management. -| Environment variable | Description | Example value | -| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | -| `CAMUNDA_IDENTITY_BASEURL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | -| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | -| `RESTAPI_OAUTH2_TOKEN_ISSUER_BACKEND_URL` | [optional]
    [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's [OpenID Provider Configuration](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig); if not set, `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` is used. | `http://keycloak:8080/auth/realms/camunda-platform` | +| Environment variable | Description | Example value | +| ---------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `CAMUNDA_IDENTITY_BASEURL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | +| `RESTAPI_OAUTH2_TOKEN_ISSUER_BACKEND_URL` | [optional]
    [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's [OpenID Provider Configuration](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig); if not set, `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` is used. | `http://keycloak:8080/auth/realms/camunda-platform` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI` | [optional] URL of the JWK Set endpoint (used for JWT validation). Only necessary if URL cannot be derived from the OIDC configuration endpoint. | `https://keycloak.example.com/auth/realms/camunda-platform/protocol/openid-connect/certs` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWS_ALGORITHMS` | [optional] List of trusted JWS algorithms used for JWT validation. Only necessary if the algorithms cannot be derived from the JWK Set response. | `ES256` | Refer to the [advanced Identity configuration guide](./identity.md) for additional details on how to connect a custom OpenID Connect (OIDC) authentication provider. @@ -117,6 +119,20 @@ Refer to the [advanced SSL configuration guide](./ssl.md) for additional details | `RESTAPI_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the regular API endpoints. | `8081` | `8081` | | `RESTAPI_MANAGEMENT_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the management API endpoints. | `8091` | `8091` | +### Proxy + +These settings are useful when the application needs to make outgoing network requests in environments that require traffic to pass through a proxy server. + +| Environment variable | Description | Example value | Default value | +| -------------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------- | ------------- | +| `http_proxy` | Specifies the proxy server to be used for outgoing HTTP requests. | `http://proxy.example.com:8080` | - | +| `https_proxy` | Specifies the proxy server to be used for outgoing HTTPS requests. | `https://secureproxy.example.com:443` | - | +| `no_proxy` | A comma-separated list of domain names or IP addresses for which the proxy should be bypassed. | `localhost,127.0.0.1,.example.com` | - | + +:::note +The proxy-related environment variables are lowercase because they follow a widely accepted convention used in many system environments and tools. +::: + ### Feature Flags | Environment variable | Description | Example value | Default value | @@ -158,10 +174,13 @@ The `webapp` component sends certain events (e.g. "user opened diagram", "user l ### Logging -| Environment variable | Description | Example value | -| -------------------- | -------------------------------------- | ---------------------------- | -| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| Environment variable | Description | Example value | +| -------------------- | ----------------------------------------------- | ---------------------------- | +| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| `LOG_LEVEL_CLIENT` | [optional]
    Log level for the client | `DEBUG` | +| `LOG_LEVEL_WEBAPP` | [optional]
    Log level for the Node.js server | `DEBUG` | +The `LOG_LEVEL_*` options can be found [here](../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). Refer to the [Advanced Logging Configuration Guide](./logging.md#logging-configuration-for-the-webapp-component) for additional details on how to customize the `webapp` logging output. ### SSL diff --git a/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/logging.md b/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/logging.md index b39f3747821..5718a6b1e2d 100644 --- a/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/logging.md +++ b/versioned_docs/version-8.5/self-managed/modeler/web-modeler/configuration/logging.md @@ -42,6 +42,16 @@ To enable additional log output to a file, adjust the following environment vari LOG_FILE_PATH=/full/path/to/log/file.log ``` +### Configuring log levels + +To control the verbosity of the logs, adjust the environment variables `LOG_LEVEL_CLIENT` (browser client) and `LOG_LEVEL_WEBAPP` (Node.js server). + +```properties +LOG_LEVEL_CLIENT=DEBUG +``` + +The `LOG_LEVEL_*` options can be found [here](../../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). + ## Logging configuration for the `websocket` component By default, the `websocket` component logs to the Docker container's standard output. diff --git a/versioned_docs/version-8.5/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md b/versioned_docs/version-8.5/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md new file mode 100644 index 00000000000..6059d9bed8c --- /dev/null +++ b/versioned_docs/version-8.5/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md @@ -0,0 +1,36 @@ +--- +id: troubleshoot-proxy-configuration +title: "Troubleshoot proxy configuration issues" +sidebar_label: "Proxy configuration" +description: "Troubleshooting guide for issues caused by incorrect proxy configuration in Web Modeler." +--- + +Troubleshoot and resolve issues in Web Modeler caused by incorrect or incomplete proxy configuration. + +## Issue + +Users experience a variety of failures when Web Modeler attempts to communicate with external services. These issues can manifest as: + +- Failed authentication due to the inability to access the JWKS (JSON Web Key Set) endpoint. Error message: "Expected 200 OK from the JSON Web Key Set HTTP response." +- Failure to reach other external services, such as the Camunda Marketplace. + +## Cause + +Proxy settings must be correctly configured for Web Modeler to route outgoing requests through a network proxy. Common issues occur when: + +- The proxy server is not properly configured or unreachable. +- Requests to external services are being blocked by the proxy configuration. +- Authentication requests, such as those to the OIDC provider, fail when the JWKS endpoint is unreachable via the proxy. + +## Resolution + +Ensure correct proxy configuration for both `webapp` and `restapi` components. + +- For the `webapp` component, proxy configuration is handled via the environment variables `http_proxy`, `https_proxy` and `no_proxy`. 
+ ```properties + http_proxy=http://proxy.example.com:8080 https_proxy=https://secureproxy.example.com:443 no_proxy=localhost,127.0.0.1,.example.com + ``` +- For the `restapi` component, the proxy configuration is handled via JVM settings passed as the value of the environment variable `JAVA_OPTS`. + ```properties + JAVA_OPTS=-Dhttp.proxyHost= -Dhttps.proxyPort= + ``` diff --git a/versioned_docs/version-8.5/self-managed/operate-deployment/importer-and-archiver.md b/versioned_docs/version-8.5/self-managed/operate-deployment/importer-and-archiver.md index 04cad62c780..60e561d9c62 100644 --- a/versioned_docs/version-8.5/self-managed/operate-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.5/self-managed/operate-deployment/importer-and-archiver.md @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.operate.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/versioned_docs/version-8.5/self-managed/operate-deployment/operate-configuration.md b/versioned_docs/version-8.5/self-managed/operate-deployment/operate-configuration.md index 09132364ffd..ba73ec06d19 100644 --- a/versioned_docs/version-8.5/self-managed/operate-deployment/operate-configuration.md +++ b/versioned_docs/version-8.5/self-managed/operate-deployment/operate-configuration.md @@ -66,7 +66,7 @@ in terms of tenant assignment, Operate - Zeebe connection must be secured. Check ### Troubleshooting multi-tenancy in Operate -If users can view data from the `` tenant only and no data from other tenants (and you have not [configured multi-tenancy using Helm](https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform-latest/README.md#global-parameters)), multi-tenancy is not enabled in Operate. Refer to the [configuration instructions above](#multi-tenancy). +If users can view data from the `` tenant only and no data from other tenants (and you have not [configured multi-tenancy using Helm](https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform-8.5/README.md#global-parameters)), multi-tenancy is not enabled in Operate. Refer to the [configuration instructions above](#multi-tenancy). If multi-tenancy is enabled in Operate but disabled in [Identity](/self-managed/identity/what-is-identity.md), users will not have any tenant authorizations in Operate and will not be able to access the data of any tenants in Operate. 
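For example, when deploying with the Camunda 8 Helm chart, multi-tenancy can be switched on for all components at once so that Operate and Identity stay consistent. A minimal values sketch, using the `global.multitenancy.enabled` flag referenced in the Helm chart configuration linked above:

```yaml
# Sketch: enable multi-tenancy consistently across all components via Helm values.
global:
  multitenancy:
    enabled: true
```

When deploying without Helm, the equivalent per-component settings need to be enabled individually, as described in the configuration instructions above.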
diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/application-configs.md b/versioned_docs/version-8.5/self-managed/operational-guides/application-configs.md index 47bfcee3638..6231578062b 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/application-configs.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/application-configs.md @@ -63,20 +63,20 @@ operate: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 numberOfShards: 3 # Zeebe instance zeebe: # Broker contact point - brokerContactPoint: "cpt-zeebe-gateway:26500" + brokerContactPoint: "-zeebe-gateway:26500" # ELS instance to export Zeebe data to zeebeElasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 # Index prefix, configured in Zeebe Elasticsearch exporter @@ -122,27 +122,27 @@ operate: ## Default properties set by the helm chart -Before you supply a configuration, it's helpful to know what the default configuration is so you can start from a working configuration and then update the values you want: +The `helm template` command generates the application's default configuration, allowing you to only update the values required by your setup. Use the following command to generate the default configuration, substituting in the name of your release: ```bash -helm template \ +helm template \ -f values.yaml \ camunda/camunda-platform \ --show-only templates/operate/configmap.yaml ``` -`--show-only` will allow you to print out the `configmap` to the console: +The `--show-only` flag prints out the `configmap` to the console: ```yaml # Source: camunda-platform/templates/operate/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: cpt-operate + name: -operate labels: app: camunda-platform app.kubernetes.io/name: camunda-platform - app.kubernetes.io/instance: cpt + app.kubernetes.io/instance: app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: camunda-platform helm.sh/chart: camunda-platform-9.3.1 @@ -160,25 +160,26 @@ data: oauth2: resourceserver: jwt: - issuer-uri: "http://cpt-keycloak:80/auth/realms/camunda-platform" - jwk-set-uri: "http://cpt-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/certs" + issuer-uri: "http://-keycloak:80/auth/realms/camunda-platform" + jwk-set-uri: "http://-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/certs" camunda: identity: clientId: "operate" audience: "operate-api" + baseUrl: "http://-identity:80" # Operate configuration file camunda.operate: identity: - redirectRootUrl: "https://dev.jlscode.com" + redirectRootUrl: "http://localhost:8081" # ELS instance to store Operate data elasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 # Zeebe instance @@ -190,7 +191,7 @@ data: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 # Index prefix, configured in Zeebe Elasticsearch exporter @@ -207,14 +208,14 @@ Then, take the contents under `application.yml` and put it under the `operate.co ## Where to search for configuration options -- [Zeebe Broker](docs/self-managed/zeebe-deployment/configuration/broker.md) -- [Zeebe Gateway](docs/self-managed/zeebe-deployment/configuration/gateway.md) -- [Operate](docs/self-managed/operate-deployment/operate-configuration.md) -- 
[Tasklist](docs/self-managed/tasklist-deployment/tasklist-configuration.md) -- [Web Modeler](docs/self-managed/modeler/web-modeler/configuration/configuration.md) -- [Console](docs/self-managed/console-deployment/configuration.md) -- [Connectors](docs/self-managed/connectors-deployment/connectors-configuration.md) -- [Identity](docs/self-managed/identity/deployment/configuration-variables.md) +- [Zeebe Broker](/self-managed/zeebe-deployment/configuration/broker.md) +- [Zeebe Gateway](/self-managed/zeebe-deployment/configuration/gateway.md) +- [Operate](/self-managed/operate-deployment/operate-configuration.md) +- [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md) +- [Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md) +- [Console](/self-managed/console-deployment/configuration.md) +- [Connectors](/self-managed/connectors-deployment/connectors-configuration.md) +- [Identity](/self-managed/identity/deployment/configuration-variables.md) - [Optimize]($optimize$/self-managed/optimize-deployment/configuration/system-configuration) ## Limitations diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md b/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md index bae5f4f7250..e2a9e2d3bb2 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md @@ -5,6 +5,9 @@ description: "How to perform a backup and restore of Operate and Tasklist data." keywords: ["backup", "backups"] --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + :::note This release introduces breaking changes, including: @@ -37,33 +40,68 @@ The backup API can be reached via the Actuator management port, which by default Before you can use the backup and restore feature: 1. The [Elasticsearch snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html) must be configured. -2. Operate and Tasklist must be configured with the repository name using the following configuration parameters: +2. Operate and Tasklist must be configured with the repository name using one of the following configuration options: + + + + + +#### Operate ```yaml -for Operate: camunda: operate: backup: repositoryName: +``` + + + + + +#### Operate + +``` +CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= +``` + + + + +#### Tasklist -for Tasklist: + + + + +```yaml camunda: tasklist: backup: repositoryName: ``` -or with environmental variables: + -``` -for Operate: -CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= + -for Tasklist: +``` CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME= - ``` + + + ## Create backup API During backup creation Operate can continue running. To create the backup, call the following endpoint: diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/optimize-backup.md b/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/optimize-backup.md index c694ff53341..823de9ef51a 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/optimize-backup.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/optimize-backup.md @@ -25,7 +25,7 @@ Optimize provides an API to trigger a backup and retrieve information about a gi The following prerequisites must be set up before using the backup API: 1. 
A snapshot repository of your choice must be registered with Elasticsearch. -2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize configuration: +2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize [`environment-config.yaml`]($optimize$/self-managed/optimize-deployment/configuration/system-configuration/): ```yaml backup: @@ -58,7 +58,7 @@ POST actuator/backups ### Example request -``` +```shell curl --request POST 'http://localhost:8092/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": 123456 }' @@ -101,8 +101,8 @@ GET actuator/backup ### Example request -``` -curl ---request GET 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request GET 'http://localhost:8092/actuator/backups/123456' ``` ### Example response @@ -161,8 +161,8 @@ DELETE actuator/backups/{backupId} ### Example request -``` -curl ---request DELETE 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request DELETE 'http://localhost:8092/actuator/backups/123456' ``` ## Restore backup @@ -184,6 +184,6 @@ To restore a given backup, the following steps must be performed: Example Elasticsearch request: -``` +```shell curl --request POST `http://localhost:9200/_snapshot/repository_name/camunda_optimize_123456_3.9.0_part_1_of_2/_restore?wait_for_completion=true` ``` diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md b/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md index fc3a1fc1ab6..c87473891cc 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md @@ -26,7 +26,7 @@ Even when the underlying storage bucket is the same, backups from one are not co ### S3 backup store -To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket: +To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -87,7 +87,7 @@ zeebe.broker.data.backup.s3.compression: zstd # or use environment variable ZEEB The GCS backup strategy utilizes the [Google Cloud Storage REST API](https://cloud.google.com/storage/docs/request-endpoints). ::: -To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use: +To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use. 
This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -124,7 +124,7 @@ There are multiple [data encryption options](https://cloud.google.com/storage/do ### Azure backup store -To store your backups in Azure Storage, choose the `AZURE` backup store and specify how to connect with the Azure container: +To store your backups in Azure Storage, choose the `AZURE` backup store and specify how to connect with the Azure container. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -177,7 +177,7 @@ The `backupId` cannot be reused, even if the backup corresponding to the backup
    Example request -``` +```shell curl --request POST 'http://localhost:9600/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": "100" }' @@ -220,7 +220,7 @@ GET actuator/backups/{backupId}
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups/100' ``` @@ -293,7 +293,7 @@ GET actuator/backups
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups' ``` @@ -371,7 +371,7 @@ DELETE actuator/backups/{backupId}
    Example request -``` +```shell curl --request DELETE 'http://localhost:9600/actuator/backups/100' ``` diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/configure-multi-tenancy.md b/versioned_docs/version-8.5/self-managed/operational-guides/configure-multi-tenancy.md index e2046c57337..38d4b1953ea 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/configure-multi-tenancy.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/configure-multi-tenancy.md @@ -19,7 +19,7 @@ Multi-tenancy must be enabled for each required component. Using the single glob ## Helm charts When using Helm charts, you can enable multi-tenancy globally with the flag `global.multitenancy.enabled`. -Visit [the Helm chart configuration](https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform-latest/README.md#global-parameters) for additional details. +Visit [the Helm chart configuration](https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform-8.5/README.md#global-parameters) for additional details. ## Environment variables diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/log-levels.md b/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/log-levels.md index f5423bb4a8b..365aa71fd33 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/log-levels.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/log-levels.md @@ -24,3 +24,4 @@ Enable logging for each component of Camunda 8 using the following instructions: - [Operate](/self-managed/operate-deployment/operate-configuration.md#logging) - [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#logging) - [Web Modeler](/self-managed/modeler/web-modeler/configuration/logging.md) +- [Identity](/self-managed/identity/user-guide/configuration/configure-logging.md) diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/troubleshooting.md b/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/troubleshooting.md index b080c12da2f..ba31e9f3c35 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/troubleshooting.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/troubleshooting/troubleshooting.md @@ -67,6 +67,25 @@ A gateway timeout can occur if the headers of a response are too big (for exampl If you encounter errors during Helm chart installation, such as type mismatches or other template rendering issues, you may be using an outdated version of the Helm CLI. Helm's handling of data types and template syntax can vary significantly between versions. Ensure you use the Helm CLI version `3.13` or higher. +## DNS disruption issue for Zeebe in Kubernetes clusters (1.29-1.31) + +Kubernetes clusters running versions 1.29 to 1.31 may experience DNS disruptions during complete node restarts, such as during upgrades or evictions, particularly if the cluster's DNS resolver pods are affected. + +This issue is specifically noticeable for Zeebe (Netty), as it will no longer be able to form a cluster because of improper DNS responses. This occurs because Zeebe continues to communicate with a non-existent DNS resolver, caused by improper cleanup of conntrack entries for UDP connections. 
+ +Details on this issue can be found in [this Kubernetes issue](https://github.com/kubernetes/kubernetes/issues/125467) and has been resolved in the following patch releases: + +- Kubernetes 1.29.10 +- Kubernetes 1.30.6 +- Kubernetes 1.31.2 + +Kubernetes versions 1.32 and versions before 1.29 are not affected. + +If an immediate cluster upgrade to a fixed version is not possible, the following temporary workarounds can be applied if you encounter DNS issues: + +- Restart the `kube-proxy` pod(s) +- Delete the affected Zeebe pod + ## Anomaly detection scripts The [c8-sm-checks](https://github.com/camunda/c8-sm-checks) project introduces a set of scripts to aid detection of Camunda deployment anomalies. diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/840-to-850.md b/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/840-to-850.md index b94830b187a..0a36d850327 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/840-to-850.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/840-to-850.md @@ -31,11 +31,11 @@ Note that there is **no** actual corruption or data loss, however. The broker health check routes have moved, and the old routes are now deprecated. Specifically, the following routes will return [a status code of 301](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/301) and redirect you. See the table below about the new mappings: -| Old route | **New route** | -| --------------------------------------- | ------------------------------------------------------------- | -| http://{zeebe-broker-host}:9600/health | **http://{zeebe-broker-host}:9600/actuator/health/status** | -| http://{zeebe-broker-host}:9600/ready | **http://{zeebe-broker-host}:9600/actuator/health/readiness** | -| http://{zeebe-broker-host}:9600/startup | **http://{zeebe-broker-host}:9600/actuator/health/startup** | +| Old route | **New route** | +| ---------------------------------------- | -------------------------------------------------------------- | +| http://\{zeebe-broker-host}:9600/health | **http://\{zeebe-broker-host}:9600/actuator/health/status** | +| http://\{zeebe-broker-host}:9600/ready | **http://\{zeebe-broker-host}:9600/actuator/health/readiness** | +| http://\{zeebe-broker-host}:9600/startup | **http://\{zeebe-broker-host}:9600/actuator/health/startup** | Please migrate to the new routes in your deployments. **If you're using the official Helm charts, then you don't have to do anything here.** diff --git a/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/introduction.md b/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/introduction.md index d7b1c74381c..0511d4753dc 100644 --- a/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/introduction.md +++ b/versioned_docs/version-8.5/self-managed/operational-guides/update-guide/introduction.md @@ -12,10 +12,6 @@ When updating from one minor version to the next, you do not need to update to e Depending on your amount of data, run a minor version for at least 24 hours before updating to the next version. -:::note -Versions prior to Camunda 8 are listed below and identified as Camunda Cloud versions. 
-::: - There is a dedicated update guide for each version: ### [Camunda 8.4 to Camunda 8.5](../840-to-850) diff --git a/versioned_docs/version-8.5/self-managed/react-components/components.md b/versioned_docs/version-8.5/self-managed/react-components/components.md index e3f3aeb8672..7659e1d03ab 100644 --- a/versioned_docs/version-8.5/self-managed/react-components/components.md +++ b/versioned_docs/version-8.5/self-managed/react-components/components.md @@ -16,6 +16,6 @@ Camunda 8 Self-Managed users may also use [Desktop Modeler](../../components/mod :::note -To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/contact). +To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/reference/contact.md). ::: diff --git a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md index a7e364ab461..4ec9da0b97e 100644 --- a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md +++ b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md @@ -95,13 +95,13 @@ https://github.com/camunda/c8-multi-region/blob/stable/8.5/aws/dual-region/scrip #### config.tf -This file contains the [backend](https://developer.hashicorp.com/terraform/language/settings/backends/configuration) and [provider](https://developer.hashicorp.com/terraform/language/providers/configuration) configuration, meaning where to store the [Terraform state](https://developer.hashicorp.com/terraform/language/state) and which providers to use, their versions, and potential credentials. +This file contains the [backend](https://developer.hashicorp.com/terraform/language/backend) and [provider](https://developer.hashicorp.com/terraform/language/providers/configuration) configuration, meaning where to store the [Terraform state](https://developer.hashicorp.com/terraform/language/state) and which providers to use, their versions, and potential credentials. The important part of `config.tf` is the initialization of two AWS providers, as you need one per region and this is a limitation by AWS given everything is scoped to a region. :::note -It's recommended to use a different backend than `local`. Find more information in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/settings/backends/configuration). +It's recommended to use a different backend than `local`. Find more information in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/backend). ::: diff --git a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md index b046938fe35..d5c4ef5d169 100644 --- a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md +++ b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md @@ -12,7 +12,7 @@ Lastly you'll verify that the connection to your Self-Managed Camunda 8 environm ## Prerequisites -- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [terraform](./terraform-setup.md) guide. +- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [Terraform](./terraform-setup.md) guide. - [Helm (3.16+)](https://helm.sh/docs/intro/install/) - [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. 
- (optional) Domain name/[hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html) in Route53. This allows you to expose Camunda 8 and connect via [zbctl](../../../../../../apis-tools/cli-client/) or [Camunda Modeler](https://camunda.com/download/modeler/). diff --git a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/irsa.md b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/irsa.md index 7025ee73b70..0c2de39d7a8 100644 --- a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/irsa.md +++ b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/irsa.md @@ -266,7 +266,7 @@ For additional details, refer to the [Camunda 8 Helm deployment documentation](/ ### Web Modeler -Since Web Modeler RestAPI uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. +As the Web Modeler REST API uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. Web Modeler already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. #### Kubernetes configuration @@ -538,8 +538,6 @@ There are different ways to configure the mapping within Amazon OpenSearch Servi To authorize the IAM role in OpenSearch for access, follow these steps: -**_Note that this example uses basic authentication (username and password), which may not be the best practice for all scenarios, especially if fine-grained access control is enabled._** The endpoint used in this example is not exposed by default, so consult your OpenSearch documentation for specifics on enabling and securing this endpoint. - Use the following `curl` command to update the OpenSearch internal database and authorize the IAM role for access. Replace placeholders with your specific values: ```bash @@ -562,6 +560,12 @@ curl -sS -u ":" \ - Replace `` with your OpenSearch endpoint URL. - Replace `` with the IAM role name created by Terraform, which is output by the `opensearch_role` module. +:::note Security of basic auth usage + +**This example uses basic authentication (username and password), which may not be the best practice for all scenarios, especially if fine-grained access control is enabled.** The endpoint used in this example is not exposed by default, so consult your OpenSearch documentation for specifics on enabling and securing this endpoint. + +::: +
    The important part is assigning the `iam_role_arn` of the previously created `opensearch_role` to an internal role within Amazon OpenSearch Service. For example, `all_access` on the Amazon OpenSearch Service side is a good candidate, or if required, extra roles can be created with more restrictive access. diff --git a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md index 28c2429c2ee..d0cf2f1da05 100644 --- a/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md +++ b/versioned_docs/version-8.5/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md @@ -75,7 +75,7 @@ provider "aws" { :::note -It's recommended to use a different backend than `local`. More information can be found in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/settings/backends/configuration). +It's recommended to use a different backend than `local`. More information can be found in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/backend). ::: diff --git a/versioned_docs/version-8.5/self-managed/setup/deploy/openshift/redhat-openshift.md b/versioned_docs/version-8.5/self-managed/setup/deploy/openshift/redhat-openshift.md index a1617d55b2b..7416df5a6b4 100644 --- a/versioned_docs/version-8.5/self-managed/setup/deploy/openshift/redhat-openshift.md +++ b/versioned_docs/version-8.5/self-managed/setup/deploy/openshift/redhat-openshift.md @@ -33,7 +33,7 @@ We conduct testing and ensure compatibility against the following OpenShift vers | 4.13.x | November 17, 2024 | :::caution -Compatibility is not guaranteed for OpenShift versions no longer supported by Red Hat, as per the End of Support Date. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift). +Camunda 8 supports OpenShift versions in the Red Hat General Availability, Full Support, and Maintenance Support life cycle phases. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift). ::: ## Deploying Camunda 8 in OpenShift diff --git a/versioned_docs/version-8.5/self-managed/setup/guides/connect-to-an-oidc-provider.md b/versioned_docs/version-8.5/self-managed/setup/guides/connect-to-an-oidc-provider.md index 029e334a259..64bf68961f5 100644 --- a/versioned_docs/version-8.5/self-managed/setup/guides/connect-to-an-oidc-provider.md +++ b/versioned_docs/version-8.5/self-managed/setup/guides/connect-to-an-oidc-provider.md @@ -34,11 +34,14 @@ configuration](#component-specific-configuration) to ensure the components are c

    Steps

    1. In your OIDC provider, create an application for each of the components you want to connect. The expected redirect URI of the component you are configuring an app for can be found in [component-specific configuration](#component-specific-configuration). -2. Make a note of the following values for each application you create: +2. For all Components, ensure the appropriate application type is used: + - **Operate, Tasklist, Optimize, Identity:** Web applications requiring confidential access/a confidential client + - **Web Modeler:** A single-page application requiring public access/a public client +3. Make a note of the following values for each application you create: - Client ID - Client secret - Audience -3. Set the following environment variables for the component you are configuring an app for: +4. Set the following environment variables for the component you are configuring an app for: @@ -132,8 +135,8 @@ Ensure you register a new application for each component. ``` CAMUNDA_IDENTITY_TYPE=MICROSOFT CAMUNDA_IDENTITY_BASE_URL= - CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 - CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 CAMUNDA_IDENTITY_CLIENT_ID= CAMUNDA_IDENTITY_CLIENT_SECRET= CAMUNDA_IDENTITY_AUDIENCE= @@ -149,13 +152,13 @@ Ensure you register a new application for each component. global: identity: auth: - issuer: https://login.microsoftonline.com//v2.0 + issuer: https://login.microsoftonline.com//v2.0 # this is used for container to container communication - issuerBackendUrl: https://login.microsoftonline.com//v2.0 - tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token - jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys + issuerBackendUrl: https://login.microsoftonline.com//v2.0 + tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token + jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys type: "MICROSOFT" - publicIssuerUrl: https://login.microsoftonline.com//v2.0 + publicIssuerUrl: https://login.microsoftonline.com//v2.0 identity: clientId: existingSecret: diff --git a/versioned_docs/version-8.5/self-managed/setup/guides/ingress-setup.md b/versioned_docs/version-8.5/self-managed/setup/guides/ingress-setup.md index 1fe93e5acf1..5d517099c50 100644 --- a/versioned_docs/version-8.5/self-managed/setup/guides/ingress-setup.md +++ b/versioned_docs/version-8.5/self-managed/setup/guides/ingress-setup.md @@ -21,7 +21,7 @@ Camunda 8 Helm chart doesn't manage or deploy Ingress controllers, it only deplo ## Preparation - An Ingress controller should be deployed in advance. The examples below use the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), but any Ingress controller could be used by setting `ingress.className`. -- TLS configuration is not handled in the examples because it varies between different workflows. It could be configured directly using `ingress.tls` options or via an external tool like [Cert-Manager](https://github.com/cert-manager/cert-manager) using `ingress.annotations`. For more details, check available [configuration options](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-latest#configuration). +- TLS configuration is not handled in the examples because it varies between different workflows. 
It could be configured directly using `ingress.tls` options or via an external tool like [Cert-Manager](https://github.com/cert-manager/cert-manager) using `ingress.annotations`. For more details, check available [configuration options](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-8.5#configuration). ## Configuration diff --git a/versioned_docs/version-8.5/self-managed/setup/guides/using-existing-opensearch.md b/versioned_docs/version-8.5/self-managed/setup/guides/using-existing-opensearch.md index c8c7f96d0d1..1ec38c5a763 100644 --- a/versioned_docs/version-8.5/self-managed/setup/guides/using-existing-opensearch.md +++ b/versioned_docs/version-8.5/self-managed/setup/guides/using-existing-opensearch.md @@ -56,7 +56,7 @@ If you do not wish to specify the username and password in plaintext within the ```yaml global: - opensearcn: + opensearch: auth: existingSecret: secretName existingSecretKey: secretKey diff --git a/versioned_docs/version-8.5/self-managed/setup/install.md b/versioned_docs/version-8.5/self-managed/setup/install.md index 84bc342bb81..01857b43152 100644 --- a/versioned_docs/version-8.5/self-managed/setup/install.md +++ b/versioned_docs/version-8.5/self-managed/setup/install.md @@ -234,7 +234,7 @@ To set up Web Modeler, you need to provide the following required configuration - Configure the database connection - Web Modeler requires a PostgreSQL database as persistent data storage (other database systems are currently not supported). - _Option 1_: Set `postgresql.enabled: true`. This will install a new PostgreSQL instance as part of the Helm release (using the [PostgreSQL Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) by Bitnami as a dependency). - - _Option 2_: Set `postgresql.enabled: false` and configure a [connection to an external database](#optional-configure-external-database). + - _Option 2_: Set `postgresql.enabled: false` and configure a connection to an external database (see the second example below). We recommend specifying these values in a YAML file that you pass to the `helm install` command. A minimum configuration file would look as follows: diff --git a/versioned_docs/version-8.5/self-managed/setup/upgrade.md b/versioned_docs/version-8.5/self-managed/setup/upgrade.md index 5989fb065e1..251fec0c926 100644 --- a/versioned_docs/version-8.5/self-managed/setup/upgrade.md +++ b/versioned_docs/version-8.5/self-managed/setup/upgrade.md @@ -9,7 +9,9 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; :::note -When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the **next major version**. +When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the next **major** version of the chart. + +For example, if the current Helm chart version is 10.x.x, and the latest next major version is 11.0.1, the recommended upgrade is to 11.0.1 (not 11.0.0). ::: Upgrading between minor versions of the Camunda Helm chart may require [configuration changes](#update-your-configuration). To upgrade between patch versions or when no configuration changes are required, see the [`helm upgrade`](#identity-disabled) instructions. @@ -66,9 +68,7 @@ Ensure to use Helm CLI with version `3.14.3` or more. The upgrade could fail to #### Deprecation notes -The following keys in the values file have been changed in Camunda Helm chart v10.0.2. 
For compatibility, the keys are deprecated in the Camunda release cycle 8.5 and will be removed in the Camunda 8.6 release (October 2024). - -We highly recommend updating the keys in your values file rather than waiting until the 8.6 release. +The following keys were deprecated in 8.5, and their removal has been delayed until the release of Camunda 8.7 (January 2025). We highly recommend updating the keys in your values file rather than waiting until the 8.7 release. | Component | Old Key | New Key | | ------------- | ---------------------------------- | ----------------------------------- | @@ -88,13 +88,6 @@ We highly recommend updating the keys in your values file rather than waiting un | | `global.elasticsearch.host` | `global.elasticsearch.url.host` | | | `global.elasticsearch.port` | `global.elasticsearch.url.port` | -Also, the Web Modeler PostgreSQL key will be changed in the 8.6 release (the new key `webModelerPostgresql` will not work in any chart using Camunda 8.5). - -| Component | Old Key | New Key | -| ----------- | ------------ | ---------------------- | -| Web Modeler | -| | `postgresql` | `webModelerPostgresql` | - #### Identity The Camunda Identity component was formerly a sub-chart of the Camunda Helm chart. Now, it is part of the parent Camunda Helm chart. diff --git a/versioned_docs/version-8.5/self-managed/tasklist-deployment/importer-and-archiver.md b/versioned_docs/version-8.5/self-managed/tasklist-deployment/importer-and-archiver.md index c75e1fe9655..54d26a1fe59 100644 --- a/versioned_docs/version-8.5/self-managed/tasklist-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.5/self-managed/tasklist-deployment/importer-and-archiver.md @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.tasklist.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/broker.md b/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/broker.md index e8f409df2fd..581aba36ee1 100644 --- a/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/broker.md +++ b/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/broker.md @@ -588,11 +588,11 @@ backpressure: ### zeebe.broker.backpressure.gradient -| Field | Description | Example Value | -| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. 
This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | +| Field | Description | Example Value | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | #### YAML snippet @@ -607,12 +607,12 @@ backpressure: ### zeebe.broker.backpressure.gradient2 -| Field | Description | Example Value | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. 
| 600 | +| Field | Description | Example Value | +| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | +| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | #### YAML snippet diff --git a/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/gateway.md b/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/gateway.md index 5af0ee191e5..b04075cba48 100644 --- a/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/gateway.md +++ b/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/gateway.md @@ -384,7 +384,8 @@ Each interceptor should be configured with the values described below:
    classNameEntry point of the interceptor, a class which must: + + Entry point of the interceptor, a class which must:
  • implement io.grpc.ServerInterceptor
  • have public visibility
  • have a public default constructor (i.e. no-arg constructor)
  • @@ -431,3 +432,9 @@ If you are using an embedded gateway, refer to the [broker configuration guide]( multiTenancy: enabled: true ``` + +### Experimental configuration + +See the experimental section of the [gateway.yaml.template](https://github.com/camunda/camunda/blob/stable/8.5/dist/src/main/config/gateway.yaml.template#L298). + +Be aware that all configuration properties which are part of the experimental section are subject to change and can be dropped at any time. diff --git a/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/priority-election.md b/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/priority-election.md index 4e466a0640a..d7fc45c0413 100644 --- a/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/priority-election.md +++ b/versioned_docs/version-8.5/self-managed/zeebe-deployment/configuration/priority-election.md @@ -10,8 +10,8 @@ It aims to achieve a more uniform leader distribution by assigning each node a p ## Configuration -Enable priority election by setting `zeebe.broker.raft.enablePriorityElection = "true"` in your config or -by setting the equivalent environment variable `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`. +Enable priority election by setting `zeebe.broker.cluster.raft.enablePriorityElection=true` in your config or +by setting the equivalent environment variable `ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION=true`. If you are using the fixed partitioning scheme (experimental), you may need [additional configuration](fixed-partitioning.md#priority-election). @@ -19,7 +19,7 @@ If you are using the fixed partitioning scheme (experimental), you may need [add With priority election enabled, election latency and thus failover time increases. -The result of leader election is not deterministic and priority election can only increase the chance of having a +The result of a leader election is not deterministic, and priority election can only increase the chance of having a uniform leader distribution, not guarantee it. -Factors such as high load can prevent high priority nodes from becoming the leader. +Factors such as high load can prevent high-priority nodes from becoming the leader. diff --git a/versioned_docs/version-8.5/self-managed/zeebe-deployment/operations/backpressure.md b/versioned_docs/version-8.5/self-managed/zeebe-deployment/operations/backpressure.md index 3cb675dca0b..3ffc609825e 100644 --- a/versioned_docs/version-8.5/self-managed/zeebe-deployment/operations/backpressure.md +++ b/versioned_docs/version-8.5/self-managed/zeebe-deployment/operations/backpressure.md @@ -12,6 +12,13 @@ If the broker keeps accepting new requests from the client, the backlog increase To avoid such problems, Zeebe employs a backpressure mechanism. When the broker receives more requests than it can process with an acceptable latency, it rejects some requests (see [technical error handling](/apis-tools/zeebe-api/technical-error-handling.md)). +:::note +When [multi-tenancy](./../../concepts/multi-tenancy.md) is enabled in Camunda 8, a large number of concurrent requests +may also lead to issues with Camunda Identity. In such cases, it is recommended to enable and configure the management of +Identity requests in the Zeebe Gateway. This allows Zeebe to employ a backpressure mechanism against these requests. +For more information, see the Zeebe Gateway [experimental configuration documentation](./../configuration/gateway.md#experimental-configuration). 
+::: + ### Terminology - **RTT** - Round-Trip Time, known as the time between when the request is accepted by the broker and when the response to the request is sent back to the gateway. diff --git a/versioned_docs/version-8.6/apis-tools/administration-api/tutorial.md b/versioned_docs/version-8.6/apis-tools/administration-api/tutorial.md index f9d02023875..af7c6ccd38c 100644 --- a/versioned_docs/version-8.6/apis-tools/administration-api/tutorial.md +++ b/versioned_docs/version-8.6/apis-tools/administration-api/tutorial.md @@ -27,12 +27,16 @@ Make sure you keep the generated client credentials in a safe place. The **Clien ## Set up authentication -If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client id and client secret. Then, we return the actual token that can be passed as an authorization header in each request. +If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client ID and client secret. Then, we return the actual token that can be passed as an authorization header in each request. To set up your credentials, create an `.env` file which will be protected by the `.gitignore` file. You will need to add your `CLUSTER_ID`, `ADMINISTRATION_CLIENT_ID`, `ADMINISTRATION_CLIENT_SECRET`, `ADMINISTRATION_AUDIENCE`, which is `api.cloud.camunda.io` in a Camunda 8 SaaS environment, and `ADMINISTRATION_API_URL`, which is `https://api.cloud.camunda.io`. These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CONSOLE_CLIENT_ID`, `CAMUNDA_CONSOLE_CLIENT_SECRET`, `CAMUNDA_CONSOLE_OAUTH_AUDIENCE`, and `CAMUNDA_CONSOLE_BASE_URL`. Locate your `CLUSTER_ID` in Console by navigating to **Clusters**. Scroll down and copy your **Cluster Id** under **Cluster Details**. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. :::note diff --git a/versioned_docs/version-8.6/apis-tools/camunda-api-rest/camunda-api-rest-overview.md b/versioned_docs/version-8.6/apis-tools/camunda-api-rest/camunda-api-rest-overview.md index f0190956ca6..a1cdd6c63e7 100644 --- a/versioned_docs/version-8.6/apis-tools/camunda-api-rest/camunda-api-rest-overview.md +++ b/versioned_docs/version-8.6/apis-tools/camunda-api-rest/camunda-api-rest-overview.md @@ -15,7 +15,7 @@ Ensure you [authenticate](./camunda-api-rest-authentication.md) before accessing For SaaS: `https://${REGION}.zeebe.camunda.io:443/${CLUSTER_ID}/v2/`, and for Self-Managed installations: `http://localhost:8080/v2/`. 
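As an illustration, a minimal request against a local Self-Managed installation might look like the following sketch; the topology endpoint is used here only as an example of a `v2` call, and the token placeholder stands for an access token obtained as described in the authentication guide:

```shell
# Sketch: call the Camunda 8 REST API of a local Self-Managed installation.
# <TOKEN> is a placeholder for an access token.
curl -H "Authorization: Bearer <TOKEN>" http://localhost:8080/v2/topology
```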
:::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Zeebe component. ::: diff --git a/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/cli-get-started.md b/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/cli-get-started.md index 5e75e07fa03..c3401baf84e 100644 --- a/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/cli-get-started.md +++ b/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/cli-get-started.md @@ -4,6 +4,12 @@ title: Getting started with the CLI client sidebar_label: "Getting started with the CLI client" --- +:::note Heads up! +This project is now community-supported. + +See the [announcement](reference/announcements.md#deprecation-zeebe-go-client--cli-client-zbctl) for more information. +::: + In this tutorial, you will learn how to use the [community-supported](https://github.com/camunda-community-hub) `zbctl` CLI client to interact with Camunda 8. :::note diff --git a/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/index.md b/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/index.md index 3f3080a46fe..5713a910d48 100644 --- a/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/index.md +++ b/versioned_docs/version-8.6/apis-tools/community-clients/cli-client/index.md @@ -5,6 +5,12 @@ sidebar_label: "Quick reference" description: "Learn how to use the community-supported CLI client and command line interface `zbctl` to interact with Camunda 8 and test a connection." --- +:::note Heads up! +This project is now community-supported. + +See the [announcement](reference/announcements.md#deprecation-zeebe-go-client--cli-client-zbctl) for more information. +::: + You can use the [community-supported](https://github.com/camunda-community-hub) `zbctl` command line interface to interact with Camunda 8. After installation, a connection can be tested immediately. diff --git a/versioned_docs/version-8.6/apis-tools/community-clients/go-client/go-get-started.md b/versioned_docs/version-8.6/apis-tools/community-clients/go-client/go-get-started.md index 78db5a571b4..edfaa549a37 100644 --- a/versioned_docs/version-8.6/apis-tools/community-clients/go-client/go-get-started.md +++ b/versioned_docs/version-8.6/apis-tools/community-clients/go-client/go-get-started.md @@ -4,6 +4,12 @@ title: Getting started with the Go client sidebar_label: "Getting started with the Go client" --- +:::note Heads up! +This project is now community-supported. + +See the [announcement](reference/announcements.md#deprecation-zeebe-go-client--cli-client-zbctl) for more information. +::: + import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; @@ -143,7 +149,7 @@ Now, we need a simple process we can deploy. Later, we will extend the process w ![model-process-step-1](assets/order-process-simple.png) -3. Set the ID (the BPMN process id) to `order-process` instead of the autogenerated value so it's easier to work with in this example. +3. 
Set the ID (the BPMN process ID) to `order-process` instead of the autogenerated value so it's easier to work with in this example. 4. [Optional] Download the BPMN file to the root of the project. @@ -157,7 +163,7 @@ Now, we need a simple process we can deploy. Later, we will extend the process w ![model-process-step-1](assets/order-process-simple.png) -3. Set the ID (the BPMN process id) to `order-process` instead of the autogenerated value so it's easier to work with in this example. +3. Set the ID (the BPMN process ID) to `order-process` instead of the autogenerated value so it's easier to work with in this example. 4. Place the BPMN diagram in the root of the project. @@ -169,7 +175,7 @@ Now, we need a simple process we can deploy. Later, we will extend the process w Next, we want to deploy the modeled process to the broker. -The broker stores the process under its BPMN process id and assigns a version. +The broker stores the process under its BPMN process ID and assigns a version. -As a general rule, you should assign human tasks in your business process to groups of people instead of specific individuals. This avoids bottlenecks (such as high workloads on single individuals or employees being on sick leave) and can greatly improve your process performance. +As a general rule, you should assign user tasks in your business process to groups of people instead of specific individuals. This avoids bottlenecks (such as high workloads on single individuals or employees being on sick leave) and can greatly improve your process performance. In the [XML of a user task](/components/modeler/bpmn/user-tasks/user-tasks.md#xml-representations), this is represented as follows: diff --git a/versioned_docs/version-8.6/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md b/versioned_docs/version-8.6/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md index 50dfb21ef17..b0027b95827 100644 --- a/versioned_docs/version-8.6/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md +++ b/versioned_docs/version-8.6/apis-tools/frontend-development/01-task-applications/03-task-application-architecture.md @@ -6,10 +6,10 @@ description: "Understand and decide on the architecture of your task application A typical task application architecture consists of a task application frontend, a backend-for-frontend, and one or more data sources or services that contain business data relevant for the application users to perform their work. The backend implements Camunda Zeebe and Tasklist clients to retrieve and interact with tasks via Camunda APIs. For historical process instance data, Operate is also required. -Depending on the user task implementation type (job-based vs Zeebe user task) you use in your processes, you need to run either the Tasklist or Zeebe client to run operations on task. Task, form, and variable retrieval happens via the Tasklist API. Learn more about the differences of the task implementation types in the [migration guide for Zeebe user tasks](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md). +Depending on the user task implementation type (job worker-based vs Zeebe user task) you use in your processes, you need to run either the Tasklist or Zeebe client to run operations on task. Task, form, and variable retrieval happens via the API. 
Learn more about the differences of the task implementation types in the [migration guide for Zeebe user tasks](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md). :::tip -Starting a completely new project? Use only Zeebe user tasks to simplify your implementation. +Starting a new project? Use Zeebe user tasks to simplify your implementation. ::: Click on any element of this diagram to jump to the documentation page for the respective component: @@ -72,14 +72,14 @@ style Tasklist stroke:#10c95d,color:#000 click Forms "../../forms/introduction-to-forms" click Rest "../../../tasklist-api-rest/tasklist-api-rest-overview" -click Job "../../../tasklist-api-rest/migrate-to-zeebe-user-tasks" -click ZeebeTasks "../../../tasklist-api-rest/migrate-to-zeebe-user-tasks" +click Job "../../../migration-manuals/migrate-to-zeebe-user-tasks" +click ZeebeTasks "../../../migration-manuals/migrate-to-zeebe-user-tasks" click ZeebeRest "../../../zeebe-api-rest/zeebe-api-rest-overview" ``` Follow these resources to learn more about the individual components: -- Familiarize yourself with the [Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) for task, variable, and form retrieval, and to run operations on job-based user tasks. -- Learn how to use the [Zeebe API](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md) to run operations on Zeebe-based user tasks. +- Learn how to use the [Camunda 8 API](/apis-tools/camunda-api-rest/specifications/assign-user-task.api.mdx) for task, variable, and form retrieval, and to run operations on Zeebe user tasks. +- Familiarize yourself with the [Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) to run operations on job worker-based user tasks. - Understand how to design, embed, and customize [forms](/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md). - Understand how this architecture fits into the overall Camunda architecture with the [Java greenfield stack](/components/best-practices/architecture/deciding-about-your-stack.md). diff --git a/versioned_docs/version-8.6/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md b/versioned_docs/version-8.6/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md index aaf60920878..e79fcdd2332 100644 --- a/versioned_docs/version-8.6/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md +++ b/versioned_docs/version-8.6/apis-tools/frontend-development/03-forms/01-introduction-to-forms.md @@ -1,13 +1,13 @@ --- id: introduction-to-forms title: "Introduction to forms" -description: "Forms play a key role in guiding work processes, gathering necessary information, and aiding in decision-making for human tasks." +description: "Forms play a key role in guiding work processes, gathering necessary information, and aiding in decision-making for human task orchestration." --- import FormEditorImg from './img/form-editor.png'; import GHIcon from "@site/src/mdx/GitHubInlineIcon"; -Forms play a key role in giving work instructions, collecting information and making decisions on human tasks. Forms are lightweight user interfaces, tailored for focused data input in specific steps of a process, rendering the orchestration of human tasks more efficient than simply routing users to the applications that are orchestrated. +Forms play a key role in giving work instructions, collecting information and making decisions within human task orchestration. 
Forms are lightweight user interfaces, tailored for focused data input in specific steps of a process, rendering the orchestration of human tasks more efficient than simply routing users to the applications that are orchestrated. Forms are commonly used in [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md/#user-task-forms), but also as [start forms](/components/tasklist/userguide/starting-processes.md) to start a new process instance, or even as [public forms](/components/modeler/web-modeler/advanced-modeling/publish-public-processes.md), e.g. to capture user input at scale or to allow your customers to trigger a self-service process. diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/cluster-topology-request.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/cluster-topology-request.md index ac1a9a952fc..31a4108466e 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/cluster-topology-request.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/cluster-topology-request.md @@ -11,7 +11,7 @@ This example shows which broker is leader and follower for which partition. This ## Prerequisites -Run the Zeebe broker with endpoints, `localhost:8080` (default REST) and `localhost:26500` (default gRPC). +Run the Zeebe Broker with endpoints `localhost:8080` (default REST) and `localhost:26500` (default gRPC). ## TopologyViewer.java diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/data-pojo.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/data-pojo.md index b73b7bece7c..fc3e9e792f9 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/data-pojo.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/data-pojo.md @@ -10,7 +10,7 @@ description: "Let's analyze the prerequisites and code to handle variables as PO ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 2. Run the [deploy a process example](process-deploy.md). ## HandleVariablesAsPojo.java diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/decision-evaluate.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/decision-evaluate.md index b500b4da8b4..c6302c35694 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/decision-evaluate.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/decision-evaluate.md @@ -6,7 +6,7 @@ description: "Let's dive deeper into Zeebe and Java to evaluate a decision." ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 1. Run the [deploy a process example](process-deploy.md). Deploy [`demoDecision.dmn`](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoDecision.dmn) instead of `demoProcess.bpmn`. ## EvaluateDecisionCreator.java diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/job-worker-open.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/job-worker-open.md index 9471c7072a3..8d4c2392038 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/job-worker-open.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/job-worker-open.md @@ -10,7 +10,7 @@ description: "Let's analyze the prerequisites and code to open a job worker." 
## Prerequisites -- Run the Zeebe broker with endpoint `localhost:26500` (default). +- Run the Zeebe Broker with endpoint `localhost:26500` (default). - Run the [deploy a process example](process-deploy.md). - Run the [create a process instance example](process-instance-create.md) a few times. diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-deploy.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-deploy.md index 34ae1c4542d..373844a46be 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-deploy.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-deploy.md @@ -11,7 +11,7 @@ description: "Let's analyze the prerequisites and code to deploy a process using ## Prerequisites -Run the Zeebe broker with endpoint `localhost:26500` (default). +Run the Zeebe Broker with endpoint `localhost:26500` (default). ## ProcessDeployer.java diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-nonblocking.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-nonblocking.md index 531191dc7b5..aae09cf30e3 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-nonblocking.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-nonblocking.md @@ -6,7 +6,7 @@ description: "Let's analyze the prerequisites and code to create non-blocking pr ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 2. Run the [deploy a process example](process-deploy.md). ## NonBlockingProcessInstanceCreator.java diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-with-result.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-with-result.md index 1c9d0c22e4d..e2fbe45d708 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-with-result.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create-with-result.md @@ -6,7 +6,7 @@ description: "Let's analyze the prerequisites and code to create a process insta ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 1. Run the [deploy a process example](process-deploy.md). Deploy [`demoProcessSingleTask.bpmn`](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/zeebe-client-plain-java/src/main/resources/demoProcessSingleTask.bpmn) instead of `demoProcess.bpmn`. ## ProcessInstanceWithResultCreator.java diff --git a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create.md b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create.md index b12e0c0d562..1408e781deb 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create.md +++ b/versioned_docs/version-8.6/apis-tools/java-client-examples/process-instance-create.md @@ -6,7 +6,7 @@ description: "Let's dive deeper into Zeebe and Java to create a process instance ## Prerequisites -1. Run the Zeebe broker with endpoint `localhost:26500` (default). +1. Run the Zeebe Broker with endpoint `localhost:26500` (default). 1. Run the [deploy a process example](process-deploy.md). 
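The prerequisites above assume a locally running broker and a previously deployed process. As a rough sketch of what such an example does (the actual `ProcessInstanceCreator.java` referenced below may differ; the process ID `demoProcess` and the plaintext local gateway are assumptions taken from the deploy example):

```java
import io.camunda.zeebe.client.ZeebeClient;
import io.camunda.zeebe.client.api.response.ProcessInstanceEvent;
import java.net.URI;

public class ProcessInstanceCreatorSketch {
  public static void main(String[] args) {
    // Assumes the local broker from the prerequisites above (plaintext gRPC on the default port).
    try (ZeebeClient client =
        ZeebeClient.newClientBuilder()
            .grpcAddress(URI.create("http://localhost:26500"))
            .usePlaintext()
            .build()) {

      // "demoProcess" is the BPMN process ID deployed by the "deploy a process" example.
      ProcessInstanceEvent instance =
          client
              .newCreateInstanceCommand()
              .bpmnProcessId("demoProcess")
              .latestVersion()
              .send()
              .join();

      System.out.println("Created process instance " + instance.getProcessInstanceKey());
    }
  }
}
```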
## ProcessInstanceCreator.java diff --git a/versioned_docs/version-8.6/apis-tools/java-client/index.md b/versioned_docs/version-8.6/apis-tools/java-client/index.md index 0768a607df7..30212b1bdc8 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client/index.md +++ b/versioned_docs/version-8.6/apis-tools/java-client/index.md @@ -52,8 +52,8 @@ In Java code, instantiate the client as follows: .build(); try (ZeebeClient client = ZeebeClient.newClientBuilder() - .grpcAddress(zeebeGrpc) - .restAddress(zeebeRest) + .grpcAddress(URI.create(zeebeGrpc)) + .restAddress(URI.create(zeebeRest)) .credentialsProvider(credentialsProvider) .build()) { client.newTopologyRequest().send().join(); diff --git a/versioned_docs/version-8.6/apis-tools/java-client/job-worker.md b/versioned_docs/version-8.6/apis-tools/java-client/job-worker.md index c0af82e7dad..6f03d44423f 100644 --- a/versioned_docs/version-8.6/apis-tools/java-client/job-worker.md +++ b/versioned_docs/version-8.6/apis-tools/java-client/job-worker.md @@ -14,7 +14,12 @@ keywords: ["backpressure", "back-pressure", "back pressure"] The Java client provides a job worker that handles polling for available jobs. This allows you to focus on writing code to handle the activated jobs. :::caution REST API limitation -The 8.6.0 Java client cannot maintain the long-lived polling connections required for job polling via the REST API. For example, this applies to performing long-polling job activation when activating jobs larger than the maximum message size, or receiving additional job activation requests while the long-polling connection is still open. +The 8.6.0 Java client cannot maintain the long-lived polling connections required for job polling via the REST API. For example, this applies when: + +- Performing long-polling job activation when activating jobs larger than the maximum message size. +- Receiving additional job activation requests from the same Java client while the long-polling connection is still open. +- Receiving additional job activation requests from a Java client running on the same JVM while the long-polling connection is still open. +- Receiving additional job activation requests from a Java client running on a different JVM while the long-polling connection is still open. If you encounter this issue, consider switching to the Zeebe gRPC protocol for job activation, or use job activation via the REST API with long polling disabled. diff --git a/versioned_docs/version-8.6/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md b/versioned_docs/version-8.6/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md new file mode 100644 index 00000000000..d1f19118f98 --- /dev/null +++ b/versioned_docs/version-8.6/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md @@ -0,0 +1,328 @@ +--- +id: migrate-to-zeebe-user-tasks +title: Migrate to Zeebe user tasks +description: "Learn how to migrate job worker-based user tasks to Zeebe user tasks." 
+--- + +import DocCardList from '@theme/DocCardList'; +import FormViewer from "@site/src/mdx/FormViewer"; +import YesItem from "../tasklist-api-rest/assets/react-components/YesItem"; +import NoItem from "../tasklist-api-rest/assets/react-components/NoItem"; +import TableTextSmall from "../tasklist-api-rest/assets/react-components/TableTextSmall"; +import userTaskMigrationDecisionHelperForm from "../tasklist-api-rest/assets/forms/userTaskMigrationDecisionHelperForm.js"; +import "../tasklist-api-rest/assets/css/condensedTable.module.css"; +import styles from "../tasklist-api-rest/assets/css/cleanImages.module.css"; +import ZeebeTaskSelectionImg from '../tasklist-api-rest/assets/img/zeebe-user-task-selection.png'; + +Camunda 8.5 introduced a new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type: Zeebe user tasks. + +Zeebe user tasks have several benefits compared to job worker-based user tasks. These include: + +- Running directly on the automation engine for high performance. +- Removing dependencies and round trips to Tasklist. +- A powerful API that supports the full task lifecycle. + +In this guide, you will learn: + +- Under which circumstances and when you should migrate. +- How to estimate the impact on a project. +- Steps you need to take for a successful migration without interrupting your operations. + +## Decide on your migration path + +Zeebe user tasks require migration of the user tasks in both your diagrams and the task API. + +With this in mind, you can migrate at your own pace. Whether you should migrate now or later, and what is required to migrate, depends on your current setup and future plans. + +### Task type differences + +Learn the differences between the two task types to make an informed decision and understand the new capabilities of Zeebe user tasks. Refer to this table for the important high-level differences between the two task types: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    Job worker-based user tasks
    + Existing implementation +
    +
    Zeebe user tasks
    + Recommended for existing and new projects +
    Implementation locationTasklist +
    Zeebe
    + Does not require Tasklist to run +
    Compatible versions8.0 +8.5 +
    Supports Tasklist UI
    API
    Supports Tasklist API + + Full support + +
    Partially
    + Queries, GET tasks, forms, variables + ℹ Currently, you must use the Camunda 8 and Tasklist APIs to use Zeebe user tasks +
    Supports Camunda 8 API + + Task state operations (assign/update/complete) +
    Supports job workers
    Supports task lifecycle events + + Basic only: created/completed/canceled + + + Full lifecycle events including custom actions +
    Supports task listeners + +
    Extras
    Custom actions/outcomes + + Custom actions can be defined on any operation excluding unassign (DELETE assignment, send update beforehand) +
    Supports task reports in Optimize
    Recommendations + You can continue to use this task type on existing projects when you have a custom task application running on it and do not require any of the above features. + + Recommended for existing and new projects when you run Tasklist. + Migrate existing projects and task applications/clients to this task type when you require one of the features above, or the following use cases: + +
      +
    • Implement a full task lifecycle
    • +
    • React on any change/events in tasks, such as assignments, escalations, due date updates, or any custom actions
    • +
    • Send notifications
    • +
    • Track task or team performance
    • +
    • Build an audit log on task events
    • +
    • Enrich tasks with business data
    • +
    +
    +
    + +## Switch the implementation type of user tasks + +We recommend you migrate process-by-process, allowing you to thoroughly test the processes in your test environments or via your [CI/CD](/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md). To do this, take the following steps: + +1. Open a diagram you want to migrate. +2. Click on a user task. +3. Check if the task has an embedded form. + - If a form is embedded, [transform it into a linked form](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked) before you change the task type implementation. Press `Ctrl+Z` or `⌘+Z` to undo if you accidentally removed your embedded form. +4. Open the **Implementation** section in the properties panel. +5. Click the **Type** dropdown and select **Zeebe user task**. The linked form or external form reference will be preserved. + +Task Type Selection + +Repeat these steps for all user tasks in the process. Then, deploy the process to your development cluster and test it by running the process and ensuring your custom task applications work. + +## Use the Camunda 8 API + +:::note +The Tasklist REST API is not deprecated, and you still need it for queries on both task types. +::: + +The following table provides a breakdown of which operations are supported in which API, and for which user tasks. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    OperationTasklist APICamunda 8 API
    Supported from 8.6+
    Query user task Job worker-based user tasks Zeebe user tasks
    Get user task Job worker-based user tasks Zeebe user tasks
    Retrieve task variables Job worker-based user tasks Zeebe user tasks
    Get user task form Job worker-based user tasks Zeebe user tasks
    Change task assignment Job worker-based user tasks Zeebe user tasks
    Complete task Job worker-based user tasks Zeebe user tasks
    Update task- Zeebe user tasks
    Save and retrieve draft variables Job worker-based user tasks -
    + +The following table outlines the respective endpoints. Click an endpoint to go to its API documentation and inspect the differences in the request and response objects. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    OperationTasklist APICamunda 8 API
    Assign a task + + PATCH /tasks/:taskId/assign + + + + POST /user-tasks/:userTaskKey/assignment + +
    Unassign a task + + PATCH /tasks/:taskId/unassign + + + + DELETE /user-tasks/:userTaskKey/assignee + +
    Complete task + + PATCH /tasks/:taskId/complete + + + + POST /user-tasks/:userTaskKey/completion + +
    Update task- + + PATCH /user-tasks/:userTaskKey + +
    Save and retrieve draft variables + + POST /tasks/:taskId/variables + + -
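As an illustration of the new endpoints listed above (an editorial sketch, not taken from the documentation; the request body shape and environment variable names are assumptions, so check the Camunda 8 API reference for the exact schema), completing a Zeebe user task over HTTP might look like this:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CompleteZeebeUserTaskSketch {
  public static void main(String[] args) throws Exception {
    // Assumed variables: the Camunda 8 API base URL (for example http://localhost:8080/v2)
    // and a bearer token obtained via your OAuth flow.
    String baseUrl = System.getenv("CAMUNDA_API_BASE_URL");
    String accessToken = System.getenv("CAMUNDA_ACCESS_TOKEN");
    String userTaskKey = args[0];

    // POST /user-tasks/:userTaskKey/completion, as listed in the table above.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(baseUrl + "/user-tasks/" + userTaskKey + "/completion"))
        .header("Authorization", "Bearer " + accessToken)
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString("{\"variables\":{\"approved\":true}}"))
        .build();

    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println("Status: " + response.statusCode());
  }
}
```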
    + +### Zeebe Java client + +Use the Zeebe Java client when you are building your task application in Java. The client assists with managing authentication and request/response objects. + +### API differences + + + +Refer to the dedicated sections and API explorers to learn details about the APIs. + + + +## Troubleshooting and common issues + +If your task application does not work properly after migration, check the following: + +- **The endpoints return specific error messages when you run them on the wrong task type**: Ensure you call the right endpoint for the right task type; see the [table](#use-the-camunda-8-api) above. +- **Forms do not appear**: Ensure you have extracted embedded forms, if any, and [transformed them into linked forms](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked), before you change the task type implementation. +- **Task update operation does not work**: The update operation is only available to Zeebe user tasks. diff --git a/versioned_docs/version-8.6/apis-tools/node-js-sdk.md b/versioned_docs/version-8.6/apis-tools/node-js-sdk.md index 3186f048f21..b11e5f327d7 100644 --- a/versioned_docs/version-8.6/apis-tools/node-js-sdk.md +++ b/versioned_docs/version-8.6/apis-tools/node-js-sdk.md @@ -1,7 +1,7 @@ --- id: node-js-sdk title: Node.js -description: Get started with the official Camunda 8 JavaScript SDK for Node.js, available via npm. +description: Get started with the official Camunda 8 JavaScript SDK for Node.js. --- As of 8.5.0, the official [Camunda 8 JavaScript SDK for Node.js](https://github.com/camunda/camunda-8-js-sdk) is available via [npm](https://www.npmjs.com/package/@camunda8/sdk). @@ -234,20 +234,20 @@ This will start a service task worker that runs in an asynchronous loop, invokin The handler must return a job completion function - `fail`, `complete`, or `forward`. This is enforced by the type system and ensures you do not write code that does not have code paths that do not respond to Zeebe after taking a job. The `job.complete` function can take an object that represents variables to update. -### Create a programmatic human task worker +### Create a programmatic user task worker -Our process has a [human task](/guides/getting-started-orchestrate-human-tasks.md) after the [service task](/guides/getting-started-orchestrate-microservices.md). The service task worker will complete the service task job, and we will complete the human task using the Tasklist API client. +Our process has a [user task](/guides/getting-started-orchestrate-human-tasks.md) after the [service task](/guides/getting-started-orchestrate-microservices.md). The service task worker will complete the service task job, and we will complete the user task using the Tasklist API client.
Add the following code beneath the service worker code: ```typescript -console.log(`Starting human task poller...`); +console.log(`Starting user task poller...`); setInterval(async () => { const res = await tasklist.searchTasks({ state: "CREATED", }); if (res.length > 0) { - console.log(`[Tasklist] fetched ${res.length} human tasks`); + console.log(`[Tasklist] fetched ${res.length} user tasks`); res.forEach(async (task) => { console.log( `[Tasklist] claiming task ${task.id} from process ${task.processInstanceKey}` @@ -258,19 +258,19 @@ setInterval(async () => { allowOverrideAssignment: true, }); console.log( - `[Tasklist] servicing human task ${t.id} from process ${t.processInstanceKey}` + `[Tasklist] servicing user task ${t.id} from process ${t.processInstanceKey}` ); await tasklist.completeTask(t.id, { - humanTaskStatus: "Got done", + userTaskStatus: "Got done", }); }); } else { - console.log("No human tasks found"); + console.log("No user tasks found"); } }, 3000); ``` -We now have an asynchronously polling service worker and an asynchronously polling human task worker. +We now have an asynchronously polling service worker and an asynchronously polling user task worker. The last step is to create a process instance. @@ -278,7 +278,7 @@ The last step is to create a process instance. There are two options for creating a process instance: -- For long-running processes, use `createProcessInstance`, which returns as soon as the process instance is created with the process instance id. +- For long-running processes, use `createProcessInstance`, which returns as soon as the process instance is created with the process instance ID. - For the shorter-running process we are using, use `createProcessInstanceWithResult`, which awaits the completion of the process and returns with the final variable values. 1. Locate the following line in the `main` function: @@ -295,11 +295,11 @@ console.log( const p = await zeebe.createProcessInstanceWithResult({ bpmnProcessId: `c8-sdk-demo`, variables: { - humanTaskStatus: "Needs doing", + userTaskStatus: "Needs doing", }, }); console.log(`[Zeebe] Finished Process Instance ${p.processInstanceKey}`); -console.log(`[Zeebe] humanTaskStatus is "${p.variables.humanTaskStatus}"`); +console.log(`[Zeebe] userTaskStatus is "${p.variables.userTaskStatus}"`); console.log( `[Zeebe] serviceTaskOutcome is "${p.variables.serviceTaskOutcome}"` ); @@ -315,14 +315,14 @@ You should see a output similar to the following: ``` Creating worker... -Starting human task poller... +Starting user task poller... [Zeebe] Deployed process c8-sdk-demo [Zeebe Worker] handling job of type service-task -[Tasklist] fetched 1 human tasks +[Tasklist] fetched 1 user tasks [Tasklist] claiming task 2251799814895765 from process 2251799814900881 -[Tasklist] servicing human task 2251799814895765 from process 2251799814900881 +[Tasklist] servicing user task 2251799814895765 from process 2251799814900881 [Zeebe] Finished Process Instance 2251799814900881 -[Zeebe] humanTaskStatus is "Got done" +[Zeebe] userTaskStatus is "Got done" [Zeebe] serviceTaskOutcome is "We did it!" 
``` diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/overview.md b/versioned_docs/version-8.6/apis-tools/operate-api/overview.md index 33f45476b82..61085487b57 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/overview.md +++ b/versioned_docs/version-8.6/apis-tools/operate-api/overview.md @@ -19,7 +19,7 @@ Work with this API in our [Postman collection](https://www.postman.com/camundate For SaaS: `https://${REGION}.operate.camunda.io:443/${CLUSTER_ID}/v1/`, and for Self-Managed installations: `http://localhost:8080/v1/`. :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Operate component. ::: @@ -35,7 +35,7 @@ A Swagger UI is also available within a running instance of Operate, at `https:/ For SaaS: `https://${REGION}.operate.camunda.io/${CLUSTER_ID}/swagger-ui.html`, and for Self-Managed installations: `http://localhost:8080/swagger-ui.html`. :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). ::: ## Multi-tenancy diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-id.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-id.api.mdx index eef02cea212..e1e428e5b1e 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-id.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-id.api.mdx @@ -5,9 +5,9 @@ description: "Get decision instance by id" sidebar_label: "Get decision instance by id" hide_title: true hide_table_of_contents: true -api: 
eJzlV1tv2zYU/isCn7bOiZw2HQpjGOAuSqHVsI3YaQcEQUFLxzEbidRIyqkh6L/vHOpix5a9bE8b+iSK537Vp4JZ/mDY4I5dQSSMUDKUxnIZAbvvMZWB5pYuYzZgiw0+eizjmqdgQZNYwSS+IFEQSUg8Zdyu8ByDibTISBovw9hTSy+ubXiiMdJjGv7MhQY0YHUOPWaiFaScDQpmNxlpNlYL+cDK8p6YTaakAUP01/0+PZ4bmuVRBMag4khJC9ISC8+yREQuEv+rIb7i0I5afIXIUoCa4raisoKBHfrSY4+w2bkXaOoBNAovlU65ra5+viROjNTCoY4eA5mnlPnrYTgKrvAi+DQc3Q7n7nw7/jiefB6702wa/BZeh3h/j/pgzZPcxXLVqfgZyzUXSa67uTBOStUVLIUUxPzxxTHVok2vvFyw6YCwO6sNeevT3zCOXfedYPiEfSqqip90783rXam5Yz1esissyCycjL/Mh+9HARJG4Ty4GY6+BH9Mb4IZkfYqt60o1RD7OE/sqdpBHMost2aHh2vNNzRlFlLzrxtXHssY2e2iIMkKm9DV/o5wLjqO1utJbv8bbmOS8wSOtI8jyRi+vagvTmSgCrdiAcml7TR4QgESiXzZtctCicGJ2KMVCcYe32mYwUUC6U//dLfRasrNC2cjxYHnD92pbvd5F9F2DtNuUgKtlW4z8eYwE9dKL0Qcg3yeg1f+q/9/uJeH4d5UBQcqvVG5jsCTynpLlcv4++iCt13zMJyG3k7AHjiB7yAfhCIgyrWwG4e5FsA16DOHQe7we1JgCtSjAPd2v4+9PoA9RF7eYuM5zIZQbqUI3j2ASwWhtwHz1xd+I3PWyBi/EHHJyBm9bvBfrhPkL6rklgPfL1bK2HJQZEpbYl5zLTgWwmWUaFVhl9x9AVmiIp64632/5yvwiECrn9CjxXdqgcr6ucMhaOO5unf9d/1OTcR6RMu2MbZ6VtZmnXoq5k5NDqI2hZoRXxV0U5ztJzETBJmarxqbTBE8zIOzWQsdHJKu5dDD3YK3WmoXnUP0XjGxhvu66dXfP89dXwm5VE687q+JQ/fgTfMFzgyFchiw8rjD0/iwYg0el7EXqTRLgLZTDQPbpqKT16jFWfFShShOUWc7SatVTp2wUspWgIpGF1WTW1UjUVAGu+jp6ek84ikuPH6OBikJ6CQg9ifeOm+j+qa3JxyryLTSQrl3X8MSNKCXfq3I+A4/NACRXZz3z/tVVxmbcrlj6PQIPctZWx4L36yfJRxLWdYOFvV43bH1Bduize2A4eUAVWIbVYNyx4piwQ3c6qQs6Rq/C9oN/Xau3BTGwtAZ53jJEwMHPrUrkv1wU/9w/egd/ynrDKFpXrlhLexirP4VItxWErRdAY+xC8mpijDEBsrsjsjB3xiNTbuGPgRz5OU55anN5V5rO+2d7vzy3jF4c/UI8tfWOUuv5F9Z/gXHqxkU +api: eJzlV21v2zYQ/ivCfVo7JXLadCiEYoBbK4VWwwlipx0QBAVNnWM2EqmSlFND0H8fjnqxY8tetk8b+skS74V3z734UQmW3RsIb2GEXBihZCyNZZIj3PmgctTM0mECIczXcQI+5EyzDC1qMitBsgwhBEEiISGEnNkl+JCg4VrkZA0hxImnFl7S3OGJ9hIfNH4vhMYEQqsL9MHwJWYMwhLsOifPxmoh76Gq7kjZ5EoaNCR/NRjQz9OLpgXnaAz4wJW0KC2psDxPBXeZBN8M6ZX796j5N+SWEtSUtxX1LSLpicWHB1xvnQtp8R41+LBQOmO2PvrtnDSNZRb3ffiAssgI+YthPI5G4EP0eTi+Gc7c883k0+Tyy8Q9Ta+iD/FFHI3grvIBVywtXC6jXsdPVC6YSAvdr5VrRVCNcCGkIOVPz86pMW175fmGbQfE/ai24k1Mf6M4cd13ROEzaiPqih8N7/WrbauZUz1cslH0IZ7Gl5Ovs+H7cQQ+jONZdD0cf43+vLqOpiTaqdymolRDjaZI7bHaYRLLvLBmS4dpzdY0ZRYz868bVx5CjO7tk1Q+WGFTOtrdES5Ep9FFfVnY/0bYPugixQPt40QywR/P6osjCNTp1ioombS9Fx5xAFVF4vO+XRbLFUtF4tGKRGMP77Rcq3mK2a//dLfRairMM2cjQ2PYfT/U3T7vE9reYdoGJdJa6Q6J1/tIXCg9F0mC8ikGL4OX//90z/fTva4LjlR6owrN0ZPKegtVyOTn6II3ffMwvIq9rYQ9dAY/AR7EIpAXWti141xzZBr1ieMgt3eVXwJX6kGge7vb5V4f0e4zL2++9hxny9AuFdG7e3RQEHsLIVidBa3NSWtjglIkFVAwetXyv0KnEEJZg1uFQVAulbFVWOZKW1JeMS3YPK2xJVld2AVz/4CQKs5Sd7wb92yJHglo9RN7tEv0qAXq208dD1F6x93bwdtBrydSPeBl0xgbP0tr814/tXKvJ0dR20JNSa9Oui3O5i8xF0SZ2n81uLyKroez6GTaUQfHpBu7yn9S8M5LE6ILiN5rJWi1L9pe/ePLzPWVkAvlzJv+unTsHr2rYp4KTqnsJ6w85vi0x7gVK/SYTDyusjxF2k4NDeyaip681u1CaS9TUlhFne0srVYFdcJSKVsTKhpdxl0N60aipEwYBI+Pj6ecZYVM2ClXGYGQCo7SOBwb3MbNib9jnChuOmuh3HugcYEaJcegcWQCxx9agghnp4PTQd1VxmZMbl10fISeYNaVx+IPG+QpE5J8ugDLZrxuYXUGG7a5GTDwIRQJfXrVg3ILZTlnBm90WlV0/L1A7YZ+M1duChNh6DmBcMFSg3sxdSsSfrluPrheeIc/ynpTaJtXrqGjXQDNpxDxtoqo7RJZgtoFVQuGnGNut0z2vsZobLo19DGagQ+sIJw6LHda23nvDefde6fgzdQDyt+74Cy9UnxV9RfHqxkU sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-1.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-1.api.mdx index 79260b9703a..f36cdf8c8a6 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-1.api.mdx +++ 
b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-1.api.mdx @@ -5,9 +5,9 @@ description: "Get process instance by key" sidebar_label: "Get process instance by key" hide_title: true hide_table_of_contents: true -api: eJzlVllv4zYQ/isEn/ZwLGc3LRZGsYA3cRZu0sSI3e1DEBSUNLa5kUiVpJwagv57Z6jDjq0E3sdi/WDxmHu+4UzBnVhaPrznU6MjsHairBMqAv7Q4zoDI5zUahLzIQ83V7D5+5T3eCaMSMGBIcaCK9zg/SNs8E4qXGbCrXAdg42MzEgCHiI30wuWVXqYbBT1uIF/cmkAlTiTQ4/baAWp4MOCu01GoqVysASDpAttUuGqo1/PeFk+ELvNtLJgiePDYECf56pneUQ6kT/SKEo5IhFZlsjI+xd8t0RXHGrW4XeIHPlsKBpOVlrI12PM83yk+hsGS1Y6XmX6+IGYwixVTT7iHR7rjFRLL1YYdOPqeDM8/WWin250DE2Sj+dHeuMuhINOa0DFL94hZ9cNMeUpAW90Pp98G+PB+e0f0+vxfHxB69HN+fgalw/bGF7AQipJ+Trebky2UO7VIO4B38vmkTRRngjzZu/2rRcqXUKS9kumxF+Pn3UhcKLWIpExI6iDdS8jEX0NE0jf/ygiKcy5PRJfKVotlt35auuy67I6OLjYicnYGG3aSHw8jMSlNqGMY1DPY/AuePf/d/fs0N27KuFAqbc6NxEwpR1b6FzFPwcKfumqh9F0wnYcZuAZfoJ40IsIUW6k2/jmGQI+QubEN5R7fOwKDIF+lOB3D/s99Cu4gwbKwg2rei/25JWmVr0EHwrqwkMerE+Dmuek4bFBgSwlJ2PMumnkuUmQvqiCWw6DoFhp68phkWnjiHgtjBSYCB9RuqsSuxB5QpFMdCQSf7xv93wFjC5oVKAhwOGeIFBp7/vHGHU8F/dp8GnQKYlIX5CyBcZWzsq5rFNORdwpyQ8WTaJmRFc53SSnTbbI5JUPfj0F3U7Hd6P5+GQ2ns0mtzfNRFTzUW/fSXgrpTbRG0T7iog31JcNVn//a+5xJdVCe/YaX7d+UgM2zUOsGXLl0GHNhJ+C8OPkGphQMYt0miVAr9M+qmjFGrFYKyzV2Hs1IdtzOqNzQsJKa1f1cypdFE1mVUAipyyi6OnpqR+JFB880UeFFAQ0EnBiI9o6btf1SW+POdaRbbml9vvAwAKwdUcQ1IJsQFLXzZDFT/uD/qBClXWpUDuKXi+hZzFr0+PgXxdkicBUlrWBRV1e93x9ytsRZVtgeDYkkQijqlDueVGEwsKfJilLOsa+YHzRb+vKV2EsLa2xjhcisXBgU/tE8jd39eD8lr08XHe60IBXbXxVJzntcOlR6f9LGrxWIGKEIVlV3YwQQZnb4TkYoqlu2nfo63iOtCKnQLXB3MO2l95pz29fPAGb60dQn1vrHG3JvrL8DzUuW/w= +api: eJzlVt9v2zYQ/leIe2o7JXLabiiEYUCWOIWXNAkSr3sIjIGizjYbiVTJkzND0P8+HCU5jq0E2ePQF1sS77uf3/GuBpILD8kdXDur0PuJ8SSNQphFYEt0krQ1kwwSSNfnuP77CCIopZMFEjoG1mBkgZDAPa4hAm0ggVLSEiLI0CunS9YACZzjWti5KFs7QveGInD4vdIOM0jIVRiBV0ssJCQ10Lpk1doQLtBBBHPrCkntp18+QtPMGO5Lazx6RrwfjfjvqenbSrFNiEBZQ2iIRWRZ5lqF+OJvnuXqfcs2/YaKOGbH2SDdWuFYX+NewLHpr+i8bm28CPrwfh80lYstnCenzYKl0rIwfdWyQYlSOjR0/npng/xZbh8ubYY9FV6P9yQdnUrCQW/QZM+eeRo8YVBVMD2PT6aTr2OI4OTqy/XFeDo+5efjy5PxxfgUZg0zT+msq22nJbU2R2m2UnqKc2001/z1UREaaejFFO80T9ANSjtV5dK92Tl9G5RqylnTbts1TdNE8HGIxROzkrnOBLcLenqezaWzaY7FT/+V1VyEyr+SowV6LxfD1dz09tBh+2HvYCsnY+es22Tiw34mzqxLdZaheZqDd/G7/3+4H/fDvWkLjlx6byunUBhLYm4rk/0YLPh5qB+OrydiK2CBAfAD5IPvS1SV07QOAzhF6dAdhKF0N2uiGpS19xrD22x3Dn9G2hvCIl2Ldn4XSEvL436BIRU8yROIV0dxhznoMT6u73HdADvjVv0yULkcEqjb5DZJHNdL66lJ6tI6YuGVdFqmeZtbPmsLO5dVzpnMrZJ5+Lzr93SJgg943eBFgpYomAKt9cNwGVu3o+7T6NNoUBOLPqPlkRiPepZE5aCeVnhQU1hO+kLdslwbdF+cTbFlqc9D8rtN6up6fHM8HR/cjm9vJ1eX/VbV4XjybxV8o6VzMTgUhl8Qgl76rOfqH39NA6+0mdsA7/h1FbY9FNdVmmvFoewHbIUMm5SQivQKhTSZULYoc+TbaZdV/CR6tXPrRGGNJsvMDkhytmImLK2ldtpz60oVatgSiYPySRw/PDwcKllUJpOHyhachFwrND7kscvbRfcl2gFnVvkNWtvwHjuco0OjMO4U+Zi1rvpFDY4OR4ejllWeCmm2DL3cQk9ytikP4T8Ul7nUYR0JDtZde93B6gg2K8pjg0EECaucRV2j3EFdp9Ljny5vGv78vUIXmv6xr0IXZtrzcwbJXOYe93zaXJHw5qZbvt+K5xf0wRB68pp16Oq84jeI2t04/Da8li1RZuiCV+3JsVJY0hZmbxHnvtncQ5/HU4hAVpyox8XuKbeD9kF/fv09CIipvUfz28Y74lf2r2n+BXkpdA4= sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -40,7 +40,7 @@ Get process instance by key Success -
    Schema
    +
    Schema
    Invalid request diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-2.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-2.api.mdx index e2d2de91467..85681d95add 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-2.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-2.api.mdx @@ -5,9 +5,9 @@ description: "Get process definition by key" sidebar_label: "Get process definition by key" hide_title: true hide_table_of_contents: true -api: eJzlVltv0zAU/iuWn7h0TTcGmiqENESHBohNaxEP04Sc5LT1ltjBdjqqKP+dc+wkXdsw4BHx0sY+9+/cXHEnFpaPr/ml0QlY+w7mUkknteI3A64LMIIO5ykf83j9EdbfjviAF8KIHBwYEq24wgPS72CNNKnwsxBuid8p2MTIwqsbc5Rmes6KYImlG1MDbuB7KQ2gGWdKGHCbLCEXfFxxty5IuVQOFmCQda5NLly4enXM6/qGxG2hlQVLEkejEf1tG5+WCVlF+USjKuWIRRRFJhMfYXRria/at6zjW0gcRW0IDyeDFYr2T9wbNPB0rNYZqRZEWCGAMlh9VM2LI+KOi1w1WaJ09OjDsIRyvUSiSpfR1X6m65rox32wnauVyGTKKD9g3a/hQ3DiDPLnfwujdcKV9g8hyNFvsegHUypUpZJ+Yrh4DJWJMdp0SLzYR+JMm1imKahtDJ5Fz/79cI/3w70KCQdKvdWlSYAp7dhclyr9P6rgZV8/nF6eswcBM/AC/wEeeGkhKY10az/zYxAGzIGfgtc39aBCCPSdBH+62R3978H1zH0Wr1lYGrhMlpp2zAI8GLQ+xjxaHUaN1MFGykYVCtWcHDKrdgeVJkOJKgBcj6OoWmrr6nFVaOOIeSWMFJgMjyrRQnLnoswIzUwnIvPXu77PlsCIQGOc9pfDM5VBsD4kIMnGtrqT0cmoVxOx/kLLpjg2epbOFb16AnOvJr8R22RNiS8E3SaoS7go5EcPf7PALy4nV6ezycF0Mp2eX3xul3kjRyvoQdI7LY2L3iE6Bybecp+19frh68zXllRz7cWbGrvwjwxgl2WMfUOh7AesmfDrG/+cXAETKmWJzosMaEK1ldUWO32xVi32C8s1lo6m6vaSzuiSKmGptaOKD+2LqsmtUEgUlMUqur+/HyYix6EnhmiQQEAnAZ8axNvg9qm5GewIpzqxnbTU/hwZmIMB9DJqFNlo6y3AD4ej4ShUlXW5UA8M/a6NtlDrEuTgh4uKTGAy68bFqmmxa746DMNnt8nwdkxKsZRCs1zzqoqFhS8mq2u6xv1gfPNvest3YiotfWM3z0VmYc+rblTyJ1fNq+8pe+xt2BtGW8Jq7Xs7K+mEn742/W+NU4kvQaRYjORXoJxiHRXugczeG5C6p5tH7ycz5BUlgdUBulPhXnuvP6/fegY203eg3nTeOTqSf3X9Ez6D8Xg= +api: eJzlVt9v4zYM/lcEPm03N0573XAwhgEdljt0HdaiybCHIhhkmY51tSWfJKcLDP/vAyXb+eXrbo/DvSSWRH4kP5KiWnB8YyF5ggejBVr7C+ZSSSe1gnUEukbDaXGbQQLp7g53f11BBDU3vEKHhlRbULxCSOAZdxCBVJBAzV0BEWRohZG1h0vgDndM56wOlli2NxWBwU+NNJhB4kyDEVhRYMUhacHtagKXyuEGDUSQa1NxF7Z+uIauW5O6rbWyaEnjaj6nv2Pjy0aQVYhAaOVQORLhdV1K4SOMP1qSa88t6/QjCkdRG+LDyWCFov0S96KenlHUOiPVhg62aKwMVl+FeXt1IL3im0mwtK5Un0TK1oSEQ8WVmzykU+lK2jovhK6j8+spVm/VlpcyY5Q+tO7z7NZGpyVW3/1Xlq3jrrFfyFCF1vLNNNdSWceVmD4MG6+xsjBGm5GJt+dMvNcmlVmG6piDN/Gb/3+41+fhPoaEI6Xe6sYIZEo7lutGZV9HFXw/1Q83D7fsIGCGXuEr4KOLwKJojHQ7PxJS5AbNhb8kn9Zd1ILQ+lmiX61PJ8MHdBNjgaU7FmZKha7QNII26Mmg6ZJAvL2Me62LvZaN22fcdUAOme0wohpTQgJtILhL4rgttHVd0tbaOBLeciN5WgZ+6SwkN+dNSWyWWvDSb5/6viqQ0QHd8jTeXIGMyiBYnxGRZOMY7t383XwSiUQ/g7Ivjj1O4Vw9iROEJ5H8wByStSS5EPSQoDHhvJZ3nv5+vt8/LB5vVouL5WK5vL3/fZj1vR6NoIOkjyi9i94hWgchGKTfD/X6658rX1tS5dqr9zV2798gyB6atJSCQjkPWDPupzvjwsktMq4yJnRVl0g31FBZQ7HTFxtgc21YpZV0mqrbazqjG6qEQmtHFR/alwufw1BIFJRN4vjl5WUmeNWojM+EroiEUgpU1vPY8/ZbvxOdKGda2FFbar+ODeZoUAmMeyAbHz0V4HI2n81DVVlXcXVg6N/a6Ii1MUEO/3ZxXXKpCNW72PYt9gTby3D5nDYZRJAQ6Drqm+UJ2jblFv8wZdfR9qcGjW/+fW/5Tsykpe8MkpyXFs+8Gq9K+OaxfxR+y157Ok6GMZSw2vneLhtaQRRebf63W3cRFMgzNN6vcHIjBNbuQOfsiUjdM95HHxYriIA3RNZI6EmFe/RJf3782QuwlX5G9dPonaMl+dd1/wB6G/xX sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -40,7 +40,7 @@ Get process definition by key Success -
    Schema
    +
    Schema
    Invalid request diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-3.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-3.api.mdx index 15e65cd3da4..16b32c5492b 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-3.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-3.api.mdx @@ -5,9 +5,9 @@ description: "Get incident by key" sidebar_label: "Get incident by key" hide_title: true hide_table_of_contents: true -api: eJzlVm1v2zYQ/isCP61dGjtNNhTGMMCNFU+NIxmynRYLAoOWzzETWdRIypkh6L/3jnqJE6tr83HoF5s63utzd7zLmeF3mvVumJdEYgmJYbdHTKaguBEy8Zasxxa7S9jNT9kRS7niGzCgSCJnCX7g/QPs8E4keEy5WeN5CTpSIiUNSERpR64cURs4Ygr+yYQCVG5UBkdMR2vYcNbLmdmlpFIkBu5AIetKqg03Jen3M1YUtySuU5lo0CTxvtulv+cmJ1kUgdYoH0lUhUaRhadpLCIbV+deE19+aFku7iEiH1NFKBhRWqEYf8Q9K0emB7ASiSBbl68V9RJteBLBjwuWLA2rNkokd8gJSbah3M78ydg99y48d4DUmX/pB599PHnB/Ko/Hnv+cO6GYRAi6VPwce4H89Cdhp47QcJ54A+8qRf4DYv7ZRr2z6fz6/5o5jbU8/5o5A7m7si9cv1pQ575f/X9gb0hyty9xlukX7mTSX/ozife36jjy7nrDqxzlZoBujvZN/pEILP9Zw5dBOEVOj2dXwQzf8BuEZEN4sjvWkDBu0iBLYKp2LQzIPzmP/HE6L1rl6LwhmF/ah0P3UkwurbHsYuQ+UPryL1cvCKPkPDEUNMdeEW3wsREajq1KIh81tYAXrLlsVg61GmgzbcbAWtuEcPm19c2BGGU6e/GdfqefScboqr21sv2wt4Hw1VKqgaJ00MkLqRaiCXi9RyDt523//9wzw7DDcuEA6Vey0xF4CTSOCuZJcufowp+a+uH/thz9gJ2wAr8BHjQcwZRpoTZ2Zm9AK5AvbPz7AYfqBwhkA8C7Nfty9E9BNPMbWexc8pRjyvAWtJmcAcWAhr6PdbZnnRqXt3JkbVgZFxt630hUzHy5SWYRa/TyddSm6KXp1IZYt5yJTgCbxGkuzKRK57FhFwsIx5b8ks/p2tw6II2Eto1DH5Tykvrx3a4oo3n6j50P3RbNRHrN7Q8FcKTnrUxaauekrlVk91j6sRMiK8Muk5Gk1yeiksLerVsBWOXJs67Cc5PnIH14lXJoYf7CW60VC5ah+i7ZGI190Vdm58+T20diWQlrXhVT4FdCMEZZwvsEQrlMGDpcLt04Z8RW3B4snQiuUljoNeo2m2curDp5NRqsTecjcR1SVIlW0mjZEaVsJbSlMOXWhVVk1tlIVFQGqvo8fHxOOIbfOD4MRokENBJwAWReCvcRhXl6IXwUka6kRbSfncUrEABetmpFOkOaaVCLoM9Oe4ed8uq0mbDkz1D7S3zDKsmLQb+NZ005pjConIsr9rphm1PbHKrhsJzj1Rh2ZSNccPyfME1zFRcFETGd1/Zpn7qI9t1S6HpjP264rGGA1+aJ5D9ElZ7+RvncGdvdbku0mRnuzfO6AuPtvrsb0FL0Br4EsuNvClv+lgpqdmTOdjNqT+ad2bo0srIMwKmAe9FDVvtrf788dEyOFP5AMmfjXeGPsm/ovgKBVte4w== +api: eJzlVm1v2zYQ/ivCfdo6NUr6MhTCMMC1mUyNIxu2nAYLAoGWzjEbiVRJKp1h6L8PR8nKi921/Tj0iy0d7/W5O/HZguW3BsJriGQmcpQWbnxQFWpuhZJRDiEsN+e4SV+DDxXXvESLmiy2IHmJEMIdbsAHISGEits1+JCjybSoyAOEcI4bT608sQvgg8bPtdCYQ2h1jT6YbI0lh3ALdlORSyEt3qIGH1ZKl9y2ot/fQNPckLmplDRoyOLV8TH9PQ05r7MMjQEfMiUtBQ23wKuqEJmrK/hkSG+7H1ktP2FGOVaaULCijUI1fk96zo5Cj3AlpKBY5z9qGkljuczw+w1blV7VWC3kLfiAsi6pt4t4PmXD6DRiI/BhEZ/Hk48x+BBN0ovBdBrFZymbzSYz8OHD5H0aT9IZS2YRm4MP7IoNF0k0idNxNE9YzGZPz4eTeBS5850LdpXMBsMkvRyMF6yXDgfjMRulbMwuWJz04kX81yAeuROSpOySxQn4cMHm88EZS+fR3yxlV0PGRi75zs2IDaP546APAgo7eJLQ6WR2kcaTJD2dLOIR3DQ+lGgMvz0AWuNDptENSSLKwwrGcvufeA+GSXTJqIrobDZIXOIzNp+ML93jlMWjKD5ziXxSyx/oM0ouLS3lXlZ0KmxBon6Tm4bEbw4tSCTveSFyjzYRjf36olRaLQssf/vRhSGMavPNul6/gm90Q3TbcPDw8OA/BoNprXSPxOt9JE6VXoo8R/kUgxfBi/9/uW/2y521DUdqvVG1ztCTynorVcv855iCt4f2YTCNvEcFe+gMfgI86HOGWa2F3bg7fYlco37p7rvrm8bfQqbUnUD3dvP8aj9D29/r3nLjtVSgRLtWxBxu0UFApCCE4P4k2OmaYHuHmwYouL7f8YlaFxDCtgWzCYNgu1bGNuG2UtqS8j3Xgi+LFks6axu54nVByBUq44UTP88zWaNHB8RYiIvYNXrU8jb6kbt8lX7m7t3xu+ODnkj1K14eBuHBz9ra6qCfVvmgJ8dzdo2Zk15b9K4ZfXN5Jc4d6B0Zm0wZ3Tgv52xO1+GOmHV2jf+kwb2XLkWXEL23SrDTPt3N5oePiZsjIVfKmXfzNHGEEb1pvSxERqXsF6w87kiZxzMr7tHjMvcyVVYF0teo4z7ebrDpydu5XSntlUoKq2iSnaXVqqZJWCtl28uXVpVnroftIFFRJgyCL1++HGW8rGXOjzJVEgiFyFAah2OH27iT+M+Mc5WZ3loo9x5oXKFGmWHQOTIBeaVBbos9OTo+Om6nytiSy0eBDq/ME6z6tlj8xwZVwYUkXy6xbbdO13B/4prbLRT4EJKrG79bjGvYbpfc4EIXTUPizzVqt9QPe+S2LheGnnMIV7wwuJdL/wmEX2Ydb//V2+f0B1PeDancuO0tanoDv6XT7rchErRGnqN22bQngyzDyj6y2ePutB/9d+aMEWXkNQHTg/dshp33g/n88d4peIm6Q/lnn52lV8qvaf4FfTtoRQ== 
sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -37,7 +37,7 @@ Get incident by key Success -
    Schema
    +
    Schema
    Invalid request diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-4.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-4.api.mdx index 2d0d3a29525..6563ac201ed 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-4.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-4.api.mdx @@ -5,9 +5,9 @@ description: "Get flow node instance by key" sidebar_label: "Get flow node instance by key" hide_title: true hide_table_of_contents: true -api: eJzlV1tv4kYU/iuWn9ptEshuWq1QVcmAyboQQ22TdBVFaLCHMBsz447HpMjyf+854wsQnN3sY7UvyZz7Zb4zPuSmIo+p2bs3R7F45iKiDk8V4SE1H85MkVBJFBPcicyeudyN6W5xZZ6ZCZFkQxWVaJmbHAiQP9EdyBiHY0LUGs4RTUPJEvQATLA2xMpYVYEMVkc6MyX9J2OSQhQlM3pmpuGabojZy021S9A344o+UgmqKyE3RJWs367MonhA8zQRPKUpWrzvdvHfcWw/C0OapmAfCnDFFaqQJIlZqAvsfElRLz+NLJZfaKiwaIntUKyMgsW+JT1th6Hrvo6/13BIV4wzTPLtphBKqiFR9EA/VZLxR5RSHr0qw9txEQbRV8WuvvIWBcZDFkF/355rqfLSFWaZbRCYc9ef2QNn5NhD4M686cD2fTj58/5iT9m3thssjnl+YHnBQkuActzA9m7soWMF9mJgBYNP7aLgkze9a0T96dwdWt7nhmG7w+bs296tMwATyx8D6dkD27ltyDmI6/ON5c6tSU1V/+y/B5O5jxbXEPjO+qxTOeXNLM+aTOzJAasst2/59vCA69t/zW0XEhpNpncYdT4JnIXjQiOQ258OUWsAzhbWIHBunQDpPsRzoWMLbz5pkvcHnjMLGgqrrstyx+70zjUfSpy14Wh/eTqMjUGnN7OJHeg7xGY7roXEwwFkDvwshYgp4RoelBOuWuGIUqZiZI1q2NZvSlGg+KrtLXD4lsQsMvDRoal6/U2AAVzGdPPL974N2JUs/Sb8P7zH+jYw4uTxtWGqqmkTts/NYVNsKYVsOvHhtBMjIZcsgt4f9+Bd593/v9yr03K98sIpXn0qMhlSgwtlrETGox8DBb+2zYM1c4yDgg2qDX6AfuADRsNMMrXTe8ySEknluf6038PLlEMLxBOjmnp4uc5cU6V3GeNomTGWO6NchGBBWgvcmx6pbgauRD2zs73s1BvQeW2UdnKwKUzMR27rtSqTMRjkZX+LXqeTr0Wqil6eCKlQeUskI3AXuqkoK+92RbIYmxmLkMSa/TL1YE0NFODihiuZAhpRUEa/0MsHxDh297H7sdvqCVVf8bLHxt7PWqmk1U+p3OpJb3n1XfmoVxZd309z3yRhY939aiedzmwPPjTnPnzfnKlb76eVHWR4eOeNlypFnZD+HGkls9Ye1XD98y7Q0GJ8JbR5BbGp3pupMcuWMDZYymnBwiB6JYV/im2pQXhkhGKTxBQfqGr3a2CFJ6N2C+NibATshALBrS2VFBkiYS2EKr/AOL3gGtMqgYRFpYCi5+fni5Bs4M0jFxAQmwBJUlifUbfq26TinL0wjkSYNtZMaLoj6YpKCll2KkdpB70ikMtiLy+6F90SVanaEH4Q6FtTdNS15oIU/Vd1kpgwvSLoFPNqwu7N7aVZLqrHMwbMHvoEJJWzcm/m+ZKkdC7jokA2fB2kHv39aOlBjFiKZ5jlFYlTepJU81CaP3nVD5mfja/82mktogYw3+nJjjOk4KiRqf8WuCytKYkAiphWKbEARYk6sDn5VYOz0zxG1zYuriTDVu3XrWN8a++t+fze1wpGIJ4o/6PJTiGJ+RXFf+pfx+g= +api: 
eJzlV1tv2zYU/isCn7ZOjdI2HQphGKDYTKvFlT1JTlYEhkFLxzEbmVRJKplh6L8Ph7rEjp1eHoe+2CLPhefyHerTlhh2q4l/Qy4K+SBkDqHQhokMyMwlsgTFDJcizIlPFptL2MzPiEtKptgaDCi03BLB1kB8cgcb4hIuiE9KZlbEJTnoTPESPRCfXMLGkUtn2R7k8O4klyj4UnEFOfGNqsAlOlvBmhF/S8ymRN9cGLgFRVyylGrNTLP1+xmp6xma61IKDRotXp+e4t/+2UmVZaA1cUkmhQFhUIWVZcEzm6D3WaPe9vBkufgMmcGkFZbD8OYUTPZ7wrN2eHRX18sfNRzCkguOQX6/qTZMmSEzsKOvjeLiFqUg8mdl2J0IYZB/VRzZlh9R4CLjOQjz/bE2Kk9dYZTVGoE5jZIJHYQXIR0Sl0zi8YAmCXFJMj2fP67oFY3S+f5ekgZxOrcS4pIwSmn8kQ7DIKXzQZAOPhwXpR/i8XUvOh9Po2EQf+o3aDTsnxMaX4UDOk+D5JK4JKYDGl71y2lC4+75YxBNg1G3av/oP4PRNEGL90FKr4NPNpTDvUkQB6MRHe1sNemeBwkd7uwm9O8pjQZ0fjEaX+Op01EazsMoSQPcPR8PUWsQjEbzYJCGV2GK6/NpEkY0SebxdNQHnwzicJL2K8y6Syu6jMbXEZk1ODuGo8fm2WMoHjr+OBnR1PYQix1GAS5mO5DZ8bOQsgAmLDxAMGGOwhGl3BS4ddHBtrtT6hrFZ8fuglDcs4LnDl46oM3zd0Kp5KKA9W8/ejdgVSr9Tfi/eY35rUFrdvvcMLXZHBMen5vdolClpOor8eawEhdSLXieg9ivwQvvxf8/3bPDdOOm4YCt17JSGThCGmcpK5H/HCh4e2wegkno7CTsgDX4CeqBFxhkleJmY3nMApgC9dK+2m9mtbslmZR3HOxq9pTOvAdjuYyzR2acxcZpiNAazEoib7oFWwykRD7x7l95HQN62Rlpb3sHm5pgPOq+o1WVKohPtk19a9/ztiupTe1vS6kMKt8zxdmiaMqLsqa3S1YVWMxCZqyw209DT1fgoACJG1IyswIHUdCcfmLJh1RP3L07fXd61BOqPuPlERuPflbGlEf9NMpHPVmW1/UqQb0m6a4/fb9ZyS9t9VtOOp7QOEjpy4QmSTiOOn7a2tXuXs97L22INiD7OrJKpNO+6OD613VqocXFUlrzFmJjy5vBmVSLgmeYymHC0mGWkjosM/weHCZyJ5PrsgC8oFru18MKn5zO7VIqZy0FNxLBbS2NkhUiYSWlad7AOL0ssz1sgIRJad/zHh4eTjK2rkTOTjK5xiIUPAOhbR3buo3aHfeJcS4z3VtzadeegiUoEBl4rSPtoVcEcpPsq5PTk9MGVdqsmdg56FtTtFe1vkEG/jVeWTBuKYINcdtO2A25f0Uaoro/Y8QlPvqcue2s3JDtdsE0TFVR17j9pQJlR/9xtOwg5lzjc078JSs0HATVX5Tkl7j9kPnV+crXztEkOgCLjZ3sosIVcZsPDftbI1laActB2bAaSZBlUJodm4OvGpyd/jJ6T5G4sgpL9Ui39vFtvR+N549zq+Ck8g7En310BpcYX13/B+pfx+g= sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-5.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-5.api.mdx index 3782eb8c1bf..c269bbffaeb 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-5.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-5.api.mdx @@ -5,9 +5,9 @@ description: "Get decision requirements by key" sidebar_label: "Get decision requirements by key" hide_title: true hide_table_of_contents: true -api: 
eJzlVt9v2zYQ/lcIPq2dazltOhRGMSDD3MLr0ASxhz0EwUBLZ5uNRGrkyakh6H/fHSkpdqx02ePQF1sk7+d33/FYS1QbL6c38ldItdfWXMPflXZQgEEvb0fSluAU0sE8k1O52n+C/V9v5UiWyqkCEBxr19LQgs7vYE9n2tBnqXBL3xn41OmSLdAmaQu7FlnrTLhDbyPZLskTugpG0qdbKJSc1hL3JdvXBmEDjkTX1hUK49ZP57Jpblndl9Z48KzxejLhv2P/iypNwbOr1JIpgyyiyjLXaUgy+eJZrj71bFdfIEVO3DEkqKMXnR3IeHTabGQzCjg8J2rG5xT3+bDRiPHAwY6qoGPc3/T45jVLE0q2cil8fsoc4aIMDgbBpxpz3hokTNOwyPkQ9HOzU7nOQsnB49MlIIBXORQ//tdSeFRY+WeCUBAL1GY4f23IlEmfACdsfAuYmXPW9Ui8OUXig3UrnWVgjjF4mbz8/6d7fprudSw4cOkj84SxKNa2Mtn3wYK3Q/1wcTUXBwkLCArfAR606SGtnMZ9GB0rUA7cq3Bl3tw2o5ogsHcawur28QT5CDg8PsRqL+L4obG0tTytNhDw4EE0lcnuLMlcltQk1EiOwe266VW5nCTqiGkzTZJ6az0207q0Dll4p5xWhH8Aks9iPdeqyhnA3KYqD9uPw11uQfAB3908+ZDWXPnofczYsY9jc+8m7yaDllj0CSsPfHiws0UsB+1E4UFLYZB29VmwXEy6q0lfY1XqTwHudvRfXs2uL5azV4vZYjG//Nw9A1o9ivCwzr2VNsQQEK+jkOykP3QU/e3PZaCTNmsb1FtaXYbnCYirakWtwqmcJmyFClOf/lDvQCiTidQWZQ58KVG7hMOO3/wlOrPUIqKwRqNlQgdNdLZiJmytRSZ57FgyzWFFInFSnlh0f38/TlVB95wak0MGgYIEeqGwbIvb7+3O6JFyZlPfa2sb1omDNTigKJPWkE+OHgDybDwZTyKrPBbKHDh6RuccAdfXCOErJmWuqJ5NG2XddtWN3J2xnuObfMpGiD2xP25kXa+Uhz9c3jS8TVPAhRZ/aKfQfJn2/E0Nu1a5h5Mo+gtR/tC+NbIX4l8ekoORd8Q1+9DRecUr2T7Wwm9D14/cgsqIghxaPLkg9pR4oHPyYOSe6W+dj7MlyaqK8ekxfMTrYH0wnve/BAGxtHdgfu6jQ15yfE3zD5p7AyA= +api: eJzlVt9v2zYQ/leIe9o61XLadCiEYUCKuoWXoQliD3sIjIKmzjYbiVTJkzNB0P8+HPUjdqx06ePQF1sk7767++6OxxpIbj0kt/Aelfbamhv8WmqHORrysIrAFugkaWvmKSSwri6x+vwGIiikkzkSOtauwcgcIYE7rCACbSCBQtIOIkjRK6cLRoAELrESdiPSzphwh9Yi6JYpJORKjMCrHeYSkhqoKhhfG8ItOohgY10uqd369RyaZsXqvrDGo2eNV9Mp/x3bX5RKoWdTyhpCQywiiyLTKgQZf/EsV59atusvqIgDd0wJ6daKTg9kPDltttBEgYfneM38nPI+HwdtOR452KPzuvX7mxZfv2Jph96WTuGnp+AIjTQ06gSfasp4a7RgmoZFzseon5u9zHQaUo6enk5B4ew6w/yX702FJ0mlfyYJOXovt+Pxa+NJGvUEOWHjW8TMnLNuYOL1KRMfrFvrNEVzzMGL+MX/P9zz03Bv2oQjp76tPGEsiY0tTfpjVMGbsX64uJ6Lg4AFBoUfgI8mAo+qdJqqMDrWKB26l+HKvF01UQ3K2juNYbV6PEE+Io2PD7GuRDt+cqSd5Wm1xcAHD6IE4v1ZnLo0ru+waoB9cPt+epUugwTqltMmieN6Zz01SV1YRyy8l07LddZSymdtPjeyzJjAzCqZhe3H7i53KPiA726efLRDwZlvrU+YO7ZxDPd2+nY6isSiT6A81MMDzo6oGMVphUeRwiDt87NguTboPidDjmWhLwPd3ei/up7dXCxnLxezxWJ+9al/BnR6TXSU5wGlczE4xOtWCHrpD32J/vH3MpSTNhsb1LuyugrPExTX5TrTikM5DdgKGaa+kIr0HoU0qVA2LzLkS6lwNhz29c1foofdWCdyazRZLuigSc6WXAk7a4mLvO1YqUIO20LioHwSx/f39xMl89KkcqJsziRkWqHxgceOtz+7neiRcmqVH7S1DevY4QYdGoVxB+TjowcAnE2mk2lbVZ5yaQ4MPaNzjogbckT4D8VFJrVh4OBl3XXVLezPWM/xTZ4wyCrq+uMW6notPf7lsqbh7a8lutDiD+0Umi/Vnr9TSDYy83jixXAhwk/dWyP9WfzHQ3LU875wTRU6Oit5Bd1jLfw2qyaCHcoUXXCtPblQCgs60Dl5MHLPDLfOx9kSIpAl8zNw+KiuA/qoP7+9CwJiae/Q/D54R7xk/5rmX5p7AyA= sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-6.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-6.api.mdx index fc0487f647b..2af5df108f0 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-6.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key-6.api.mdx @@ -5,9 +5,9 @@ description: "Get decision definition by key" sidebar_label: "Get decision definition by key" hide_title: true hide_table_of_contents: true -api: 
eJzlVt9v0zAQ/lcsP8Homg4GQhVCGqKgMsSmtcDDNCE3ubZmiR1sp6OK8r9zZydpu2aDPaK9tLF9Pz9/57uSO7GwfHjJ30MsrdTqPcylkg6/+FWP6xyMoMU44UM+W5/C+scr3uO5MCIDB4Z0S65wgefXsMYzqfAzF26J3wnY2Mjcmxty1GZ6zpLaFX60vnrcwK9CGkA/zhTQ4zZeQib4sORunZN1qRwswKDoXJtMuLD16phX1RWp21wrC5Y0ng8G9LfrfVLEMViL+rFGU8qRiMjzVMY+xeinJbly37Oe/YTYUdqGAHEyeJHJlox1RqoFr3oehX+JmtAJQIy7DQVUOw5WiLsMsd7r5cXzbS8XAeAMM7d3eOwSPX1wOtvaX+7KoUv424PywisUynVmQqfSpbTVweuqIoHjLo6M1UqkMmFERrDubq4gE2YpZM8eyhnrhCvsP2aYIV3Fohs/qdCUirsPw8Z9sIyM0aZF4sU+Eh+0mckkAbWLwUF08P+ne7yf7kW4cKCrt7owMTClHZvrQiWPgwUvu+rh5HzMthJm4BUeAR64aSEujHRr3+FmIAyYQ/+2X15VvRIh0NcS/OrqdqP7CK6ry7HZmoUeib1zqamlLsCjQd1yyKPVUdSoHW7UbFSiVsUpJLNqem5hUlQpA8TVMIrKpbauGpa5No6EV8JIgdfhcaWzcL1zUaSEZ6pjkfrt29FPl8DogPoP9WuHayJC8N4nKMnHrrnXg9eDTkskeoeVDT02dpbO5Z12gnCnJT8ANNc1IbmQdHNF7ZWLXJ56/OuB5ex8dHEyHR1ORpPJ+OxLM7zUehjh9rW3VuoQfUC0DkK8kf7QMPbT96lnl1Rz7dVrlp35oQrYeTHDyqFU9hPWTPhpBf+cXAETKmGxzvIU6I3C6vGHDd3pizVmsWJYppE6mvjtNZ3RBTFhqbUjzocCRtMUViASJWWRRTc3N/1YZPjsiT46JBAwSMDJimRr3D7XO71byomObasttV9HBuZgAKOMakM22hli+FF/0B8EVlmXCbXl6K+FtANbe0MOfrsoTwXeZlXHWNZFdslXR3wzfmyXGW4PySqSKZTLJS/LmbDw1aRVRdvYI4x/ADbV5WsxkZa+saDnIrWwF1b7XPIn9byTPGX3TsOdiTQsVmtf3mlBK15PnP63wqeJL0EkyEcKLJycIJVyt6WzN/VSAbVv0sfRFGVFQXC1kN4iubfeGc+bd16ATfU1qLdtdI6WFF9V/QHkj0fJ +api: eJzlVm1v2zYQ/ivEfdo6xXLarCiEYUCGuoWXoQlib/sQGANNnW02EqmSJ6eGoP8+HCm/xUrWfBz6xRbJe334HO8aILn0kN3Be1Taa2ve40IbTdoamCVgK3SSF+McMphvrnDzz1tIoJJOlkjoWLcBI0uEDO5xAwloAxlUklaQQI5eOV0Fcxlc4UbYhcg7VyLf+0rA4ZdaO8whI1djAl6tsJSQNUCbiq1rQ7hEBwksrCslxa23F9C2M1b3lTUePWu8Hg7579j7pFYKvYcElDWEhlhEVlWhVUgx/exZrjn1bOefURGn7RgQ0tGLzg9kPDltltAmAYVviZrRiUCM+w1FVHsO1ui8jrE+6+XN60MvtxHgEg35Jzz2iV69OJ1D7U9P5dAn/NeL8iI00lBvJnyqqeCtHl63LQtc9HFkbNay0LlgMqKnp7lSOTsvsPzppZzxJKn235hhid7LZT9+2niSRvUfxo3nYBk5Z90OiTenSHywbq7zHM0xBq/SV///dC9O072NF4589d7WTqEwlsTC1ib/Pljwc189XN6MxUHCAoPCd4BHm4BHVTtNm9Dh5igdurPwtt/N2qQBZe29xrCaPW50H5H6upyYb0TskSXSynJLXWJAg7tlBun6PN2qne3VfNrc46YFDsmttz23dgVk0ESI2yxNm5X11GZNZR2x8Fo6LedFRJjP4vUuZF0wnoVVsgjbj6OfrlDwAfcf7te0QsFEiN4HDCX7ODb3bvhu2GuJRZ+wsqfH3s6KqOq1E4V7LYUBYHtdE5aLSW+vaHflstJXAf9uYLm+Gd1eTkdnk9FkMr7+tB1eOr02Obr2nZUuxBAQr6MQbKU/bBn7+9/TwC5tFjaodyy7DkMVipt6XmjFqZwmbIUM04qQivQahTS5ULasCuQ3qnI2HG7pzl9ia3ZhnSit0WSZ30GTnK2ZCStriTkfC1iqcIeRSJyUz9L04eFhoGRZm1wOlC0ZhEIrND7g2OH2R7eTPFLOrfI7bW3DOnW4QIdGYdoZ8unREAPng+FgGFnlqZTmwNF/FtIRbLsbIvxKaVVIbdhsiLHpiuwO1uewHz8OywwSyNjqLOnK5Q6aZi49/umKtuXtLzW68ADsqyvUYq49f+eQLWTh8SSs3XMJP3TzTv6jeHYa7k1ky2KzCeVd1LyCbuIMv+2sTWCFMkcXAosnl0phRQc6J1MvF9DuTfo4mkICsma4dpA+Inmw3hvPL78FATG192h+3UVHvOT42vZf5I9HyQ== sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key.api.mdx index 06dc3839efd..11ffbbedf5d 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/by-key.api.mdx @@ -5,9 +5,9 @@ description: "Get variable by key" sidebar_label: "Get variable by key" hide_title: true hide_table_of_contents: true -api: 
eJzlVt9v2zYQ/lcIPq2dazltNhRGMSAD3MLrsASxtz0EeaCks81GIjXy5NQQ9L/vjpSUOFbb7HHoiy2S9/O773hsJKqtl/Mb+ZdyWqUFyNuJtBU4hdqaZS7nMj18hIOcyEo5VQKCY/lGGlrQ6V0404Y+K4U7+s7BZ05XrE+bpCvsRux78xPp4J9aOyDT6GqYSJ/toFRy3kg8VGxSG4QtOBLdWFcqjFs/n8u2vWV1X1njwbPG69mM/45druosA+9JP7NkyiCLqKoqdBaySj55lmtOPdv0E2TIuTrGAHX0wjk+J7ygx66XxqMyGXx8tqLPyOHzxSP4g6hHp82WD/aqqMdPCGxD+TPuw2lqbQHKhGMwyuAyH9HlU40Fbw0saVvePh+Df2koCJ0LrjN4/HIZCCwyVf74X8tB2GLtv4nUm9ecV0nlUNtxSHRXpnG8wsbXwFg4Z92AxJtTJN5bl+o8B3OMwcvk5f8/3fPTdK9jwYFL723tMhDGotjY2uTfBwt+GuuHi6uleJSwgKDwHeDB1xpktdN4CBMjBeXAvQq36c1tO2kIAnunIaxunw6OD4DD1BDpQcRBQwNoZ3kqbSFAwCNnLpP9WdLL+qQh0Vayc7fvp1XtCpJrIpjtPEmanfXYzpvKOmThQZ2z47NYyI2qC0ausJkqwvbTONc7EHzAVzJPOqQ1lzx6n4apQD6Ozb2dvZ2NWmLRL1h5IMKDnR1iNWonCo9aClO0L8yK5WLSfTGG4qpKx8nfjfrLq8X1xXrxarVYrZaXf/Rjv9OjCB8XeLDShRgCCjMnCMle+n3Pzd/+XgceabOxQb3j02V4jIC4qlPqEU7lNGErVBj59Id6D0KZXGS2rArg26gbyqInNn+J3iz1hiit0WiZyUETna2ZCTtrkdkdW5VMc1iRSJyUJxbd399PM1XSBaem5JBBoCCBnics2+H2e7czeaKc28wP2tqGdeJgAw4oyqQz5JMw14nIMdmz6Ww6i6zyWCrzyNF4yxxhNZQF4TMmVaF0mP4hsKZrpxu5PzvqiImcsymiTWyMG9k0qfLwpyvalrfp3nehqR+0Qtfl2vM39etGFR5OYhmuQPnDdfcqfCFOX4yjIfckNQc5PHskfQb2hd+W7hi5A5UT3TiaeHJBTKnwkc7Jy5D7Y7hnPizWJKtqBubh3XTM4WB9NJ53vwYBsbZ3YH4ZokNecnxt+y+MIezQ +api: eJzlVt1v2zYQ/1eIe9pa1XLabCiEYkAKuIWXYQlir30I/EBRZ4uNRKrkyakh6H8fjvpIHKtr+zj0xRZ5H7z73R1/bIDkzkNyCx+k0zItEDYR2AqdJG3NMoME0sMlHiCCSjpZIqFj/QaMLBESuAsybSCBSlIOEWToldMV20MCl3gQdiv2g/sIHH6utcMMEnI1RuBVjqWEpAE6VOxSG8IdOohga10pqdv6/RzadsPmvrLGo2eLl/M5/x0fuaqVQu8hAmUNoSFWkVVVaBWyij951mtOT7bpJ1TEuTrGgHR3Cuf4PeEFOz56aTxJo/Dyuw29stUPqHfgj6qenDY7FuxlUU9LyNVGSWLcR2lqbYHSBDEaaWiZTdiyVFPBW2OXtC1vn0/BvzR7WehMcJ3R09fLUDmbFlg+/9FyeJJU+28i9eol51Wi93I3DYnuyzSNV9j4LzAWzlk3IvHqFIl31qU6y9AcY/Asfvb/T/f8NN2bruDIpfe2dgqFsSS2tjbZz9EFv03Nw8X1UjxKWGAw+Anw4GsNVe00HQJjpCgduhfhNr3dtFEDyto7jWG1eUoc75FG1hDpQXREUyLllllphwECppwE4v1ZPOj6uLnDQwt8uNsPbFW7AhJoOjDbJI6b3Hpqk6ayjlh5NOfsWNYVcivrgpErrJJF2H4a5zpHwQK+kpnpKEfBJe9OnwVWsO6Ju9fz1/NJT6z6FS8PjfDgJyeqJv10ypOeAosOhVmxXpf0UIyxuLLSHfP3VH91vbi5WC9erBar1fLq74H2e7s2Oirw6KUPMQQUOCcowaD9bujNPz+uQx9ps7XBvO+nq/AYQXFdp4VWnMppwlbIQPlCKtJ7FNJkQtmyKpBvo56UxdDY/CUGt1vrRGmNJsudHCzJ2Zo7IbeWuLu7UZUq1LBrJE7KJ3F8f38/U7KsTSZnypYMQqEVGh9w7HH7q9+JnhhnVvnRWtuwjh1u0aFRGPeOfBx4HZ3vkj2bzWfzrqs8ldI8Omh6ZI6wGstC+IXiqpA6sH8IrOnH6Rb2Z0cTEUHCrjZRPxi30DSp9PiPK9qWtz/X6MJQP1iFqcu05+8Mkq0sPJ7EMl6B8MtN/yr8VZy+GCdDHprUHGB89gBE3WMt/LabNoIcZYYuRNNJLpTCih7ZnLwMeT7Ge+b9Yg0RyJqBeXg3Hfdw8D4Zz5u3QUGs7R2aP8boiJccX9v+C4wh7NA= sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/delete.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/delete.api.mdx index 87a0f9a05dd..eb7cc632ca5 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/delete.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/delete.api.mdx @@ -5,9 +5,9 @@ description: "Delete process instance and all dependant data by key" sidebar_label: "Delete process instance and all dependant data by key" hide_title: true hide_table_of_contents: true -api: 
eJzlVktv20YQ/iuLPTWpLMqJWwRCUcBtFEBNUBuWihwMH1bkSNyY2mV3h3IFgv+9M7skLYlyX7ciF2kf8/z2mxnWEtXGy+m9vHU2Be/nxqMyKciHkbQlOIXamnkmpzKDAhDkSJbKqS0tHevV0tCGrh9hT3fa0LJUmNM6A586XbIBOvwIe2HXooxuhO78jKSD3yvtgHygq2AkfZrDVslpLXFfsmltEDbgSHRt3VZhPPr+SjbNA6v70hoPnjXeTCb8d+x6UaXsk/RTS6YMsogqy0KnIb3ki2e5eujZrr5AipyzYzBQRy9bsqY2cCDo0Wmzkc2ohSn7Z+GPJGosWOjnXJkNLFBh5emCr67O5TI3O1XoTDBo4PHlnCjiVQHbb/9tbj6G8Hfhv33Dyf4VEP0Ln7uMB4OLA0BmzlnXI/F2iMQH61Y6y8AcY/A6ef3/T/dqmO5dfHDgp/e2cikIY1GsbWWyr4MF352rh+vbuThIWEBQ+ArwoEMPaeU07kMbXoFy4C64DU/vH5pRTRDYRw1h93Dajd+HLjXoxkKZTKiiEBmUYDJlUGQKlVjtRezv1PdzezINqNlPZbK7TFprF501n9Sk1UiO1O26eVG5guTriHwzTZI6tx6baV1ahyy8U04reqUAN9/FV1+rqmCYC5uqIhyfJrXMQfAFTySeNUh75kf0PmaE2cexuXeTd5Ozllj0BSvPrHm2kyOWZ+1E4bOWwvzqXnHBcjHp7uV6JqhSfwz4t8P25nZ2d72cXSxmi8X85tdu8LZ6FOEhG3orbYghIN5HIdlJf+iI/MvnZSCdNmsb1Fvy3YTvARC31YoKilMZJmyFCsOW/lDvIqFSuy3DUBzwjVeiM0uFJLbWaLRM+6CJzlbMhNxa5FKIdU2mOaxIJE7KE4uenp7GqdpSN1RjcsggUJBAHwYs2+L2qT0ZnShnNvW9trZhnzhYgwOKMmkN+YStMpFjspfjyXgSWeVxq8yBo/9aX0do9g+H8AcmZaHokZs29LotvHu5u4zd6rj06GzKJolgsYTuZV2vlIffXNE0fEzjxIVe8VxxoT4z7XlNRb5WhYdBTH1nld/ctV9ur8TLX3dnU+hobfah3ouKd7QMfA2/DbUwmYPKiKAcVby5Jm6VeKAz+Irjiuqb1PvZp9lyRuKqYqx6PE+IHxycDemHn4KAWNpHMD/2ASJvOcSm+RMnstvx +api: eJzlVktv3DYQ/ivEnNpUXq0TtwiEooDbbAA3QW14t+jB2AOXml0xlkiFHK2zEPTfiyEleV/u61bkJJGc58dvZtgCyY2H7AHunFXo/Y3xJI1CWCZga3SStDU3OWSQY4mEkEAtnayQ0LFeC0ZWCBk84g4S0AYyqCUVkECOXjldswHI4APuhF2LOroRevCTgMPPjXaYQ0auwQS8KrCSkLVAu5pNa0O4QQcJrK2rJMWtH66g65as7mtrPHrWeD2d8ufQ9bxR7BMSUNYQGmIRWdelViG99JNnufbUs119QkWcs2MwSEcvFXovN7gn6Mlps4Eu6WHK/1n4CZCmkoV+KaTZ4JwkNR66jo+uzuVyY7ay1Llg0NDTyznVzq5KrL77t7n5GMLfhf/mNSf7V0CMN3zuMG6cHOwBMnPOuhGJN6dIvLdupfMczSEGr9JX//90r07TvY8Xjnz13jZOoTCWxNo2Jv86WPD9uXq4vrsRewkLDApfAR5dAh5V4zTtQhteoXToLrgNZw/LLmlBWfuoMayWx934XehSJ91YSJMLWZYixxpNLg2JXJIUq52I/b1CKuzRNKACMki3l2lv7WKw5tP2EXcdcKRuO8yLxpWQQRuR77I0bQvrqcva2jpi4a10Wq7KCDyfxVtfy6ZkmEurZBm2j5NaFCj4gCcSzxoqUDA/ovcJI8w+Ds29nb6dnrXEoi9YeWbNs52CqD5rJwqftRTm13CLc5aLSQ83NzJB1vpDwL8ftrd3s/vrxexiPpvPb25/GwZvr9clB2wYrfQhhoB4HYVgkH4/EPnXPxaBdNqsbVDvyXcb3gMo7ppVqRWncpqwFTIMWyEV6W0klLJVHYbiCd/4Twxm19aJyhpNlmkfNMnZhplQWEtcCrGupQp3GInESfksTZ+eniZKVo3J5UTZikEotULjA449bh/7neRIObfKj9rahnXqcI0OjcK0N+RTtspEjsleTqaTaWSVp0qaPUf/tb4O0BwvjvALpXUptWFvIfS2L7wH2F7GbnVYepBAxiaXSV9CD9C2K+nxd1d2HW9/btCFXvFccaE+c+35P4dsLUuPJzGNnRW+ue9fbt+Kl193Z1MYaG12od7LhleQhEdkfEp2yy6BAmWOLkQVT66Vwpr2dE5ecVxRY5N6N/s4W8wgAdkwViOeR8QPDs6G9OPPQUAs7COan8YAiZccYtf9CSey2/E= sidebar_class_name: "delete api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/get-statistics.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/get-statistics.api.mdx index c3098e28e3c..9b0793cadfe 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/get-statistics.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/get-statistics.api.mdx @@ -5,9 +5,9 @@ description: "Get flow node statistic by process instance id" sidebar_label: "Get flow node statistic by process instance id" hide_title: true hide_table_of_contents: true -api: 
eJzlV99P5DYQ/lcsP7XXZbPc0eq0qipRCU70qgOxVH1APHiT2V0fiZ3ak+VWUf73zthJCCSogrfqeIDYnp+fv/EMtUS19XJ5K6+cTcH7C+NRmRTk3UzaEpxCbc1FJpdyC7hCWnrUqZczWSqnCkBwrF5LQwuSuocDnWlDn6XCHX1n4FOnS7ZDm5/hIOxGlNGb0J27mXTwT6UdkCt0FcykT3dQKLmsJR5KNq0NwhYciW6sKxTGrV9OZNPcsbovrfHgWeP9YsF/nrpeVSn7nItrwMoZL3yfjiCLAncgtnoPZhTdTGydrUrIxPogNrl9EMaSbYoktRSUQXamyjLXacAr+erZYz3OQTmnAkAIhR/s2/VXSJFRdYw66piHSlHvNR74AnpZj06b7QjZGwpfZwwuJ9JHGVJ72Ol0F/YJpipHL5QDobZbB1uFhHkzi75gEu+xI7SocmGqYg2OXUblHjA/CmPi1gg9ls0he6PTTv21brVJdUa35t/ot9fvafMfadqizAHfnmen/7pEyTNqzNnbOYl9IalBATf8M5MnU5VyYfYqJy5xSYLHl3lObF3nUPz0It9f4DVXXjUN/zCLD+8ZvoJKUW1hXAHhKtv3Y+owbowOBricOWed7JD4MEbi3Lq1zuiyn2LwLnn3/0/3ZJzudbxw4Kv3tnIpELmQaF6Z7Ptgwc9T9XB6dSEGCQsICt8BHrTpIa0ctaDQ5NdAfcMdcZNf3t41s5ogsPcawuru+Vv2CXDQh/puy030eYelziU5J9zZdtYIEwYNEEuZ7I+TVv6ofwCTmoJoEj+cSDy4fTeOVC4n1TpC3yyTpN5Zj82yLq3DhoT3ymlF1xTw5rN47RtF7ZE0c5uqPGxPvdB8wANP9wwzQaL3OUPMPp6a+7j4uJi0xKIvWHmkzaOdHWI5aScKT1oK41F3jSuWi0l3V/c4nJT6cxjf2lnu8urs+vTm7Gh1tlpdXH7p5rpWjyIc0qG30oYYAuJ1FJKd9HnH5D/+vgms02Zjg3rLvsswdYK4qtZUUZzKOGFLA0fgTzt3KJMNuuRzcvGX6Mxyyy6s0WiZ90ETabhjJuysxThZcWGTaQ4rEomT8sSih4eHeaoKeg7VnBwyCBQk0NzJsi1uf7Y7s2fKmU19r61tWCcONuCAokxaQz5hq0zkmOzxfDFfRFZ5LJQZOHp1gT2Bsb8xhG+YlLmi223amOu2+G7l/ji+U0/Lj/aWcdQfVCDRLBbSrazrtfLwl8ubhrepq7jwZDzWXajSTHv+ppLfqNzDKMD+gZU/XLf/HvwoXv4XYjKfjtzmEKo+r3hFn4G14XdDL5ncgcqIphxVPDklhpU40BkN+FxX/ZP16eyGZFXFqPXIPuN+sD4Zz6+/BwFxY+/B/NZHh7zk+JrmX71Gt3o= +api: eJzlV0tv4zYQ/ivEnNqtYjm7abEQigIpkCzSLTZB7KKHwAeaGlvcSKSWHNk1BP33YqhH7EhBkdyKzSXmY17ffDMc1UBy6yF5gDtnFXp/YzxJoxBWEdgSnSRtzU0KCWyRFiRJe9LKQwSldLJAQsfiNRhZICTwiAeIQBtIoJSUQQQpeuV0yXoggc94EHYjytaa0L25CBx+q7TDFBJyFUbgVYaFhKQGOpSsWhvCLTqIYGNdIand+uUCmmbF4r60xqNniffzOf87Nb2oFNuciXukyhkv/BCO2FgnKEOx1Ts0I+8isXW2KjEV64PY5HYvjE2RQVDWEBpiY7Isc60CXvFXzxbrcQzSORkAIiz80b5df0VFjKpj1Em3cUhFeqfpwAkY7npy2mxHyC4zFDplcDmQwcsQ2j7TKgv7Dn2VkxfSoZDbrcOtJEyhiVpbOIn32BBZkrkwVbFGxyZb4QEwP3JjImsRKL6bY/pGo734a81qo3SKhvwb7Q7yA23+I0xblDnS2+Ps5V8XaBMBacrZ2nVu919sikcF3PBfBBdTlXJjdjLXqeCSRE8v87x0dp1j8dOLfH+B11x51TT8x1F8eM/wFei93OK4AkIqu/4xddhujA6OcLlyzjrokfgwRuLaurVOUzSnGLyL3/3/w70Yh3vfJhw59d5WTqEwlsTGVib9Pljw81Q9XN7diKOABQaB7wCPJgKPqnKaDuGRX6N06M74kU8eVk1Ug7L2UWNYrZ73sk9IR+/Q8NryI/r8hRWa+VUgZbabNcKEQRkkEO/O4+7+2dAA4/oRD03sjycSj27XjyOVyyGBuoW+SeK4zqynJqlL66iBCHbSabnOW+T5rE37RlY545xbJfOwPdWh+YAHnr4NM0Fa6zOGmG2cqvs4/zif1MRXX9DyRJsnPRlROamnvTypKYxHfRoXfK8Nuk/d03BS6s9hfOtmudu7q/vL5dXZ4mqxuLn90s91nVwTndBh0NK5GBzidXsJ+tvXPZP/+HsZWKfNxgbxjn23YepEcVetc604lHHAVsgwy/VzhzTp0Sv5nFz8S/Rq+ckurNFkmfdBkpytmAmZtdROVlzYUoUctkTioHwSx/v9fqZkUZlUzpQtGIRcKzQ+4Njh9me3Ez0TTq3yg7S2YR073KBDozDuFPmYtTKR22DPZ/PZvGWVp0KaI0OvLrATGIeMEf5DcZlLbdhM8Lnuiu8BdudtnzotP4ggaUf9owpcRV0hPUBdr6XHv1zeNLz9rUIXWsZT3YUqTbXn3ykkG5l7HDk4NFj44b77PPhRvPwJMRlPT25zCFWfV7yCKHyptN8rzaqJIEOZogtetSeXSmFJRzKjAZ/ramhZn66WEIGsGLUB2WfcD9on/fn193BBLO0jmt8G74iX7F/T/Au9Rrd6 sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-1.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-1.api.mdx index 58f971eb4ff..5cba3950265 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-1.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-1.api.mdx @@ -5,9 +5,9 @@ description: "Search process instances" sidebar_label: "Search process 
instances" hide_title: true hide_table_of_contents: true -api: eJztWF9v2zYQ/yoEX9J2TiwncZsGwwA3cYBsWZLFXvaQBAMt0TZbSVRJyoln6LvvjpQtWVJcG9jLtiJALJF3x7vf/aUW1LCJpqcP9FZJn2t9GWvDYp/TpxaVCVfMCBlfBvSUas6UP/2zQ1tU8a8p1+aTDOb0dEEDrn0lEqQEuoGlI4mTR0QuUAOfL2PDY4M8LElC4Vvp7c8aGRdU+1MeMXwy84SDKDn6zH0DjCAMdDECpMDuWISGq2/TfeHzEpGAsyfA1qJjqSJm3NL7Y5pZPlT2nistnC4bmY4OkWmURPEStaDEo40S8cSKZQrM/WV7NSz9RSifr2XAl67Ynh/olTlnhjdqw+Pg1T3gbNpBpjTC8OidDS/v+7BwdvPr7VV/2D/H5971Wf8KHp8KDM/5WMQC/bq93hAULDYbQayEp5VNfaH8NGTqTWX3rRUqTIiSqoGN1oq/+JZOdlHfG68HHFOKzYFeGB7peiBmyCiV2Z6jHuI8bIZDqmBNlbqnBmfwdt6Hn6eshMMAFcrKK7+lXM0b4OEvLEpCp0gvDOspfsdNqmJNWBjW85y8CfiYpaEhypKRUGhDEHIiNOl4b0G7GQtT9ABqg3rxgIzmZGV082n1k7RjZdqH2AYUUMh6VhZH5Q55WGFLq5Q5shbA7KmkGosDkrAJPD0LMyXL6GnWMmIv5KhBV4YhRPZGYnLPVH7snhUNZYq8/9DtHn38eHzc8brdkw/et00jj/FQkgk3xEw5ifmLsToSXyZzu2QNJ3JM9lDWPb7BgRDYElaKqN5zhAePMVpLdMJ9Mc4rM9FTmYYB2GSgoqPQEuM66RrSFqCj1q6QV7Ltga6BRVtVkKyPLmwzyL3kUNvsnK5X906LjJdiAOf1boAQHu4WakWDqjaWw1X16Xo745NlNn2x+wqFZhqVcrugExlrl7CHntfQk1PfQfhPteBqGduxvH3vzN87c0Nnztumq1W7tVsjDQu3MqqkwB3X0Kd0XQ8kOm5KJHATg7IBmS6C17MJAB+FPPph16xCX6d6yyCPQGUo+I2eWRa2xk23UNso4dJXSqoVDEd1GC6kGokg4PE6Bu/a7/795h7Xzb2WhlzINA7+c+Z2m4K8d3tJSuFMuGX4H0S7nff9VAkzt215BOMIV/u2Wz1AJV0ABPKL4PYNxpWtL74RN1OJDT6R2gLBzBTe2rNOOyffX5G33RBEURc1g75nVUkVVDe6cNhmp+32YgqistNFAgUzs8OHEmyUT+245/xqJ3HgDKXPwqk7fV3tIYx1uBGzyI6LOOZhBLjTD2yhz+8yhbgT78RrlISkr0gp4qKQMzUmaZTjiBslZW5SdH4aIJ0zeumbom0kArtRi6Jp8H5z27/rDfv7g/5gcHlzjQ0Fj8v5cG4o+XslJVfRKoTvjoguqS+WofrzH0MbVpgGd8UHkr67TK2PhfYErz7deLXZpej3pZHF2ziQeGvjRnlgCKpL+XBRzBDNo4JXHgTKs4mdZL3q3L6wDqrOtqtDK0MtemEsLeB5Qt7Y706c3KYjKDLo/HqISMLsSAs/Rsy4nf59iUjjmF6/fomYLMVCcSGRBPMkKmQ5jZIp5s5USuOUxFoHolEtl3oYBhry7vn5+cBnETQDdgAHIgygJIfxG2nzSLvKV1oV5kD6esUtpH1vKz6Gmwdo2c4F6TZKnS2DgnYOvAPP5aE2EYtLB22oOWuAraLZwEWxnYQMIj/LtVvk5eiBzjpFCOyXZeUlCdzqKssDXSxGTPPfVZhluPwVPyVgWSwKkSuZdMpZ4KLCRT09c11kf4gKrS5N9dsIllvH0QNPJ2Yj7VOpxt7eDIaYnvnXyQjyA1YVe8Yvl/D/lD7CHwaiBcfWDru+oCGLJ6ltM9TJxYRmKaKzQrCS/9ayZcGJ5yUtf/xkCchQfuHxT7DhrDH46m5zfwMoNl4Y +api: 
eJztWG1PIzcQ/ivWfOHuupDl7V6iqhIHQeJ6BUpS+gGiyvFOEh+79p7tBdIo/70ae5NssgsXpPZD2+MTsWfG42eeefFOwfGRhfYNXBot0NozZR1XAqEfgc7RcCe1OkugDRa5EeM/diECg18LtO6jTibQnkKCVhiZkyS0oevlWB7sMVkatBCB0MqhcqTD8zyVwltvfbGkOAUrxphx+s9NcoQ26MEXFA4iyA354iRa2h3K1KH5ttwdTipCUjkcoYEIhtpk3IWltwcw83rk7DUaK4Mvzyrt79WVenxU0bPOSDUiqUGeqTm2SaNEzg0q9/Pmznr501Q/nOsE5wHbXN86btwJd9joDarkyT3rGndIqciIREfHvbPrDkRwfPHL5edOr3NC/x+dH3c+d06gP4tAKiGTkgOllYHWKXJVgfQEh1JJ4sbmt3KouHLPQrxGcW8bhDSiSLl5tbb72huVLiVL68lBWMg/cUOihMw5Gq6SlhvDJxCBdJjZOplnpKiN21yjniaYNsOhTbLiSj2O3WOI4KTTPYb+rIJDlxyaVVd+LdBMGuDBR57laXDkKE3rZeIKXWGUZTxN67WCvUpwyIvUMePFWCqtYwQ5k5btxq8hgnueFhQB8ob8woQNJmxx6ebT6ifZoMqtQJVINSIjqzm7PKoMyM0CW1iXLJH1AM76Fde4SljOR5iwB+nGbM6eZi8z/sj2G3zlRCG2NZCja27KY7e86TucsLfvDg/3P3w4ONiNDw/fv4u/fTV2q3qajdAxN0am8NF5H5nQ+cQv+YszPWRbZOuaftktJpXTbKvC6q0guHOr6LbM5ijksKzuzI51kSYs406MvdGK4qroCtIeoP3opZCvZdsNrIAF0TpIPkanvqGUUQqoPR+cw7genYgN52YGE7baHAjCvZdRbdnk1pvT3qL6HMYvxmfm/6iF21wrGxJ0L44b+nghAmR/V9teL1svLGffu/n3bv4PdfOy1Yb69rIW7bTj6UaXqjhwhbZIna37QUIHTcl4wh1nUt3zVCZPZ2Ru9CDF7IeXZiYxobAbJkqG1vJRM6XmxbBxMyzUNiq4dIzRZgHDfh2GU20GMklQrWLwpvXm33/dg/p1z7Vjp7pQyX/uuodNJD+6PGMVOjP0Cv8Dtvs3giiMdBPfygfIDZpt3/Fu+rNoCkLrO4n+Vz/a/MGdoRtrGgpybT0Q3I2hDa373VYpvr0Qb4XBCcgXc4/GelcKk0IbpgHbWbvVmo61dbP2NNfGzfzAYiQflJM+7YW4+ukd2pBqwdNxOH3V7d4YGW0onvkRk0ZDYkA4fccX+vL9szT3Pn4fN1oi0SesLHmxtDN2Lm+0E4QbLc3CdBni1CW5cOl5bJZtI5fUjSKgq0EbLi47V0e9zna30+2eXZxTQ6HjSj2aKirxXlgpXfQO+cbqhWAufTqn6qffe55WlAZXyw8znfAAWx0l/QlxfUKqLfn5ZzkRrI09y43KtBM/O8vEK5NKddZI1pfKuWQ5fiynDGcKfGquiKtTQ3XM8aNyvP4wmPporg/PCyfWpmZyYqh9dMrsvfAfx5BdFoNUCmJKnU+acT9DMy6cvEf/vBCawkLvgPr7Tio2NzvUhmVaSafJIa/pjC4o0cZau+AkFUYufIqEPCXO2Har9fDwsCN4VqiE7widEQypFKis50NJy8/lSrSmnGhhF9pS+98tg0M0qAS2SkO2RVbv5wyC3Z14Jw5Ja13GVeWgZwrUCmAL6jt8dK085dKPkd67aVm7buB+FxYU2K7aKutXPyrL0A1MpwNu8TeTzma0/JW+VVANXVatUF9hjDwJrAgpAseh5Wz3yKHFq6z+/KHaHDSOhMDcPSvbrxTky4tujxKr/ISa6YR0DH+gz6v8AdpwC7dARPTg+ELj16eQcjUqfE+CYJeynxeEznIKXy0W/mbz6qQmFS9//OgFWE/fofoJovI2jn6G5+Jf7YeYrw== sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -36,15 +36,15 @@ Search process instances ## Request -

    Body (required): Search process instances
      filter object
      sort object[]
      • Array [ … ]
    Success
    Schema
      items object[]
      • Array [ … ]
    Data invalid diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-2.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-2.api.mdx index 6dd740755aa..d59e2ac1312 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-2.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-2.api.mdx @@ -5,9 +5,9 @@ description: "Search process definitions" sidebar_label: "Search process definitions" hide_title: true hide_table_of_contents: true -api: eJzlWFtv2zYU/isEX7J1jm9N2tQYBriJA6Qbmiz2tgcnGGiJttlIpEpSST3B/33nkLQsW0qaDHtZhwCxSZ4bz+U7hy6oZQtDB1N6pVXEjTnjcyGFFUrS2xZVGdcMFxcxHVDDmY6Wf/Zpi2r+OefGvlfxig4KGnMTaZE5tgEdOzrCv7A0S7gB8khJy6VFUpZliYic0M4ng/QFNdGSpwy/2VXGQYKafeKRBcZMowlWgBQ4nYvEcv11uju+qhAJ0L0AthadK50y67feHNF1i0qW8gqpsVrIBR7cc22Et+5JMa/7SD3LUhkciJ5qkAfXZ9I2HuKpsAlu1YMAh0b8xZ9ph4/QcL7rJaY1WwG9sDw1de+hAUZp+3yOelx40nxtpeMdU8JJi3KZp5h2w/EprM5G8HFb9cQYDVpXd37NuV41OqhMNNAzTJJ6QsImyTwnEdJYJuEr+S7mc5YnlqCDiTCk1/0ebLlnSY7+Rt1jPFFzornNteQxSYSxjyZ8XQWTceB1nCjKKTuu6AnxPXb6QhheIB4jR2Yr4jK5KtWJmpbBoYEgRMQ5fn27UepFhUR7nvaWV43EXMYQ1Y0V/qomuK7pqq2vWucSYs+8jC1eYl7VMSU/RMNAyM02Em1yI8lEkQW3xC45kfyL9aSRylZuy5mPHAco83dcmQPQZBXsbCvuwBO2b6Qz2WQ8EvMAdcQsVZ7EBOoVjEWhFcZd0n/kLx/NPQCY0lOWJJC1oWqAvt8/7r199+6k9/rNydv+Sc8X3bnD1W061b0cKMCdARh3cm8X/ir2bwG7xNNeiWjH3YZ77YvaT1cHCdh9hOZAb3XO3YbJlDQeAvrdbkOa5FFwwb/Ui/ah8YWQ+S21qNA/fGG8rO9YZVnyLEdUTLj2NdxkCZIdNcX/jFkGJQuZKeLHkwBiNEt4+sNLkwEwx+bmmZFIwegAZbUYbOCrOUBu46ngjLRWunTD64Y6Vnom4pjLXR+86rz671/3qH7dj8qSc5XL+Ju77nFTkg+vLkglnQl3DP+DbHejb5RrYVeum8ygDXJ96EB2ertuFeACdSe4W0GbfHKEiEtAwXaRcrtU2JkyZZwrmF3CqnPf6wSGwwpDxzdgivZoBGhnTq4B42jh/bsedDrFEoStB0UGsLl2DVMLNgsjLJ752LrxFDgTFbFk6fXvmj6BSQIP3JADEwpOFpgFXnsb/ZiVM+VG3En3pNsoCUkfkbLNja2cpbVZoxxP3Chp7acUH6sx0vlLb+KzbR6Z+Jlj9/D9j15eja6Hk9HheDQeX1x+xLaC6gIfNrhKzEspwURnEK49Ed1Qn2/S9cMfE5daWArX27ftyL8sdkcZp6FbGlY+acp+3K112y3RtslWUt3PQ9398a1wvtqfkEpRe6MROmSu3N1DfVy61zsnV/kMah7jUI+WIswNRvBhxb0flCOFl7YwONafHEKSjViodZIqSHyFBjlOq1WOabxUynojEXpANJrlqwAjYqAEHh4e2hFLAZtZGxSiG8BIDkMc0gbf/hJ2WnvMsYpMyS2UW3c0n3PNwcpOEGQ6O2MS7bW77a4vCWNTJiuKnoSAHZeVqWXhodDJEibcG9TZVwR0mNL7nkfNfXygmxjjryu+0Ke0KGbM8N90sl7j9md86CJSbXHBoxhdchb7zPBJSE89sB9O0KRy7q7PtYiAnmMI0c7sk7S3FdC7uhxPsFrC7zypipFHswf8DQj+D+gN/GEyZv5+QOT2C5owucgd8lMvF+uL5eif0od75ehutql/uapY+eN7RwAPtTsuf4IDfxuLS/8u+BvLQG6S +api: 
eJzlWN9v2zYQ/leIe8nWKf6RJm0qDAPcNAXaDU1We9tDEgy0dLbZUqRKUkk9wf/7cCQty7aaJsNetr5Z5N3x+N3dd0fX4PjcQnoFl0ZnaO0rnAklnNAKbhLQJRpOH29ySMEiN9nizyNIwOCnCq17qfMlpDXkaDMjSq+WwtjLMfzMi1KihQQyrRwqR6K8LKXIvNH+B0vyNdhsgQWnX25ZIqSgpx8wc5BAacgFJ9DS7kxIh+brch9x2RISyuEcDSQw06bgLiw9O4ZVAooX2BK1zgg1p41bNFYE7+418/SoJT3h805j07JQEV8CskPCoeLKdW7SrnCSlvZjtErAir/wgW6GAI5m2yByY/gSEhAOC7sPLjlgtXEP19gPG8rua2uTb7kSdxJAVRWUlaPxGSTw6nx8BjdtJMbk0Kq98muFZtkJUJOHaQ0jKffzdSQlK4MmE8o6rjK07LscZ7ySjhHATFg2HHwPCdxyWRHedPaYdvSMGXSVUZgzKaz7Yj3sH8FVHnW9Jpnyh520zonxPfHnxTA8wjxFjk2XzCd626o3ddUEB6JAjIgHfnWzPjSYion2sNOTcDQJo8qFmq+9CFe1EbquqyZf9c4nxI57JZ8/xr02MI0+M2gr6ewmEj12rdhEszk65hbIFH52QTTT5dIvefdJ44Bs/k5f9oAJ5TQ7aFXcQRDsXSvvsi0xE7PIhMwudCVzVnCXLbzRluK26D/CK0RzhwCu4IxLiTmLVQMJHB2dDJ+/eHE6fPrs9PnR6TAU3WtPu5t02kc5SkyXLDLhVu5t01/L/w2fN3Q7bBjtZNBxr11Tu+m68pxg0JZa2VDyR4NBR1pUWbzyv9SadqnwkRT5DXWs2E5CnTyuDTntuHwQTi0X3oeS7vKExI670uMVd5wJdculyL+cI6XRU4nFD4/NFeu4q+wDA1WgtZHZ9mKwZrPuAPmF+4Jzbow2DQxPO8pam6nIc1TbGDzpP/nvX/d4/7rvtGOvdaXy/911T7qSfHT5hrXSmaFX+Aay3U/CWWWEW/rmMkVu0Bx6Dr66WSU1ZFp/FOi/bpL7J4q8IRTqJgW6haZGVWrroeBuASn0b4f9qHDYUuiHfgzkjyFG9u5URkIKdcB3lfb79UJbt0rrUhu38v3TCD6NEy3thdj6aRVSkDrjchHO33Z9skBGG37m0TM/aFAWhNN7hGPZjJhrc6eD00GnJRL9gpVNbmzsLJwrO+0E4U5LqzC0hFiNSS5ceh2fTfMoxc9I3SO0R7i4PH8/mpwfjs/H4zcX76it0HFRjxpcK+aNleiid4i+gxCspV+v0/XtHxOfWlQK7zcv4fPw0NiebPwJg8ax5oXTtOvBdjPeSOz04M3GpvW2CiAMTYPdGa/2CO6OUY2pnfmJYJppj0ismgv/DwCyy2oqRUbR2Y+hZtxPU4xnTtyGaTrTBIXDvONdIhRbm51pwwqthNPkkNd0RleU3AutXXCSCIlnPi1DbVCcbNrv393d9TJeVCrnvUwXBIMUGSrrYxAR/yWuJDvKuc5soy20/+4bnKFBlWE/GrL9rdkKhr1BbxAKxbqCq9ZB9xLDFmRNwjn87Pql5MI/VL1/deSMK7gdBi7dZQ1Yx5j+oQnlfwV1PeUWfzNytaLlT/QaJv7asEXgNlggz0NmhNSEs0D3hxNyqRnO94dh4sWgMcoyLN29sjctKry8GE8oneN/RYXOScfwO/ofid9BCtdwDZSMZbhfWof1GiRX88r3Awh2qep4Rfg0GO4Uqb/ZmhXUsuXljy+9AJvoj6h+giTextFneDz8DaxJhiw= sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -36,15 +36,15 @@ Search process definitions ## Request -

    Body (required): Search examples
      filter object
      sort object[]
      • Array [ … ]
    Success
    Schema
      items object[]
      • Array [ … ]
    Data invalid diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-3.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-3.api.mdx index dd38bd07ef8..bf4ab7bd85d 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-3.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-3.api.mdx @@ -5,9 +5,9 @@ description: "Search incidents" sidebar_label: "Search incidents" hide_title: true hide_table_of_contents: true -api: eJztWFtz2kYU/is7eiFJHRAY2+DpdIYY2SWxgQJ2MnU9zCItRomkVXZXdijDf+85u0III1+Y9iFtM54x0u7Zc/nOdbWwFL2V1vG11Ylc32ORsm72LB4zQZXPo45nHVuSUeHOxvvWniXY14RJ9Y57c+t4YXlMusKPkRLohpqO+CkjCfQujxTyBFoax4Hvaq6VzxIPLCzpzlhI8UnNYwYs+OQzcxUcjAXqoHwmcXfqB4qJ5+m+sHmOyAfZt3Bsz5pyEVJllg7r1lKfc5mUbTb1Ix91+rDr0U4kFY1c9vKDhiQjlUr40S1QsigJ0QOX3WHfOemcdpw2rF52P3R7H7vw1OmNL1r9fqd7NnYGg94Alt733o27vfHAGQ06zhAWTnrddmfU6XUzEufTaNA6GY2vWueXTrZ60jo/d9pj59y5cLqjbPmy+2ur29Y7uDJ2rmAX1i+c4bB15oyHnd+Bx6cTx2lr5VI2bVB3mBe6XkCxrQ2FTnuDC1B6ND7tXXbb1g0gEgKO9LYAFNhzBdPBMvLDYgKAXz2JJ1jfuXLQis7ZoDXSig+cYe/8Sj/2HYCse6YV+cwnO/iRRTRSmBpbWuGurwJcyvIJVfX/ZM8y369pUp1ErelmvFMh6BzofcVCuZ0HKFdyoV5+YjvDWFBkENQC4W2osg3z8ER7Hn5u8gAMUaFlfuW3hIl5Hhf2jYZxYDRoBcF2SRkwlYhIEhoE67pCXnlsSpNAEaG3SeBLRRBj4ktStV+XQZ87GiSIOco3XEjNJhkaxWJC+k1TrSTlGaVOrNnIEW0jkznJYCvmt1YZvcM8gmQs8gBAPF3Kx3hpQ5Z25nXml81syJxiUF9q2E91jdxJKVNWQS3UBd1bKpNTPExKaWaWiEujEpjKSCKBEOI1PUVo5Gmr8mpv1eniurLM6Zux2dJ3bZDRTZOWigp3ae8l+Jaf1PSRwljcJ6xa7aB61Gw2qvuHjcPq4ZGVJeBOPusDxKho2reK/RUDkXZRItGuWLA7nycyDX7YKqHkK7RMloi2kLzS6f96M37zleU6b8HRUeMgF0MGTQ13nFbn3SPp73orEw9BJ3i4Nnvb2u/MsQ9q+HX1sH7YtOt246DebO41bfuo2mzWDupH9f1aowG463TAycoHBK1jJRKmF2TMI2mKY822C+atxEUT/rkx62Gv2LGH/Ji+fkxf39/0lSawqRi7jVSKKxq8SK2c5AGTMJrInAK4Wy9K4DZVFIoo1C7fezyLIeInAQt/2jWb0UOJfFZ9M3Y+FQt+mmuFm8VplQfEEYKLDIb9gkbPxcT3AKxNDN5U3vz7za1vm9vlipzyJPL+c+YeFAV5q98huXAmTB/4H0S7vsy5ifDVXE8PE5gKmHiru+Q1lL0FQMC/+Ey/wdTw7MeUkKkZx/kj5lIDQNUM3ip31UpGVjGzh4WyxR0TUotOBJQxa2GwXB5XKosZsFgeL2KojEs9PgmfTtKbGO4ZP+pbFpwMuEuDmZG6qeZoxghuRDRkhE+Jgnf0uJFe1h07m+5X7Bp2wy7khKSPcFnHwZrPTKm4kI8hLuS0NAOa8csQ6YzRK1+s+0PsY1Pas9A0eO/1HWxjb4fQlKGxYudAcek50DDv34xLqqJWCN8NkbWiPl2F5vuPIx1GGPaD9Uc2x1yQNwdbLcF+bIqyi2ckexW8DwadLBXWHXyz96/X05a/7uyrvm3nu3J+RtA3ZvvhQLzQLng4VGdiVuN0y1yTEOcp15CmKdbTXycZ6ScTKBvo3u0g4ITq4Rh+lH/H9G3C5YglXjxSgMgq1/GJrNjiHTfkgChHhfRJJXiC2THjXKUYQfUC1qiWSS50tITMur+/L7s0hPJOyyAQYQAlGQzySJvG0nm6svfgsMddmZ32uX6vCDaFqxVoWUkZyQpyxeQ2xlbLdtk2mSZVSKOcoIIqsgFUFqeKfVOVOKAQ08tUq0VaYK6tu6qO9jWPtMiAG02tuLYWiwmV7FIEyyUuf8UvPVjY1qXFFD1rxqhnosDEsXVi+sDbESqSXeS27zFYMM2JFng2Vk/S3uSqZb83xGF6kn6zDrmHZwS9x+/Z8P/Y+gP+MPA0KLoa6PWFFdDoNjHZYfhiitIEUcmQe5DR2rJVCYnmOS1/fqcJyIh/YdEvsGGsUfiqvwcs/wI+Wtph +api: 
eJztWG1v2zYQ/ivEfXG7qbGc9xjDANdWOrWp7NlOWiwNDFqiY7aSqJJU0szwfx+OlGU5VpoG24d1qz9Z5PHuufcjF6DptYL2JfhpyCOWarhyQGRMUs1F6kfQBsWoDOeTPXBAss85U/qliO6gvYCIqVDyDCmhDSNDR3jBSIEDoUg18mwvgGZZzEPDtflR4YEFqHDOEor/9F3GoA1i+pGFGhzIJGLQnCncnfFYM/k43Sd2VyHiqWbXTIIDMyETqu3S4T4szbmQKdVjM55yxPTmqUf9VGmahuzbD1qSklRpydNrcICleYIeOA9GA6/rn/peDxw4D94E/XcBOOD3J287g4EfvJp4w2F/CA687r+cBP3J0BsPfW8EDnjvve752O8HkzN/NPYCb7i53+0HPd/sr1h478fDTnc8ueicnXvlardzdub1Jt6Z99YLxuXyefBbJ+iZHVyZeBdeMAYH3nqjUeeVNxn5f3gT733X83oGfMGm53X9UVXoegHFdjYAnfaHbydBfzw57Z8HPbhaOpAwpeh1jdGWDoSSmWAa86SeQGmqv2rvTnfsX3iohf9q2Bkb4ENv1D+7MH8HXtDzg1cGyEcxfYKfWUpTjamzhQp3uY5xqcw3hMr/ZI8y39s1pCbJOrPNfKBS0jtwgGuWqO08QblKSP3tJ7YzkMV1CjkgZLQBZdvMo67x/KgLV1UDjBDQsrrye87kXdUu7AtNstgi6MTxdskZMp3LVBEax+u6Q55FbEbzWBNptknMlSZoY8IVabnPd8CBGxrnaHOUb7mQXZeU1qgXk9AvhmolqcqocOKuixxRNzK9I6XZ6vmtIaN3WESQjKURT6/xdKMa440NWcaZl6VfNrOhdIq1+tKY/dTU0CeBsmWXRQYLurexQ07xMGkUmdkgIU0bmkwZyRWLyEzI4hShaWS0qsLequP1dWVZwVuy2cK7VshiM6SNusLecL7FvjtfRfpAYazvI7C7e9A6Ojk5bu0dHh+2Do+gTMAn+WxArxkCLfpavb8yem1dlCvUK5PshotcFcHPItJAyReomWoQoyF5ZtL/+Wb8VivLZVWDo6Pjg0oMWWsac2dFdX56JP1db5XiIzKTIlmrva3tv8yx92r4Zetw//DE3XePD/ZPTpwT1z1qnZzsHuwf7e/tHh9fLc0PRy+ViVTZYrjrujXzVx4i5H9u7LrfG57YM35MYz+mse9vGisS2laQp41YWmgafxOsiuQhU3msVQUA7u7XJXiPakp4ekNjHj2c5ZkU05glPz8129FDuXoUvh1DvxYLvMjF2s36tKsaxJNSyNIMezWNX8gpjyKWbtrgp+ZP37+6+9vqBkKTU5Gn0X9O3YO6IO8MfFIJZ8LMgf9BtJvLXZhLru/MNDFlVDL5wnTRy6uls4BQiE+cma8r5/HHl4TpucB5JBPKGIDqObShedNqlmRNO4sAypY3TCojOpcxtGFhbblsN5uLuVB62V5kQuqlGackp9PiZoZ71o/m1gVtiEVI47mVuglzPGcEN1KaMCJmRM8ZQY9b6Tumo5fT/ordsXvs1nJC0ge4rONgzWeudVbLxxLXclragc36ZYR0VumVL9b9IePYlBxA1aAN/YGHbezFyBthj8XOgeKKc0tnw78llwKiAYTflghW1Ker0Hz9bmzCCMN+uH6U8+yFeXPQNRLch6Yst36GclfBe28QKlNh3cE3e/96vWj5686+6ttutStXZwRzg3bvD8gL44L7Q3YpZjVed+y1Ce08E8akRYr1zWsmI4N8GvMQ3bsdBIJQMzwTGmp+w8ztIhRoS7yIFAYiq1zHf2TFFu+8iUi5FgjInNRS5JgdcyF0YSORahqauLbJhY5W7Wbz9vZ2J6RJnkZ0JxQJmiHmIUuVcWIRS2fFinPvcCRCVZ7mwnw3JZsxydKQNQtGqolcMbmtsq0dd8e1maZ0QtOKoJoqsmGoMk41+6KbWUx5iowMqkVRYC7hpmWifc2jKDJXTlErLmGxmFLFzmW8XOLyZ3z5wcK2Li226MGc0chGgY1j6No+8GKMQMqL3fY9BwumPdEJQ5bpr9JeVarloD/CYXpavHEnIsIzkt7i+ze9hTZ8gA+AgWeMYqqBWV9ATNPr3GaH5YspSnO0Smm5exltNFuVkPSugvKXl4aAjMUnlv4KTqGNxk/zPrD8C/QS51o= sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -33,15 +33,15 @@ Search incidents ## Request -

    Body (required): Search incidents
      filter object
      sort object[]
      • Array [ … ]
    Success
    Schema
      items object[]
      • Array [ … ]
    Data invalid diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-4.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-4.api.mdx index 86413414c9d..1ecf3581674 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-4.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-4.api.mdx @@ -5,9 +5,9 @@ description: "Search flownode-instances" sidebar_label: "Search flownode-instances" hide_title: true hide_table_of_contents: true -api: eJztWW1z2jgQ/isaf0nbI8EQkkDm5mYccK5cqaEYkuvkMozAIrg1FpXlJBzDf79dydgGnDa56Yd76XSmIGm1+2h39WiXrAxJ7yLj/Ma4DPhDyD3WDiNJwwkzbksGXzBBpc/DtmecGxGjYjIb1YySIdiXmEXygntL43xleCyaCH+BkiDnKjkyTRQe+onGCDZOeChZKHETXSwCf6LUlz9FuHNlRJMZm1P8JpcLBrr4+BObSNi4EAhG+qAFVqd+IJn4ttxntswJ+WD7DraVjCkXcyr11GnNWKt9gDDanP7dSze22NQPfTzM87eCKSFbVLKcfCSFH97hKgu9J9fQtw4Gy/vqskPnxfv9cOJ7EIfnY9Uiu6oQZTzH9Bk6bs9uti/bdgtme/1u03Zd+OYOL0bZyL6yncFoe84dWP3BSK3AqO0M7P57u9W2BvaoaQ2ab4uXBm/73et06aI7dFpW/2M6YTut9Ltr96/aTdhiue9g2LebdvsqHQ5hefP9veUMrc5mlHzYvzc7Qxd3/AqGr62PCsr+XM/qW52O3clN6eNeWK7dys269oeh7QCgy073Gq0OO4P2qO2AI3D2ottCqSYoG1nNQfuqPcDxBdhzwGOj/rCTgneb/XZvkI7w1JtjOe+c7rVj3Oo8K8qjLHjKjI1Gu+97HXugYojObjsWDm5zKZPTM+Y8YDRU6cFCGsrCdMRVXwY4dblJ2w3HIDj/T/bNHDyuKlFFLNZ0++pTIegS5H3J5tE+JaD9iAv5/B37ZMOC4nvGhbcFZd+xbhNGLRs+bvOOcBHQOj/zIWZiWeQf9kjni0AjsYJgn277TMYijAgNgpRzScq55JXHpjQOJBFKjgR+JAk6nfgRqZivjwDgPQ1iDAIC0upI1SSpe4rtzemjkkryIsorSqJaNVEjHpaMlyT1Y7G+AuwYN+YRlAc2BNeimoOEGA+27KkI36TBStkzDZKOwlqF4VI9H38Pk356ABVC2Rz+YOvw2euUXRopYrbO2aahRzZpuW07A5epJ6UnfJE+Ic9EUNr3VPYKFfqqR+8Ymkqe0mI/LUBIeSSOENlCsHufx1GSc7B0gGavEF50QBRM8kpdw9dHpDnjPGLgEcIeITlRARgjU8HnmSZ998H/khMplkTO/J2My5PDTbV6UjlrNOqV49P62Vn9JBd27UoVAIT9HYKfRYc+EZzUmrdzrH2/fNc4WiqMO8x5UzmtnTbMmlk/qTUapYZpnlUajepJ7ax2XK3XwVUqUbHG8+GsqUXBogUPI01FVdMsqPziCdZC36/O22XoFzL3j/LvR/n3o/z7B5d/CY9pAnxZTSe5pMGzsjeHoM8iKIWiAiAoVSviNLiEFF4AoGTfe5rYgAbGAZv/9FKCw9jE0TePoevfORBN8mIVXOnkKEWLxbc37xhbCC5SNxwXVCVcjH0PMmDbB2/Kb/79x63tH9fhklzyOPT+c8c9KUpyq9cmuXQmTG34H2S76ionsfDlUhVTYyiUmDhUhcMN8N4KXMA/+0yNoJB6/i9dcyZnHOuyBY+UJ6icwah8Xynvy5d1gWYgGnHPRKTAxAIIzlhp767Py+XVDHStz1cL4My1qhOFT8dJc4hrOrKq34OdAZ/QYKbNbwMfzBjBhRAqBMKnUE0zgjmgrR+pwiZtTjbq6mbdLNSEok9oyTIj0zOTclGoRwsXalrrKlZHykU5fehNdLKXY+FjVVMyQlX8GN2e3YdH7NCFt7PddfBNQXPJPkCYj3iqJYGoAKmnTgkZG+nLTbL+dj1QiYUXoZ/9Jmrrnn27glcWzOJS03yqkDS3ysT80+3tTuWLwv1ZXQtm81sloLm5NDt1XFI5ZAXCdheSf/dz90/3/uZu17FSIdzvWhJA2y2LuuNTrkKSXNqu+jGakV48BiLC9NhPIk6o6kDgQ/r3TLVdE46xwO4scXGun/NDslELBETmHFzPEZDaKQWP8XZBfyo1SORDUI2w9OXERIngZj48PBxN6BweDHoEBtENAJJBt4SySS52kpnSzmaPT6J0t8/VuCzYFDpNQFlOFEVl1IrkoA9bOTKPTH1TIzmnYc7Q13hpy2Npwkv2KMuLgPqqnFPwVgll3Rj3lSSR9pQltAWB1exzY6xWYxqxoQjWa5z+gj9rIXlmZKWJ1Zgx6um80DfDaOq35nCAiNIeeL99RFLWOyyI9UJ+VfY2R8S9rov9wDj5o8UcDgOzgj7AJP5/bvwB/zAVlXcUv6j5lRHQ8C5Wj5Gh9eKlpzG6JyuHtzlCnWxDSuEyh/LnCyVABvwzC3+BBX0aiUP1o8v6L6yxhBk= +api: 
eJztWVtz4jYU/isavaQXb3CyZJMwnc444LR0WUOxSdpJGUbYIqhrJCrJyVKG/945krENOLtJZx962X1ZJB2d+/l0jrPGmtwr3LrD16l45CKhXa404THFYweLJZVEM8G7CW5hRYmM55MmdrCkf2RU6SuRrHBrjROqYsmWQIlbODR0aJYzfMVyjgo7OBZcU67hElkuUxYb9o3fFdxcYxXP6YLAL71aUtzCYvo7jTV28FKCMppRBaczlmoqP033nq4qRIxrek8ldvBMyAXRdutNE2/MvZgqtbX+7UsvduiMcQbGPP+q0kTqDtG0Qq+0ZPweTilPnjwD3wYQrOSjxwFZ1N9nPGYJ5fr5ulqSfVagZbaA9BkF4cBvd6+7fgc7eDDst/0wxA4OR1eTcuXf+EE02d0LI28YTcwJdnA3iPzhO7/T9SJ/0vai9o/1R9GPw/5tcXTVHwUdb/hrseEHneJ36A9vum1/EnnhW+zgod/2uzfFchT6w+3vd14w8nrbVf6f/0u7Nwrhxg9e5N96vxpVDvcG3tDr9fxeZcuae+WFfqeyG/o/j/yg7U+ue/1bkDrqRd1JNwgjD3av+h2ganu93sRrR92bbgTrq1HYDfwwnAxHvUL5sD3sDqJiBVZvzQreBv3bAI9tntXlURk8I8YHof13g54fmRiCs7uBB4txJWUqfKZCpJRwkx6UE65r0xFOmU5h63qbtluMAeXYn/STOfj61JAaYPFmu6VPpCQr7GCm6UIdQgLIV0Lq5984BBua1teZkMmOKoeODdvYwR0/bONx1REhKLSp7vycUbmq8w/9QBbL1Gripekh3A6pziRXiKRpgbmowFz0VUJnJEs1koYOpUxpBE5HTKET9+tj7OAHkmYQBFDIskOnLircUy9vQT4YqjwvVJVRHtVTFziCsWi6QoUf6/nV6A5xowkCesoTxu+BzVEOjEc78kyE74pgFehZBMlGYWPCcG2ej7+nk316aGJU2Rp/tGN8+TqVRaNlRjcV2YQnaJuWu7JL5Ur2yHnCF8UT8kwNnENPla9Qra8G5J6CqPwprffTktxbj2QKNFtK+sBEpvKcowk6ArE3oJ46QkZN9JUpw6+PUXsuhKKIcEQ/MKWBwXu6QjMpFiUnW/tUIS2Qliuk52wv46rgcHd6enZyfnl5cfL6zcX5+cVZJezWlSYAoPZnCH4ZHfJEcAppyZ5Zh375rHH0TBj3kPPu5E3zzaXbdC/OmpeXzqXrnp9cXp6eNc+br08vLsYb8w+aPLUUXFnoOXXdmk4vi6H3+Xx93T4ivxCpv7R7X9q9L+3eP7jdy3HLAt7LejgtNEmflb0VDYZUZalWNYoAVbMO0zpEE8T4A0lZ8jSwLaWYpnTx7UsBDmKTqU+aYfvdBVUqf6FqSjo3pe6wvnqrjvGlFLJww+uaLkTIKUsSynd98E3jm3+/uc1DcwOh0bXIePKfM/esLsm9QRdV0hlRc+F/kO1miowzyfTKNE9TSiSVr0zjcDfeOGscC/GeUbMaOy/4srWgei6gD1sKZTxB9By3cOPhpHFI37ANGQZt5AOVyiiTyRS38Np6d9NqNNZzofSmtV4KqTemL5SMTPNhEM5sZM18h1s4FTFJ51b8ruLRnCI44GRBkZghPacIcsBKPzaNTTGMbNlduBduLScgfYJLmRkln7nWy1o+lriW08Z2rTZSIdBZo7fRKV+OJYOuxsHcND+4P/CHXuS/Cv0w7PYDeFNAXH5v4+xEvOCSq2gUMk+dIcJb6uttsv50G5nEgkIYlt9AfTuj73bsRoJb32q6TzWS7k6bWH26k/2talN4uGt7wXJ/pwV0t0Wz18flnUPZIOxOHdV3v1J/dtZ396eMtQnh4ZSSK7Q7opganwkTkrxo++bjM0WDbJqyGNLjMIkEImYCQSTW7IGaMSsWEAuYxnIXV+Y3xtGW7UxItBCcaQEKmZtaigyqay6EtkoCHpLY1IUtTkgU1Wo0Hh8fj2OyyHhCjmOxADekLKZcmSTIc7GX7zh7lxMRq+I2E2bdkHRGJeUxbeSMVAO4AjhYY0+O3WPXVqrSC8Irgj6GSzseKxJe0w+6sUwJM+2cUW+dQ9YdfjjJE+mAWQ5bYydHnzu8Xk+JoiOZbjaw/Qd8xgLwLMHKAiueU5LYvLCVgdv2rXkVgUbFzHs4PgIo2xteHNOl/ijtuALEg34I88A0/yPFQiRwR5JH+AMGecQt/Bv+DUMqGu8YfDH7a5wSfp+ZxwhbvlD0JAP3lO3wLkYYy7agxFcVLb+7MgQoEu8p/x47uTUaluYjy+YvhYB+Tg== sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -36,7 +36,7 @@ Search flownode-instances ## Request -

    Body (required)
    Search flownode-instances diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-5.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-5.api.mdx index 6fc8223eeaa..56935b6c5ee 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-5.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-5.api.mdx @@ -5,9 +5,9 @@ description: "Search decision requirements" sidebar_label: "Search decision requirements" hide_title: true hide_table_of_contents: true -api: eJztWFlv4zYQ/isEX9JuHR/ZOM0GRQFnkwDZFkkau+1DNihoaWxzI4laknLWNfzfO0Pqsi3nWPSlBwLEFjkz/DjHNyMvuRVTw0/u+BkE0kiV3MLnTGqIIbGG37e4SkELixuXIT/hBoQOZn/0eYtrFARjT1W44CdLHoIJtExJEuWGTo7BFxGnERgUD1Ri0SaJijSNZOCMdj4Zkl9yE8wgFvTNLlJAC2r8CQKLiqkmCFaiFdydyMiCfl5OhjUZY7VMpnzV4g+wqK1LhDRFay0+UToW1i8dHZJk2OCPy2ajiYihcWMO2kh/vydPfHtA0hqMynQAV7vMof9EYhtB0K60ES01BhL3jfwTXojER3kwWfe00FosUF5aiM12BAiDUdq+XGM7thA1e1jpcA1KvtPikGQxZe9g+B6fzs7x477ujCEBWtVXfslAL3b5qMxXPGoQRdt5jYusyAyma9rsmxAmIossIz8zaViv+y1CmosoI7cThCHtqAnq2UwnELJIGruzdpqPEUmY6zttMucO7NfOykPdd2fmEXnlERRIJkwASYiOZuMFc0leP8MZviujxnOBPFQuIqv7AoI3mmfgy7G0PBBSWEfSepUjWs+idZmzATcV06+Du+W3NbRktkLb/piMFJuCZXYGLIEv1gm0WKDShVtzlyGFPTL+Gz2ZPYa1qnClKtQ9L4j23AVMigAnOcsyM1NZFDIsc4RORmuK66Ltr3KfD/YGcdzxy2SuZADsNDMyAWNYUXfUEA4O+r3v37077r09Oj7q97u+cC8cv1c5uB2AXAJdm9NrJYxrO2i7dqmqg5T03Cvpsd9tuOxOm5u57ogmTwhUtDoDz+spXtmzykG325BTWRCge/6+LrlJuK8k4v+b53pjyDubr73XdUSrrIhe5LUailsw2EnMDjAkediURmfCCiQGzHQZ7s4lDPU4gvi71+aUscJm5oXhiDGdc/rcioRM0FQS7AiTW3gqROdaK1264W0DQSg9lmEIyboP3nTe/POve7h93Stl2YXKkvBfd91+U5IPbi5ZLZ0ZOIX/QLa7uTzItLQL153G2GtB7ztSvrtftZboAvUgwT3dt14xtnC6gZ0p6nWpMs4Zws7wqTPvdUIddnxf54RAEy87AJlGYuNL79HVSaeznKHy6mSZIleuXMvVUozzcZr2fDTdmIyakQpENPPnrYMd4YRCG9QbaPKhiYXi7k9vk+fScq4tzB13j7uNlkh0h5UqGyo7M2vTRjteuNHSyg8/PjpDkvOXLiJSdYxU/gTUMnzb49c357eD0fn+8Hw4vLy+ol5Cx+V6iLAe5dJKDtEBomcvxAvpiyJBP/w+cslEyX9bva6f+7ec9WFIhvX3Knded3cjryTzi5TPZdvubjblSqbqxbVa8ANYd3OIXDrXbo5kpamNEYz8N1HOVXkBXbvfL4DdZGMkBQrbdnAVE24Aww8r5+AGykCRjyy+pSExuM2ikukbK8wiGbBYJdIqAuQ0rVYZZf1MKetBEjehaYLli4YCaLBiHh8f24GIkbxFGw8kNyBIwGGRZHPX/pyvtDaUQxWYUlsq99zRMAENiLKTGzKdtWGK99rddtdXkLGxSGoHPcMRa04rc9HiK0snjQTm7SpHuMzp447Pe6SnQ15ElX5R8kxwx5fLsTDwq45WK1r+TC/mRF4VcXhi4zMQoc8Fn5f8vef6/RFBKEf77YmZSNFrDDC+qX1S9r7GgjfXwxGVU/7bVqxC0tHikX73wv8n/CP+Ufo5d7had+tLHolkmrlmwL1dKkCRkT9Kn23Uq7tZQRDJoobyh1MnwEbqAZIfi7rEoRIf/RvHX2u/yG0= +api: 
eJztWG1v2zYQ/ivEfcnWqZad1l0qDAPcNgWyDU0We9uHJBho6mxzlUiVpJx6hv77cKQsy7bcJsW+7OWbRN4dH97LcyetwfG5heQG3qCQVmp1jR9KaTBH5SzcRaALNNxJrS5SSMAiN2Lx+xAiMPihROte6XQFyRpStMLIgiQhgbGXY/iR50WGFiIQWjlUjkR5UWRSeKPxH5bk12DFAnNOT25VICSgp3+gcBBBYQiCk2hpdyYzh+bzcjJtyVhnpJpDFcF7XLXWpXI4RwMRzLTJuQtLL56TZNrhj4tuo4rn2LmxRGNluN8nT3x2StIGrS6NwHfHzDlUXLlOELQrXUZLnYGsIrDyT3wgkhDl0WzX09wYvoIIpMPcHkaAMFht3MM1DmOLWbeHtUl3oNQ7EaAqc8re0fg1RPDmfPwa7trOGBOgqr3yc4lmdcxHTb4maxhl2WFej7KMbTKDmZY2+yrFGS8zx8jPTFo26H8NESx5VpLbCcKYdvSMGXSlUZiyTFp3tHa6j+EqrfW9NpnzBw5bZ9WhHvoz64g88ggKJONWoEqlmrPpivkkb5/hDd80UYNaoA6Vj0h1t4EQjNYZ+HAsUQBCCrtIokc5IvosWp85e3ALPv8yuAd+20FLZrdoe7dqotkcHXMLZAo/Oi8QMaGLlV/zlyGFEzL+K73ZEyaV0+ykVagnQbB3q/wFbIFCzmqWZXahyyxlOXdi4Y22FHdFe1/kvhDsPeK4gQu11FIge1VaqdBatqk7aginp8PBty9fng2evTh7MRz2Q+G+9fy+zcHDANQS0xWr6XUrPF2xI7TdutS2gzT0PGjocdjvuOxRm/u5XnmmMWgLrWxgkdN+vyOHSiHQ/o1dcZ9gH0m8/zfL3UZQd7JQa4/rgE47nj3Iay0U12jLzNkjYEjyeVcaveGOM6mWPJPp8VwqjJ5mmH/z2JyyjrvSPjAcOVpb0+VBJKSyjitxJEx+4VMhOjdGm8YNzzoIQZupTFNUuz54Ej/551/3+eF132nH3upSpf+66w67knx0dcFa6czQK/wHst3P4aI00q18N5oiN2ieelK+uauiNQit30v0b3fRI8YUoBu4habeVmjrncHdAhKIl4M4NWkc+jgQAkO87AGUJoME1sGjVRLH64W2rkrWhTau8i3WSD6tx2faC9H0YzEkkGnBs0U4bxfsZIGMNqg30KRDEwrFPZzeI88VzRy7MXfWP+t3WiLRI1a22bC1s3Cu6LQThDstVWHYCdEZk1y49CYi245RyB+RWkZoe3B5dX49mpw/HZ+PxxeX76iX0HG1XhXtRLmxUkP0gOg9CMFG+u0mQX/4beKTiZL/evt5fh6+anaHH5m2v6P8ef3jjXwrWV+keW/adn+/KW9ltr24VQth4OrvD41r79r9EawxtTdykf9m2ruqLqBL/78C2VU5zaSgsB0GVzPuBzDGhZNL9AOk0OQjhykrjPabm0qmJ7YxO9OG5VpJpwmQ13RGl5T1C61dAEncxIXP11A0FECbxPH9/X1P8LxUKe8JnZMbMilQWR+c2rU/1SvRnnKqhW20pfbvscEZGlQC49qQjXeGKRj0+r1+qCDrcq5aB32GI3ac1uSiw48uLjIuFRn1CNc1fdzAckB6JoVNVOkPUmCCG1ivp9ziLyarKlr+QB/iRF5b4gjEBgvkaciFkJfwOnD90wlBaEb5w4mZSDFojITAwn1S9q7FgleX4wmVU/0vK9cp6Rh+T/+5+D0kcAu3QOnn3eFr3a+vIeNqXvpmAMEuFSAvyR+Nz/bq1d9sQxBq1UL53SsvwCb6ParvN3UJjl7DF8ZfAH7Cog== sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -33,7 +33,7 @@ Search decision requirements ## Request -

    Body (required)
    Search examples diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-6.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-6.api.mdx index 84f7b4a4691..9635ddbad3d 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-6.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-6.api.mdx @@ -5,9 +5,9 @@ description: "Search decision instances" sidebar_label: "Search decision instances" hide_title: true hide_table_of_contents: true -api: eJztWW1vGjkQ/isrf+ldj8CSllwanU6iCZG4Rkku0PYkiiqza4Kvi721vUk4xH+/GXvfWBZK2n64lyhSYO3xzHhm/MzjZUkMvdXkZETOWMA1l6IvtKEiYGTcIDJmihocDMkJ0YyqYPbxiDSIYp8Tps1rGS7IyZKETAeKxygJcgMr57EHOo8jpkE8kMIwYVCUxnHEA6u09adG+SXRwYzNKX4zi5iBBjn5kwUGFsYKXTActMDslEeGqS/L8bAko43i4pasGuQTW5TGObh0C9oaZCrVnBo3dPQSJSEChm3qaBAmkjkG67zbv+idwUDvXffibXdov7+9fHN59f7Sfhtc9077530YH4M+dkejxO75rFbxmsg55VGi6qVgnwHT+oxNueAo/GbvPaVLs/TuvzDMCqM+qtl04dMXBC/pvH5zmcA7pjR3lbHTvReH5VVDK7o9ZWeQkEH/6vLjsPv6ogcTF/1h76Z78bH3x/VNb4BTlcwVGcUcKqaTyOzKHQv7Ik6MLslQpegCFHHD5vqrC1dsixjarZuBKcNNhEPVY21dtBK511eJ+We4DUFOIralfOyUCNnDXnWxIwJuu06ECSpMrcEdCixC8L/YngXqYLM7XYeuL4QY7WupzNcnZcpZVB9JqcI1VzaOSndwCk9nPfgYlwMxQIdW5ZHfE6YWdfHJwR/MdKNos0nAoJcdXY+nK7X3Q8imFM6ZhwH2uPba/o+kqBi0PcAZOfUUM4kSLPQirs3WLlRjg4owXWyXoi5rrVMylCa4Yw2meXiMfsydR3XARAix9SYLbw3+yoas9lGeMFIRTLNlk7IaZ/44I2kR7ulYw3mF0vVuNR4VmsberttKqvge09uv8H1rRNdcR92F680PYii9W2Y8M2OeYA/GCjS8QMYLO2Z3hgueoZF3+KSfgW0jYaQ4vc+cIOizu9Ax2J+mXMbTM5lEoQdnH/xHpaWF66LNb4qlK4MKqoxIX9xJHjDvNKJa56Zg3eFhp/3zq1fH7RdHx0edju9O9LllUkWlbmYilSiFOW3LxaLSHEBoaVMFT9to6e0cOzv+jk1bfdXKt8iDzJMrBsJGJcz15VgK7aDm0PdriioJkPl8Px5aReDv0y6f6OkTPX2ip0/09NvpacodXSN7HOc00tBor5NX8uDG1p6ucQSlXtZhMgANhQ4LoebhdmCGPE4iNv/psQCNAJnoPU/oHGAnJSMbWc34R+2kqT3S5cD0lJIqD8OLmi4r1YSHIRPrMXjeev7v3+7Lze1eSuOdy0SE/7ntduqKvHvd90rl7DG74H9Q7fbWGySKm4WldxMgq0wdWIYzgm61hBDIT5zZp3Fj30sAQffNTCJTjKW2kaBmBk+tu3Yrkz/I5VuOJBP0Rt1B27bOJAoAjixddFcnrdZyBrpWJ8sYMHNleazidJJeXnHOZdbeS2FlJAMazZz5dceHQPtxAjsLXifwGoA14Kw3Lc3J75KZumP/2K/VhKJbtBSVUeiZGRPX6nHCtZpW7ibhMjVAObfpLDtF54g5MrKsaZKra+Amw97BIGcmHM2l68DDcsZzLamL1iF8dkIkkz7PivW390NbWHgQboo3zT33TmH9hsHDMpuy9vycIRdEuEpxSwRsk9kWk/WE1q+nq/46GS201HPQzXlHPTfHc8bpV/nkJm3MeODGBgv6N6pGTVTspgQoP+PjOjb2WC0Fe6qMONLko5WC8JSAx10X/eqVd2lrt3qBzFVXLo9YoFNpazFFqyv72wbzrpMJIDCei83TIz1qr47wYfgds1ffQGIRQiS8tAZKb3648DK1gLzeXEK2JTpkVxolE4SVmZTGOYmNAFSjWw6V8IRogKT7+/tmQOfQKWkTDGIYwEkG11yUTSN9kY40KotDGeh8NZf2uaXYlCkGXrZSRbpluW5WWqTd9Ju+gyht5lSUDO0C5LWI5SfdsAfTiiMKqLBK3VumWD0id+1SbR+UlaV4DYl1sDsiy+WEavZWRasVDn/G943YNQqUdh2FzBgNXV04ECCnrske2INSVOPGvR+7kVvRhVzHZqfsuNSBrq8GQ8Su9DewuQxxjaL3WNjw/4R8gD8sRRsdC6x2fEkiKm4T24WJ04toRxMMTx7CCjjanWVoLBYlL395bQW8ofzExK8ZCAKTh0f33uRv3C2Gpg== +api: 
eJztWW1PGzkQ/ivWfOGutySBFo5Gp5NSCFKuCDiStidBVJndCfF1Y29tL5CL8t9PY+9bNgsNbT/cC98Se94883jmcbIAy28MdC/hCENhhJIDaSyXIcI4AJWg5pYWI+iCQa7D6cd9CEDj5xSNfaOiOXQXEKEJtUhIErowdHIM7/ksidFAAKGSFqUlUZ4ksQid0fafhuQXYMIpzjh9svMEoQvq+k8MLQSQaArBCjS0OxGxRf1lORFVZIzVQt7AMoBPOK+sC2nxBjUEMFF6xq1f2n9FksZyi+s2AkCZzihZx73BSf8IAui/7528643c53enb0/PPpy6T8Pz/uHgeNA/gvEyALzlcerOfNRoeEXkmIs41c1SiVYhGnOEEyEFCb/d+EyZal7ezRWjHBjNWc23y5i+IHjKZ82HywXeozbCI+PR8F7uVrVGTvThkh31DwfDwdnpx1HvzUkfAjgZjPoXvZOP/T/OL/pD2qpVrqwo1VCjSWP7WO0wGsgktaYiw7XmcwhAWJyZrwaufChj5LdpZxmAFTampfq1diE6iSLqs9T+M8IOQKcxPgAftyUjvN8IF49kwB/Xi6Dk0jY6fMSA6xDiL9wQoL5t9iarresLKSb/Rmn79UWZCIybM6l0tBLK2lXpDQ8hgKP+8BDG1UQMKaBldeX3FPW8KT9F8+8uoBfH60OiF8csv7pMZJqG/RDhhKexZZRgJgzb6fwIJWLI95B21IRptKmWGLFYGPvgFGrwwWWUKTtVsuW87VUcZQXecw6zOjzFPtWOcROijIS8YddzttL+qo6c9cuiYFATzKrlirIc5/F4JxkINwws8FGRdHNYwZNSE2wcukNSLfaE33xF7A9mdCV0sl2G3rqSI8Vu0DI7RSbx3jqBgIUqmbs1dzJS2CIn7+mb2WJCWsW2Krd3ywu2rqQ7hUkwFJOMyzAzVWkcsRm34dQZrSiuira+KZceBrWucgkDeatEiOww5sYUriCA3d29nZ9fvz7Yebl/sL+31/E3+tgxqRKp65XIJCppzsZyqVTZG0TVQ5U8bW2k7xS9c6/zyKGdvTryl671aDSJksa3lt1OpwFEaUhM5/vxznrH/T7j8ZmOPtPRZzr6TEe/nY5mXNEPrqdxTKssjze6eZUILhz2TEMgJPWqqScfccuZkLc8FtHDjTnR6jrG2U9PbdDUIFOz4Q2doTEZ+Viras43Gjdt45WuJqavtdJFGl42TFWlr0UUoVzNwYv2i3//cV+tH/dUWXasUhn954671wTy3vmAVeDM0Cn8D9DuXrlhqoWdOzp3jVyj3nYM53K8DBYQKvVJoPs2DjYl/UDh26kiZpgo4zLB7RS60L7daefy24V825NioGj0LWrjgkl1DF1Y+Owuu+32YqqMXXYXidJ26XirFvw6e6zSnq+se4dCF2IV8njq3a8GPpoiow2aLPR8INpPGPDeW47mFG/H3NxB56DTaIlEH7BSIqO0M7U2abTjhRstLf3LwVdqSHL+0Hl1ysmRCGJk+dCEs/P+RW/U3x4WzESQu0xvGaxUvLCShegCou9eCHLp4xysv30YOWDRRbgof1nu+98QVl8UIqqyKeevUzDkkgjXKW6FgK0z23KzmdB2mulqZ5WMllaaOej6vqee6+sF4+zU+eQ6bcx54NoBS/p3Wc+arPnNCFBxx8dNbOypVkr2VFvxpKlDXkrCU2k8/nnYqT9xFw679QdjYbr2WCSATpTDYtatztx/GcjO0+tYhHQv1m+PYtw9HRkPrbhF99QNFYHQYsQyDFR+6RGS5WYnSrOZksIqCshpWq1SaitTpawPkgYBD11D8F2Jbojpttt3d3etkM9SGfFWqGaUhliEKI1Df5bpk2wlqClHKjSFtlDue1vjBDXKENuZIdN2XDeHFuy0Oq2Ob1HGzrisOHqsIa9krLjpFu9tO4m5kGTRhbfIevUl3O5UsL1dNZb163GQtd1LWCyuucF3Ol4uafkz/b5IU6Ps0n6iwBR55HHhmwAc+iG77S5Kica1dz9NI6/RC0NM7KOy48oEOj8bjqh3Zf95zVREOprfEbD5HXThCq6AoOiy4xqrW19AzOVN6qYweLvU7XhK6SlSWGuO7mR5N5bzSpS/vHECbKQ+ofw1b4Jg6av/neRvuEiA2w== sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -36,7 +36,7 @@ Search decision instances ## Request -

    Body (required)
    Search examples diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-7.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-7.api.mdx index cec8aac2145..53311a23861 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-7.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search-7.api.mdx @@ -5,9 +5,9 @@ description: "Search decision definitions" sidebar_label: "Search decision definitions" hide_title: true hide_table_of_contents: true -api: eJztWFtv2zYU/isEX7J1ji0ncZoawwC3cYBsQ5LFXveQGgMt0TYbSVRJKqkn+L/vHFKSJVtO7WIv3YYAsUiey8dzlzJq2FzT/gO95L7QQsaXfCZiYeCJTlpUJlwxXFwHtE81Z8pf/Pmatqjin1KuzVsZLGk/owHXvhKJZevTkaUj/DOLkpBrIPdlbHhskJQlSSh8K7TzUSN9RrW/4BHDJ7NMOEiQ04/cN8CYKIRgBEiB05kIDVdfphNBhUYbJeI5XbXoI19W9gVAmoO0Fp1JFTHjts7PkDLIrXHdLChmEW88eOJKC3enF7WcnlS13IMxheIRWEjv0NhE+svB16ly3+y6QxPx+4PuBa5msWm8CZ4KE+JWQ8TBqRZ/8T3VuHAczOohwZRiS6AXhkd6O1QQgZbK7M+xHYQ8bHaSVEENSn7SojxOI0yywegdrC6H8DOpmmKEgFbVnd9SrpbNFirTChQNwnA7/WCTFE6Eh4JXk+9gwdLQEDQyEZp0ve8BzxMLU7Q56h/hiZwRxU2qYh6QUGizM8MbtbA4yNktM0qz+noVVbmbe1Zl7o3DNKAPCdM+jwOwMZkuiU3Kqgor96F0GM0Jci9ZZ6wmBQInNA++vaG0HA6krwNpHWSG1hfB2pjZQJuw+Veh3bJaDSxKXYNtf4jHksy5IWbBScw/G0vQIr5MlnbP3gUZjlD4e1zpIwJJKmFnnaFHjhDkWfw6AXyzvA8QvZBpGBDIb0COQiuMddL2V1nPuXqjYrieF3Cr8FLoBTCcnPS6r9+8ueienl+c93qeS9Mr23bWYbdt9JwC7Jl3gDUx7FW6SQX9upmVXaNbFsCe13CrmpzNMLblQ7maDcRGpdxu6AQ876rFiec1xEvq+1z/g016s4weWF7/793faO/OG6urAIc1ZCMNC/eyRwXDPdfQy3QjFKQ7a4r2S2YYFCdIQhHsDnmIyGnIox8ODX1tmEn1npaOIOvyCr7lRxGDqNhvPnQbL7lnqJRUpRlOG+qVVFMRBDyu2+BV59W3f92z7eveSEOuZBoH/7rr9pqCfHB3TSrhTLhl+A9Eu30p8FMlzNI2zin0e66Obe94mKxaGZhAPgpuV5PW/pMTxQuYhcQunEhtbcHMAladp26n4DiucHTcrEERkcLWYgGlCsoczZyFV/1OJ1uAtFU/S6Byrux0oASb5tM9njnv2rEdOEPps3DhANTBj2GIwQNsbziN4VCDceC0t9GSSTloF+IuvAuvURKS7pCyjo61nIUxSaMcR9woaeUGMuetEdK5SxceWvePRGC3LDo3vb0b3g/Gw+PRcDS6vr3BzoLqcj5AWPV6KSWHaAHh2hHRgvqqCNif/xjb4MJkuF9/5Bi6l6763CaC6kue1efV54/1aQ6+XJfThrd7llhT7xwhdnDfbGh7cS7wql2/knluEvU2x+bMOm5zNi1Vbcyl6J2ZtI7I0/XWflPi5C6dQgnCoNgOHUmYnUrhx4gnbqdpX6IHDLyTQhmyh0XdwCdSiIXSQyIJWSgRkOU0SqaYUwspjQOJlRBEIyyXkhgeGvLx+fm57bMIWgVrg0I0A4DkMEEjbe7EX/Od1gZzIH1dcgtp1x3FZ1xxQNnJBelObdqk3bbX9lx+ahOxuKLo5YpUs1kZ6Abe0TpJyIT9WGABZnmxeqBP3UowHNfF5QULnOvqzgPNsinT/HcVrla4/Qm/SWDpXJcpV1bpgrPAxYbLAvrOdZrjMWIq33m2XyuwJDuOAfg7MS/STipF+O52NMbkzb8/RjJAHsWe8dsk/O/TD/CH4Zi4+wGR3c9oyOJ5alsRdXIx3VmKBiqNuFEd7M2KchQvKyh/fGsJyFg+8vinogrAQAtL91r2Nx50W08= +api: 
eJztWFtv2zYU/ivEecnWKb6kdZsKwwC3SYFsQ5LFXveQGAMtHVtsJVIlqaSe4P8+HFKWZVtO42Iv3fYmkefynfuRSrB8biC8hTOMhBFKnuFMSGGFkjAJQOWoOb1cxBCCQa6j5M9XEIDGTwUa+0bFCwhLiNFEWuSOLYSRo2P4mWd5igYCiJS0KC2R8jxPReSEdj8Yoi/BRAlmnJ7sIkcIQU0/YGQhgFwTBCvQ0O1MpBb1l+lE3KAxVgs5h2UAH3HROBfS4hw1BDBTOuPWH718QZRx5Y2LdkGSZ9h6cY/aCG/To1qenzS13OCnQmjMUFqzR2Mb6S8Hm9PkvtxnQxvx+4Pssii5tK2W0K2wKR21ZNwyACP+wieq8ek4nG2mBNeaLyAAYTEzu6lCCIzS9ukcu0mIaXuQlI43oFQ3AaAsMiqy4egtBHB2PnoLk6YrRgRo2Tz5rUC9aPdQXVZhCcM03S2/YZqyVRBZXPMa9l2MM16klpGTmTCs3/seArjnaUE+J/0julEzptEWWmLMUmHs3gpv1cJlXLE7ZpLm9A0aqqowD5zKKhqHaaAYMm4ilLGQczZdMFeUTRVO7m0dMKgIqii5YCwnKwReaJV8T4YSeBxEvwkkOMgNwRfBupzZQpvz+Veh3fHaBliSugbbuZNjxeZomU2QSfxsHUHAIpUv3JmzhRiOSPh7ejNHTEir2FGjQo88YedOOvwmx0jMqjnATKKKNGYZt1HihDYYN0k7X+U9H+qtjuFnXoxO4ZkwCQRwcjLov3r9+rT//OXpy8Gg58v0nRs767TbdXpFMV2wagKsiacL1pgmDfTrYVZPjX7dAAe9Fqs25Gyn8dL1D40mV9L47nDS67XkRxFFaP7BobzdNg9sp//P6m90VleD1Ff8YQPYKsvTJ/mjgeEGTZFa0wqF6F60ZfsZt5wJec9TEe9P+VyraYrZD4emvrHcFuaJns7QmKpj78RRSGO5jNov/cFj4TnXWunaDc9b+pPSUxHHKDd98Kz77Ns398WuuZfKsneqkPG/ztxBW5IPry9YI50ZOob/QLa7j4Co0MIu3KCcIteoj93suJ0sgxIipT4KdG+T4OmbEpABNlE0dXNlnC+4TSCE7n2/u+I4bnB0/W4BhEjTaHGACp1CCKX38DLsdstEGbsMy1xpu3TbgBZ8Wm3zdOej69Z0CCFVEU8TD2AT/DhBRhc03mj7oiWG8sBr75An83qxXok77Z32WiUR6R4p6+xYy0mszVvleOJWSUu/gPlojYjOG72K0Hp+5IKm5Wpyw9X1+c1wfH48Oh+NLq4uabKQuopvGWxEvZZSQXSA6N0TwYr63Sphf/5j7JKLiuFm/VPj3H9kbe5pIm5+1Dl9vc39Y31bga/f622jt3+XWFPvXSH2cF9uaXt0L+g1p36j8vzm2dtek0sXuO1dtFa1tYdSdGbKBaIq1yv3DwnZdTFNRURJsZs6inG3lTIeWXGPbnuOFEXAYsxyrdzlqm/QE1uJnSnNMiWFVQTIcVqtCqqpRCnrQVIn5JGrBl+SlB4m7HYfHh46Ec8KGfNOpDJyQyoilMaFvgrir9VJsMUcq8jU3EK5967GGWqUEXYrQaa7sW1Cv9Pr9Hx9Gptx2VD0eEfa8Fmd6BY/226ecuF+DjiAZdWsbuG+30iG401xVcOaBFXfuYWynHKDv+t0uaTjT/QPglrnuk35tgoJ8tjnhq8CeOsnzfGYMNXfOLufFdSSPccwijC3j9JOGk34+mo0puKt/jdmKiYezR/oXyR/gBDu4A4oHXNvX1j68xJSLueFG0Xg5VK584IcVDtxqzs4y1btSC4aKH984wjYWH1E+dOqC4ClV/8Z9jeJZlWE sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -36,7 +36,7 @@ Search decision definitions ## Request -

    Body (required)
    Search examples diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search.api.mdx index 4abf8ac4b93..0bb5e4e40e3 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/search.api.mdx @@ -5,9 +5,9 @@ description: "Search variables for process instances" sidebar_label: "Search variables for process instances" hide_title: true hide_table_of_contents: true -api: eJztWOtv2zYQ/1cIfnHbObGcpo8Ew4A0TYBsQ5PFWffBywdaom02EqmSlFNP8P++u6Msy7aSOMAwYA8EiE3y7niP3z3oknsxcfx4yD8Lq8Qolfy2y00urfDK6IuEH3MnhY2nvMut/FpI5z+YZM6PS55IF1uVIx1QDYiKzSoxDuhjo73UHmlFnqcqJpm9Lw4ZSu7iqcwEfvPzXIIIM/oiYw+MuUUNvAIpcDpWqZf2abo7OW8QKbh7AmxdPjY2Ez5svT3kC+KLpXMX2nmhY/nTzowuhgt3J9cikw1S563SEzyYibRoP/G20OAnmTROR8akUmg6llpof5G08OKp8ilu1bFEldUf8kl1Xx8QKYXwZLzubWGtmAO98jJz21HAe52xfneO7fjKtM0gwKFN1lSpTrpc6iJD0J4MTmH18Qw+bpsOGKBCi+bOL4W086Zf5DeR5WnQ4CRNtwF9LX1htWMiTVeoZi8SORZF6pmlY5Yq5xn6mCnH+tFLvgouXj+ovN8u+iBqSG5wVkE7iFDEOcGfCZ2wpZ93UfRe+SnrbCO9wzpHUfSuf3R08Obw3WG/f/S2Q4JlwoSLpU7AxWw0ZwTehk6rNGxLH74plNe4GNYh5pXMKq4UvgUF7kpMMLIP2qblN98wDrDbbtuaBR28rrPPXpyafM7IEGbGrINqfcaV6zDSDHdzK2fKFA4C6yC+7iX7W4zfyLohdxnEkXfXJB5EEXiJ8Iw1WFksD1ApJG243GgXgAyELZW5iFHjv64gb+b1M/P9/zq9XacrvARUPq/4euNFupOhjZuvA8gbCuDpYRt8PgovmNLgCpU8jCEIFcjJvnsuliCyvnBPqh8aVAZgEJP2gKgKJO3Roo3HQnFmrbG1G15vu+Hc2JFKEqnXffCq9+qfb+7htrmfjGfnptDJv87cN20gP7m6YA04M0kM/wG009gXF1b5OTWrEXQjafeoRg9vF90SXGDulKQVdKvHh35qzFXVZksdsfNk0k8NNsHcOHKL8FNY9Wb9Xs3cqx8aTtqZtI4UKiwUN14GDy+Oe71yCiIWx2UO9XJBPXr54gCT8SxEl6Y04ExNLNJpuHVd+ZupZHiA1R8nAA9rxEG4fZ8aUD1wLcW9j95HrZKQ9AEpK3Ss5Ey9z1vlBOJWSYswLoRoDZAuGL2M0Kpr5Aqb37Kx8curs+uTm7O9wdlgcHH5CfsJXlfxgYbNqNdSKhVJIWpvRMSX1OdLwP742w2BC5PhevVEPAsD9vr0RDdE7Z09avbtqFa+HvqrZrzaaPRgHIaaXbeRDWGWjjYHrZKcuTmj1bI3RlT02NiQc6oUuqQ3smRXxQjKAgZqO5yGCRq94MOrmaQZPjboFZy2t/IEvrGlWEykzGjlDSpEnN6aAnE+NcYHJbE6gWhUK6QJhsxBjtzf3+/HIoPyLfbhQnQDKClhTETayrE/VzvdDebExK7mVobWPSvH0krQslcJcj0akCBNg7H9/Wg/CjnjfCZ046Kdq8Sa+2ocepj9e3kqFE1WpGtZFZAhn/XXSsAyyvgrRqgFQ16WI+HkrzZdLHD7K74EsZyt+EKp41MpkoCNgFN+Gqr/3g0qsgLg1uyMZTJwnEC8c/8o7W2jGl5dDm4woapfVDKTII8V9/hrC/w/5r/DH8KRnELZTvslT4WeFNQeeJCLKSgK9MpqIF3PWLJsWSL0vKHl9x+IgN2YO6l/gINgjcclvdAWfwKwvivT +api: 
eJztWFtv2zYU/ivEeXHbqZGcppcYw4A0TYB0Q5PFWfeQ5oGWjm22FKmSlBNP0H8fDinLsq02CTAM2CVPEc+F5/KdC12B4zMLo2v4yI3gE4lwE4Eu0HAntDrLYAQWuUnnEIHBryVa91ZnSxhVkKFNjSiID0Yw9lxs0aixEEGqlUPliJcXhRSp1xl/tiRQgU3nmHP6zy0LhBHoyWdMHURQGLLACbREnQrp0NzP9wWXHSahHM7QQARTbXLuwtGrA6i9XIrWninruErx5wcL2lQXj2BXPMcOq3VGqBkRFlyW/RRnSpVyh1mHOtFaIleejIord5b1yBJVOElHbS7JZPEH3mvui33P6lN4NN2MNjeGLyEC4TC3u1mge6027uESu/lF2edQBNpkG6Y0lAhQlTmB9mh8DBG8Oxkfw003AGMyqO6e/FqiWXbjgnc8L2Sw4EjKXUBfoiuNsoxLuUY1e5LhlJfSMePJTArrGMWYCcuGyVNYJ5euHzfR71e9n3Q0dySbpO0npOLUw59xlbFVnB9i6K1wczbYRfqADQ6T5PXw8HD/5cHrg+Hw8NXAK8aMcZuiyoSascmSefB2bFqXYV/5wLZSaHFx3aYYGp1NXn36ap+4Cz6jzH7TN4V3ruPcVJt+3zY8GNB1gz325FgXS+YdYXrKBmTWR/qyA+Yto9PC4ELo0jKDtpTOPmV/i/NbVXcNNudSQrShcT9Jbmr/R03YFlrZANz9JOnpxGVKFv51DXi7jh9Z3//35d2+3OAjoPBxzdZpx+WDHO3cfBlA3TGAqAd98HnHHWdCLbgU2bcxVBg9kZj/8FgsWcddae81PwykHK3ls/6EiAYk/dnyB99LxYkx2rRheLEbhlNtJiLLUG3G4Fn87J/v7sGuux+0Y6e6VNm/zt2XfSA/ujhjHTgz9AL/AbT7NS8tjXBLP5wmyA2a575HX9/UUQWp1l8E+q+b6J4l3w/ipmuzlY00eXJ0c01Dr9DWh4W7OYwgXgzjVjhuHxYWzQKN9QaVRsIIqhDhehTH1VxbV4+qQhtX+5m8emGMKiBayK7fymAEUqdczsOtm8ZfzZERgbo/TXw3R0Y4CLfv+QHULlgrdW+SN0mvJmL9hpY1OtZ65s4VvXoCc6+mOqwHIVtj4gtOrzK0nhqFoOG3GmxwfnFyeXR18nx8Mh6fnX+geULXNXJ1tJH1VktjojfIjzfPBCvu0xVg3/9+5cFFxXC5fhKehIV6c1vyNyT9kz3pzu2kNb5d8pthvD7ozGBnSuxO3U41hN052V6sKh/M7Z2s1b21klLEptoHpymhc/8mRnZRTqRIKVG76dSM+9WL8dSJBfqdPdUUFdqud+qECcVWaqmQcq2E02SQl3RGl4TzudYuGEndiaceoaFMKGV2FMe3t7d7Kc9LlfG9VOcUBilSVNanownsL81JtCWc6dS20kL779jgFA2qFONGkY39goTGBmeHe8leEmrGupyrzkUP7hIb4Wtx6PDOxYXkwm9W3taqaSDXsBhutIBVlulXi9ALrqGqJtzib0bWNR1/pZcftbO1XGh1MEeeBWwEnMJx6P7Pr8iQNQB3dmdqk0HiKE2xcN/lvel0w4vz8RUVVPMLSq4zkjH8ln5d4bcwgk/wCQiOPii+2v15BZKrWenHAwS9VIK8pKisF9LNivWerVqEWnas/PGtZ2BX+guqnyBqvHH06V9k9Z/w5iYI sidebar_class_name: "post api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- @@ -33,7 +33,7 @@ Search variables for process instances ## Request -

    Body (required)
    Search variables diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx index 6d1356d49e5..b378b45273b 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/sequence-flows-by-key.api.mdx @@ -5,9 +5,9 @@ description: "Get sequence flows of process instance by key" sidebar_label: "Get sequence flows of process instance by key" hide_title: true hide_table_of_contents: true -api: eJzlVktv2zgQ/isET93WsZw2WwTGokAKOIXbogliFz0EOdDS2GYjkSo5ctYQ9N87Q0qOHyp297hoDhE5nPd8M+Naolp5Ob6Xt86m4P3UeFQmBfkwkLYEp1BbM83kWHr4UQG9XOf2yb/ffoKtHMhSOVUAgmMdtTR0IdbH8KYNHUuFazpn4FOnS1ZGRJIVdinKaFLozuZAOjKiHZA9dBUMpE/XUCg5riVuS1atDcIKHLEurSsURtLbC9k0DyzuS2s8eJZ4PRrx59D0rErZJsmnllQZZJaXyUv+nBhTzqkQCULh9+genTYrMkl/A3nRZ2dqNirXmeCAwOOhPVWWuU5DahNKwiKH4tV3z3I9TtjFd0hZAXFSQVDH6ChlWPl/zMyb15JcLChktYKeELhMbfb7HiPhNHB60ZgzaeKcdbLLxJvTTFxbt9BZBubf5fx/Fe7Fabh3seDApfe2cikIY1EsbWWy3wMFf/b1w9XtVOwFLCAI/Ab5IKKHtHIat2FELkA5cGc8Isf3D82gphTYRw3h9nA8KT8Aim7wiiVP3r7BKRZbEWcuzeK15Wm9gpAcnr5jmWzOk1bmrJPxSU0iTdJpPwvaJXvrNt08r1xO4nXMfjNOknptPTbjurQOG2LeKKcVVSqknN9i5ZeqyjnVuU1VHsjHgc3XIPiBNwaHhHRnjETrQ84y2zhUdzm6HPVqYtZfaHlGzrOeNWLZqycy92oK+6Wr5Iz5YtBd9Z6XRqnjbmyX4c3t5O5qPjmbTWaz6c2XbjG2cuThPiJ2WloXg0N8j0yy477uwPzx2zwAT5ulDeItAG/C7gZxWy2oqTiU04CtUGEZ0gf1BoQymUhtUebA4+sYZHwSnVpqJlFYo9Ey9IMkOlsxEtbWIrdD7G1SzW5FIHFQnlD09PQ0TFVBE1ENySAngZwEWtzM2+btc0sZHAlnNvU7aW3DPXGwBMcwTlpFPmGtDOQY7PlwNBxFVHkslNkz9F977CCLu4Ih/I1JmSsqbtO6XLf9dy8353FSHXYg0cZR5VETEtJiL93Lul4oD19d3jRMJjYXBsdz64VGzbTnM3X+UuUeTpzcjVn54q79ifWH+PXPsN6YOnybbWj8vOIbHQNww/+G5plcg8oIqexVfLkikJW4J8Orn7tpN6w+TOb0rCpO1i6hR4gPCntd+Ot9YBBz+wjm3c4h5Cu71DQ/ARnwu5g= +api: eJzlVt1v2zYQ/1eIe9o6xnLabCiEYUAKOIXXYQmSFH0I/EBTZ5uNRCrkyZkh6H8fjpSc2Fax7XGoX2R+3O/ufvfFFkitA+QPcOOdxhDmNpCyGmEhwdXoFRln5wXkEPCpQavxqnTP4cPuE+5AQq28qpDQM0YLVlUIOTzGM2Mhh1rRBiQUGLQ3NYNBDp9wJ9xK1EmlMINOCR6fGuOxgJx8gxKC3mClIG+BdjVDG0u4Rg8SVs5XitLWLxfQdQsWD7WzAQNLvJ1O+XOo+q7RrBMkaGcJLfGVN9kb/pwoU96r6AlhFV7tB/LGrqHjn4SLMT1zu1WlKQQ7hIEO9am6Lo2O1Ga1d8sSq5++BpYbMcItv6JmgNpzQMgk7wIpasI/MvPuLXQSKgxBrXHEBQ5Tz/7YYdo4dVwCGSp5a+a98zAw8e6UiSvnl6Yo0P47zv9X7l6cunubAo4c+uAar1FYR2LlGlt8H1nw81g9XN7MxSuHBUaB74CPTkJA3XhDu9gil6g8+jNukfnDopMtaOceDcbV4rhTfkQSQ+MVK+68Y41TLHci9dwKaeO4W68xksPdN4dse571MmeDTMjaR9x12YB+FtGBrfXboZ83voQc2sR+l2dZu3GBurytnacOJGyVN2pZJvL5LEV+pZqSqS6dVmXcPnbsfoOCD3hisEu0QcE5krRPmGXWcQj3fvp+OorEV7+B8pI5LzgbonoUJ10eRYrzZYjkHd9LTg/RexkatUmzsR+G1zez28v72dnd7O5ufv3nMBh7uU4eZMQepTcxGsTrdAmG21dDMv/+5T4mnrErF8X7BLyOsxvFTbMsjWZXTh12QsVhKJQms0WhbCG0q+oSuX0dJxn/EwPsynlROWvIcepHSfKu4UzYOEdcDqm2lY4xTInEToU8y56fnydaVY0t1ES7ikkojUYbIo89b3/0O/JIuHA67KWNi+vM4wo9p3HWA4WMUTmRk7Pnk+lkmrIqUKXsK0X/tcYOWNwHjPAvyupSGctaosltX38PsD1PneqwAkFCniCPinAh+1p6gLZdqoCffdl1vP3UoI+N46X0YqEWJvD/AvKVKgOeGLlvs/DDbf/E+lF8+xk26tOQ33YXC79seAUyvvbSm69bdBI2qAr00ap0cqk11vRKhkc/V9O+WX2c3YME1TBZe0KPMj4Cjprw64d4Qdy7R7S/7Q0iXrJJXfc3GfC7mA== sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx index e9df1fff308..434048b7599 
100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key-1.api.mdx @@ -5,9 +5,9 @@ description: "Get decision requirements as XML by key" sidebar_label: "Get decision requirements as XML by key" hide_title: true hide_table_of_contents: true -api: eJzlVm1v0zAQ/iuWP/FSmu4FhCqEtIluKhtsWotAqibkJtfWW2IH2+moovx37uwka9cg+Ir40vrl7rm3584puRNLy4cz/gFiaaVWN/CjkAYyUM7y2x7XORjh8GKc8CH/maWnmwvYfD/gPZ4LIzJwYAig5Ao3KHIPG7yTCpe5cCtcJ2BjI3MCwUPUZnrBktoeM9sGe7zeojFnCuhxG68gE3xYcrfJCV8qB0swKLrQJhMuHL055lV1S+o218qCJY3DwYD+du1PijgGS6ZijVDKeWz46SIMjtZ7Fq0zUi0Rv6p6/LgLc6zWIpWJjwWs28UWeZ7K2Ocwyo2ep5C9vLOk12FLz+8gJgCUxMw7GSKxTrjC/jELR4ccXcwwPLGEjgioLgil4u7LcLAfN95Il9LRyBht2kwc7WfiTJu5TBJQuzl4Eb3498M93g/3JhQcqPRWFyYGprRjC12o5P9gweuufji5HrOtgBl4hf8gH3hoIS6MdBs/E+cgDJhXNBOHs9uqV2IK9L0Ev7t9OhrPwXXPRSYs+/bpks03LIxXHLsrTQN5CT4tNGiHPFofRIlJohKFKj/OyB2zbiZ0YXDA8TKktxpGUbnS1lXDMtfGVSi8FkYKLIXPKd2F0i5EkVIuUx2L1B8/9Xy6AkYX9AbQdHe4JxIE631KI9nYhXs7eDvoRCLR36A8UuMRZ+Vc3okThDuR/GPRlGpCciHopjxtuUUuL3zK6+ft6np0czIdvZqMJpPx1efmqav10MPtkrcotYveIdoHId5InzVs/fh16pkl1UJ79ZphV/4VBnZdzLFrKJT9gDUT/mXDPyfXwIRKWKyzPAWaT9g5/rKhOq1YA4vdwjKtpNPEba/pjC6ICSutHfE9NC9Ck1uBSBSURRY9PDz0Y5HhyBN9NEhJQCcBX2GSrfN2WZ/0nignOratttR+HxlYgAH0MqqBbESoROQQ7EF/0B8EVlmXCbVl6O+baCd/ban8p0CeCixrVTtb1g0242v67sEWw99hAKEuQyqFZpnxspwLC19MWlV0jK+D8a3/2Fu+ExNpaY0dvBCphT1f2kHJn9UfZMlz9ocvp07/GxarjW/vtKAdLj09/W+FY4mvQCTIR3It3JwglXK3pdN+IVHjtOPnfDRFGVFQdtoMPiG3R+30492pF2BTfQ/qfeuVoy35VVW/APihoiw= +api: eJzlVm1v2zYQ/ivEfVo71nLarCiEYUCKuUWWdgliFy0QGANNnW02EqmSlBND4H8vjpSUONbQfR36SeLLPXf33AuvBS82DvIb+BOlcsroa/zWKIsVau9gycHUaIVXRp8XkMN9Vb7dX+D+nxPgUAsrKvRoCaAFLSqEHG5xDxyUhhxq4bfAoUAnraoJBHK4wD0za1Z0+ph9rJBDtywg97ZBDk5usRKQt+D3NeEr7XGDFjisja2ET1uvTyGEJYm72miHjiReTqf0OdQ/b6RER6qk0R61j9h477P7qqT/I43OW6U3EEIIHE7HMM/1TpSqiL6g84fYoq5LJSOHWW3NqsTq16+O5EZ0mdVXlARQW2Leq+SJ88I37ocsvHoJgUOFzokNjnhAcXFeaDl+mDaO/ebglS9pa2atsQMTr46ZeGfsShUF6kMOnmfP///unh67e50CjhR6ZxorkWnj2do0uvg5suC3sXo4uzpnjxxmGAV+Aj4CB4eyscrvY09cobBoX1BPzG+WgbcgjblVGFfLp63xPfrxvsiEY18+fmCrPUvttUK/NdSQNxhpoUabQ7Y7yQpbZO0t7kNsZ2SO3fUdurEl5NAmekOeZe3WOB/ytjbWB+CwE1aJVZnYpbMU2rVoSuKyNFKUcfup5YstMjqgN4C6u98ioyRI2idEI+k4hHszfTMdRaKr/4LykBoPOFvv61GcdHkUKT4WfajmdC853YdnCLeo1UWkvHveLq9m12eL2Yv5bD4/v/y7f+o6ucAPQj6gdCZGg2idLkF/+12frX99XsTMUnptoniXYZfxFUZ21axKJcmVY4cNE/FlY0J6tUMmdMGkqeoSqT/V1sTDPtXpj/Wwa2NZZbTyhnI7SnprGsqErTGe8j0Vr5AxhimRyCmXZ9nd3d1EiqrRhZhIUxEJpZKoXeSx4+1Dt8OfCBdGukFambjOLK7RopaYdUAuI1RK5OTsyWQ6maascr4S+pGi/15EB/wNoYqjQF0KpQk/Gtt2BXYDO5p7Cku9PU8gVGVL3hXLDbTtSjj8ZMsQaPtbgzaW/kNtxUoslKP/AvK1KB0e2TI0SvilG8iKZ+wHk9Oo/X0W630s77KhFfA4paVZLSwDhy2KAm00LZ2cSYm1fyQzTEhUOEP7eT9bAAfREDsDg0+SO6KO2vH723iBLcwt6j8Gqzwtya4QvgP4oaIs sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key.api.mdx b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key.api.mdx index a4d4935afc8..a00d32b6d33 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key.api.mdx +++ b/versioned_docs/version-8.6/apis-tools/operate-api/specifications/xml-by-key.api.mdx @@ -5,9 +5,9 @@ description: "Get process definition as XML by key" sidebar_label: "Get process definition as XML by key" hide_title: true 
hide_table_of_contents: true -api: eJzlVltv0zAU/iuWn7h0TccGQhFCGqJDg8GmtQikaQ9uctp6S+xgO92qKP+dc+wkXdtweUW8tL6c853bd45TcScWlsfX/NLoBKx9D3OppJNa8ZsB1wUYQZuzlMf8Ic/erT/Bmg94IYzIwYEh3Yor3KDAnb+TCpeFcEtcp2ATIwuPF3PUZXrOimCKpRtbA27gRykNoB1nShhwmywhFzyuuFsXBC6VgwUYFJ1rkwsXjl4d87q+IXVbaGXBksaL0Yj+to1PyoSson6iEUo5jw0PLsK4aL1n0Toj1QLx63rAj/swz9RKZDJl5DxYt40tiiKTiU9fhCHPMsif31rS67GlZ7eQEABKYtKdDJFYJ1xp/5iFoxccXcwxPLGAngioKAilkv7LcLAfN95Il9HR2Bhtukwc7WfiVJuZTFNQ2zl4Fj3798M93g/3KhQcqPRWlyYBprRjc12q9P9gwcu+fji5PGOPAmbgFf6DfOChhaQ00q39QJyBMGAOaCDG1zf1oMIU6DsJfnezOxc/gOsZikxY9v3zOZutWRisOHCXmgbxAnxOaMTGPFodRo3ywUbZRhUq1X62kW9m1c7q0uC041XIdR1HUbXU1tVxVWjjahReCSMF1sUnmO5CneeizCixmU5E5o93w5gugdEFvQY05x3uiRHB+pBySja24V6PXo96kUj0Fygbnmxwls4VvThBuBfJvxxt3SYkF4Jua9XVXhQyvHvNQ3dxOb46mY4PJuPJ5OziS/voNXro4eP6dyiNi94h2gch3kqfttT9+G3qaSbVXHv1hm4X/jUGdlnOsIUolP2ANRP+mcM/J1fAhEpZovMiAxpWLcla3tOKtbDYOizXSB9NRPeazuiSmLDU2hH5QycjNLkViERBWWTR/f39MBE5zj8xRIOUBHQS8Ekm2SZv583JYEc51YnttKX2+8jAHAygl1EDZCNCJSKHYA+Ho+EosMq6XKhHhv6yo7aS19XJfxQUmcCa1o2nVdNt13x1GMbRbr/haRxAqeWQV6FzrnlVzYSFryarazrGd8P4obBpNN+WqbS0xvaei8zCnm/dCOVPrppPpafsdx9UvcG0fFZr3+hZSTtceqL63xqnFV+CSJGZ5Fe4OUFSFe6RTvfhRC3UDaYP4ynKiJJS1aVzh+YetdePN++8AJvqO1BvO68cbcmvuv4JhPioRg== +api: eJzlVm1v2zYQ/ivEfdo6xnLabCiEYUCKuUXWbgliDxsQ+ANNnW02EqmSlBND4H8fjpTk2NZevg79JPHlnrt77o0teLFxkD/AnTUSnfsZ10orr4yGJQdToxW0uCkgh+eqfLf/iHvgUAsrKvRoSbYFLSqEHB7jmdKQQy38FjgU6KRVdcTL4SPumVmzOqlixUEXB4tfGmWxgNzbBjk4ucVKQN6C39cErrTHDVrgsDa2Ej5t/XAFISxJ3NVGO3Qk8Xo6pc+x8nkjSStwkEZ71D5i47PPnquS/s80Om+V3kAIIXC4GsO80TtRqoKR8ej8Mbao61LJSF9WW7MqsfrusyO5EV1m9RklAdSWSPcqeeK88I37VxbevIbAoULnxAZHPKCgOC+0HD9MG+d+c/DKl7Q1s9bYgYk350y8N3aligL1MQevslf/f3evzt29TwFHCr0zjZXItPFsbRpdfB1Z8P1YPVzf3bAXDjOMAl8BH4GDQ9lY5fexIa5QWLQX1BDzh2XgLUhjHhXG1fK0L35AP9IUmXDsz18/sdWepcZaod8aasQbjJxQi80h211mnfDFQdhl7SPuQ+xtZJvd9b26sSXk0CauQ55l7dY4H/K2NtYH4LATVolVmaimsxTntWhKIrY0UpRx+9SNxRYZHdA0oD7vt8goI5L2CXFKOo7h3k7fTkeR6OrfoBzy5ICz9b4exUmXR5Hi5OjjNqd7yek+VkPsRa3S3OsG3e3d7P56MbuYz+bzm9vf+qHXyQV+FP8BpTMxGkTrdAn62+/71P3lj0VMM6XXJop36XYbpzGyu2ZVKkmunDtsmIhjjgnp1Q6Z0AWTpqpLpGbVJ1mf9/THeti1sawyWnlDiR4lvTUNZcLWGE/JnypZyBjDlEjklMuz7OnpaSJF1ehCTKSpiIRSSdQu8tjx9qnb4SfChZFukFYmrjOLa7SoJWYdkMsIlRI5OXs5mU6mKaucr4R+oeg/VtQReUOc4qOgLoXSBB4tbbtqe4DdZWpHp/UGHPIESiW35F3lPEDbroTD320ZAm1/adDGpnAotFiWhXL0X0C+FqXDM9uGFgrf3HdPpW/ZPz2oRp3p81nvY6GXDa2Ax5dber+FZeCwRVGgjXalk2spsfYvZIaHE5XQ0Jg+zBbAQTRE1UDnSZpH1FE7fnwXL7CFeUT902CVpyXZFcJfhPioRg== sidebar_class_name: "get api-method" -info_path: docs/apis-tools/operate-api/specifications/operate-public-api +info_path: versioned_docs/version-8.6/apis-tools/operate-api/specifications/operate-public-api custom_edit_url: null hide_send_button: true --- diff --git a/versioned_docs/version-8.6/apis-tools/operate-api/tutorial.md b/versioned_docs/version-8.6/apis-tools/operate-api/tutorial.md index c69436c108b..b353e6462d2 100644 --- a/versioned_docs/version-8.6/apis-tools/operate-api/tutorial.md +++ b/versioned_docs/version-8.6/apis-tools/operate-api/tutorial.md @@ -124,7 +124,7 @@ body, ```js async function fetchDiagram() { return fetch( - // Replace {PROCESS_DEFINITION_ID} with a process definition id. + // Replace {PROCESS_DEFINITION_ID} with a process definition ID. // http://localhost:3030 is the URL of the Proxy server, which should stay the same. "http://localhost:3030/v1/process-definitions/{PROCESS_DEFINITION_ID}/xml", { @@ -180,12 +180,12 @@ async function fetchDiagram() { ## Show statistics on the diagram -1. 
Add a new function to the `api.js` file that fetches the flow node statistics for a specified process instance id: +1. Add a new function to the `api.js` file that fetches the flow node statistics for a specified process instance ID: ```js async function fetchStatistics() { return fetch( - // Replace {PROCESS_INSTANCE_ID} with a process instance id. + // Replace {PROCESS_INSTANCE_ID} with a process instance ID. // http://localhost:3030 is the URL of the proxy server, which should stay the same. "http://localhost:3030/v1/process-instances/{PROCESS_INSTANCE_ID}/statistics", { @@ -230,12 +230,12 @@ fetchStatistics() ## Highlight processed sequence flows on the diagram -1. Add a new function to the `api.js` file that fetches the processed sequence flows for a specified process instance id: +1. Add a new function to the `api.js` file that fetches the processed sequence flows for a specified process instance ID: ```js async function fetchSequenceFlows() { return fetch( - // Replace {PROCESS_INSTANCE_ID} with a process instance id. + // Replace {PROCESS_INSTANCE_ID} with a process instance ID. // http://localhost:3030 is the URL of the Proxy server, which should stay the same. "http://localhost:3030/v1/process-instances/{PROCESS_INSTANCE_ID}/sequence-flows", { diff --git a/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md b/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md index b4bfe5f0bf6..6cff515bf51 100644 --- a/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md +++ b/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md @@ -170,6 +170,29 @@ public void handleJobFoo(final JobClient client, final ActivatedJob job) { } ``` +You can also control auto-completion in your configuration. + +**Globally:** + +```yaml +camunda: + client: + zeebe: + defaults: + auto-complete: false +``` + +**Per worker:** + +```yaml +camunda: + client: + zeebe: + override: + foo: + auto-complete: false +``` + Ideally, you **don't** use blocking behavior like `send().join()`, as this is a blocking call to wait for the issued command to be executed on the workflow engine. While this is very straightforward to use and produces easy-to-read code, blocking code is limited in terms of scalability. This is why the worker above showed a different pattern (using `exceptionally`). Often, you might also want to use the `whenComplete` callback: @@ -357,7 +380,7 @@ camunda: execution-threads: 1 ``` -For a full set of configuration options, see [ZeebeClientConfigurationProperties.java](https://github.com/camunda/camunda/blob/main/clients/spring-boot-starter-camunda-sdk/src/main/java/io/camunda/zeebe/spring/client/properties/ZeebeClientConfigurationProperties.java). +For a full set of configuration options, see [CamundaClientConfigurationProperties.java](https://github.com/camunda/camunda/blob/main/clients/spring-boot-starter-camunda-sdk/src/main/java/io/camunda/zeebe/spring/client/properties/CamundaClientProperties.java). :::note We generally do not advise using a thread pool for workers, but rather implement asynchronous code, see [writing good workers](/components/best-practices/development/writing-good-workers.md) for additional details. 
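For illustration, a non-blocking worker with auto-completion disabled could look roughly like the following. This is a minimal sketch only, assuming the `@JobWorker` annotation and Zeebe client API shipped with the 8.6 Spring SDK; the class name and the worker type `foo` are illustrative, not part of the documented example:

```java
import io.camunda.zeebe.client.api.response.ActivatedJob;
import io.camunda.zeebe.client.api.worker.JobClient;
import io.camunda.zeebe.spring.client.annotation.JobWorker;
import org.springframework.stereotype.Component;

@Component
public class FooWorker {

  // autoComplete = false: the worker issues the complete command itself,
  // matching the `auto-complete: false` YAML override described above.
  @JobWorker(type = "foo", autoComplete = false)
  public void handleJobFoo(final JobClient client, final ActivatedJob job) {
    client
        .newCompleteCommand(job.getKey())
        .send()
        .whenComplete((result, exception) -> {
          if (exception != null) {
            // The command could not be executed on the workflow engine;
            // react here (log, retry, or fail the job) instead of blocking with join().
            System.err.println(
                "Completing job " + job.getKey() + " failed: " + exception.getMessage());
          }
        });
  }
}
```

Because `send()` returns a `CompletionStage`, the worker thread is released immediately and the callback runs once the engine has acknowledged the command.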
diff --git a/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/getting-started.md b/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/getting-started.md index ce41f8b036c..dcfa9fa05ce 100644 --- a/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/getting-started.md +++ b/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/getting-started.md @@ -8,36 +8,20 @@ This project allows you to leverage Zeebe APIs ([gRPC](/apis-tools/zeebe-api/grp ## Version compatibility -| Camunda Spring SDK version | JDK | Camunda version | Bundled Spring Boot version | -| -------------------------- | ------ | --------------- | --------------------------- | -| 8.5.x | \>= 17 | 8.5.x | 3.2.x | -| 8.6.x | \>= 17 | 8.6.x | 3.2.x | +| Camunda Spring SDK version | JDK | Camunda version | Bundled Spring Boot version | +| -------------------------- | ---- | --------------- | --------------------------- | +| 8.5.x | ≥ 17 | 8.5.x | 3.2.x | +| 8.6.x | ≥ 17 | 8.6.x | 3.2.x | ## Add the Spring Zeebe SDK to your project -Add the following repository and Maven dependency to your Spring Boot Starter project: - -```xml - - - - true - - - false - - identity - Camunda Identity - https://artifacts.camunda.com/artifactory/camunda-identity/ - - -``` +Add the following Maven dependency to your Spring Boot Starter project, replacing `x` with the latest patch level available: ```xml io.camunda spring-boot-starter-camunda-sdk - 8.6.3 + 8.6.x ``` @@ -134,6 +118,7 @@ camunda: rest-address: http://localhost:8080 prefer-rest-over-grpc: false audience: zeebe-api + scope: # optional ``` ## Obtain the Zeebe client diff --git a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md index 2fd6e935f61..d403d964086 100644 --- a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md +++ b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-task-controller.md @@ -168,7 +168,7 @@ curl -X 'PATCH' \ ### Unassign task -Unassign a task with the provided id. This returns the task. +Unassign a task with the provided ID. This returns the task. #### URL diff --git a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md index 2eed6411de4..48ce994ad29 100644 --- a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md +++ b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/controllers/tasklist-api-rest-variables-controller.md @@ -10,7 +10,7 @@ The Variables API controller provides an API to query variables. ### Get variable -Get the variable details by variable id. +Get the variable details by variable ID. #### URL diff --git a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md deleted file mode 100644 index b4f17bacb49..00000000000 --- a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md +++ /dev/null @@ -1,388 +0,0 @@ ---- -id: migrate-to-zeebe-user-tasks -title: Migrate to Zeebe user tasks -description: "Learn how to migrate job worker-based user tasks to Zeebe-based tasks." 
---- - -import DocCardList from '@theme/DocCardList'; -import FormViewer from "@site/src/mdx/FormViewer"; -import YesItem from "./assets/react-components/YesItem"; -import NoItem from "./assets/react-components/NoItem"; -import TableTextSmall from "./assets/react-components/TableTextSmall"; -import userTaskMigrationDecisionHelperForm from "./assets/forms/userTaskMigrationDecisionHelperForm.js"; -import "./assets/css/condensedTable.module.css"; -import styles from "./assets/css/cleanImages.module.css"; -import APIArchitectureImg from './assets/img/api-architecture.png'; -import ZeebeTaskSelectionImg from './assets/img/zeebe-user-task-selection.png'; - -Camunda 8.5 introduces a new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type: Zeebe user tasks. -Zeebe user tasks have several benefits, including: - -- Running directly on the automation engine for high performance. -- Removing dependencies and round trips to Tasklist. -- A more powerful API that supports the full task lifecycle. - -In this guide, you will learn: - -- Under which circumstances and when you should migrate. -- How to estimate the impact on a project. -- The steps you need to take for a successful migration without interrupting your operations. - -## Decide on your migration path - -Zeebe user tasks require migration of the user tasks in both your diagrams and the task API. - -With this in mind, you can migrate at your own pace. If you should migrate now or later, and what is required to migrate depends on your current setup and future plans. - -Use the following decision helper questionnaire to figure out what's right for you: - - - -### Task type differences - -Learn the differences between both task types and make an informed decision, and understand the new capabilities of Zeebe user tasks. Refer to this table for important high-level differences of the two task types: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-|                                     | Job worker-based user tasks (existing implementation) | Zeebe user tasks (recommended for new projects) |
-| ----------------------------------- | ------------------------------------------------------ | ------------------------------------------------ |
-| Implementation location             | Tasklist                                                | Zeebe (does not require Tasklist to run)          |
-| Compatible versions                 | 8.0+                                                    | 8.5+                                              |
-| Supports Tasklist UI                |                                                         |                                                   |
-| **API**                             |                                                         |                                                   |
-| Supports Tasklist API               | Full support                                            | Partially (queries, GET tasks, forms, variables); currently, you must use Zeebe and Tasklist APIs to use Zeebe user tasks |
-| Supports Zeebe API                  |                                                         | Task state operations (assign/update/complete)    |
-| Supports job workers                |                                                         |                                                   |
-| Supports task lifecycle events      | Basic only: created/completed/canceled                  | Full lifecycle events including custom actions    |
-| Supports task listeners             |                                                         | Task listeners will be introduced in a future release |
-| **Extras**                          |                                                         |                                                   |
-| Custom actions/outcomes             |                                                         | Custom actions can be defined on any operation excluding unassign (DELETE assignment, send update beforehand) |
-| Supports task reports in Optimize   |                                                         |                                                   |
-| Recommendations                     | You can continue to use this task type on existing projects when you have a custom task application running on it and do not require any of the above features. Refer to the decision helper above for a tailored recommendation. | Use this task type on any new projects when you run Tasklist. Migrate existing projects and task applications/clients to this task type when you require one of the features above, or for use cases such as: implementing a full task lifecycle; reacting on any change/events in tasks, such as assignments, escalations, due date updates, or any custom actions; sending notifications; tracking task or team performance; building an audit log on task events; enriching tasks with business data. Refer to the decision helper above for a tailored recommendation. |
    - -## Switch the implementation type of your user tasks - -We recommend you migrate process-by-process, allowing you to thoroughly test the processes in your test environments or via your [CI/CD](/guides/devops-lifecycle/integrate-web-modeler-in-ci-cd.md). To do this, take the following steps: - -1. Open a diagram you want to migrate. -2. Click on a user task. -3. Check if the task has an embedded form. - - If a form is embedded, [transform it into a linked form](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked) before you change the task type implementation. Press `Ctrl+Z` or `⌘+Z` to undo if you accidentally removed your embedded form. -4. Open the **Implementation** section in the properties panel. -5. Click the **Type** dropdown and select **Zeebe user task**. The linked form or external form reference will be preserved. - -Task Type Selection - -Repeat these steps for all user tasks in the process. Then, deploy the process to your development cluster and test it by running the process and ensuring your custom task applications work. - -## Use the new Zeebe Task API - -:::note -The Tasklist REST API is not deprecated, and you still need it for queries on both task types. -::: - -Operations on Zeebe user tasks which modify the task state have to be performed using the new Zeebe REST API. However, queries and adjacent operations still require the Tasklist REST API. The following table provides a breakdown of which operations are supported in which API, and for which user tasks. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Operation                          | Tasklist API           | Zeebe Task API (8.5) |
-| ---------------------------------- | ---------------------- | -------------------- |
-| Query tasks                        | All types              | ← Use Tasklist API   |
-| Get task                           | All types              | ← Use Tasklist API   |
-| Retrieve task variables            | All types              | ← Use Tasklist API   |
-| Get task form                      | All types              | ← Use Tasklist API   |
-| Change task assignment             | Job worker-based tasks | Zeebe tasks          |
-| Complete task                      | Job worker-based tasks | Zeebe tasks          |
-| Update task                        | -                      | Zeebe tasks          |
-| Save and retrieve draft variables  | All types              | ← Use Tasklist API   |
    - -You can also operate both task types at the same time in the same application utilizing both APIs. We recommend this for a smooth migration, but you should eventually update all processes to use the new task type to use all benefits. The following image illustrates how to route API calls to the respective APIs: - -Task API Architecture - -The major changes are: - -- Create and maintain new, additional secrets for the Zeebe REST API. -- Call dedicated endpoints on separate components (Zeebe vs. Tasklist) for all state modifications on tasks for the respective task types. -- Manage new request/response objects. - -The following table outlines the respective endpoints. Click the endpoints to follow to the API documentation and inspect the differences in the request and response objects. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Operation                          | Tasklist API                                                           | Zeebe Task API (8.5)                      |
-| ---------------------------------- | ---------------------------------------------------------------------- | ----------------------------------------- |
-| Query tasks                        | `POST /tasks/search`                                                    | ← Use Tasklist API                        |
-| Get task                           | `GET /tasks/:taskId`                                                    | ← Use Tasklist API                        |
-| Retrieve task variables            | `GET /variables/:variableId`, `POST /tasks/:taskId/variables/search`   | ← Use Tasklist API                        |
-| Get task form                      | `GET /forms/:formId`                                                    | ← Use Tasklist API                        |
-| Assign a task                      | `PATCH /tasks/:taskId/assign`                                           | `POST /user-tasks/:taskKey/assignment`    |
-| Unassign a task                    | `PATCH /tasks/:taskId/unassign`                                         | `DELETE /user-tasks/:taskKey/assignee`    |
-| Complete task                      | `PATCH /tasks/:taskId/complete`                                         | `POST /user-tasks/:taskKey/completion`    |
-| Update task                        | -                                                                       | `PATCH /user-tasks/:taskKey`              |
-| Save and retrieve draft variables  | `POST /tasks/:taskId/variables`                                         | ← Use Tasklist API                        |
    - -### Zeebe Java client - -Use the Zeebe Java client when you are building your task application in Java. The client assists with managing authentication and request/response objects. - -### API differences - - - -Refer to the dedicated sections and API explorers to learn details about the APIs. - - - -## Troubleshooting and common issues - -If your task application does not work properly after migration, check the following: - -- **The endpoints return specific error messages when you run them on the wrong task type**: Ensure to call the right endpoint for the right task type, c.f. above [table](#use-the-new-zeebe-task-api). -- **Forms do not appear**: Ensure you have extracted embedded forms, if any, and [transformed them into linked forms](/components/modeler/bpmn/user-tasks/user-tasks.md#camunda-form-linked), before you change the task type implementation. -- **Task update operation does not work**: The update operation is only available to Zeebe user tasks. diff --git a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/sidebar-schema.js b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/sidebar-schema.js index 517c97c4df8..ebb41a15e74 100644 --- a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/sidebar-schema.js +++ b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/sidebar-schema.js @@ -7,6 +7,5 @@ module.exports = { { Specifications: require("./specifications/sidebar.js"), }, - "apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks", ], }; diff --git a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md index 51b479a2a4a..31bedd5f7cd 100644 --- a/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md +++ b/versioned_docs/version-8.6/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md @@ -2,9 +2,15 @@ id: tasklist-api-rest-overview title: "Overview" sidebar_position: 1 -description: "Build applications for human-centered processes by querying human tasks, assigning users, and completing tasks with the Tasklist API." +description: "Build applications for human-centered processes by querying user tasks, assigning users, and completing tasks with the Tasklist API." --- +:::note +Camunda introduced the [Camunda 8 REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md) with `8.6` to manage and query process entities such as processes, decisions, forms, and user tasks. + +Camunda has also introduced [Zeebe user tasks](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md) to build more advanced functionalities. If you use Zeebe user tasks with `8.6`, task management endpoints in the Tasklist API will not work. +::: + ## Introduction The Tasklist API is a REST API designed to build task applications for human-centered processes. The API allows you to query user tasks, assign users to these tasks, and complete these tasks. @@ -18,7 +24,7 @@ Ensure you [authenticate](./tasklist-api-rest-authentication.md) before accessin For SaaS: `https://${REGION}.tasklist.camunda.io:443/${CLUSTER_ID}/v1/`, and for Self-Managed installations: `http://localhost:8080/v1/`. :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). 
+Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). For Self-Managed, the host and port depend on your configuration. The context path mentioned here is the default for the Tasklist component. ::: @@ -34,7 +40,7 @@ A detailed API description is also available as a Swagger UI at `https://${base- For SaaS: `https://${REGION}.tasklist.camunda.io:443/${CLUSTER_ID}/swagger-ui/index.html`, and for Self-Managed installations: [`http://localhost:8080/swagger-ui/index.html`](http://localhost:8080/swagger-ui/index.html). :::note -Find your region and cluster id under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). +Find your region and cluster ID under **Connection information** in your client credentials (revealed when you click on your client under the **API** tab within your cluster). ::: ## API in Postman diff --git a/versioned_docs/version-8.6/apis-tools/tasklist-api/tasklist-api-tutorial.md b/versioned_docs/version-8.6/apis-tools/tasklist-api/tasklist-api-tutorial.md index 3dad8bb79cd..5ecdd5ba456 100644 --- a/versioned_docs/version-8.6/apis-tools/tasklist-api/tasklist-api-tutorial.md +++ b/versioned_docs/version-8.6/apis-tools/tasklist-api/tasklist-api-tutorial.md @@ -250,9 +250,8 @@ export class TasklistModule implements OnModuleInit { logger.log("Tasklist credentials fetched"); axiosRef.defaults.baseURL = config.get("TASKLIST_API_ADDRESS"); - axiosRef.defaults.headers[ - "Authorization" - ] = `Bearer ${credentials.access_token}`; + axiosRef.defaults.headers["Authorization"] = + `Bearer ${credentials.access_token}`; axiosRef.defaults.headers["Content-Type"] = "application/json"; setTimeout(this.onModuleInit.bind(this), credentials.expires_in * 1000); // we need convert minutes to milliseconds } diff --git a/versioned_docs/version-8.6/apis-tools/testing/getting-started.md b/versioned_docs/version-8.6/apis-tools/testing/getting-started.md index 6101c53b3fe..632421edb4c 100644 --- a/versioned_docs/version-8.6/apis-tools/testing/getting-started.md +++ b/versioned_docs/version-8.6/apis-tools/testing/getting-started.md @@ -277,7 +277,7 @@ The test runtime uses [SLF4J](https://www.slf4j.org/) as the logging framework. - `tc.camunda` - The Camunda Docker container - `tc.connectors` - The Connectors Docker container - `tc.elasticsearch` - The Elasticsearch Docker container -- `org.testcontainers` - The Testconainers framework +- `org.testcontainers` - The Testcontainers framework For most cases, the log level `warn` (warning) is sufficient. diff --git a/versioned_docs/version-8.6/apis-tools/web-modeler-api/index.md b/versioned_docs/version-8.6/apis-tools/web-modeler-api/index.md index 43d1d5d9c14..5c339f39f0a 100644 --- a/versioned_docs/version-8.6/apis-tools/web-modeler-api/index.md +++ b/versioned_docs/version-8.6/apis-tools/web-modeler-api/index.md @@ -44,11 +44,11 @@ On Self-Managed instances no limits are enforced. ### What is the difference between _simplePath_ and _canonicalPath_? -In Web Modeler you can have multiple files with the same name, multiple folders with the same name, and even multiple projects with the same name. Internally, duplicate names are disambiguated by unique ids. +In Web Modeler you can have multiple files with the same name, multiple folders with the same name, and even multiple projects with the same name. 
Internally, duplicate names are disambiguated by unique IDs. -The API gives you access to the names, as well as the ids. For example, when requesting a file you will get the following information: +The API gives you access to the names, as well as the IDs. For example, when requesting a file you will get the following information: - **simplePath** contains the human-readable path. This path may be ambiguous or may have ambiguous elements (e.g. folders) in it. -- **canonicalPath** contains the unique path. It is a list of **PathElementDto** objects which contain the id and the name of the element. +- **canonicalPath** contains the unique path. It is a list of **PathElementDto** objects which contain the ID and the name of the element. -Internally, the ids are what matters. You can rename files or move files between folders and projects and the id will stay the same. +Internally, the IDs are what matters. You can rename files or move files between folders and projects and the ID will stay the same. diff --git a/versioned_docs/version-8.6/apis-tools/web-modeler-api/tutorial.md b/versioned_docs/version-8.6/apis-tools/web-modeler-api/tutorial.md index 946f47749aa..518ff87028b 100644 --- a/versioned_docs/version-8.6/apis-tools/web-modeler-api/tutorial.md +++ b/versioned_docs/version-8.6/apis-tools/web-modeler-api/tutorial.md @@ -26,12 +26,16 @@ Make sure you keep the generated client credentials in a safe place. The **Clien ## Set up authentication -If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client id and client secret. Then, we return the actual token that can be passed as an authorization header in each request. +If you're interested in how we use a library to handle auth for our code, or to get started, examine the `auth.js` file in the GitHub repository. This file contains a function named `getAccessToken` which executes an OAuth 2.0 protocol to retrieve authentication credentials based on your client ID and client secret. Then, we return the actual token that can be passed as an authorization header in each request. To set up your credentials, create an `.env` file which will be protected by the `.gitignore` file. You will need to add your `MODELER_CLIENT_ID`, `MODELER_CLIENT_SECRET`, `MODELER_AUDIENCE`, which is `modeler.cloud.camunda.io` in a Camunda 8 SaaS environment, and `MODELER_BASE_URL`, which is `https://modeler.camunda.io/api/v1`. These keys will be consumed by the `auth.js` file to execute the OAuth protocol, and should be saved when you generate your client credentials in [prerequisites](#prerequisites). +:::tip Can't find your environment variables? +When you create new client credentials as a [prerequisite](#prerequisites), your environment variables appear in a pop-up window. Your environment variables may appear as `CAMUNDA_CONSOLE_CLIENT_ID`, `CAMUNDA_CONSOLE_CLIENT_SECRET`, and `CAMUNDA_CONSOLE_OAUTH_AUDIENCE`. +::: + Examine the existing `.env.example` file for an example of how your `.env` file should look upon completion. Do not place your credentials in the `.env.example` file, as this example file is not protected by the `.gitignore`. 
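As a rough idea of what such a helper can look like, here is a minimal sketch — not the repository's actual `auth.js` — assuming the `dotenv` package, Node 18+ (for the built-in `fetch`), and the standard Camunda 8 SaaS token endpoint `https://login.cloud.camunda.io/oauth/token`:

```js
// auth-sketch.js — illustrative client-credentials flow, not the repo's auth.js
require("dotenv").config();

async function getAccessToken() {
  // Token endpoint assumed to be the Camunda 8 SaaS login service.
  const response = await fetch("https://login.cloud.camunda.io/oauth/token", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      grant_type: "client_credentials",
      client_id: process.env.MODELER_CLIENT_ID,
      client_secret: process.env.MODELER_CLIENT_SECRET,
      audience: process.env.MODELER_AUDIENCE, // modeler.cloud.camunda.io on SaaS
    }),
  });

  if (!response.ok) {
    throw new Error(`Token request failed: ${response.status}`);
  }

  const { access_token } = await response.json();
  // Pass this value as `Authorization: Bearer <token>` on each Web Modeler API request.
  return access_token;
}

module.exports = { getAccessToken };
```

The helper reads the same `MODELER_*` variables described above, so it works with the `.env` file you create in this step.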
:::note diff --git a/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md b/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md index 766d9a434e5..37a4f546afe 100644 --- a/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md +++ b/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md @@ -72,6 +72,9 @@ Additionally, visit our documentation on [Operate](../self-managed/operate-deplo ### SDKs ### Postman diff --git a/versioned_docs/version-8.6/apis-tools/zeebe-api-rest/tutorial.md b/versioned_docs/version-8.6/apis-tools/zeebe-api-rest/tutorial.md index ec299cbe3a6..2bb1795037a 100644 --- a/versioned_docs/version-8.6/apis-tools/zeebe-api-rest/tutorial.md +++ b/versioned_docs/version-8.6/apis-tools/zeebe-api-rest/tutorial.md @@ -41,7 +41,7 @@ In this tutorial, we will execute arguments to assign and unassign a user to and ## Assign a Zeebe user task (POST) :::note -In this tutorial, you will capture a **Zeebe user task** ID to assign and unassign users in this API. Camunda 8.5 introduced this new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type, and these Zeebe user tasks are different from job worker-based user tasks. See more details on task type differences in the [migrating to Zeebe user tasks documentation](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md#task-type-differences). +In this tutorial, you will capture a **Zeebe user task** ID to assign and unassign users in this API. Camunda 8.5 introduced this new [user task](/components/modeler/bpmn/user-tasks/user-tasks.md) implementation type, and these Zeebe user tasks are different from job worker-based user tasks. See more details on task type differences in the [migrating to Zeebe user tasks documentation](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md#task-type-differences). ::: First, let's script an API call to assign a Zeebe user task. diff --git a/versioned_docs/version-8.6/apis-tools/zeebe-api/gateway-service.md b/versioned_docs/version-8.6/apis-tools/zeebe-api/gateway-service.md index cab216dc3c7..e0f24593720 100644 --- a/versioned_docs/version-8.6/apis-tools/zeebe-api/gateway-service.md +++ b/versioned_docs/version-8.6/apis-tools/zeebe-api/gateway-service.md @@ -36,7 +36,7 @@ message ActivateJobsRequest { // if the requestTimeout = 0, a default timeout is used. // if the requestTimeout < 0, long polling is disabled and the request is completed immediately, even when no job is activated. int64 requestTimeout = 6; - // a list of tenant IDs for which to activate jobs + // a list of IDs of tenants for which to activate jobs repeated string tenantIds = 7; } ``` @@ -79,7 +79,7 @@ message ActivatedJob { // JSON document, computed at activation time, consisting of all visible variables to // the task scope string variables = 13; - // the id of the tenant that owns the job + // the ID of the tenant that owns the job string tenantId = 14; } ``` @@ -118,7 +118,7 @@ message BroadcastSignalRequest { // the signal variables as a JSON document; to be valid, the root of the document must be an // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid. string variables = 2; - // the id of the tenant that owns the signal. + // the ID of the tenant that owns the signal. string tenantId = 3; } ``` @@ -129,7 +129,7 @@ message BroadcastSignalRequest { message BroadcastSignalResponse { // the unique ID of the signal that was broadcasted. int64 key = 1; - // the tenant id of the signal that was broadcasted. 
+ // the tenant ID of the signal that was broadcasted. string tenantId = 2; } ``` @@ -160,8 +160,6 @@ message CancelProcessInstanceRequest { // the process instance key (as, for example, obtained from // CreateProcessInstanceResponse) int64 processInstanceKey = 1; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 2; } ``` @@ -254,10 +252,8 @@ message CreateProcessInstanceRequest { // will start at the start event. If non-empty the process instance will apply start // instructions after it has been created repeated ProcessInstanceCreationStartInstruction startInstructions = 5; - // the tenant ID of the process definition + // the tenant id of the process definition string tenantId = 6; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 7; } message ProcessInstanceCreationStartInstruction { @@ -279,7 +275,7 @@ message ProcessInstanceCreationStartInstruction { ```protobuf message CreateProcessInstanceResponse { // the key of the process definition which was used to create the process instance - int64 processKey = 1; + int64 processDefinitionKey = 1; // the BPMN process ID of the process definition which was used to create the process // instance string bpmnProcessId = 2; @@ -288,7 +284,7 @@ message CreateProcessInstanceResponse { // the unique identifier of the created process instance; to be used wherever a request // needs a process instance key (e.g. CancelProcessInstanceRequest) int64 processInstanceKey = 4; - // the tenant ID of the created process instance + // the tenant identifier of the created process instance string tenantId = 5; } ``` @@ -309,21 +305,24 @@ Start instructions have the same [limitations as process instance modification]( ### Input: `CreateProcessInstanceWithResultRequest` ```protobuf -message CreateProcessInstanceRequest { - CreateProcessInstanceRequest request = 1; - // timeout (in ms). the request will be closed if the process is not completed before - // the requestTimeout. - // if requestTimeout = 0, uses the generic requestTimeout configured in the gateway. - int64 requestTimeout = 2; +message CreateProcessInstanceWithResultRequest { + CreateProcessInstanceRequest request = 1; + // timeout (in ms). the request will be closed if the process is not completed + // before the requestTimeout. + // if requestTimeout = 0, uses the generic requestTimeout configured in the gateway. + int64 requestTimeout = 2; + // list of names of variables to be included in `CreateProcessInstanceWithResultResponse.variables` + // if empty, all visible variables in the root scope will be returned. + repeated string fetchVariables = 3; } ``` ### Output: `CreateProcessInstanceWithResultResponse` ```protobuf -message CreateProcessInstanceResponse { +message CreateProcessInstanceWithResultResponse { // the key of the process definition which was used to create the process instance - int64 processKey = 1; + int64 processDefinitionKey = 1; // the BPMN process ID of the process definition which was used to create the process // instance string bpmnProcessId = 2; @@ -332,9 +331,10 @@ message CreateProcessInstanceResponse { // the unique identifier of the created process instance; to be used wherever a request // needs a process instance key (e.g. 
CancelProcessInstanceRequest) int64 processInstanceKey = 4; - // consisting of all visible variables to the root scope + // JSON document + // consists of visible variables in the root scope string variables = 5; - // the tenant ID of the process definition + // the tenant identifier of the process definition string tenantId = 6; } ``` @@ -400,7 +400,7 @@ message EvaluateDecisionRequest { // [{ "a": 1, "b": 2 }] would not be a valid argument, as the root of the // JSON document is an array and not an object. string variables = 3; - // the tenant ID of the decision + // the tenant identifier of the decision string tenantId = 4; } ``` @@ -435,7 +435,7 @@ message EvaluateDecisionResponse { string failedDecisionId = 9; // an optional message describing why the decision which was evaluated failed string failureMessage = 10; - // the tenant ID of the evaluated decision + // the tenant identifier of the evaluated decision string tenantId = 11; // the unique key identifying this decision evaluation int64 decisionInstanceKey = 12; @@ -461,12 +461,12 @@ message EvaluatedDecision { repeated MatchedDecisionRule matchedRules = 7; // the decision inputs that were evaluated within this decision evaluation repeated EvaluatedDecisionInput evaluatedInputs = 8; - // the tenant ID of the evaluated decision + // the tenant identifier of the evaluated decision string tenantId = 9; } message EvaluatedDecisionInput { - // the id of the evaluated decision input + // the ID of the evaluated decision input string inputId = 1; // the name of the evaluated decision input string inputName = 2; @@ -475,7 +475,7 @@ message EvaluatedDecisionInput { } message EvaluatedDecisionOutput { - // the id of the evaluated decision output + // the ID of the evaluated decision output string outputId = 1; // the name of the evaluated decision output string outputName = 2; @@ -484,7 +484,7 @@ message EvaluatedDecisionOutput { } message MatchedDecisionRule { - // the id of the matched rule + // the ID of the matched rule string ruleId = 1; // the index of the matched rule int32 ruleIndex = 2; @@ -524,12 +524,12 @@ Note that this is an atomic call, i.e. either all resources are deployed, or non message DeployResourceRequest { // list of resources to deploy repeated Resource resources = 1; - // the tenant ID of the resources to deploy + // the tenant id of the resources to deploy string tenantId = 2; } message Resource { - // the resource name, e.g. myProcess.bpmn, myDecision.dmn or myForm.form + // the resource name, e.g. myProcess.bpmn or myDecision.dmn string name = 1; // the file content as a UTF8-encoded string bytes content = 2; @@ -544,7 +544,7 @@ message DeployResourceResponse { int64 key = 1; // a list of deployed resources, e.g. 
processes repeated Deployment deployments = 2; - // the tenant ID of the deployed resources + // the tenant id of the deployed resources string tenantId = 3; } @@ -573,7 +573,7 @@ message ProcessMetadata { // the resource name (see: ProcessRequestObject.name) from which this process was // parsed string resourceName = 4; - // the tenant ID of the deployed process + // the tenant id of the deployed process string tenantId = 5; } @@ -594,7 +594,7 @@ message DecisionMetadata { // the assigned key of the decision requirements graph that this decision is // part of int64 decisionRequirementsKey = 6; - // the tenant ID of the deployed decision + // the tenant id of the deployed decision string tenantId = 7; } @@ -612,7 +612,7 @@ message DecisionRequirementsMetadata { // the resource name (see: Resource.name) from which this decision // requirements was parsed string resourceName = 5; - // the tenant ID of the deployed decision requirements + // the tenant id of the deployed decision requirements string tenantId = 6; } @@ -626,7 +626,7 @@ message FormMetadata { int64 formKey = 3; // the resource name string resourceName = 4; - // the tenant ID of the deployed form + // the tenant id of the deployed form string tenantId = 5; } ``` @@ -725,11 +725,9 @@ message ModifyProcessInstanceRequest { repeated ActivateInstruction activateInstructions = 2; // instructions describing which elements should be terminated repeated TerminateInstruction terminateInstructions = 3; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 4; message ActivateInstruction { - // the id of the element that should be activated + // the ID of the element that should be activated string elementId = 1; // the key of the ancestor scope the element instance should be created in; // set to -1 to create the new element instance within an existing element @@ -746,13 +744,13 @@ message ModifyProcessInstanceRequest { // "b" respectively, with their associated values. [{ "a": 1, "b": 2 }] would not be a // valid argument, as the root of the JSON document is an array and not an object. string variables = 1; - // the id of the element in which scope the variables should be created; + // the ID of the element in which scope the variables should be created; // leave empty to create the variables in the global scope of the process instance string scopeId = 2; } message TerminateInstruction { - // the id of the element that should be terminated + // the ID of the element that should be terminated int64 elementInstanceKey = 1; } } @@ -779,11 +777,11 @@ Returned if: Returned if: - At least one activate instruction is invalid. An activate instruction is considered invalid if: - - The process doesn't contain an element with the given id. + - The process doesn't contain an element with the given ID. - A flow scope of the given element can't be created. - The given element has more than one active instance of its flow scope. - At least one variable instruction is invalid. A variable instruction is considered invalid if: - - The process doesn't contain an element with the given scope id. + - The process doesn't contain an element with the given scope ID. - The given element doesn't belong to the activating element's flow scope. - The given variables are not a valid JSON document. - At least one terminate instruction is invalid. 
A terminate instruction is considered invalid if: @@ -807,8 +805,7 @@ message MigrateProcessInstanceRequest { int64 processInstanceKey = 1; // the migration plan that defines target process and element mappings MigrationPlan migrationPlan = 2; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 3; + message MigrationPlan { // the key of process definition to migrate the process instance to int64 targetProcessDefinitionKey = 1; @@ -817,9 +814,9 @@ message MigrateProcessInstanceRequest { } message MappingInstruction { - // the element id to migrate from + // the element ID to migrate from string sourceElementId = 1; - // the element id to migrate into + // the element ID to migrate into string targetElementId = 2; } } @@ -886,7 +883,7 @@ message PublishMessageRequest { // the message variables as a JSON document; to be valid, the root of the document must be an // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid. string variables = 5; - // the tenant ID of the message + // the tenant id of the message string tenantId = 6; } ``` @@ -897,7 +894,7 @@ message PublishMessageRequest { message PublishMessageResponse { // the unique ID of the message that was published int64 key = 1; - // the tenant ID of the message + // the tenant id of the message string tenantId = 2; } ``` @@ -935,8 +932,6 @@ problem, followed by this call. message ResolveIncidentRequest { // the unique ID of the incident to resolve int64 incidentKey = 1; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 2; } ``` @@ -979,8 +974,6 @@ message SetVariablesRequest { // be unchanged, and scope 2 will now be `{ "bar" : 1, "foo" 5 }`. if local was false, however, // then scope 1 would be `{ "foo": 5 }`, and scope 2 would be `{ "bar" : 1 }`. bool local = 3; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 4; } ``` @@ -1103,12 +1096,14 @@ message Partition { enum PartitionBrokerRole { LEADER = 0; FOLLOWER = 1; + INACTIVE = 2; } // Describes the current health of the partition enum PartitionBrokerHealth { HEALTHY = 0; UNHEALTHY = 1; + DEAD = 2; } // the unique ID of this partition @@ -1137,8 +1132,6 @@ message UpdateJobRetriesRequest { int64 jobKey = 1; // the new amount of retries for the job; must be positive int32 retries = 2; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 3; } ``` @@ -1177,8 +1170,6 @@ message UpdateJobTimeoutRequest { int64 jobKey = 1; // the duration of the new timeout in ms, starting from the current moment int64 timeout = 2; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 3; } ``` @@ -1210,11 +1201,9 @@ Returned if: ```protobuf message DeleteResourceRequest { - // The key of the resource that should be deleted. This can be the key - // of a process definition, the key of a decision requirements definition or the key of a form definition. + // The key of the resource that should be deleted. This can either be the key + // of a process definition, the key of a decision requirements definition or the key of a form. 
int64 resourceKey = 1; - // a reference key chosen by the user and will be part of all records resulting from this operation - optional uint64 operationReference = 2; } ``` @@ -1300,7 +1289,7 @@ message ActivatedJob { // JSON document, computed at activation time, consisting of all visible variables to // the task scope string variables = 13; - // the id of the tenant that owns the job + // the ID of the tenant that owns the job string tenantId = 14; } ``` diff --git a/versioned_docs/version-8.6/components/best-practices/architecture/extending-human-task-management-c7.md b/versioned_docs/version-8.6/components/best-practices/architecture/extending-human-task-management-c7.md index 93400dc74c7..592121fba5a 100644 --- a/versioned_docs/version-8.6/components/best-practices/architecture/extending-human-task-management-c7.md +++ b/versioned_docs/version-8.6/components/best-practices/architecture/extending-human-task-management-c7.md @@ -83,7 +83,7 @@ Now you can use a task _filter_ with criteria checking the follow-up date and if ### Enforcing deadlines for tasks -There are different ways of enforcing deadlines for Human Tasks. Typical actions for overdue tasks are: +There are different ways of enforcing deadlines with human task orchestration. Typical actions for overdue tasks are: - Sending reminder mails - Changing the assignee/group @@ -96,7 +96,7 @@ There are different ways of enforcing deadlines for Human Tasks. Typical actions | Bulk actions possible (e.g. one mail with a list of all due tasks) | | | yes | | | No custom component required | yes | yes | Querying has to be done by external trigger or BPMN process | yes | | Use when | The escalation is business relevant and has to be visible in the process model | Overdue tasks can be easily monitored via tasklist application, actions are taken manually | Sophisticated, automated actions should take place | A timely escalation mechanism is desired | -| Don’t use when…​ | Each and every User Task has a due date and explicit modeling would clutter your process model | You need an action to be executed automatically | You do not want to run your own scheduling infrastructure | The escalation should be visible in the process model | +| Don’t use when…​ | Each and every user task has a due date and explicit modeling would clutter your process model | You need an action to be executed automatically | You do not want to run your own scheduling infrastructure | The escalation should be visible in the process model | #### Modeling an escalation @@ -176,7 +176,7 @@ In case you need _dynamically calculated values_ or specific _fields derived fro - using task variables as a kind of _caching_ mechanism, - being filled by "calculating" the values using _expression language_ -- e.g. by means of an _I/O Mapping_ of a User Task: +- e.g. by means of an _I/O Mapping_ of a user task: ```xml diff --git a/versioned_docs/version-8.6/components/best-practices/architecture/sizing-your-environment.md b/versioned_docs/version-8.6/components/best-practices/architecture/sizing-your-environment.md index 4c800642b2a..adc1deacb3c 100644 --- a/versioned_docs/version-8.6/components/best-practices/architecture/sizing-your-environment.md +++ b/versioned_docs/version-8.6/components/best-practices/architecture/sizing-your-environment.md @@ -85,7 +85,7 @@ The payload size also affects disk space requirements, as described in the next ### Disk space -The workflow engine itself will store data along every process instance, especially to keep the current state persistent. 
This is unavoidable. In case there are human tasks, data is also sent to Tasklist and kept there, until tasks are completed. +The workflow engine itself will store data along every process instance, especially to keep the current state persistent. This is unavoidable. In case there are user tasks, data is also sent to Tasklist and kept there, until tasks are completed. Furthermore, data is also sent from Operate and Optimize, which store data in Elasticsearch. These tools keep historical audit data for the configured retention times. The total amount of disk space can be reduced by using **data retention settings**. We typically delete data in Operate after 30 to 90 days, but keep it in Optimize for a longer period of time to allow more analysis. A good rule of thumb is something between 6 and 18 months. @@ -120,7 +120,7 @@ Using your throughput and retention settings, you can now calculate the required ## Understanding sizing and scalability behavior -Spinning up a Camunda 8 Cluster means you run multiple components that all need resources in the background, like the Zeebe broker, Elasticsearch (as the database for Operate, Tasklist, and Optimize), Operate, Tasklist, and Optimize. All those components need to be equipped with resources. +Spinning up a Camunda 8 Cluster means you run multiple components that all need resources in the background, like the Zeebe Broker, Elasticsearch (as the database for Operate, Tasklist, and Optimize), Operate, Tasklist, and Optimize. All those components need to be equipped with resources. All components are clustered to provide high-availability, fault-tolerance and resiliency. @@ -146,16 +146,16 @@ Now you can select a hardware package that can cover these requirements. In this Camunda 8 defines four [cluster sizes](/components/concepts/clusters.md#cluster-size) you can select from (1x, 2x, 3x, and 4x) after you have chosen your [cluster type](/components/concepts/clusters.md#cluster-type). The following table gives you an indication of what requirements you can fulfill with each cluster size. :::note -Contact your Customer Success Manager if you require a custom cluster size above these requirements. +Contact your Customer Success Manager to increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. ::: -| Cluster size | 1x | 2x | 3x | 4x | -| :---------------------------------------------------------------------------------- | ---------------------------------: | ----------------------------------: | -------------------------------: | -------------------------------: | -| Max Throughput **Tasks/day** **\*** | 4.3 M | 8.6 M | 12.9 M | 17.2 M | -| Max Throughput **Tasks/second** **\*** | 50 | 100 | 150 | 200 | -| Max Throughput **Process Instances/day** **\*\*** | 3 M | 6 M | 9 M | 12 M | -| Max Total Number of Process Instances stored (in Elasticsearch in total) **\*\*\*** | 75 k | 150 k | 225 k | 300 k | -| Approximate resources provisioned **\*\*\*\*** | 11 vCPU, 22 GB memory, 64 GB disk. | 22 vCPU, 44 GB memory, 128 GB disk. | 33 vCPU, 66 GB mem, 192 GB disk. | 44 vCPU, 88 GB mem, 256 GB disk. 
| +| Cluster size | 1x | 2x | 3x | 4x | +| :---------------------------------------------------------------------------------- | ----------------------------------: | ----------------------------------: | -------------------------------: | -------------------------------: | +| Max Throughput **Tasks/day** **\*** | 9 M | 18 M | 27 M | 36 M | +| Max Throughput **Tasks/second** **\*** | 100 | 200 | 300 | 400 | +| Max Throughput **Process Instances/second** **\*\*** | 5 | 10 | 15 | 20 | +| Max Total Number of Process Instances stored (in Elasticsearch in total) **\*\*\*** | 75 k | 150 k | 225 k | 300 k | +| Approximate resources provisioned **\*\*\*\*** | 11 vCPU, 22 GB memory, 192 GB disk. | 22 vCPU, 44 GB memory, 384 GB disk. | 33 vCPU, 66 GB mem, 576 GB disk. | 44 vCPU, 88 GB mem, 768 GB disk. | The numbers in the table were measured using Camunda 8 (version 8.6), [the benchmark project](https://github.com/camunda-community-hub/camunda-8-benchmark) running on its own Kubernetes Cluster, and using a [realistic process](https://github.com/camunda/camunda/blob/main/zeebe/benchmarks/project/src/main/resources/bpmn/realistic/bankCustomerComplaintDisputeHandling.bpmn) containing a mix of BPMN symbols such as tasks, events and call activities including subprocesses. To calculate day-based metrics, an equal distribution over 24 hours is assumed. @@ -164,12 +164,20 @@ The numbers in the table were measured using Camunda 8 (version 8.6), [the bench **\*\*** As Tasks are the primary resource driver, the number of process instances supported by a cluster is calculated based on the assumption of an average of 10 tasks per process. Customers can calculate a more accurate process instance estimate using their anticipated number of tasks per process. **\*\*\*** Total number of process instances within the retention period, regardless of if they are active or finished. This is limited by disk space, CPU, and memory for running and historical process instances available to ElasticSearch. Calculated assuming a typical set of process variables for process instances. Note that it makes a difference if you add one or two strings (requiring ~ 1kb of space) to your process instances, or if you attach a full JSON document containing 1MB, as this data needs to be stored in various places, influencing memory and disk requirements. If this number increases, you can still retain the runtime throughput, but Tasklist, Operate, and/or Optimize may lag behind. +The provisioned disk size is calculated as the sum of the disk size used by Zeebe and Elasticsearch. -Data retention has an influence on the amount of data that is kept for completed instances in your cluster. The default data retention is set to 30 days, which means that data that is older than 30 days gets removed from Operate and Tasklist. If a process instance is still active, it is fully functioning in runtime, but customers are not able to access historical data older than 30 days from Operate and Tasklist. Data retention is set to 6 months, meaning that data that is older than 6 months will be removed from Optimize. Up to certain limits data retention can be adjusted by Camunda on request. See [Camunda 8 SaaS data retention](/components/concepts/data-retention.md). +The max throughput numbers should be considered as peak loads, and the data retention configuration considered when defining the amount of data kept for completed instances in your cluster. 
See [Camunda 8 SaaS data retention](/components/concepts/data-retention.md) for the default retention times for Zeebe, Tasklist, Operate and Optimize. + +- If process instances are completed and older than the configured retention time of an application, the data is removed. +- If a process instance is older than the configured retention time but still active and incomplete, it is fully functioning in runtime and is _not_ removed. + +Data retention can be adjusted by Camunda on request (up to certain limits). You should consider retention time adjustments and/or storage capacity increases if you plan to run more than [max PI stored in ES]/ [configured retention time]. **\*\*\*\*** These are the resource limits configured in the Kubernetes cluster and are always subject to change. -You might wonder why the total number of process instances stored is that low. This is related to limited resources provided to Elasticsearch, yielding performance problems with too much data stored there. By increasing the available memory to Elasticsearch you can also increase that number. At the same time, even with this rather low number, you can always guarantee the throughput of the core workflow engine during peak loads, as this performance is not influenced. Also, you can always increase memory for Elasticsearch later on if it is required. +:::note +Why is the total number of process instances stored that low? This is related to limited resources provided to Elasticsearch, yielding performance problems with too much data stored there. By increasing the available memory to Elasticsearch you can also increase that number. At the same time, even with this rather low number, you can always guarantee the throughput of the core workflow engine during peak loads, as this performance is not influenced. Also, you can always increase memory for Elasticsearch later on if it is required. +::: ### Camunda 8 Self-Managed diff --git a/versioned_docs/version-8.6/components/best-practices/architecture/understanding-human-tasks-management.md b/versioned_docs/version-8.6/components/best-practices/architecture/understanding-human-tasks-management.md index eae24bd4232..a2edbfae66c 100644 --- a/versioned_docs/version-8.6/components/best-practices/architecture/understanding-human-tasks-management.md +++ b/versioned_docs/version-8.6/components/best-practices/architecture/understanding-human-tasks-management.md @@ -12,13 +12,13 @@ description: "Use Camunda task management features or implement your requirement ## Using task assignment features -The lifecycle of human tasks (like assigning, delegating, and completing tasks) is mostly a generic issue. There is no need to model common aspects into all your processes, if often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. +The lifecycle of human task orchestration (like assigning, delegating, and completing tasks) is mostly a generic issue. There is no need to model common aspects into all your processes, if often makes models unreadable. Use Camunda task management features or implement your requirements in a generic way. ![Task assignment](understanding-human-tasks-management-assets/human-tasks.png) So every task can be assigned to either a group of people, or a specific individual. An individual can 'claim' a task, indicating that they are picking the task from the pool (to avoid multiple people working on the same task). 
-As a general rule, you should assign human tasks in your business process to _groups of people_ instead of specific individuals. +As a general rule, you should assign human tasks, like [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) or [manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md), in your business process to _groups of people_ instead of specific individuals. ```xml @@ -51,11 +51,11 @@ While assigning users to groups is advised, it's not the only option. You could ## Deciding about your task list frontend -If you have human tasks in your process, you must make up your mind on how exactly you want to let your users work on their tasks and interact with the workflow engine. You have basically three options: +If you are orchestrating human tasks in your process, you must make up your mind on how exactly you want to let your users work on their tasks and interact with the workflow engine. You have basically three options: - [Camunda Tasklist](/components/tasklist/introduction-to-tasklist.md): The Tasklist application shipped with Camunda. This works out-of-the-box and has a low development effort. However, it is limited in terms of customizability and how much you can influence the user experience. -- Custom task list application: You can develop a custom task list and adapt this to your needs without compromises. Human tasks are shown inside your custom application, following your style guide and usability concept. You will use the [Camunda Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) in the background. This is very flexible, but requires additional development work. +- Custom task list application: You can develop a custom task list and adapt this to your needs without compromises. User tasks are shown inside your custom application, following your style guide and usability concept. You will use the [Camunda Tasklist API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) in the background. This is very flexible, but requires additional development work. - Third party tasklist: If our organization already has a task list application rolled out to the field, you might want to use this for tasks created by Camunda. You will need to develop some synchronization mechanism. The upside of this approach is that your end users might not even notice that you introduce a new workflow engine. diff --git a/versioned_docs/version-8.6/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md b/versioned_docs/version-8.6/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md index 5d2e5d98d88..6839288e478 100644 --- a/versioned_docs/version-8.6/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md +++ b/versioned_docs/version-8.6/components/best-practices/development/connecting-the-workflow-engine-with-your-world.md @@ -5,9 +5,7 @@ description: "To sketch the basic architecture of your solution, learn how to co One of your first tasks to build a process solution is to sketch the basic architecture of your solution. To do so, you need to answer the question of how to connect the workflow engine (Zeebe) with your application or with remote systems. -This document predominantly outlines writing some custom glue code in the programming language of your choice and using existing client libraries. In some cases, you might also want to leverage existing Connectors as a starting point. 
- -The workflow engine is a remote system for your applications, just like a database. Your application connects with Zeebe via remote protocols, [gRPC](https://grpc.io/) to be precise, which is typically hidden from you, like when using a database driver based on ODBC or JDBC. +The workflow engine is a remote system for your applications, just like a database. Your application connects with Zeebe via remote protocols (like [gRPC](https://grpc.io/) or REST), which is typically hidden from you, like when using a database driver based on ODBC or JDBC. With Camunda 8 and the Zeebe workflow engine, there are two basic options: @@ -197,7 +195,7 @@ As discussed in [writing good workers](../writing-good-workers/), you typically ## Connectors -The glue code is relatively simple, but you need to write code. Sometimes you might prefer using an out-of-the-box component, connecting Zeebe with the technology you need just by configuration. This component is called a **Connector**. +The glue code is relatively simple, but you need to write code. You might prefer using an out-of-the-box component, connecting Zeebe with the technology you need just by configuration. This component is called a **Connector**. A Connector can be uni or bidirectional and is typically one dedicated application that implements the connection that translates in one or both directions of communication. Such a Connector might also be helpful in case integrations are not that simple anymore. @@ -217,15 +215,7 @@ This is a bidirectional Connector which contains a Kafka listener for forwarding ### Out-of-the-box Connectors -Most Connectors are currently community extensions, which basically means that they are not officially supported by Camunda, but by community members (who sometimes are Camunda employees). While this sounds like a restriction, it can also mean there is more flexibility to make progress. A list of community-maintained Connectors can be found [here](https://github.com/camunda-community-hub/awesome-camunda-cloud#connectors-and-bridges). - -Camunda itself is also working on improving the Connector infrastructure as such to be able to provide more Connectors easier in the future. - -### Using Connectors in SaaS - -Currently, Connectors are not operated as part of the Camunda 8 SaaS offering, which means you need to operate them yourself in your environment, which might be a private or public cloud. - -![Connectors in SaaS](connecting-the-workflow-engine-with-your-world-assets/connector-in-cloud.png) +As well as Camunda-maintained Connectors, additional Connectors are maintained by the community (made up of consultants, partners, customers, and enthusiastic individuals). You can find a list of Connectors in the [Camunda Marketplace](https://marketplace.camunda.com/). ### Reusing your own integration logic by extracting Connectors @@ -241,11 +231,11 @@ Don’t forget about the possibility to extract common glue code in a simple lib Updating a library that is used in various other applications can be harder than updating one central Connector. In this case, the best approach depends on your scenario. ::: -Whenever you have such glue code running and really understand the implications of making it a Connector, as well as the value it will bring, it can make a lot of sense. +Whenever you have such glue code running and understand the implications of making it a Connector, as well as the value it will bring, it can make a lot of sense. 
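To make the trade-off concrete, here is a minimal sketch of what such custom glue code can look like: a job worker built with the Zeebe Java client. The gateway address, the job type `payment-service`, and the variable handling are illustrative assumptions, not taken from the original text.

```java
import io.camunda.zeebe.client.ZeebeClient;

public class PaymentGlueCodeWorker {

  public static void main(String[] args) throws InterruptedException {
    // Connect to a Zeebe gateway; adjust the address and security settings for your cluster.
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build()) {

      // The job type "payment-service" is an illustrative example.
      client.newWorker()
          .jobType("payment-service")
          .handler((jobClient, job) -> {
            // Custom glue code goes here: call your REST/SOAP/legacy API
            // using job.getVariablesAsMap(), then report the result back.
            jobClient.newCompleteCommand(job.getKey()).send().join();
          })
          .name("payment-worker")
          .open();

      // Keep the JVM alive so the worker keeps polling (simplified for this sketch).
      Thread.currentThread().join();
    }
  }
}
```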
## Recommendation -As a general rule of thumb, prefer custom glue code whenever you don’t have a good reason to go with an existing Connector (like the reasons mentioned above). +As a general rule of thumb, prefer custom glue code whenever you don’t have a good reason to go with an existing Connector. A good reason to use Connectors is if you need to solve complex integrations where little customization is needed, such as the [Camunda RPA bridge](https://docs.camunda.org/manual/latest/user-guide/camunda-bpm-rpa-bridge/) to connect RPA bots (soon to be available for Camunda 8). @@ -253,7 +243,7 @@ Good use of Connectors are also scenarios where you don’t need custom glue cod Some use cases also allow you to create a **resuable generic adapter**; for example, to send status events to your business intelligence system. -But there are also common downsides with Connectors. First, the possibilities are limited to what the creator of the Connector has foreseen. In reality, you might have slightly different requirements and hit a limitation of a Connector soon. +But there are also common downsides with Connectors. First, the possibilities are limited to what the creator of the Connector has foreseen. In reality, you might have slightly different requirements and hit a limitation of a Connector. Second, the Connector requires you to operate this Connector in addition to your own application. The complexity associated with this depends on your environment. diff --git a/versioned_docs/version-8.6/components/best-practices/development/invoking-services-from-the-process-c7.md b/versioned_docs/version-8.6/components/best-practices/development/invoking-services-from-the-process-c7.md index 4e05898330d..a50480fda57 100644 --- a/versioned_docs/version-8.6/components/best-practices/development/invoking-services-from-the-process-c7.md +++ b/versioned_docs/version-8.6/components/best-practices/development/invoking-services-from-the-process-c7.md @@ -160,7 +160,8 @@ Only if the increased latency does not work for your use case, for example, beca
    [Formatting-only hunks in the HTML comparison table: cells are re-indented and re-wrapped; the cell text itself is unchanged. Recoverable cell content:
    - Call a named bean or java class implementing the JavaDelegate interface.
    - Use a configurable Connector (REST or SOAP services provided out-of-the-box).
    - Pull a service task into an external worker thread and inform process engine of completion.
    - Execute a script inside the engine.
    - Use with BPMN elements.
    - Implement via: Java (in same JVM), Expression Language (can reference Java code), BPMN configuration.
    - Configure via: BPMN Attribute on serviceTask (camunda:), BPMN Ext. Element on serviceTask (camunda:), BPMN Element script or BPMN Attribute on scriptTask (camunda:).]
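    As a rough illustration of the first option in the table above (calling a Java class from a service task), the following is a minimal Camunda 7 `JavaDelegate` sketch; the class name, variable names, and payment logic are invented for the example:

```java
import org.camunda.bpm.engine.delegate.DelegateExecution;
import org.camunda.bpm.engine.delegate.JavaDelegate;

// Illustrative delegate for a service task; all names are examples only.
public class ChargeCreditCardDelegate implements JavaDelegate {

  @Override
  public void execute(DelegateExecution execution) throws Exception {
    String orderId = (String) execution.getVariable("orderId");

    // Call the remote payment service here (synchronously, in the same transaction).
    boolean charged = chargeCreditCard(orderId);

    // Store the outcome as a process variable for the following gateway.
    execution.setVariable("paymentSuccessful", charged);
  }

  private boolean chargeCreditCard(String orderId) {
    // Placeholder for the actual service call.
    return true;
  }
}
```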
    diff --git a/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes-c7.md b/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes-c7.md index 94278a8e592..373e6ec7165 100644 --- a/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes-c7.md +++ b/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes-c7.md @@ -89,7 +89,7 @@ This could end with a successful income confirmation. However, it could also end 3 -In this case, a **conditional event** watching this data (e.g. a process variable changed by the human task) triggers and causes the process to reconsider the consequences of the new findings. +In this case, a **conditional event** watching this data (e.g. a process variable changed by the user task) triggers and causes the process to reconsider the consequences of the new findings. A conditional event's condition expression is evaluated at it's "scope" creation time, too, and not just when variable data changes. For our example of a boundary conditional event, that means that the activity it is attached to could principally be left immediately via the boundary event. However, our process example evaluates the data via the exclusive gateway - therefore such a scenario is semantically impossible. @@ -387,7 +387,7 @@ If messages are exchanged between different processes deployed in the workflow e 1 -Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN id in Java using a JavaDelegate: +Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN ID in Java using a JavaDelegate: ```java public class SendOrderReceivedMessageDelegate implements JavaDelegate { diff --git a/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes.md b/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes.md index 158f5a94900..9865f5635ec 100644 --- a/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes.md +++ b/versioned_docs/version-8.6/components/best-practices/development/routing-events-to-processes.md @@ -87,7 +87,7 @@ This could end with a successful income confirmation. However, it could also end 3 -In this case, a **conditional event** watching this data (e.g. a process variable changed by the human task) triggers and causes the process to reconsider the consequences of the new findings. +In this case, a **conditional event** watching this data (e.g. a process variable changed by the user task) triggers and causes the process to reconsider the consequences of the new findings. :::caution Camunda 8 Camunda 8 does not yet [support a **conditional event**](/components/modeler/bpmn/bpmn-coverage.md). @@ -102,7 +102,7 @@ Most events actually occur somewhere external to the workflow engine and need to - Using API: Receive the message by means of your platform-specific activities such as connecting to a AMQP queue or processing a REST request and then route it to the process. - Using Connectors: Configure a Connector to receive messages such as Kafka records and rote it to the process. Note that this possibility works for Camunda 8 only. 
-### Starting process instance by BPMN process id +### Starting process instance by BPMN process ID If you have only one starting point (none start event) in your process definition, you reference the process definition by the ID in the BPMN XML file. @@ -210,7 +210,7 @@ If messages are exchanged between different processes deployed in the workflow e 1 -Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN id in Java: +Use some simple code on the sending side to route the message to a new process instance, e.g. by starting a new process instance by the BPMN ID in Java: ```java @JobWorker(type="routeInput") diff --git a/versioned_docs/version-8.6/components/best-practices/development/service-integration-patterns.md b/versioned_docs/version-8.6/components/best-practices/development/service-integration-patterns.md index 60b84274266..4dacb210f09 100644 --- a/versioned_docs/version-8.6/components/best-practices/development/service-integration-patterns.md +++ b/versioned_docs/version-8.6/components/best-practices/development/service-integration-patterns.md @@ -114,13 +114,13 @@ This is also balanced by the fact that service tasks are simply very handy. The Using send and receive tasks means to use [the message concept built into Zeebe](/components/concepts/messages.md). This is a powerful concept to solve a lot of problems around cardinalities of subscriptions, correlation of the message to the right process instances, and verification of uniqueness of the message (idempotency). -When using messages, you need to provide the correlation id yourself. This means that the correlation id is fully under your control, but it also means that you need to generate it yourself and make sure it is unique. You will most likely end up with generated UUIDs. +When using messages, you need to provide the correlation ID yourself. This means that the correlation ID is fully under your control, but it also means that you need to generate it yourself and make sure it is unique. You will most likely end up with generated UUIDs. You can leverage [message buffering](/components/concepts/messages.md#message-buffering) capabilities, which means that the process does not yet need to be ready to receive the message. You could, for example, do other things in between, but this also means that you will not get an exception right away if a message cannot be correlated, as it is simply buffered. This leaves you in charge of dealing with messages that can never be delivered. Retries are not built-in, so if you need to model a loop to retry the initial service call if no response is received. And (at least in the current Zeebe version), there is no possibility to trigger error events for a receive task, which means you need to model error messages as response payload or separate message types — both are discussed later in this post. -A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/contact) to discuss such a scenario in more depth. 
+A final note for high-performance environments: These powerful messaging capabilities do not come for free and require some overhead within the engine. For pure request/response calls that return within milliseconds, none of the features are truly required. If you are looking to build a high-performance scenario, using service tasks instead of message correlation for request/response calls, you can tune your overall performance or throughput. However, as with everything performance related, the devil is in the detail, so [reach out to us](/reference/contact.md) to discuss such a scenario in more depth. **Summary And recommendations** @@ -132,7 +132,7 @@ The following table summarizes the possibilities and recommendations. | | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send task](/img/bpmn-elements/task-send.svg) | ![Service task](/img/bpmn-elements/task-service.svg) | ![Send and receive task](/img/bpmn-elements/send-and-receive-task.png) | | Technical implications | | Behaves like a service task | A unique correlation ID is generated for you. You don’t have to think about race conditions or idempotency. Timeout handling and retry logic are built-in. API to flag business or technical errors. | Correlation ID needs to be generated yourself, but is fully under control. Message buffering is possible but also necessary. Timeouts and retries need to be modeled. BPMN errors cannot be used. | | Assessment | Very intuitive. | Might be more intuitive for fire and forget semantics, but can also lead to discussions. | Removes visual noise which helps stakeholders to concentrate on core business logic, but requires use of internal job instance keys. | More visual clutter, but also more powerful options around correlation and modeling patterns. | -| Recommendation | Default option, use unless it is confusing for business stakeholders (e.g. because of fire and forget semantics of a task). | Use for fire and forget semantics, unless it leads to unnecessary discussions, in this case use service task instead. | Use when response is within milliseconds and you can pass the Zeebe-internal job instance key around. | Use when the response will take time (> some seconds), or you need a correlation id you can control. | +| Recommendation | Default option, use unless it is confusing for business stakeholders (e.g. because of fire and forget semantics of a task). | Use for fire and forget semantics, unless it leads to unnecessary discussions, in this case use service task instead. | Use when response is within milliseconds and you can pass the Zeebe-internal job instance key around. | Use when the response will take time (> some seconds), or you need a correlation ID you can control. | ## Integrating services with BPMN events diff --git a/versioned_docs/version-8.6/components/best-practices/development/testing-process-definitions.md b/versioned_docs/version-8.6/components/best-practices/development/testing-process-definitions.md index d6bc992865d..edb3ff10769 100644 --- a/versioned_docs/version-8.6/components/best-practices/development/testing-process-definitions.md +++ b/versioned_docs/version-8.6/components/best-practices/development/testing-process-definitions.md @@ -56,15 +56,19 @@ When using Java, most customers use Spring Boot. While this is a common setup fo ### Technical setup using Spring +:::caution +Spring support with Zeebe Process Test uses the community-maintained project Spring Zeebe. +The new Camunda Spring SDK (Camunda 8.6+) is not supported. 
You could still use the testing library but without hooking into the Spring lifecycle. +::: + :::caution JUnit 5 You need to use JUnit 5. Ensure you use JUnit 5 in every test class: the `@Test` annotation you import needs to be `org.junit.jupiter.api.Test`. ::: 1. Use [_JUnit 5_](http://junit.org) as unit test framework. -2. Use the [Spring Zeebe SDK](../../../apis-tools/spring-zeebe-sdk/getting-started.md). -3. Use `@ZeebeSpringTest` to ramp up an in-memory process engine. -4. Use annotations from [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test/) to check whether your expectations about the state of the process are met. -5. Use mocking of your choice, e.g. [Mockito](http://mockito.org) to mock service methods and verify that services are called as expected. +2. Use `@ZeebeSpringTest` to ramp up an in-memory process engine. +3. Use annotations from [zeebe-process-test](https://github.com/camunda-cloud/zeebe-process-test/) to check whether your expectations about the state of the process are met. +4. Use mocking of your choice, e.g. [Mockito](http://mockito.org) to mock service methods and verify that services are called as expected. A test can now look like the following example. The complete source code is available on [GitHub](https://github.com/camunda-community-hub/camunda-cloud-examples/blob/main/twitter-review-java-springboot/src/test/java/org/camunda/community/examples/twitter/TestTwitterProcess.java): diff --git a/versioned_docs/version-8.6/components/best-practices/development/understanding-transaction-handling-c7.md b/versioned_docs/version-8.6/components/best-practices/development/understanding-transaction-handling-c7.md index a22bccca414..e5b710d0f26 100644 --- a/versioned_docs/version-8.6/components/best-practices/development/understanding-transaction-handling-c7.md +++ b/versioned_docs/version-8.6/components/best-practices/development/understanding-transaction-handling-c7.md @@ -90,29 +90,29 @@ Aside a general strategy to mark service tasks as being save points you will oft **Do** configure a savepoint **after** -- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. +- _User tasks_ : This savepoint allows users to complete their tasks without waiting for expensive subsequent steps and without seeing an unexpected rollback of their user transaction to the waitstate before the user task. Sometimes, e.g. when validating user input by means of a subsequent step, you want exactly that: rolling back the user transaction to the user task waitstate. In that case you might want to introduce a savepoint right after the validation step. -- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. 
+- Service Tasks (or other steps) causing _Non-idempotent Side Effects_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a side effect which must not happen more often than once is not accidentally repeated because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. +- Service tasks (or other steps) executing _expensive Ccmputations_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that a computationally expensive step does not have to be repeated just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. End Events should be included if the process can be called from other processes. -- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. +- Receive tasks (or other steps) catching _external events_, possibly with payload Receive Task Message Intermediate Event Signal Intermediate Event : This savepoint makes sure that a external event like a message is persisted as soon as possible. It cannot get lost just because any subsequent steps might roll back the transaction to a savepoint well before the affected step. This applies also to External Service Tasks. **Do** configure a savepoint **before** -- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. +- _Start events_ None Start Event Message Start Event Signal Start Event Timer Start Event: This savepoint allows to immediately return a process instance object to the user thread creating it - well before anything happens in the process instance. -- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. +- Service tasks (or other steps) invoking _remote systems_ Service Task Script Task Send Task Message Intermediate Event Message End Event: This savepoint makes sure that you always transactionally separate the potentially more often failing remote calls from anything that happens before such a step. 
If a service call fails you will observe the process instance waiting in the corresponding service task in cockpit. -- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. +- _Parallel joins_ Parallel Join Inclusive Join Multiinstance Task: Parallel joins synchronize separate process pathes, which is why one of two path executions arriving at a parallel join at the same time will be rolled back with an optimistic locking exception and must be retryed later on. Therefore such a savepoint makes sure that the path synchronisation will be taken care of by Camunda's internal job executor. Note that for multi instance activities, there exists a dedicated "multi instance asynchronous after" flag which saves every single instance of those multiple instances directly after their execution, hence still "before" their technical synchronization. The Camunda JobExecutor works (by default) with [exclusive jobs](https://docs.camunda.org/manual/latest/user-guide/process-engine/the-job-executor/#exclusive-jobs), meaning that just one exclusive job per process instance may be executed at once. Hence, job executor threads will by default not cause optimistic locking exceptions at parallel joins "just by themselves", but other threads using the Camunda API might cause them - either for themselves or also for the job executor. **Don't** configure save points **before** -- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. +- User tasks and other _wait states_ User Task ScrReceiveipt Task Message Intermediate Event Signal Intermediate Event Timer Intermediate Event Event Based Gateway including steps configured as _external tasks_ Service Task Script Task Send Task Message Intermediate Event Message End Event: Such savepoints just introduce overhead as [wait-states](https://docs.camunda.org/manual/latest/user-guide/process-engine/transactions-in-processes/#wait-states) on itself finish the transaction and wait for external intervention anyway. -- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. 
+- _All forking_ and _exclusively joining gateways_ Exclusive Gateway Parallel Join Inclusive Join: There should just be no need to do that, unless execution listeners are configured at such points, which could fail and might need to be transactionally separated from other parts of the execution. ### Adding save points automatically to every model diff --git a/versioned_docs/version-8.6/components/best-practices/modeling/choosing-the-resource-binding-type.md b/versioned_docs/version-8.6/components/best-practices/modeling/choosing-the-resource-binding-type.md index f263474207a..f34b1de93cd 100644 --- a/versioned_docs/version-8.6/components/best-practices/modeling/choosing-the-resource-binding-type.md +++ b/versioned_docs/version-8.6/components/best-practices/modeling/choosing-the-resource-binding-type.md @@ -56,7 +56,8 @@ Camunda 8 supports the following binding types:

  • This option ensures predictable behavior by tying the two versions together, and allows you to deploy future versions of the target resource without disrupting ongoing process instances.

  • It is ideal for self-contained projects without external or shared dependencies.

  • -

    To use the deployment binding option, create and deploy a process application in Web Modeler, +

    + To use the deployment binding option, create and deploy a process application in Web Modeler, or deploy multiple resources together via the Zeebe API.

  • diff --git a/versioned_docs/version-8.6/components/best-practices/modeling/naming-technically-relevant-ids.md b/versioned_docs/version-8.6/components/best-practices/modeling/naming-technically-relevant-ids.md index 86c06417168..150538f99ec 100644 --- a/versioned_docs/version-8.6/components/best-practices/modeling/naming-technically-relevant-ids.md +++ b/versioned_docs/version-8.6/components/best-practices/modeling/naming-technically-relevant-ids.md @@ -36,7 +36,7 @@ The following table provides you with a guideline that we would use in a context ### Editing IDs with Camunda Modeler -We recommend using Camunda Modeler's properties panel to edit technical identifiers and change them according to your naming conventions, like it is shown here for the process id: +We recommend using Camunda Modeler's properties panel to edit technical identifiers and change them according to your naming conventions, like it is shown here for the process ID: ![Properties Panel](naming-technically-relevant-ids-assets/camunda-modeler-properties-panel.png) @@ -77,13 +77,13 @@ Elements in the diagram interchange section (DI) reference identifiers from abov Changing IDs can potentially break your tests or even process logic if done at a late stage of development. Therefore, consider using meaningful IDs right from the beginning and perform the renaming as part of the modeling. -### Aligning the BPMN file name with the process id +### Aligning the BPMN file name with the process ID It is a good practice to _align_ the _file name_ of your BPMN models with the _process id_ of the executable process that is inside the file. ![BPMN file name](naming-technically-relevant-ids-assets/aligning-the-bpmn-file-names.png) -## Generating id constants classes +## Generating ID constants classes If you have lots of process, case, and decision definitions with lots of IDs, consider generating constant classes (e.g. via XSLT) directly from your BPMN or DMN XML files. For example, this can be used for testing. diff --git a/versioned_docs/version-8.6/components/best-practices/operations/securing-camunda-c7.md b/versioned_docs/version-8.6/components/best-practices/operations/securing-camunda-c7.md index 84cfe8e18ee..a6b2163d9f8 100644 --- a/versioned_docs/version-8.6/components/best-practices/operations/securing-camunda-c7.md +++ b/versioned_docs/version-8.6/components/best-practices/operations/securing-camunda-c7.md @@ -30,7 +30,7 @@ The core of the Camunda engine treats **users**, **groups**, and **tenants** as ``` -Or, claim that user task for a specific user via the Java API by referencing the user with a text string-based user id: +Or, claim that user task for a specific user via the Java API by referencing the user with a text string-based user ID: ```java taskService.claim(taskId, "fozzie"); @@ -178,5 +178,5 @@ From Camunda 7.9 on, it is much easier to implement SSO by making use of the [Co You can get started by looking at some examples showing how this can be achieved for different authentication frameworks: - [Very basic authentication filter](https://github.com/camunda-consulting/camunda-webapp-plugins/tree/master/camunda-webapp-plugin-sso-autologin) for the Camunda web apps that reads the user from a provided URL parameter. -- Many _application servers_ support single sign-on out of the box (or through plugins) and can provide the user id to the application. Have a look at the [Single Sign-On Community Extension](https://github.com/camunda/camunda-sso-jboss/). 
+- Many _application servers_ support single sign-on out of the box (or through plugins) and can provide the user ID to the application. Have a look at the [Single Sign-On Community Extension](https://github.com/camunda/camunda-sso-jboss/). - It is quite easy to [integrate Camunda with Spring Security](https://github.com/camunda-consulting/code/tree/master/snippets/springboot-security-sso) so that the framework handles authentication and passes the authenticated user on to Camunda. diff --git a/versioned_docs/version-8.6/components/concepts/clusters.md b/versioned_docs/version-8.6/components/concepts/clusters.md index 00432521a6a..2df77d72957 100644 --- a/versioned_docs/version-8.6/components/concepts/clusters.md +++ b/versioned_docs/version-8.6/components/concepts/clusters.md @@ -12,8 +12,8 @@ When [creating a cluster in SaaS](/components/console/manage-clusters/create-clu Prior to 8.6, clusters were configured by hardware size (S, M, L). -- To learn more about clusters prior to 8.6, see previous documentation versions. -- To learn more about migrating your existing clusters to the newer model, contact your Customer Success Manager. +- This documentation covers the SaaS cluster model introduced in 8.6. To learn more about clusters prior to 8.6, see previous documentation versions. +- To learn how you can migrate your existing clusters to the newer model, contact your Customer Success Manager. ::: @@ -54,14 +54,10 @@ To learn more about choosing your cluster size, see [sizing your environment](/c - Larger cluster sizes include increased performance and capacity, allowing you to serve more workload. - Increased usage such as higher throughput or longer data retention requires a larger cluster size. - Each size increase uses one of your available cluster reservations. For example, purchasing two HWP advanced reservations for your production cluster allows you to configure two clusters of size 1x, or one cluster of size 2x. +- You can change the cluster size at any time. See [resize a cluster](/components/console/manage-clusters/manage-cluster.md#resize-a-cluster). :::note - -Contact your Customer Success Manager to: - -- Increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. -- Increase the cluster size of an existing cluster. - +Contact your Customer Success Manager to increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. ::: ## Free Trial clusters @@ -74,19 +70,16 @@ When your Free Trial plan expires, you are automatically transferred to the Free ### Auto-pause -Free Trial `dev` (or untagged) clusters are automatically paused eight hours after a cluster is created or resumed from a paused state. Auto-pause occurs regardless of cluster usage. +Free Trial clusters are automatically paused after a period of inactivity. Auto-pause occurs regardless of cluster usage. -You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume your cluster](/components/console/manage-clusters/manage-cluster.md#resume-a-cluster). +You can resume a paused cluster at any time, which typically takes five to ten minutes to complete. See [resume a cluster](/components/console/manage-clusters/manage-cluster.md#resume-a-cluster). -- Clusters tagged as `test`, `stage`, or `prod` do not auto-pause. -- Paused clusters are automatically deleted after 30 consecutive paused days. You can change the tag to avoid cluster deletion. -- No data is lost while a cluster is paused. 
All execution and configuration is saved, but cluster components such as Zeebe and Operate are temporarily disabled until you resume the cluster. +- Clusters tagged as `dev` (or untagged) auto-pause eight hours after the cluster is created or resumed from a paused state. +- Clusters tagged as `test`, `stage`, or `prod` auto-pause if there is no cluster activity for 48 hours. +- Cluster disk space is cleared when a trial cluster is paused. + - You will need to redeploy processes to the cluster once it is resumed from a paused state. + - Cluster configuration settings (for example, API Clients, Connector secrets, and IP allowlists) are saved so you can easily resume a cluster. :::tip - -To prevent auto-pause, you can: - -- Tag the cluster as `test`, `stage`, or `prod` instead of `dev`. -- [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter or Enterprise plan. - +To prevent auto-pause, [Upgrade your Free Trial plan](https://camunda.com/pricing/) to a Starter or Enterprise plan. ::: diff --git a/versioned_docs/version-8.6/components/concepts/data-retention.md b/versioned_docs/version-8.6/components/concepts/data-retention.md index aa10e6a970c..55dc9678644 100644 --- a/versioned_docs/version-8.6/components/concepts/data-retention.md +++ b/versioned_docs/version-8.6/components/concepts/data-retention.md @@ -15,7 +15,7 @@ The following time-to-live settings are configured in SaaS for each application. - **Tasklist**: 30 days - **Zeebe**: 7 days -If there are specific requirements for your use-case, [reach out to us](/contact/) to discuss your data retention needs under an Enterprise plan. +If there are specific requirements for your use-case, [reach out to us](/reference/contact.md) to discuss your data retention needs under an Enterprise plan. For more information on development clusters in the Starter or Professional plans, refer to our [fair usage limits of those plans](https://camunda.com/legal/fair-usage-limits-for-starter-plan/). ## Additional information diff --git a/versioned_docs/version-8.6/components/concepts/encryption-at-rest.md b/versioned_docs/version-8.6/components/concepts/encryption-at-rest.md index 7c5148e168b..22f38e0ea53 100644 --- a/versioned_docs/version-8.6/components/concepts/encryption-at-rest.md +++ b/versioned_docs/version-8.6/components/concepts/encryption-at-rest.md @@ -23,7 +23,7 @@ By default, Camunda 8 SaaS cluster data at rest is protected with a provider-man Enterprise customers requiring a higher level of protection can select a dedicated Camunda-managed software or hardware (HSM) encryption key when creating a new cluster. The encryption key is managed by Camunda using Google Cloud Key Management Service (KMS). -- You can only select the encryption type when [creating a cluster](/docs/components/console/manage-clusters/create-cluster.md). You cannot change the encryption type after cluster creation. +- You can only select the encryption type when [creating a cluster](/components/console/manage-clusters/create-cluster.md). You cannot change the encryption type after cluster creation. - You can configure encryption keys on a per-cluster basis so that each cluster has a dedicated encryption key. Encryption keys can be configured for all cluster versions. - You can view cluster encryption key details in **Cluster Details** on the **Console Overview** tab. 
diff --git a/versioned_docs/version-8.6/components/concepts/messages.md b/versioned_docs/version-8.6/components/concepts/messages.md index e17a04e4ebf..a2b84a6fb43 100644 --- a/versioned_docs/version-8.6/components/concepts/messages.md +++ b/versioned_docs/version-8.6/components/concepts/messages.md @@ -4,7 +4,7 @@ title: "Messages" description: "Learn how process instances can respond to incoming messages." --- -Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called **[message correlation](/guides/message-correlation.md)**. +Process instances can respond to incoming messages. Published messages must be mapped onto a process instance. This step is called [message correlation](/components/modeler/bpmn/message-events/message-events.md#message-correlation). ## Message subscriptions @@ -52,7 +52,7 @@ zbctl publish message "Money collected" --correlationKey "order-123" --ttl 1h ## Message cardinality -A message is correlated only _once_ to a process (based on the BPMN process id), across all versions of this process. If multiple subscriptions for the same process are opened (by multiple process instances or within one instance), the message is correlated only to one of the subscriptions. +A message is correlated only _once_ to a process (based on the BPMN process ID), across all versions of this process. If multiple subscriptions for the same process are opened (by multiple process instances or within one instance), the message is correlated only to one of the subscriptions. When subscriptions are opened for different processes, the message is correlated to _all_ the subscriptions. @@ -60,14 +60,14 @@ A message is _not_ correlated to a message start event subscription if an instan ## Message uniqueness -A message can have an optional message id — a unique id to ensure the message is published and processed only once (i.e. idempotency). The id can be any string; for example, a request id, a tracking number, or the offset/position in a message queue. +A message can have an optional message ID — a unique ID to ensure the message is published and processed only once (i.e. idempotency). The ID can be any string; for example, a request ID, a tracking number, or the offset/position in a message queue. -A message is rejected and not correlated if a message with the same name, the same correlation key, and the same id is already buffered. After the message is discarded from the buffer, a message with the same name, correlation key, and id can be published again. +A message is rejected and not correlated if a message with the same name, the same correlation key, and the same ID is already buffered. After the message is discarded from the buffer, a message with the same name, correlation key, and ID can be published again. -The uniqueness check is disabled when no message id is set. +The uniqueness check is disabled when no message ID is set.
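For comparison with the zbctl example below, here is a minimal sketch of the same idempotent publication using the Zeebe Java client; the message ID value and gateway address are illustrative assumptions:

```java
import io.camunda.zeebe.client.ZeebeClient;
import java.time.Duration;

public class PublishMoneyCollected {

  public static void main(String[] args) {
    try (ZeebeClient client = ZeebeClient.newClientBuilder()
        .gatewayAddress("localhost:26500")
        .usePlaintext()
        .build()) {

      // The message ID makes this publication idempotent: while the first message
      // is buffered, a second publish with the same name, correlation key, and ID is rejected.
      client.newPublishMessageCommand()
          .messageName("Money collected")
          .correlationKey("order-123")
          .messageId("payment-tx-4711") // illustrative unique ID, e.g. a request ID
          .timeToLive(Duration.ofHours(1))
          .send()
          .join();
    }
  }
}
```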
    - Publish message with id via zbctl
    + Publish message with ID via zbctl

    ``` @@ -118,6 +118,10 @@ The first message creates a new process instance. The following messages are cor When the instance ends and messages with the same correlation key are not correlated yet, a new process instance is created. +:::note +You may also use TTL to wait for messages that may arrive earlier when combining [start events and intermediate catch events](/docs/components/modeler/bpmn/events.md). +::: + ### Single instance **Problem**: Create exactly one instance of a process diff --git a/versioned_docs/version-8.6/components/concepts/process-instance-creation.md b/versioned_docs/version-8.6/components/concepts/process-instance-creation.md index 6a43c5143a0..04f1d5369c8 100644 --- a/versioned_docs/version-8.6/components/concepts/process-instance-creation.md +++ b/versioned_docs/version-8.6/components/concepts/process-instance-creation.md @@ -14,7 +14,7 @@ Camunda 8 supports the following ways to create a process instance: ## Commands -A process instance is created by sending a command specifying the BPMN process id, or the unique key of the process. +A process instance is created by sending a command specifying the BPMN process ID, or the unique key of the process. There are two commands to create a process instance, outlined in the sections below. @@ -22,13 +22,14 @@ There are two commands to create a process instance, outlined in the sections be A process that has a [none start event](/components/modeler/bpmn/none-events/none-events.md#none-start-events) is started explicitly using **[CreateProcessInstance](/apis-tools/zeebe-api/gateway-service.md#createprocessinstance-rpc)**. -This command creates a new process instance and immediately responds with the process instance id. The execution of the process occurs after the response is sent. +This command creates a new process instance and immediately responds with the process instance ID. The execution of the process occurs after the response is sent. ![create-process](assets/create-process.png) -

    - Code example -

    Create a process instance: +

    + Code example +

    +Create a process instance: ``` zbctl create instance "order-process" @@ -38,16 +39,16 @@ Response: ``` { - "processKey": 2251799813685249, - "bpmnProcessId": "order-process", - "version": 1, - "processInstanceKey": 2251799813686019 + "processKey": 2251799813685249, + "bpmnProcessId": "order-process", + "version": 1, + "processInstanceKey": 2251799813686019 } ``` -

    -
    +

    +
    ### Create and await results @@ -67,7 +68,8 @@ When the client resends the command, it creates a new process instance.
    Code example -

    Create a process instance and await results: +

    +Create a process instance and await results: ``` zbctl create instance "order-process" --withResult --variables '{"orderId": "1234"}' @@ -112,7 +114,7 @@ To start the process instance at a user-defined element, you need to provide sta By default, the instruction starts before the given element. This means input mappings of that element are applied as usual. Multiple instructions can be provided to start the process instance at more than one element. -You can activate the same element multiple times inside the created process instance by referring to the same element id in more than one instruction. +You can activate the same element multiple times inside the created process instance by referring to the same element ID in more than one instruction. :::note Start instructions have the same [limitations as process instance modification](/components/concepts/process-instance-modification.md#limitations), e.g., it is not possible to start at a sequence flow. @@ -123,7 +125,7 @@ Start instructions are supported for both `CreateProcessInstance` commands.

    Code example

    - Create a process instance starting before the 'ship_parcel' element: +Create a process instance starting before the 'ship_parcel' element: ```java client.newCreateInstanceCommand() diff --git a/versioned_docs/version-8.6/components/concepts/resource-deletion.md b/versioned_docs/version-8.6/components/concepts/resource-deletion.md index dfca210ff4e..5315a73e4e6 100644 --- a/versioned_docs/version-8.6/components/concepts/resource-deletion.md +++ b/versioned_docs/version-8.6/components/concepts/resource-deletion.md @@ -31,7 +31,7 @@ Zeebe's state. As a result, it is not possible to create new process instances f to create one will result in a `NOT_FOUND` exception. Deleting a process definition also deletes historical data. Zeebe will **never** reuse a process version. When deleting a process definition, it keeps track of the version number. -Deploying a new process with the same id will increment the version as usual. +Deploying a new process with the same ID will increment the version as usual. ### Deleting the latest version @@ -53,8 +53,8 @@ new `latest` instead. ### Call activities -A [call activity](/components/modeler/bpmn/call-activities/call-activities.md) references a process by id. It's -possible that all process definitions for this process id are deleted. In this case, Zeebe creates an [incident](/components/concepts/incidents.md) on the +A [call activity](/components/modeler/bpmn/call-activities/call-activities.md) references a process by ID. It's +possible that all process definitions for this process ID are deleted. In this case, Zeebe creates an [incident](/components/concepts/incidents.md) on the call activity, informing you that the process cannot be not found. ### Limitations @@ -74,5 +74,5 @@ a `NOT_FOUND` exception. Deleting a DRG also deletes historical data. ### Business rule tasks A [business rule task](/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md) references a decision -by id. It's possible that all versions of this decision are deleted. When this happens, an incident is created on the -business rule task with the message that no decision with the given decision id is found. +by ID. It's possible that all versions of this decision are deleted. When this happens, an incident is created on the +business rule task with the message that no decision with the given decision ID is found. diff --git a/versioned_docs/version-8.6/components/concepts/what-is-camunda-8.md b/versioned_docs/version-8.6/components/concepts/what-is-camunda-8.md index 1374cbd8d1f..3a65f8d66ee 100644 --- a/versioned_docs/version-8.6/components/concepts/what-is-camunda-8.md +++ b/versioned_docs/version-8.6/components/concepts/what-is-camunda-8.md @@ -105,7 +105,7 @@ The platform and tools are usable in your environment right away, with full publ ## Next steps -- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/contact/) page. +- To request information about Camunda 8 performance and benchmarking, refer to our [Contact](/reference/contact.md) page. 
- [Introduction to Camunda 8](/guides/introduction-to-camunda-8.md) - [Create a Camunda 8 account](/guides/create-account.md) - [Migrate from Camunda 7 to Camunda 8](/guides/migrating-from-camunda-7/index.md) diff --git a/versioned_docs/version-8.6/components/concepts/workflow-patterns.md b/versioned_docs/version-8.6/components/concepts/workflow-patterns.md index ace1b2cbd69..8d148ee15e5 100644 --- a/versioned_docs/version-8.6/components/concepts/workflow-patterns.md +++ b/versioned_docs/version-8.6/components/concepts/workflow-patterns.md @@ -276,10 +276,6 @@ An important problem to solve is how to roll back a business transaction in case In BPMN, you can use [compensation events](/components/modeler/bpmn/bpmn-coverage.md) to easily implement compensations in your processes. -:::note -The compensation event is supported in Camunda 7, but not yet in Camunda 8. It is on the roadmap and will eventually be available in Camunda 8. -::: -

    1 diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-sdk.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-sdk.md index f2da5c2c7fd..b79128ec119 100644 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-sdk.md +++ b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-sdk.md @@ -1075,8 +1075,9 @@ For example, you can spin up a custom client with the [Zeebe Java client](/apis-tools/java-client/index.md) as follows: ```java -import io.camunda.connector.MyConnectorFunction -import io.camunda.connector.runtime.jobworker.outbound.ConnectorJobHandler; +import io.camunda.connector.MyConnectorFunction; +import io.camunda.connector.runtime.core.outbound.ConnectorJobHandler; +import io.camunda.connector.validation.impl.DefaultValidationProvider; import io.camunda.zeebe.client.ZeebeClient; public class Main { @@ -1087,7 +1088,7 @@ public class Main { zeebeClient.newWorker() .jobType("io.camunda:template:1") - .handler(new ConnectorJobHandler(new MyConnectorFunction())) + .handler(new ConnectorJobHandler(new MyConnectorFunction(), new DefaultValidationProvider())) .name("MESSAGE") .fetchVariables("authentication", "message") .open(); @@ -1102,5 +1103,5 @@ it with your job handler implementation that handles invoking the Connector func Your custom job handler needs to create a `OutboundConnectorContext` that the Connector function can use to handle variables, secrets, and Connector results. You can extend the -provided `io.camunda.connector.impl.outbound.AbstractConnectorContext` to quickly gain access +provided `io.camunda.connector.runtime.core.AbstractConnectorContext` to quickly gain access to most of the common context operations. diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-template-generator.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-template-generator.md new file mode 100644 index 00000000000..d0d919499ea --- /dev/null +++ b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-template-generator.md @@ -0,0 +1,57 @@ +--- +id: connector-template-generator +title: Generate a Connector template +description: Learn how to generate Connector templates for easier custom Connector creation. +--- + +Camunda 8 SaaS only + +You can configure and automatically generate a custom [Connector template](/components/connectors/custom-built-connectors/connector-templates.md) in Web Modeler. + +You can start from a blank template or import an existing API definition such as an [OpenAPI specification](https://swagger.io/resources/open-api/), [Swagger specification](https://swagger.io/resources/open-api/), or a [Postman collection](https://www.postman.com/collection/). For example, download a Postman collection as a YAML file, import this into the generator, and choose which methods to include in the generated template. + +## Generate a Connector template + +To generate a Connector template: + +1. Select the Modeler project you want to create the template in. +1. Click **Create new**, and select **Connector template** to open the **Create new Connector template** screen. + ![fCreate the new Connector template](./img/configure-template-details.png) + +1. Select the template starting point. + + - **Start from API definition**: Import an existing API definition file as a starting point for the template. 
If you select this option, the **Import data source** section is shown below the template details. + + - **Start from blank**: Start from a blank template. + +1. Configure the template details in the **Configure template details** section. + + - **Name:** Enter a clear and easily understood name for the template. For example, include the brand name if the template connects to a service or tool, or indicate the template's main feature. + + - **Description:** Enter a description for the template. For example, describe the template's main features and benefits. + + - **Icon:** Use a default BPMN symbol as the template icon in a BPMN diagram, or upload a custom icon. Supported icon formats are SVG, PNG, and JPEG, with a maximum file size limit of 8 KB. Icons must be a minimum of 512 x 512 pixels in size. + + - **Import from URL**: Enter the URL for the image you want to import, and click **Import icon**. + - **Upload file**: Drag and drop a file into the upload area, or click the link and select a file to upload. + + :::note + If you do not configure the template details at this point, a default name is generated and a default BPMN symbol selected. You can edit these template details after the template is created. + ::: + +1. If you selected the **Start from API definition** option, the **Import data source** section is shown. Select and upload an API definition. JSON and YAML file formats are supported, with a maximum file size limit of 1024 KB. + + 1. Select the format you are going to upload (OpenAPI or Postman). + 1. Upload the API definition. + + - **Import from URL**: Enter the URL for the API definition you want to import, and click **Import icon**. + - **Upload file**: Drag and drop a file into the upload area, or click the link and select a file to upload. + + 1. After the import is complete, select which actions to include in the template from the generated list of supported methods. + ![List of imported methods](./img/Imported-methods.png) + +1. Click **Create template** to create and open the newly generated Connector template in the [template editor](/components/connectors/manage-connector-templates.md). + +:::info +For more information on working with and configuring Connector templates, see [Connector templates](/components/connectors/custom-built-connectors/connector-templates.md). +::: diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-templates.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-templates.md index 4b1dad5a0f3..d9f3b816c46 100644 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-templates.md +++ b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/connector-templates.md @@ -7,8 +7,10 @@ description: Learn how to modify BPMN elements with Connector templates to creat import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -**Connector templates** are JSON configuration files, which customize how a BPMN element is shown, -and how it can be configured by process developers. Connector templates are a specific kind of [element template](/components/modeler/desktop-modeler/element-templates/about-templates.md). 
+Connectors are available [out-of-the-box (OOTB)](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md) and come with [Connector templates](/components/connectors/manage-connector-templates.md) which customize how a BPMN element is shown, +and how it can be configured by process developers. Connector templates are a specific kind of [element templates](/components/modeler/desktop-modeler/element-templates/about-templates.md), which can also be used when creating custom Connectors via the [Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). + +Additionally, the [Camunda Marketplace](/components/modeler/web-modeler/camunda-marketplace.md) provides Connectors by Camunda partners and community contributors. Before developing one, you'll need to decide what you would like to achieve with your Connector. Currently, the options are: @@ -442,6 +444,566 @@ a simple JSON configuration: +## Inbound boundary event Connector templates + +You can, for example, allow the user to model and configure the following **HTTP webhook Connector** for boundary events by providing +a simple JSON configuration: + + + + + +![Webhook Inbound boundary Connector Example.png](img/custom-connector-template-inbound-boundary.png) + + + + + +```json +{ + "$schema": "https://unpkg.com/@camunda/zeebe-element-templates-json-schema/resources/schema.json", + "name": "Webhook Boundary Event Connector", + "id": "io.camunda.connectors.webhook.WebhookConnectorBoundary.v1", + "description": "Configure webhook to receive callbacks", + "documentationRef": "https://docs.camunda.io/docs/components/connectors/out-of-the-box-connectors/http-webhook/", + "version": 11, + "category": { + "id": "connectors", + "name": "Connectors" + }, + "appliesTo": ["bpmn:BoundaryEvent"], + "elementType": { + "value": "bpmn:BoundaryEvent", + "eventDefinition": "bpmn:MessageEventDefinition" + }, + "groups": [ + { + "id": "endpoint", + "label": "Webhook configuration" + }, + { + "id": "authentication", + "label": "Authentication" + }, + { + "id": "authorization", + "label": "Authorization" + }, + { + "id": "webhookResponse", + "label": "Webhook response" + }, + { + "id": "activation", + "label": "Activation" + }, + { + "id": "correlation", + "label": "Correlation", + "tooltip": "Learn more about message correlation in the documentation." 
+ }, + { + "id": "output", + "label": "Output mapping" + } + ], + "properties": [ + { + "value": "io.camunda:webhook:1", + "binding": { + "name": "inbound.type", + "type": "zeebe:property" + }, + "type": "Hidden" + }, + { + "id": "inbound.method", + "label": "Webhook method", + "description": "Select HTTP method", + "optional": false, + "value": "any", + "group": "endpoint", + "binding": { + "name": "inbound.method", + "type": "zeebe:property" + }, + "type": "Dropdown", + "choices": [ + { + "name": "Any", + "value": "any" + }, + { + "name": "GET", + "value": "get" + }, + { + "name": "POST", + "value": "post" + }, + { + "name": "PUT", + "value": "put" + }, + { + "name": "DELETE", + "value": "delete" + } + ] + }, + { + "id": "inbound.context", + "label": "Webhook ID", + "description": "The webhook ID is a part of the URL", + "optional": false, + "constraints": { + "notEmpty": true, + "pattern": { + "value": "^[a-zA-Z0-9]+([-_][a-zA-Z0-9]+)*$", + "message": "can only contain letters, numbers, or single underscores/hyphens and cannot begin or end with an underscore/hyphen" + } + }, + "group": "endpoint", + "binding": { + "name": "inbound.context", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "inbound.shouldValidateHmac", + "label": "HMAC authentication", + "description": "Choose whether HMAC verification is enabled. See documentation and example that explains how to use HMAC-related fields", + "optional": false, + "value": "disabled", + "group": "authentication", + "binding": { + "name": "inbound.shouldValidateHmac", + "type": "zeebe:property" + }, + "type": "Dropdown", + "choices": [ + { + "name": "Enabled", + "value": "enabled" + }, + { + "name": "Disabled", + "value": "disabled" + } + ] + }, + { + "id": "inbound.hmacSecret", + "label": "HMAC secret key", + "description": "Shared secret key", + "optional": true, + "feel": "optional", + "group": "authentication", + "binding": { + "name": "inbound.hmacSecret", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.hmacHeader", + "label": "HMAC header", + "description": "Name of header attribute that will contain the HMAC value", + "optional": true, + "feel": "optional", + "group": "authentication", + "binding": { + "name": "inbound.hmacHeader", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.hmacAlgorithm", + "label": "HMAC algorithm", + "description": "Choose HMAC algorithm", + "optional": false, + "value": "sha_256", + "group": "authentication", + "binding": { + "name": "inbound.hmacAlgorithm", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "Dropdown", + "choices": [ + { + "name": "SHA-1", + "value": "sha_1" + }, + { + "name": "SHA-256", + "value": "sha_256" + }, + { + "name": "SHA-512", + "value": "sha_512" + } + ] + }, + { + "id": "inbound.hmacScopes", + "label": "HMAC scopes", + "description": "Set HMAC scopes for calculating signature data. 
See documentation", + "optional": true, + "feel": "required", + "group": "authentication", + "binding": { + "name": "inbound.hmacScopes", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.shouldValidateHmac", + "equals": "enabled", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.type", + "label": "Authorization type", + "description": "Choose the authorization type", + "value": "NONE", + "group": "authorization", + "binding": { + "name": "inbound.auth.type", + "type": "zeebe:property" + }, + "type": "Dropdown", + "choices": [ + { + "name": "None", + "value": "NONE" + }, + { + "name": "Basic", + "value": "BASIC" + }, + { + "name": "API key", + "value": "APIKEY" + }, + { + "name": "JWT", + "value": "JWT" + } + ] + }, + { + "id": "inbound.auth.username", + "label": "Username", + "description": "Username for basic authentication", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.username", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "BASIC", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.password", + "label": "Password", + "description": "Password for basic authentication", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.password", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "BASIC", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.apiKey", + "label": "API key", + "description": "Expected API key", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.apiKey", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "APIKEY", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.apiKeyLocator", + "label": "API key locator", + "description": "A FEEL expression that extracts API key from the request. See documentation", + "optional": false, + "value": "=split(request.headers.authorization, \" \")[2]", + "constraints": { + "notEmpty": true + }, + "feel": "required", + "group": "authorization", + "binding": { + "name": "inbound.auth.apiKeyLocator", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "APIKEY", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.jwt.jwkUrl", + "label": "JWK URL", + "description": "Well-known URL of JWKs", + "optional": false, + "feel": "optional", + "group": "authorization", + "binding": { + "name": "inbound.auth.jwt.jwkUrl", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "JWT", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.jwt.permissionsExpression", + "label": "JWT role property expression", + "description": "Expression to extract the roles from the JWT token. 
See documentation", + "optional": false, + "feel": "required", + "group": "authorization", + "binding": { + "name": "inbound.auth.jwt.permissionsExpression", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "JWT", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.auth.jwt.requiredPermissions", + "label": "Required roles", + "description": "List of roles to test JWT roles against", + "optional": false, + "feel": "required", + "group": "authorization", + "binding": { + "name": "inbound.auth.jwt.requiredPermissions", + "type": "zeebe:property" + }, + "condition": { + "property": "inbound.auth.type", + "equals": "JWT", + "type": "simple" + }, + "type": "String" + }, + { + "id": "inbound.responseExpression", + "label": "Response expression", + "description": "Expression used to generate the HTTP response", + "optional": true, + "feel": "required", + "group": "webhookResponse", + "binding": { + "name": "inbound.responseExpression", + "type": "zeebe:property" + }, + "type": "Text" + }, + { + "id": "inbound.verificationExpression", + "label": "One time verification response expression", + "description": "Specify condition and response. Learn more in the documentation", + "optional": true, + "feel": "required", + "group": "webhookResponse", + "binding": { + "name": "inbound.verificationExpression", + "type": "zeebe:property" + }, + "type": "Text" + }, + { + "id": "activationCondition", + "label": "Activation condition", + "description": "Condition under which the Connector triggers. Leave empty to catch all events", + "optional": true, + "feel": "required", + "group": "activation", + "binding": { + "name": "activationCondition", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "consumeUnmatchedEvents", + "label": "Consume unmatched events", + "value": true, + "group": "activation", + "binding": { + "name": "consumeUnmatchedEvents", + "type": "zeebe:property" + }, + "tooltip": "Unmatched events are rejected by default, allowing the upstream service to handle the error. 
Check this box to consume unmatched events and return a success response", + "type": "Boolean" + }, + { + "id": "correlationKeyProcess", + "label": "Correlation key (process)", + "description": "Sets up the correlation key from process variables", + "constraints": { + "notEmpty": true + }, + "feel": "required", + "group": "correlation", + "binding": { + "name": "correlationKey", + "type": "bpmn:Message#zeebe:subscription#property" + }, + "type": "String" + }, + { + "id": "correlationKeyPayload", + "label": "Correlation key (payload)", + "description": "Extracts the correlation key from the incoming message payload", + "constraints": { + "notEmpty": true + }, + "feel": "required", + "group": "correlation", + "binding": { + "name": "correlationKeyExpression", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "messageIdExpression", + "label": "Message ID expression", + "description": "Expression to extract unique identifier of a message", + "optional": true, + "feel": "required", + "group": "correlation", + "binding": { + "name": "messageIdExpression", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "messageTtl", + "label": "Message TTL", + "description": "Time-to-live for the message in the broker (ISO-8601 duration)", + "optional": true, + "constraints": { + "notEmpty": false, + "pattern": { + "value": "^(PT.*|)$", + "message": "must be an ISO-8601 duration" + } + }, + "feel": "optional", + "group": "correlation", + "binding": { + "name": "messageTtl", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "messageNameUuid", + "generatedValue": { + "type": "uuid" + }, + "group": "correlation", + "binding": { + "name": "name", + "type": "bpmn:Message#property" + }, + "type": "Hidden" + }, + { + "id": "resultVariable", + "label": "Result variable", + "description": "Name of variable to store the response in", + "group": "output", + "binding": { + "name": "resultVariable", + "type": "zeebe:property" + }, + "type": "String" + }, + { + "id": "resultExpression", + "label": "Result expression", + "description": "Expression to map the response into process variables", + "feel": "required", + "group": "output", + "binding": { + "name": "resultExpression", + "type": "zeebe:property" + }, + "type": "Text" + } + ], + "icon": { + "contents": "data:image/svg+xml;base64,PHN2ZyBpZD0naWNvbicgeG1sbnM9J2h0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnJyB3aWR0aD0nMTgnIGhlaWdodD0nMTgnIHZpZXdCb3g9JzAgMCAzMiAzMic+CiAgPGRlZnM+CiAgICA8c3R5bGU+LmNscy0xIHsgZmlsbDogbm9uZTsgfTwvc3R5bGU+CiAgPC9kZWZzPgogIDxwYXRoCiAgICBkPSdNMjQsMjZhMywzLDAsMSwwLTIuODE2NC00SDEzdjFhNSw1LDAsMSwxLTUtNVYxNmE3LDcsMCwxLDAsNi45Mjg3LDhoNi4yNTQ5QTIuOTkxNCwyLjk5MTQsMCwwLDAsMjQsMjZaJy8+CiAgPHBhdGgKICAgIGQ9J00yNCwxNmE3LjAyNCw3LjAyNCwwLDAsMC0yLjU3LjQ4NzNsLTMuMTY1Ni01LjUzOTVhMy4wNDY5LDMuMDQ2OSwwLDEsMC0xLjczMjYuOTk4NWw0LjExODksNy4yMDg1Ljg2ODYtLjQ5NzZhNS4wMDA2LDUuMDAwNiwwLDEsMS0xLjg1MSw2Ljg0MThMMTcuOTM3LDI2LjUwMUE3LjAwMDUsNy4wMDA1LDAsMSwwLDI0LDE2WicvPgogIDxwYXRoCiAgICBkPSdNOC41MzIsMjAuMDUzN2EzLjAzLDMuMDMsMCwxLDAsMS43MzI2Ljk5ODVDMTEuNzQsMTguNDcsMTMuODYsMTQuNzYwNywxMy44OSwxNC43MDhsLjQ5NzYtLjg2ODItLjg2NzctLjQ5N2E1LDUsMCwxLDEsNi44MTItMS44NDM4bDEuNzMxNSwxLjAwMmE3LjAwMDgsNy4wMDA4LDAsMSwwLTEwLjM0NjIsMi4wMzU2Yy0uNDU3Ljc0MjctMS4xMDIxLDEuODcxNi0yLjA3MzcsMy41NzI4WicvPgogIDxyZWN0IGlkPSdfVHJhbnNwYXJlbnRfUmVjdGFuZ2xlXycgZGF0YS1uYW1lPScmbHQ7VHJhbnNwYXJlbnQgUmVjdGFuZ2xlJmd0OycgY2xhc3M9J2Nscy0xJwogICAgd2lkdGg9JzMyJyBoZWlnaHQ9JzMyJy8+Cjwvc3ZnPg==" + } +} +``` + + + + + ## Outbound Connector templates You can, for example, allow the user 
to model and configure the following **REST Connector** by providing a JSON configuration for a service task: diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/Imported-methods.png b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/Imported-methods.png new file mode 100644 index 00000000000..5c3ce9071bb Binary files /dev/null and b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/Imported-methods.png differ diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/Launch-template-generator.png b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/Launch-template-generator.png new file mode 100644 index 00000000000..2cf3fcb582b Binary files /dev/null and b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/Launch-template-generator.png differ diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/configure-template-details.png b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/configure-template-details.png new file mode 100644 index 00000000000..27012b099e4 Binary files /dev/null and b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/configure-template-details.png differ diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-boundary.png b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-boundary.png new file mode 100644 index 00000000000..933dd0df8d5 Binary files /dev/null and b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/img/custom-connector-template-inbound-boundary.png differ diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/010-to-020.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/010-to-020.md deleted file mode 100644 index e71a7316283..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/010-to-020.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -id: 010-to-020 -title: Update 0.1 to 0.2 -description: "Review which adjustments must be made to migrate from Connector SDK 0.1.x to 0.2.0." ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.1.x to 0.2.0. - -:::caution - -Be aware that the update from 0.1 to 0.2 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.2.0, we introduce the following structural changes: - -- Input validation and secret replacement move from writing imperative code to declaratively using annotations. -- The outbound aspect of APIs is more explicit. Classes have been moved to more explicit packages and have been renamed. -- New required annotation for outbound Connectors. - -### Declarative validation and secrets - -Input objects previously had to implement the `ConnectorInput` interface to participate in validation and secret replacement -initiated from the `ConnectorContext` using its `validate` and `replaceSecrets` methods respectively. - -With version 0.2.0, we remove the imperative approach for validation and secret replacement from the SDK. 
-Instead, you can use annotations to describe the constraints of input attributes and mark those that can contain -secrets. - -These are two input objects written with the SDK version 0.1.x: - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class MyConnectorRequest implements ConnectorInput { - - private String message; - private Authentication authentication; - - @Override - public void validateWith(final Validator validator) { - validator.require(message, "message"); - validator.require(authentication, "authentication"); - validateIfNotNull(authentication, validator); - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - replaceSecretsIfNotNull(authentication, secretStore); - } -} -``` - -```java -import io.camunda.connector.api.ConnectorInput; -import io.camunda.connector.api.SecretStore; -import io.camunda.connector.api.Validator; - -public class Authentication implements ConnectorInput { - - private String user; - private String token; - - @Override - public void validateWith(final Validator validator) { - validator.require(user, "user"); - validator.require(token, "token"); - if (token != null && !(token.startsWith("xobx") || token.startsWith("secrets."))) { - validator.addErrorMessage("Token must start with \"xobx\" or be a secret"); - } - } - - @Override - public void replaceSecrets(final SecretStore secretStore) { - token = secretStore.replaceSecret(token); - } -} -``` - -You can express the same input objects with SDK version 0.2.0 as follows: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.Valid; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.NotNull; - -public class MyConnectorRequest { - - @NotEmpty - private String message; - - @NotNull - @Valid - @Secret - private Authentication authentication; -} -``` - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import jakarta.validation.constraints.Pattern; - -public class Authentication { - - @NotEmpty - private String user; - - @NotEmpty - @Pattern("^(xobx-|secret).+") - @Secret - private String token; -} -``` - -As a result, you have to remove the `ConnectorInput` interface implementation and the imperative code that comes with `validateWith` -and `replaceSecrets`. You can now concisely describe the constraints of attributes rather than express them in imperative code. - -To use annotaion-based validation out of the box, you can include the new artifact `connector-validation` that -comes with the SDK. - - - - - -```xml - - io.camunda.connector - connector-validation - 0.2.0 - -``` - - - - - -```yml -implementation 'io.camunda.connector:connector-validation:0.2.0' -``` - - - - -You can read more about validation and secret replacement in our -[SDK guide](/components/connectors/custom-built-connectors/connector-sdk.md). - -### Explicit outbound aspect - -With version 0.2.0 of the SDK, we make the outbound aspect of those components specific to outbound connectivity -more visible. This separates those SDK components that are tightly coupled to outbound from those that -will be reusable for inbound. - -With this change, the names of the following classes need to be adjusted: - -- Rename `io.camunda.connector.api.ConnectorContext` to `io.camunda.connector.api.outbound.OutboundConnectorContext`. 
-- Rename `io.camunda.connector.api.ConnectorFunction` to `io.camunda.connector.api.outbound.OutboundConnectorFunction`. -- Rename `io.camunda.connector.api.SecretProvider` to `io.camunda.connector.api.secret.SecretProvider`. -- Rename `io.camunda.connector.api.SecretStore` to `io.camunda.connector.api.secret.SecretStore`. -- Rename `io.camunda.connector.test.ConnectorContextBuilder` to `io.camunda.connector.test.outbound.OutboundConnectorContextBuilder`. - -As a result, you must replace all occurrences of the old class names and imports with the new ones. This includes the -SPI for the Connector function itself. Therefore, rename the file `META-INF/services/io.camunda.connector.api.ConnectorFunction` to -`META-INF/services/io.camunda.connector.api.outbound.OutboundConnectorFunction`. - -### `@OutboundConnector` annotation - -For best interoperability, Connectors provide default meta-data (`name`, `type`, `inputVariables`) via the `@OutboundConnector` annotation: - -```java -@OutboundConnector( - name = "PING", - inputVariables = {"caller"}, - type = "io.camunda.example.PingConnector:1" -) -public class PingConnector implements OutboundConnectorFunction { - ... -} -``` - -## Connector runtime environment - -If using the -[pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that comes with the SDK does not fit your use case, you can create a custom runtime environment. - -With version 0.2.0 of the [job worker runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#connector-job-handler), you need to make the following changes: - -- Rename `io.camunda.connector.runtime.jobworker.ConnectorJobHandler` to `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler`. -- Rename Connector-related env variables from `ZEEBE_` to `CONNECTOR_`. Zeebe configuration properties remain unchanged. - -As a general change in behavior the module will now pick up Connectors from classpath unless it is explicitly configured via environment variables. - -Also, take the name changes in the [SDK core](#explicit-outbound-aspect) into account. - -Implementing your own Connector wrapper you need to provide a Connector context specific to -your environment. Consider extending the `io.camunda.connector.impl.outbound.AbstractConnectorContext` -instead of implementing the `io.camunda.connector.api.ConnectorContext` yourself. Most of the commonly needed functionality -is already provided in there. diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md deleted file mode 100644 index 61142a93750..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/0100-to-0110.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -id: 0100-to-0110 -title: Update 0.10 to 0.11 -description: "Review which adjustments must be made to migrate from Connector SDK 0.10.x to 0.11.0." ---- - -Beginner - -:::note -Migrate directly to version 0.11.2 of the SDK. This contains a fix for several issues in the 0.11.0 release. -::: - -This SDK release is not backwards-compatible. We are moving towards a stable Connectors release and continue to improve the experience of developing custom Connectors. 
- -In this SDK version, we changed the `OutboundConnectorContext` and `InboundConnectorContext interfaces significantly.` You can no longer use the `getVariablesAsType` or `getPropertiesAsType` methods in outbound and inbound Connectors, respectively. -Use the new `bindVariables` method instead, as it takes care of secret replacement, payload validation, and deserialization automatically. - -We are moving away from a mandatory `@Secret` annotation. -From this release onwards, secrets are automatically replaced in all input variables/properties without the need to explicitly declare an annotation. - -To migrate your Connector implementations, complete the following: - -1. If you used the `OutboundConnectorContext::getVariablesAsType` method in you outbound Connector functions, replace it with `OutboundConnectorContext::bindVariables`. -2. If you used the `InboundConnectorContext::getPropertiesAsType` method in you inbound Connector executables, replace it with `InboundConnectorContext::bindProperties`. -3. Remove calls to `OutboundConnectorContext::replaceSecrets` and `InboundConnectorContext::replaceSecrets` methods. The secrets are now replaced automatically. -4. Remove calls to `OutboundConnectorContext::validate` and `InboundConnectorContext::validate` methods. The validation is now performed automatically. -5. If you used the `@Secret` annotation in your Connector implementations, you can safely remove it as it has no effect. diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/020-to-030.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/020-to-030.md deleted file mode 100644 index 248d7e7cc31..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/020-to-030.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -id: 020-to-030 -title: Update 0.2 to 0.3 -description: "Review which adjustments must be made to migrate from Connector SDK 0.2.x to 0.3.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.2.x to 0.3.0. - -:::caution - -Be aware that the update from 0.2 to 0.3 requires manual migration steps as described below. - -::: - -## Connector function - -With SDK version 0.3.0, we introduce the following structural changes: - -- Input validation moves from Jakarta Bean Validation API version 3.0 to 2.0. -- SDK artifacts have to be in scope `provided`. - -### Update to Validation API 2.0 - -To better integrate in the current Java ecosystem and widely used frameworks like Spring 5 and Spring Boot 2, the `connector-validation` module -now operates on Jakarta Bean Validation API version 2.0 instead of version 3.0. Adjust your Connector input objects using validation as follows: - -Replace all class imports starting with `jakarta.validation` by `javax.validation`. A Connector input class on SDK 0.2.x with the following imports: - -```java -import io.camunda.connector.api.annotation.Secret; -import jakarta.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -changes to the following: - -```java -import io.camunda.connector.api.annotation.Secret; -import javax.validation.constraints.NotEmpty; -import java.io.IOException; -import java.util.Objects; -``` - -This way, the Connector runtime environments are able to pick up your validations correctly. 
- -### Provided SDK artifacts - -The Connector runtime environments can execute multiple Connectors at once. The environments also provide the base SDK artifacts and their classes -to any Connector they execute. This comprises runtime-specific classes related to the Connector context as well as the Connector core and the validation -classes. To minimize the possibility of incompatible classes being on the same classpath, Connectors are required to depend on `connector-core` and -`connector-validation` in Maven's dependency scope `provided`. Other dependency management frameworks like Gradle offer similar scopes. - -As a result, you need to include the SDK artifacts as follows in Maven: - -```xml - - io.camunda.connector - connector-core - provided - - - io.camunda.connector - connector-validation - provided - -``` - -## Connector runtime environment - -The SDK provides a [pre-packaged runtime environment](/components/connectors/custom-built-connectors/connector-sdk.md#pre-packaged-runtime-environment) -that you can start manually. With version 0.3.0, this runtime moves from the [SDK repository](https://github.com/camunda/connector-sdk/tree/stable/0.2/runtime-job-worker) -to [Connector Runtime](https://github.com/camunda/connectors/blob/main/connector-runtime/README.md). This also means that the provided runtime now is -a Spring Boot application, based on Spring Zeebe. Thus, it offers all out-of-the-box capabilities Spring Zeebe provides. - -The Connector runtime JAR for manual installation can now be fetched from https://repo1.maven.org/maven2/io/camunda/spring-zeebe-connector-runtime/ -(starting with version `8.1.3`) instead of https://repo1.maven.org/maven2/io/camunda/connector/connector-runtime-job-worker/. You can start the runtime -environment with the following command: - -```bash -java -cp 'spring-zeebe-connector-runtime-VERSION-with-dependencies.jar:connector-http-json-VERSION-with-dependencies.jar' \ - io.camunda.connector.runtime.ConnectorRuntimeApplication -``` - -The Docker image is still accessible at https://hub.docker.com/r/camunda/connectors/tags. - -### Custom runtime environments - -If you are building a custom runtime environment, note the following adjustments: - -- The `runtime-util` artifact replaces the `runtime-job-worker` artifact. -- The `io.camunda.connector.runtime.jobworker.api.outbound.ConnectorJobHandler` has moved to `import io.camunda.connector.runtime.util.outbound.ConnectorJobHandler`. -- The `io.camunda.connector.impl.outbound.AbstractOutboundConnectorContext` has moved to `io.camunda.connector.impl.context.AbstractConnectorContext`. -- To build your own context class, we recommend using the following signature: - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext {} -``` - -- The `SecretStore` class has been removed. Initialize your context class with a `super(SecretProvider)` call. Remove the `getSecretStore` method if you used it. - -```java -public class MyContext extends AbstractConnectorContext implements OutboundConnectorContext { - - public MyContext(final SecretProvider provider) { - super(provider); - ... 
- } -} -``` diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/030-to-040.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/030-to-040.md deleted file mode 100644 index 43916603f22..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/030-to-040.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -id: 030-to-040 -title: Update 0.3 to 0.4 -description: "Review which adjustments must be made to migrate from Connector SDK 0.3.x to 0.4.0." ---- - -Intermediate - -The following sections explain which adjustments must be made to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.3.x to 0.4.0. - -:::caution - -Be aware that the update from 0.3 to 0.4 requires manual migration steps as described below. - -::: - -With SDK version 0.4.0, we introduce many basic structural changes: - -- Switching default Connector Runtime to Spring Boot/Spring Zeebe for outbound Connectors. -- Introducing webhook inbound Connector. -- Moved out-of-the-box connectors to mono-repo at https://github.com/camunda/connectors-bundle/tree/main/connectors to ease dependency management and conflict resolution. -- Build Connector bundle artifact and Docker image by Maven as default (done by adding various fat jars to one Docker image). -- Adding GCP Secret Provider used in Camunda SaaS. - -### Inbound webhook - -Spring Zeebe runtime with version `0.4.0` SDK introduces support of inbound webhook capabilities. -See the [list of available inbound Connectors](/components/connectors/out-of-the-box-connectors/available-connectors-overview.md). - -To function properly, Spring Zeebe runtime requires connection to [Operate API](/apis-tools/operate-api/overview.md). Read more on [how to connect to Operate or disable it completely](/self-managed/connectors-deployment/connectors-configuration.md#local-installation). - -### What happens if I don't properly configure connection to Operate API? - -If you don't configure properly connection to Operate API, it will be not possible to poll process definitions from Operate. Therefore, the webhook functionality won't work. -Additionally, you may observe exception spam in your log file every 5 seconds complaining of inability to connect to Operate. -Overall, this is not critical and given there are no other issues, the Connector runtime will function properly. diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/040-to-050.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/040-to-050.md deleted file mode 100644 index 637cabc8899..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/040-to-050.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: 040-to-050 -title: Update 0.4 to 0.5 -description: "Review which adjustments must be made to migrate from Connector SDK 0.4.x to 0.5.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.4.x to 0.5.0. 
- -With SDK version 0.5.0, we introduced minor changes: - -- Removing Spring Zeebe dependency management -- Managing the GCP Secret Provider module version diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/050-to-060.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/050-to-060.md deleted file mode 100644 index 46124442521..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/050-to-060.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: 050-to-060 -title: Update 0.5 to 0.6 -description: "Review which adjustments must be made to migrate from Connector SDK 0.5.x to 0.6.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.5.x to 0.6.0. - -With SDK version 0.6.0, we introduced the following changes: - -- Replacing secrets in parent classes -- Supporting intermediate inbound events -- Defining interfaces for inbound Connectors -- Fixing failing datetime serialization diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/060-to-070.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/060-to-070.md deleted file mode 100644 index bc84e1e1940..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/060-to-070.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -id: 060-to-070 -title: Update 0.6 to 0.7 -description: "Review which adjustments must be made to migrate from Connector SDK 0.6.x to 0.7.0." ---- - -Beginner - -Beginner - -With the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), we made -breaking changes to the inbound Connectors. Update -[HTTP Webhook](https://github.com/camunda/connectors/tree/main/connectors/webhook/element-templates) -and [GitHub Webhook](https://github.com/camunda/connectors/tree/main/connectors/github/element-templates) -element templates to the latest versions. - -If you have used inbound webhook Connectors with Connector Runtime 0.6.x, you need to **manually** -apply the new element template version to your diagrams: - -1. Download the new element template from the [GitHub release page](https://github.com/camunda/connectors-bundle/releases/tag/0.17.0). -2. Follow the [installation guide](/components/modeler/desktop-modeler/element-templates/configuring-templates.md) to reinstall the element template. diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/070-to-080.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/070-to-080.md deleted file mode 100644 index 1145b3450fb..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/070-to-080.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 070-to-080 -title: Update 0.7 to 0.8 -description: "Review which adjustments must be made to migrate from Connector SDK 0.7.x to 0.8.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.7.x to 0.8.0. 
diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/080-to-090.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/080-to-090.md deleted file mode 100644 index 51055c0aefc..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/080-to-090.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 080-to-090 -title: Update 0.8 to 0.9 -description: "Review which adjustments must be made to migrate from Connector SDK 0.8.x to 0.9.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.8.x to 0.9.0. diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/090-to-0100.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/090-to-0100.md deleted file mode 100644 index 1e6172bb692..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/090-to-0100.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: 090-to-0100 -title: Update 0.9 to 0.10 -description: "Review which adjustments must be made to migrate from Connector SDK 0.9.x to 0.10.0." ---- - -Beginner - -No manual adjustment necessary to migrate from -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md) -0.9.x to 0.10.0. diff --git a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/introduction.md b/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/introduction.md deleted file mode 100644 index 034d80e7388..00000000000 --- a/versioned_docs/version-8.6/components/connectors/custom-built-connectors/update-guide/introduction.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -id: introduction -title: Connector SDK updates ---- - -These documents guide you through the process of updating your Camunda 8 -Connector runtimes to a newer version of the -[Connector SDK](/components/connectors/custom-built-connectors/connector-sdk.md). 
- -There is a dedicated update guide for each version: - -### [Connector SDK 0.10.x to 0.11](../0100-to-0110) - -Update from 0.10.x to 0.11.2 - -### [Connector SDK 0.9 to 0.10](../090-to-0100) - -Update from 0.9.x to 0.10.0 - -### [Connector SDK 0.8 to 0.9](../080-to-090) - -Update from 0.8.x to 0.9.0 - -### [Connector SDK 0.7 to 0.8](../070-to-080) - -Update from 0.7.x to 0.8.0 - -### [Connector SDK 0.6 to 0.7](../060-to-070) - -Update from 0.6.x to 0.7.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.7.0) - -### [Connector SDK 0.5 to 0.6](../050-to-060) - -Update from 0.5.x to 0.6.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.6.0) - -### [Connector SDK 0.4 to 0.5](../040-to-050) - -Update from 0.4.x to 0.5.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.5.0) - -### [Connector SDK 0.3 to 0.4](../030-to-040) - -Update from 0.3.x to 0.4.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.4.0) - -### [Connector SDK 0.2 to 0.3](../020-to-030) - -Update from 0.2.x to 0.3.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.3.0) - -### [Connector SDK 0.1 to 0.2](../010-to-020) - -Update from 0.1.x to 0.2.0 - -[Release notes](https://github.com/camunda/connector-sdk/releases/tag/0.2.0) diff --git a/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-1.png b/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-1.png index 9ea40b01675..fbf95aa1be2 100644 Binary files a/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-1.png and b/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-1.png differ diff --git a/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-3.png b/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-3.png index 6e9cefb4615..a0ec13ff48f 100644 Binary files a/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-3.png and b/versioned_docs/version-8.6/components/connectors/img/connector-templates/create-connector-template-3.png differ diff --git a/versioned_docs/version-8.6/components/connectors/img/connector-templates/edit-connector-template-1.png b/versioned_docs/version-8.6/components/connectors/img/connector-templates/edit-connector-template-1.png index 47953c0fbce..a5dd1e313a5 100644 Binary files a/versioned_docs/version-8.6/components/connectors/img/connector-templates/edit-connector-template-1.png and b/versioned_docs/version-8.6/components/connectors/img/connector-templates/edit-connector-template-1.png differ diff --git a/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems-2.png b/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems-2.png index 92158564df1..a52b92e1528 100644 Binary files a/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems-2.png and b/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems-2.png differ diff --git a/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems.png 
b/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems.png index 341de0b0184..2a66c9d8b7d 100644 Binary files a/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems.png and b/versioned_docs/version-8.6/components/connectors/img/connector-templates/fix-connector-template-problems.png differ diff --git a/versioned_docs/version-8.6/components/connectors/img/use-connectors-error-general.png b/versioned_docs/version-8.6/components/connectors/img/use-connectors-error-general.png index 11e33cfc2b6..b095efcf94d 100644 Binary files a/versioned_docs/version-8.6/components/connectors/img/use-connectors-error-general.png and b/versioned_docs/version-8.6/components/connectors/img/use-connectors-error-general.png differ diff --git a/versioned_docs/version-8.6/components/connectors/manage-connector-templates.md b/versioned_docs/version-8.6/components/connectors/manage-connector-templates.md index efa7120409c..f175b4d1103 100644 --- a/versioned_docs/version-8.6/components/connectors/manage-connector-templates.md +++ b/versioned_docs/version-8.6/components/connectors/manage-connector-templates.md @@ -14,7 +14,7 @@ You can create and manage [Connector templates](/components/connectors/custom-bu Take the following steps to create a new Connector template: -1. Navigate to the project of your choice and click **New**. +1. Navigate to the project of your choice in Web Modeler and click **New**. 2. Click **Connector Template**. @@ -27,7 +27,7 @@ Take the following steps to create a new Connector template: The components of the editor interface are as follows: - In the **breadcrumbs bar** at the top of the screen, you can rename your template by clicking the chevron next to the template name. Note that you cannot change the name of your template in the template JSON, but only with this action. - - On the left, you observe the **template JSON editor**. Here, you define the actual template descriptor. The descriptor follows the [most recent element template schema](https://github.com/camunda/element-templates-json-schema). The editor is divided into two sections: a read-only section, containing the schema reference, the template name, the template id, and an editable section, where you can [define your template descriptor](/components/modeler/desktop-modeler/element-templates/defining-templates.md). + - On the left, you observe the **template JSON editor**. Here, you define the actual template descriptor. The descriptor follows the [most recent element template schema](https://github.com/camunda/element-templates-json-schema). The editor is divided into two sections: a read-only section, containing the schema reference, the template name, the template ID, and an editable section, where you can [define your template descriptor](/components/modeler/desktop-modeler/element-templates/defining-templates.md). - On the right, you observe the live **Visual Preview**. The live preview shows how the properties panel will look when you apply the template to an element. It automatically updates on every valid change, and reflects the latest valid state of the template. The preview allows you to interactively check your template before publishing, enhancing its usability. - In the upper left, you can **Upload an icon** for your template. You can upload any image file you want, however we recommend to use squared SVG graphics. 
The icons get rendered 18x18 pixels in the element on the modeling canvas, and 32x32 pixels in the properties panel. diff --git a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md index d02a79c4f93..37cd2ca1002 100644 --- a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md +++ b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-dynamodb.md @@ -284,7 +284,7 @@ The **Result Expression** allows you to access specific attributes from the resp } ``` -In this example, we are using the **Result Expression** to extract the **ID** and **price** attributes from the response variable and assign them to the id and price process variables, respectively. You can then use these variables in subsequent steps of your process. +In this example, we are using the **Result Expression** to extract the **ID** and **price** attributes from the response variable and assign them to the ID and price process variables, respectively. You can then use these variables in subsequent steps of your process. :::note The syntax for accessing attributes in the **Result Expression** may vary depending on the structure of your response object. You can refer to the [FEEL Context Expression](/components/modeler/feel/language-guide/feel-context-expressions.md) documentation for more information on how to use the **Result Expression**. diff --git a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md index 5ff312afc75..a374ce7f305 100644 --- a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md +++ b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/amazon-eventbridge.md @@ -105,10 +105,6 @@ There are two options to authenticate the Connector with AWS: The **Amazon EventBridge Webhook Connector** is an inbound Connector enabling you to start a BPMN process instance triggered by an event from [Amazon EventBridge](https://aws.amazon.com/eventbridge/). -:::note -If you have used the **Amazon EventBridge Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an Amazon EventBridge Webhook Connector task 1. Start building your BPMN diagram. You can use the **Amazon EventBridge Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/email.md b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/email.md index e43c19ec89a..e4815f21287 100644 --- a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/email.md +++ b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/email.md @@ -2,9 +2,16 @@ id: email title: Email Connector sidebar_label: Email Connector -description: The Email Connector allows you to connect your BPMN service with different email protocol. 
+description: The Email Connector allows you to connect your BPMN service with different email protocols such as SMTP, POP3 or IMAP. --- +import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; + + + + + The **Email Connector** is an outbound Connector that allows you to connect your BPMN service with any email POP3, IMAP or SMTP server. @@ -239,7 +246,7 @@ object with a field and a value. - If an operator is set, the criteria array must also be defined. - Each criterion within the criteria array is applied to the specified field based on the value associated with it. -:::note +::: #### Example Response @@ -332,7 +339,7 @@ Allow users to fetch a list of emails from a specified folder, with customizable | `Max Emails to read` | Specify the maximum number of emails to retrieve. This parameter determines the cap on the number of emails the task will return. | | `Sort emails by` |

    Choose the field by which to sort the emails. Supported sorting fields are:

    • `Sent date`: Sorts emails by the date and time they were sent.
    • `Size`: Sorts emails by the size of the email.

    | | `Sort order` |

    Define the sort order:

    • `ASC`: Ascending order, from the oldest or smallest value to the most recent or largest.
    • `DESC`: Descending order, from the most recent or largest value to the oldest or smallest.

    | -| `Folder` | (Optional) the folder to list emails from, default is `INBOX`. | +| `Folder` | (Optional) the folder to list emails from, default is `INBOX`. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | #### Sorting and Limiting Behavior @@ -378,23 +385,23 @@ Retrieve an email's details based on the specified `messageId`. #### Parameters -| Parameter | Description | -| :---------- | :------------------------------------------------------------------------------------------------------------------------ | -| `MessageId` | The unique identifier of the email that must be read. | -| `Folder` | (Optional) Specifies the folder from which the email should be retrieved. If not provided, the default folder is `INBOX`. | +| Parameter | Description | +| :---------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `MessageId` | The unique identifier of the email that must be read. | +| `Folder` | (Optional) Specifies the folder from which the email should be retrieved. If not provided, the default folder is `INBOX`. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | #### Response Structure The task returns a JSON object containing detailed information about the email: -- `messageId`: The unique identifier corresponding to the email message. -- `fromAddress`: the email addresses of the sender. -- `headers` : A list containing the email's headers +- `messageId`: The unique identifier of the email message. +- `fromAddress`: The email addresses of the sender. +- `headers` : A list of the email headers. - `subject`: The subject line of the email. -- `size`: The size of the email in bytes. -- `plainTextBody`: The plain text version of the email's content. -- `htmlBody`: The HTML version of the email's content, provided it exists. -- `receivedDateTime`: the email's reception datetime +- `size`: The size of the email (in bytes). +- `plainTextBody`: The plain text version of the email content. +- `htmlBody`: The HTML version of the email content, if it exists. +- `receivedDateTime`: The date and time the email was received. #### Example Response @@ -428,10 +435,10 @@ Delete an email from a specified folder, using the email's unique `messageId`. #### Parameters -| Parameter | Description | -| :---------- | :------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `MessageId` | The identifier of the email message to delete. | -| `Folder` | (Optional) Specifies the folder from which the email should be deleted. If this parameter is not supplied, the default folder is assumed to be `INBOX`. | +| Parameter | Description | +| :---------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `MessageId` | The identifier of the email message to delete. | +| `Folder` | (Optional) Specifies the folder from which the email should be deleted. If this parameter is not supplied, the default folder is assumed to be `INBOX`. 
For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | #### Response Structure @@ -462,7 +469,8 @@ A search query is represented as a JSON object. Below is an example of a JSON ob using an AND and OR operator to combine multiple conditions: - `Folder`: (Optional) Specifies the folder from which the email should be deleted. If this parameter is not supplied, - the default folder is assumed to be `INBOX`. + the default folder is assumed to be `INBOX`. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or + `inside.folder`) - `Criteria`: _See below_ ```json @@ -521,7 +529,7 @@ object with a field and a value. - If an operator is set, the criteria array must also be defined. - Each criterion within the criteria array is applied to the specified field based on the value associated with it. -:::note +::: #### Example Response @@ -540,11 +548,11 @@ Enable users to transfer an email from one folder to another, streamlining inbox #### Parameters -| Parameter | Description | -| :-------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `MessageId` | The identifier of the email that needs to be moved. | -| `Source folder` | (Optional) The folder from which the email will be moved. If not specified, the default is INBOX. | -| `Target folder` | The destination folder where the email is placed. To specify a new folder or a nested hierarchy, use a dot-separated path (for example, 'Archive' or 'Projects.2023.January'). The system automatically creates any non-existent folders in the path. | +| Parameter | Description | +| :-------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `MessageId` | The identifier of the email that needs to be moved. | +| `Source folder` | (Optional) The folder from which the email will be moved. If not specified, the default is INBOX. For subfolders, use `.` or `/` separated path (ex: `inside/folder` or `inside.folder`) | +| `Target folder` | The destination folder where the email is placed. To specify a new folder or a nested hierarchy, use `.` or `/` separated path (for example, 'Archive/test' or 'Projects.2023.January'). The system automatically creates any non-existent folders in the path. | #### Response Structure @@ -565,3 +573,172 @@ The example below shows the expected JSON response after an email has been succe "to": "TEST" } ``` + +
    + + + +The **Email Inbound Connector** is an inbound Connector that allows you to connect your BPMN service with any email IMAP server. + +:::caution +This inbound connector only supports working with IMAP server. +::: + +## Prerequisites + +To use the **Email Inbound Connector**, you must have an IMAP server available to connect to. + +:::note +Use Camunda secrets to avoid exposing your sensitive data as plain text. +See [managing secrets](/components/console/manage-clusters/manage-secrets.md). +::: + +## Authentication + +You can authenticate to a mail server as follows. + +### Simple Authentication + +This method allows the user to connect to any IMAP server using an email address and password. + +#### Parameters + +| Parameter | Description | +| :--------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `username` | Enter your full email address (for example, user@example.com) or the username provided by your email service. This is used to authenticate your access to the mail server. | +| `password` | Enter the password for your email account. Keep your password secure and do not share it with others. | + +## Listener information + +This inbound connector creates a new process each time a new email is received. + +| Parameter | Description | +| :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Folder` |

    (Optional) Define the folder the inbound connector will monitor.

    • If not specified, the default folder is set to `INBOX`.
    • For subfolders, use a `.` or `/` separated path (for example, `inside/folder` or `inside.folder`).

    | +| `Polling Wait Time` | Set the interval between each polling operation (for example, `PT10M` to poll every 10 minutes). See [timer events](/components/modeler/bpmn/timer-events/timer-events.md#time-duration) for more information on time duration and correct format. | +| `Polling Configuration` |

    This section contains settings related to the polling behavior of the connector.

    • `Poll All Emails`: Poll every email found in the specified folder.
      • `Move to Another Folder After Processing`: Move processed emails to a specific folder.

        • `Folder`: Specify the target folder to move processed emails to. To specify a new folder or a nested hierarchy, use a `.` or `/` separated path (for example, `Archive/test` or `Projects.2023.January`). Non-existent folders in the path are automatically created.

      • `Delete After Processing`: Permanently delete each email after processing.

    • `Poll Unseen Emails`: Poll only emails not marked as read in the specified folder.
      • `Move to Another Folder After Processing`: Move processed unseen emails to a specific folder.

        • `Folder`: Specify the target folder to move processed unseen emails to. To specify a new folder or a nested hierarchy, use a `.` or `/` separated path (for example, `Archive/test` or `Projects.2023.January`). Non-existent folders in the path are automatically created.
      • `Delete After Processing`: Permanently delete unseen emails from the folder after processing.

      • `Mark as Read After Processing`: Mark each unseen email as read after it is processed.

    | + +## Response Structure + +The task returns a JSON object containing detailed information about the email: + +- `messageId`: The unique identifier of the email message. +- `fromAddress`: The email addresses of the sender. +- `headers` : A list of the email headers. +- `subject`: The subject line of the email. +- `size`: The size of the email (in bytes). +- `plainTextBody`: The plain text version of the email content. +- `htmlBody`: The HTML version of the email content, if it exists. +- `receivedDateTime`: The date and time the email was received. + +#### Example Response + +The following example JSON response shows the data structure produced when an email triggers the creation of a process +instance: + +```json +{ + "messageId": "messageId", + "fromAddress": "example@camunda.com", + "subject": "Urgent Test", + "size": 65646, + "plainTextBody": "Hey how are you?\r\n", + "htmlBody": "Hello", + "headers": [ + { + "header": "header1", + "value": "example" + }, + { + "header": "header2", + "value": "test" + } + ], + "sentDate": "2024-08-19T06:54:28Z" +} +``` + +This response includes essential email details such as the `messageId`, sender addresses, subject, size, and the content +of the email both in plain text and HTML format. This information can be used by the process for various workflows, such +as prioritizing tasks, content analysis, and automated responses. + +## Activation condition + +The optional **Activation condition** field allows you to specify a Friendly Enough Expression Language ([FEEL](/components/modeler/feel/what-is-feel.md)) expression to control when this Connector should trigger a process instance. This condition acts as a filter, allowing the process to be initiated only when certain criteria are met by the incoming email. + +For example, the FEEL expression `=(response.subject = "urgent")` ensures that the process is only triggered if the subject of the incoming email matches "urgent". If this field is left blank, the process is triggered for every email received by the connector. + +## Correlation + +The **Correlation** section allows you to configure the message correlation parameters. + +### Correlation key + +- **Correlation key (process)** is a FEEL expression that defines the correlation key for the subscription. This + corresponds to the **Correlation key** property of a regular **message intermediate catch event**. +- **Correlation key (payload)** is a FEEL expression used to extract the correlation key from the incoming message. This + expression is evaluated in the Connector Runtime and the result is used to correlate the message. + +For example, given that your correlation key is defined with `myCorrelationKey` process variable, and the incoming email +message contains `value:{correlationKey:myValue}`, your correlation key settings will look like this: + +- **Correlation key (process)**: `=myCorrelationKey` +- **Correlation key (payload)**: `=message.plainTextBody.correlationKey` + +You can also use the key of the message to accomplish this in the **Correlation key (payload)** field with `=key`. + +:::info +To learn more about correlation keys, see [messages](../../../concepts/messages). +::: + +### Message ID expression + +The optional **Message ID expression** field allows you to extract the message ID from the incoming message. + +- The message ID serves as a unique identifier for the message and is used for message correlation. +- This expression is evaluated in the Connector Runtime and the result used to correlate the message. 
+ +In most cases, you do not need to configure the **Message ID expression**. However, it is useful if you want to ensure +message deduplication or achieve a specific message correlation behavior. + +:::info +To learn more about how message IDs influence message correlation, +see [messages](../../../concepts/messages#message-correlation-overview). +::: + +For example, if you want to set the message ID to the value of the `messageId` field in the incoming message, you can +configure the **Message ID expression** as follows: + +``` += message.messageId +``` + +### Message TTL + +The optional **Message TTL** field allows you to set the time-to-live (TTL) for the correlated messages. + +- TTL defines the time for which the message is buffered in Zeebe before being correlated to the process instance (if it + cannot be correlated immediately). +- The value is specified as an ISO 8601 duration. For example, `PT1H` sets the TTL to one hour. + +:::info +To learn more about TTL in Zeebe, see [message correlation](../../../concepts/messages#message-buffering). +::: + +## Deduplication + +The **Deduplication** section allows you to configure the Connector deduplication parameters. + +- **Connector deduplication** is a mechanism in the Connector Runtime that determines how many email listeners are created if there are multiple occurrences of the **Email Listener Connector** in a BPMN diagram. This is different to **message deduplication**. + +- By default, the Connector runtime deduplicates Connectors based on properties, so elements with the same subscription properties only result in one subscription. + +To customize the deduplication behavior, select the **Manual mode** checkbox and configure the custom deduplication ID. + +:::info +To learn more about deduplication, see [deduplication](../use-connectors/inbound.md#connector-deduplication). +::: + +
    + +
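
As a closing illustration, the fields returned in the response structure above can also be combined in the **Activation condition**. The following FEEL sketch is only an example (the sender address and subject value are placeholders); it assumes the incoming email is exposed as `response`, as in the activation condition example above, and triggers a process only for urgent emails from a specific sender:

```
= response.subject = "urgent" and response.fromAddress = "alerts@example.com"
```

Emails that do not satisfy the condition do not start a process instance.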
    diff --git a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/github.md b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/github.md index 06d2623894e..00ced81c5e4 100644 --- a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/github.md +++ b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/github.md @@ -153,8 +153,8 @@ The **GitHub Connector** currently supports the following operations. - **GitHub API:** [Update a release](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#update-a-release). - **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Release id:** The unique identifier of the release. +- **Repository:** The name of the repository. The name is not case-sensitive. +- **Release ID:** The unique identifier of the release. - **Body:** Text describing the contents of the tag. - **Tag name:** The name of the tag. - **Release name:** The name of the release @@ -164,8 +164,8 @@ The **GitHub Connector** currently supports the following operations. - **GitHub API:** [Delete a release](https://docs.github.com/en/rest/releases/releases?apiVersion=2022-11-28#delete-a-release). - **Owner:** The account owner of the repository. The name is not case-sensitive. -- **Repo:** The name of the repository. The name is not case-sensitive. -- **Release id:** The unique identifier of the release. +- **Repository:** The name of the repository. The name is not case-sensitive. +- **Release ID:** The unique identifier of the release. #### List releases @@ -280,12 +280,6 @@ handling response is still applicable [as described](/components/connectors/prot The **GitHub Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by a [GitHub event](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). -:::note -If you have used the GitHub Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. -Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a GitHub Webhook Connector task 1. Start building your BPMN diagram. You can use GitHub Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. diff --git a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/microsoft-teams.md b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/microsoft-teams.md index f9e156d16b9..fd6d381cf75 100644 --- a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/microsoft-teams.md +++ b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/microsoft-teams.md @@ -70,9 +70,9 @@ Visit [Microsoft Teams Access Token](https://learn.microsoft.com/azure/active-di For a **Refresh Token** type authentication, take the following steps: -1. Click the **Refresh Token** connection type in the **Authentication** section. -2. Set **Refresh Token** to `Refresh Token`. Read more on [how to get a refresh token](https://learn.microsoft.com/en-us/graph/auth-v2-user). -3. Set **Tenant id** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. 
Read more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). +1. Click the **Refresh token** connection type in the **Authentication** section. +2. Set **Refresh token** to `Refresh Token`. Read more on [how to get a refresh token](https://learn.microsoft.com/en-us/graph/auth-v2-user). +3. Set **Tenant ID** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. Read more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). 4. Set the **Client ID** field: the application ID that the [Azure app registration portal](https://go.microsoft.com/fwlink/?linkid=2083908) assigned to your app. 5. Set the **Secret ID** field: the client secret that you created in the app registration portal for your app. @@ -81,7 +81,7 @@ For a **Refresh Token** type authentication, take the following steps: For a **Client credentials** type authentication, take the following steps: 1. Click the **Client credentials** connection type in the **Authentication** section. -2. Set **Tenant id** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. See more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). +2. Set **Tenant ID** to `Tenant ID`. Your Microsoft Teams tenant ID is a unique identifier. See more on [how to find a tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant). 3. Set the **Client ID** field: the application ID that the [Azure app registration portal](https://go.microsoft.com/fwlink/?linkid=2083908) assigned to your app. 4. Set the **Secret ID** field: the client secret that you created in the app registration portal for your app. @@ -103,7 +103,7 @@ For example, if you want to send a message in a Microsoft Teams channel, choose | Property | Methods | Required | Type | Description | | :-------------: | :---------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Chat ID | Get chat by id
    List chat members
    Send message in chat
    List messages in chat
    Get message in chat
    List chat members | Yes | string | Microsoft Teams chat id | +| Chat ID | Get chat by ID
    List chat members
    Send message in chat
    List messages in chat
    Get message in chat
    List chat members | Yes | string | Microsoft Teams chat ID | | Content | Send message in chat | Yes | text | Content that will be sent to chat | | Content Type | Send message in chat | Yes | dropdown | Content type of body message | | Chat type | Create a new chat | Yes | dropdown | Click **one on one** to create a one-on-one chat or **group** to create a group chat. | @@ -111,9 +111,9 @@ For example, if you want to send a message in a Microsoft Teams channel, choose | Members | Create a new chat | Yes | FEEL expression | See [members property](#members-property) to learn more. | | Top | List messages in chat | No | numbers | Controls the number of items per response; maximum allowed top value is 50. | | Order by | List messages in chat | Yes | dropdown | Can order by 'lastModifiedDateTime' and 'createdDateTime'. | -| Expand response | Get chat by id | Yes | dropdown | Choose | +| Expand response | Get chat by ID | Yes | dropdown | Choose | | Filter | List messages in chat | No | string | Sets the date range filter for the lastModifiedDateTime and createdDateTime properties. [Learn more about filtering](https://learn.microsoft.com/en-us/graph/filter-query-parameter). | -| Message ID | Get message in chat | Yes | string | Microsoft Teams chat message id | +| Message ID | Get message in chat | Yes | string | Microsoft Teams chat message ID | ##### Expand response @@ -164,19 +164,19 @@ The **members** property must contain a list of members: | Property | Methods | Required | Type | Description | | :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :------: | :--------------------------------------------------------------------------------------------------------------------: | -| Group ID | Create channel
    Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams group id | -| Channel ID | Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams channel id | +| Group ID | Create channel
    Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams group ID | +| Channel ID | Get channel
    List channels
    Send message to channel
    Get channel message
    List channel messages
    List message replies
    List members | Yes | string | Microsoft Teams channel ID | | Display name | Create channel | No | string | Displayed name of new Microsoft Teams channel | | Description | Create channel | No | text | Description of new Microsoft Teams channel | | Channel membership type | Create channel | Yes | dropdown | See [teams-channels-overview](https://learn.microsoft.com/microsoftteams/teams-channels-overview) for more information | -| Owner | Create channel (if Channel membership type != STANDARD) | Yes | string | Channel owner; Microsoft Teams user id or Microsoft Teams principal name | +| Owner | Create channel (if Channel membership type != STANDARD) | Yes | string | Channel owner; Microsoft Teams user ID or Microsoft Teams principal name | | Filter | List channels | No | string | The search filter. [Learn more about filtering](https://learn.microsoft.com/en-us/graph/filter-query-parameter) | | Content | Send message to channel | Yes | text | Content that will be sent to chat | | Content Type | Send message to channel | Yes | dropdown | Content type of body message | -| Message ID | Get channel message | Yes | string | Message id of Microsoft Teams in channel | +| Message ID | Get channel message | Yes | string | Message ID of Microsoft Teams in channel | | Top | List channel messages | No | numbers | Controls the number of items per response | | With replies | List channel messages | Yes | boolean | Choose **FALSE** for get messages without replies
    Choose **TRUE** to get messages with replies |
- If one of the usernames is provided as any other type than a String, it will be omitted. diff --git a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/twilio.md b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/twilio.md index 6c43eaca5bc..782f428334c 100644 --- a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/twilio.md +++ b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/twilio.md @@ -179,10 +179,6 @@ To learn more about implementing retry and error handling logic in your BPMN dia The **Twilio Webhook Connector** is an inbound Connector that enables you to start a BPMN process instance triggered by a [Twilio event](https://www.twilio.com/docs/usage/webhooks). -:::note -If you have used the **Twilio Webhook Connector** with a Self-Managed Camunda 8 configuration before the Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. Refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create a Twilio Webhook Connector task 1. Start building your BPMN diagram. You can use the **Twilio Webhook Connector** with either a **Start Event** or an **Intermediate Catch Event** building block. diff --git a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/uipath.md b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/uipath.md index ce7f2536227..329b1b79c6b 100644 --- a/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/uipath.md +++ b/versioned_docs/version-8.6/components/connectors/out-of-the-box-connectors/uipath.md @@ -77,7 +77,7 @@ For this section, you must fill out the following fields: 1. **Cloud URL**: Comes with a default value of `cloud.uipath.com`. You can always change it, if needed. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. 2. **Cloud organization**: The name of your organization. See [about organizations](https://docs.uipath.com/automation-cloud/docs/about-organizations) to learn more. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. 3. **Cloud tenant**: The name of the tenant. See [about tenants](https://docs.uipath.com/automation-cloud/docs/about-tenants) to learn more. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. -4. **Organization Unit ID**: Click **Orchestrator** and you will find the id in the URL. For example, `https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/?tid=26929&fid=112233` where the **Organization Unit ID** is `112233`. To use a Connectors secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. +4. **Organization Unit ID**: Click **Orchestrator** and you will find the ID in the URL. For example, `https://cloud.uipath.com/MyOrg/MyTenant/orchestrator_/?tid=26929&fid=112233` where the **Organization Unit ID** is `112233`. To use a Connector secret, use a double curly braces notation, e.g. `{{secrets.MY_SECRET_VALUE}}`. 
#### Input diff --git a/versioned_docs/version-8.6/components/connectors/protocol/http-webhook.md b/versioned_docs/version-8.6/components/connectors/protocol/http-webhook.md index 6aea2097267..59db8580fd3 100644 --- a/versioned_docs/version-8.6/components/connectors/protocol/http-webhook.md +++ b/versioned_docs/version-8.6/components/connectors/protocol/http-webhook.md @@ -7,12 +7,6 @@ description: Start a process instance with your custom webhook configuration, tr The **HTTP Webhook Connector** is an inbound Connector that allows you to start a BPMN process instance triggered by external HTTP call. -:::note -If you have used the HTTP Webhook Connector with a self-managed Camunda 8 configuration before the -Connector SDK [0.7.0 release](https://github.com/camunda/connector-sdk/releases/tag/0.7.0), you might need to manually replace the element template. -Please refer to the [update guide](/components/connectors/custom-built-connectors/update-guide/060-to-070.md) for more details. -::: - ## Create an HTTP Webhook Connector event 1. Start building your BPMN diagram. You can use HTTP Webhook Connector with either **Start Event** or **Intermediate Catch Event** building blocks. @@ -83,7 +77,7 @@ Please refer to the [update guide](/components/connectors/custom-built-connector - Set the **API Key** property to the expected value of the API key. - Set the **API Key locator** property that will be evaluated against the incoming request to extract the API key. [See the example](#how-to-configure-api-key-authorization). -- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer {JWT_TOKEN}. +- **[JWT authorization](https://jwt.io/)** - The token should be in the _Authorization_ header of the request in the format of Bearer `{JWT_TOKEN}`. - Set JWK URL which is used as a well-known public URL to fetch the [JWKs](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets). - Set JWT role property expression which will be evaluated against the content of the JWT to extract the list of roles. See more details on extracting roles from JWT data [here](#how-to-extract-roles-from-jwt-data). diff --git a/versioned_docs/version-8.6/components/connectors/protocol/rest.md b/versioned_docs/version-8.6/components/connectors/protocol/rest.md index 4a2b159f3bd..1cdeca28c18 100644 --- a/versioned_docs/version-8.6/components/connectors/protocol/rest.md +++ b/versioned_docs/version-8.6/components/connectors/protocol/rest.md @@ -27,6 +27,40 @@ To make the **REST Connector** executable, choose the required authentication ty All the mandatory and non-mandatory fields will be covered in the upcoming sections. Depending on the authentication selection you make, more fields might be required. We will also cover this in the next section. ::: +### Configure a proxy server in Self-Managed + +If you are using Camunda Self-Managed, you can configure this Connector to use an HTTP or HTTPS proxy server. + +You can do this using the `JAVA_OPTS` environment variable. 
For example: + +``` +JAVA_OPTS=-Dhttp.proxyHost=proxy -Dhttp.proxyPort=3128 -Dhttps.proxyHost=proxy -Dhttps.proxyPort=3128 -Dhttp.nonProxyHosts=OTHER_DOMAIN +``` + +#### HTTP + +To specify the proxy as an HTTP protocol handler, set the following standard JVM properties: + +| Property | Description | +| :------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `http.proxyHost` | The host name of the proxy server. | +| `http.proxyPort` | The port number (default is 80). | +| `http.nonProxyHosts` |

    A list of hosts to connect to directly, bypassing the proxy.

    • Specify as a list of patterns, separated by `|` (for example, `localhost|*.internal.example.com`).
    • Patterns can start or end with a `*` for wildcards.
    • Any host matching one of these patterns uses a direct connection instead of a proxy.

    | + +#### HTTPS + +To specify the proxy as an HTTPS (HTTP over SSL) protocol handler, set the following standard JVM properties: + +| Property | Description | +| :------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `https.proxyHost` | The host name of the proxy server. | +| `https.proxyPort` | The port number (default is 443). | +| `http.nonProxyHosts` |

    A list of hosts to connect to directly, bypassing the proxy.

    • Specify as a list of patterns, separated by `|`.
    • Patterns can start or end with a `*` for wildcards.
    • Any host matching one of these patterns uses a direct connection instead of a proxy.

    | + +:::note +The HTTPS protocol handler also uses the `http.nonProxyHosts` property to specify non-proxy hosts. +::: + ### Authentication You can choose among the available authentication type according to your authentication requirements. @@ -57,7 +91,7 @@ Select the **REST Connector** and fill out the following properties under the ** - **Headers**: The API key will be included in the request headers. 3. Specify your API key details: - **API key name**: Enter the parameter name expected by the API (e.g., apiKey). - - **API key value**: Reference the secret you created for your API key (e.g., {{secrets.REST_API_KEY_SECRET}}). + - **API key value**: Reference the secret you created for your API key (e.g., `{{secrets.REST_API_KEY_SECRET}}`). ### REST Connector (Basic) @@ -207,6 +241,44 @@ Additionally, you can choose to unpack the content of your `response` into multi } ``` +## Error handling + +If an error occurs, the Connector throws an error and includes the error response in the `error` variable in Operate. Click on the REST Connector in Operate to see this variable. + +The following example shows the `error` variable in an error response: + +```json +{ + "code": "400", + "variables": { + "response": { + "headers": { + "Content-Length": "70", + "Date": "Thu, 17 Oct 2024 09:31:51 GMT", + "Content-Type": "application/json" + }, + "body": { + "temperature": 36, + "message": "My custom error message", + "booleanField": true + } + } + }, + "message": "Bad Request", + "type": "io.camunda.connector.api.error.ConnectorException" +} +``` + +You can handle this error using an Error Boundary Event and the following error expression: + +```json +if matches(error.code, "400") and error.variables.response.body.temp = 36 +then bpmnError("Too hot", error.variables.response.body.message, error.variables.response.body) +else null +``` + +In this example, passing `error.variables.response.body` as the third argument to the `bpmnError` function allows you to pass additional information about the error to the error boundary event. For example, the `message`, `temperature` and `booleanField` fields from the error response are passed to the error boundary event. + ## OData support The **REST Connector** supports JSON-based [OData protocol](https://www.odata.org/). diff --git a/versioned_docs/version-8.6/components/console/manage-clusters/cluster-backups.md b/versioned_docs/version-8.6/components/console/manage-clusters/cluster-backups.md index 1b7029ad27a..4b92980fb19 100644 --- a/versioned_docs/version-8.6/components/console/manage-clusters/cluster-backups.md +++ b/versioned_docs/version-8.6/components/console/manage-clusters/cluster-backups.md @@ -1,6 +1,6 @@ --- id: create-backups -title: Create backup +title: Backups description: "If your organization works within Camunda's Enterprise plan, you can create cluster backups." --- diff --git a/versioned_docs/version-8.6/components/console/manage-clusters/manage-alerts.md b/versioned_docs/version-8.6/components/console/manage-clusters/manage-alerts.md index c5c9e983ce8..fa6f952f999 100644 --- a/versioned_docs/version-8.6/components/console/manage-clusters/manage-alerts.md +++ b/versioned_docs/version-8.6/components/console/manage-clusters/manage-alerts.md @@ -1,6 +1,6 @@ --- id: manage-alerts -title: Manage alerts +title: Alerts description: "Camunda 8 can notify you when process instances stop with an error." 
--- diff --git a/versioned_docs/version-8.6/components/console/manage-clusters/manage-api-clients.md b/versioned_docs/version-8.6/components/console/manage-clusters/manage-api-clients.md index 54b582977a9..bd1c4de41e8 100644 --- a/versioned_docs/version-8.6/components/console/manage-clusters/manage-api-clients.md +++ b/versioned_docs/version-8.6/components/console/manage-clusters/manage-api-clients.md @@ -1,6 +1,6 @@ --- id: manage-api-clients -title: Manage API clients +title: API clients description: "Let's create a client and manage our API clients." --- diff --git a/versioned_docs/version-8.6/components/console/manage-clusters/manage-cluster.md b/versioned_docs/version-8.6/components/console/manage-clusters/manage-cluster.md index 6b09e55a28e..0f4aa6a105e 100644 --- a/versioned_docs/version-8.6/components/console/manage-clusters/manage-cluster.md +++ b/versioned_docs/version-8.6/components/console/manage-clusters/manage-cluster.md @@ -1,10 +1,10 @@ --- id: manage-cluster title: Manage your cluster -description: "Follow these steps to rename, resume, update, or delete your cluster." +description: "Follow these steps to rename, resume, update, resize, or delete your cluster." --- -Read through the following sections to rename, resume, update, or delete your cluster. +Learn how to rename, resume, update, resize, or delete your cluster. ## Rename a cluster @@ -59,10 +59,23 @@ When an update is available, an **Update** button will appear. This button is no You can decide if you want to have [automated updates](/reference/auto-updates.md) to new versions of Camunda 8 activated. You can also toggle this feature anytime later in the **Settings** tab of your cluster. -## Delete a cluster +## Resize a cluster + +You can increase or decrease the [cluster size](/components/concepts/clusters.md#cluster-size) at any time. For example, increase the cluster size to improve performance and add capacity, or decrease the cluster size to free up reservations for another cluster. + +1. Open the cluster details by clicking on the cluster name. +1. Select **Resize cluster** next to the cluster type. +1. Select the new cluster size from the available sizes. +1. Click **Confirm** to resize the cluster, or **Cancel** to close the modal without resizing the cluster. :::note -This action cannot be undone. +Contact your Customer Success Manager to increase the cluster size beyond the maximum 4x size. This requires custom sizing and pricing. +::: + +## Delete a cluster + +:::caution +Deleting a cluster is **permanent** and cannot be undone. ::: A cluster can be deleted at any time. To delete your cluster, navigate to the **Clusters** tab in the top navigation and click **Delete** to the far right of the cluster name. diff --git a/versioned_docs/version-8.6/components/console/manage-clusters/manage-ip-allowlists.md b/versioned_docs/version-8.6/components/console/manage-clusters/manage-ip-allowlists.md index 0437ba49e2d..c5586806f7b 100644 --- a/versioned_docs/version-8.6/components/console/manage-clusters/manage-ip-allowlists.md +++ b/versioned_docs/version-8.6/components/console/manage-clusters/manage-ip-allowlists.md @@ -1,6 +1,6 @@ --- id: manage-ip-allowlists -title: Manage IP allowlists +title: IP allowlists description: "If your organization works within Camunda's Enterprise plan, you can restrict access to clusters with an IP allowlist." 
keywords: [whitelist, allowlist, ip whitelist, ip allowlist] --- diff --git a/versioned_docs/version-8.6/components/console/manage-clusters/manage-secrets.md b/versioned_docs/version-8.6/components/console/manage-clusters/manage-secrets.md index 7518b65d507..ea7025f8011 100644 --- a/versioned_docs/version-8.6/components/console/manage-clusters/manage-secrets.md +++ b/versioned_docs/version-8.6/components/console/manage-clusters/manage-secrets.md @@ -1,6 +1,6 @@ --- id: manage-secrets -title: Manage secrets +title: Connector secrets description: Create secrets and reference them in your Connectors without exposing sensitive information in your BPMN processes. --- diff --git a/versioned_docs/version-8.6/components/console/manage-organization/advanced-search.md b/versioned_docs/version-8.6/components/console/manage-organization/advanced-search.md index 54ad0f5f64d..ba8d6437d02 100644 --- a/versioned_docs/version-8.6/components/console/manage-organization/advanced-search.md +++ b/versioned_docs/version-8.6/components/console/manage-organization/advanced-search.md @@ -14,7 +14,7 @@ This search functionality allows users to: ## Open the search bar -Press `ctrl`+`k`, `⌘`+`k`, or click the magnifier in the top navigation bar to open the search bar. +Press `Ctrl+K`, `⌘+K`, or click the magnifier in the top navigation bar to open the search bar. ![Open the search bar](./img/open_console_search.png) diff --git a/versioned_docs/version-8.6/components/console/manage-organization/img/activity-view.png b/versioned_docs/version-8.6/components/console/manage-organization/img/activity-view.png index cbae303ee37..1a946c8fec7 100644 Binary files a/versioned_docs/version-8.6/components/console/manage-organization/img/activity-view.png and b/versioned_docs/version-8.6/components/console/manage-organization/img/activity-view.png differ diff --git a/versioned_docs/version-8.6/components/console/manage-organization/view-organization-activity.md b/versioned_docs/version-8.6/components/console/manage-organization/view-organization-activity.md index 06f45af8a04..5c7a68a2050 100644 --- a/versioned_docs/version-8.6/components/console/manage-organization/view-organization-activity.md +++ b/versioned_docs/version-8.6/components/console/manage-organization/view-organization-activity.md @@ -1,9 +1,15 @@ --- id: view-organization-activity title: View organization activity -description: "Let's analyze the capabilities of the Activity tab." +description: "The Activity tab allows you to view details of all activity within an Organization, such as cluster creation, deletion, updates, and user invitations." --- -The **Activity** tab lists all activities within an organization. Here, you can note when a cluster was created or deleted. +You can view all activity within an organization on the **Activity** tab. + +For example, you can see details for cluster creation, deletion, updates, and user invitations. ![activity-view](./img/activity-view.png) + +## Export activity + +Click **Export activity**, and select whether to export and download the activity list as a JSON or CSV file. 
diff --git a/versioned_docs/version-8.6/components/console/manage-plan/migrate-from-prof-to-starter.md b/versioned_docs/version-8.6/components/console/manage-plan/migrate-from-prof-to-starter.md index 1fdcd43ec98..8f3d548da9b 100644 --- a/versioned_docs/version-8.6/components/console/manage-plan/migrate-from-prof-to-starter.md +++ b/versioned_docs/version-8.6/components/console/manage-plan/migrate-from-prof-to-starter.md @@ -11,7 +11,7 @@ Here are a few important remarks to consider before completing the migration ste - Since the two plans have different types of clusters included and fees for those, we recommend comparing the [Professional plan](https://camunda.com/blog/2023/05/camunda-professional-edition-accelerate-projects/) with the [Starter plan](https://camunda.com/blog/2023/09/camunda-starter/) to [understand your monthly costs](https://camunda.com/pricing/starter-plan-price-calculator/) before the migration. - General users and development/production cluster reservations in the Professional plan are migrated “as is” to the Starter plan, which may result in overage costs (e.g. production clusters in Professional will be transferred to production clusters in the Starter plan). If you are not using your production cluster in the Professional plan, we recommend you delete it beforehand and create a new development cluster in the Starter plan afterward. - Once you have edited the plan below, the changes will take effect on the first day of your next subscription period. -- If you have any questions, do not hesitate to [contact us](https://camunda.com/contact/). +- If you have any questions, do not hesitate to [contact us](/reference/contact.md). ::: diff --git a/versioned_docs/version-8.6/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md b/versioned_docs/version-8.6/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md index 6e3114634eb..73d526f0fcd 100644 --- a/versioned_docs/version-8.6/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md +++ b/versioned_docs/version-8.6/components/modeler/bpmn/business-rule-tasks/business-rule-tasks.md @@ -48,7 +48,7 @@ The `bindingType` attribute determines which version of the called decision is e - `deployment`: The version that was deployed together with the currently running version of the process. - `versionTag`: The latest deployed version that is annotated with the version tag specified in the `versionTag` attribute. -To learn more about choosing binding types, see [Choosing the resource binding type](/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md). +To learn more about choosing binding types, see [choosing the resource binding type](/components/best-practices/modeling/choosing-the-resource-binding-type.md). :::note If the `bindingType` attribute is not specified, `latest` is used as the default. diff --git a/versioned_docs/version-8.6/components/modeler/bpmn/call-activities/call-activities.md b/versioned_docs/version-8.6/components/modeler/bpmn/call-activities/call-activities.md index fca00892937..3c238d20e9f 100644 --- a/versioned_docs/version-8.6/components/modeler/bpmn/call-activities/call-activities.md +++ b/versioned_docs/version-8.6/components/modeler/bpmn/call-activities/call-activities.md @@ -24,7 +24,7 @@ The `bindingType` attribute determines which version of the called process is in - `deployment`: The version that was deployed together with the currently running version of the calling process. 
- `versionTag`: The latest deployed version that is annotated with the version tag specified in the `versionTag` attribute. -To learn more about choosing binding types, see [Choosing the resource binding type](/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md). +To learn more about choosing binding types, see [choosing the resource binding type](/components/best-practices/modeling/choosing-the-resource-binding-type.md). :::note If the `bindingType` attribute is not specified, `latest` is used as the default. @@ -42,10 +42,6 @@ When a non-interrupting boundary event is triggered, the created process instanc ## Variable mappings -By default, all variables of the call activity scope are copied to the created process instance. This can be limited to copying only the local variables of the call activity, by setting the attribute `propagateAllParentVariables` to `false`. - -By disabling this attribute, variables existing at higher scopes are no longer copied. If the attribute `propagateAllParentVariables` is set (default: `true`), all variables are propagated to the child process instance. - Input mappings can be used to create new local variables in the scope of the call activity. These variables are also copied to the created process instance. If the attribute `propagateAllChildVariables` is set (default: `true`), all variables of the created process instance are propagated to the call activity. This behavior can be customized by defining output mappings at the call activity. The output mappings are applied on completing the call activity and only those variables that are defined in the output mappings are propagated. diff --git a/versioned_docs/version-8.6/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md b/versioned_docs/version-8.6/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md index 56bee823e46..5a1b961b484 100644 --- a/versioned_docs/version-8.6/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md +++ b/versioned_docs/version-8.6/components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses.md @@ -14,6 +14,8 @@ When an embedded subprocess is entered, the start event is activated. The subpro Embedded subprocesses are often used together with **boundary events**. One or more boundary events can be attached to a subprocess. When an interrupting boundary event is triggered, the entire subprocess (including all active elements) is terminated. +When adding an embedded subprocess to your model, you can either add a collapsed or expanded subprocess. You cannot collapse an existing expanded subprocess in your model. + ## Collapsed subprocesses :::caution @@ -22,7 +24,7 @@ Collapsed subprocesses are currently only partially supported by Optimize. While All other Camunda components fully support collapsed subprocesses. ::: -A subprocess can be collapsed to conceal its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. +A collapsed subprocess conceals its internal details, thereby hiding complexity within an activity and enabling the nesting of multiple levels of subprocesses. This functionality allows you to simplify the view of a process diagram and facilitates drill-down capabilities to examine details. Collapsed subprocesses serve purely display purposes. 
For the creation of reusable processes, it is recommended to utilize [call activities](../call-activities/call-activities.md). diff --git a/versioned_docs/version-8.6/components/modeler/bpmn/manual-tasks/manual-tasks.md b/versioned_docs/version-8.6/components/modeler/bpmn/manual-tasks/manual-tasks.md index 5d2ac406614..d444261ac2f 100644 --- a/versioned_docs/version-8.6/components/modeler/bpmn/manual-tasks/manual-tasks.md +++ b/versioned_docs/version-8.6/components/modeler/bpmn/manual-tasks/manual-tasks.md @@ -4,18 +4,15 @@ title: "Manual tasks" description: "A manual task defines a task that is external to the BPM engine." --- -A manual task defines a task that is external to the BPM engine. This is used to model work that is done -by somebody who the engine does not need to know of and there is no known system or UI interface. - -For the engine, a manual task is handled as a pass-through activity, automatically continuing the -process at the moment the process instance arrives. +A manual task defines a task that requires human interaction but no external tooling or UI interface. For example, a user reviewing a document or completing a physical task. ![task](assets/manual-task.png) -Manual tasks have no real benefit for automating processes. Manual tasks instead provide insights into the tasks -that are performed outside of the process engine. +Manual tasks are part of [human task orchestration](/guides/getting-started-orchestrate-human-tasks.md), but differ from [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) which define an actionable task assisted by a business process execution engine or software application. + +Within the engine and BPMN model, a manual task is handled as a pass-through activity, automatically continuing the process at the moment the process instance arrives. -## Additional resources +Manual tasks provide insights into the tasks performed outside the process engine, aiding in modeling a process, though no linked automation process is utilized. ### XML representation diff --git a/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md b/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md index b8ad4445655..90a64a6191c 100644 --- a/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md +++ b/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md @@ -4,8 +4,9 @@ title: "User tasks" description: "A user task is used to model work that needs to be done by a human actor." --- -A user task is used to model work that needs to be done by a human actor. When -the process instance arrives at such a user task, a new user task instance is created at Zeebe. +A user task is used to model work that needs to be done by a human and is assisted by a business process execution engine or software application. This differs from [manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md), which are not assisted by external tooling. + +When the process instance arrives at a user task, a new user task instance is created at Zeebe. The process instance stops at this point and waits until the user task instance is completed. When the user task instance is completed, the process instance continues. @@ -125,7 +126,7 @@ Depending on your use case, two different types of form references can be used: - `deployment`: The version that was deployed together with the currently running version of the process. 
- `versionTag`: The latest deployed version that is annotated with the version tag specified in the `versionTag` attribute. - To learn more about choosing binding types, see [Choosing the resource binding type](/docs/components/best-practices/modeling/choosing-the-resource-binding-type.md). + To learn more about choosing binding types, see [choosing the resource binding type](/components/best-practices/modeling/choosing-the-resource-binding-type.md). :::note If the `bindingType` attribute is not specified, `latest` is used as the default. @@ -161,7 +162,7 @@ A user task does not have to be managed by Zeebe. Instead, you can also use job workers to implement a custom user task logic. Note that you will lose all the task lifecycle and state management features that Zeebe provides and will have to implement them yourself. Use job workers only in case you require a very specific implementation of user tasks that can't be implemented on top of Zeebe user tasks. :::info -If you started using Camunda 8 with version 8.4 or a lower version and upgraded to 8.5 or newer, your user tasks are probably implemented as job workers. Refer to the [migration guide](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md) to find a detailed list of the differences between the task implementation types and learn how to migrate to Zeebe user tasks. +If you started using Camunda 8 with version 8.4 or a lower version and upgraded to 8.5 or newer, your user tasks are probably implemented as job workers. Refer to the [migration guide](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md) to find a detailed list of the differences between the task implementation types and learn how to migrate to Zeebe user tasks. ::: You can define a job worker implementation for a user task by removing its `zeebe:userTask` extension element. diff --git a/versioned_docs/version-8.6/components/modeler/desktop-modeler/plugins/plugins.md b/versioned_docs/version-8.6/components/modeler/desktop-modeler/plugins/plugins.md index aec6a119f91..e83cf793981 100644 --- a/versioned_docs/version-8.6/components/modeler/desktop-modeler/plugins/plugins.md +++ b/versioned_docs/version-8.6/components/modeler/desktop-modeler/plugins/plugins.md @@ -193,7 +193,7 @@ npm run build When creating a plugin, you can place the directory containing your plugin in the aforementioned `resources/plugins` directory. -Plugins will be loaded on application startup (menu plugins) or reload (style and modeling tool plugins). To reload the application, open the developer tools F12 and press `CtrlOrCmd + R`. This will clear all unsaved diagrams. +Plugins will be loaded on application startup (menu plugins) or reload (style and modeling tool plugins). To reload the application, open the developer tools F12 and press `Ctrl+R` or `Cmd+R`. This will clear all unsaved diagrams. 
## Additional resources diff --git a/versioned_docs/version-8.6/components/modeler/desktop-modeler/telemetry/telemetry.md b/versioned_docs/version-8.6/components/modeler/desktop-modeler/telemetry/telemetry.md index 8db915f21f0..33fe66c6901 100644 --- a/versioned_docs/version-8.6/components/modeler/desktop-modeler/telemetry/telemetry.md +++ b/versioned_docs/version-8.6/components/modeler/desktop-modeler/telemetry/telemetry.md @@ -54,8 +54,8 @@ These events include the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the case of a form, the payload also includes the `formFieldTypes`: @@ -78,8 +78,8 @@ The `Deployment Event` and `Start Instance` have the following properties: - `diagramType`: BPMN, DMN, or Form - Engine profile: - - `executionPlatform`: - - `executionPlatformVersion`: + - `executionPlatform`: <target platform\> + - `executionPlatformVersion`: <target platform version\> In the event of an unsuccessful deployment, an `error` property will be present in the payload containing an error code. diff --git a/versioned_docs/version-8.6/components/modeler/desktop-modeler/troubleshooting.md b/versioned_docs/version-8.6/components/modeler/desktop-modeler/troubleshooting.md index cd88267e3cc..c43dfc076d3 100644 --- a/versioned_docs/version-8.6/components/modeler/desktop-modeler/troubleshooting.md +++ b/versioned_docs/version-8.6/components/modeler/desktop-modeler/troubleshooting.md @@ -171,6 +171,18 @@ DEBUG=* ZEEBE_NODE_LOG_LEVEL=DEBUG GRPC_VERBOSITY=DEBUG GRPC_TRACE=all camunda-m +## Desktop Modeler does not start on Ubuntu 24 / modern Linux + +Modern Linux operating systems introduce restrictions on user namespaces, a sandboxing (isolation) mechanism Modeler uses. You may see an error message when you start the application: + +```sh +$ ./camunda-modeler +[46193:1114/170934.837319:FATAL:setuid_sandbox_host.cc(163)] The SUID sandbox helper binary was found, but is not configured correctly. Rather than run without sandboxing I'm aborting now. You need to make sure that [...]/camunda-modeler-[...]-linux-x64/chrome-sandbox is owned by root and has mode 4755. +zsh: trace trap (core dumped) ./camunda-modeler +``` + +To remedy this, configure your system to allow sandboxing by [creating an AppArmor profile](https://github.com/camunda/camunda-modeler/issues/4695#issuecomment-2478458250), or review [this issue](https://github.com/camunda/camunda-modeler/issues/4695#issuecomment-2478581677) for an in-depth explanation of available options. If you don't have the necessary permissions to permit sandboxing, you may choose to disable the sandbox, though this is not recommended. + ## Other questions? Head over to the [Modeler category on the forum](https://forum.camunda.io/c/modeler/6) to receive help from the community. 
diff --git a/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md b/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md index 921b5c0f693..6a0f28ee837 100644 --- a/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md +++ b/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md @@ -12,7 +12,7 @@ Desktop Modeler automatically fetches and updates [element templates](./element- ## Automatic Connector template fetching -Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. +Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. The fetch is triggered whenever you start the application, or every 24 hours if the application is not closed. After an update check has concluded, a notification indicates if the templates are up to date or have been updated: diff --git a/versioned_docs/version-8.6/components/modeler/dmn/dmn.md b/versioned_docs/version-8.6/components/modeler/dmn/dmn.md index f8a894fe113..d19f0a38f3f 100644 --- a/versioned_docs/version-8.6/components/modeler/dmn/dmn.md +++ b/versioned_docs/version-8.6/components/modeler/dmn/dmn.md @@ -53,6 +53,10 @@ You can also edit literal expressions. Just as with decision tables, in the deci ## Business knowledge models +:::caution +Viewing the result of BKM evaluation is currently not supported in Operate. +::: + A _business knowledge model_ (BKM) is a reusable function containing a piece of decision logic. Typically, a BKM instantiates business logic that is required in multiple decisions, such as a common computation. For example, an amortization formula might be used in different loan application processes. You can make BKM elements executable using literal expressions written in FEEL, in almost the same way you would create a decision using a literal expression. A BKM literal expression can optionally accept parameters to be used as inputs to the FEEL expression, and it returns a single result whose name is the same as the BKM element name. Once you’ve created a BKM, it appears in autosuggestions when you’re using literal expressions to create decision logic. diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-button.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-button.md index 577b3c0615e..e3614b67505 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-button.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-button.md @@ -6,7 +6,7 @@ description: A form element to trigger form actions A button allowing the user to trigger form actions. -![Form Button Symbol](/img/form-icons/form-button.svg) +Form Button Symbol ### Configurable properties @@ -15,4 +15,4 @@ A button allowing the user to trigger form actions. - **Submit**: Submit the form (given there are no validation errors). - **Reset**: Reset the form, all user inputs will be lost. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the button. -- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). 
+- **Columns**: Space the button will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md index f3812384600..3b60e61f43b 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox-group.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A set of checkbox options providing data multi-selection for small datasets. -![Form Checklist Symbol](/img/form-icons/form-checklist.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A set of checkbox options providing data multi-selection for small datasets. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox group. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox group must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Datatypes diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox.md index 834a18cfa63..9546284e2d4 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-checkbox.md @@ -6,7 +6,7 @@ description: A form element to read and edit boolean data A checkbox allowing the user to read and edit boolean data. -![Form Checkbox Symbol](/img/form-icons/form-checkbox.svg) +Form Checkbox Symbol ### Configurable properties @@ -19,7 +19,7 @@ A checkbox allowing the user to read and edit boolean data. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the checkbox. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Checkbox must contain a value. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). 
+- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Datatypes diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-datetime.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-datetime.md index aeb127128dc..c02c2912925 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-datetime.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-datetime.md @@ -6,7 +6,7 @@ description: Learn about the datetime form element to read and edit date and tim A component allowing the user to read and edit date and time data. -![Form Datetime Symbol](/img/form-icons/form-datetime.svg) +Form Datetime Symbol ## Configurable properties @@ -19,7 +19,7 @@ A component allowing the user to read and edit date and time data. - **Read only**: Makes the datetime component read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the datetime component, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the datetime component. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Time format**: Defines the time data format. This can either be **UTC offset**, **UTC normalized**, or **No timezone**. - **Time interval**: Defines the steps of time that can be selected in the time input field. - **Disallow past dates**: Enables the restriction to not allow past dates. diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md index 4af25ce51cc..1a24ae1440e 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-dynamiclist.md @@ -6,7 +6,7 @@ description: Learn about the dynamic list form element to dynamically manage a l The **dynamic list** element is designed to dynamically manage a list of form elements. It enables users to add or remove items from the list and is particularly useful in scenarios where the number of items in a list is not fixed. 
-![Dynamic List Symbol](/img/form-icons/form-dynamiclist.svg) +Dynamic List Symbol ## Configurable properties diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-expression.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-expression.md index f4f24988c5e..27133f2d8de 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-expression.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-expression.md @@ -6,7 +6,7 @@ description: A form element to compute form state An expression field allowing the user to compute new data based on form state. -![Form Expression Field Symbol](/img/form-icons/form-expression.svg) +Form Expression Field Symbol ### Configurable properties @@ -14,7 +14,7 @@ An expression field allowing the user to compute new data based on form state. - **Target value**: Defines an [expression](../../feel/language-guide/feel-expressions-introduction.md) to evaluate. - **Compute on**: Defines when the expression should be evaluated. Either whenever the result changes, or only on form submission. - **Deactivate if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to disable the expression. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). :::info diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-group.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-group.md index a90f822b54a..4353743a7b1 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-group.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-group.md @@ -6,7 +6,7 @@ description: Learn about the group form element to group multiple form elements The group element serves as a container to group various form elements together. It allows for nesting of fields and assists in organizing complex forms. -![Form Group Symbol](/img/form-icons/form-group.svg) +Form Group Symbol ### Configurable properties @@ -15,7 +15,7 @@ The group element serves as a container to group various form elements together. - **Show outline**: Can be toggled on and off to display a separating outline around the group - **Vertical alignment**: Determines the alignment of items in the list. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ### Usage diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-html.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-html.md index bc8e56f454f..bad7f9a9483 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-html.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-html.md @@ -6,13 +6,13 @@ description: A form element to display HTML content. A flexible display component designed to quickly render HTML content for the user. -![Form HTML Symbol](/img/form-icons/form-html.svg) +Form HTML Symbol ## Configurable properties - **Content**: This property accepts HTML content. Define it using [templating syntax](../configuration/forms-config-templating-syntax.md) or as plaintext HTML. The rendered content is sanitized for security reasons, see below for details. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to conditionally hide the HTML content. -- **Columns**: Space the field will use inside its row. The **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. The **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Our security and sanitation strategy diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-iframe.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-iframe.md index f928bf92197..782621b9c84 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-iframe.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-iframe.md @@ -12,7 +12,7 @@ Every iframe component is a sandbox. This means that the content of the iframe i ::: -![Form iframe Symbol](/img/form-icons/form-iframe.svg) +Form iframe Symbol ## Configurable properties @@ -30,7 +30,7 @@ Every iframe component is a sandbox. This means that the content of the iframe i - **Top level navigation**: Gives the iframe permission to change the URL of the parent page, navigating away entirely from it. - **Storage access by user**: Controls access of local storage based on user interactions, may be expected in addition to allow same origin on certain browsers for functionality depending on storage. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the iframe. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Security advisory diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-image.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-image.md index 3becca2d45e..ca0c674345e 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-image.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-image.md @@ -6,11 +6,11 @@ description: Learn about the image view form element to display an image. An element allowing the user to display images. -![Form Image Symbol](/img/form-icons/form-image.svg) +Form Image Symbol ## Configurable properties - **Image source**: Specifies the image source via [expression](../../feel/language-guide/feel-expressions-introduction.md), [templating syntax](../configuration/forms-config-templating-syntax.md) or [static value](/components/concepts/expressions.md#expressions-vs-static-values) (hyperlink or data URI). - **Alternative text**: Provides an alternative text to the image in case it cannot be displayed. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the image. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-number.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-number.md index 6e9d108ca19..902253e3642 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-number.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-number.md @@ -6,7 +6,7 @@ description: A form element to read and edit numeric data A number field allowing the user to read and edit numeric data. -![Form Number Symbol](/img/form-icons/form-number.svg) +Form Number Symbol ### Configurable properties @@ -19,7 +19,7 @@ A number field allowing the user to read and edit numeric data. - **Read only**: Makes the number field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the number field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the number. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Serialize to string**: Configures the output format of the datetime value. This enables unlimited precision digits. - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Number field must contain a value. diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-radio.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-radio.md index 285fc2b6272..3d9c8928bcc 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-radio.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-radio.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A radio group allowing the user to select one of multiple data options for small datasets. -![Form Radio Symbol](/img/form-icons/form-radio.svg) +Form Radio Symbol ### Configurable properties @@ -18,7 +18,7 @@ A radio group allowing the user to select one of multiple data options for small - **Disabled**: Disables the radio group, for use during development. - **Options source**: Radio group components can be configured with an options source defining the individual choices the component provides, refer to [options source docs](../configuration/forms-config-options.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the radio group. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One radio option must be selected. diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-select.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-select.md index 17ae7dd2ce0..102a401ff77 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-select.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-select.md @@ -6,7 +6,7 @@ description: A form element to select a value from set options A Select dropdown allowing the user to select one of multiple data option from larger datasets. 
-![Form Select Symbol](/img/form-icons/form-select.svg) +Form Select Symbol ### Configurable properties @@ -18,7 +18,7 @@ A Select dropdown allowing the user to select one of multiple data option from l - **Read only**: Makes the select read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the select, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the select. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Options source**: Selects can be configured with an options source defining the individual choices the select provides, refer to [options source docs](../configuration/forms-config-options.md). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: One select entry must be selected. diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-separator.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-separator.md index a83fc9ed785..392d1418cb4 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-separator.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-separator.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add a visual separation between A **separator** element is used to create a visual separation between two elements. -![Form Spacer Symbol](/img/form-icons/form-separator.svg) +Form Separator Symbol ## Usage diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-spacer.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-spacer.md index 22043da492b..7284c15dfd3 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-spacer.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-spacer.md @@ -6,7 +6,7 @@ description: Learn about this layout element to add vertical space between eleme A **spacer** element is used to create a defined amount of vertical space between two elements. 
-![Form Spacer Symbol](/img/form-icons/form-spacer.svg) +Form Spacer Symbol ## Configurable properties diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-table.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-table.md index 947b4f5e505..5ff8cb77e7d 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-table.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-table.md @@ -6,7 +6,7 @@ description: Learn about the table form element to render tabular data. This is an element allowing the user to render tabular data. -![Form table Symbol](/img/form-icons/form-table.svg) +Form Table Symbol ## Configurable properties @@ -16,4 +16,4 @@ This is an element allowing the user to render tabular data. - **Number of rows per page**: The size of each page. Used only if pagination is enabled. Must be greater than zero. - **Headers source**: Defines which headers will be used in the table. This can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md) or a list of static headers. Review [table data binding](../configuration/forms-config-table-data-binding.md) for the required header structure. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the table. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-taglist.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-taglist.md index 97411ce8db5..e4dd64059ea 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-taglist.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-taglist.md @@ -6,7 +6,7 @@ description: A form element to select multiple values from set options A complex and searchable tag based component providing multi-selection for large datasets. -![Form Taglist Symbol](/img/form-icons/form-taglist.svg) +Form Taglist Symbol ### Configurable properties @@ -14,7 +14,7 @@ A complex and searchable tag based component providing multi-selection for large - **Field description**: Description provided below the taglist. Can either be an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). - **Key**: Binds the field to a form variable, refer to [data binding docs](../configuration/forms-config-data-binding.md). - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the taglist. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. 
Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Taglist must contain a value. - **Options source**: Taglists can be configured with an options source defining the individual choices your user can make, refer to [options source docs](../configuration/forms-config-options.md). diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-text.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-text.md index c2b043cacee..a3fe68e537e 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-text.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-text.md @@ -6,13 +6,13 @@ description: A form element to display simple Markdown-powered text. A Markdown-powered text component allowing to display simple information to the user. -![Form Text Symbol](/img/form-icons/form-text.svg) +Form Text Symbol ## Configurable properties - **Text**: Either an [expression](../../feel/language-guide/feel-expressions-introduction.md), plain text, or [templating syntax](../configuration/forms-config-templating-syntax.md). After evaluation, the result is processed using a Markdown renderer that supports basic HTML and [GitHub-flavored Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). To ensure safety and prevent cross-site scripting in Camunda Forms, potentially harmful HTML elements will not be rendered. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). ## Example text configurations diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textarea.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textarea.md index d851a970a92..2e66e4d0712 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textarea.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textarea.md @@ -6,7 +6,7 @@ description: Learn about the text area form element to read and edit multiline t A text area allowing the user to read and edit multiline textual data. 
-![Form Textarea Symbol](/img/form-icons/form-textArea.svg) +Form Textarea Symbol ## Configurable properties @@ -17,7 +17,7 @@ A text area allowing the user to read and edit multiline textual data. - **Read only**: Makes the text area read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text area; for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text area. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text area must contain a value. - **Minimum length**: Text area must have at least `n` characters. diff --git a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textfield.md b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textfield.md index 1aafa0e824f..da45e37b3c9 100644 --- a/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textfield.md +++ b/versioned_docs/version-8.6/components/modeler/forms/form-element-library/forms-element-library-textfield.md @@ -6,7 +6,7 @@ description: A form element to read and edit textual data A text field allowing the user to read and edit textual data. -![Form Text Field Symbol](/img/form-icons/form-textField.svg) +Form Text Field Symbol ### Configurable properties @@ -17,7 +17,7 @@ A text field allowing the user to read and edit textual data. - **Read only**: Makes the text field read-only, meaning the user can't change but only read its state. Can be dynamically set using an [expression](../../feel/language-guide/feel-expressions-introduction.md). - **Disabled**: Disables the text field, for use during development. - **Hide if**: [Expression](../../feel/language-guide/feel-expressions-introduction.md) to hide the text field. -- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/guidelines/2x-grid/overview). +- **Columns**: Space the field will use inside its row. **Auto** means it will automatically adjust to available space in the row. Read more about the underlying grid layout in the [Carbon Grid documentation](https://carbondesignsystem.com/elements/2x-grid/overview/). - **Validation**: Given that one of the following properties is set, the form will only submit when the respective condition is fulfilled. Otherwise, a validation error will be displayed. - **Required**: Text field must contain a value. - **Regular expression validation**: Use predefined validation patterns. Available options are: `Email`, `Phone`, and `Custom`. 
diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/advanced-modeling/form-linking.md b/versioned_docs/version-8.6/components/modeler/web-modeler/advanced-modeling/form-linking.md index c7cf43464fe..cf43c6b50c5 100644 --- a/versioned_docs/version-8.6/components/modeler/web-modeler/advanced-modeling/form-linking.md +++ b/versioned_docs/version-8.6/components/modeler/web-modeler/advanced-modeling/form-linking.md @@ -89,6 +89,10 @@ To correct any instances affected by this issue, we recommend the following step ### Camunda Form (embedded) +:::info +Embedded forms are supported only by job worker-based user tasks and are not available for the [Zeebe user task implementation type](/components/modeler/bpmn/user-tasks/user-tasks.md#user-task-implementation-types). +::: + When choosing **Camunda Form (embedded)** as type you have the option to directly paste the form's JSON schema into the **Form JSON configuration** field of the properties panel. The form will be embedded directly into the BPMN diagram's XML representation. diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/camunda-marketplace.md b/versioned_docs/version-8.6/components/modeler/web-modeler/camunda-marketplace.md index 1e366d34ebb..0842066baee 100644 --- a/versioned_docs/version-8.6/components/modeler/web-modeler/camunda-marketplace.md +++ b/versioned_docs/version-8.6/components/modeler/web-modeler/camunda-marketplace.md @@ -12,6 +12,10 @@ If you are a **[Web Modeler Self-Managed](/self-managed/modeler/web-modeler/inst ## Browse Marketplace Connectors +:::note +Connectors created by partners or the community are not part of the commercial Camunda product. Camunda does not support these Connectors as part of its commercial services to enterprise customers. Please evaluate each client to make sure it meets your requirements before using. +::: + To navigate to the Camunda Marketplace, take the following steps: 1. Log in to your Camunda account. diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/git-sync.md b/versioned_docs/version-8.6/components/modeler/web-modeler/git-sync.md index b4b10c9130f..e61588ef5de 100644 --- a/versioned_docs/version-8.6/components/modeler/web-modeler/git-sync.md +++ b/versioned_docs/version-8.6/components/modeler/web-modeler/git-sync.md @@ -36,20 +36,24 @@ Click **Create GitHub App** to finish. 3. Select **Only select repositories**, and choose the repository to sync with Web Modeler. 4. Once redirected to your application's installation page, copy the **Installation ID** located at the end of the page's URL: `https://github.com/settings/installations/{installation_id}`. -### Configure GitHub in Web Modeler +### [Configure GitHub in Web Modeler](#configure-github-in-web-modeler) :::note An organization administration account (or project administrator in Camunda Self-Managed) is required for the initial GitHub configuration. ::: +:::note +When using a self-hosted GitHub instance, ensure the environment variable `CAMUNDA_MODELER_GITSYNC_GITHUB_BASEURL` is set to the API URL of your self-hosted GitHub instance. It usually looks like `http(s)://HOSTNAME/api/v3`. Refer to [GitHub documentation](https://docs.github.com/en/enterprise-server@3.15/rest/enterprise-admin?apiVersion=2022-11-28#endpoint-urls) and choose the correct enterprise server version. +::: + 1. Within Web Modeler, navigate to the process application you would like to connect to GitHub, and click **Connect GitHub**. 2. 
Provide the following information in the GitHub Configuration modal: - **Installation ID:** Found in the URL of your GitHub App's installation page. - - **Client ID:** Found in your GitHub App's settings page. + - **Client ID:** Found in your GitHub App's settings page. You can also use the Application ID as an alternative. (If you are using GitHub Enterprise Server 3.13 or earlier, you **must** use the Application ID.) - **Private Key:** The contents of the .pem file downloaded from your GitHub App's settings page. - - **GitHub repository URL:** The URL of the repository you would like to sync with. + - **GitHub repository URL:** The base URL of the repository you want to sync with, for example `https://github.com/camunda/example-repo`. The URL cannot contain the `.git` extension or a folder path. - **Branch name:** The branch name to use for merging and managing changes. 3. Click **Save Configuration**. @@ -64,10 +68,6 @@ When successful, your project will display a new **Sync with GitHub** button. ## Sync with GitHub -:::note -File synchronization only happens at the root level of the remote repository. Files contained in subfolders will not be synchronized. -::: - Organization owners/administrators, project administrators, and project editors can sync their version of Web Modeler with the connected GitHub repository at any time. 1. In your connected process application, click **Sync with GitHub**. @@ -95,3 +95,5 @@ Existing GitHub configurations can be edited from the gear icon beside the **Syn - When synchronizing for the first time with a remote repository that already contains commits, Web Modeler will attempt to select a main process with a file name that matches its own main process. If there is no matching process, Web Modeler will select a process at random from the available `.bpmn` files. In the event that no `.bpmn` files exist in the remote repository, Web Modeler will not proceed, and will instead display an error message. Ensure the main process is correctly assigned, especially in cases where a random process has been selected. - Actions which alter the SHA of the commit to which Web Modeler is synced (for example, squash) may cause synchronization errors. - Timeouts may occur during a sync. In the event of a timeout, close the modal and retry the synchronization. +- A single synchronization action is limited to a maximum of 250 commits or changes to 300 files, regardless of whether these changes affect Web Modeler files directly. Web Modeler does not notify you when these thresholds are exceeded. If you hit this limit, initiate a fresh synchronization, which fetches all files in the repository instead of relying on incremental changes and thereby bypasses the limitation. You can trigger a fresh synchronization by changing the branch or modifying the GitHub repository URL. +- Using self-hosted instances of Git providers may require additional configuration. Refer to the Web Modeler [configuration](#configure-github-in-web-modeler) for more details.
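To make the self-hosted GitHub note above concrete, here is a minimal sketch of the environment variable for Web Modeler Self-Managed; the hostname is hypothetical, while the variable name and the `/api/v3` suffix come from the note itself:

```sh
# Set in the environment of the Web Modeler deployment (hostname is a placeholder).
# The value must be the REST API base URL of the self-hosted GitHub instance,
# not its web UI URL.
export CAMUNDA_MODELER_GITSYNC_GITHUB_BASEURL="https://github.example.com/api/v3"
```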
diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png index 4dbd018b1e8..baa536d91a8 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-choose-role.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png index 181ef2fe17c..9aee9ca25c7 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-modal-opened.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png index 7f58e0844db..d1d2f6c3c40 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-sent.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png index d5dfda7d208..e12fc4628b0 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-suggestions.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png index c03ecf4b021..d6ad478a770 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-collaborator-invite-type-message.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png index 89b87af5218..92572f56797 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-mention-suggestions.png differ diff --git 
a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png index e69b47a3b0a..0a1a13ca2de 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-overlay-on-diagram.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png index a25925bce92..8286a484e2f 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-type-here.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png index 54660f1fd52..5561d5de7e0 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-comment-with-context-menu.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-home.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-home.png index 6374c015832..1f482c5dc85 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-home.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-home.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png index c30496a06b2..13682310801 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-icon-button.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png index fff1f8ad0f1..b299fb877a3 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-create.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png index e535d5c54b8..3df77ca71d2 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png and 
b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-email.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png index 86838c1950e..a804f8fb698 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal-password-protect.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png index 5219a796154..c2e6e89611d 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/collaboration/web-modeler-share-modal.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/design-mode.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/design-mode.png index a3dfbfa0130..2f7dcbc4d64 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/design-mode.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/design-mode.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/implement-mode.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/implement-mode.png index c54fe4ef18b..cea00459251 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/implement-mode.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/implement-mode.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png index cc2cda7fff3..a663caaa99d 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-diagram-replace-via-drag-and-drop.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png index e2c75332646..fff577bad0e 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-project-drag-and-drop.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png index 42613495b8b..1df7fdb58fe 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png and 
b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-choose.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png index 10466afdff0..3d088c32deb 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-replace-via-upload-menu-item.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png index c21b5dc4567..5813766fb23 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-choose.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png index d98c2b940d0..e22dbc5c803 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/import-diagram/web-modeler-upload-file-completed.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png index 664a0feeae9..8bde0c3efd3 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png index 5018b3ef613..bdfe9014ba8 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-action-menu-item.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png index bf65f281071..2b8b9289449 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-code-diffing.png differ diff --git 
a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png index 5ecc85f84da..e95ab6f6ad3 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-compare-process-application-files-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png index a68638ba6d9..d9b0243c171 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-versioned-milestone-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png index 3d961631d73..1b9cc9659c4 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png index f8f54538a3b..e8c54754784 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-breadcrumb.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png index 51560aa75eb..38c9b0cfd27 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png index ca70a36d5c9..933ab3662f1 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-create-via-icon.png 
differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png index f5440304331..12fc1953318 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-diffing.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png index fb6224defa7..ad160f87c97 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png index 20fce005c76..4e165589c69 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-complete.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png index c46de3c033f..59f4cc97c22 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png index 2516876f300..70f8e5f2de8 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-restore.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png index 7badc1a6061..27e06e6fe45 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-show-history-via-breadcrumb-highlight.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png 
b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png index 087600bec01..9696f5f681b 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/milestones/web-modeler-milestone-visual-diffing.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/read-only-properties.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/read-only-properties.png index 6e88ad52269..172fcfe1248 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/read-only-properties.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/read-only-properties.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/real-time-collaboration.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/real-time-collaboration.png index 9516ed0aa6c..b20ef040fd7 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/real-time-collaboration.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/real-time-collaboration.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-endevent.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-endevent.png index 1c9ccf8c843..f1278a7909c 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-endevent.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-endevent.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-task.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-task.png index a1880dd3aab..20e1173de3f 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-task.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-add-task.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-deploy.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-deploy.png index 6597df42fed..18dc70503b4 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-deploy.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-deploy.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png index fe3d00c4fc2..409f09dc983 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-new-diagram-with-configuration.png differ diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-start-instance.png b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-start-instance.png index 4e50d016129..dc2ac7d54f1 100644 Binary files a/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-start-instance.png and b/versioned_docs/version-8.6/components/modeler/web-modeler/img/web-modeler-start-instance.png differ diff 
--git a/versioned_docs/version-8.6/components/modeler/web-modeler/milestones.md b/versioned_docs/version-8.6/components/modeler/web-modeler/milestones.md index 6782f0d162b..2c8f9259151 100644 --- a/versioned_docs/version-8.6/components/modeler/web-modeler/milestones.md +++ b/versioned_docs/version-8.6/components/modeler/web-modeler/milestones.md @@ -32,7 +32,7 @@ You can create a new milestone either from your diagram or the milestone history ![milestones create via the breadcrumb menu](img/milestones/web-modeler-milestone-create-via-breadcrumb-highlight.png) -- From the milestone history, hover over the the latest version in the **Milestones** panel and select **Create a new milestone**. +- From the milestone history, hover over the latest version in the **Milestones** panel and select **Create a new milestone**. ![milestones create via icon](img/milestones/web-modeler-milestone-create-via-icon-highlight.png) diff --git a/versioned_docs/version-8.6/components/modeler/web-modeler/play-your-process.md b/versioned_docs/version-8.6/components/modeler/web-modeler/play-your-process.md index 55e63a62768..857207d6c67 100644 --- a/versioned_docs/version-8.6/components/modeler/web-modeler/play-your-process.md +++ b/versioned_docs/version-8.6/components/modeler/web-modeler/play-your-process.md @@ -151,7 +151,9 @@ This section explains why you might not see the **Play** tab, and any additional For more information about terms, refer to our [licensing and terms page](https://legal.camunda.com/licensing-and-other-legal-terms#c8-saas-trial-edition-and-free-tier-edition-terms). -Although Play is compatible with cluster versions 8.5.1 and above, we fully support and recommend using versions 8.6.0 or higher. +**Version compatibility:** Although Play is compatible with cluster versions 8.5.1 and above, Camunda fully supports and recommends using versions 8.6.0 or higher. + +**Execution listeners:** Play does not currently support [execution listeners](/components/concepts/execution-listeners.md). As a workaround, you can skip the element using [modifications](#modify-a-process-instance). ### Camunda 8 SaaS @@ -196,6 +198,7 @@ If no cluster is configured, Web Modeler requests the following cluster details ### Limitations +- Play does not support multi-tenancy. - The environment variables `CAMUNDA_CUSTOM_CERT_CHAIN_PATH`, `CAMUNDA_CUSTOM_PRIVATE_KEY_PATH`, `CAMUNDA_CUSTOM_ROOT_CERT_PATH`, and `CAMUNDA_CUSTOM_ROOT_CERT_STRING` can be set in Docker or Helm chart setups. However, these configurations have not been tested with Play's behavior, and therefore are not supported when used with Play. - Play cannot check the presence of Connector secrets in Self-Managed setups. If a secret is missing, Play will show an incident at runtime. 
diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png index 3ad06d1daa5..98c7592a604 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-finished-instance-detail.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png index 1bd77e49196..8c44872ab94 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-deleted-notification.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png index 9d14fb9b8b3..2db1a42914d 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-delete-operation-confirm.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png index f9eb0b6a355..d26a7cbb744 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances-navigate.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png index b9c552a2d45..83a4708f98d 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instance-detail-finished-instances.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png index 56f4feedcb5..09a9d86a33e 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png and 
b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-click-delete-operation.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png index c6cf678d3c2..ba0a4fba1b6 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-delete-operation-confirm.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png index f9363676e39..c14fe0332dd 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-instances-finished-instances.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png index e404c64a5d8..25c3e55c0ee 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-finished-instances/operate-operations-panel-delete-operation.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-button.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-button.png index 89568da7abb..e4fb5d3a6d9 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-button.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-button.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-filters.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-filters.png index c1297720b91..8f373547314 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-filters.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-filters.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-modal.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-modal.png index 04ecfe5fa1b..3f5a2f4b81c 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-modal.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-modal.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-operations-panel.png 
b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-operations-panel.png index 995dd31e432..dab311866eb 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-operations-panel.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/decision-operations-panel.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-button.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-button.png index 759862de71d..266feed89de 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-button.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-button.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-filters.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-filters.png index 10b92398fe0..e59dd6c0be6 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-filters.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-filters.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-modal.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-modal.png index 987cd8c2a2e..e7b43dd6881 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-modal.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-modal.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-operations-panel.png b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-operations-panel.png index 294a89a256e..4695be9a40a 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-operations-panel.png and b/versioned_docs/version-8.6/components/operate/userguide/img/delete-resources/process-operations-panel.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png index 0420066f41c..ab58ba88927 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png and b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-introduction.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png index 529f8459e71..af55d0f72e3 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png and b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-process-instance-id.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png 
b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png index 6ad81347f76..512e61877cd 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png and b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-instance-detail.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png index a4dfa0b55b7..216cb1ba1f7 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png and b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process-cancel.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png index bda1fae52da..60828f6b37e 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png and b/versioned_docs/version-8.6/components/operate/userguide/img/get-familiar-with-operate/operate-view-process.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expand-row-button.png b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expand-row-button.png index 070a4e3ceaa..196c9f12acd 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expand-row-button.png and b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expand-row-button.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png index 4080f3073a5..e1a7ec398bb 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png and b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-instances-row.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png index cfdd18812c9..d6e33aaf57d 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png and b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/expanded-operations-panel.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operation-state-row.png b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operation-state-row.png index 2b715e663fe..5e7cc20f808 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operation-state-row.png and 
b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operation-state-row.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operations-panel.png b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operations-panel.png index 553d9f6b9f6..dc65cf042f3 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operations-panel.png and b/versioned_docs/version-8.6/components/operate/userguide/img/monitor-operation-state/operations-panel.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/highlight-mapping.png b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/highlight-mapping.png index 278f7715043..ea4f1f654b4 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/highlight-mapping.png and b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/highlight-mapping.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/map-elements.png b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/map-elements.png index 535b2533e78..5de38787656 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/map-elements.png and b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/map-elements.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/migrate-button.png b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/migrate-button.png index 13dec77b84e..0583fc4e6a7 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/migrate-button.png and b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/migrate-button.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/process-filters.png b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/process-filters.png index 88f090c5e3d..041af6d034e 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/process-filters.png and b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/process-filters.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/select-target-process.png b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/select-target-process.png index 8636cb9a24d..5d00a37fad7 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/select-target-process.png and b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/select-target-process.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/summary.png b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/summary.png index a8a903642c3..b554d902212 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/summary.png and 
b/versioned_docs/version-8.6/components/operate/userguide/img/process-instance-migration/summary.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png b/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png index bda1fae52da..60828f6b37e 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png and b/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-many-instances-with-incident.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png b/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png index fb46985eba1..bd377b6013b 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png and b/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-operations-panel.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-select-operation.png b/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-select-operation.png index a136dd6b3fd..526a870144d 100644 Binary files a/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-select-operation.png and b/versioned_docs/version-8.6/components/operate/userguide/img/selections-and-operations/operate-select-operation.png differ diff --git a/versioned_docs/version-8.6/components/operate/userguide/process-instance-migration.md b/versioned_docs/version-8.6/components/operate/userguide/process-instance-migration.md index e05e36cb63a..e6973291c9a 100644 --- a/versioned_docs/version-8.6/components/operate/userguide/process-instance-migration.md +++ b/versioned_docs/version-8.6/components/operate/userguide/process-instance-migration.md @@ -19,7 +19,7 @@ Process instances can be migrated from one specific process definition version t ![operate-migrate-button](./img/process-instance-migration/migrate-button.png) :::note -It is only possible to migrate running process instances, meaning instances in active or incident state. All other process instances will not be part of the migration plan and will be ignored. +It is only possible to migrate running process instances, meaning instances in an active or incident state. All other process instances will not be part of the migration plan and will be ignored. Learn more about [all limitations](/components/concepts/process-instance-migration.md#limitations). ::: The migration view features three areas: the source process diagram (top left), the target process diagram (top right) and the flow node mapping (bottom panel). @@ -37,7 +37,7 @@ In this example, all service tasks from version 1 of `orderProcess` are each map ![operate-view-process-filters](./img/process-instance-migration/map-elements.png) :::note -It is currently only possible to map service tasks, user tasks, subprocesses, call activities, and child instances. Mapping subprocesses to a different scope or mapping event subprocesses is not yet supported by Zeebe. 
To learn about all limitations, visit the [concepts section](/components/concepts/process-instance-migration.md#limitations). +It is currently only possible to map elements with migration supported by Zeebe. Learn more about [supported elements](/components/concepts/process-instance-migration.md#supported-bpmn-elements). ::: 6. (Optional) Click on a flow node in the diagram or on a source flow node row in the bottom panel to see how flow nodes are mapped. diff --git a/versioned_docs/version-8.6/components/tasklist/userguide/defining-task-priorities.md b/versioned_docs/version-8.6/components/tasklist/userguide/defining-task-priorities.md index 01a41c9bdc7..2517497ee53 100644 --- a/versioned_docs/version-8.6/components/tasklist/userguide/defining-task-priorities.md +++ b/versioned_docs/version-8.6/components/tasklist/userguide/defining-task-priorities.md @@ -6,11 +6,15 @@ description: "Organize and order your tasks with clear prioritization." import styles from "./styles.module.css"; -You can add prioritization to [User Task elements](/components/modeler/bpmn/user-tasks/user-tasks.md) by specifying a priority value for a user task. This determines the task's importance in relation to other tasks within processes. +You can add prioritization to [user task elements](/components/modeler/bpmn/user-tasks/user-tasks.md) by specifying a priority value for a user task. This determines the task's importance in relation to other tasks within processes. - The task priority is an **integer** value ranging from 0 to 100, with a default value of 50. - A higher priority value indicates higher importance. +:::note +Task priority is supported only for user tasks with the _Zeebe user task_ implementation type. +::: + When displayed in Tasklist, priority values are mapped to the following default labels: | Priority value | Default label | @@ -26,11 +30,11 @@ These labels give Tasklist users a clear view of task priority, making it easier This step-by-step guide shows you how to define task priorities for Tasklist users. -### 1. Model a BPMN Process +### 1. Model a BPMN process Start by modeling your [BPMN process in Modeler](/guides/automating-a-process-using-bpmn.md), ensuring that the required user tasks are defined within the process. -### 2. Set a Priority for User Tasks +### 2. Set a priority for user tasks During user task configuration you can specify a priority value. You can also define the value using an [expression](/components/concepts/expressions.md). @@ -38,17 +42,17 @@ The priority value determines the task's importance relative to other tasks. ![set-user-task-priority-in-modeler](img/modeler-user-task-priority.jpg) -### 3. Deploy and Start the Process +### 3. Deploy and start the process After the process is fully defined and all configurations are complete, the process can be deployed and started. The priority values are now associated with each user task within the process. -### 4. Task Priority in Tasklist +### 4. View task priority in Tasklist Tasklist users can view the tasks assigned to them within their task list. Each task card displays the assigned priority label, ensuring users have a clear understanding of the task's importance and priority. ![set-user-task-priority-in-modeler](img/tasklist–tasks-with-priority.jpg) -### 5. Sort Tasks by Priority +### 5. Sort tasks by priority Task users can sort tasks by priority. This helps users organize their workload by focusing on urgent items first. 
diff --git a/versioned_docs/version-8.6/components/tasklist/userguide/img/modeler-user-task-priority.jpg b/versioned_docs/version-8.6/components/tasklist/userguide/img/modeler-user-task-priority.jpg index df0f3db07a8..847680dfd8a 100644 Binary files a/versioned_docs/version-8.6/components/tasklist/userguide/img/modeler-user-task-priority.jpg and b/versioned_docs/version-8.6/components/tasklist/userguide/img/modeler-user-task-priority.jpg differ diff --git a/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-language-settings.jpg b/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-language-settings.jpg index 564d12ea78e..edc13fe3bc5 100644 Binary files a/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-language-settings.jpg and b/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-language-settings.jpg differ diff --git a/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg b/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg index 7d7035c447b..15478961d8b 100644 Binary files a/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg and b/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist-tasks-with-priority-sorting.jpg differ diff --git "a/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" "b/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" index 86c1ed19da5..52310ac02de 100644 Binary files "a/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" and "b/versioned_docs/version-8.6/components/tasklist/userguide/img/tasklist\342\200\223tasks-with-priority.jpg" differ diff --git a/versioned_docs/version-8.6/components/tasklist/userguide/using-tasklist.md b/versioned_docs/version-8.6/components/tasklist/userguide/using-tasklist.md index 2d6b9b5f825..cef0c352bf5 100644 --- a/versioned_docs/version-8.6/components/tasklist/userguide/using-tasklist.md +++ b/versioned_docs/version-8.6/components/tasklist/userguide/using-tasklist.md @@ -68,6 +68,8 @@ From the task detail page you can switch to the **Process** tab. This provides a :::note The diagram indicates the version of the process instance in which the task was initiated. + +This feature is available for diagrams deployed on version 8.6 or higher. ::: #### Resource-based access (RBA) diff --git a/versioned_docs/version-8.6/components/zeebe/technical-concepts/architecture.md b/versioned_docs/version-8.6/components/zeebe/technical-concepts/architecture.md index 8921c856a4d..18487a401ed 100644 --- a/versioned_docs/version-8.6/components/zeebe/technical-concepts/architecture.md +++ b/versioned_docs/version-8.6/components/zeebe/technical-concepts/architecture.md @@ -52,7 +52,7 @@ The gateway is stateless and sessionless, and gateways can be added as necessary ## Brokers -The Zeebe broker is the distributed workflow engine that tracks the state of active process instances. +The Zeebe Broker is the distributed workflow engine that tracks the state of active process instances. Brokers can be partitioned for horizontal scalability and replicated for fault tolerance. A Zeebe deployment often consists of more than one broker. 
diff --git a/versioned_docs/version-8.6/components/zeebe/technical-concepts/internal-processing.md b/versioned_docs/version-8.6/components/zeebe/technical-concepts/internal-processing.md index b1ffabbdc74..53a00f92d5a 100644 --- a/versioned_docs/version-8.6/components/zeebe/technical-concepts/internal-processing.md +++ b/versioned_docs/version-8.6/components/zeebe/technical-concepts/internal-processing.md @@ -64,7 +64,7 @@ To avoid such problems, Zeebe employs [flow control](/self-managed/operational-g In the case of backpressure when the broker receives more requests than it can process with an acceptable latency, it rejects some requests. For flow control, it can be used with static write rate limits or throttling which prevents the partition from building an excessive backlog of records not exported. -Backpressure is indicated to the client by throwing a **resource exhausted** exception. If a client sees this exception, it can retry the requests with an appropriate retry strategy. If the rejection rate is high, it indicates the broker is constantly under high load and you need to reduce the rate of requests. Alternatively, you can also increase broker resources to adjust to your needs. In high-load scenarios, it is recommended to [benchmark](https://camunda.com/blog/2022/05/how-to-benchmark-your-camunda-platform-8-cluster/) your Zeebe broker up front to size it correctly. +Backpressure is indicated to the client by throwing a **resource exhausted** exception. If a client sees this exception, it can retry the requests with an appropriate retry strategy. If the rejection rate is high, it indicates the broker is constantly under high load and you need to reduce the rate of requests. Alternatively, you can also increase broker resources to adjust to your needs. In high-load scenarios, it is recommended to [benchmark](https://camunda.com/blog/2022/05/how-to-benchmark-your-camunda-platform-8-cluster/) your Zeebe Broker up front to size it correctly. The maximum rate of requests that can be processed by a broker depends on the processing capacity of the machine, the network latency, current load of the system, etc. There is no fixed limit configured in Zeebe for the maximum rate of requests it accepts. Instead, Zeebe uses an adaptive algorithm to dynamically determine the limit of the number of in-flight requests (the requests that are accepted by the broker, but not yet processed). diff --git a/versioned_docs/version-8.6/components/zeebe/zeebe-overview.md b/versioned_docs/version-8.6/components/zeebe/zeebe-overview.md index c1fda640f43..0e80316cde3 100644 --- a/versioned_docs/version-8.6/components/zeebe/zeebe-overview.md +++ b/versioned_docs/version-8.6/components/zeebe/zeebe-overview.md @@ -20,12 +20,6 @@ With Zeebe you can: For documentation on deploying Zeebe as part of Camunda 8 Self-Managed, refer to the [deployment guide](../../self-managed/zeebe-deployment/zeebe-installation.md). -## Enterprise support for Zeebe - -Paid support for Zeebe is available via either Camunda 8 Starter or Camunda 8 Enterprise plans. Customers can choose either plan based on their process automation requirements. Camunda 8 Enterprise customers also have the option of on-premises or private cloud deployment. - -Additionally, regardless of how you are working with Zeebe and Camunda 8, you can always find support through the [community](/contact/). - ## Next steps - Get familiar with [technical concepts](technical-concepts/technical-concepts-overview.md). 
diff --git a/versioned_docs/version-8.6/guides/configuring-out-of-the-box-connector.md b/versioned_docs/version-8.6/guides/configuring-out-of-the-box-connector.md index 18316fce3a6..c6bc2d5b6f4 100644 --- a/versioned_docs/version-8.6/guides/configuring-out-of-the-box-connector.md +++ b/versioned_docs/version-8.6/guides/configuring-out-of-the-box-connector.md @@ -1,6 +1,6 @@ --- id: configuring-out-of-the-box-connectors -title: Configure an out-of-the-box Connector +title: Integrate a Camunda Connector description: "Ready to use out of the box, Connectors help automate complex business processes by inserting them into BPMN diagrams." keywords: [connector, modeling, connectors, low-code, no-code] --- diff --git a/versioned_docs/version-8.6/guides/create-cluster.md b/versioned_docs/version-8.6/guides/create-cluster.md index a93f27b4e87..aed054f616a 100644 --- a/versioned_docs/version-8.6/guides/create-cluster.md +++ b/versioned_docs/version-8.6/guides/create-cluster.md @@ -1,6 +1,6 @@ --- id: create-cluster -title: Create your cluster +title: Create a cluster description: "Create a cluster in Camunda 8 to deploy and run your process." --- diff --git a/versioned_docs/version-8.6/guides/getting-started-java-spring.md b/versioned_docs/version-8.6/guides/getting-started-java-spring.md index 58f628a3891..c4a964fc92c 100644 --- a/versioned_docs/version-8.6/guides/getting-started-java-spring.md +++ b/versioned_docs/version-8.6/guides/getting-started-java-spring.md @@ -113,39 +113,19 @@ To implement a service task, take the following steps: ### Configure Spring Boot Starter -See our documentation on [adding the Spring Zeebe SDK to your project](/apis-tools/spring-zeebe-sdk/getting-started.md#add-the-spring-zeebe-sdk-to-your-project) for more details, also described below: - -1. Copy the following code snippet into the `pom.xml` file of your Spring project, below properties and above dependencies: - -```xml -<repositories> - <repository> - <releases> - <enabled>true</enabled> - </releases> - <snapshots> - <enabled>false</enabled> - </snapshots> - <id>identity</id> - <name>Camunda Identity</name> - <url>https://artifacts.camunda.com/artifactory/camunda-identity/</url> - </repository> -</repositories> -``` - -2. Add the following dependency to your `pom.xml` file, as a child of the `<dependencies>` element: +Add the following Maven dependency to your Spring Boot Starter project, replacing `x` with the latest patch level available: ```xml <dependency> - <groupId>io.camunda</groupId> - <artifactId>spring-boot-starter-camunda-sdk</artifactId> - <version>8.6.3</version> + <groupId>io.camunda</groupId> + <artifactId>spring-boot-starter-camunda-sdk</artifactId> + <version>8.6.x</version> </dependency> ``` ### Configure the Zeebe client -Open your `src/main/resources/application.yaml` file, and paste the following snippet to connect to the Self-Managed Zeebe broker: +Open your `src/main/resources/application.yaml` file, and paste the following snippet to connect to the Self-Managed Zeebe Broker: ```yaml camunda:
diff --git a/versioned_docs/version-8.6/guides/getting-started-orchestrate-apis.md b/versioned_docs/version-8.6/guides/getting-started-orchestrate-apis.md index 9d40d76137a..b68f3193a22 100644 --- a/versioned_docs/version-8.6/guides/getting-started-orchestrate-apis.md +++ b/versioned_docs/version-8.6/guides/getting-started-orchestrate-apis.md @@ -1,7 +1,7 @@ --- id: orchestrate-apis title: Get started with API orchestration -sidebar_label: Get started with API orchestration +sidebar_label: APIs description: "Use Connectors to build low code process automation solutions" keywords: [api endpoints, orchestration, getting started, user guide, connectors] diff --git a/versioned_docs/version-8.6/guides/getting-started-orchestrate-human-tasks.md b/versioned_docs/version-8.6/guides/getting-started-orchestrate-human-tasks.md index 7f21235bf64..999775428ce 100644 --- a/versioned_docs/version-8.6/guides/getting-started-orchestrate-human-tasks.md +++ b/versioned_docs/version-8.6/guides/getting-started-orchestrate-human-tasks.md @@ -1,7 +1,7 @@ --- id: orchestrate-human-tasks title: Get started with human task orchestration -sidebar_label: Get started with human task orchestration +sidebar_label: Human tasks description: "Efficiently allocate work through user tasks." keywords: [human tasks, orchestration, getting started, user guide] --- diff --git a/versioned_docs/version-8.6/guides/getting-started-orchestrate-microservices.md b/versioned_docs/version-8.6/guides/getting-started-orchestrate-microservices.md index b62fd44fb08..fd0e3433539 100644 --- a/versioned_docs/version-8.6/guides/getting-started-orchestrate-microservices.md +++ b/versioned_docs/version-8.6/guides/getting-started-orchestrate-microservices.md @@ -1,7 +1,7 @@ --- id: orchestrate-microservices title: Get started with microservice orchestration -sidebar_label: Get started with microservice orchestration +sidebar_label: Microservices description: "Orchestrate microservices for visibility and resilience." keywords: [microservices, orchestration, getting-started] --- @@ -32,7 +32,7 @@ You must have access to a Camunda 8 SaaS account.
Additionally, you need the following: -- Java >= 8 +- Java ≥ 8 - Maven - IDE (IntelliJ, VSCode, or similar) - Download and unzip or clone the [repo](https://github.com/camunda/camunda-platform-tutorials), then `cd` into `camunda-platform-tutorials/orchestrate-microservices/worker-java` diff --git a/versioned_docs/version-8.6/guides/improve-processes-with-optimize.md b/versioned_docs/version-8.6/guides/improve-processes-with-optimize.md index 55b073e667f..6ae6fc0a6bc 100644 --- a/versioned_docs/version-8.6/guides/improve-processes-with-optimize.md +++ b/versioned_docs/version-8.6/guides/improve-processes-with-optimize.md @@ -1,7 +1,7 @@ --- id: improve-processes-with-optimize title: Improve processes with Optimize -sidebar_label: Improve processes with Optimize +sidebar_label: Analyze processes with Optimize description: "Leverage data collected during process execution, analyze bottlenecks, and examine areas for improvement." --- diff --git a/versioned_docs/version-8.6/guides/introduction-to-camunda-8.md b/versioned_docs/version-8.6/guides/introduction-to-camunda-8.md index bcf3c0d7b1c..74ef3f25773 100644 --- a/versioned_docs/version-8.6/guides/introduction-to-camunda-8.md +++ b/versioned_docs/version-8.6/guides/introduction-to-camunda-8.md @@ -36,7 +36,7 @@ type:"link", href:"/docs/next/guides/getting-started-java-spring/", label: "Get } ]}/> -With these guides, start working with [Web Modeler](/components/modeler/about-modeler.md) to get familiar with BMPN and model a business process, or as a Java developer, step through using Spring Boot and the Spring Zeebe SDK with Desktop Modeler to interact with a local Self-Managed Camunda 8 installation. +With these guides, start working with [Web Modeler](/components/modeler/about-modeler.md) to get familiar with BPMN and model a business process, or as a Java developer, step through using Spring Boot and the Spring Zeebe SDK with Desktop Modeler to interact with a local Self-Managed Camunda 8 installation. ### Use cases diff --git a/versioned_docs/version-8.6/guides/message-correlation.md b/versioned_docs/version-8.6/guides/message-correlation.md deleted file mode 100644 index 7c0dc0ef853..00000000000 --- a/versioned_docs/version-8.6/guides/message-correlation.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -id: message-correlation -title: Message correlation -description: "Message correlation allows you to target a running workflow with a state update from an external system asynchronously." ---- - -Intermediate -Time estimate: 20 minutes - -## Prerequisites - -- [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js) -- [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) -- [Desktop Modeler](https://camunda.com/download/modeler/) - -## Message correlation - -Message correlation is a powerful feature in Camunda 8. It allows you to target a running workflow with a state update from an external system asynchronously. - -This tutorial uses the [Node.js client](https://github.com/camunda-community-hub/zeebe-client-node-js), but it serves to illustrate message correlation concepts that are applicable to all language clients. - -We will use [Simple Monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor) to inspect the running workflow state. Simple Monitor is a community-supported tool, and is not designed to be used in production. However, it is useful during development. 
- -## Workflow - -Here is a basic example from [the Camunda 8 documentation](/components/concepts/messages.md): - -![message correlation workflow](img/message-correlation-workflow.png) - -Use [Desktop Modeler](https://camunda.com/download/modeler/) to open the [test-messaging](https://github.com/jwulf/zeebe-message-correlation/blob/master/bpmn/test-messaging.bpmn) file in [this GitHub project](https://github.com/jwulf/zeebe-message-correlation). - -Click on the intermediate message catch event to see how it is configured: - -![message properties](img/message-correlation-message-properties.png) - -A crucial piece here is the **Subscription Correlation Key**. In a running instance of this workflow, an incoming **Money Collected** message will have a `correlationKey` property: - -```typescript - zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid" - }); -``` - -The concrete value of the message `correlationKey` is matched against running workflow instances by comparing the supplied value against the `orderId` variable of running instances subscribed to this message. This is the relationship established by setting the `correlationKey` to `orderId` in the message catch event in the BPMN. - -## Running the demonstration - -To run the demonstration, take the following steps: - -1. Clone this repository. -2. Install dependencies: - :::note - This guide requires `npm` version 6. - ::: - `npm i && npm i -g ts-node typescript` -3. In another terminal, start the Zeebe Broker in addition to [simple-monitor](https://github.com/camunda-community-hub/zeebe-simple-monitor). -4. Deploy the workflow and start an instance: - `ts-node start-workflow.ts` - This starts a workflow instance with the `orderId` set to 345: - -```typescript -await zbc.createProcessInstance("test-messaging", { - orderId: "345", - customerId: "110110", - paymentStatus: "unpaid", -}); -``` - -5. Open Simple Monitor at [http://localhost:8082](http://localhost:8082). -6. Click on the workflow instance. You will see the current state of the workflow: - ![workflow state](img/message-correlation-workflow-state.png) - The numbers above the BPMN symbols indicate that no tokens are waiting at the start event, and one has passed through. One token is waiting at the **Collect Money** task, and none have passed through. -7. Take a look at the **Variables** tab at the bottom of the screen. (If you don't see it, you are probably looking at the workflow, rather than the instance. In that case, drill down into the instance): - ![message correlation variables](img/message-correlation-variables.png) - You can see that this workflow instance has the variable `orderId` set to the value 345. -8. Start the workers: - `ts-node workers.ts` -9. Refresh Simple Monitor to see the current state of the workflow: - ![message correlation wait on message](img/message-correlation-wait-on-message.png) - Now, the token is at the message catch event, waiting for a message to be correlated. -10. Take a look at the **Message Subscriptions** tab: - ![message subscriptions](img/message-correlation-message-subscriptions.png) - You can see the broker has opened a message subscription for this workflow instance with the concrete value of the `orderId` 345. This was created when the token entered the message catch event. -11. Send the message in another terminal: - `ts-node send-message.ts` -12. 
Refresh Simple Monitor, and note that the message has been correlated and the workflow has run to completion: - -![message correlation completed](img/message-correlation-completed.png) - -The **Message Subscriptions** tab now reports that the message was correlated: - -![message correlation correlated](img/message-correlation-correlated.png) - -## Message buffering - -Messages are buffered on the broker, so your external systems can emit messages before your process arrives at the catch event. The amount of time a message is buffered is configured when publishing the message from the client library. - -For example, to send a message buffered for 10 minutes with the JavaScript client: - -```typescript -zbc.publishMessage({ - correlationKey: "345", - name: "Money Collected", - variables: { - paymentStatus: "paid", - }, - timeToLive: 600000, -}); -``` - -To see it in action, take the following steps: - -1. Keep the workers running. -2. Publish the message: - -```typescript -ts-node send-message.ts -``` - -3. Click on **Messages** at the top of the Simple Monitor page. You will see the message buffered on the broker: - -![message buffered on broker](img/message-correlation-buffered.png) - -4. Start another instance of the workflow: - -```typescript -ts-node start-workflow.ts -``` - -Note that the message is correlated to the workflow instance, even though it arrived before the workflow instance was started. - -## Common mistakes - -A couple of common gotchas: - -- The `correlationKey` in the BPMN message definition is the name of the workflow variable to match against. The `correlationKey` in the message is the concrete value to match against that variable in the workflow instance. - -- The message subscription _is not updated after it is opened_. That is not an issue in the case of a message catch event. However, for boundary message events (both interrupting and non-interrupting,) the subscription is opened _as soon as the token enters the bounding subprocess_. If any service task modifies the `orderId` value inside the subprocess, the subscription is not updated. - -For example, the interrupting boundary message event in the following example will not be correlated on the updated value, because the subscription is opened when the token enters the subprocess, using the value at that time: - -![not correlating](img/message-correlation-not-like-this.png) - -If you need a boundary message event correlated on a value modified somewhere in your process, put the boundary message event in a subprocess after the task that sets the variable. The message subscription for the boundary message event will open when the token enters the subprocess, with the current variable value. - -![correlating](img/message-correlation-like-this.png) - -## Summary - -Message Correlation is a powerful feature in Camunda 8. Knowing how messages are correlated, and how and when the message subscription is created is important to design systems that perform as expected. - -Simple Monitor is a useful tool for inspecting the behavior of a local Camunda 8 system to figure out what is happening during development. 
diff --git a/versioned_docs/version-8.6/guides/migrating-from-camunda-7/adjusting-bpmn-models.md b/versioned_docs/version-8.6/guides/migrating-from-camunda-7/adjusting-bpmn-models.md index 08c4f1b36e3..c653c147aa7 100644 --- a/versioned_docs/version-8.6/guides/migrating-from-camunda-7/adjusting-bpmn-models.md +++ b/versioned_docs/version-8.6/guides/migrating-from-camunda-7/adjusting-bpmn-models.md @@ -92,7 +92,7 @@ The following is **not** possible: ![User Task](../../components/modeler/bpmn/assets/bpmn-symbols/user-task.svg) -Human task management is also available in Camunda 8, but uses a different Tasklist user interface and API. +[Human task management](/guides/getting-started-orchestrate-human-tasks.md) is also available in Camunda 8, but uses a different Tasklist user interface and API. In Camunda 7, you have [different ways to provide forms for user tasks](https://docs.camunda.org/manual/latest/user-guide/task-forms/): diff --git a/versioned_docs/version-8.6/guides/migrating-from-camunda-7/conceptual-differences.md b/versioned_docs/version-8.6/guides/migrating-from-camunda-7/conceptual-differences.md index fa6375d3f8f..96fa8a44d71 100644 --- a/versioned_docs/version-8.6/guides/migrating-from-camunda-7/conceptual-differences.md +++ b/versioned_docs/version-8.6/guides/migrating-from-camunda-7/conceptual-differences.md @@ -58,7 +58,7 @@ There are several differences between how [multi-tenancy](/self-managed/concepts 2. In Camunda 7, users can deploy shared resources (processes, decisions, and forms) available to all tenants. In Camunda 8, there are no shared resources. This will be added in the future. 3. In Camunda 7, data is mapped to a `null` tenant identifier, meaning by default resources are shared. In Camunda 8, data is mapped to the `` tenant identifier when multi-tenancy is disabled. 4. [Tenant checks in Camunda 7](https://docs.camunda.org/manual/develop/user-guide/process-engine/multi-tenancy/#disable-the-transparent-access-restrictions) can be disabled to perform admin/maintenance operations. This can't be done in Camunda 8, but an admin user can be authorized to all tenants, which would result in the same thing. -5. If a user tries to trigger a command on a resource mapped to multiple tenants in Camunda 7, an exception is thrown, and [the `tenantId` must be explicitly provided](https://docs.camunda.org/manual/develop/user-guide/process-engine/multi-tenancy/#run-commands-for-a-tenant). However, the Camunda 7 engine will try to infer the correct `tenantId` as much as possible. Users in Camunda 7 that are authorized for multiple tenants may perform a lot more operations without providing a `tenantId`. This inference in the Zeebe broker doesn't happen in Camunda 8, and Zeebe asks users to provide the `tenantId` explicitly. +5. If a user tries to trigger a command on a resource mapped to multiple tenants in Camunda 7, an exception is thrown, and [the `tenantId` must be explicitly provided](https://docs.camunda.org/manual/develop/user-guide/process-engine/multi-tenancy/#run-commands-for-a-tenant). However, the Camunda 7 engine will try to infer the correct `tenantId` as much as possible. Users in Camunda 7 that are authorized for multiple tenants may perform a lot more operations without providing a `tenantId`. This inference in the Zeebe Broker doesn't happen in Camunda 8, and Zeebe asks users to provide the `tenantId` explicitly. 
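To illustrate that last point, here is a hedged sketch with the Zeebe Java client: in a multi-tenant Camunda 8 cluster (8.3 or later), the tenant ID is passed explicitly on the command. The process ID, tenant ID, and variables below are placeholders, and the client is assumed to be configured elsewhere.

```java
import io.camunda.zeebe.client.ZeebeClient;
import java.util.Map;

public class TenantAwareStart {
  // Hypothetical helper: starts an instance for one specific tenant.
  static void startForTenant(ZeebeClient client) {
    client.newCreateInstanceCommand()
        .bpmnProcessId("invoice-process")   // placeholder process ID
        .latestVersion()
        .tenantId("tenant-a")               // Zeebe does not infer this; it must be provided
        .variables(Map.of("invoiceId", "A-123"))
        .send()
        .join();
  }
}
```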
## Process solutions using Spring Boot @@ -123,7 +123,7 @@ With Camunda 7 a typical deployment includes: With Camunda 8 you deploy: - Your Spring Boot application with all custom code and the Zeebe client embedded. This application is typically scaled to at least two instances (for resilience) -- The Zeebe broker, typically scaled to at least three instances (for resilience) +- The Zeebe Broker, typically scaled to at least three instances (for resilience) - An elastic database (for Operate, Tasklist, and Optimize) - Optimize, Operate, and Tasklist (each one is a Java application). You can scale those applications to increase availability if you want. diff --git a/versioned_docs/version-8.6/guides/migrating-from-camunda-7/index.md b/versioned_docs/version-8.6/guides/migrating-from-camunda-7/index.md index 000e09dd12f..39764a6da25 100644 --- a/versioned_docs/version-8.6/guides/migrating-from-camunda-7/index.md +++ b/versioned_docs/version-8.6/guides/migrating-from-camunda-7/index.md @@ -53,4 +53,4 @@ As described earlier in this guide, migration is an ongoing topic and this guide - Discuss workload migrations (operations) - Eventual consistency -[Reach out to us](/contact/) to discuss your specific migration use case. +[Reach out to us](/reference/contact.md) to discuss your specific migration use case. diff --git a/versioned_docs/version-8.6/guides/react-components/_install-c8run.md b/versioned_docs/version-8.6/guides/react-components/_install-c8run.md index adba10df495..50fb42c0795 100644 --- a/versioned_docs/version-8.6/guides/react-components/_install-c8run.md +++ b/versioned_docs/version-8.6/guides/react-components/_install-c8run.md @@ -11,14 +11,14 @@ If no version of Java is found, follow your chosen installation's instructions f ### Install and start Camunda 8 Run -1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/c8run-8.6.2) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. +1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/8.6.6) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. 2. Navigate to the new `c8run` directory. 3. Start Camunda 8 Run by running `./start.sh` (or `.\c8run.exe start` on Windows) in your terminal. When successful, a new Operate window automatically opens. :::note -If Camunda 8 Run fails to start, run the [shutdown script](/self-managed/setup/deploy/local/c8run.md/#shut-down-camunda-8-run) to end the current processes, then run the start script again. +If Camunda 8 Run fails to start, run the [shutdown script](/self-managed/setup/deploy/local/c8run.md#shut-down-camunda-8-run) to end the current processes, then run the start script again. ::: For more information and local configuration options, see the [Camunda 8 Run installation guide](/self-managed/setup/deploy/local/c8run.md). 
diff --git a/versioned_docs/version-8.6/guides/setting-up-development-project.md b/versioned_docs/version-8.6/guides/setting-up-development-project.md index 8f099e9a8d4..d0d2d4b9c53 100644 --- a/versioned_docs/version-8.6/guides/setting-up-development-project.md +++ b/versioned_docs/version-8.6/guides/setting-up-development-project.md @@ -1,6 +1,6 @@ --- id: setting-up-development-project -title: Set up your first development project +title: Set up a development project description: "Set up your first project to model, deploy, and start a process instance." keywords: [get-started, local-install] --- diff --git a/versioned_docs/version-8.6/images/operate/modifications/add-token-result.png b/versioned_docs/version-8.6/images/operate/modifications/add-token-result.png index 1e0b91a61c1..bfeae7ff743 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/add-token-result.png and b/versioned_docs/version-8.6/images/operate/modifications/add-token-result.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/add-token.png b/versioned_docs/version-8.6/images/operate/modifications/add-token.png index 0360f69efb2..0521bc21194 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/add-token.png and b/versioned_docs/version-8.6/images/operate/modifications/add-token.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/add-variable-result.png b/versioned_docs/version-8.6/images/operate/modifications/add-variable-result.png index fd78323d1bd..6e1cadc84d2 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/add-variable-result.png and b/versioned_docs/version-8.6/images/operate/modifications/add-variable-result.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/add-variable-to-new-scope.png b/versioned_docs/version-8.6/images/operate/modifications/add-variable-to-new-scope.png index 5418b9a8ec6..52381c7272f 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/add-variable-to-new-scope.png and b/versioned_docs/version-8.6/images/operate/modifications/add-variable-to-new-scope.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/applied-modifications.png b/versioned_docs/version-8.6/images/operate/modifications/applied-modifications.png index 197d640cad4..4e06133cc7c 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/applied-modifications.png and b/versioned_docs/version-8.6/images/operate/modifications/applied-modifications.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/apply-modifications-button.png b/versioned_docs/version-8.6/images/operate/modifications/apply-modifications-button.png index d126abd0a96..a3cd4141566 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/apply-modifications-button.png and b/versioned_docs/version-8.6/images/operate/modifications/apply-modifications-button.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/cancel-token-result.png b/versioned_docs/version-8.6/images/operate/modifications/cancel-token-result.png index c013cb90496..955fac999ef 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/cancel-token-result.png and b/versioned_docs/version-8.6/images/operate/modifications/cancel-token-result.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/cancel-token.png 
b/versioned_docs/version-8.6/images/operate/modifications/cancel-token.png index 75bea13b206..945e21b6051 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/cancel-token.png and b/versioned_docs/version-8.6/images/operate/modifications/cancel-token.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/edit-variable-on-existing-scope.png b/versioned_docs/version-8.6/images/operate/modifications/edit-variable-on-existing-scope.png index 4e45f37115b..904e47f4f59 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/edit-variable-on-existing-scope.png and b/versioned_docs/version-8.6/images/operate/modifications/edit-variable-on-existing-scope.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/edit-variable-result.png b/versioned_docs/version-8.6/images/operate/modifications/edit-variable-result.png index 8d9ba4dc484..dde7a69b65d 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/edit-variable-result.png and b/versioned_docs/version-8.6/images/operate/modifications/edit-variable-result.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/edit-variable-value.png b/versioned_docs/version-8.6/images/operate/modifications/edit-variable-value.png index 934a49555a9..885a0ed7a04 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/edit-variable-value.png and b/versioned_docs/version-8.6/images/operate/modifications/edit-variable-value.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/enter-modification-mode.png b/versioned_docs/version-8.6/images/operate/modifications/enter-modification-mode.png index 0ed6c1ebd8c..5980c67990f 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/enter-modification-mode.png and b/versioned_docs/version-8.6/images/operate/modifications/enter-modification-mode.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/modification-mode.png b/versioned_docs/version-8.6/images/operate/modifications/modification-mode.png index e11a1068930..91957edddba 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/modification-mode.png and b/versioned_docs/version-8.6/images/operate/modifications/modification-mode.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/modification-summary-modal.png b/versioned_docs/version-8.6/images/operate/modifications/modification-summary-modal.png index 02f11aff5d6..6d45bb76287 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/modification-summary-modal.png and b/versioned_docs/version-8.6/images/operate/modifications/modification-summary-modal.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/move-token-result.png b/versioned_docs/version-8.6/images/operate/modifications/move-token-result.png index 3c6ec632f8a..a1bb3c32d94 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/move-token-result.png and b/versioned_docs/version-8.6/images/operate/modifications/move-token-result.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/move-token-select-target.png b/versioned_docs/version-8.6/images/operate/modifications/move-token-select-target.png index 951a9800de1..6c29db4594f 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/move-token-select-target.png and 
b/versioned_docs/version-8.6/images/operate/modifications/move-token-select-target.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/move-token.png b/versioned_docs/version-8.6/images/operate/modifications/move-token.png index 87827dd98c6..a13883038c8 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/move-token.png and b/versioned_docs/version-8.6/images/operate/modifications/move-token.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/not-supported-flow-nodes.png b/versioned_docs/version-8.6/images/operate/modifications/not-supported-flow-nodes.png index bb2a5f59e8a..d5901a73ee6 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/not-supported-flow-nodes.png and b/versioned_docs/version-8.6/images/operate/modifications/not-supported-flow-nodes.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/select-new-scope.png b/versioned_docs/version-8.6/images/operate/modifications/select-new-scope.png index ed9f149dd06..484a2225189 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/select-new-scope.png and b/versioned_docs/version-8.6/images/operate/modifications/select-new-scope.png differ diff --git a/versioned_docs/version-8.6/images/operate/modifications/undo-modification.png b/versioned_docs/version-8.6/images/operate/modifications/undo-modification.png index 8d9ba4dc484..dde7a69b65d 100644 Binary files a/versioned_docs/version-8.6/images/operate/modifications/undo-modification.png and b/versioned_docs/version-8.6/images/operate/modifications/undo-modification.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-dashboard-no-processes.png b/versioned_docs/version-8.6/images/operate/operate-dashboard-no-processes.png index d00ff42eb40..fbcdb3efc52 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-dashboard-no-processes.png and b/versioned_docs/version-8.6/images/operate/operate-dashboard-no-processes.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-incident-resolved-path.png b/versioned_docs/version-8.6/images/operate/operate-incident-resolved-path.png index 9aeb29afa54..a30cb3138ff 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-incident-resolved-path.png and b/versioned_docs/version-8.6/images/operate/operate-incident-resolved-path.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-incident-resolved.png b/versioned_docs/version-8.6/images/operate/operate-incident-resolved.png index 859243dd709..bd1c6f3dc1d 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-incident-resolved.png and b/versioned_docs/version-8.6/images/operate/operate-incident-resolved.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-introduction.png b/versioned_docs/version-8.6/images/operate/operate-introduction.png index 6935f5092d9..ab58ba88927 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-introduction.png and b/versioned_docs/version-8.6/images/operate/operate-introduction.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-process-retry-incident.png b/versioned_docs/version-8.6/images/operate/operate-process-retry-incident.png index 40946bbde83..e222a68906f 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-process-retry-incident.png and b/versioned_docs/version-8.6/images/operate/operate-process-retry-incident.png differ diff 
--git a/versioned_docs/version-8.6/images/operate/operate-process-view-incident.png b/versioned_docs/version-8.6/images/operate/operate-process-view-incident.png index 110cc2596e4..1689e3c8096 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-process-view-incident.png and b/versioned_docs/version-8.6/images/operate/operate-process-view-incident.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-view-instance-edit-icon.png b/versioned_docs/version-8.6/images/operate/operate-view-instance-edit-icon.png index 3974cfba959..87a4d30cb79 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-view-instance-edit-icon.png and b/versioned_docs/version-8.6/images/operate/operate-view-instance-edit-icon.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-view-instance-incident.png b/versioned_docs/version-8.6/images/operate/operate-view-instance-incident.png index 9ece37d7ec2..6c96d4e5fe7 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-view-instance-incident.png and b/versioned_docs/version-8.6/images/operate/operate-view-instance-incident.png differ diff --git a/versioned_docs/version-8.6/images/operate/operate-view-instance-save-variable-icon.png b/versioned_docs/version-8.6/images/operate/operate-view-instance-save-variable-icon.png index 9af83353843..9e0bc3d659d 100644 Binary files a/versioned_docs/version-8.6/images/operate/operate-view-instance-save-variable-icon.png and b/versioned_docs/version-8.6/images/operate/operate-view-instance-save-variable-icon.png differ diff --git a/versioned_docs/version-8.6/reference/alpha-features.md b/versioned_docs/version-8.6/reference/alpha-features.md index 8a65e9ee0fa..c8229dff48c 100644 --- a/versioned_docs/version-8.6/reference/alpha-features.md +++ b/versioned_docs/version-8.6/reference/alpha-features.md @@ -22,7 +22,7 @@ Limitations of alpha features and components include: - Not necessarily feature-complete. - Might lack full documentation. - No guaranteed updates to newer releases. -- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://docs.camunda.org/enterprise/support/). +- Support based on SLAs agreed with you, but bugs are treated with the same priority as feature or help requests. See [Camunda Enterprise Support Guide](https://camunda.com/services/enterprise-support-guide/). - No maintenance service. - (SaaS) No availability targets. - Released outside the standard [release policy](release-policy.md). @@ -32,7 +32,7 @@ To learn more about using alpha features, see [enabling alpha features](/compone :::note - Alpha features can also be included in a minor version (stable) release. -- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/contact). +- Although there is no maintenance service, customers can still provide feedback through designated support channels, depending on their SLAs. These channels include filing issues in the respective [GitHub repositories](https://github.com/camunda) and submitting questions and suggestions by [contacting us](/reference/contact.md). 
::: @@ -43,7 +43,7 @@ Once features and components are released and considered stable, they become gen Stable features and components are: - Ready for production use for most users with minimal risk. -- Supported by [L1 Priority-level support](https://docs.camunda.org/enterprise/support/#priority-level) for production use. +- Supported by [L1 Priority-level support](https://camunda.com/services/enterprise-support-guide/) for production use. - Fully documented. A release or component is considered stable if it has passed all verification and test stages and can be released to production. diff --git a/versioned_docs/version-8.6/reference/contact.md b/versioned_docs/version-8.6/reference/contact.md new file mode 100644 index 00000000000..4e6c7fc3892 --- /dev/null +++ b/versioned_docs/version-8.6/reference/contact.md @@ -0,0 +1,45 @@ +--- +id: contact +title: Contact +description: Contact Camunda, submit feedback, find support using the Camunda community forum, note bug reports and feature requests, and review security notices. +keywords: + [ + support, + contact-us, + get-support, + help, + need-help, + bug, + bug-report, + feature-request, + issue, + enterprise-support, + ] +--- + +There are a few different channels you can reach us based on your needs: + +- We encourage everyone to participate in our **community** via the [Camunda community forum](https://forum.camunda.io/), where you can exchange ideas with other Camunda users, as well as Camunda employees. For all other Camunda community programs and resources, visit our [Camunda Developer Hub](https://camunda.com/developers). + +- We welcome your **bug** reports and **feature requests** through our community channels mentioned above. + +- For **security-related issues**, review our [security notices](/reference/notices.md) for the most up-to-date information on known issues and steps to report a vulnerability so we can solve the problem as quickly as possible. Do not use GitHub for security-related issues. + +- **Feedback and support** can be submitted or requested via JIRA by following our [Enterprise support process](https://camunda.com/services/enterprise-support-guide/). All users can also find feedback and support options in the Help Center or [Camunda community forum](https://forum.camunda.io/). + +- For sales inquiries, information about Camunda 8 performance and benchmarking, or anything not listed above, use our [Contact Us](https://camunda.com/contact/) form. + +## Locating Camunda 8 credentials + +Need assistance locating your Camunda 8 credentials? You can obtain these credentials from Camunda by submitting a **Help Request**. To do this, take the following steps: + +1. Log in to [Jira](https://jira.camunda.com/secure/Dashboard.jspa). +2. Click **Create** in the navigation bar at the top of the page. This launches a **Create Issue** pop-up. +3. In the **Issue Type** field, select **Help Request**. +4. In the **Help Request Type** field, click the option that reads **I need the credentials for downloading Camunda**. +5. In the **Summary** and **Description** fields, **I need the credentials for downloading Camunda** will populate by default. + ![completed help request example](./img/create-issue-request.png) +6. (Optional) Add more details, such as the priority level or authorized support contacts. +7. Click **Create** at the bottom of the pop-up **Create Issue** box. + +After completing these steps, your request is generated. 
Find additional details on submitting a self-service help request [here](https://camunda.com/services/enterprise-support-guide/). diff --git a/versioned_docs/version-8.6/reference/glossary.md b/versioned_docs/version-8.6/reference/glossary.md index b659d040f8b..486cd1eb278 100644 --- a/versioned_docs/version-8.6/reference/glossary.md +++ b/versioned_docs/version-8.6/reference/glossary.md @@ -10,13 +10,13 @@ Synonym to "[Connector](#connector)". ### Broker -A broker is an instance of a Zeebe installation which executes processes and manages process state. A single broker is installed on a single machine. +The [Zeebe Broker](#zeebe-broker) is the distributed workflow engine that tracks the state of active process instances. However, a Zeebe deployment often consists of more than one broker. Brokers can be partitioned for horizontal scalability and replicated for fault tolerance. -- [Architecture](/components/zeebe/technical-concepts/architecture.md#brokers) +- [Architecture](/components/zeebe/technical-concepts/architecture.md) ### Client -A client interacts with the Zeebe broker on behalf of the business application. Clients poll for work from the broker. +A client interacts with the Zeebe Broker on behalf of the business application. Clients poll for work from the broker. - [Architecture](/components/zeebe/technical-concepts/architecture.md#clients) @@ -82,11 +82,11 @@ In a clustered environment, a broker which is not a leader is a follower of a gi - [Clustering](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol) -### Gateway +### Human task -Clients communicate with the Zeebe cluster through a gateway. The gateway provides a REST and gRPC API and forwards client commands to the cluster. Depending on the setup, a gateway can be embedded in the broker or can be configured to be standalone. +Camunda 8 allows you to orchestrate processes with human tasks, which may be [user tasks](#user-task) or [manual tasks](#manual-task). -- [Architecture](/components/zeebe/technical-concepts/architecture.md#gateways) +- [Human task orchestration](/guides/getting-started-orchestrate-human-tasks.md) ### Hybrid mode @@ -149,6 +149,14 @@ The log is comprised of an ordered sequence of records written to persistent sto - [Partitions](/components/zeebe/technical-concepts/partitions.md#partition-data-layout) +### Manual task + +A manual task defines a task that requires human interaction but no external tooling or UI interface. For example, a user reviewing a document or completing a physical task. + +Manual tasks are part of [human task orchestration](/guides/getting-started-orchestrate-human-tasks.md), but differ from [user tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) which define an actionable task assisted by a business process execution engine or software application. + +- [Manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md) + ### Message A message contains information to be delivered to interested parties during execution of a process instance. Messages can be published via Kafka or Zeebe’s internal messaging system. Messages are associated with timestamp and other constraints such as time-to-live (TTL). @@ -161,7 +169,7 @@ Outbound [Connectors](#connector) in Camunda 8 allow workflows to trigger with e ### Partition -A partition represents a logical grouping of data in a Zeebe broker. This data includes process instance variables stored in RocksDB, commands, and events generated by Zeebe stored in the log. 
The number of partitions is defined by configuration. +A partition represents a logical grouping of data in a Zeebe Broker. This data includes process instance variables stored in RocksDB, commands, and events generated by Zeebe stored in the log. The number of partitions is defined by configuration. - [Partitions](/components/zeebe/technical-concepts/partitions.md) @@ -250,6 +258,12 @@ An [inbound Connector](#inbound-connector) that subscribes to a message queue. This way, a Camunda workflow can receive messages from an external system or service (like Kafka or RabbitMQ) using message queuing technology. This type of inbound Connector is commonly used in distributed systems where different components of the system need to communicate with each other asynchronously. +### User task + +A user task is used to model work that needs to be done by a human and is assisted by a business process execution engine or software application. This differs from [manual tasks](/components/modeler/bpmn/manual-tasks/manual-tasks.md), which are not assisted by external tooling. + +- [User tasks](/components/modeler/bpmn/user-tasks/user-tasks.md) + ### Webhook Connector Webhooks are a subtype of [inbound Connector](#inbound-connector). @@ -273,3 +287,13 @@ See [process instance](#process-instance). ### Workflow instance variable See [process instance variable](#process-instance-variable). + +### Zeebe Broker + +The [Zeebe Broker](/components/zeebe/technical-concepts/architecture.md#brokers) is the distributed workflow engine that tracks the state of active process instances. The Zeebe Broker is the main part of the Zeebe cluster, doing the heavy work of processing, replicating, and exporting, all organized by partitions. + +### Zeebe Gateway + +The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster, allowing Zeebe clients to communicate with the Zeebe brokers inside it. + +- [Zeebe Gateway overview](/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md) diff --git a/versioned_docs/version-8.6/reference/img/create-issue-request.png b/versioned_docs/version-8.6/reference/img/create-issue-request.png new file mode 100644 index 00000000000..374fdfece6f Binary files /dev/null and b/versioned_docs/version-8.6/reference/img/create-issue-request.png differ diff --git a/versioned_docs/version-8.6/reference/notices.md b/versioned_docs/version-8.6/reference/notices.md index 1773709bb43..60b0a1322ba 100644 --- a/versioned_docs/version-8.6/reference/notices.md +++ b/versioned_docs/version-8.6/reference/notices.md @@ -74,11 +74,11 @@ Tasklist The REST API functionality of Tasklist 8.2.0 and 8.2.1 allows unauthenticated access to the following methods/URLs: -- GET /v1/tasks/{taskId} +- GET /v1/tasks/\{taskId} - POST /v1/tasks/search -- POST /v1/tasks/{taskId}/variables/search -- POST /v1/forms/{formId} -- POST /v1/variables/{variableId} +- POST /v1/tasks/\{taskId}/variables/search +- POST /v1/forms/\{formId} +- POST /v1/variables/\{variableId} Find more information about the methods in our [Tasklist REST API documentation](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md).
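As an illustration of how such exposure might be checked, the following sketch sends a request to one of the listed endpoints without an `Authorization` header; the base URL is a placeholder for a local Tasklist installation, and this is only an informal probe, not an official verification procedure. On a patched installation, the request should be rejected with an authentication error.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TasklistAuthProbe {
  public static void main(String[] args) throws Exception {
    // Placeholder base URL for a local Tasklist installation.
    URI searchEndpoint = URI.create("http://localhost:8080/v1/tasks/search");

    // Deliberately unauthenticated request to the task search endpoint.
    HttpRequest request = HttpRequest.newBuilder(searchEndpoint)
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString("{}"))
        .build();

    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());

    // A 401/403 indicates authentication is enforced; anything else warrants a closer look.
    System.out.println("Status: " + response.statusCode());
  }
}
```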
@@ -115,7 +115,7 @@ At this point, Camunda is not aware of any specific attack vector in Tasklist al #### How to determine if the installation is affected -You are Tasklist version (8.0.3 >= version <= 8.0.7) or <= 8.1.2 +You are using Tasklist version (8.0.3 ≤ version ≤ 8.0.7) or ≤ 8.1.2 #### Solution @@ -142,7 +142,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.11 or <= 1.3.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.11 or ≤ 1.3.6 #### Solution @@ -168,7 +168,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.8 or <= 1.1.9 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.8 or ≤ 1.1.9 #### Solution @@ -194,7 +194,7 @@ Specifically, IAM bundles log4j-api and log4j-to-slf4j. However, IAM does not bu #### How to determine if the installation is affected -You are using IAM version <= 1.2.8 +You are using IAM version ≤ 1.2.8 #### Solution @@ -219,7 +219,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.7 or <= 1.1.8 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.7 or ≤ 1.1.8 #### Solution @@ -248,7 +248,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.7 +You are using IAM version ≤ 1.2.7 #### Solution @@ -273,7 +273,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.6 or <= 1.1.7 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.6 or ≤ 1.1.7 #### Solution @@ -302,7 +302,7 @@ At this point, Camunda is not aware of any specific attack vector in IAM allowin #### How to determine if the installation is affected -You are using IAM version <= 1.2.6 +You are using IAM version ≤ 1.2.6 #### Solution @@ -327,7 +327,7 @@ At this point, Camunda is not aware of any specific attack vector in Zeebe, Oper #### How to determine if the installation is affected -You are using Zeebe, Operate or Tasklist version <= 1.2.5 or <= 1.1.6 +You are using Zeebe, Operate or Tasklist version ≤ 1.2.5 or ≤ 1.1.6 #### Solution @@ -357,7 +357,7 @@ Still, Camunda recommends applying fixes as mentioned in the Solution section be #### How to determine if the installation is affected -You are using IAM version <= 1.2.5 +You are using IAM version ≤ 1.2.5 #### Solution diff --git a/versioned_docs/version-8.6/reference/regions.md b/versioned_docs/version-8.6/reference/regions.md index 2ea6139f85b..41ed59eaf2c 100644 --- a/versioned_docs/version-8.6/reference/regions.md +++ b/versioned_docs/version-8.6/reference/regions.md @@ -9,7 +9,7 @@ When you create a cluster in Camunda 8 SaaS, you must specify a region for that Currently, we make these regions available for customers on the Trial, Starter, and Enterprise Plans. Enterprise customers can discuss custom regions with their Customer Success Manager. :::note -Our Console and Web Modeler components are currently hosted in the EU. [Contact us](https://camunda.com/contact/) if you have additional questions.
+Our Console and Web Modeler components are currently hosted in the EU. [Contact us](/reference/contact.md) if you have additional questions. ::: Below, find a list of regions currently supported in Camunda 8 SaaS. @@ -21,6 +21,7 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. | Belgium, Europe (europe-west1) | Germany, Europe (europe-west3) | | Iowa, North America (us-central1) | Salt Lake City, North America (us-west1) | | London, Europe (europe-west2) | _Not available_ | +| Singapore, Asia (asia-southeast1) | Changhua County, Taiwan (asia-east1) | | South Carolina, North America (us-east1) | Iowa, North America (us-central1) | | Sydney, Australia (australia-southeast1) | Melbourne, Australia (australia-southeast2) | | Toronto, North America (northamerica-northeast2) | Montréal, North America (northamerica-northeast1) | @@ -28,5 +29,5 @@ Below, find a list of regions currently supported in Camunda 8 SaaS. You can find the locations behind the region codes [on the Google page](https://cloud.google.com/about/locations). :::note -Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](https://camunda.com/contact/) as we are able to make additional regions available on request. +Running on a Trial or Starter plan and want to try a different region, or interested in other regions or cloud providers? [Contact us](/reference/contact.md) as we are able to make additional regions available on request. ::: diff --git a/versioned_docs/version-8.6/reference/release-notes/850.md b/versioned_docs/version-8.6/reference/release-notes/850.md index 43f3806758c..d514f55adca 100644 --- a/versioned_docs/version-8.6/reference/release-notes/850.md +++ b/versioned_docs/version-8.6/reference/release-notes/850.md @@ -108,7 +108,7 @@ The first iteration of this feature brings back existing features from Camunda 7 -The first step to offer an intuitive and consistent experience via a single, [unified Camunda 8 REST API](https://camunda.com/blog/2024/03/streamlining-camunda-apis-zeebe-rest-api/) is to provide the Zeebe REST API. With this release, developers can use the Zeebe REST API to manage [Zeebe user tasks](/apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks.md), enabling immediate task state changes. The Zeebe REST API includes support for Identity authentication and multi-tenancy, ensuring parity to the Zeebe gRPC API. +The first step to offer an intuitive and consistent experience via a single, [unified Camunda 8 REST API](https://camunda.com/blog/2024/03/streamlining-camunda-apis-zeebe-rest-api/) is to provide the Zeebe REST API. With this release, developers can use the Zeebe REST API to manage [Zeebe user tasks](/apis-tools/migration-manuals/migrate-to-zeebe-user-tasks.md), enabling immediate task state changes. The Zeebe REST API includes support for Identity authentication and multi-tenancy, ensuring parity to the Zeebe gRPC API. ### Refactoring suggestions Modeler diff --git a/versioned_docs/version-8.6/reference/release-notes/860.md b/versioned_docs/version-8.6/reference/release-notes/860.md index cad16c2db31..6a501006d0a 100644 --- a/versioned_docs/version-8.6/reference/release-notes/860.md +++ b/versioned_docs/version-8.6/reference/release-notes/860.md @@ -11,13 +11,13 @@ keywords: ] --- -These release notes identify the new features included in 8.6, including [alpha feature releases](/docs/reference/alpha-features.md). 
+These release notes identify the new features included in 8.6, including [alpha feature releases](/reference/alpha-features.md). ## 8.6 minor -| Release date | End of maintenance | Changelog(s) | Release blog | Update guide | -| -------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | -| 8 October 2024 | 14 April 2026 | | [Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) | [Self-Managed update guide](self-managed/operational-guides/update-guide/850-to-860.md) | +| Release date | End of maintenance | Changelog(s) | Release blog | Update guide | +| -------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| 8 October 2024 | 14 April 2026 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0) | [Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) | [Self-Managed update guide](self-managed/operational-guides/update-guide/850-to-860.md) | ### Advanced SaaS offering SaaS Console @@ -66,6 +66,10 @@ Business Knowledge Models (BKM) can now be implemented in a decision model. - Users can extract and reuse expressions in their DMN diagrams. - When writing an expression in a decision, the BKM name autocompletes together with the required parameters. +:::note +Viewing a BKM in Operate is not supported yet. +::: + ### Deprecate zbctl and GO client Zeebe @@ -154,7 +158,7 @@ Auto-mapping simplifies the process of migrating complex and lengthy process def | Release date | Changelog(s) | | | ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| 10 September 2024 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0-alpha5)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0-alpha5) | [Release blog](https://camunda.com/blog/2024/08/camunda-alpha-release-september-2024/) | +| 10 September 2024 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0-alpha5)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0-alpha5) | [Release blog](https://camunda.com/blog/2024/09/camunda-alpha-release-september-2024/) | ### Auto-pause SaaS Console @@ -211,7 +215,7 @@ When creating a process for a local (non-English) region, you can design forms i -Real-time feedback is added for message correlation for messages with `ttl=o`, enabling external systems to immediately determine the success or failure of message correlation. This enhancement allows external systems to take prompt and appropriate actions based on the correlation result, improving overall efficiency and reducing response times. +The Camunda 8 REST API provides an [endpoint for synchronous message correlation](/apis-tools/camunda-api-rest/specifications/correlate-a-message.api.mdx), enabling external systems to immediately determine the success or failure of message correlation. This enhancement allows external systems to take prompt and appropriate actions based on the correlation result, improving overall efficiency and reducing response times. ### Public Marketplace blueprint support for HTO & DMN Web Modeler Marketplace @@ -320,7 +324,7 @@ You can now synchronize process applications with GitHub using a native integrat - After an admin approves and configures the basic integration, you can select a path to synchronize with in a GitHub repository. - You can pull changes from GitHub to integrate contributions from Desktop Modeler users, make changes, and begin the process to make a pull request so every change is properly reviewed and approved. -Do you use another tool such as GitLab or Bitbucket? [Contact us](/contact/) to make your request. Until then, you can use our Connectors system and the CI/CD blueprint on the Marketplace. +Do you use another tool such as GitLab or Bitbucket? [Contact us](/reference/contact.md) to make your request. Until then, you can use our Connectors system and the CI/CD blueprint on the Marketplace. ### Persist data across sessions @@ -389,7 +393,7 @@ New platform users interested in orchestrating API endpoints now have a high-lev "Cloud" has been removed from the URLs in SaaS versions of Modeler and Console for conciseness. -### Incident Copilot Alpha Play +### Incident copilot alpha Play @@ -423,7 +427,7 @@ Enhance BPMN workflow reliability with selective message acknowledgement, enabli Time-to-live (TTL) is now configurable for inbound Connectors via a property in all inbound intermediate element templates called `Message TTL`. The new default value for TTL is 0. Read more about [message buffering](/components/concepts/messages.md#message-buffering) and [message correlation](/components/concepts/messages.md#message-correlation-overview). -### Incident Copilot Alpha Play +### Incident copilot alpha Play diff --git a/versioned_docs/version-8.6/reference/release-notes/release-notes.md b/versioned_docs/version-8.6/reference/release-notes/release-notes.md index a256125a9b3..c7ee7d647ec 100644 --- a/versioned_docs/version-8.6/reference/release-notes/release-notes.md +++ b/versioned_docs/version-8.6/reference/release-notes/release-notes.md @@ -7,8 +7,8 @@ description: "Release notes for Camunda 8 and its components." Camunda 8 release notes include notable new and improved features, enhancements, and bug fixes. Release notes are separated by minor release pages and include alphas released during the development cycle. 
| Version | Release date | Scheduled end of maintenance | Changelogs | Release blog | -| ---------------------------------------- | --------------- | ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | -| [8.6](/reference/release-notes/860.md) | 8 October 2024 | 14 April 2026 | | TBD | +| ---------------------------------------- | --------------- | ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| [8.6](/reference/release-notes/860.md) | 8 October 2024 | 14 April 2026 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.6.0)<br/>
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.6.0) | [Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) | | [8.5](/reference/release-notes/850.md) | 9 April 2024 | 14 October 2025 | - [ Camunda 8 core ](https://github.com/camunda/camunda/releases/tag/8.5.0)<br/>
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.5.0) | [Release blog](https://camunda.com/blog/2024/04/camunda-8-5-release/) | | 8.4 | 9 January 2024 | 9 July 2025 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.4.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.4.0) | [Release blog](https://camunda.com/blog/2024/01/camunda-8-4-simplifying-installation-enhancing-user-experience/) | | 8.3 | 10 October 2023 | 9 April 2025 | - [ Camunda Platform ](https://github.com/camunda/camunda-platform/releases/tag/8.3.0)
    - [ Connectors ](https://github.com/camunda/connectors/releases/tag/8.3.0) | [Release blog](https://camunda.com/blog/2023/10/camunda-8-3-scaling-automation-maximize-value/) | diff --git a/versioned_docs/version-8.6/reference/status.md b/versioned_docs/version-8.6/reference/status.md index c8de779c5d2..ecb840a23ab 100644 --- a/versioned_docs/version-8.6/reference/status.md +++ b/versioned_docs/version-8.6/reference/status.md @@ -21,4 +21,4 @@ To receive service status updates: ## Support -Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/contact). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). +Support can be requested by subscription or enterprise customers via [JIRA](https://jira.camunda.com/projects/SUPPORT/). Otherwise, [contact us](/reference/contact.md). For more information about Enterprise support and additional support resources, see [Enterprise Support](https://camunda.com/services/support/). diff --git a/versioned_docs/version-8.6/reference/supported-environments.md b/versioned_docs/version-8.6/reference/supported-environments.md index bebc2dc47bf..ab86efcc6ed 100644 --- a/versioned_docs/version-8.6/reference/supported-environments.md +++ b/versioned_docs/version-8.6/reference/supported-environments.md @@ -8,7 +8,7 @@ The supported environments page lists browsers, operating systems, clients, depl **If the particular technology is not listed, we cannot resolve issues caused by the usage of that unlisted technology.** -You may [raise a feature request](/contact) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/contact) to work with Consulting services. +You may [raise a feature request](/reference/contact.md) that will be evaluated by our product teams to provide official support from Camunda, or you can make a [help request](/reference/contact.md) to work with Consulting services. Recommendations are denoted with [recommended], however, other listed options are supported as well. @@ -30,15 +30,13 @@ For example, 1.2+ means support for the minor version 2, and any higher minors ( ## Desktop Modeler - Windows 10 / 11 -- Mac OS 12 / 13 / 14 +- Mac OS 12 / 13 / 14 / 15 - Ubuntu LTS (latest) ## Clients - **Zeebe Java Client**: OpenJDK 8+ - **Zeebe Spring SDK**: OpenJDK 17+ -- **Zeebe Go Client**: Go 1.13+ -- **zbctl**: Windows, macOS, and Linux (latest) - **Connector SDK**: OpenJDK 17+ - **Spring SDK**: Spring Boot 3.3.x (for the exact version, check the [version matrix](/apis-tools/spring-zeebe-sdk/getting-started.md#version-compatibility).) - **Helm CLI**: 3.14.x (for the exact version, check the [version matrix](https://helm.camunda.io/camunda-platform/version-matrix/).) @@ -92,10 +90,12 @@ Requirements for the components can be seen below: | Operate | OpenJDK 21+ | Elasticsearch 8.13+
    Amazon OpenSearch 2.9+ | | Tasklist | OpenJDK 21+ | Elasticsearch 8.13+
    Amazon OpenSearch 2.9+ | | Identity | OpenJDK 17+ | Keycloak 24.x, 25.x
    PostgreSQL 14.x, 15.x or Amazon Aurora PostgreSQL 13.x, 14.x, 15.x (required for [certain features](/self-managed/identity/deployment/configuration-variables.md#database-configuration)) | -| Optimize | OpenJDK 21+ | Elasticsearch 8.13+
    Amazon OpenSearch 2.9+ | +| Optimize | OpenJDK 21+ | Elasticsearch 8.13+
    Amazon OpenSearch 2.9+\* | | Connectors | OpenJDK 21+ | | | Web Modeler | - | PostgreSQL 13.x, 14.x, 15.x, 16.x or Amazon Aurora PostgreSQL 13.x, 14.x, 15.x, 16.x | +\*Not all Optimize features are supported when using OpenSearch as a database. For a full list of the features that are currently supported, please refer to the [Camunda 8](https://github.com/camunda/issues/issues/635) OpenSearch features. + When running Elasticsearch, you must have the [appropriate Elasticsearch privileges](/self-managed/concepts/elasticsearch-privileges.md). When running Amazon OpenSearch 2.11 or higher, we do not support [OR1 instances](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/or1.html) @@ -112,6 +112,7 @@ This matrix shows which component versions work together: | Design | Automate | | Improve | | --------------------- | ----------- | -------------------------------------------------------------------------- | --------------- | +| Desktop Modeler 5.28+ | Zeebe 8.6.x | Operate 8.6.x Tasklist 8.6.x Identity 8.6.x Connectors 8.6.x Console 8.6.x | Optimize 8.6.x | | Desktop Modeler 5.22+ | Zeebe 8.5.x | Operate 8.5.x Tasklist 8.5.x Identity 8.5.x Connectors 8.5.x Console 8.5.x | Optimize 8.5.x | | Desktop Modeler 5.19+ | Zeebe 8.4.x | Operate 8.4.x Tasklist 8.4.x Identity 8.4.x Connectors 8.4.x | Optimize 8.4.x | | Desktop Modeler 5.16+ | Zeebe 8.3.x | Operate 8.3.x Tasklist 8.3.x Identity 8.3.x Connectors 8.3.x | Optimize 8.3.x | diff --git a/versioned_docs/version-8.6/self-managed/about-self-managed.md b/versioned_docs/version-8.6/self-managed/about-self-managed.md index 1ee25e43bf4..a1d1f7eae1e 100644 --- a/versioned_docs/version-8.6/self-managed/about-self-managed.md +++ b/versioned_docs/version-8.6/self-managed/about-self-managed.md @@ -38,4 +38,4 @@ In this configuration, Camunda 8 Self-Managed can be accessed as follows: - Identity, Operate, Optimize, Tasklist, Modeler: `https://camunda.example.com/[identity|operate|optimize|tasklist|modeler]` - Web Modeler also exposes a WebSocket endpoint on `https://camunda.example.com/modeler-ws`. This is only used by the application itself and should not be accessed by users directly. - Keycloak authentication: `https://camunda.example.com/auth` -- Zeebe gateway: `grpc://zeebe.camunda.example.com` +- Zeebe Gateway: `grpc://zeebe.camunda.example.com` diff --git a/versioned_docs/version-8.6/self-managed/concepts/exporters.md b/versioned_docs/version-8.6/self-managed/concepts/exporters.md index a6dc88b2ae7..be9af05b892 100644 --- a/versioned_docs/version-8.6/self-managed/concepts/exporters.md +++ b/versioned_docs/version-8.6/self-managed/concepts/exporters.md @@ -66,7 +66,24 @@ heavy work during instantiation/configuration. ### Metrics -The exporter is provided with a Micrometer [MeterRegistry](https://docs.micrometer.io/micrometer/reference/concepts/registry.html) in the `Exporter#configure(Context)` method through the configuration. Any metrics to be exported should interact with the registry. +The exporter is provided with a Micrometer [MeterRegistry](https://docs.micrometer.io/micrometer/reference/concepts/registry.html) in the `Exporter#configure(Context)` method through the configuration. Any metrics to be exported should interact with the registry, for example: + +```java +public class SomeExporter implements Exporter { + @Override + public void configure(final Context context) { + // ... + registry = context.getMeterRegistry(); + // ... 
+ } + + public void flush() { + try (final var ignored = Timer.resource(registry, "meter.name")) { + exportBulk(); + } + } +} +``` When an exporter is validated, it is only provided with an in-memory register which is then discarded. diff --git a/versioned_docs/version-8.6/self-managed/concepts/multi-region/dual-region.md b/versioned_docs/version-8.6/self-managed/concepts/multi-region/dual-region.md index 7d44cadefb3..3e85a365a9b 100644 --- a/versioned_docs/version-8.6/self-managed/concepts/multi-region/dual-region.md +++ b/versioned_docs/version-8.6/self-managed/concepts/multi-region/dual-region.md @@ -98,7 +98,7 @@ Amazon OpenSearch is **not supported** in dual-region configurations. - Required open ports between the two regions: - **9200** for Elasticsearch (for cross-region data pushed by Zeebe). - **26500** for communication to the Zeebe Gateway from clients/workers. - - **26501** and **26502** for communication between Zeebe brokers and Zeebe Gateway. + - **26501** and **26502** for communication between Zeebe brokers and the Zeebe Gateway. ### Zeebe cluster configuration @@ -149,9 +149,9 @@ This means the Zeebe stretch cluster will not have a quorum when half of its bro The [operational procedure](./../../operational-guides/multi-region/dual-region-ops.md) looks in detail at a recovery from a region loss and how to long-term fully re-establish the lost region. -::caution +:::caution Customers are expected to proactively monitor for regional failures and take ownership of executing the necessary [operational procedures](./../../operational-guides/multi-region/dual-region-ops.md) to ensure smooth recovery and failover. -:: +::: ### Active region loss diff --git a/versioned_docs/version-8.6/self-managed/connectors-deployment/connectors-configuration.md b/versioned_docs/version-8.6/self-managed/connectors-deployment/connectors-configuration.md index ddc999ba271..672fca6831e 100644 --- a/versioned_docs/version-8.6/self-managed/connectors-deployment/connectors-configuration.md +++ b/versioned_docs/version-8.6/self-managed/connectors-deployment/connectors-configuration.md @@ -1,6 +1,7 @@ --- id: connectors-configuration title: Configuration +description: "Configure the Connector runtime environment based on the Zeebe instance to connect to, the Connector functions to run, and secrets available to the Connectors." --- import Tabs from "@theme/Tabs"; @@ -14,8 +15,6 @@ You can configure the Connector runtime environment in the following ways: ## Connecting to Zeebe -In general, the Connector Runtime will respect all properties known to [Spring Zeebe](https://github.com/camunda-community-hub/spring-zeebe). - -Disabling Operate polling will lead to inability to use inbound (e.g., webhook) capabilities. -However, if you still wish to do so, you need to start your Connector runtime with the following environment variables: +Disabling Operate polling will lead to inability to use inbound capabilities like webhooks. If you still wish to do so, start your Connector runtime with the following environment variables: ```bash CAMUNDA_CONNECTOR_POLLING_ENABLED=false @@ -102,7 +100,7 @@ OPERATE_CLIENT_ENABLED=false ## Manual discovery of Connectors By default, the Connector runtime picks up outbound Connectors available on the classpath automatically. 
-To disable this behavior, use the following environment variables to configure Connectors and their configuration explicitly: +To disable this behavior, use the following environment variables to configure Connectors explicitly: | Environment variable | Purpose | | :-------------------------------------------- | :------------------------------------------------------------ | @@ -111,9 +109,9 @@ To disable this behavior, use the following environment variables to configure C | `CONNECTOR_{NAME}_INPUT_VARIABLES` (optional) | Variables to fetch for worker with `NAME` | | `CONNECTOR_{NAME}_TIMEOUT` (optional) | Timeout in milliseconds for worker with `NAME` | -Through that configuration, you define all job workers to run. +Through this configuration, you define all job workers to run. -Specifying optional values allow you to override `@OutboundConnector`-provided Connector configuration. +Specifying optional values allows you to override `@OutboundConnector`-provided Connector configuration. ```bash CONNECTOR_HTTPJSON_FUNCTION=io.camunda.connector.http.rest.HttpJsonFunction @@ -196,7 +194,7 @@ Reference the secret in the Connector's input in the prefixed style `{{secrets.M Create your own implementation of the `io.camunda.connector.api.secret.SecretProvider` interface that [comes with the SDK](https://github.com/camunda/connectors/blob/main/connector-sdk/core/src/main/java/io/camunda/connector/api/secret/SecretProvider.java). -Package this class and all its dependencies as a JAR, e.g. `my-secret-provider-with-dependencies.jar`. This needs to include a file +Package this class and all its dependencies as a JAR, for example `my-secret-provider-with-dependencies.jar`. This needs to include a file `META-INF/services/io.camunda.connector.api.secret.SecretProvider` that contains the fully qualified class name of your secret provider implementation. Add this JAR to the runtime environment, depending on your deployment setup. Your secret provider will serve secrets as implemented. @@ -223,75 +221,72 @@ java -cp 'connector-runtime-application-VERSION-with-dependencies.jar:...:my-sec ## Multi-tenancy -The Connector Runtime supports multiple tenants for inbound and outbound Connectors. +The Connector Runtime supports multiple tenants for inbound and outbound Connectors. These are configurable in [Identity](/self-managed/identity/user-guide/tenants/managing-tenants.md). + A single Connector Runtime can serve a single tenant or can be configured to serve -multiple tenants. By default, the runtime uses the `` tenant id for all -Zeebe related operations like handling Jobs and publishing Messages. +multiple tenants. By default, the runtime uses the tenant ID `` for all +Zeebe-related operations like handling jobs and publishing messages. :::info Support for **outbound Connectors** with multiple tenants requires a dedicated -tenant job worker config (described below). **Inbound Connectors** will automatically work for all tenants -the configured Connector Runtime client has access to. This can be configured in Identity via -the application assignment. +tenant job worker config (described below). **Inbound Connectors** automatically work for all tenants +the configured Connector Runtime client has access to. This can be configured in Identity via the application assignment. ::: ### Environment variables -The following environment variables are used by the Connector Runtime -for the configuration of multi-tenancy. 
+The Connector Runtime uses the following environment variables to configure multi-tenancy: -| Name | Description | Default value | -| ------------------------------------------ | --------------------------------------------------------------- | ------------- | -| ZEEBE_CLIENT_DEFAULT-TENANT-ID | The default tenant id used to communicate with Zeebe | `` | -| ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS | The default tenants ids (comma separated) used to activate jobs | `` | +| Name | Description | Default value | +| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| ZEEBE_CLIENT_DEFAULT-TENANT-ID | The default tenant ID used to communicate with Zeebe. Changing this value will set a new default tenant ID used for fetching jobs and publishing messages. | `` | +| ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS | The default tenant IDs (comma separated) used to activate jobs. To run the Connector Runtime in a setup where a single runtime serves multiple tenants, add each tenant ID to this list. | `` | -If you are using an embedded version of the Connector Runtime you can specify the tenant information -in your Spring configuration like in this example `application.properties` file: +If you are using an embedded version of the Connector Runtime, you can specify the tenant information in your Spring configuration like in this example `application.properties` file: ```bash -zeebe.client.default-tenant-id= -zeebe.client.default-job-worker-tenant-ids=t1, +zeebe.client.default-tenant-id=myTenant +zeebe.client.default-job-worker-tenant-ids=myTenant ``` ### Outbound Connector config -The Connector Runtime uses the `` tenant for outbound Connector related features. +The Connector Runtime uses the default tenant for outbound Connector-related features. If support for a different tenant or multiple tenants should be enabled, the tenants need to be configured individually using the following environment variables. If you want to use outbound Connectors for a single tenant that is different -from the `` tenant you can specify a different default tenant id using: +from the default tenant, you can specify a different default tenant ID using: ```bash -ZEEBE_CLIENT_DEFAULT-TENANT-ID=tenant1 +ZEEBE_CLIENT_DEFAULT-TENANT-ID=myTenant ``` -This will change the default tenant id used for fetching jobs and publishing messages -to the tenant id `tenant1`. +This will change the default tenant ID used for fetching jobs and publishing messages +to the tenant ID `myTenant`. :::note -Please keep in mind that inbound Connectors will still be enabled for -all tenants that the Connector Runtime client has access to. +Inbound Connectors will still be enabled for +all tenants the Connector Runtime client has access to. 
::: -If you want to run the Connector Runtime in a setup where a single runtime -serves multiple tenants you have to add each tenant id to the list of the default job workers: +To run the Connector Runtime in a setup where a single runtime +serves multiple tenants, add each tenant ID to the list of the default job workers: ```bash -ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS=tenant1, tenant2 +ZEEBE_CLIENT_DEFAULT-JOB-WORKER-TENANT-IDS=`myTenant, otherTenant` ``` -In this case the `ZEEBE_CLIENT_DEFAULT-TENANT-ID` will **not** be used for the +In this case, the `ZEEBE_CLIENT_DEFAULT-TENANT-ID` will **not** be used for the configuration of job workers. -### Inbound Connector config +### Inbound Connector configuration -The Connector Runtime will fetch and execute all inbound Connectors it receives from +The Connector Runtime fetches and executes all inbound Connectors it receives from Operate independently of the outbound Connector configuration without any additional configuration required from the user. -If you want to restrict the Connector Runtime inbound Connector feature to a single tenant or multiple tenants -you have to use Identity and assign the tenants the Connector application should have access to. +To restrict the Connector Runtime inbound Connector feature to a single tenant or multiple tenants, use Identity and assign the tenants the Connector application should have access to. ### Troubleshooting diff --git a/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md b/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md index 874312d78c7..8bfa775bf15 100644 --- a/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md +++ b/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md @@ -5,10 +5,6 @@ sidebar_label: "Configuration" description: "Read details on the configuration variables of Console Self-Managed." --- -:::note -Console Self-Managed is available only to [Enterprise customers](/reference/licenses.md#console). -::: - Console Self-Managed can be configured using environment variables and configuration parameters. :::note @@ -23,10 +19,10 @@ Underscores in environment variables correspond to configuration file key levels | `KEYCLOAK_INTERNAL_BASE_URL` | Internal Base URL for Keycloak | http://camunda-platform-keycloak:80/auth | | `KEYCLOAK_REALM` | Realm for Keycloak | camunda-platform | | `CAMUNDA_IDENTITY_AUDIENCE` | Audience for Console client | console | -| `CAMUNDA_IDENTITY_CLIENT_ID` | Client Id for Console client | console | +| `CAMUNDA_IDENTITY_CLIENT_ID` | Client ID for Console client | console | | `CAMUNDA_CONSOLE_CONTEXT_PATH` | Context path for Console | console | | `CAMUNDA_CONSOLE_CUSTOMERID` | Unique identifier of the customer | `customer-id` | -| `CAMUNDA_CONSOLE_INSTALLATIONID` | Unique installation id of the current customer installation | `installation-id` | +| `CAMUNDA_CONSOLE_INSTALLATIONID` | Unique installation ID of the current customer installation | `installation-id` | | `CAMUNDA_CONSOLE_TELEMETRY` | Telemetry config for Console Self-Managed: `disabled`, `online`, or `download` | `online` | | `CAMUNDA_CONSOLE_DISABLE_AUTH` | Disables authentication for Console. With this option, set users don't have to log in to use Console and API requests can be executed without an Authorization header.
    By disabling authentication all `CAMUNDA_IDENTITY`, variables won't be used. | `true` | | `CAMUNDA_LICENSE_KEY` | Your Camunda 8 license key, if your installation requires a license. For Helm installations, license keys can be configured globally in your `values.yaml` file. See the [Helm installation documentation](/self-managed/setup/install.md#configure-license-key) for more details. | N/A | @@ -54,7 +50,7 @@ To enable telemetry, the following parameters need to be configured. Camunda wil | Parameter | Description | Example value | | ---------------- | ----------------------------------------------------------------------------------- | --------------- | | `customerId` | Unique identifier of the customer. This is also a Camunda Docker registry user name | `customername` | -| `installationId` | Unique installation id of the current customer installation | `my-deployment` | +| `installationId` | Unique installation ID of the current customer installation | `my-deployment` | | `telemetry` | Telemetry config for Console Self-Managed: `disabled`, `online` or `download` | `online` | Console environment variables could be set in Helm. For more details, check [Console Helm values](https://artifacthub.io/packages/helm/camunda/camunda-platform#console-parameters). diff --git a/versioned_docs/version-8.6/self-managed/identity/deployment/configuration-variables.md b/versioned_docs/version-8.6/self-managed/identity/deployment/configuration-variables.md index 2f37d61ef18..09066025e1f 100644 --- a/versioned_docs/version-8.6/self-managed/identity/deployment/configuration-variables.md +++ b/versioned_docs/version-8.6/self-managed/identity/deployment/configuration-variables.md @@ -35,10 +35,16 @@ import Licensing from '../../../self-managed/react-components/licensing.md' ## OIDC configuration -| Evnironment variable | Description | Default value | -| ---------------------------- | --------------------------------------------------- | ------------- | -| IDENTITY_INITIAL_CLAIM_NAME | The name of the claim to use for the initial user. | oid | -| IDENTITY_INITIAL_CLAIM_VALUE | The value of the claim to use for the initial user. | | +Claims are name/value pairs used to represent an individual identity. Configure your initial claim and value to match the claim used with your OIDC provider. For example, to use your Microsoft Entra unique account ID, set `IDENTITY_INITIAL_CLAIM_NAME` to `oid`, and `IDENTITY_INITIAL_CLAIM_VALUE` to the ID. + +:::note +Once set, you cannot update your initial claim name and value using environment or Helm values. You must change these values directly in the database. +::: + +| Environment variable | Description | Default value | +| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `IDENTITY_INITIAL_CLAIM_NAME` | The type of claim to use for the initial user. Examples can include `oid`, `name` or `email`. | `oid` | +| `IDENTITY_INITIAL_CLAIM_VALUE` | The value of the claim to use for the initial user. For the default `oid`, the value usually corresponds to the unique ID of your user account. 
| | ## Component configuration diff --git a/versioned_docs/version-8.6/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md b/versioned_docs/version-8.6/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md index 835be739ecc..2e61c28b921 100644 --- a/versioned_docs/version-8.6/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md +++ b/versioned_docs/version-8.6/self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak.md @@ -5,12 +5,15 @@ sidebar_label: "Connect to an existing Keycloak instance" description: "Learn how to connect Identity to your existing Keycloak instance." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + In this guide, we'll demonstrate how to connect Identity to your existing Keycloak instance. ## Prerequisites - Access to your [Keycloak Admin Console](https://www.keycloak.org/docs/23.0.1/server_admin/#using-the-admin-console) -- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/23.0.1/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak. +- A basic understanding of [administering realms and clients](https://www.keycloak.org/docs/latest/server_admin/#assembly-managing-clients_server_administration_guide) in Keycloak :::note Clients in Camunda 8 SaaS and applications in Camunda 8 Self-Managed provide a similar purpose. One key difference is that for Camunda 8 SaaS, you can set up specific [client connection credentials](/guides/setup-client-connection-credentials.md), whereas in Identity, an application is created with credentials automatically assigned. @@ -24,7 +27,15 @@ As of the 8.5.3 release, Identity uses the Keycloak frontend URL instead of the To avoid connectivity issues, ensure your Keycloak frontend URL is accessible by adjusting your network, firewall, or security settings as needed. This adjustment is crucial to maintain the integration with Keycloak and ensure compatibility. ::: -To connect Identity to an existing Keycloak instance, take the following steps: +To connect Identity to an existing Keycloak instance, take the following steps for your Camunda installation: + + + + 1. Log in to your Keycloak Admin Console. 2. Select the realm you would like to connect Identity to. In our example, this is **camunda-platform**. @@ -56,6 +67,18 @@ To connect Identity to an existing Keycloak instance, take the following steps: ::: 13. Start Identity. + + + +1. Log in to your Keycloak Admin Console. +2. Verify the name of the realm you would like to connect Identity to. In our example, this is **camunda-platform**. + ![keycloak-admin-realm-select](../img/keycloak-admin-realm-select.png) +3. Set the `KEYCLOAK_REALM` [environment variable](/self-managed/identity/deployment/configuration-variables.md) to the realm you selected in **Step 2**. +4. Start Identity. + + + + :::note What does Identity create when starting? Identity creates a base set of configurations required to function successfully. To understand more about what is created and why, see [the starting configuration](/self-managed/identity/deployment/starting-configuration.md). 
::: diff --git a/versioned_docs/version-8.6/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md b/versioned_docs/version-8.6/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md index f39ada6f300..2f6d9bd6f54 100644 --- a/versioned_docs/version-8.6/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md +++ b/versioned_docs/version-8.6/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md @@ -4,46 +4,69 @@ title: Deploy diagram description: "Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + Desktop Modeler can directly deploy diagrams and start process instances in Camunda 8 Self-Managed. Follow the steps below to deploy a diagram: 1. Click the rocket-shaped deployment icon: -![deployment icon](./img/deploy-icon.png) + ![deployment icon](./img/deploy-icon.png) 2. Click **Camunda 8 Self-Managed**: -![deployment configuration](./img/deploy-empty.png) + ![deployment configuration](./img/deploy-empty.png) 3. Input the `Cluster endpoint`: -:::note -You can connect to Camunda 8 both securely and insecurely through the `https` and `http` protocols. + :::note + You can connect to Camunda 8 both securely and insecurely through the `https` and `http` protocols. + + Secured connections to a remote endpoint will only be established if the remote server certificate is trusted by the app. Ensure that signing trusted roots and intermediate certificates [are known to the app](/components/modeler/desktop-modeler/flags/flags.md#zeebe-ssl-certificate). + ::: + + :::caution + + Multi-tenancy is only available with authentication enabled [through Identity](/self-managed/identity/what-is-identity.md), and [enabled in all required components](/self-managed/concepts/multi-tenancy.md). + + ::: + + ![deployment via Camunda 8](./img/deploy-endpoint.png) + +4. Select your authentication method, and input the required credentials: -Secured connections to a remote endpoint will only be established if the remote server certificate is trusted by the app. Ensure that signing trusted roots and intermediate certificates [are known to the app](/components/modeler/desktop-modeler/flags/flags.md#zeebe-ssl-certificate). -::: + -:::caution + -Multi-tenancy is only available with authentication enabled [through Identity](/self-managed/identity/what-is-identity.md), and [enabled in all required components](/self-managed/concepts/multi-tenancy.md). + For **basic authentication**, input your username and password: -::: + ![basic auth configuration](./img/deploy-with-basic-auth.png) -![deployment via Camunda 8](./img/deploy-endpoint.png) + -4. Select **Basic**, and input your username and password in case your gateway requires basic authentication: + -![basic auth configuration](./img/deploy-with-basic-auth.png) + For **OAuth**, input the credentials for your OAuth provider. These are configured as part of the default [Helm installation](/self-managed/setup/install.md) and can be discovered in [Identity](/self-managed/identity/what-is-identity.md), or are set by Zeebe [environment variables](/self-managed/zeebe-deployment/security/client-authorization.md#environment-variables). -5. Select **OAuth**, and input the credentials in case your gateway requires authentication with OAuth: + ![oauth configuration](./img/deploy-with-oauth.png) -:::note -The OAuth URL needs to contain the full path to the token endpoint, i.e. 
`https:///auth/realms/camunda-platform/protocol/openid-connect/token`. -::: + | Name | Description | Example value | + | --------------- | ------------------------------------ | ----------------------------------------------------------------------------------------- | + | Client ID | The name of your Zeebe client. | `zeebe` | + | Client secret | The password of your Zeebe client. | `zecret` | + | OAuth token url | The full path to the token endpoint. | `https:///auth/realms/camunda-platform/protocol/openid-connect/token`. | + | OAuth audience | The permission name for Zeebe. | `zeebe-api` | -![oauth configuration](./img/deploy-with-oauth.png) + + -6. Select the **Remember** checkbox if you want to locally store the connection information. +5. Select the **Remember** checkbox if you want to locally store the connection information. -7. Click **Deploy** to perform the deployment. +6. Click **Deploy** to perform the deployment. ![deployment successful](./img/deploy-success.png) diff --git a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/configuration.md b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/configuration.md index 9b989e42c79..980147abc20 100644 --- a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/configuration.md +++ b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/configuration.md @@ -83,11 +83,13 @@ The `restapi` component sends certain events (e.g. "file updated", "comment adde Web Modeler integrates with Identity and Keycloak for authentication and authorization (using OAuth 2.0 + OpenID Connect) as well as user management. -| Environment variable | Description | Example value | -| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | -| `CAMUNDA_IDENTITY_BASEURL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | -| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | -| `RESTAPI_OAUTH2_TOKEN_ISSUER_BACKEND_URL` | [optional]
    [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's [OpenID Provider Configuration](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig); if not set, `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` is used. | `http://keycloak:8080/auth/realms/camunda-platform` | +| Environment variable | Description | Example value | +| ---------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `CAMUNDA_IDENTITY_BASEURL` | [Internal](#notes-on-host-names-and-port-numbers) base URL of the Identity API (used to fetch user data). | `http://identity:8080` | +| `RESTAPI_OAUTH2_TOKEN_ISSUER_BACKEND_URL` | [optional]
    [Internal](#notes-on-host-names-and-port-numbers) URL used to request Keycloak's [OpenID Provider Configuration](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig); if not set, `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` is used. | `http://keycloak:8080/auth/realms/camunda-platform` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI` | URL of the token issuer (used for JWT validation). | `https://keycloak.example.com/auth/realms/camunda-platform` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWK_SET_URI` | [optional] URL of the JWK Set endpoint (used for JWT validation). Only necessary if URL cannot be derived from the OIDC configuration endpoint. | `https://keycloak.example.com/auth/realms/camunda-platform/protocol/openid-connect/certs` | +| `SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_JWS_ALGORITHMS` | [optional] List of trusted JWS algorithms used for JWT validation. Only necessary if the algorithms cannot be derived from the JWK Set response. | `ES256` | Refer to the [advanced Identity configuration guide](./identity.md) for additional details on how to connect a custom OpenID Connect (OIDC) authentication provider. @@ -140,6 +142,20 @@ Refer to the [advanced SSL configuration guide](./ssl.md) for additional details | `RESTAPI_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the regular API endpoints. | `8081` | `8081` | | `RESTAPI_MANAGEMENT_PORT` | [Internal](#notes-on-host-names-and-port-numbers) port number on which the `restapi` serves the management API endpoints. | `8091` | `8091` | +### Proxy + +These settings are useful when the application needs to make outgoing network requests in environments that require traffic to pass through a proxy server. + +| Environment variable | Description | Example value | Default value | +| -------------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------- | ------------- | +| `http_proxy` | Specifies the proxy server to be used for outgoing HTTP requests. | `http://proxy.example.com:8080` | - | +| `https_proxy` | Specifies the proxy server to be used for outgoing HTTPS requests. | `https://secureproxy.example.com:443` | - | +| `no_proxy` | A comma-separated list of domain names or IP addresses for which the proxy should be bypassed. | `localhost,127.0.0.1,.example.com` | - | + +:::note +The proxy-related environment variables are lowercase because they follow a widely accepted convention used in many system environments and tools. +::: + ### Feature Flags | Environment variable | Description | Example value | Default value | @@ -181,10 +197,13 @@ The `webapp` component sends certain events (e.g. "user opened diagram", "user l ### Logging -| Environment variable | Description | Example value | -| -------------------- | -------------------------------------- | ---------------------------- | -| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| Environment variable | Description | Example value | +| -------------------- | ----------------------------------------------- | ---------------------------- | +| `LOG_FILE_PATH` | [optional]
    Path to log file output | `/full/path/to/log/file.log` | +| `LOG_LEVEL_CLIENT` | [optional]
    Log level for the client | `DEBUG` | +| `LOG_LEVEL_WEBAPP` | [optional]
    Log level for the Node.js server | `DEBUG` | +The `LOG_LEVEL_*` options can be found [here](../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). Refer to the [Advanced Logging Configuration Guide](./logging.md#logging-configuration-for-the-webapp-component) for additional details on how to customize the `webapp` logging output. ### SSL diff --git a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/logging.md b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/logging.md index 307a2ea773b..ab86be8bc30 100644 --- a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/logging.md +++ b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/configuration/logging.md @@ -38,6 +38,16 @@ To enable additional log output to a file, adjust the following environment vari LOG_FILE_PATH=/full/path/to/log/file.log ``` +### Configuring log levels + +To control the verbosity of the logs, adjust the environment variables `LOG_LEVEL_CLIENT` (browser client) and `LOG_LEVEL_WEBAPP` (Node.js server). + +```properties +LOG_LEVEL_CLIENT=DEBUG +``` + +The `LOG_LEVEL_*` options can be found [here](../../../../operational-guides/troubleshooting/log-levels/#understanding-log-levels). + ## Logging configuration for the `websocket` component By default, the `websocket` component logs to the Docker container's standard output. diff --git a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/installation.md b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/installation.md index 9cc2ac07094..4162f8482f7 100644 --- a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/installation.md +++ b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/installation.md @@ -4,4 +4,4 @@ title: Installation description: "Details on installation of Web Modeler Self-Managed." --- -Refer to the [installation guide](/self-managed/setup/overview.md) for details on how to install Web Modeler, and the [contact page](/contact) for guidance on obtaining Camunda 8 credentials. +Refer to the [installation guide](/self-managed/setup/overview.md) for details on how to install Web Modeler, and the [contact page](/reference/contact.md) for guidance on obtaining Camunda 8 credentials. diff --git a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md new file mode 100644 index 00000000000..6059d9bed8c --- /dev/null +++ b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration.md @@ -0,0 +1,36 @@ +--- +id: troubleshoot-proxy-configuration +title: "Troubleshoot proxy configuration issues" +sidebar_label: "Proxy configuration" +description: "Troubleshooting guide for issues caused by incorrect proxy configuration in Web Modeler." +--- + +Troubleshoot and resolve issues in Web Modeler caused by incorrect or incomplete proxy configuration. + +## Issue + +Users experience a variety of failures when Web Modeler attempts to communicate with external services. These issues can manifest as: + +- Failed authentication due to the inability to access the JWKS (JSON Web Key Set) endpoint. Error message: "Expected 200 OK from the JSON Web Key Set HTTP response." +- Failure to reach other external services, such as the Camunda Marketplace. 
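+
+One way to confirm the first symptom is to check whether the JWKS endpoint of your OIDC provider is reachable from the environment Web Modeler runs in. The URL below is only an example value; substitute the issuer configured for your installation:
+
+```shell
+# A proxy problem typically surfaces here as a timeout or connection error
+curl -v https://keycloak.example.com/auth/realms/camunda-platform/protocol/openid-connect/certs
+```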
+ +## Cause + +Proxy settings must be correctly configured for Web Modeler to route outgoing requests through a network proxy. Common issues occur when: + +- The proxy server is not properly configured or unreachable. +- Requests to external services are being blocked by the proxy configuration. +- Authentication requests, such as those to the OIDC provider, fail when the JWKS endpoint is unreachable via the proxy. + +## Resolution + +Ensure correct proxy configuration for both `webapp` and `restapi` components. + +- For the `webapp` component, proxy configuration is handled via the environment variables `http_proxy`, `https_proxy` and `no_proxy`. + ```properties + http_proxy=http://proxy.example.com:8080 https_proxy=https://secureproxy.example.com:443 no_proxy=localhost,127.0.0.1,.example.com + ``` +- For the `restapi` component, the proxy configuration is handled via JVM settings passed as the value of the environment variable `JAVA_OPTS`. + ```properties + JAVA_OPTS=-Dhttp.proxyHost= -Dhttps.proxyPort= + ``` diff --git a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md index a06f1b6898b..0e6313d149c 100644 --- a/versioned_docs/version-8.6/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md +++ b/versioned_docs/version-8.6/self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection.md @@ -21,7 +21,7 @@ Depending on your infrastructure, the default timeouts configured may be too sho You can pass custom timeouts in milliseconds for Web Modeler's Zeebe client to `modeler-restapi` via three individual environment variables: ```shell -ZEEBE_CLIENT_REQUESTTIMEOUT=30000 # limit the time to wait for a response from the Zeebe gateway +ZEEBE_CLIENT_REQUESTTIMEOUT=30000 # limit the time to wait for a response from the Zeebe Gateway ZEEBE_AUTH_CONNECT_TIMEOUT=60000 # limit the time to wait for a connection to the OAuth server ZEEBE_AUTH_READ_TIMEOUT=60000 # limits the time to wait for a response from the OAuth server ``` diff --git a/versioned_docs/version-8.6/self-managed/operate-deployment/importer-and-archiver.md b/versioned_docs/version-8.6/self-managed/operate-deployment/importer-and-archiver.md index 04cad62c780..17a48be690f 100644 --- a/versioned_docs/version-8.6/self-managed/operate-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.6/self-managed/operate-deployment/importer-and-archiver.md @@ -32,7 +32,7 @@ Each single importer/archiver node must be configured using the following config | ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------- | | camunda.operate.clusterNode.partitionIds | Array of Zeebe partition ids this Importer (or Archiver) node must be responsible for. | Empty array, meaning all partitions data is loaded. | | camunda.operate.clusterNode.nodeCount | Total amount of Importer (or Archiver) nodes in the cluster. | 1 | -| camunda.operate.clusterNode.currentNodeId | Id of current Importer (or Archiver) node, starting from 0. | 0 | +| camunda.operate.clusterNode.currentNodeId | ID of current Importer (or Archiver) node, starting from 0. | 0 | It's enough to configure either `partitionIds` or a pair of `nodeCount` and `currentNodeId`. 
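+
+For example, a minimal sketch using `nodeCount` and `currentNodeId` for a setup with two importer/archiver nodes (values are illustrative) could look like this in the application configuration of the first node:
+
+```yaml
+camunda.operate:
+  clusterNode:
+    # Total number of importer/archiver nodes in the cluster
+    nodeCount: 2
+    # This node is the first of the two nodes
+    currentNodeId: 0
+```
+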
If you provide `nodeCount` and `currentNodeId`, each node will automatically guess the Zeebe partitions they're responsible for. @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.operate.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/versioned_docs/version-8.6/self-managed/operate-deployment/operate-configuration.md b/versioned_docs/version-8.6/self-managed/operate-deployment/operate-configuration.md index 59bde307f9b..ef22cf4c196 100644 --- a/versioned_docs/version-8.6/self-managed/operate-deployment/operate-configuration.md +++ b/versioned_docs/version-8.6/self-managed/operate-deployment/operate-configuration.md @@ -73,7 +73,7 @@ in terms of tenant assignment, Operate - Zeebe connection must be secured. Check ### Troubleshooting multi-tenancy in Operate -If users can view data from the `` tenant only and no data from other tenants (and you have not [configured multi-tenancy using Helm](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-latest#global-parameters)), multi-tenancy is not enabled in Operate. Refer to the [configuration instructions above](#multi-tenancy). +If users can view data from the `` tenant only and no data from other tenants (and you have not [configured multi-tenancy using Helm](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-8.6#global-parameters)), multi-tenancy is not enabled in Operate. Refer to the [configuration instructions above](#multi-tenancy). If multi-tenancy is enabled in Operate but disabled in [Identity](/self-managed/identity/what-is-identity.md), users will not have any tenant authorizations in Operate and will not be able to access the data of any tenants in Operate. @@ -209,9 +209,9 @@ camunda.operate: selfSigned: true ``` -## Zeebe broker connection +## Zeebe Broker connection -Operate needs a connection to the Zeebe broker to start the import and execute user operations. +Operate needs a connection to the Zeebe Broker to start the import and execute user operations. 
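+
+As a rough sketch before the individual settings are listed below, such a connection is typically described by the address of the Zeebe Gateway that Operate should use; the host and port here are placeholders:
+
+```yaml
+camunda.operate:
+  zeebe:
+    # Address of the Zeebe Gateway (placeholder value)
+    brokerContactPoint: "localhost:26500"
+```
+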
### Settings to connect diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/application-configs.md b/versioned_docs/version-8.6/self-managed/operational-guides/application-configs.md index 47bfcee3638..100daa62bfc 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/application-configs.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/application-configs.md @@ -63,20 +63,20 @@ operate: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 numberOfShards: 3 # Zeebe instance zeebe: # Broker contact point - brokerContactPoint: "cpt-zeebe-gateway:26500" + brokerContactPoint: "-zeebe-gateway:26500" # ELS instance to export Zeebe data to zeebeElasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 # Index prefix, configured in Zeebe Elasticsearch exporter @@ -122,37 +122,34 @@ operate: ## Default properties set by the helm chart -Before you supply a configuration, it's helpful to know what the default configuration is so you can start from a working configuration and then update the values you want: +The `helm template` command generates the application's default configuration, allowing you to only update the values required by your setup. Use the following command to generate the default configuration, substituting in the name of your release: ```bash -helm template \ +helm template \ -f values.yaml \ camunda/camunda-platform \ --show-only templates/operate/configmap.yaml ``` -`--show-only` will allow you to print out the `configmap` to the console: +The `--show-only` flag prints out the `configmap` to the console: ```yaml # Source: camunda-platform/templates/operate/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: cpt-operate + name: -operate-configuration labels: app: camunda-platform app.kubernetes.io/name: camunda-platform - app.kubernetes.io/instance: cpt + app.kubernetes.io/instance: app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: camunda-platform - helm.sh/chart: camunda-platform-9.3.1 - app.kubernetes.io/version: "8.4.5" + helm.sh/chart: camunda-platform-10.3.2 app.kubernetes.io/component: operate + app.kubernetes.io/version: "8.5.5" data: application.yml: | - server: - servlet: - context-path: "/operate" spring: profiles: active: "identity-auth" @@ -160,41 +157,46 @@ data: oauth2: resourceserver: jwt: - issuer-uri: "http://cpt-keycloak:80/auth/realms/camunda-platform" - jwk-set-uri: "http://cpt-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/certs" + issuer-uri: "http://-keycloak:80/auth/realms/camunda-platform" + jwk-set-uri: "http://-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/certs" camunda: identity: clientId: "operate" audience: "operate-api" + baseUrl: "http://-identity:80" # Operate configuration file camunda.operate: identity: - redirectRootUrl: "https://dev.jlscode.com" + redirectRootUrl: "http://localhost:8081" # ELS instance to store Operate data elasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch # Transport port port: 9200 - # Zeebe instance - zeebe: - # Broker contact point - brokerContactPoint: "cpt-zeebe-gateway:26500" + # Elasticsearch full url + url: "http://-elasticsearch:9200" # ELS instance to export Zeebe data to zeebeElasticsearch: # Cluster name clusterName: elasticsearch # Host - host: cpt-elasticsearch + host: -elasticsearch 
# Transport port port: 9200 # Index prefix, configured in Zeebe Elasticsearch exporter prefix: zeebe-record + # Elasticsearch full url + url: "http://-elasticsearch:9200" + # Zeebe instance + zeebe: + # Broker contact point + brokerContactPoint: "-zeebe-gateway:26500" logging: level: ROOT: INFO @@ -212,7 +214,7 @@ Then, take the contents under `application.yml` and put it under the `operate.co - [Operate](docs/self-managed/operate-deployment/operate-configuration.md) - [Tasklist](docs/self-managed/tasklist-deployment/tasklist-configuration.md) - [Web Modeler](docs/self-managed/modeler/web-modeler/configuration/configuration.md) -- [Console](docs/self-managed/console-deployment/configuration.md) +- [Console](/self-managed/console-deployment/configuration.md) - [Connectors](docs/self-managed/connectors-deployment/connectors-configuration.md) - [Identity](docs/self-managed/identity/deployment/configuration-variables.md) - [Optimize]($optimize$/self-managed/optimize-deployment/configuration/system-configuration) diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/backup-and-restore.md b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/backup-and-restore.md index 045a13485d0..c5d68a21004 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/backup-and-restore.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/backup-and-restore.md @@ -24,7 +24,7 @@ Zeebe stores its backup to an external storage and must be configured before the ### Backup process -The backup of each component and the backup of a Camunda 8 cluster is identified by an id. This means a backup `x` of Camunda 8 consists of backup `x` of Zeebe, backup `x` of Optimize, backup `x` of Operate, and backup `x` of Tasklist. The backup id must be an integer and greater than the previous backups. +The backup of each component and the backup of a Camunda 8 cluster is identified by an id. This means a backup `x` of Camunda 8 consists of backup `x` of Zeebe, backup `x` of Optimize, backup `x` of Operate, and backup `x` of Tasklist. The backup ID must be an integer and greater than the previous backups. :::note We recommend using the timestamp as the backup id. diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md index bae5f4f7250..55fa19ff22c 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/operate-tasklist-backup.md @@ -5,17 +5,15 @@ description: "How to perform a backup and restore of Operate and Tasklist data." keywords: ["backup", "backups"] --- -:::note -This release introduces breaking changes, including: - -- The [get backup state API and response codes](#get-backup-state-api). -- The utilized URL has changed. For example, `curl 'http://localhost:8080/actuator/backups'` rather than the previously used `backup`. -- `backupId` must be of integer type now instead of string, which is in sync with Zeebe `backupId` requirements. +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +:::warning breaking changes! +As of the Camunda 8.6 release, the `/actuator` endpoints (including `/backups`) now default to port 9600. 
Ensure your `management.server.port` configuration parameter is correctly set before continuing. ::: Operate stores its data over multiple indices in Elasticsearch. Backup of Operate data includes several -Elasticsearch snapshots containing sets of Operate indices. Each backup is identified by `backupId`. For example, a backup with an id of `123` may contain the following Elasticsearch snapshots: +Elasticsearch snapshots containing sets of Operate indices. Each backup is identified by `backupId`. For example, a backup with an ID of `123` may contain the following Elasticsearch snapshots: ``` camunda_operate_123_8.1.0_part_1_of_6 @@ -29,7 +27,7 @@ camunda_operate_123_8.1.0_part_6_of_6 Operate provides an API to perform a backup and manage backups (list, check state, delete). Restore a backup using the standard Elasticsearch API. :::note -The backup API can be reached via the Actuator management port, which by default is the same as application HTTP port (and in turn defaults to 8080). The port may be reconfigured with the help of `management.server.port` configuration parameter. +The backup API can be reached via the Actuator management port, which by default is the same as application HTTP port (and in turn defaults to 9600). The port may be reconfigured with the help of `management.server.port` configuration parameter. ::: ## Prerequisites @@ -37,33 +35,68 @@ The backup API can be reached via the Actuator management port, which by default Before you can use the backup and restore feature: 1. The [Elasticsearch snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html) must be configured. -2. Operate and Tasklist must be configured with the repository name using the following configuration parameters: +2. Operate and Tasklist must be configured with the repository name using one of the following configuration options: + + + + + +#### Operate ```yaml -for Operate: camunda: operate: backup: repositoryName: +``` -for Tasklist: + + + + +#### Operate + +``` +CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= +``` + + + + +#### Tasklist + + + + + +```yaml camunda: tasklist: backup: repositoryName: ``` -or with environmental variables: + -``` -for Operate: -CAMUNDA_OPERATE_BACKUP_REPOSITORYNAME= + -for Tasklist: +``` CAMUNDA_TASKLIST_BACKUP_REPOSITORYNAME= - ``` + + + ## Create backup API During backup creation Operate can continue running. To create the backup, call the following endpoint: @@ -79,15 +112,15 @@ Response: | Code | Description | | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 200 OK | Backup was successfully started, snapshots will be created asynchronously. List of snapshots is returned in the response body (see example below). This list must be persisted together with the backup id to be able to restore it later. | -| 400 Bad Request | In case something is wrong with `backupId`, e.g. the same backup id already exists. | +| 200 OK | Backup was successfully started, snapshots will be created asynchronously. List of snapshots is returned in the response body (see example below). This list must be persisted together with the backup ID to be able to restore it later. | +| 400 Bad Request | In case something is wrong with `backupId`, e.g. the same backup ID already exists. | | 500 Server Error | All other errors, e.g. 
ES returned error response when attempting to create a snapshot. | | 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. | Example request: ``` -curl --request POST 'http://localhost:8080/actuator/backups' \ +curl --request POST 'http://localhost:9600/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": 123 }' ``` @@ -120,14 +153,14 @@ Response: | Code | Description | | ---------------- | --------------------------------------------------------------------------------------- | | 200 OK | Backup state could be determined and is returned in the response body. | -| 404 Not Found | Backup with given id does not exist. | +| 404 Not Found | Backup with given ID does not exist. | | 500 Server Error | All other errors, e.g. ES returned error response when attempting to execute the query. | | 502 Bad Gateway | Elasticsearch is not accessible, the request can be retried when it is back. | For example, the request could look like this: ``` -curl 'http://localhost:8080/actuator/backups/123' +curl 'http://localhost:9600/actuator/backups/123' ``` Example response: @@ -180,7 +213,7 @@ Response: For example, the request could look like this: ``` -curl 'http://localhost:8080/actuator/backups' +curl 'http://localhost:9600/actuator/backups' ``` Response will contain JSON with array of objects representing state of each backup (see [get backup state API endpoint](#get-backup-state-api)). diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/optimize-backup.md b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/optimize-backup.md index c694ff53341..fd86d993ec0 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/optimize-backup.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/optimize-backup.md @@ -25,7 +25,7 @@ Optimize provides an API to trigger a backup and retrieve information about a gi The following prerequisites must be set up before using the backup API: 1. A snapshot repository of your choice must be registered with Elasticsearch. -2. The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable or by adding it to your Optimize configuration: +2. 
The repository name must be specified using the `CAMUNDA_OPTIMIZE_BACKUP_REPOSITORY_NAME` environment variable, or by adding it to your Optimize [`environment-config.yaml`]($optimize$/self-managed/optimize-deployment/configuration/system-configuration/): ```yaml backup: @@ -58,7 +58,7 @@ POST actuator/backups ### Example request -``` +```shell curl --request POST 'http://localhost:8092/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": 123456 }' @@ -101,8 +101,8 @@ GET actuator/backup ### Example request -``` -curl ---request GET 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request GET 'http://localhost:8092/actuator/backups/123456' ``` ### Example response @@ -161,8 +161,8 @@ DELETE actuator/backups/{backupId} ### Example request -``` -curl ---request DELETE 'http://localhost:8092/actuator/backups/123456' +```shell +curl --request DELETE 'http://localhost:8092/actuator/backups/123456' ``` ## Restore backup @@ -184,6 +184,6 @@ To restore a given backup, the following steps must be performed: Example Elasticsearch request: -``` +```shell curl --request POST `http://localhost:9200/_snapshot/repository_name/camunda_optimize_123456_3.9.0_part_1_of_2/_restore?wait_for_completion=true` ``` diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md index fc3a1fc1ab6..aa4f90d6b80 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/backup-restore/zeebe-backup-and-restore.md @@ -26,7 +26,7 @@ Even when the underlying storage bucket is the same, backups from one are not co ### S3 backup store -To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket: +To store your backups in any S3 compatible storage system such as [AWS S3] or [MinIO], set the backup store to `S3` and tell Zeebe how to connect to your bucket. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -87,7 +87,7 @@ zeebe.broker.data.backup.s3.compression: zstd # or use environment variable ZEEB The GCS backup strategy utilizes the [Google Cloud Storage REST API](https://cloud.google.com/storage/docs/request-endpoints). ::: -To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use: +To store your backups in Google Cloud Storage (GCS), choose the `GCS` backup store and tell Zeebe which bucket to use. This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -124,7 +124,7 @@ There are multiple [data encryption options](https://cloud.google.com/storage/do ### Azure backup store -To store your backups in Azure Storage, choose the `AZURE` backup store and specify how to connect with the Azure container: +To store your backups in Azure Storage, choose the `AZURE` backup store and specify how to connect with the Azure container. 
This configuration can be set in your Zeebe [`config/application.yaml`](/self-managed/zeebe-deployment/configuration/configuration.md): ```yaml zeebe: @@ -170,14 +170,14 @@ POST actuator/backups } ``` -A `backupId` is an integer and must be greater than the id of previous backups that are completed, failed, or deleted. +A `backupId` is an integer and must be greater than the ID of previous backups that are completed, failed, or deleted. Zeebe does not take two backups with the same ids. If a backup fails, a new `backupId` must be provided to trigger a new backup. -The `backupId` cannot be reused, even if the backup corresponding to the backup id is deleted. +The `backupId` cannot be reused, even if the backup corresponding to the backup ID is deleted.
    Example request -``` +```shell curl --request POST 'http://localhost:9600/actuator/backups' \ -H 'Content-Type: application/json' \ -d '{ "backupId": "100" }' @@ -191,7 +191,7 @@ curl --request POST 'http://localhost:9600/actuator/backups' \ | ---------------- | ------------------------------------------------------------------------------------------------------------------------ | | 202 Accepted | A Backup has been successfully scheduled. To determine if the backup process was completed, refer to the GET API. | | 400 Bad Request | Indicates issues with the request, for example when the `backupId` is not valid or backup is not enabled on the cluster. | -| 409 Conflict | Indicates a backup with the same `backupId` or a higher id already exists. | +| 409 Conflict | Indicates a backup with the same `backupId` or a higher ID already exists. | | 500 Server Error | All other errors. Refer to the returned error message for more details. | | 502 Bad Gateway | Zeebe has encountered issues while communicating to different brokers. | | 504 Timeout | Zeebe failed to process the request within a pre-determined timeout. | @@ -220,7 +220,7 @@ GET actuator/backups/{backupId}
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups/100' ``` @@ -293,7 +293,7 @@ GET actuator/backups
    Example request -``` +```shell curl --request GET 'http://localhost:9600/actuator/backups' ``` @@ -371,7 +371,7 @@ DELETE actuator/backups/{backupId}
    Example request -``` +```shell curl --request DELETE 'http://localhost:9600/actuator/backups/100' ``` diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/configure-flow-control/configure-flow-control.md b/versioned_docs/version-8.6/self-managed/operational-guides/configure-flow-control/configure-flow-control.md index 08cbf734eae..22514755159 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/configure-flow-control/configure-flow-control.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/configure-flow-control/configure-flow-control.md @@ -21,7 +21,7 @@ A static write rate limit can prevent throughput peaks, and write rate throttlin These write limits are enabled by default in SaaS and disabled in Self-Managed. For most use cases, write rate limits can be enabled as needed if an issue arises. ::: -Flow control is configured in your Zeebe broker's `application.yaml` file. The default values can be found in the `# flowControl` section of the Zeebe broker [configuration](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) and [standalone](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) templates. +Flow control is configured in your Zeebe Broker's `application.yaml` file. The default values can be found in the `# flowControl` section of the Zeebe Broker [configuration](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) and [standalone](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) templates. ```yaml zeebe: diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/configure-multi-tenancy.md b/versioned_docs/version-8.6/self-managed/operational-guides/configure-multi-tenancy.md index e74efe332b4..7b09cd1715b 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/configure-multi-tenancy.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/configure-multi-tenancy.md @@ -19,7 +19,7 @@ Multi-tenancy must be enabled for each required component. Using the single glob ## Helm charts When using Helm charts, you can enable multi-tenancy globally with the flag `global.multitenancy.enabled`. -Visit [the Helm chart configuration](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-latest#global-parameters) for additional details. +Visit [the Helm chart configuration](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-8.6#global-parameters) for additional details. 
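+
+For example, a minimal `values.yaml` snippet that turns on the global flag could look like the following sketch; depending on your setup, additional Identity and component settings may be required:
+
+```yaml
+global:
+  multitenancy:
+    # Enables multi-tenancy for the Camunda 8 components managed by the chart
+    enabled: true
+```
+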
## Environment variables diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/multi-region/dual-region-ops.md b/versioned_docs/version-8.6/self-managed/operational-guides/multi-region/dual-region-ops.md index fe18d7ab6c1..8b55692c578 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/multi-region/dual-region-ops.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/multi-region/dual-region-ops.md @@ -147,7 +147,7 @@ Start with creating a port-forward to the `Zeebe Gateway` in the surviving regio The following alternatives to port-forwarding are possible: -- if Zeebe Gateway is exposed to the outside of the Kubernetes cluster, you can skip port-forwarding and use the URL directly +- If the Zeebe Gateway is exposed to the outside of the Kubernetes cluster, you can skip port-forwarding and use the URL directly - [`exec`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_exec/) into an existing pod (such as Elasticsearch), and execute `curl` commands from inside of the pod - [`run`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_run/) an Ubuntu pod in the cluster to execute `curl` commands from inside the Kubernetes cluster diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/log-levels.md b/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/log-levels.md index f5423bb4a8b..365aa71fd33 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/log-levels.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/log-levels.md @@ -24,3 +24,4 @@ Enable logging for each component of Camunda 8 using the following instructions: - [Operate](/self-managed/operate-deployment/operate-configuration.md#logging) - [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#logging) - [Web Modeler](/self-managed/modeler/web-modeler/configuration/logging.md) +- [Identity](/self-managed/identity/user-guide/configuration/configure-logging.md) diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/troubleshooting.md b/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/troubleshooting.md index 53c0cc54a0c..56a2df8bd47 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/troubleshooting.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/troubleshooting/troubleshooting.md @@ -34,7 +34,7 @@ global: ## Zeebe Ingress (gRPC) -Zeebe requires an Ingress controller that supports `gRPC` which is built on top of `HTTP/2` transport layer. Therefore, to expose Zeebe Gateway externally, you need the following: +Zeebe requires an Ingress controller that supports `gRPC` which is built on top of `HTTP/2` transport layer. Therefore, to expose the Zeebe Gateway externally, you need the following: 1. An Ingress controller that supports `gRPC` ([ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) supports it out of the box). 2. TLS (HTTPS) via [Application-Layer Protocol Negotiation (ALPN)](https://www.rfc-editor.org/rfc/rfc7301.html) enabled in the Zeebe Gateway Ingress object. @@ -67,6 +67,25 @@ A gateway timeout can occur if the headers of a response are too big (for exampl If you encounter errors during Helm chart installation, such as type mismatches or other template rendering issues, you may be using an outdated version of the Helm CLI. 
Helm's handling of data types and template syntax can vary significantly between versions. Ensure you use the Helm CLI version `3.13` or higher. +## DNS disruption issue for Zeebe in Kubernetes clusters (1.29-1.31) + +Kubernetes clusters running versions 1.29 to 1.31 may experience DNS disruptions during complete node restarts, such as during upgrades or evictions, particularly if the cluster's DNS resolver pods are affected. + +This issue is specifically noticeable for Zeebe (Netty), as it will no longer be able to form a cluster because of improper DNS responses. This occurs because Zeebe continues to communicate with a non-existent DNS resolver, caused by improper cleanup of conntrack entries for UDP connections. + +Details on this issue can be found in [this Kubernetes issue](https://github.com/kubernetes/kubernetes/issues/125467) and has been resolved in the following patch releases: + +- Kubernetes 1.29.10 +- Kubernetes 1.30.6 +- Kubernetes 1.31.2 + +Kubernetes versions 1.32 and versions before 1.29 are not affected. + +If an immediate cluster upgrade to a fixed version is not possible, the following temporary workarounds can be applied if you encounter DNS issues: + +- Restart the `kube-proxy` pod(s) +- Delete the affected Zeebe pod + ## Anomaly detection scripts The [c8-sm-checks](https://github.com/camunda/c8-sm-checks) project introduces a set of scripts to aid detection of Camunda deployment anomalies. @@ -133,6 +152,12 @@ This script verifies connectivity to a Zeebe instance using HTTP/2 and gRPC prot Find more information on [how to register your application on Identity](https://github.com/camunda-community-hub/camunda-8-examples/blob/main/payment-example-process-application/kube/README.md#4-generating-an-m2m-token-for-our-application). +### IRSA configuration check + +The AWS EKS IRSA configuration scripts are focused on verifying the correct setup of IAM Roles for Service Accounts (IRSA) within your Kubernetes deployment on AWS. These scripts ensure that your Kubernetes service accounts are correctly associated with IAM roles, allowing components like PostgreSQL, OpenSearch, and others in your deployment to securely interact with AWS resources. + +For detailed usage instructions and setup information, please refer to the [IRSA guide](/self-managed/setup/deploy/amazon/amazon-eks/irsa.md#irsa-check-script). + ### Interpretation of the results Each script produces an output indicating the status of individual checks, which can be either `[OK]`, which signals a healthy status, or `[FAIL]`, which signals an unhealthy status. diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/840-to-850.md b/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/840-to-850.md index b94830b187a..0a36d850327 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/840-to-850.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/840-to-850.md @@ -31,11 +31,11 @@ Note that there is **no** actual corruption or data loss, however. The broker health check routes have moved, and the old routes are now deprecated. Specifically, the following routes will return [a status code of 301](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/301) and redirect you. 
See the table below about the new mappings: -| Old route | **New route** | -| --------------------------------------- | ------------------------------------------------------------- | -| http://{zeebe-broker-host}:9600/health | **http://{zeebe-broker-host}:9600/actuator/health/status** | -| http://{zeebe-broker-host}:9600/ready | **http://{zeebe-broker-host}:9600/actuator/health/readiness** | -| http://{zeebe-broker-host}:9600/startup | **http://{zeebe-broker-host}:9600/actuator/health/startup** | +| Old route | **New route** | +| ---------------------------------------- | -------------------------------------------------------------- | +| http://\{zeebe-broker-host}:9600/health | **http://\{zeebe-broker-host}:9600/actuator/health/status** | +| http://\{zeebe-broker-host}:9600/ready | **http://\{zeebe-broker-host}:9600/actuator/health/readiness** | +| http://\{zeebe-broker-host}:9600/startup | **http://\{zeebe-broker-host}:9600/actuator/health/startup** | Please migrate to the new routes in your deployments. **If you're using the official Helm charts, then you don't have to do anything here.** diff --git a/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/introduction.md b/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/introduction.md index 3a0ddf4236b..516bad6e7e5 100644 --- a/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/introduction.md +++ b/versioned_docs/version-8.6/self-managed/operational-guides/update-guide/introduction.md @@ -12,12 +12,15 @@ When updating from one minor version to the next, you do not need to update to e Depending on your amount of data, run a minor version for at least 24 hours before updating to the next version. -:::note -Versions prior to Camunda 8 are listed below and identified as Camunda Cloud versions. -::: - There is a dedicated update guide for each version: +### [Camunda 8.5 to Camunda 8.6](../850-to-860) + +Update from 8.5.x to 8.6.0 + +[Release notes](/reference/release-notes/860.md) | +[Release blog](https://camunda.com/blog/2024/10/camunda-8-6-release/) + ### [Camunda 8.4 to Camunda 8.5](../840-to-850) Update from 8.4.x to 8.5.0 diff --git a/versioned_docs/version-8.6/self-managed/react-components/components.md b/versioned_docs/version-8.6/self-managed/react-components/components.md index d934e5a1deb..3f7f96ad8e7 100644 --- a/versioned_docs/version-8.6/self-managed/react-components/components.md +++ b/versioned_docs/version-8.6/self-managed/react-components/components.md @@ -16,6 +16,6 @@ Camunda 8 Self-Managed users may also use [Desktop Modeler](../../components/mod :::note -To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/contact). +To obtain or retrieve your Camunda 8 credentials for Enterprise licenses, visit the [contact page](/reference/contact.md). 
::: diff --git a/versioned_docs/version-8.6/self-managed/setup/assets/operate-dashboard-no-processes.png b/versioned_docs/version-8.6/self-managed/setup/assets/operate-dashboard-no-processes.png index d00ff42eb40..fbcdb3efc52 100644 Binary files a/versioned_docs/version-8.6/self-managed/setup/assets/operate-dashboard-no-processes.png and b/versioned_docs/version-8.6/self-managed/setup/assets/operate-dashboard-no-processes.png differ diff --git a/versioned_docs/version-8.6/self-managed/setup/assets/operate-introduction.png b/versioned_docs/version-8.6/self-managed/setup/assets/operate-introduction.png index 6935f5092d9..ab58ba88927 100644 Binary files a/versioned_docs/version-8.6/self-managed/setup/assets/operate-introduction.png and b/versioned_docs/version-8.6/self-managed/setup/assets/operate-introduction.png differ diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md index 71c2cb13681..15670e7721b 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/dual-region.md @@ -95,13 +95,13 @@ https://github.com/camunda/c8-multi-region/blob/stable/8.6/aws/dual-region/scrip #### config.tf -This file contains the [backend](https://developer.hashicorp.com/terraform/language/settings/backends/configuration) and [provider](https://developer.hashicorp.com/terraform/language/providers/configuration) configuration, meaning where to store the [Terraform state](https://developer.hashicorp.com/terraform/language/state) and which providers to use, their versions, and potential credentials. +This file contains the [backend](https://developer.hashicorp.com/terraform/language/backend) and [provider](https://developer.hashicorp.com/terraform/language/providers/configuration) configuration, meaning where to store the [Terraform state](https://developer.hashicorp.com/terraform/language/state) and which providers to use, their versions, and potential credentials. The important part of `config.tf` is the initialization of two AWS providers, as you need one per region and this is a limitation by AWS given everything is scoped to a region. :::note -It's recommended to use a different backend than `local`. Find more information in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/settings/backends/configuration). +It's recommended to use a different backend than `local`. Find more information in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/backend). ::: @@ -233,7 +233,8 @@ kubectl --context $CLUSTER_1 apply -f https://raw.githubusercontent.com/camunda/ ``` 3. The script will retrieve the IPs of the load balancer via the AWS CLI and return the required config change. -4. As the script suggests, copy the statement between the placeholders to edit the CoreDNS configmap in cluster 0 and cluster 1, depending on the placeholder. +4. The script prints the `kubectl edit` commands to change the DNS settings of each cluster inline. Copy the statement between the placeholders to edit the CoreDNS configmap in cluster 0 and cluster 1, depending on the placeholder. + An alternative to inline editing is to create two copies of the file `kubernetes/coredns.yml`, one for each cluster. Add the section generated by the script to each file. Apply the changes to each cluster with e.g. 
`kubectl --context cluster-london -n kube-system apply -f file.yml`. Replace the `context` parameter with the context name of each cluster.
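+For example, assuming the two copies are named `coredns-cluster-0.yml` and `coredns-cluster-1.yml` and the contexts are called `cluster-london` and `cluster-paris` (both the file and context names are placeholders for your own values), the apply step could look like:
+
+```shell
+# Placeholders only: apply each generated CoreDNS configmap to its own cluster
+kubectl --context cluster-london -n kube-system apply -f coredns-cluster-0.yml
+kubectl --context cluster-paris -n kube-system apply -f coredns-cluster-1.yml
+```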
    Example output diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md index d0d28f778da..1a46c925e7b 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md @@ -1,30 +1,40 @@ --- id: eks-helm title: "Install Camunda 8 on an EKS cluster" -description: "Set up the Camunda 8 environment with Helm and an optional DNS setup on Amazon EKS." +description: "Set up the Camunda 8 environment with Helm and an optional Ingress setup on Amazon EKS." --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -This guide offers a comprehensive guide for installing the Camunda 8 Helm chart on your pre-existing AWS Kubernetes EKS cluster. Additionally, it includes instructions for setting up an optional DNS configuration. -Lastly you'll verify that the connection to your Self-Managed Camunda 8 environment is working. +This guide provides a comprehensive walkthrough for installing the Camunda 8 Helm chart on your existing AWS Kubernetes EKS cluster. It also includes instructions for setting up optional DNS configurations and other optional AWS-managed services, such as OpenSearch and PostgreSQL. -## Prerequisites +Lastly you'll verify that the connection to your Self-Managed Camunda 8 environment is working. -- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [terraform](./terraform-setup.md) guide. +## Requirements +- A Kubernetes cluster; see the [eksctl](./eksctl.md) or [Terraform](./terraform-setup.md) guide. - [Helm (3.16+)](https://helm.sh/docs/intro/install/) - [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. +- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some variables. +- [GNU envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html) to generate manifests. - (optional) Domain name/[hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html) in Route53. This allows you to expose Camunda 8 and connect via [zbctl](/apis-tools/community-clients/cli-client/index.md) or [Camunda Modeler](https://camunda.com/download/modeler/). +- A namespace to host the Camunda Platform, in this guide we will reference `camunda` as the target namespace. -## Considerations +### Considerations While this guide is primarily tailored for UNIX systems, it can also be run under Windows by utilizing the [Windows Subsystem for Linux](https://learn.microsoft.com/windows/wsl/about). Multi-tenancy is disabled by default and is not covered further in this guide. If you decide to enable it, you may use the same PostgreSQL instance and add an extra database for multi-tenancy purposes. -### Architecture +:::caution Optimize compatibility with OpenSearch + +**Migration:** The migration step will be disabled during the installation. For more information, refer to [using Amazon OpenSearch Service](/self-managed/setup/guides/using-existing-opensearch.md). +::: + +## Architecture + + Note the [existing architecture](../../../../about-self-managed.md#architecture) extended by deploying a Network Load Balancer with TLS termination within the [ingress](https://kubernetes.github.io/ingress-nginx/user-guide/tls/) below. 
@@ -32,51 +42,85 @@ Additionally, two components ([external-dns](https://github.com/kubernetes-sigs/ ![Camunda 8 Self-Managed AWS Architecture Diagram](./assets/camunda-8-self-managed-architecture-aws.png) -## Usage - -In the following, we're using `helm upgrade --install` as it runs install on initial deployment and upgrades future usage. This may make it easier for future [Camunda 8 Helm upgrades](/self-managed/setup/upgrade.md) or any other component upgrades. - -### Environment prerequisites +## Export environment variables To streamline the execution of the subsequent commands, it is recommended to export multiple environment variables. +### Export the AWS region and Helm chart version + The following are the required environment variables with some example values: +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/chart-env.sh +``` + +### Export database values + +When using either standard authentication (network based or username and password) or IRSA authentication, specific environment variables must be set with valid values. Follow the guide for either [eksctl](./eksctl.md#configuration-1) or [Terraform](./terraform-setup.md#export-values-for-the-helm-chart) to set them correctly. + +Verify the configuration of your environment variables by running the following loop: + + + + + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/check-env-variables.sh +``` + + + + + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/procedure/check-env-variables.sh +``` + + + + + +## (Optional) Ingress Setup + +:::info Domain or domainless installation + +If you do not have a domain name, external access to Camunda 8 web endpoints from outside the AWS VPC will not be possible. In this case, you may skip the DNS setup and proceed directly to [deploying Camunda 8 via Helm charts](#deploy-camunda-8-via-helm-charts). + +Alternatively, you can use `kubectl port-forward` to access the Camunda platform without a domain or Ingress configuration. For more information, see the [kubectl port-forward documentation](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_port-forward/). + +Throughout the rest of this installation guide, we will refer to configurations as **"With domain"** or **"Without domain"** depending on whether the application is exposed via a domain. +::: + +In this section, we provide an optional setup guide for configuring an Ingress with TLS and DNS management, allowing you to access your application through a specified domain. If you haven't set up an Ingress, refer to the [Kubernetes Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) for more details. In Kubernetes, an Ingress is an API object that manages external access to services in a cluster, typically over HTTP, and can also handle TLS encryption for secure connections. + +To monitor your Ingress setup using Amazon CloudWatch, you may also find the official AWS guide on [monitoring nginx workloads with CloudWatch Container Insights and Prometheus](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights-Prometheus-Sample-Workloads-nginx.html) helpful. 
Additionally, for detailed steps on exposing Kubernetes applications with the nginx ingress controller, refer to the [official AWS tutorial](https://aws.amazon.com/fr/blogs/containers/exposing-kubernetes-applications-part-3-nginx-ingress-controller/). + +### Export Values + +Set the following values for your Ingress configuration: + ```shell -# Your standard region that you host AWS resources in -export REGION=eu-central-1 -# Following two environment variables can be skipped if you don't have a domain -# The domain name that you intend to use +# The domain name you intend to use export DOMAIN_NAME=camunda.example.com -# The e-mail to register with Let's Encrypt +# The email address for Let's Encrypt registration export MAIL=admin@camunda.example.com -# The Ingress-Nginx Helm Chart version +# Helm chart versions for Ingress components export INGRESS_HELM_CHART_VERSION="4.11.2" -# The External DNS Helm Chart version export EXTERNAL_DNS_HELM_CHART_VERSION="1.15.0" -# The Cert-Manager Helm Chart version export CERT_MANAGER_HELM_CHART_VERSION="1.15.3" -# The Camunda 8 Helm Chart version -export CAMUNDA_HELM_CHART_VERSION="11.0.0" ``` -Additionally, follow the guide from either [eksctl](./eks-helm.md) or [Terraform](./terraform-setup.md) to retrieve the following values, which will be required for subsequent steps: - -- EXTERNAL_DNS_IRSA_ARN -- CERT_MANAGER_IRSA_ARN -- DB_HOST -- PG_USERNAME -- PG_PASSWORD -- DEFAULT_DB_NAME -- REGION +Additionally, obtain these values by following the guide for either [eksctl](./eks-helm.md) or [Terraform](./terraform-setup.md), as they will be needed in later steps: -### DNS set up +- `EXTERNAL_DNS_IRSA_ARN` +- `CERT_MANAGER_IRSA_ARN` +- `REGION` -:::info -If you don't have a domain name, you cannot access Camunda 8 web endpoints from outside the AWS VPC. Therefore, you can skip the DNS set up and continue with deploying [Camunda 8](#deploy-camunda-8-via-helm-charts). -::: - -#### ingress-nginx +### ingress-nginx [Ingress-nginx](https://github.com/kubernetes/ingress-nginx) is an open-source Kubernetes Ingress controller that provides a way to manage external access to services within a Kubernetes cluster. It acts as a reverse proxy and load balancer, routing incoming traffic to the appropriate services based on rules defined in the Ingress resource. @@ -94,7 +138,7 @@ helm upgrade --install \ --create-namespace ``` -#### external-dns +### external-dns [External-dns](https://github.com/kubernetes-sigs/external-dns) is a Kubernetes add-on that automates the management of DNS records for external resources, such as load balancers or Ingress controllers. It monitors the Kubernetes resources and dynamically updates the DNS provider with the appropriate DNS records. @@ -106,7 +150,8 @@ Consider setting `domainFilters` via `--set` to restrict access to certain hoste Make sure to have `EXTERNAL_DNS_IRSA_ARN` exported prior by either having followed the [eksctl](./eksctl.md#policy-for-external-dns) or [Terraform](./terraform-setup.md#outputs) guide. ::: -:::warning +:::warning Uniqueness of txtOwnerId for DNS + If you are already running `external-dns` in a different cluster, ensure each instance has a **unique** `txtOwnerId` for the TXT record. Without unique identifiers, the `external-dns` instances will conflict and inadvertently delete existing DNS records. In the example below, it's set to `external-dns` and should be changed if this identifier is already in use. 
Consult the [documentation](https://kubernetes-sigs.github.io/external-dns/v0.15.0/#note) to learn more about DNS record ownership. @@ -126,7 +171,7 @@ helm upgrade --install \ --create-namespace ``` -#### cert-manager +### cert-manager [Cert-manager](https://cert-manager.io/) is an open-source Kubernetes add-on that automates the management and issuance of TLS certificates. It integrates with various certificate authorities (CAs) and provides a straightforward way to obtain, renew, and manage SSL/TLS certificates for your Kubernetes applications. @@ -181,120 +226,434 @@ spec: EOF ``` -### Deploy Camunda 8 via Helm charts +## Deploy Camunda 8 via Helm charts For more configuration options, refer to the [Helm chart documentation](https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters). Additionally, explore our existing resources on the [Camunda 8 Helm chart](/self-managed/setup/install.md) and [guides](/self-managed/setup/guides/guides.md). - - +Depending of your installation path, you may use different settings. +For easy and reproducible installations, we will use yaml files to configure the chart. + +### 1. Create the `values.yml` file + +Start by creating a `values.yml` file to store the configuration for your environment. This file will contain key-value pairs that will be substituted using `envsubst`. You can find a reference example of this file here: + + + The following makes use of the [combined Ingress setup](/self-managed/setup/guides/ingress-setup.md#combined-ingress-setup) by deploying a single Ingress for all HTTP components and a separate Ingress for the gRPC endpoint. -:::warning +:::info Cert-manager annotation for domain installation +The annotation `kubernetes.io/tls-acme=true` will be [interpreted by cert-manager](https://cert-manager.io/docs/usage/ingress/) and automatically results in the creation of the required certificate request, easing the setup. +::: + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/helm-values/values-domain.yml +``` + +:::warning Exposure of the Zeebe Gateway + +Publicly exposing the Zeebe Gateway without proper authorization can pose significant security risks. To avoid this, consider disabling the Ingress for the Zeebe Gateway by setting the following values to `false` in your configuration file: -Publicly exposing the Zeebe Gateway without authorization enabled can lead to severe security risks. Consider disabling the Ingress for the Zeebe Gateway by setting the `zeebeGateway.ingress.grpc.enabled` and `zeebeGateway.ingress.rest.enabled` to `false`. +- `zeebeGateway.ingress.grpc.enabled` +- `zeebeGateway.ingress.rest.enabled` -By default, authorization is enabled to ensure secure access to Zeebe. Typically, only internal components need direct access, making it unnecessary to expose Zeebe externally. +By default, authorization is enabled to ensure secure access to Zeebe. Typically, only internal components need direct access to Zeebe, making it unnecessary to expose the gateway externally. 
::: -```shell -helm upgrade --install \ - camunda camunda-platform \ - --repo https://helm.camunda.io \ - --version $CAMUNDA_HELM_CHART_VERSION \ - --namespace camunda \ - --create-namespace \ - --set identityKeycloak.postgresql.enabled=false \ - --set identityKeycloak.externalDatabase.host=$DB_HOST \ - --set identityKeycloak.externalDatabase.user=$PG_USERNAME \ - --set identityKeycloak.externalDatabase.password=$PG_PASSWORD \ - --set identityKeycloak.externalDatabase.database=$DEFAULT_DB_NAME \ - --set global.ingress.enabled=true \ - --set global.ingress.host=$DOMAIN_NAME \ - --set global.ingress.tls.enabled=true \ - --set global.ingress.tls.secretName=camunda-c8-tls \ - --set-string 'global.ingress.annotations.kubernetes\.io\/tls-acme=true' \ - --set global.identity.auth.publicIssuerUrl="https://$DOMAIN_NAME/auth/realms/camunda-platform" \ - --set global.identity.auth.operate.redirectUrl="https://$DOMAIN_NAME/operate" \ - --set global.identity.auth.tasklist.redirectUrl="https://$DOMAIN_NAME/tasklist" \ - --set global.identity.auth.optimize.redirectUrl="https://$DOMAIN_NAME/optimize" \ - --set identity.contextPath="/identity" \ - --set identity.fullURL="https://$DOMAIN_NAME/identity" \ - --set operate.contextPath="/operate" \ - --set tasklist.contextPath="/tasklist" \ - --set optimize.contextPath="/optimize" \ - --set zeebeGateway.ingress.grpc.enabled=true \ - --set zeebeGateway.ingress.grpc.host=zeebe.$DOMAIN_NAME \ - --set zeebeGateway.ingress.grpc.tls.enabled=true \ - --set zeebeGateway.ingress.grpc.tls.secretName=zeebe-c8-tls-grpc \ - --set-string 'zeebeGateway.ingress.grpc.annotations.kubernetes\.io\/tls-acme=true' \ - --set zeebeGateway.contextPath="/zeebe" -``` - -The annotation `kubernetes.io/tls-acme=true` is [interpreted by cert-manager](https://cert-manager.io/docs/usage/ingress/) and automatically results in the creation of the required certificate request, easing the setup. +#### Reference the credentials in secrets + +Before installing the Helm chart, create Kubernetes secrets to store the Keycloak database authentication credentials and the OpenSearch authentication credentials. + +To create the secrets, run the following commands: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/create-external-db-secrets.sh +``` + + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/helm-values/values-no-domain.yml +``` + +#### Reference the credentials in secrets + +Before installing the Helm chart, create Kubernetes secrets to store the Keycloak database authentication credentials and the OpenSearch authentication credentials. 
+ +To create the secrets, run the following commands: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/create-external-db-secrets.sh +``` - -```shell -helm upgrade --install \ - camunda camunda-platform \ - --repo https://helm.camunda.io \ - --version $CAMUNDA_HELM_CHART_VERSION \ - --namespace camunda \ - --create-namespace \ - --set identityKeycloak.postgresql.enabled=false \ - --set identityKeycloak.externalDatabase.host=$DB_HOST \ - --set identityKeycloak.externalDatabase.user=$PG_USERNAME \ - --set identityKeycloak.externalDatabase.password=$PG_PASSWORD \ - --set identityKeycloak.externalDatabase.database=$DEFAULT_DB_NAME + + +The following makes use of the [combined Ingress setup](/self-managed/setup/guides/ingress-setup.md#combined-ingress-setup) by deploying a single Ingress for all HTTP components and a separate Ingress for the gRPC endpoint. + +:::info Cert-manager annotation for domain installation +The annotation `kubernetes.io/tls-acme=true` will be [interpreted by cert-manager](https://cert-manager.io/docs/usage/ingress/) and automatically results in the creation of the required certificate request, easing the setup. +::: + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/helm-values/values-domain.yml +``` + +:::warning Exposure of the Zeebe Gateway + +Publicly exposing the Zeebe Gateway without proper authorization can pose significant security risks. To avoid this, consider disabling the Ingress for the Zeebe Gateway by setting the following values to `false` in your configuration file: + +- `zeebeGateway.ingress.grpc.enabled` +- `zeebeGateway.ingress.rest.enabled` + +By default, authorization is enabled to ensure secure access to Zeebe. Typically, only internal components need direct access to Zeebe, making it unnecessary to expose the gateway externally. + +::: + + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/helm-values/values-no-domain.yml ``` + -### Verify connectivity to Camunda 8 +### 2. Configure your deployment + +#### Enable Enterprise components + +Some components are not enabled by default in this deployment. For more information on how to configure and enable these components, refer to [configuring Web Modeler, Console, and Connectors](../../../install.md#configuring-web-modeler-console-and-connectors). + +#### Use internal Elasticsearch instead of the managed OpenSearch + +If you do not wish to use a managed OpenSearch service, you can opt to use the internal Elasticsearch deployment. This configuration disables OpenSearch and enables the internal Kubernetes Elasticsearch deployment: + +
    +Show configuration changes to disable external OpenSearch usage + +```yaml +global: + elasticsearch: + enabled: true + opensearch: + enabled: false + +elasticsearch: + enabled: true +``` + +
    + +#### Use internal PostgreSQL instead of the managed Aurora + +If you prefer not to use an external PostgreSQL service, you can switch to the internal PostgreSQL deployment. In this case, you will need to configure the Helm chart as follows and remove certain configurations related to the external database and service account: + +
    +Show configuration changes to disable external database usage + +```yaml +identityKeycloak: + postgresql: + enabled: true + + # Remove external database configuration + # externalDatabase: + # ... + + # Remove service account and annotations + # serviceAccount: + # ... + + # Remove extra environment variables for external database driver + # extraEnvVars: + # ... + +webModeler: + # Remove this part + + # restapi: + # externalDatabase: + # url: jdbc:aws-wrapper:postgresql://${DB_HOST}:5432/${DB_WEBMODELER_NAME} + # user: ${DB_WEBMODELER_USERNAME} + # existingSecret: webmodeler-postgres-secret + # existingSecretPasswordKey: password + +identity: + # Remove this part + + # externalDatabase: + # enabled: true + # host: ${DB_HOST} + # port: 5432 + # username: ${DB_IDENTITY_USERNAME} + # database: ${DB_IDENTITY_NAME} + # existingSecret: identity-postgres-secret + # existingSecretPasswordKey: password +``` + +
    + +#### Fill your deployment with actual values + +Once you've prepared the `values.yml` file, run the following `envsubst` command to substitute the environment variables with their actual values: + +```bash +# generate the final values +envsubst < values.yml > generated-values.yml + +# print the result +cat generated-values.yml +``` + +:::info Camunda Helm chart no longer automatically generates passwords + +Starting from **Camunda 8.6**, the Helm chart deprecated the automatic generation of secrets, and this feature has been fully removed in **Camunda 8.7**. + +::: + +Next, store various passwords in a Kubernetes secret, which will be used by the Helm chart. Below is an example of how to set up the required secret. You can use `openssl` to generate random secrets and store them in environment variables: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/generate-passwords.sh +``` + +Use these environment variables in the `kubectl` command to create the secret. + +- The values for `postgres-password` and `password` are not required if you are using an external database. If you choose not to use an external database, you must provide those values. +- The `smtp-password` should be replaced with the appropriate external value ([see how it's used by Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#smtp--email)). + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/create-identity-secret.sh +``` + +### 3. Install Camunda 8 using Helm + +Now that the `generated-values.yml` is ready, you can install Camunda 8 using Helm. Run the following command: + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/install-chart.sh +``` + +This command: + +- Installs (or upgrades) the Camunda platform using the Helm chart. +- Substitutes the appropriate version using the `$CAMUNDA_HELM_CHART_VERSION` environment variable. +- Applies the configuration from `generated-values.yml`. + +:::note + +This guide uses `helm upgrade --install` as it runs install on initial deployment and upgrades future usage. This may make it easier for future [Camunda 8 Helm upgrades](/self-managed/setup/upgrade.md) or any other component upgrades. + +::: + +You can track the progress of the installation using the following command: + +```bash +watch -n 5 ' + kubectl get pods -n camunda --output=wide; + if [ $(kubectl get pods -n camunda --field-selector=status.phase!=Running -o name | wc -l) -eq 0 ] && + [ $(kubectl get pods -n camunda -o json | jq -r ".items[] | select(.status.containerStatuses[]?.ready == false)" | wc -l) -eq 0 ]; + then + echo "All pods are Running and Healthy - Installation completed!"; + else + echo "Some pods are not Running or Healthy"; + fi +' +``` + +
    +Understand how each component interacts with IRSA + + +#### Web Modeler + +As the Web Modeler REST API uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. +Web Modeler already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. + +#### Keycloak + +:::caution Only available from v21+ + +IAM Roles for Service Accounts can only be implemented with Keycloak 21+. This may require you to adjust the version used in the Camunda Helm chart. + +::: + +From Keycloak versions 21+, the default JDBC driver can be overwritten, allowing use of a custom wrapper like the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) to utilize the features of IRSA. This is a wrapper around the default JDBC driver, but takes care of signing the requests. + +The [official Keycloak documentation](https://www.keycloak.org/server/db#preparing-keycloak-for-amazon-aurora-postgresql) also provides detailed instructions for utilizing Amazon Aurora PostgreSQL. + +A custom Keycloak container image containing necessary configurations is accessible on Docker Hub at [camunda/keycloak](https://hub.docker.com/r/camunda/keycloak). This image, built upon the base image [bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak), incorporates the required wrapper for seamless integration. + +#### Container image sources + +The sources of the [Camunda Keycloak images](https://hub.docker.com/r/camunda/keycloak) can be found on [GitHub](https://github.com/camunda/keycloak). In this repository, the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) is assembled in the `Dockerfile`. + +Maintenance of these images is based on the upstream [Bitnami Keycloak images](https://hub.docker.com/r/bitnami/keycloak), ensuring they are always up-to-date with the latest Keycloak releases. The lifecycle details for Keycloak can be found on [endoflife.date](https://endoflife.date/keycloak). + +##### Keycloak image configuration + +Bitnami Keycloak container image configuration is available at [hub.docker.com/bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak). + +##### Identity + +Identity uses PostgreSQL, and `identity` is configured to use IRSA with Amazon Aurora PostgreSQL. Check the [Identity database configuration](../../../../identity/deployment/configuration-variables.md#running-identity-on-amazon-aurora-postgresql) for more details. Identity includes the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. + +#### Amazon OpenSearch Service + +##### Internal database configuration + +The default setup is sufficient for Amazon OpenSearch Service clusters without **fine-grained access control**. + +Fine-grained access control adds another layer of security to OpenSearch, requiring you to add a mapping between the IAM role and the internal OpenSearch role. Visit the [AWS documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html) on fine-grained access control. + +There are different ways to configure the mapping within Amazon OpenSearch Service: + +- Via a [Terraform module](https://registry.terraform.io/modules/idealo/opensearch/aws/latest) in case your OpenSearch instance is exposed. 
+- Via the [OpenSearch dashboard](https://opensearch.org/docs/latest/security/access-control/users-roles/). +- Via the **REST API**. To authorize the IAM role in OpenSearch for access, follow these steps: + + Use the following `curl` command to update the OpenSearch internal database and authorize the IAM role for access. Replace placeholders with your specific values: + + ```bash + curl -sS -u ":" \ + -X PATCH \ + "https:///_opendistro/_security/api/rolesmapping/all_access?pretty" \ + -H 'Content-Type: application/json' \ + -d' + [ + { + "op": "add", + "path": "/backend_roles", + "value": [""] + } + ] + ' + ``` + + - Replace `` and `` with your OpenSearch domain admin credentials. + - Replace `` with your OpenSearch endpoint URL. + - Replace `` with the IAM role name created by Terraform, which is output by the `opensearch_role` module. + + :::note Security of basic auth usage + + **This example uses basic authentication (username and password), which may not be the best practice for all scenarios, especially if fine-grained access control is enabled.** The endpoint used in this example is not exposed by default, so consult your OpenSearch documentation for specifics on enabling and securing this endpoint. + + ::: + +Ensure that the `iam_role_arn` of the previously created `opensearch_role` is assigned to an internal role within Amazon OpenSearch Service. For example, `all_access` on the Amazon OpenSearch Service side is a good candidate, or if required, extra roles can be created with more restrictive access. + + +
    + +## Verify connectivity to Camunda 8 First, we need an OAuth client to be able to connect to the Camunda 8 cluster. -This can be done by following the [Identity getting started guide](/self-managed/identity/getting-started/install-identity.md) followed by the [incorporating applications documentation](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). -Instead of creating a confidential application, a machine-to-machine (M2M) application is required to be created. -This reveals a `client-id` and `client-secret` that can be used to connect to the Camunda 8 cluster. +### Generate an M2M token using Identity + +Generate an M2M token by following the steps outlined in the [Identity getting started guide](/self-managed/identity/getting-started/install-identity.md), along with the [incorporating applications documentation](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). + +Below is a summary of the necessary instructions: + + + + +1. Open Identity in your browser at `https://${DOMAIN_NAME}/identity`. You will be redirected to Keycloak and prompted to log in with a username and password. +2. Use `demo` as both the username and password. +3. Select **Add application** and select **M2M** as the type. Assign a name like "test." +4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Zeebe API** with "write" permission. +5. Retrieve the `client-id` and `client-secret` values from the application details + +```shell +export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application +export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +``` + + + + + +Identity and Keycloak must be port-forwarded to be able to connect to the cluster. + +```shell +kubectl port-forward services/camunda-identity 8080:80 --namespace camunda +kubectl port-forward services/camunda-keycloak 18080:80 --namespace camunda +``` + +1. Open Identity in your browser at `http://localhost:8080`. You will be redirected to Keycloak and prompted to log in with a username and password. +2. Use `demo` as both the username and password. +3. Select **Add application** and select **M2M** as the type. Assign a name like "test." +4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Zeebe API** with "write" permission. +5. Retrieve the `client-id` and `client-secret` values from the application details + +```shell +export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application +export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +``` + +
    +To access the other services and their UIs, port-forward those Components as well: + + +```shell +Operate: +> kubectl port-forward svc/camunda-operate 8081:80 --namespace camunda +Tasklist: +> kubectl port-forward svc/camunda-tasklist 8082:80 --namespace camunda +Optimize: +> kubectl port-forward svc/camunda-optimize 8083:80 --namespace camunda +Connectors: +> kubectl port-forward svc/camunda-connectors 8086:8080 --namespace camunda +WebModeler: +> kubectl port-forward svc/camunda-web-modeler-webapp 8084:80 --namespace camunda +Console: +> kubectl port-forward svc/camunda-console 8085:80 --namespace camunda +``` + + +
    + +
    +
    + +### Use the token - + For a detailed guide on generating and using a token, please conduct the relevant documentation on [authenticating with the REST API](./../../../../../apis-tools/camunda-api-rest/camunda-api-rest-authentication.md?environment=self-managed). - + Export the following environment variables: ```shell -export ZEEBE_ADDRESS=zeebe-rest.$DOMAIN_NAME -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +export ZEEBE_ADDRESS_REST=https://$DOMAIN_NAME/zeebe export ZEEBE_AUTHORIZATION_SERVER_URL=https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token ``` - + -This requires to port-forward the Zeebe Gateway and Keycloak to be able to connect to the cluster. +This requires to port-forward the Zeebe Gateway to be able to connect to the cluster. ```shell -kubectl port-forward services/camunda-zeebe-gateway 8080:8080 -kubectl port-forward services/camunda-keycloak 18080:80 +kubectl port-forward services/camunda-zeebe-gateway 8080:8080 --namespace camunda ``` Export the following environment variables: ```shell -export ZEEBE_ADDRESS=localhost:8080 -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application +export ZEEBE_ADDRESS_REST=http://localhost:8080 export ZEEBE_AUTHORIZATION_SERVER_URL=http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token ``` @@ -302,22 +661,20 @@ export ZEEBE_AUTHORIZATION_SERVER_URL=http://localhost:18080/auth/realms/camunda -Generate a temporary token to access the REST API: +Generate a temporary token to access the REST API, then capture the value of the `access_token` property and store it as your token. ```shell -curl --location --request POST "${ZEEBE_AUTHORIZATION_SERVER_URL}" \ +export TOKEN=$(curl --location --request POST "${ZEEBE_AUTHORIZATION_SERVER_URL}" \ --header "Content-Type: application/x-www-form-urlencoded" \ --data-urlencode "client_id=${ZEEBE_CLIENT_ID}" \ --data-urlencode "client_secret=${ZEEBE_CLIENT_SECRET}" \ ---data-urlencode "grant_type=client_credentials" +--data-urlencode "grant_type=client_credentials" | jq '.access_token' -r) ``` -Capture the value of the `access_token` property and store it as your token. - Use the stored token, in our case `TOKEN`, to use the REST API to print the cluster topology. ```shell -curl --header "Authorization: Bearer ${TOKEN}" "${ZEEBE_ADDRESS}/v2/topology" +curl --header "Authorization: Bearer ${TOKEN}" "${ZEEBE_ADDRESS_REST}/v2/topology" ``` ...and results in the following output: @@ -415,41 +772,36 @@ curl --header "Authorization: Bearer ${TOKEN}" "${ZEEBE_ADDRESS}/v2/topology" After following the installation instructions in the [zbctl docs](/apis-tools/community-clients/cli-client/index.md), we can configure the required connectivity to check that the Zeebe cluster is reachable. 
- + Export the following environment variables: ```shell export ZEEBE_ADDRESS=zeebe.$DOMAIN_NAME:443 -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application export ZEEBE_AUTHORIZATION_SERVER_URL=https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token export ZEEBE_TOKEN_AUDIENCE='zeebe-api' export ZEEBE_TOKEN_SCOPE='camunda-identity' ``` - - + + -This requires to port-forward the Zeebe Gateway and Keycloak to be able to connect to the cluster. +This requires to port-forward the Zeebe Gateway to be able to connect to the cluster. ```shell -kubectl port-forward services/camunda-zeebe-gateway 26500:26500 -kubectl port-forward services/camunda-keycloak 18080:80 +kubectl port-forward services/camunda-zeebe-gateway 26500:26500 --namespace camunda ``` Export the following environment variables: ```shell export ZEEBE_ADDRESS=localhost:26500 -export ZEEBE_CLIENT_ID='client-id' # retrieve the value from the identity page of your created m2m application -export ZEEBE_CLIENT_SECRET='client-secret' # retrieve the value from the identity page of your created m2m application export ZEEBE_AUTHORIZATION_SERVER_URL=http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token export ZEEBE_TOKEN_AUDIENCE='zeebe-api' export ZEEBE_TOKEN_SCOPE='camunda-identity' ``` - + @@ -495,103 +847,56 @@ Brokers: For more advanced topics, like deploying a process or registering a worker, consult the [zbctl docs](/apis-tools/community-clients/cli-client/cli-get-started.md). -If you want to access the other services and their UI, you can port-forward those as well: - -```shell -Identity: -> kubectl port-forward svc/camunda-identity 8080:80 -Operate: -> kubectl port-forward svc/camunda-operate 8081:80 -Tasklist: -> kubectl port-forward svc/camunda-tasklist 8082:80 -Optimize: -> kubectl port-forward svc/camunda-optimize 8083:80 -Connectors: -> kubectl port-forward svc/camunda-connectors 8088:8080 -``` - -:::note -Keycloak must be port-forwarded at all times as it is required to authenticate. -::: - -```shell -kubectl port-forward services/camunda-keycloak 18080:80 -``` - - + Follow our existing [Modeler guide on deploying a diagram](/self-managed/modeler/desktop-modeler/deploy-to-self-managed.md). 
Below are the helper values required to be filled in Modeler: - - + + + The following values are required for the OAuth authentication: -```shell -# Make sure to manually replace #DOMAIN_NAME with your actual domain since Modeler can't access the shell context -Cluster endpoint=https://zeebe.$DOMAIN_NAME -Client ID='client-id' # retrieve the value from the identity page of your created m2m application -Client Secret='client-secret' # retrieve the value from the identity page of your created m2m application -OAuth Token URL=https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token -Audience=zeebe-api # the default for Camunda 8 Self-Managed -``` +- **Cluster endpoint:** `https://zeebe.$DOMAIN_NAME`, replacing `$DOMAIN_NAME` with your domain +- **Client ID:** Retrieve the client ID value from the identity page of your created M2M application +- **Client Secret:** Retrieve the client secret value from the Identity page of your created M2M application +- **OAuth Token URL:** `https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token`, replacing `$DOMAIN_NAME` with your domain +- **Audience:** `zeebe-api`, the default for Camunda 8 Self-Managed - - + -This requires to port-forward the Zeebe Gateway and Keycloak to be able to connect to the cluster. + -```shell -kubectl port-forward services/camunda-zeebe-gateway 26500:26500 -kubectl port-forward services/camunda-keycloak 18080:80 -``` - -The following values are required for the OAuth authentication: +This requires port-forwarding the Zeebe Gateway to be able to connect to the cluster: ```shell -# Make sure to manually replace #DOMAIN_NAME with your actual domain since Modeler can't access the shell context -Cluster endpoint=http://localhost:26500 -Client ID='client-id' # retrieve the value from the identity page of your created m2m application -Client Secret='client-secret' # retrieve the value from the identity page of your created m2m application -OAuth Token URL=http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token -Audience=zeebe-api # the default for Camunda 8 Self-Managed +kubectl port-forward services/camunda-zeebe-gateway 26500:26500 --namespace camunda ``` -If you want to access the other services and their UI, you can port-forward those as well: +The following values are required for OAuth authentication: -```shell -Identity: -> kubectl port-forward svc/camunda-identity 8080:80 -Operate: -> kubectl port-forward svc/camunda-operate 8081:80 -Tasklist: -> kubectl port-forward svc/camunda-tasklist 8082:80 -Optimize: -> kubectl port-forward svc/camunda-optimize 8083:80 -Connectors: -> kubectl port-forward svc/camunda-connectors 8088:8080 -``` +- **Cluster endpoint:** `http://localhost:26500` +- **Client ID:** Retrieve the client ID value from the identity page of your created M2M application +- **Client Secret:** Retrieve the client secret value from the Identity page of your created M2M application +- **OAuth Token URL:** `http://localhost:18080/auth/realms/camunda-platform/protocol/openid-connect/token` +- **Audience:** `zeebe-api`, the default for Camunda 8 Self-Managed -:::note -Keycloak must be port-forwarded at all times as it is required to authenticate. 
-::: - -```shell -kubectl port-forward services/camunda-keycloak 18080:80 -``` - - + - + -### Testing installation with payment example application +## Test the installation with payment example application To test your installation with the deployment of a sample application, refer to the [installing payment example guide](../../../guides/installing-payment-example.md). -### Advanced topics +## Advanced topics The following are some advanced configuration topics to consider for your cluster: diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md index 146e779ad8e..23e6f3381ff 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eksctl.md @@ -1,6 +1,6 @@ --- id: eks-eksctl -title: "Deploy an EKS cluster with eksctl" +title: "Deploy an EKS cluster with eksctl (quickstart)" description: "Deploy an Amazon Kubernetes cluster (EKS) with eksctl with step-by-step guidance." --- @@ -8,79 +8,103 @@ This guide explores the streamlined process of deploying Camunda 8 Self-Managed [Eksctl](https://eksctl.io/) is a common CLI tool for quickly creating and managing your Amazon EKS clusters and is [officially endorsed](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html) by Amazon. +While this guide is suitable for testing purposes, building a robust, scalable, and reproducible infrastructure is better achieved using Infrastructure as Code (IaC) tools like those described in the [Terraform guide](./terraform-setup.md), which offers more flexibility and control over your cloud environment. + This guide provides a user-friendly approach for setting up and managing Amazon EKS clusters. It covers everything from the prerequisites, such as AWS IAM role configuration, to creating a fully functional Amazon EKS cluster and a managed Aurora PostgreSQL instance. Ideal for those seeking a practical and efficient method to deploy Camunda 8 on AWS, this guide provides detailed instructions for setting up the necessary environment and AWS IAM configurations. ## Prerequisites - An [AWS account](https://docs.aws.amazon.com/accounts/latest/reference/accounts-welcome.html) is required to create resources within AWS. -- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources. -- [eksctl (0.191+)](https://eksctl.io/getting-started/), a CLI tool for creating and managing Amazon EKS clusters. - [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl), a CLI tool to interact with the cluster. +- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources. +- [eksctl (0.193+)](https://eksctl.io/getting-started/), a CLI tool for creating and managing Amazon EKS clusters. +- This guide uses GNU/Bash for all the shell commands listed. -## Considerations +### Considerations This is a basic setup to get started with Camunda 8 but does not reflect a high performance setup. For a better starting point towards production, we recommend utilizing [Infrastructure as Code tooling](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code) and following our [Terraform guide](./terraform-setup.md). 
+We refer to this architecture as the **standard installation**, which can be set up with or without a **domain** ([Ingress](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html)). +The standard installation utilizes a username and password connection for the Camunda components (or simply relies on network isolation for certain components). This option is straightforward and easier to implement, making it ideal for environments where simplicity and rapid deployment are priorities, or where network isolation provides sufficient security. + To try out Camunda 8 or develop against it, consider signing up for our [SaaS offering](https://camunda.com/platform/), or if you already have an Amazon EKS cluster, consider skipping to the [Helm guide](./eks-helm.md). While the guide is primarily tailored for UNIX systems, it can also be run under Windows by utilizing the [Windows Subsystem for Linux](https://learn.microsoft.com/windows/wsl/about). -:::warning +:::warning Cost management + Following this guide will incur costs on your Cloud provider account, namely for the managed Kubernetes service, running Kubernetes nodes in EC2, Elastic Block Storage (EBS), and Route53. More information can be found on [AWS](https://aws.amazon.com/eks/pricing/) and their [pricing calculator](https://calculator.aws/#/) as the total cost varies per region. + ::: -## Outcome +### Outcome + + -Following this guide results in the following: +This guide results in the following: -- An Amazon EKS 1.30 Kubernetes cluster with four nodes. +- An Amazon EKS Kubernetes cluster running the latest Kubernetes version with four nodes ready for Camunda 8 installation. - Installed and configured [EBS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html), which is used by the Camunda 8 Helm chart to create [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). - A [managed Aurora PostgreSQL 15.x](https://aws.amazon.com/rds/aurora/) instance that will be used by the Camunda 8 components. -- [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured. +- A [managed OpenSearch domain](https://aws.amazon.com/opensearch-service/) created and configured for use with the Camunda platform.. +- [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured and [Pod Identities](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html). - This simplifies the setup by not relying on explicit credentials, but instead allows creating a mapping between IAM roles and Kubernetes service accounts based on a trust relationship. A [blog post](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/) by AWS visualizes this on a technical level. - This allows a Kubernetes service account to temporarily impersonate an AWS IAM role to interact with AWS services like S3, RDS, or Route53 without supplying explicit credentials. This basic cluster setup is required to continue with the Helm set up as described in our [AWS Helm guide](./eks-helm.md). -## Deploying Amazon EKS cluster with eksctl +## 1. Configure AWS and eksctl -The `eksctl` tool allows the creation of clusters via a single command, but this doesn't support all configuration options. Therefore, we're supplying a YAML file that can be used with the CLI to create the cluster preconfigured with various settings. 
+### Set up AWS authentication -### `eksctl` prerequisites +Use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) to run the following commands: -To configure access, set up authentication to allow interaction with AWS via the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html). +```bash +# set your region +export AWS_REGION="eu-central-1" -A user creating AWS resources will be the owner and will always be linked to them. This means that the user will always have admin access on Kubernetes unless you delete it. +aws configure +``` + +Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_REGION`, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). + +:::caution Ownership of the created resources -Therefore, it is a good practice to create a separate [IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) that will be solely used for the `eksctl` command. [Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl`. +A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) to ensure that the resources are managed and owned by that specific user. +This ensures that the user maintains admin access to Kubernetes and associated resources unless those resources are explicitly deleted. + +[Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl`. + +::: -### Environment prerequisites +### Set up eksctl -We recommended exporting multiple environment variables to streamline the execution of the subsequent commands. +[eksctl](https://eksctl.io/) is a tool that allows the creation of clusters via a single command, but does not support all configuration options. This setup supplies a YAML file that can be used with the CLI to create the cluster preconfigured with various settings. -The following are the required environment variables with some example values. Define your secure password for the Postgres database. +Review the [installation guide](https://eksctl.io/installation/) for additional details. + +### Configure your infrastructure + +In this guide, we will set up multiple environment variables to configure the components. +Each component starts with a section that configures the different variables according to your needs. + +## 2. 
EKS cluster + +### Configuration ```shell +##### Kubernetes parameters + # The name used for the Kubernetes cluster export CLUSTER_NAME=camunda-cluster # Your standard region that you host AWS resources in -export REGION=eu-central-1 +export REGION="$AWS_REGION" # Multi-region zones, derived from the region -export ZONES="eu-central-1a eu-central-1b eu-central-1c" +export ZONES="${REGION}a ${REGION}b ${REGION}c" # The AWS Account ID export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) # CIDR range used for the VPC subnets export CIDR=10.192.0.0/16 -# Name for the Postgres DB cluster and instance -export RDS_NAME=camunda-postgres -# Postgres DB admin username -export PG_USERNAME=camunda -# Postgres DB password of the admin user -export PG_PASSWORD=camundarocks123 -# The default database name created within Postgres. Can directly be consumed by the Helm chart -export DEFAULT_DB_NAME=camunda -# The PostgreSQL version -export POSTGRESQL_VERSION=15.8 # Optional # Default node type for the Kubernetes cluster @@ -108,7 +132,7 @@ The variable `KMS_ARN` contains the required output. It should look something li For more information concerning the KMS encryption, refer to the [eksctl documentation](https://eksctl.io/usage/kms-encryption/). -### eksctl cluster YAML +### Create the cluster using eksctl Execute the following script, which creates a file called `cluster.yaml` with the following contents: @@ -119,7 +143,7 @@ apiVersion: eksctl.io/v1alpha5 metadata: name: ${CLUSTER_NAME:-camunda-cluster} # e.g. camunda-cluster region: ${REGION:-eu-central-1} # e.g. eu-central-1 - version: "1.30" + version: "1.31" availabilityZones: - ${REGION:-eu-central-1}c # e.g. eu-central-1c, the minimal is two distinct Availability Zones (AZs) within the region - ${REGION:-eu-central-1}b @@ -128,20 +152,30 @@ cloudWatch: clusterLogging: {} iam: vpcResourceControllerPolicy: true - withOIDC: true # enables and configures OIDC for IAM Roles for Service Accounts (IRSA) addons: - name: vpc-cni resolveConflicts: overwrite version: latest + useDefaultPodIdentityAssociations: true + - name: kube-proxy resolveConflicts: overwrite version: latest - - name: aws-ebs-csi-driver # automatically configures IRSA + useDefaultPodIdentityAssociations: true + + - name: aws-ebs-csi-driver resolveConflicts: overwrite version: latest + useDefaultPodIdentityAssociations: true + - name: coredns resolveConflicts: overwrite version: latest + useDefaultPodIdentityAssociations: true + + - name: eks-pod-identity-agent + version: latest + kind: ClusterConfig kubernetesNetworkConfig: ipFamily: IPv4 @@ -149,7 +183,13 @@ managedNodeGroups: - amiFamily: AmazonLinux2 desiredCapacity: ${NODE_COUNT:-4} # number of default nodes spawned if no cluster autoscaler is used disableIMDSv1: true - disablePodIMDS: true + iam: + withAddonPolicies: + albIngress: true + autoScaler: true + cloudWatch: true + ebs: true + awsLoadBalancerController: true instanceSelector: {} instanceTypes: - ${NODE_TYPE:-m6i.xlarge} # node type that is selected as default @@ -194,25 +234,27 @@ EOF With eksctl you can execute the previously created file as follows and takes 25-30 minutes. ```shell +cat cluster.yaml + eksctl create cluster --config-file cluster.yaml ``` ### (Optional) IAM access management -The access concerning Kubernetes is split into two layers. One being the IAM permissions allowing general Amazon EKS usage, like accessing the Amazon EKS UI, generating the Amazon EKS access via the AWS CLI, etc. 
The other being the cluster access itself determining which access the user should have within the Kubernetes cluster. +Kubernetes access is divided into two distinct layers. The **first layer** involves **AWS IAM permissions**, which enable basic Amazon EKS functionalities such as using the Amazon EKS UI and generating Amazon EKS access through the AWS CLI. The **second layer** provides **cluster access**, determining the user's permissions within the Kubernetes cluster. -Therefore, we first have to supply the user with the sufficient IAM permissions and afterward assign the user a role within the Kubernetes cluster. +As a result, we must initially grant the user adequate AWS IAM permissions and subsequently assign them a specific role within the Kubernetes cluster for proper access management.
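+Before adding new mappings, it can help to see which AWS identities are already mapped to Kubernetes roles in the cluster; a quick check (a sketch reusing the `CLUSTER_NAME` and `REGION` variables defined earlier) might look like this:
+
+```shell
+# list the existing IAM identity mappings of the cluster
+eksctl get iamidentitymapping --cluster "$CLUSTER_NAME" --region "$REGION"
+```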
    -

    IAM Permissions

    + First Layer: IAM Permissions

    A minimum set of permissions is required to gain access to an Amazon EKS cluster. These two permissions allow a user to execute `aws eks update-kubeconfig` to update the local `kubeconfig` with cluster access to the Amazon EKS cluster. The policy should look as follows and can be restricted further to specific Amazon EKS clusters if required: -```shell +```json cat <./policy-eks.json { "Version": "2012-10-17", @@ -233,7 +275,7 @@ EOF Via the AWS CLI, you can run the following to create the policy above in IAM. ```shell - aws iam create-policy --policy-name "BasicEKSPermissions" --policy-document file://policy-eks.json +aws iam create-policy --policy-name "BasicEKSPermissions" --policy-document file://policy-eks.json ``` The created policy `BasicEKSPermissions` has to be assigned to a group, a role, or a user to work. Consult the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policy-cli) to find the correct approach for you. @@ -243,19 +285,19 @@ The created policy `BasicEKSPermissions` has to be assigned to a group, a role,

    -

    Cluster Access

    + Second Layer: Cluster Access

    By default, the user creating the Amazon EKS cluster has admin access. To allow other users to access it, we have to adjust the `aws-auth` configmap. This can either be done manually via `kubectl` or via `eksctl`. In the following sections, we explain how to do this. -##### eksctl +#### eksctl With `eksctl`, you can create an AWS IAM user to Kubernetes role mapping with the following command: ```shell eksctl create iamidentitymapping \ --cluster=$CLUSTER_NAME \ - --region=eu-central-1 \ + --region=$REGION \ --arn arn:aws:iam::0123456789:user/ops-admin \ --group system:masters \ --username admin @@ -270,7 +312,7 @@ Example: ```shell eksctl create iamidentitymapping \ --cluster=$CLUSTER_NAME \ - --region=eu-central-1 \ + --region=$REGION \ --arn arn:aws:iam::0123456789:user/ops-admin \ --group system:masters \ --username admin @@ -278,7 +320,7 @@ eksctl create iamidentitymapping \ More information about usage and other configuration options can be found in the [eksctl documentation](https://eksctl.io/usage/iam-identity-mappings/). -##### kubectl +#### kubectl The same can also be achieved by using `kubectl` and manually adding the mapping as part of the `mapRoles` or `mapUsers` section. @@ -291,152 +333,107 @@ For detailed examples, review the [documentation provided by AWS](https://docs.a

    -## PostgreSQL database - -Creating a Postgres database can be solved in various ways. For example, by using the UI or the AWS CLI. -In this guide, we provide you with a reproducible setup. Therefore, we use the CLI. For creating PostgreSQL with the UI, refer to [the AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_GettingStarted.CreatingConnecting.PostgreSQL.html). - -The resulting PostgreSQL instance and default database `camunda` is intended to be used with Keycloak. You may manually add extra databases after creation for Identity with multi-tenancy. -This will not be covered in this guide as the Identity default for multi-tenancy is to be disabled. +### Access the created EKS cluster -1. Identify the VPC associated with the Amazon EKS cluster: +Access the Amazon EKS cluster via the `AWS CLI` using the following command: ```shell -export VPC_ID=$(aws ec2 describe-vpcs \ - --query "Vpcs[?Tags[?Key=='alpha.eksctl.io/cluster-name']|[?Value=='$CLUSTER_NAME']].VpcId" \ - --output text) +aws eks --region "$REGION" update-kubeconfig --name "$CLUSTER_NAME" --alias "$CLUSTER_NAME" ``` -2. The variable `VPC_ID` contains the output value required for the next step (the value should look like this: `vpc-1234567890`). -3. Create a security group within the VPC to allow connection to the Aurora PostgreSQL instance: +After updating the kubeconfig, verify your connection to the cluster with `kubectl`: ```shell -export GROUP_ID=$(aws ec2 create-security-group \ - --group-name aurora-postgres-sg \ - --description "Security Group to allow the Amazon EKS cluster to connect to Aurora PostgreSQL" \ - --vpc-id $VPC_ID \ - --output text) +kubectl get nodes ``` -4. The variable `GROUP_ID` contains the output (the value should look like this: `sg-1234567890`). -5. Create a security Ingress rule to allow access to PostgreSQL. +Create a namespace for Camunda: ```shell -aws ec2 authorize-security-group-ingress \ - --group-id $GROUP_ID \ - --protocol tcp \ - --port 5432 \ - --cidr $CIDR - # the CIDR range should be exactly the same value as in the `cluster.yaml` +kubectl create namespace camunda ``` -6. Retrieve subnets of the VPC to create a database subnet group: +In the remainder of the guide, we reference the `camunda` namespace to create some required resources in the Kubernetes cluster, such as secrets or one-time setup jobs. -```shell -export SUBNET_IDS=$(aws ec2 describe-subnets \ - --filter Name=vpc-id,Values=$VPC_ID \ - --query "Subnets[?Tags[?Key=='aws:cloudformation:logical-id']|[?contains(Value, 'Private')]].SubnetId" \ - --output text | expand -t 1) -``` +### Check existing StorageClasses -7. The variable `SUBNET_IDS` contains the output values of the private subnets (the value should look like this: `subnet-0123456789 subnet-1234567890 subnet-9876543210`). +We recommend using **gp3** volumes with Camunda 8 (see [volume performance](./amazon-eks.md#volume-performance)). It may be necessary to create the `gp3` StorageClass, as the default configuration only includes **gp2**. For detailed information, refer to the [AWS documentation](https://aws.amazon.com/ebs/general-purpose/). -8. 
Create a database subnet group to associate PostgreSQL within the existing VPC: +To see the available StorageClasses in your Kubernetes cluster, including which one is set as default, use the following command: -```shell -aws rds create-db-subnet-group \ - --db-subnet-group-name camunda-postgres \ - --db-subnet-group-description "Subnet for Camunda PostgreSQL" \ - --subnet-ids $(echo $SUBNET_IDS) +```bash +kubectl describe storageclass ``` -9. Create a PostgreSQL cluster within a private subnet of the VPC. +To check if `gp3` is set as the default StorageClass, look for the annotation `storageclass.kubernetes.io/is-default-class: "true"` in the output of the previous command. -For the latest Camunda-supported PostgreSQL engine version, check our [documentation](../../../../../reference/supported-environments.md#camunda-8-self-managed). +If `gp3` is not installed, or is not set as the default StorageClass, complete the following steps to install it and set it as default: -```shell -aws rds create-db-cluster \ - --db-cluster-identifier $RDS_NAME \ - --engine aurora-postgresql \ - --engine-version $POSTGRESQL_VERSION \ - --master-username $PG_USERNAME \ - --master-user-password $PG_PASSWORD \ - --vpc-security-group-ids $GROUP_ID \ - --availability-zones $(echo $ZONES) \ - --database-name $DEFAULT_DB_NAME \ - --db-subnet-group-name camunda-postgres -``` +1. Create the `gp3` StorageClass: -More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-cluster.html). + ```shell + cat << EOF | kubectl apply -f - + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: ebs-sc + annotations: + storageclass.kubernetes.io/is-default-class: "true" + provisioner: ebs.csi.aws.com + parameters: + type: gp3 + reclaimPolicy: Retain + volumeBindingMode: WaitForFirstConsumer + EOF + ``` -10. Wait for the PostgreSQL cluster to be ready: + This manifest defines an `ebs-sc` StorageClass to be created. This StorageClass uses the `ebs.csi.aws.com` provisioner, which is supplied by the **aws-ebs-csi-driver** addon installed during cluster creation. For more information, refer to the [official AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html). -```shell -aws rds wait db-cluster-available \ - --db-cluster-identifier $RDS_NAME -``` +2. Modify the `gp2` StorageClass to mark it as a non-default StorageClass: -11. Create a database instance within the DB cluster. + ```shell + kubectl patch storageclass gp2 -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' + ``` -The `engine-version` must be the same as the previously created PostgreSQL cluster. +3. Verify the changes by running the `kubectl get storageclass` command. -```shell -aws rds create-db-instance \ - --db-instance-identifier $RDS_NAME \ - --db-cluster-identifier $RDS_NAME \ - --engine aurora-postgresql \ - --engine-version $POSTGRESQL_VERSION \ - --no-publicly-accessible \ - --db-instance-class db.t3.medium -``` +After executing these commands, you will have a `gp3` StorageClass set as the default and the `gp2` StorageClass marked as non-default, provided that **gp2** was already present. -More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-instance.html). +### Domain deployment requirements -12. 
Wait for changes to be applied: +If you plan to deploy Camunda using an external domain associated with an external certificate, you will need to set up some IAM policies to allow both **external-dns** and **cert-manager** to interact with Route 53, which controls the DNS. -```shell -aws rds wait db-instance-available \ - --db-instance-identifier $RDS_NAME -``` +By default, the cluster uses **Pod Identity** to manage IAM roles for your applications. This means that service accounts are associated with IAM roles, allowing your pods to securely access AWS resources without hardcoding credentials. For more information on configuring Pod Identity, refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html). -### Verifying connectivity between the Amazon EKS cluster and the PostgreSQL database +#### Enable OIDC and IAM roles for Service Accounts (IRSA) -1. Retrieve the writer endpoint of the DB cluster. +To [enable OpenID Connect (OIDC) and IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) on your cluster, complete the following steps: -```shell -export DB_HOST=$(aws rds describe-db-cluster-endpoints \ - --db-cluster-identifier $RDS_NAME \ - --query "DBClusterEndpoints[?EndpointType=='WRITER'].Endpoint" \ - --output text) -``` - -2. Start Ubuntu container in interactive mode within the Amazon EKS cluster. +1. Determine the OIDC issuer ID for your cluster. -```shell -kubectl run ubuntu --rm -i --tty --image ubuntu --env="DB_HOST=$DB_HOST" --env="PG_USERNAME=$PG_USERNAME" -- bash -``` + First, ensure that your EKS cluster is set up with an OIDC provider. The following command should show you the OIDC issuer: -3. Install required dependencies: + ```bash + export oidc_id=$(aws eks describe-cluster --name "$CLUSTER_NAME" --query "cluster.identity.oidc.issuer" --output text | cut -d '/' -f 5) + echo "$oidc_id" + ``` -```shell -apt update && apt install -y postgresql-client -``` + Determine whether an IAM OIDC provider with your cluster’s issuer ID is already in your account: -4. Connect to PostgreSQL database: + ```bash + aws iam list-open-id-connect-providers | grep $oidc_id | cut -d "/" -f4 + ``` -```shell -psql \ - --host=$DB_HOST \ - --username=$PG_USERNAME \ - --port=5432 \ - --dbname=postgres -``` + If output is returned, an IAM OIDC provider is already set up for your cluster, so you can skip the next step. If no output is returned, you will need to set up an IAM OIDC provider for your cluster. -Verify that the connection is successful. +1. Create an IAM OIDC identity provider for your cluster with the following command: -## Prerequisites for Camunda 8 installation + ```bash + eksctl utils associate-iam-oidc-provider --region "$REGION" --cluster "$CLUSTER_NAME" --approve + ``` -### Policy for external-dns +#### Policy for external-dns The following instructions are based on the [external-dns](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md) guide concerning the AWS setup and only covers the required IAM setup. The Helm chart will be installed in the [follow-up guide](./eks-helm.md). 
@@ -479,11 +476,13 @@ aws iam create-policy --policy-name "AllowExternalDNSUpdates" --policy-document export EXTERNAL_DNS_POLICY_ARN=$(aws iam list-policies \ --query 'Policies[?PolicyName==`AllowExternalDNSUpdates`].Arn' \ --output text) + +echo "EXTERNAL_DNS_POLICY_ARN=$EXTERNAL_DNS_POLICY_ARN" ``` The `EXTERNAL_DNS_POLICY_ARN` will be used in the next step to create a role mapping between the Kubernetes Service Account and AWS IAM Service Account. -Using `eksctl` allows us to create the required role mapping for external-dns. +Use `eksctl` to create the required role mapping for external-dns: ```shell eksctl create iamserviceaccount \ @@ -500,13 +499,15 @@ eksctl create iamserviceaccount \ export EXTERNAL_DNS_IRSA_ARN=$(aws iam list-roles \ --query "Roles[?RoleName=='external-dns-irsa'].Arn" \ --output text) + +echo "EXTERNAL_DNS_IRSA_ARN=$EXTERNAL_DNS_IRSA_ARN" ``` The variable `EXTERNAL_DNS_IRSA_ARN` contains the `arn` (it should look like this: `arn:aws:iam::XXXXXXXXXXXX:role/external-dns-irsa`). Alternatively, you can deploy the Helm chart first and then use `eksctl` with the option `--override-existing-serviceaccounts` instead of `--role-only` to reconfigure the created service account. -### Policy for cert-manager +#### Policy for cert-manager The following instructions are taken from the [cert-manager](https://cert-manager.io/docs/configuration/acme/dns01/route53/) guide concerning the AWS setup and only covers the required IAM setup. The Helm chart will be installed in the [follow-up guide](./eks-helm.md). @@ -553,11 +554,13 @@ aws iam create-policy --policy-name "AllowCertManagerUpdates" --policy-document export CERT_MANAGER_POLICY_ARN=$(aws iam list-policies \ --query 'Policies[?PolicyName==`AllowCertManagerUpdates`].Arn' \ --output text) + +echo "CERT_MANAGER_POLICY_ARN=$CERT_MANAGER_POLICY_ARN" ``` The `CERT_MANAGER_POLICY_ARN` is used in the next step to create a role mapping between the Amazon EKS Service Account and the AWS IAM Service Account. -Using `eksctl` allows us to create the required role mapping for cert-manager. +Use `eksctl` to create the required role mapping for cert-manager: ```shell eksctl create iamserviceaccount \ @@ -574,39 +577,419 @@ eksctl create iamserviceaccount \ export CERT_MANAGER_IRSA_ARN=$(aws iam list-roles \ --query "Roles[?RoleName=='cert-manager-irsa'].Arn" \ --output text) + +echo "CERT_MANAGER_IRSA_ARN=$CERT_MANAGER_IRSA_ARN" ``` The variable `CERT_MANAGER_IRSA_ARN` will contain the `arn` (it should look like this: `arn:aws:iam::XXXXXXXXXXXX:role/cert-manager-irsa`). Alternatively, you can deploy the Helm chart first and then use `eksctl` with the option `--override-existing-serviceaccounts` instead of `--role-only` to reconfigure the created service account. -### StorageClass +## 3. PostgreSQL database + +Creating a PostgreSQL database can be accomplished through various methods, such as using the AWS Management Console or the AWS CLI. This guide focuses on providing a reproducible setup using the CLI. For information on creating PostgreSQL using the UI, refer to the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_GettingStarted.CreatingConnecting.PostgreSQL.html). + +:::info Optional service + +If you don't want to use the Amazon RDS Aurora managed service for PostgreSQL, you can skip this section. +However, note that you may need to adjust the following instructions to remove references to it. 
+ +If you choose not to use this service, you'll need to either provide a managed PostgreSQL service or use the internal deployment by the Camunda Helm chart in Kubernetes. + +::: -We recommend using gp3 volumes with Camunda 8 (see [volume performance](./amazon-eks.md#volume-performance)). It is necessary to create the StorageClass as the default configuration only includes `gp2`. For detailed information, refer to the [AWS documentation](https://aws.amazon.com/ebs/general-purpose/). +The following components use the PostgreSQL database: -The following steps create the `gp3` StorageClass: +- Keycloak +- Identity +- Web Modeler -1. Create `gp3` StorageClass. +### Configuration ```shell -cat << EOF | kubectl apply -f - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ebs-sc - annotations: - storageclass.kubernetes.io/is-default-class: "true" -provisioner: ebs.csi.aws.com -parameters: - type: gp3 -reclaimPolicy: Retain -volumeBindingMode: WaitForFirstConsumer -EOF +##### Postgres parameters + +# Name for the Postgres DB cluster and instance +export RDS_NAME=camunda-postgres +# Postgres DB admin username +export AURORA_USERNAME=secret_user +# Postgres DB password of the admin user +export AURORA_PASSWORD=camundarocks123 +# The PostgreSQL version +export POSTGRESQL_VERSION=15.8 + +# For each database, we need to generate a username, password and database name +export DB_KEYCLOAK_NAME="keycloak_db" +export DB_KEYCLOAK_USERNAME="keycloak-pg" +export DB_KEYCLOAK_PASSWORD="CHANGE-ME-PLEASE" + +export DB_IDENTITY_NAME="identity_db" +export DB_IDENTITY_USERNAME="identity-pg" +export DB_IDENTITY_PASSWORD="CHANGE-ME-PLEASE" + +export DB_WEBMODELER_NAME="webmodeler_db" +export DB_WEBMODELER_USERNAME="webmodeler-pg" +export DB_WEBMODELER_PASSWORD="CHANGE-ME-PLEASE" ``` -2. Modify the `gp2` storage class to mark it as a non-default storage class: +### Step-by-step setup + +1. Identify the VPC associated with the Amazon EKS cluster: + + ```shell + export VPC_ID=$(aws ec2 describe-vpcs \ + --query "Vpcs[?Tags[?Key=='alpha.eksctl.io/cluster-name']|[?Value=='$CLUSTER_NAME']].VpcId" \ + --output text) + + echo "VPC_ID=$VPC_ID" + ``` + + The variable `VPC_ID` contains the output value required for the next step (the value should look like this: `vpc-1234567890`). + +2. Create a security group within the VPC to allow connections to the Aurora PostgreSQL instance: + + ```shell + export GROUP_ID_AURORA=$(aws ec2 create-security-group \ + --group-name aurora-postgres-sg \ + --description "Security Group to allow the Amazon EKS cluster $CLUSTER_NAME to connect to Aurora PostgreSQL $RDS_NAME" \ + --vpc-id $VPC_ID \ + --output text) + + echo "GROUP_ID_AURORA=$GROUP_ID_AURORA" + ``` + + The variable `GROUP_ID_AURORA` contains the output (the value should look like this: `sg-1234567890`). + +3. Create a security ingress rule to allow access to PostgreSQL: + + ```shell + aws ec2 authorize-security-group-ingress \ + --group-id $GROUP_ID_AURORA \ + --protocol tcp \ + --port 5432 \ + --cidr $CIDR + # The CIDR range should match the value in the `cluster.yaml` + ``` + +4. 
Retrieve subnets of the VPC to create a database subnet group: + + ```shell + export SUBNET_IDS=$(aws ec2 describe-subnets \ + --filter Name=vpc-id,Values=$VPC_ID \ + --query "Subnets[?Tags[?Key=='aws:cloudformation:logical-id']|[?contains(Value, 'Private')]].SubnetId" \ + --output text | expand -t 1) + + echo "SUBNET_IDS=$SUBNET_IDS" + ``` + + The variable `SUBNET_IDS` contains the output values of the private subnets (the value should look like this: `subnet-0123456789 subnet-1234567890 subnet-9876543210`). + +5. Create a database subnet group to associate PostgreSQL within the existing VPC: + + ```shell + aws rds create-db-subnet-group \ + --db-subnet-group-name camunda-postgres \ + --db-subnet-group-description "Subnet for Camunda PostgreSQL $RDS_NAME" \ + --subnet-ids $(echo "$SUBNET_IDS") + ``` + +6. Create a PostgreSQL cluster within a private subnet of the VPC: + + For the latest Camunda-supported PostgreSQL engine version, check our [documentation](../../../../../reference/supported-environments.md#camunda-8-self-managed). + + ```shell + aws rds create-db-cluster \ + --db-cluster-identifier $RDS_NAME \ + --engine aurora-postgresql \ + --engine-version $POSTGRESQL_VERSION \ + --master-username $AURORA_USERNAME \ + --master-user-password $AURORA_PASSWORD \ + --vpc-security-group-ids $GROUP_ID_AURORA \ + --availability-zones $(echo $ZONES) \ + --db-subnet-group-name camunda-postgres + ``` + + More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-cluster.html). + +7. Wait for the PostgreSQL cluster to be ready: + + ```shell + aws rds wait db-cluster-available \ + --db-cluster-identifier $RDS_NAME + ``` + +8. Create a database instance within the DB cluster: + + Ensure that the `engine-version` matches the previously created PostgreSQL cluster. + + ```shell + aws rds create-db-instance \ + --db-instance-identifier $RDS_NAME \ + --db-cluster-identifier $RDS_NAME \ + --engine aurora-postgresql \ + --engine-version $POSTGRESQL_VERSION \ + --no-publicly-accessible \ + --db-instance-class db.t3.medium + ``` + + More configuration options can be found in the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-instance.html). + +9. Wait for changes to be applied: + + ```shell + aws rds wait db-instance-available \ + --db-instance-identifier $RDS_NAME + ``` + + This command will wait until the instance is ready. + +### Create the databases + +Now that you have a database, you need to create dedicated databases for each Camunda component along with associated users that have configured access. + +We will also use this step to verify connectivity to the database from the created EKS cluster. The creation of the databases will be performed by spawning a pod job in the Kubernetes cluster, using the main user to create the different databases. + +1. Retrieve the writer endpoint of the DB cluster: + + ```shell + export DB_HOST=$(aws rds describe-db-cluster-endpoints \ + --db-cluster-identifier $RDS_NAME \ + --query "DBClusterEndpoints[?EndpointType=='WRITER'].Endpoint" \ + --output text) + + echo "DB_HOST=$DB_HOST" + ``` + +2. 
Create a secret that references the environment variables: + + ```bash + kubectl create secret generic setup-db-secret --namespace camunda \ + --from-literal=AURORA_ENDPOINT="$DB_HOST" \ + --from-literal=AURORA_PORT="5432" \ + --from-literal=AURORA_DB_NAME="postgres" \ + --from-literal=AURORA_USERNAME="$AURORA_USERNAME" \ + --from-literal=AURORA_PASSWORD="$AURORA_PASSWORD" \ + --from-literal=DB_KEYCLOAK_NAME="$DB_KEYCLOAK_NAME" \ + --from-literal=DB_KEYCLOAK_USERNAME="$DB_KEYCLOAK_USERNAME" \ + --from-literal=DB_KEYCLOAK_PASSWORD="$DB_KEYCLOAK_PASSWORD" \ + --from-literal=DB_IDENTITY_NAME="$DB_IDENTITY_NAME" \ + --from-literal=DB_IDENTITY_USERNAME="$DB_IDENTITY_USERNAME" \ + --from-literal=DB_IDENTITY_PASSWORD="$DB_IDENTITY_PASSWORD" \ + --from-literal=DB_WEBMODELER_NAME="$DB_WEBMODELER_NAME" \ + --from-literal=DB_WEBMODELER_USERNAME="$DB_WEBMODELER_USERNAME" \ + --from-literal=DB_WEBMODELER_PASSWORD="$DB_WEBMODELER_PASSWORD" + ``` + + This command creates a secret named `setup-db-secret` and dynamically populates it with the values from your environment variables. + + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-db-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. + +3. Save the following manifest to a file, for example, `setup-postgres-create-db.yml`: + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/setup-postgres-create-db.yml + ``` + +4. Apply the manifest: + + ```bash + kubectl apply -f setup-postgres-create-db.yml --namespace camunda + ``` + + Once the secret is created, the **Job** manifest from the previous step can consume this secret to securely access the database credentials. + +5. Once the job is created, monitor its progress using: + + ```bash + kubectl get job/create-setup-user-db --namespace camunda --watch + ``` + + Once the job shows as `Completed`, the users and databases will have been successfully created. + +6. View the logs of the job to confirm that the users were created and privileges were granted successfully: + + ```bash + kubectl logs job/create-setup-user-db --namespace camunda + ``` + +7. Cleanup the resources: + + ```bash + kubectl delete job create-setup-user-db --namespace camunda + kubectl delete secret setup-db-secret --namespace camunda + ``` + + Running these commands will clean up both the job and the secret, ensuring that no unnecessary resources remain in the cluster. + +## 4. OpenSearch domain + +Creating an OpenSearch domain can be accomplished through various methods, such as using the AWS Management Console or the AWS CLI. This guide focuses on providing a reproducible setup using the CLI. For information on creating an OpenSearch domain using the UI, refer to the [AWS OpenSearch documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/create-managed-domain.html). + +The resulting OpenSearch domain is intended for use with the Camunda platform, the following components utilize OpenSearch: + +- Operate +- Optimize +- Tasklist +- Zeebe + +:::info Optional service + +If you don't want to use the Amazon OpenSearch managed service for OpenSearch, you can skip this section. +However, note that you may need to adjust the following instructions to remove references to it. 
+ +If you choose not to use this service, you'll need to either provide a managed OpenSearch or Elasticsearch service or use the internal deployment by the Camunda Helm chart in Kubernetes. + +::: + +:::note Migration to OpenSearch is not supported + +Using Amazon OpenSearch Service requires [setting up a new Camunda installation](/self-managed/setup/overview.md). Migration from previous Camunda versions or Elasticsearch environments is currently not supported. Switching between Elasticsearch and OpenSearch, in either direction, is also not supported. + +::: + +### Configuration ```shell -kubectl patch storageclass gp2 -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' +##### OpenSearch parameters + +# Name for the OpenSearch domain +export OPENSEARCH_NAME=camunda-opensearch ``` + +:::caution Network-based security + +The standard deployment for OpenSearch relies on the first layer of security, which is the network. +While this setup allows easy access, it may expose sensitive data. To enhance security, consider implementing IAM Roles for Service Accounts (IRSA) to restrict access to the OpenSearch cluster, providing a more secure environment. +For more information, see the [Amazon OpenSearch Service fine-grained access control documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html#fgac-access-policies). + +::: + +### Step-by-step setup + +1. Identify the VPC associated with the Amazon EKS cluster: + + ```shell + export VPC_ID=$(aws ec2 describe-vpcs \ + --query "Vpcs[?Tags[?Key=='alpha.eksctl.io/cluster-name']|[?Value=='$CLUSTER_NAME']].VpcId" \ + --output text) + + echo "VPC_ID=$VPC_ID" + ``` + + The variable `VPC_ID` contains the output value required for the next steps (the value should look like this: `vpc-1234567890`). + +2. Create a security group within the VPC to allow connections to the OpenSearch domain: + + ```shell + export GROUP_ID_OPENSEARCH=$(aws ec2 create-security-group \ + --group-name opensearch-sg \ + --description "Security Group to allow internal connections From EKS $CLUSTER_NAME to OpenSearch $OPENSEARCH_NAME" \ + --vpc-id $VPC_ID \ + --output text) + + echo "GROUP_ID_OPENSEARCH=$GROUP_ID_OPENSEARCH" + ``` + + The variable `GROUP_ID_OPENSEARCH` contains the output (the value should look like this: `sg-1234567890`). + +3. Create a security ingress rule to allow access to OpenSearch over HTTPS (port 443) from within the VPC: + + ```shell + aws ec2 authorize-security-group-ingress \ + --group-id $GROUP_ID_OPENSEARCH \ + --protocol tcp \ + --port 443 \ + --cidr $CIDR # Replace with the CIDR range of your EKS cluster + ``` + + Ensure that the CIDR range is appropriate for your environment. OpenSearch uses `443` as the HTTPS transport port. + +4. Retrieve the private subnets of the VPC: + + ```shell + export SUBNET_IDS=$(aws ec2 describe-subnets \ + --filter Name=vpc-id,Values=$VPC_ID \ + --query "Subnets[?Tags[?Key=='aws:cloudformation:logical-id']|[?contains(Value, 'Private')]].SubnetId" \ + --output text | expand -t 1) + + # format it with commas + export SUBNET_IDS=$(echo "$SUBNET_IDS" | sed 's/ /,/g') + + echo "SUBNET_IDS=$SUBNET_IDS" + ``` + + The variable `SUBNET_IDS` now contains the output values of the private subnets (the value should look like this: `subnet-0123456789,subnet-1234567890`). + +5.
Create the OpenSearch domain: + + ```shell + aws opensearch create-domain --domain-name $OPENSEARCH_NAME \ + --engine-version OpenSearch_2.15 \ + --cluster-config "InstanceType=t3.medium.search,InstanceCount=3,ZoneAwarenessEnabled=true,ZoneAwarenessConfig={AvailabilityZoneCount=3}" \ + --node-to-node-encryption-options Enabled=true \ + --ebs-options "EBSEnabled=true,VolumeType=gp3,VolumeSize=50,Iops=3000,Throughput=125" \ + --encryption-at-rest-options Enabled=true \ + --access-policies "{ \"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"*\" }, \"Action\": \"es:*\", \"Resource\": \"arn:aws:es:$REGION:*:domain/$OPENSEARCH_NAME/*\" }]}" \ + --vpc-options "SubnetIds=${SUBNET_IDS},SecurityGroupIds=${GROUP_ID_OPENSEARCH}" + ``` + + - **Domain Name**: `$OPENSEARCH_NAME` is the name of the OpenSearch domain being created. + - **Engine Version**: Uses OpenSearch version `2.15`. + - **Cluster Configuration**: + - `InstanceType=t3.medium.search` specifies the instance type for the domain. + - `InstanceCount=3` creates a cluster with 3 instances. + - `ZoneAwarenessEnabled=true` and `ZoneAwarenessConfig={AvailabilityZoneCount=3}` enable zone awareness and spread the instances across 3 availability zones to improve fault tolerance. + - **Node-to-Node Encryption**: Encryption for traffic between nodes in the OpenSearch cluster is enabled (`Enabled=true`). + - **EBS Options**: + - `EBSEnabled=true` enables Elastic Block Store (EBS) for storage. + - `VolumeType=gp3` specifies the volume type as `gp3` with 50 GiB of storage. + - `Iops=3000` and `Throughput=125` set the IOPS and throughput for the storage. + - **Encryption at Rest**: Data stored in the domain is encrypted at rest (`Enabled=true`). + - **Access Policies**: The default access policy allows all actions (`es:*`) on resources within the domain for any AWS account (`"Principal": { "AWS": "*" }`). This is scoped to the OpenSearch domain resources using the `arn:aws:es:$REGION:*:domain/$OPENSEARCH_NAME/*` resource ARN. + - **VPC Options**: The domain is deployed within the specified VPC, restricted to the provided subnets (`SubnetIds=${SUBNET_IDS}`) and associated security group (`SecurityGroupIds=${GROUP_ID_OPENSEARCH}`). + + This configuration creates a secure OpenSearch domain with encryption both in transit (between nodes) and at rest, zonal fault tolerance, and sufficient storage performance using `gp3` volumes. The access is restricted to resources in the VPC of the EKS cluster and is governed by the specified security group. + +6. Wait for the OpenSearch domain to be active: + + ```shell + while [ "$(aws opensearch describe-domain --domain-name $OPENSEARCH_NAME --query 'DomainStatus.Processing' --output text)" != "False" ]; do echo "Waiting for OpenSearch domain to become available, this can take up to 20-30 minutes..."; sleep 30; done && echo "OpenSearch domain is now available!" + ``` + +7. Retrieve the endpoint of the OpenSearch domain: + + ```shell + export OPENSEARCH_HOST=$(aws opensearch describe-domains --domain-names $OPENSEARCH_NAME --query "DomainStatusList[0].Endpoints.vpc" --output text) + + echo "OPENSEARCH_HOST=$OPENSEARCH_HOST" + ``` + + This endpoint will be used to connect to your OpenSearch domain. + +### Verify connectivity from within the EKS cluster + +To verify that the OpenSearch domain is accessible from within your Amazon EKS cluster, follow these steps: + +1.
Deploy a temporary pod to test connectivity: + + Create a temporary pod using the `amazonlinux` image in the `camunda` namespace, install `curl`, and test the connection to OpenSearch—all in a single command: + + ```bash + kubectl run amazonlinux-opensearch -n camunda --rm -i --tty --image amazonlinux -- sh -c "curl -XGET https://$OPENSEARCH_HOST/_cluster/health" + ``` + +2. Verify the response: + + If everything is set up correctly, you should receive a response from the OpenSearch service indicating its health status. + +You have successfully set up an OpenSearch domain that is accessible from within your Amazon EKS cluster. For further details, refer to the [OpenSearch documentation](https://opensearch.org/docs/latest/index/). + +## 5. Install Camunda 8 using the Helm chart + +Now that you've exported the necessary values, you can proceed with installing Camunda 8 using Helm charts. Follow the guide [Camunda 8 on Kubernetes](./eks-helm.md) for detailed instructions on deploying the platform to your Kubernetes cluster. diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/irsa.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/irsa.md index bee98384ac6..bd12fb27a28 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/irsa.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/irsa.md @@ -1,606 +1,177 @@ --- id: irsa -title: "IAM roles for service accounts" +title: "Troubleshooting IAM Roles for Service Accounts (IRSA)" description: "Learn how to configure IAM roles for service accounts (IRSA) within AWS to authenticate workloads." --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -IAM roles for service accounts (IRSA) is a way within AWS to authenticate workloads in Amazon EKS (Kubernetes), for example, to execute signed requests against AWS services. This is a replacement for basic auth and is generally considered a [best practice by AWS](https://aws.github.io/aws-eks-best-practices/security/docs/iam/). +## IRSA configuration validation of a Camunda 8 helm deployment -The following considers the managed services by AWS and provided examples are in Terraform syntax. +The [c8-sm-checks](/self-managed/operational-guides/troubleshooting/troubleshooting.md#anomaly-detection-scripts) utility is designed to validate IAM Roles for Service Accounts ([IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html)) configuration in EKS Kubernetes clusters on AWS. It ensures that key components in a Camunda 8 deployment, such as PostgreSQL and OpenSearch, are properly configured to securely interact with AWS resources via the appropriate IAM roles. -## Aurora PostgreSQL +### IRSA check script -[Aurora PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.AuroraPostgreSQL.html) is a managed AWS PostgreSQL–compatible service. +The `/checks/kube/aws-irsa.sh` script verifies IRSA setup in your AWS Kubernetes environment by performing two types of checks: -### Setup +1. **Configuration Verification**: Ensures key IRSA configurations are correctly set, using specific checks on IAM roles, policies, and mappings to service accounts. +2. **Namespace Commands and Job Execution**: Runs commands within the specified namespace using Kubernetes jobs (if necessary) to verify network and access configurations. 
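+For example, a first validation pass against an existing deployment could look like the following sketch (the repository URL and script path are those referenced in this section; the `camunda` namespace is only an example value):
+
+```bash
+# fetch the check scripts and run the IRSA validation against a namespace
+git clone https://github.com/camunda/c8-sm-checks.git
+cd c8-sm-checks
+./checks/kube/aws-irsa.sh -n camunda
+```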
-When using the Terraform provider of [AWS](https://registry.terraform.io/providers/hashicorp/aws/latest) with the resource [aws_rds_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/rds_cluster) to create a new rational database (RDS) or Aurora cluster, supply the argument `iam_database_authentication_enabled = true` to enable the IAM roles functionality. See the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) for availability and limitations. +This utility is non-intrusive and will not alter any deployment settings. +If the `-s` flag is provided, the script skips spawning debugging pods for network flow verification, which can be helpful if pod creation is restricted or not required for troubleshooting. -#### AWS policy +:::info Compatibility with Helm Deployments -An AWS policy (later assigned to a role) is required to allow assuming a database user within a managed database. See the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html) for policy details. +The script relies on Helm chart values and is compatible only with deployments installed or updated through standard Helm commands. It will not work with other deployment methods, such as those using `helm template` (e.g., [ArgoCD](https://argo-cd.readthedocs.io/en/latest/faq/#after-deploying-my-helm-application-with-argo-cd-i-cannot-see-it-with-helm-ls-and-other-helm-commands)). - - +Compatibility is confirmed for [Camunda Helm chart releases version 11 and above](https://artifacthub.io/packages/helm/camunda/camunda-platform). -To create the AWS policy using Terraform, you can define it with the [aws_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) resource. Here’s an example configuration: - -```json -resource "aws_iam_policy" "rds_policy" { - name = "rds-policy" - - policy = jsonencode({ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "rds-db:connect" - ], - "Resource": [ - "arn:aws:rds-db:::dbuser:/" - ] - } - ] - }) -} -``` - -Replace ``, ``, ``, and `` with the appropriate values for your AWS environment. - - - - - -To create the AWS policy using the AWS CLI, use the `aws iam create-policy` command: - -```bash -aws iam create-policy \ - --policy-name rds-policy \ - --policy-document '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "rds-db:connect" - ], - "Resource": [ - "arn:aws:rds-db:::dbuser:/" - ] - } - ] - }' -``` - -Replace ``, ``, ``, and `` with the appropriate values for your AWS environment. - - - - -#### IAM to Kubernetes mapping - - - - -To assign the policy to a role for IAM role to service account mapping in Amazon EKS, use a Terraform module like [iam-role-for-service-accounts-eks](https://registry.terraform.io/modules/terraform-aws-modules/iam/aws/latest/submodules/iam-role-for-service-accounts-eks): - -```json -module "aurora_role" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - role_name = "aurora-role" - - role_policy_arns = { - policy = aws_iam_policy.rds_policy.arn - } - - oidc_providers = { - main = { - provider_arn = "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - namespace_service_accounts = [":"] - } - } -} -``` - -This Terraform snippet creates a role that allows the service account `` within the `` to assume the user `` within the database ``. 
The output of the `aurora_role` module includes the `iam_role_arn`, which you need to annotate the service account. +::: - +#### Key features - +- **Helm values retrieval**: Extracts deployment values using Helm to ensure all required configurations are set. +- **EKS and OIDC configuration check**: Confirms that EKS is configured with IAM and OIDC, matching the minimum required version for IRSA compatibility. +- **Service account role validation**: For each specified component, verifies that the service account exists and has the correct IAM role annotations. +- **Network access verification**: Ensures that PostgreSQL (Aurora) or OpenSearch instances are accessible from within the cluster. This step involves an `nmap` scan through a Kubernetes job. Use the `-s` option to skip this step if network flow verification is unnecessary. +- **IRSA value check**: Validates that the Helm deployment values are correctly configured to use IRSA for secure service interactions with AWS. +- **Aurora PostgreSQL and OpenSearch IAM configuration**: Confirms that these services support IAM login, ensuring secure access configurations. +- **Access and Trust Policy verification**: Checks that access and trust policies are correctly set. Note that the script performs basic checks; if issues arise with these policies, further manual verification may be needed. +- **Service Account Role association test**: Tests that the IAM role association with the service account is functioning as expected by spawning a job with the specified service account and validating the resulting ARN. This step can also be skipped using the `-s` option. +- **OpenSearch Access Policy check**: Validates that the OpenSearch access policy is configured correctly to support secure connections from the cluster. -To assign the policy to a role using the AWS CLI, follow these steps: +#### Example usage -1. **Create the IAM role**: +You can find the complete usage details in the [c8-sm-checks repository](https://github.com/camunda/c8-sm-checks). Below is a quick reference for common usage options: ```bash -aws iam create-role \ - --role-name aurora-role \ - --assume-role-policy-document '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::" - } - } - } - ] - }' +Usage: ./checks/kube/aws-irsa.sh [-h] [-n NAMESPACE] [-e EXCLUDE_COMPONENTS] [-p] [-l] [-s] +Options: + -h Display this help message + -n NAMESPACE Specify the namespace to use (required) + -e EXCLUDE_COMPONENTS Comma-separated list of Components to exclude from the check (reference of the component is the root key used in the chart) + -p Comma-separated list of Components to check IRSA for PostgreSQL (overrides default list: identityKeycloak,identity,webModeler) + -l Comma-separated list of Components to check IRSA for OpenSearch (overrides default list: zeebe,operate,tasklist,optimize) + -s Disable pod spawn for IRSA and connectivity verification. + By default, the script spawns jobs in the specified namespace to perform + IRSA checks and network connectivity tests. These jobs use the amazonlinux:latest + image and scan with nmap to verify connectivity. ``` -2. 
**Attach the policy to the role**: +**Example Command:** ```bash -aws iam attach-role-policy \ - --role-name aurora-role \ - --policy-arn arn:aws:iam:::policy/rds-policy -``` - - - - -Annotate the service account with the `iam_role_arn`: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - eks.amazonaws.com/role-arn: arn:aws:iam:::role/aurora-role - name: - namespace: -``` - -Replace ``, ``, ``, ``, ``, and `` with the appropriate values for your AWS environment. - -#### Database configuration - -The setup required on the Aurora PostgreSQL side is to create the user and assign the required permissions to it. The following is an example when connected to the PostgreSQL database, and can also be realized by using a [Terraform PostgreSQL Provider](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs). See the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html#UsingWithRDS.IAMDBAuth.DBAccounts.PostgreSQL) for reference concerning Aurora specific configurations. - -```SQL -# create user and grant rds_iam role, which requires the user to login via IAM authentication over password -CREATE USER ""; -GRANT rds_iam TO ""; - -# create some database and grant the user all privileges to it -CREATE DATABASE "some-db"; -GRANT ALL privileges on database "some-db" to ""; -``` - -### Keycloak - -:::caution -IAM Roles for Service Accounts can only be implemented with Keycloak 21 onwards. This may require you to adjust the version used in the Camunda Helm Chart. -::: - -From Keycloak versions 21+, the default JDBC driver can be overwritten, allowing use of a custom wrapper like the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) to utilize the features of IRSA. This is a wrapper around the default JDBC driver, but takes care of signing the requests. - -Furthermore, the [official Keycloak documentation](https://www.keycloak.org/server/db#preparing-keycloak-for-amazon-aurora-postgresql) also provides detailed instructions for utilizing Amazon Aurora PostgreSQL. - -A custom Keycloak container image containing necessary configurations is conveniently accessible on Docker Hub at [camunda/keycloak](https://hub.docker.com/r/camunda/keycloak). This image, built upon the base image [bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak), incorporates the required wrapper for seamless integration. - -#### Container image sources - -The sources of the [Camunda Keycloak images](https://hub.docker.com/r/camunda/keycloak) can be found on [GitHub](https://github.com/camunda/keycloak). In this repository, the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) is assembled in the `Dockerfile`. - -Maintenance of these images is based on the upstream [Bitnami Keycloak images](https://hub.docker.com/r/bitnami/keycloak), ensuring they are always up-to-date with the latest Keycloak releases. The lifecycle details for Keycloak can be found on [endoflife.date](https://endoflife.date/keycloak). - -#### Keycloak image configuration - -Bitnami Keycloak container image configuration is available at [hub.docker.com/bitnami/keycloak](https://hub.docker.com/r/bitnami/keycloak). 
- -#### Kubernetes configuration - -As an example, configure the following environment variables to enable IRSA: - -```yaml -# The AWS wrapper is not capable of XA transactions -- name: KEYCLOAK_EXTRA_ARGS - value: "--db-driver=software.amazon.jdbc.Driver --transaction-xa-enabled=false --log-level=INFO,software.amazon.jdbc:INFO" - -# Enable the AWS IAM plugin -- name: KEYCLOAK_JDBC_PARAMS - value: "wrapperPlugins=iam" -- name: KEYCLOAK_JDBC_DRIVER - value: "aws-wrapper:postgresql" - -# Configure database -- name: KEYCLOAK_DATABASE_USER - value: db-user-name -- name: KEYCLOAK_DATABASE_NAME - value: db-name -- name: KEYCLOAK_DATABASE_HOST - value: db-host -- name: KEYCLOAK_DATABASE_PORT - value: 5432 - -# Ref: https://www.keycloak.org/server/configuration-metrics -- name: KEYCLOAK_ENABLE_STATISTICS - value: "true" - -# Needed to see if Keycloak is healthy: https://www.keycloak.org/server/health -- name: KEYCLOAK_ENABLE_HEALTH_ENDPOINTS - value: "true" -``` - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -##### Helm chart - -For a Helm-based deployment, you can directly configure these settings using Helm values. Below is an example of how you can incorporate these settings into your Helm chart deployment: - -```yaml -identityKeycloak: - postgresql: - enabled: false - image: docker.io/camunda/keycloak:25 # use a supported and updated version listed at https://hub.docker.com/r/camunda/keycloak/tags - extraEnvVars: - - name: KEYCLOAK_EXTRA_ARGS - value: "--db-driver=software.amazon.jdbc.Driver --transaction-xa-enabled=false --log-level=INFO,software.amazon.jdbc:INFO" - - name: KEYCLOAK_JDBC_PARAMS - value: "wrapperPlugins=iam" - - name: KEYCLOAK_JDBC_DRIVER - value: "aws-wrapper:postgresql" - externalDatabase: - host: "aurora.rds.your.domain" - port: 5432 - user: keycloak - database: keycloak -``` - -:::note -For additional details, refer to the [Camunda 8 Helm deployment documentation](/self-managed/setup/install.md). -::: - -### Web Modeler - -Since Web Modeler RestAPI uses PostgreSQL, configure the `restapi` to use IRSA with Amazon Aurora PostgreSQL. Check the [Web Modeler database configuration](../../../../modeler/web-modeler/configuration/database.md#running-web-modeler-on-amazon-aurora-postgresql) for more details. -Web Modeler already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. - -#### Kubernetes configuration - -As an example, configure the following environment variables - -```yaml -- name: SPRING_DATASOURCE_DRIVER_CLASS_NAME - value: software.amazon.jdbc.Driver -- name: SPRING_DATASOURCE_URL - value: jdbc:aws-wrapper:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?wrapperPlugins=iam -- name: SPRING_DATASOURCE_USERNAME - value: db-user-name -``` - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -### Identity - -Since Identity uses PostgreSQL, configure `identity` to use IRSA with Amazon Aurora PostgreSQL. Check the [Identity database configuration](../../../../identity/deployment/configuration-variables.md#running-identity-on-amazon-aurora-postgresql) for more details. -Identity already comes fitted with the [aws-advanced-jdbc-wrapper](https://github.com/awslabs/aws-advanced-jdbc-wrapper) within the Docker image. 
- -#### Kubernetes configuration - -As an example, configure the following environment variables - -```yaml -- name: SPRING_DATASOURCE_DRIVER_CLASS_NAME - value: software.amazon.jdbc.Driver -- name: SPRING_DATASOURCE_URL - value: jdbc:aws-wrapper:postgresql://[DB_HOST]:[DB_PORT]/[DB_NAME]?wrapperPlugins=iam -- name: SPRING_DATASOURCE_USERNAME - value: db-user-name -``` - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -## Amazon OpenSearch Service - -[Amazon OpenSearch Service](https://aws.amazon.com/opensearch-service/) is a managed OpenSearch service provided by AWS, which is a distributed search and analytics engine built on Apache Lucene. - -:::note -As of the 8.4 release, Zeebe, Operate, and Tasklist are now compatible with [Amazon OpenSearch Service](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch Service requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. -::: - -:::caution - -Optimize is not supported using the IRSA method. However, Optimize can be utilized by supplying a username and password. The migration step must also be disabled. For more information, refer to [using Amazon OpenSearch Service](/self-managed/setup/guides/using-existing-opensearch.md). - -::: - -### Setup - -For Amazon OpenSearch Service, the most common use case is the use of `fine-grained access control`. - -When using the Terraform provider of [AWS](https://registry.terraform.io/providers/hashicorp/aws/latest) with the resource [opensearch_domain](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/opensearch_domain) to create a new Amazon OpenSearch Service cluster, supply the arguments: - -- `advanced_security_options.enabled = true` -- `advanced_security_options.anonymous_auth_enabled = false` to activate `fine-grained access control`. - -Without `fine-grained access control`, anonymous access is enabled and would be sufficient to supply an IAM role with the right policy to allow access. In our case, we'll have a look at `fine-grained access control` and the use without it can be derived from this more complex example. - -#### AWS Policy - -An AWS policy, which later is assigned to a role, is required to allow general access to Amazon OpenSearch Service. See the [AWS documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ac.html) for the explanation of the policy. - - - - -To create an AWS policy for Amazon OpenSearch Service using Terraform, you can use the [aws_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) resource. 
Here’s an example configuration: - -```json -resource "aws_iam_policy" "opensearch_policy" { - name = "opensearch_policy" - - policy = jsonencode({ - "Version" : "2012-10-17", - "Statement" : [ - { - "Effect" : "Allow", - "Action" : [ - "es:DescribeElasticsearchDomains", - "es:DescribeElasticsearchInstanceTypeLimits", - "es:DescribeReservedElasticsearchInstanceOfferings", - "es:DescribeReservedElasticsearchInstances", - "es:GetCompatibleElasticsearchVersions", - "es:ListDomainNames", - "es:ListElasticsearchInstanceTypes", - "es:ListElasticsearchVersions", - "es:DescribeElasticsearchDomain", - "es:DescribeElasticsearchDomainConfig", - "es:ESHttpGet", - "es:ESHttpHead", - "es:GetUpgradeHistory", - "es:GetUpgradeStatus", - "es:ListTags", - "es:AddTags", - "es:RemoveTags", - "es:ESHttpDelete", - "es:ESHttpPost", - "es:ESHttpPut" - ], - "Resource" : [ - "arn:aws:es:::domain//*" - ] - } - ] - }) -} +./checks/kube/aws-irsa.sh -n camunda-primary -p "identity,webModeler" -l "zeebe,operate" ``` -Replace ``, ``, and `` with the appropriate values for your Amazon OpenSearch Service domain. +In this example, the script will check **`identity`** and **`webModeler`** components (references of the component name in the helm chart) for Aurora PostgreSQL access and **`zeebe`** and **`operate`** components for OpenSearch access in the `camunda-primary` namespace. - +#### Script output overview - +The script offers detailed output to confirm that each component is properly configured for IRSA. Below is an outline of the checks it performs and the expected output format: -To create an AWS policy for Amazon OpenSearch Service using the AWS CLI, you use the `aws iam create-policy` command: +**Example Output:** -```bash -aws iam create-policy \ - --policy-name opensearch_policy \ - --policy-document '{ - "Version" : "2012-10-17", - "Statement" : [ - { - "Effect" : "Allow", - "Action" : [ - "es:DescribeElasticsearchDomains", - "es:DescribeElasticsearchInstanceTypeLimits", - "es:DescribeReservedElasticsearchInstanceOfferings", - "es:DescribeReservedElasticsearchInstances", - "es:GetCompatibleElasticsearchVersions", - "es:ListDomainNames", - "es:ListElasticsearchInstanceTypes", - "es:ListElasticsearchVersions", - "es:DescribeElasticsearchDomain", - "es:DescribeElasticsearchDomainConfig", - "es:ESHttpGet", - "es:ESHttpHead", - "es:GetUpgradeHistory", - "es:GetUpgradeStatus", - "es:ListTags", - "es:AddTags", - "es:RemoveTags", - "es:ESHttpDelete", - "es:ESHttpPost", - "es:ESHttpPut" - ], - "Resource" : [ - "arn:aws:es:::domain//*" - ] - } - ] - }' ``` - -Replace ``, ``, and `` with the appropriate values for your Amazon OpenSearch Service domain. - - - - -#### IAM to Kubernetes mapping - -To assign the policy to a role for the IAM role to service account mapping in Amazon EKS: - - - - -You can use a Terraform module like [iam-role-for-service-accounts-eks](https://registry.terraform.io/modules/terraform-aws-modules/iam/aws/latest/submodules/iam-role-for-service-accounts-eks): - -```json -module "opensearch_role" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - role_name = "opensearch-role" - - role_policy_arns = { - policy = aws_iam_policy.opensearch_policy.arn - } - - oidc_providers = { - main = { - provider_arn = "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - namespace_service_accounts = [":"] - } - } -} +[OK] AWS CLI version 2.15.20 is compatible and user is logged in. +[OK] AWS environment detected. Proceeding with the script. 
+[INFO] Chart camunda-platform is deployed in namespace camunda-primary. +[INFO] Retrieved values for Helm deployment: camunda-platform-11.0.1. +[FAIL] The service account keycloak-sa does not have a valid eks.amazonaws.com/role-arn annotation. You must add it in the chart, see https://docs.camunda.io/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm/ +[FAIL] RoleArn name for component 'identityKeycloak' is empty. Skipping verification. ``` -This Terraform configuration allows the service account `` within the namespace `` to access the Amazon OpenSearch Service for the cluster ``. The output of the `opensearch_role` module includes the `iam_role_arn` needed to annotate the service account. +The script highlights errors with the `[FAIL]` prefix, and these are directed to `stderr` for easier filtering. We recommend capturing `stderr` output to quickly identify failed configurations. -Annotate the service account with the `iam_role_arn` output. +If the script returns a false positive—indicating success when issues are actually present—manually review each output line to ensure reported configuration details (like Role ARNs or annotations) are accurate. For example, ensure that each service account has the correct Role ARN and associated permissions to avoid undetected issues. - +### Advanced troubleshooting for IRSA configuration - +The troubleshooting script provides essential checks but may not capture all potential issues, particularly those related to IAM policies and configurations. If IRSA is not functioning as expected and no errors are flagged by the script, follow the steps below for deeper troubleshooting. -To assign the policy to a role using the AWS CLI, follow these steps: +#### Spawn a debug pod to simulate the pod environment -1. **Create the IAM role**: +To troubleshoot in an environment identical to your pod, deploy a debug pod with the necessary service account. Here are examples of debug manifests you can customize for your needs: -```bash -aws iam create-role \ - --role-name opensearch-role \ - --assume-role-policy-document '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::" - } - } - } - ] - }' -``` +- [OpenSearch client pod](https://github.com/camunda/camunda-tf-eks-module/blob/main/modules/fixtures/opensearch-client.yml) +- [PostgreSQL client pod](https://github.com/camunda/camunda-tf-eks-module/blob/main/modules/fixtures/postgres-client.yml) -2. **Attach the policy to the role**: +1. Adapt the manifests to use the specific `serviceAccountName` (e.g., `aurora-access-sa`) you want to test. +2. Insert a sleep timer in the command to allow time to exec into the pod for live debugging. +3. Create the pod with the `kubectl apply` command: + ```bash + kubectl apply -f debug-client.yaml + ``` +4. Once the pod is running, connect to it with a bash shell (make sure to adjust the app label with your value): + ```bash + kubectl exec -it $(kubectl get pods -l app=REPLACE-WITH-LABEL -o jsonpath='{.items[0].metadata.name}') -- /bin/bash + ``` +5. Inside the pod, display all environment variables to check for IAM and AWS configurations: + ```bash + env + ``` + This command will print out all environment variables, including those related to IRSA. 
+ Inside the pod, validate that key environment variables are correctly injected: + - `AWS_WEB_IDENTITY_TOKEN_FILE`: Path to the token (JWT) file for WebIdentity. + - `AWS_ROLE_ARN`: ARN of the associated IAM role. + - `AWS_REGION`, `AWS_STS_REGIONAL_ENDPOINTS`, and other AWS configuration variables. -```bash -aws iam attach-role-policy \ - --role-name opensearch-role \ - --policy-arn arn:aws:iam:::policy/opensearch_policy -``` +To ensure that IRSA and role associations are functioning: - - +- Check that the expected `AWS_ROLE_ARN` and token are present. +- Decode the JWT token to validate the correct trust relationship with the service account and namespace. -Annotate the service account with the `iam_role_arn`: +#### Verify OpenSearch fine-grained access control (fgac) configuration -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - eks.amazonaws.com/role-arn: arn:aws:iam:::role/opensearch-role - name: - namespace: -``` - -Replace ``, ``, ``, and `` with the appropriate values for your Amazon OpenSearch Service and EKS setup. +For OpenSearch clusters, ensure [fine-grained access control](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html) is set up to allow the role’s access to the cluster. If you deployed OpenSearch with the [terraform reference architecture implementation for EKS](terraform-setup.md), fgac should already be configured. For manual deployments, follow the process outlined in the [OpenSearch configuration guide](terraform-setup.md#configure-opensearch-fine-grained-access-control) to apply similar controls. -This step is required to be repeated for Tasklist and Zeebe, to grant their service accounts access to OpenSearch. +#### Confirm PostgreSQL IAM role access -#### Database configuration +Verify that PostgreSQL roles are correctly configured to support IAM-based authentication. The database user should have the `rds_iam` role to allow IAM authentication. If the setup was automated with the [terraform reference architecture implementation for EKS](terraform-setup.md), the necessary access configuration should already be in place. For manual configurations, refer to [PostgreSQL configuration instructions](terraform-setup.md#configure-the-database-and-associated-access). -This setup is sufficient for Amazon OpenSearch Service clusters without `fine-grained access control`. +To test connectivity: -`Fine-grained access control` adds another layer of security to OpenSearch, requiring you to add a mapping between the IAM role and the internal OpenSearch role. Visit the [AWS documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html) on `fine-grained access control`. +- Run a manual connection test using the [PostgreSQL client manifest](https://raw.githubusercontent.com/camunda/camunda-tf-eks-module/refs/heads/main/modules/fixtures/postgres-client.yml). +- Use `psql` within the pod to verify the correct roles are assigned. Run: + ```bash + SELECT * FROM pg_roles WHERE rolname=''; + ``` + Confirm that `rds_iam` is listed among the assigned roles. -There are different ways to configure the mapping within Amazon OpenSearch Service: +#### Validate IAM Policies for each role -- Via a [Terraform module](https://registry.terraform.io/modules/idealo/opensearch/aws/latest) in case your OpenSearch instance is exposed. -- Via the [OpenSearch dashboard](https://opensearch.org/docs/latest/security/access-control/users-roles/). 
+Both trust and permission policies are crucial in configuring IAM Roles for Service Accounts (IRSA) in AWS. Each IAM role should have policies that precisely permit the necessary actions and correctly trust the relevant Kubernetes service accounts associated with your components.
-
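+As a quick way to inspect both policy types for a given role, you can use the AWS CLI. The role name below is a placeholder; substitute the IAM role created for your component:
+
+```bash
+# Placeholder role name - replace it with the IAM role used by your component's service account
+ROLE_NAME="camunda-aurora-irsa-role"
+
+# Show the trust (assume-role) policy document of the role
+aws iam get-role --role-name "$ROLE_NAME" --query 'Role.AssumeRolePolicyDocument' --output json
+
+# List the managed permission policies attached to the role
+aws iam list-attached-role-policies --role-name "$ROLE_NAME"
+
+# List inline permission policies, if any
+aws iam list-role-policies --role-name "$ROLE_NAME"
+```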
    +##### AssumeRole policies -Via the REST API +In AWS, [AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) allows a user or service to assume a role and temporarily gain permissions to execute specific actions. Each role needs an **AssumeRole policy** that precisely matches AWS requirements for the specific services and actions your components perform. -To authorize the IAM role in OpenSearch for access, follow these steps: +For each IAM role, ensure the **trust policy** includes: -**_Note that this example uses basic authentication (username and password), which may not be the best practice for all scenarios, especially if fine-grained access control is enabled._** The endpoint used in this example is not exposed by default, so consult your OpenSearch documentation for specifics on enabling and securing this endpoint. +1. The correct `Service` field, allowing the pod’s service account to assume the role. +2. An `Action` for `sts:AssumeRoleWithWebIdentity`, as IRSA uses WebIdentity to enable IAM role assumption. -Use the following `curl` command to update the OpenSearch internal database and authorize the IAM role for access. Replace placeholders with your specific values: - -```bash -curl -sS -u ":" \ - -X PATCH \ - "https:///_opendistro/_security/api/rolesmapping/all_access?pretty" \ - -H 'Content-Type: application/json' \ - -d' -[ - { - "op": "add", - "path": "/backend_roles", - "value": [""] - } -] -' -``` +Verify that the policy is configured according to [AWS’s role trust policy guidelines](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) for Kubernetes IRSA. -- Replace `` and `` with your OpenSearch domain admin credentials. -- Replace `` with your OpenSearch endpoint URL. -- Replace `` with the IAM role name created by Terraform, which is output by the `opensearch_role` module. +##### Trust policies -
    +For each role, verify that the [trust policy syntax is correct](https://aws.amazon.com/fr/blogs/security/how-to-use-trust-policies-with-iam-roles/), allowing the appropriate service accounts to assume the role. Refer to AWS’s [trust policy validation tool](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_policy-validator.html) for [accurate syntax and configuration](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-reference-policy-checks.html). -The important part is assigning the `iam_role_arn` of the previously created `opensearch_role` to an internal role within Amazon OpenSearch Service. For example, `all_access` on the Amazon OpenSearch Service side is a good candidate, or if required, extra roles can be created with more restrictive access. +##### Permission policies -### Camunda 8 Self-Managed Helm chart configuration +Each IAM role should also have appropriate permission policies attached. These policies define what actions the role can perform on AWS resources. Verify that permission policies: -The following is an example configuration that can be used to configure the Camunda 8 Self-Managed Helm chart to use the feature set of IRSA for the Amazon OpenSearch Service Exporter: +- Are configured correctly to allow the necessary operations for your resources (e.g., read and write access to S3 buckets or access to RDS). +- Align with your security model by only granting the minimum required permissions. -```yaml -global: - elasticsearch: - enabled: false - opensearch: - enabled: true - aws: - enabled: true - url: - protocol: https - host: aws.opensearch.example.com - port: 443 +The AWS’s [policy simulator](https://policysim.aws.amazon.com/) is a valuable tool for testing how permissions are applied and for spotting misconfigurations. -elasticsearch: - enabled: false +#### If issues persist -optimize: - enabled: false -``` +If issues remain unresolved, compare your configuration with Camunda’s [reference architecture](terraform-setup.md) deployed with Terraform. This setup has been validated to work with IRSA and contains the correct permissions. By comparing it to your setup, you may identify discrepancies that are causing your issues. -:::note -Amazon OpenSearch Service listens on port 443 opposed to the usual port 9200. -::: - -:::note -Don't forget to set the `serviceAccountName` of the deployment/statefulset to the created service account with the IRSA annotation. -::: - -## Troubleshooting - -### Instance Metadata Service (IMDS) +## Instance Metadata Service (IMDS) [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html) is a default fallback for the AWS SDK due to the [default credentials provider chain](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/credentials-chain.html). Within the context of Amazon EKS, it means a pod will automatically assume the role of a node. This can hide many problems, including whether IRSA was set up correctly or not, since it will fall back to IMDS in case of failure and hide the actual error. @@ -617,3 +188,9 @@ eks_managed_node_group_defaults { ``` Overall, this will disable the role assumption of the node for the Kubernetes pod. Depending on the resulting error within Operate, Zeebe, and Web-Modeler, you'll get a clearer error, which is helpful to debug the error more easily. 
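+To confirm that the fallback is actually blocked, you can try to reach the metadata endpoint from inside one of the pods. This is only a sketch: it assumes the container image ships `curl`, and the namespace and label selector are placeholders you need to adjust to your deployment:
+
+```bash
+# Pick any Camunda pod (namespace and label selector are assumptions - adjust them)
+POD=$(kubectl get pods -n camunda -l app.kubernetes.io/component=operate -o jsonpath='{.items[0].metadata.name}')
+
+# With the hop limit lowered, this call should time out instead of returning node credentials
+kubectl exec -n camunda "$POD" -- curl -sS --max-time 5 http://169.254.169.254/latest/meta-data/ \
+  || echo "IMDS is not reachable from the pod (expected)"
+```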
+ +:::note Enabled by default in the terraform reference architecture of EKS + +In the [reference architecture with terraform](terraform-setup.md), this setting is configured like that by default. + +::: diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md index 28c2429c2ee..40fe91e4dc8 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md @@ -1,12 +1,17 @@ --- id: eks-terraform -title: "Deploy an EKS cluster with Terraform" +title: "Deploy an EKS cluster with Terraform (advanced)" description: "Deploy an Amazon Kubernetes Cluster (EKS) with a Terraform module for a quick Camunda 8 setup." --- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + This guide offers a detailed tutorial for deploying an Amazon Web Services (AWS) Elastic Kubernetes Service (EKS) cluster, tailored explicitly for deploying Camunda 8 and using Terraform, a popular Infrastructure as Code (IaC) tool. -This is designed to help leverage the power of IaC to streamline and reproduce a Cloud infrastructure setup. By walking through the essentials of setting up an Amazon EKS cluster, configuring AWS IAM permissions, and integrating a PostgreSQL database, this guide explains the process of using Terraform with AWS, making it accessible even to those new to Terraform or IaC concepts. +It is recommended to use this guide for building a robust and sustainable infrastructure over time. However, for a quicker trial or proof of concept, using the [eksctl](./eksctl.md) method may suffice. + +This guide is designed to help leverage the power of Infrastructure as Code (IaC) to streamline and reproduce a cloud infrastructure setup. By walking through the essentials of setting up an Amazon EKS cluster, configuring AWS IAM permissions, and integrating a PostgreSQL database and an OpenSearch domain (as an alternative to Elasticsearch), this guide explains how to use Terraform with AWS, making it accessible even to those new to Terraform or IaC concepts. It utilizes AWS-managed services when available, providing these as an optional convenience that you can choose to use or not. :::tip @@ -14,16 +19,20 @@ If you are completely new to Terraform and the idea of IaC, read through the [Te ::: -## Prerequisites +## Requirements - An [AWS account](https://docs.aws.amazon.com/accounts/latest/reference/accounts-welcome.html) to create any resources within AWS. +- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources. - [Terraform (1.9+)](https://developer.hashicorp.com/terraform/downloads) -- [Kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. +- [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster. +- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some Terraform variables. - [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured. - This simplifies the setup by not relying on explicit credentials and instead creating a mapping between IAM roles and Kubernetes service account based on a trust relationship. 
A [blog post](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/) by AWS visualizes this on a technical level. - This allows a Kubernetes service account to temporarily impersonate an AWS IAM role to interact with AWS services like S3, RDS, or Route53 without having to supply explicit credentials. + - IRSA is recommended as an [EKS best practice](https://aws.github.io/aws-eks-best-practices/security/docs/iam/). +- This guide uses GNU/Bash for all the shell commands listed. -## Considerations +### Considerations This setup provides an essential foundation for beginning with Camunda 8, though it's not tailored for optimal performance. It's a good initial step for preparing a production environment by incorporating [IaC tooling](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code). @@ -33,282 +42,677 @@ To try out Camunda 8 or develop against it, consider signing up for our [SaaS of For the simplicity of this guide, certain best practices will be provided with links to additional documents, enabling you to explore the topic in more detail. -:::warning +:::info Module update notice (November 2024) + +Modules referenced in this guide have been updated recently from **v2** to **v3**. For more information, refer to our [migration guide from v2 to v3](https://github.com/camunda/camunda-tf-eks-module/blob/main/guides/MIGRATION_GUIDE_v2_to_v3.md). + +::: + +:::warning Cost management + Following this guide will incur costs on your Cloud provider account, namely for the managed Kubernetes service, running Kubernetes nodes in EC2, Elastic Block Storage (EBS), and Route53. More information can be found on [AWS](https://aws.amazon.com/eks/pricing/) and their [pricing calculator](https://calculator.aws/#/) as the total cost varies per region. + ::: -## Outcome +### Variants + +We support two variants of this architecture: + +- The first, **standard installation**, utilizes a username and password connection for the Camunda components (or simply relies on network isolation for certain components). This option is straightforward and easier to implement, making it ideal for environments where simplicity and rapid deployment are priorities, or where network isolation provides sufficient security. + +- The second variant, **IRSA** (IAM Roles for Service Accounts), uses service accounts to perform authentication with IAM policies. This approach offers stronger security and better integration with AWS services, as it eliminates the need to manage credentials manually. It is especially beneficial in environments with strict security requirements, where fine-grained access control and dynamic role-based access are essential. + +#### How to choose + +- If you prefer a simpler setup with basic authentication or network isolation, and your security needs are moderate, the **standard installation** is a suitable choice. +- If you require enhanced security, dynamic role-based access management, and want to leverage AWS’s identity services for fine-grained control, the **IRSA** variant is the better option. + +Both can be set up with or without a **Domain** ([ingress](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html)). + +### Outcome + + Following this tutorial and steps will result in: - An Amazon EKS Kubernetes cluster running the latest Kubernetes version with four nodes ready for Camunda 8 installation. 
- The [EBS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) is installed and configured, which is used by the Camunda 8 Helm chart to create [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). -- A [managed Aurora PostgreSQL 15.8](https://aws.amazon.com/rds/postgresql/) instance to be used by the Camunda 8 components. +- A [managed Aurora PostgreSQL 15.x](https://aws.amazon.com/rds/postgresql/) instance to be used by the Camunda platform. +- A [managed OpenSearch domain](https://aws.amazon.com/opensearch-service/) created and configured for use with the Camunda platform. +- (optional) [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) (IRSA) configured. + - This simplifies the setup by not relying on explicit credentials, but instead allows creating a mapping between IAM roles and Kubernetes service accounts based on a trust relationship. A [blog post](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/) by AWS visualizes this on a technical level. + - This allows a Kubernetes service account to temporarily impersonate an AWS IAM role to interact with AWS services like S3, RDS, or Route53 without supplying explicit credentials. + +## 1. Configure AWS and initialize Terraform + +### Terraform prerequisites + +To manage the infrastructure for Camunda 8 on AWS using Terraform, we need to set up Terraform's backend to store the state file remotely in an S3 bucket. This ensures secure and persistent storage of the state file. -## Installing Amazon EKS cluster with Terraform +:::note +Advanced users may want to handle this part differently and use a different backend. The backend setup provided is an example for new users. +::: -### Terraform prerequsites +#### Set up AWS authentication -1. Create an empty folder to place your Terraform files in. -2. Create a `config.tf` with the following setup: +The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. Before you can use the provider, you must authenticate it using your AWS credentials. +You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods. -```hcl -terraform { - backend "local" { - path = "terraform.tfstate" - } +We recommend using the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials. - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 5.69" - } - } -} +To configure the AWS CLI: -provider "aws" { - region = "eu-central-1" -} +```bash +aws configure ``` -3. Set up the authentication for the `AWS` provider. +Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). -:::note +:::caution Ownership of the created resources -It's recommended to use a different backend than `local`. More information can be found in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/settings/backends/configuration). 
+A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user. + +[Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl` ::: -:::note +#### Create an S3 bucket for Terraform state management + +Before setting up Terraform, you need to create an S3 bucket that will store the state file. This is important for collaboration and to prevent issues like state file corruption. + +To start, set the region as an environment variable upfront to avoid repeating it in each command: + +```bash +export AWS_REGION= +``` + +Replace `` with your chosen AWS region (for example, `eu-central-1`). + +Now, follow these steps to create the S3 bucket with versioning enabled: + +1. Open your terminal and ensure the AWS CLI is installed and configured. + +2. Run the following command to create an S3 bucket for storing your Terraform state. Make sure to use a unique bucket name and set the `AWS_REGION` environment variable beforehand: + + ```bash + # Replace "my-eks-tf-state" with your unique bucket name + export S3_TF_BUCKET_NAME="my-eks-tf-state" + + aws s3api create-bucket --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION" \ + --create-bucket-configuration LocationConstraint="$AWS_REGION" + ``` + +3. Enable versioning on the S3 bucket to track changes and protect the state file from accidental deletions or overwrites: + + ```bash + aws s3api put-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --versioning-configuration Status=Enabled --region "$AWS_REGION" + ``` + +4. Secure the bucket by blocking public access: + + ```bash + aws s3api put-public-access-block --bucket "$S3_TF_BUCKET_NAME" --public-access-block-configuration \ + "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" --region "$AWS_REGION" + ``` + +5. Verify versioning is enabled on the bucket: + + ```bash + aws s3api get-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION" + ``` + +This S3 bucket will now securely store your Terraform state files with versioning enabled. + +#### Create a `config.tf` with the following setup + +Once the S3 bucket is created, configure your `config.tf` file to use the S3 backend for managing the Terraform state: + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/config.tf +``` + + + + +```hcl reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/config.tf +``` -The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. You must configure the provider with the proper credentials before using it. You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods. + + -There are several ways to authenticate the `AWS` provider. 
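+If you prefer to start from a blank file instead of copying the referenced examples, the following is a minimal sketch of the pattern they follow: a partial `s3` backend (the bucket and key are supplied later at `terraform init` time) plus the AWS provider. This is an illustration only, not the referenced file verbatim:
+
+```bash
+# Write a minimal config.tf; the backend block stays partial on purpose,
+# bucket and key are passed via -backend-config during "terraform init".
+cat > config.tf <<'EOF'
+terraform {
+  backend "s3" {
+    encrypt = true
+  }
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 5.0"
+    }
+  }
+}
+
+# The region is taken from the AWS_REGION environment variable set earlier
+provider "aws" {}
+EOF
+```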
+#### Initialize Terraform -- (Recommended) Use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) to configure access. Terraform will automatically default to AWS CLI configuration when present. -- Set environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`, which can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). +Once your `config.tf` and authentication are set up, you can initialize your Terraform project. The previous steps configured a dedicated S3 Bucket (`S3_TF_BUCKET_NAME`) to store your state, and the following creates a bucket key that will be used by your configuration. +Configure the backend and download the necessary provider plugins: + +```bash +export S3_TF_BUCKET_KEY="camunda-terraform/terraform.tfstate" + +echo "Storing terraform state in s3://$S3_TF_BUCKET_NAME/$S3_TF_BUCKET_KEY" + +terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" +``` + +Terraform will connect to the S3 bucket to manage the state file, ensuring remote and persistent storage. + +### EKS cluster module setup + +This module establishes the foundational configuration for AWS access and Terraform. + +We will utilize [Terraform modules](https://developer.hashicorp.com/terraform/language/modules), which allow us to abstract resources into reusable components, streamlining our infrastructure management. + +The [Camunda-provided module](https://github.com/camunda/camunda-tf-eks-module) is publicly available and offers a robust starting point for deploying an EKS cluster. It is highly recommended to review this module prior to implementation to understand its structure and capabilities. + +#### Set up the EKS cluster module + +1. Create a `cluster.tf` file in the same directory as your `config.tf` file. +2. Add the following content to your newly created `cluster.tf` file to utilize the provided module: + + + + + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/cluster.tf + ``` + + + + + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/cluster.tf + ``` + + + + +3. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command: + + ```bash + terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" + ``` + +4. Configure user access to the cluster. By default, the user who creates the Amazon EKS cluster has administrative access. + +
    + Grant cluster access to other users +

    + + If you want to grant access to other users, you can configure this by using the `access_entries` input. + + Amazon EKS access management is divided into two distinct layers: + + - The **first layer** involves **AWS IAM permissions**, which allow basic Amazon EKS functionalities such as interacting with the Amazon EKS UI and generating EKS access through the AWS CLI. The module handles this part for you by creating the necessary IAM roles and policies. + + - The **second layer** controls **cluster access** within Kubernetes, defining the user's permissions inside the cluster (for example, policy association). This can be configured directly through the module's `access_entries` input. + + To manage user access, use the `access_entries` configuration, introduced in module version [2.0.0](https://github.com/camunda/camunda-tf-eks-module/releases/tag/2.0.0): + + ```hcl + access_entries = { + example = { + kubernetes_groups = [] + principal_arn = "" + + policy_associations = { + example = { + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" + access_scope = { + namespaces = ["default"] + type = "namespace" + } + } + } + } + } + ``` + + In this configuration: + + - Replace `principal_arn` with the ARN of the IAM user or role. + - Use `policy_associations` to define policies for fine-grained access control. + + For a full list of available policies, refer to the [AWS EKS Access Policies documentation](https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html). + + :::info Module deprecation notice + Starting from version 2.x.x of this module, direct mappings through `aws_auth_roles` and `aws_auth_users` are no longer supported. If you are upgrading from version [1.x.x](https://github.com/camunda/camunda-tf-eks-module/releases/tag/1.0.3), you will need to fork the module and follow AWS's official instructions for managing the `aws-auth` ConfigMap. + + For more details, refer to the [official upgrade guide](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md). + ::: + +
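+   Once applied, you can verify the resulting access entries directly with the AWS CLI. The cluster name and principal ARN below are placeholders:
+
+   ```bash
+   # List the access entries configured on the cluster (replace the cluster name)
+   aws eks list-access-entries --cluster-name "camunda-cluster" --region "$AWS_REGION"
+
+   # Show the access policies associated with one principal (replace the ARN)
+   aws eks list-associated-access-policies \
+     --cluster-name "camunda-cluster" \
+     --principal-arn "arn:aws:iam::123456789012:user/example" \
+     --region "$AWS_REGION"
+   ```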

    +
    + +5. Customize the cluster setup. The module offers various input options that allow you to further customize the cluster configuration. For a comprehensive list of available options and detailed usage instructions, refer to the [EKS module documentation](https://github.com/camunda/camunda-tf-eks-module/blob/2.6.0/modules/eks-cluster/README.md). + +### PostgreSQL module setup + +:::info Optional module + +If you don't want to use this module, you can skip this section. However, you may need to adjust the remaining instructions to remove references to this module. + +If you choose not to use this module, you must either provide a managed PostgreSQL service or use the internal deployment by the Camunda Helm chart in Kubernetes. ::: -:::warning +We separated the cluster and PostgreSQL modules to offer you more customization options. + +#### Set up the Aurora PostgreSQL module + +1. Create a `db.tf` file in the same directory as your `config.tf` file. +2. Add the following content to your newly created `db.tf` file to utilize the provided module: + + + + + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/db.tf + ``` + + + + + In addition to using standard username and password authentication, you can opt to use [**IRSA (IAM Roles for Service Accounts)**](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) for secure, role-based access to your Aurora database. This method allows your EKS workloads to assume IAM roles without needing to manage AWS credentials directly. + + :::note + Using IRSA is optional. If preferred, you can continue using traditional password-based authentication for database access. + ::: + + If you choose to use IRSA, you’ll need to take note of the **IAM role** created for Aurora and the **AWS Account ID**, as these will be used later to annotate the Kubernetes service account. + + ##### Aurora IRSA role and policy + + The Aurora module uses outputs from the EKS cluster module to configure the IRSA role and policy. Below are the required parameters: -Do not store sensitive information (credentials) in your Terraform files. + Here’s how to define the IAM role trust policy and access policy for Aurora: + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/db.tf + ``` + + Once the IRSA configuration is complete, ensure you **record the IAM role name** (from the `iam_aurora_role_name` configuration), it is required to annotate the Kubernetes service account in the next step. + + + + +3. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command: + + ```bash + terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" + ``` + +4. Customize the Aurora cluster setup through various input options. Refer to the [Aurora module documentation](https://github.com/camunda/camunda-tf-eks-module/blob/2.6.0/modules/aurora/README.md) for more details on other customization options. + +### OpenSearch module setup + +:::info Optional module + +If you don't want to use this module, you can skip this section. However, you may need to adjust the remaining instructions to remove references to this module. + +If you choose not to use this module, you'll need to either provide a managed Elasticsearch or OpenSearch service or use the internal deployment by the Camunda Helm chart in Kubernetes. 
::: -:::warning +The OpenSearch module creates an OpenSearch domain intended for Camunda platform. OpenSearch is a powerful alternative to Elasticsearch. For more information on using OpenSearch with Camunda, refer to the [Camunda documentation](/self-managed/setup/guides/using-existing-opensearch.md). -A user who creates resources in AWS will therefore own these resources. In this particular case, the user will always have admin access to the Kubernetes cluster until the cluster is deleted. +:::note Migration to OpenSearch is not supported -Therefore, it can make sense to create an extra [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) which credentials are used for Terraform purposes. +Using Amazon OpenSearch Service requires [setting up a new Camunda installation](/self-managed/setup/overview.md). Migration from previous Camunda versions or Elasticsearch environments is currently not supported. Switching between Elasticsearch and OpenSearch, in either direction, is also not supported. ::: -### Cluster module +#### Set up the OpenSearch domain module -This module creates the basic layout that configures AWS access and Terraform. +1. Create a `opensearch.tf` file in the same directory as your `config.tf` file. +1. Add the following content to your newly created `opensearch.tf` file to utilize the provided module: -The following will use [Terraform modules](https://developer.hashicorp.com/terraform/language/modules), which allows abstracting resources into reusable components. + + -The [Camunda provided module](https://github.com/camunda/camunda-tf-eks-module/tree/2.5.0/modules/eks-cluster) is publicly available. It's advisable to review this module before usage. + :::caution Network based security + The standard deployment for OpenSearch relies on the first layer of security, which is the Network. + While this setup allows easy access, it may expose sensitive data. To enhance security, consider implementing IAM Roles for Service Accounts (IRSA) to restrict access to the OpenSearch cluster, providing a more secure environment. + For more information, see the [Amazon OpenSearch Service Fine-Grained Access Control documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html#fgac-access-policies). + ::: -1. In the folder where your `config.tf` resides, create an additional `cluster.tf`. -2. Paste the following content into the newly created `cluster.tf` file to make use of the provided module: + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/opensearch.tf + ``` -```hcl -module "eks_cluster" { - source = "git::https://github.com/camunda/camunda-tf-eks-module//modules/eks-cluster?ref=2.5.0" + + + - region = "eu-central-1" # change to your AWS region - name = "cluster-name" # change to name of your choosing + In addition to standard authentication, which uses anonymous users and relies on the network for access control, you can also use [**IRSA (IAM Roles for Service Accounts)**](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) to securely connect to OpenSearch. IRSA enables your Kubernetes workloads to assume IAM roles without managing AWS credentials directly. - # Set CIDR ranges or use the defaults - cluster_service_ipv4_cidr = "10.190.0.0/16" - cluster_node_ipv4_cidr = "10.192.0.0/16" -} -``` + :::note + Using IRSA is optional. If you prefer, you can continue using password-based access to your OpenSearch domain. 
+ ::: -There are various other input options to customize the cluster setup further; see the [module documentation](https://github.com/camunda/camunda-tf-eks-module/tree/2.5.0/modules/eks-cluster). + If you choose to use IRSA, you’ll need to take note of the **IAM role name** created for OpenSearch and the **AWS Account ID**, as these will be required later to annotate the Kubernetes service account. -### PostgreSQL module + ##### OpenSearch IRSA role and policy -The resulting PostgreSQL instance and default database `camunda` is intended to be used with Keycloak. You may manually add extra databases after creation for Identity with multi-tenancy. -This will not be covered in this guide as the Identity default for multi-tenancy is to be disabled. + To configure IRSA for OpenSearch, the OpenSearch module uses outputs from the EKS cluster module to define the necessary IAM role and policies. -We separated the cluster and PostgreSQL modules from each other to allow more customization options to the user. + Here's an example of how to define the IAM role trust policy and access policy for OpenSearch, this configuration will deploy an OpenSearch domain with advanced security enabled: -1. In the folder where your `config.tf` resides, create an additional `db.tf` file. -2. Paste the following contents into `db.tf` to make use of the provided module: + ```hcl reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/opensearch.tf + ``` -```hcl -module "postgresql" { - source = "git::https://github.com/camunda/camunda-tf-eks-module//modules/aurora?ref=2.5.0" - engine_version = "15.8" - auto_minor_version_upgrade = false - cluster_name = "cluster-name-postgresql" # change "cluster-name" to your name - default_database_name = "camunda" + Once the IRSA configuration is complete, ensure you **record the IAM role name** (from the `iam_opensearch_role_name` configuration), it is required to annotate the Kubernetes service account in the next step. - # Please supply your own secret values - username = "secret_user" - password = "secretvalue%23" - vpc_id = module.eks_cluster.vpc_id - subnet_ids = module.eks_cluster.private_subnet_ids - cidr_blocks = concat(module.eks_cluster.private_vpc_cidr_blocks, module.eks_cluster.public_vpc_cidr_blocks) - instance_class = "db.t3.medium" - iam_auth_enabled = true + As the OpenSearch domain has advanced security enabled and [fine-grained access control](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html), we will later use your provided master username (`advanced_security_master_user_name`) and password (`advanced_security_master_user_password`) to perform the initial setup of the security component, allowing the created IRSA role to access the domain. - depends_on = [module.eks_cluster] -} -``` + + + +1. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command: + + ```bash + terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY" + ``` -To manage secrets in Terraform, we recommend [injecting those via Vault](https://developer.hashicorp.com/terraform/tutorials/secrets/secrets-vault). +1. Customize the cluster setup using various input options. For a full list of available parameters, see the [OpenSearch module documentation](https://github.com/camunda/camunda-tf-eks-module/blob/2.6.0/modules/opensearch/README.md). 
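+Once the domain has been created (after the execution step below), you can sanity-check it with the AWS CLI. The domain name is a placeholder and must match what you configured in `opensearch.tf`:
+
+```bash
+# Placeholder domain name - use the name you set in opensearch.tf
+DOMAIN_NAME="camunda-opensearch"
+
+# Confirm the domain is no longer processing, and check its endpoint and whether advanced security (fine-grained access control) is enabled
+aws opensearch describe-domain --domain-name "$DOMAIN_NAME" --region "$AWS_REGION" \
+  --query 'DomainStatus.{Processing:Processing,Endpoints:Endpoints,AdvancedSecurity:AdvancedSecurityOptions.Enabled}'
+```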
+ +### Define outputs + +**Terraform** allows you to define outputs, which make it easier to retrieve important values generated during execution, such as database endpoints and other necessary configurations for Helm setup. + +Each module that you have previously set up contains an output definition at the end of the file. You can adjust them to your needs. + +Outputs allow you to easily reference the **cert-manager** ARN, **external-dns** ARN, and the endpoints for both **PostgreSQL** and **OpenSearch** in subsequent steps or scripts, streamlining your deployment process. ### Execution -1. Open a terminal in the created Terraform folder where `config.tf` and `cluster.tf` are. -2. Initialize the working directory: +:::note Secret management -```hcl -terraform init -``` +We strongly recommend managing sensitive information such as the OpenSearch, Aurora username and password using a secure secrets management solution like HashiCorp Vault. For details on how to inject secrets directly into Terraform via Vault, see the [Terraform Vault Secrets Injection Guide](https://developer.hashicorp.com/terraform/tutorials/secrets/secrets-vault). + +::: -3. Apply the configuration files: +1. Open a terminal in the created Terraform folder where `config.tf` and other `.tf` files are. -```hcl -terraform apply -``` +2. Plan the configuration files: -4. After reviewing the plan, you can type `yes` to confirm and apply the changes. + ```bash + terraform plan -out cluster.plan # describe what will be created + ``` -At this point, Terraform will create the Amazon EKS cluster with all the necessary configurations. The completion of this process may require approximately 20-30 minutes. +3. After reviewing the plan, you can confirm and apply the changes. -## (Optional) AWS IAM access management + ```bash + terraform apply cluster.plan # apply the creation + ``` -Kubernetes access is divided into two distinct layers. The first involves AWS IAM permissions, which enable basic Amazon EKS functionalities such as using the Amazon EKS UI and generating Amazon EKS access through the AWS CLI. The second layer provides access within the cluster itself, determining the user's permissions within the Kubernetes cluster. +Terraform will now create the Amazon EKS cluster with all the necessary configurations. The completion of this process may require approximately 20-30 minutes for each component. -As a result, we must initially grant the user adequate AWS IAM permissions and subsequently assign them a specific role within the Kubernetes cluster for proper access management. +### Reference files -### AWS IAM permissions +Depending on the installation path you have chosen, you can find the reference files used on this page: -A minimum set of permissions is required to access an Amazon EKS cluster to allow a user to execute `aws eks update-kubeconfig` to update the local `kubeconfig` with cluster access to the Amazon EKS cluster. +- **Standard installation:** [Reference Files](https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/) +- **IRSA Installation:** [Reference Files](https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/) -The policy should look as follows and can be restricted to specific Amazon EKS clusters if required: +## 2. 
Preparation for Camunda 8 installation -```json -cat <./policy-eks.json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "eks:DescribeCluster", - "eks:ListClusters" - ], - "Resource": "*" - } - ] -} -EOF -``` +### Access the created EKS cluster -Via the AWS CLI, you can run the following to create the above policy in AWS IAM. +You can gain access to the Amazon EKS cluster via the `AWS CLI` using the following command: ```shell -aws iam create-policy --policy-name "BasicEKSPermissions" --policy-document file://policy-eks.json +export CLUSTER_NAME="$(terraform console << update-kubeconfig --name +kubectl create namespace camunda ``` -### Terraform AWS IAM permissions - -The user creating the Amazon EKS cluster has admin access by default. -To manage user access use the `access_entries` configuration introduced in module version [2.0.0](https://github.com/camunda/camunda-tf-eks-module/releases/tag/2.0.0): - -```hcl -access_entries = { - example = { - kubernetes_groups = [] - principal_arn = "" - - policy_associations = { - example = { - policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" - access_scope = { - namespaces = ["default"] - type = "namespace" - } - } - } - } -} +In the remainder of the guide, we reference the `camunda` namespace to create some required resources in the Kubernetes cluster, such as secrets or one-time setup jobs. + +### Export values for the Helm chart + +After configuring and deploying your infrastructure with Terraform, follow these instructions to export key values for use in Helm charts to deploy [Camunda 8 on Kubernetes](./eks-helm.md). + +The following commands will export the required outputs as environment variables. You may need to omit some if you have chosen not to use certain modules. These values will be necessary for deploying Camunda 8 with Helm charts: + + + + +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/export-helm-values.sh ``` -In this updated configuration: + + + -- `principal_arn` should be replaced with the ARN of the IAM user or role. -- `policy_associations` allow you to associate policies for fine-grained access control. +```bash reference +https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/procedure/export-helm-values.sh +``` -For a list of policies, please visit the [AWS EKS Access Policies documentation](https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html). +:::note IRSA users -:::info +To authenticate and authorize access to PostgreSQL and OpenSearch, **you do not need to export the PostgreSQL or OpenSearch passwords**, IRSA will handle the authentication. -Please note that the version 2.x.x of this module no longer supports direct mappings via `aws_auth_roles` and `aws_auth_users`. If you are upgrading from version [1.x.x](https://github.com/camunda/camunda-tf-eks-module/releases/tag/1.0.3), fork the module repository and follow the official AWS instructions for managing the `aws-auth` ConfigMap. -For more details, refer to the [official upgrade guide](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md). +**However**, you will still need to export the relevant usernames and other settings to Helm. ::: -## Outputs + + -Terraform can define outputs to make the retrieval of values generated as part of the execution easier; for example, DB endpoints or values required for the Helm setup. 
+Ensure that you use the actual values you passed to the Terraform module during the setup of PostgreSQL and OpenSearch. -1. In the folder where your `config.tf` resides, create an additional `output.tf`. -2. Paste the following content to expose those variables: +### Configure the database and associated access -```hcl -output "cert_manager_arn" { - value = module.eks_cluster.cert_manager_arn - description = "The Amazon Resource Name (ARN) of the AWS IAM Roles for Service Account mapping for the cert-manager" -} +As you now have a database, you need to create dedicated databases for each Camunda component and an associated user that have a configured access. Follow these steps to create the database users and configure access. -output "external_dns_arn" { - value = module.eks_cluster.external_dns_arn - description = "The Amazon Resource Name (ARN) of the AWS IAM Roles for Service Account mapping for the external-dns" -} +You can access the created database in two ways: -output "postgres_endpoint" { - value = module.postgresql.aurora_endpoint - description = "The Postgres endpoint URL" -} -``` +1. **Bastion host:** Set up a bastion host within the same network to securely access the database. +2. **Pod within the EKS cluster:** Deploy a pod in your EKS cluster equipped with the necessary tools to connect to the database. -3. Run `terraform apply` again to print the outputs in the terraform state. +The choice depends on your infrastructure setup and security preferences. In this guide, we'll use a pod within the EKS cluster to configure the database. -We can now export those values to environment variables to be used by Helm charts: +1. In your terminal, set the necessary environment variables that will be substituted in the setup manifest: -```shell -export CERT_MANAGER_IRSA_ARN=$(terraform output -raw cert_manager_arn) + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/vars-create-db.sh + ``` -export EXTERNAL_DNS_IRSA_ARN=$(terraform output -raw external_dns_arn) + A **Kubernetes job** will connect to the database and create the necessary users with the required privileges. The script installs the necessary dependencies and runs SQL commands to create the IRSA user and assign it the correct roles and privileges. -export DB_HOST=$(terraform output -raw postgres_endpoint) -``` +2. Create a secret that references the environment variables: -4. Export required values for the [Camunda 8 on Kubernetes](./eks-helm.md) guide. The values will likely differ based on your definitions in the [PostgreSQL setup](#postgresql-module), so ensure you use the values passed to the Terraform module. + + -```shell -# Example guide values, ensure you use the values you pass to the Terraform module -export PG_USERNAME="secret_user" -export PG_PASSWORD="secretvalue%23" -export DEFAULT_DB_NAME="camunda" -``` + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/procedure/create-setup-db-secret.sh + ``` + + This command creates a secret named `setup-db-secret` and dynamically populates it with the values from your environment variables. + + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-db-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. 
+ + + + + + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/procedure/create-setup-db-secret.sh + ``` + + This command creates a secret named `setup-db-secret` and dynamically populates it with the values from your environment variables. + + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-db-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. + + + + +3. Save the following manifest to a file, for example, `setup-postgres-create-db.yml`. + + + + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/setup-postgres-create-db.yml + ``` + + + + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/setup-postgres-create-db.yml + ``` + + + + +4. Apply the manifest: + + ```bash + kubectl apply -f setup-postgres-create-db.yml --namespace camunda + ``` + + Once the secret is created, the **Job** manifest from the previous step can consume this secret to securely access the database credentials. + +5. Once the job is created, monitor its progress using: + + ```bash + kubectl get job/create-setup-user-db --namespace camunda --watch + ``` + + Once the job shows as `Completed`, the users and databases will have been successfully created. + +6. View the logs of the job to confirm that the users were created and privileges were granted successfully: + + ```bash + kubectl logs job/create-setup-user-db --namespace camunda + ``` + +7. Clean up the resources: + + ```bash + kubectl delete job create-setup-user-db --namespace camunda + kubectl delete secret setup-db-secret --namespace camunda + ``` + +Running these commands cleans up both the job and the secret, ensuring that no unnecessary resources remain in the cluster. + +### Configure OpenSearch fine grained access control + +As you now have an OpenSearch domain, you need to configure the related access for each Camunda component. + +You can access the created OpenSearch domain in two ways: + +1. **Bastion host:** Set up a bastion host within the same network to securely access the OpenSearch domain. +2. **Pod within the EKS cluster:** Alternatively, deploy a pod in your EKS cluster equipped with the necessary tools to connect to the OpenSearch domain. + +The choice depends on your infrastructure setup and security preferences. In this tutorial, we'll use a pod within the EKS cluster to configure the domain. + + + + +The standard installation comes already pre-configured, and no additional steps are required. + + + + +1. In your terminal, set the necessary environment variables that will be substituted in the setup manifest: + + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/procedure/vars-create-os.sh + ``` + + A **Kubernetes job** will connect to the OpenSearch dommain and configure it. + +1. Create a secret that references the environment variables: + + ```bash reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/procedure/create-setup-os-secret.sh + ``` + + This command creates a secret named `setup-os-secret` and dynamically populates it with the values from your environment variables. 
+ + After running the above command, you can verify that the secret was created successfully by using: + + ```bash + kubectl get secret setup-os-secret -o yaml --namespace camunda + ``` + + This should display the secret with the base64 encoded values. + +1. Save the following manifest to a file, for example, `setup-opensearch-fgac.yml`. + + ```yaml reference + https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6-irsa/setup-opensearch-fgac.yml + ``` + +1. Apply the manifest: + + ```bash + kubectl apply -f setup-opensearch-fgac.yml --namespace camunda + ``` + + Once the secret is created, the **Job** manifest from the previous step can consume this secret to securely access the OpenSearch domain credentials. + +1. Once the job is created, monitor its progress using: + + ```bash + kubectl get job/setup-opensearch-fgac --namespace camunda --watch + ``` + + Once the job shows as `Completed`, the OpenSearch domain is configured correctly for fine grained access control. + +1. View the logs of the job to confirm that the privileges were granted successfully: + + ```bash + kubectl logs job/setup-opensearch-fgac --namespace camunda + ``` + +1. Clean up the resources: + + ```bash + kubectl delete job setup-opensearch-fgac --namespace camunda + kubectl delete secret setup-os-secret --namespace camunda + ``` + +Running these commands will clean up both the job and the secret, ensuring that no unnecessary resources remain in the cluster. + + + -## Next steps +## 3. Install Camunda 8 using the Helm chart -Install Camunda 8 using Helm charts by following our installation guide [Camunda 8 on Kubernetes](./eks-helm.md). +Now that you've exported the necessary values, you can proceed with installing Camunda 8 using Helm charts. Follow the guide [Camunda 8 on Kubernetes](./eks-helm.md) for detailed instructions on deploying the platform to your Kubernetes cluster. diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/local/c8run.md b/versioned_docs/version-8.6/self-managed/setup/deploy/local/c8run.md index 13f06b5603f..de44b096321 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/local/c8run.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/local/c8run.md @@ -36,7 +36,7 @@ If no version of Java is found, follow your chosen installation's instructions f ## Install and start Camunda 8 Run -1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/c8run-8.6.2) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. +1. Download the [latest release of Camunda 8 Run](https://github.com/camunda/camunda/releases/tag/8.6.6) for your operating system and architecture. Opening the .tgz file extracts the Camunda 8 Run script into a new directory. 2. Navigate to the new `c8run` directory. 3. Start Camunda 8 Run by running `./start.sh` (or `.\c8run.exe start` on Windows) in your terminal. 
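+As a rough sketch of these steps on Linux or macOS, a session might look like the following; the archive file name is an assumption and depends on the release, operating system, and architecture you downloaded:
+
+```bash
+# Hedged sketch of the install steps above; the archive name is an assumption.
+tar -xzf camunda8-run-8.6.6-linux-x86_64.tgz   # extract the downloaded release into a new c8run directory
+cd c8run                                       # navigate to the new directory
+./start.sh                                     # start Camunda 8 Run (on Windows: .\c8run.exe start)
+```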
diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/local/local-kubernetes-cluster.md b/versioned_docs/version-8.6/self-managed/setup/deploy/local/local-kubernetes-cluster.md index 158da8cc6f8..82762e83662 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/local/local-kubernetes-cluster.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/local/local-kubernetes-cluster.md @@ -54,9 +54,7 @@ helm repo update If you are deploying Camunda 8 with Ingress configuration, make sure to add additional values to the file you just downloaded `camunda-platform-core-kind-values.yaml` as described in [connecting to Camunda 8 components](#connecting-to-camunda-8-components). ::: -3. Install Camunda 8 using the `camunda-platform-core-kind-values.yaml` file you downloaded previously. This file might contain additional values if you are adding Ingress, TLS, or using a variety of other configuration properties. See [Camunda Helm chart parameters](https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters). - -4. Execute the following command: +3. Install Camunda 8 using the `camunda-platform-core-kind-values.yaml` file you downloaded previously. This file might contain additional values if you are adding Ingress, TLS, or using a variety of other configuration properties. See [Camunda Helm chart parameters](https://artifacthub.io/packages/helm/camunda/camunda-platform#parameters). Execute the following command: ```sh helm install camunda-platform camunda/camunda-platform \ @@ -67,7 +65,7 @@ This will deploy Camunda 8 components (Optimize, Connectors, and Zeebe), but wit Depending on your machine hardware and internet connection speed, the services might take some time to get started as it will download the Docker images of all Camunda 8 components to your local kind cluster. -5. Check that each pod is running and ready with `kubectl get pods`. If one or more of your pods are pending for long time, it means it cannot be scheduled onto a node. Usually, this happens because there are insufficient resources that prevent it. Use the `kubectl describe ` command to check its status. +4. Check that each pod is running and ready with `kubectl get pods`. If one or more of your pods are pending for long time, it means it cannot be scheduled onto a node. Usually, this happens because there are insufficient resources that prevent it. Use the `kubectl describe ` command to check its status. ## Connecting to Camunda 8 components @@ -91,7 +89,7 @@ First, port-forward each of the components. Use a separate terminal for each com ## Connecting to the workflow engine -To interact with the Camunda workflow engine via Zeebe Gateway using [zbctl](/apis-tools/community-clients/cli-client/cli-get-started.md) or a local client/worker from outside the Kubernetes cluster, run `kubectl port-forward` to the Zeebe gateway as follows: +To interact with the Camunda workflow engine via the Zeebe Gateway using [zbctl](/apis-tools/community-clients/cli-client/cli-get-started.md) or a local client/worker from outside the Kubernetes cluster, run `kubectl port-forward` to the Zeebe Gateway as follows: ```sh kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 @@ -169,7 +167,7 @@ Make sure all pods are running with the `kubectl get pods --namespace ingress-ng ## Ingress configuration in Camunda 8 Helm charts -In this document, we will use the combined Ingress configuration. However, there is one quirk with this particular setup to be aware of - Zeebe Gateway uses gRPC, which uses HTTP/2. 
This means the Zeebe Gateway has to use its own subdomain `zeebe.camunda.local` instead of context path (such as `/zeebe`). +In this document, we will use the combined Ingress configuration. However, there is one quirk with this particular setup to be aware of - the Zeebe Gateway uses gRPC, which uses HTTP/2. This means the Zeebe Gateway has to use its own subdomain `zeebe.camunda.local` instead of context path (such as `/zeebe`). Add the following values to `camunda-platform-core-kind-values.yaml` to allow Camunda 8 components to be discovered by the Ingress controller. diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/local/manual.md b/versioned_docs/version-8.6/self-managed/setup/deploy/local/manual.md index 053d9b8454b..d03b18b592c 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/local/manual.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/local/manual.md @@ -70,7 +70,7 @@ For **Windows users**, take the following steps: 4. Navigate to the `bin` folder. 5. Start the broker by double-clicking on the `broker.bat` file. -Once the Zeebe broker has started, it should produce the following output: +Once the Zeebe Broker has started, it should produce the following output: ```log 23:39:13.246 [] [main] INFO io.camunda.zeebe.broker.system - Scheduler configuration: Threads{cpu-bound: 2, io-bound: 2}. diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/openshift/redhat-openshift.md b/versioned_docs/version-8.6/self-managed/setup/deploy/openshift/redhat-openshift.md index b512b1636a3..2dc2a19d437 100644 --- a/versioned_docs/version-8.6/self-managed/setup/deploy/openshift/redhat-openshift.md +++ b/versioned_docs/version-8.6/self-managed/setup/deploy/openshift/redhat-openshift.md @@ -33,7 +33,7 @@ We conduct testing and ensure compatibility against the following OpenShift vers | 4.13.x | November 17, 2024 | :::caution -Compatibility is not guaranteed for OpenShift versions no longer supported by Red Hat, as per the End of Support Date. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift). +Camunda 8 supports OpenShift versions in the Red Hat General Availability, Full Support, and Maintenance Support life cycle phases. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift). 
::: ## Deploying Camunda 8 in OpenShift diff --git a/versioned_docs/version-8.6/self-managed/setup/guides/accessing-components-without-ingress.md b/versioned_docs/version-8.6/self-managed/setup/guides/accessing-components-without-ingress.md index 706b4293756..0051b6f00d6 100644 --- a/versioned_docs/version-8.6/self-managed/setup/guides/accessing-components-without-ingress.md +++ b/versioned_docs/version-8.6/self-managed/setup/guides/accessing-components-without-ingress.md @@ -12,13 +12,13 @@ You need to keep `port-forward` running all the time to communicate with the rem ## Accessing workflow engine -To interact with Camunda workflow engine via [Zeebe Gateway](/self-managed/zeebe-deployment/configuration/gateway.md) using [zbctl](/apis-tools/community-clients/cli-client/index.md) or a local client/worker from outside the Kubernetes cluster, run `kubectl port-forward` to the Zeebe cluster as following: +To interact with Camunda workflow engine via the [Zeebe Gateway](/self-managed/zeebe-deployment/configuration/gateway.md) using [zbctl](/apis-tools/community-clients/cli-client/index.md) or a local client/worker from outside the Kubernetes cluster, run `kubectl port-forward` to the Zeebe cluster as following: ``` kubectl port-forward svc/camunda-zeebe-gateway 26500:26500 ``` -Now, you can connect and execute operations against your new Zeebe cluster. This allows you to use `zbctl` as a command line interface to read and create resources inside the Zeebe broker. +Now, you can connect and execute operations against your new Zeebe cluster. This allows you to use `zbctl` as a command line interface to read and create resources inside the Zeebe Broker. :::note Accessing the Zeebe cluster directly using `kubectl port-forward` is recommended for development purposes. diff --git a/versioned_docs/version-8.6/self-managed/setup/guides/connect-to-an-oidc-provider.md b/versioned_docs/version-8.6/self-managed/setup/guides/connect-to-an-oidc-provider.md index f51e1f47b3a..f70c37d5614 100644 --- a/versioned_docs/version-8.6/self-managed/setup/guides/connect-to-an-oidc-provider.md +++ b/versioned_docs/version-8.6/self-managed/setup/guides/connect-to-an-oidc-provider.md @@ -34,11 +34,14 @@ configuration](#component-specific-configuration) to ensure the components are c

    Steps

    1. In your OIDC provider, create an application for each of the components you want to connect. The expected redirect URI of the component you are configuring an app for can be found in [component-specific configuration](#component-specific-configuration). -2. Make a note of the following values for each application you create: +2. For all Components, ensure the appropriate application type is used: + - **Operate, Tasklist, Optimize, Identity:** Web applications requiring confidential access/a confidential client + - **Modeler, Console:** Single-page applications requiring public access/a public client +3. Make a note of the following values for each application you create: - Client ID - Client secret - Audience -3. Set the following environment variables for the component you are configuring an app for: +4. Set the following environment variables for the component you are configuring an app for: @@ -103,7 +106,7 @@ global: -:::warning +:::note Once set, you cannot update your initial claim name and value using environment or Helm values. You must change these values directly in the database. ::: @@ -124,7 +127,7 @@ Ensure you register a new application for each component. 2. Navigate to the new application's **Overview** page, and make note of the **Client ID**. 3. Within your new application, [configure a platform](https://learn.microsoft.com/en-gb/entra/identity-platform/quickstart-register-app#configure-platform-settings) for the appropriate component: - **Web**: Operate, Tasklist, Optimize, Identity - - **Single-page application**: Modeler + - **Single-page application**: Modeler, Console 4. Add your component's **Microsoft Entra ID** redirect URI, found under [Component-specific configuration](#component-specific-configuration). 5. [Create a new client secret](https://learn.microsoft.com/en-gb/entra/identity-platform/quickstart-register-app?tabs=client-secret#add-credentials), and note the new secret's value for later use. 6. Set the following environment variables for the component you are configuring an app for: @@ -135,8 +138,8 @@ Ensure you register a new application for each component. ``` CAMUNDA_IDENTITY_TYPE=MICROSOFT CAMUNDA_IDENTITY_BASE_URL= - CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 - CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER=https://login.microsoftonline.com//v2.0 + CAMUNDA_IDENTITY_ISSUER_BACKEND_URL=https://login.microsoftonline.com//v2.0 CAMUNDA_IDENTITY_CLIENT_ID= CAMUNDA_IDENTITY_CLIENT_SECRET= CAMUNDA_IDENTITY_AUDIENCE= @@ -152,13 +155,13 @@ Ensure you register a new application for each component. 
global: identity: auth: - issuer: https://login.microsoftonline.com//v2.0 + issuer: https://login.microsoftonline.com//v2.0 # this is used for container to container communication - issuerBackendUrl: https://login.microsoftonline.com//v2.0 - tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token - jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys + issuerBackendUrl: https://login.microsoftonline.com//v2.0 + tokenUrl: https://login.microsoftonline.com//oauth2/v2.0/token + jwksUrl: https://login.microsoftonline.com//discovery/v2.0/keys type: "MICROSOFT" - publicIssuerUrl: https://login.microsoftonline.com//v2.0 + publicIssuerUrl: https://login.microsoftonline.com//v2.0 identity: clientId: existingSecret: @@ -184,7 +187,7 @@ global: zeebe: clientId: audience: - existingSecret: + existingSecret: tokenScope: "/.default" webModeler: clientId: diff --git a/versioned_docs/version-8.6/self-managed/setup/guides/ingress-setup.md b/versioned_docs/version-8.6/self-managed/setup/guides/ingress-setup.md index 4c67751e418..ed13861bf1c 100644 --- a/versioned_docs/version-8.6/self-managed/setup/guides/ingress-setup.md +++ b/versioned_docs/version-8.6/self-managed/setup/guides/ingress-setup.md @@ -13,7 +13,7 @@ The separated Ingress configuration has been deprecated in version 8.6. To ensur Camunda 8 Self-Managed has multiple web applications and gRPC services. Both can be accessed externally using Ingress. There are two ways to do this: -1. **Combined setup:** In this setup, there are two Ingress objects: one Ingress object for all Camunda 8 web applications using a single domain. Each application has a sub-path e.g. `camunda.example.com/operate`, and `camunda.example.com/optimize` and another Ingress which uses gRPC protocol for Zeebe Gateway e.g. `zeebe.camunda.example.com`. +1. **Combined setup:** In this setup, there are two Ingress objects: one Ingress object for all Camunda 8 web applications using a single domain. Each application has a sub-path e.g. `camunda.example.com/operate`, and `camunda.example.com/optimize` and another Ingress which uses gRPC protocol for the Zeebe Gateway e.g. `zeebe.camunda.example.com`. 2. **Separated setup:** In this setup, each component has its own Ingress/host e.g. `operate.camunda.example.com`, `optimize.camunda.example.com`, `zeebe.camunda.example.com`, etc. There are no significant differences between the two setups. Rather, they both offer flexibility for different workflows. @@ -25,7 +25,7 @@ Camunda 8 Helm chart doesn't manage or deploy Ingress controllers, it only deplo ## Preparation - An Ingress controller should be deployed in advance. The examples below use the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), but any Ingress controller could be used by setting `ingress.className`. -- TLS configuration is not handled in the examples because it varies between different workflows. It could be configured directly using `ingress.tls` options or via an external tool like [Cert-Manager](https://github.com/cert-manager/cert-manager) using `ingress.annotations`. For more details, check available [configuration options](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-latest#configuration). +- TLS configuration is not handled in the examples because it varies between different workflows. It could be configured directly using `ingress.tls` options or via an external tool like [Cert-Manager](https://github.com/cert-manager/cert-manager) using `ingress.annotations`. 
For more details, check available [configuration options](https://github.com/camunda/camunda-platform-helm/tree/main/charts/camunda-platform-8.6#configuration). ## Configuration @@ -38,7 +38,7 @@ Camunda 8 Helm chart doesn't manage or deploy Ingress controllers, it only deplo -In this setup, a single Ingress/domain is used to access Camunda 8 web applications, and another for Zeebe Gateway. By default, all web applications use `/` as a base, so we just need to set the context path, Ingress configuration, and authentication redirect URLs. +In this setup, a single Ingress/domain is used to access Camunda 8 web applications, and another for the Zeebe Gateway. By default, all web applications use `/` as a base, so we just need to set the context path, Ingress configuration, and authentication redirect URLs. ![Camunda 8 Self-Managed Architecture Diagram - Combined Ingress](../../assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png) diff --git a/versioned_docs/version-8.6/self-managed/setup/guides/using-existing-opensearch.md b/versioned_docs/version-8.6/self-managed/setup/guides/using-existing-opensearch.md index c8c7f96d0d1..9adb3d4244d 100644 --- a/versioned_docs/version-8.6/self-managed/setup/guides/using-existing-opensearch.md +++ b/versioned_docs/version-8.6/self-managed/setup/guides/using-existing-opensearch.md @@ -18,10 +18,8 @@ Otherwise, if it is intended to connect to Amazon OpenSearch Service with basic ## Values file -:::caution - -The migration step within Optimize is currently not supported with OpenSearch. Disable the migration as shown in the example below. - +:::note +Not all Optimize features are supported when using OpenSearch as a database. For a full list of the features that are currently supported, please refer to the [Camunda 8](https://github.com/camunda/issues/issues/635) OpenSearch features. ::: The following values can be configured in the Camunda 8 Self-Managed Helm chart in order to use Amazon OpenSearch Service: @@ -42,10 +40,6 @@ global: host: opensearch.example.com port: 443 -optimize: - migration: - enabled: false - elasticsearch: enabled: false ``` @@ -56,7 +50,7 @@ If you do not wish to specify the username and password in plaintext within the ```yaml global: - opensearcn: + opensearch: auth: existingSecret: secretName existingSecretKey: secretKey diff --git a/versioned_docs/version-8.6/self-managed/setup/install.md b/versioned_docs/version-8.6/self-managed/setup/install.md index b3a083fd18e..550f696068b 100644 --- a/versioned_docs/version-8.6/self-managed/setup/install.md +++ b/versioned_docs/version-8.6/self-managed/setup/install.md @@ -34,18 +34,18 @@ The following charts will be installed as part of Camunda 8 Self-Managed: - **Web Modeler**: Deploys the Web Modeler component that allows you to model BPMN processes in a collaborative way. - _Note_: The chart is disabled by default and needs to be [enabled explicitly](#install-web-modeler). - **Console**: Deploys Camunda Console Self-Managed. - - _Note_: The chart is disabled by default and needs to be [enabled explicitly](#install-console) as the Console is only available to enterprise customers. + - _Note_: The chart is disabled by default and needs to be [enabled explicitly](#install-console). :::note Amazon OpenSearch Helm support The existing Helm charts use the Elasticsearch configurations by default. The Helm charts can still be used to connect to Amazon OpenSearch Service. Refer to [using Amazon OpenSearch Service](/self-managed/setup/guides/using-existing-opensearch.md). 
**Zeebe**: Configure the [OpenSearch exporter](/self-managed/zeebe-deployment/exporters/opensearch-exporter.md). -**Operate** & **Tasklist**: These components use the same parameters for both Elasticsearch and OpenSearch. Replace the `elasticsearch` part of the relevant configuration key with `opensearch`, together with its appropriate value. +**Operate**, **Tasklist**, and **Optimize**: These components use the same parameters for both Elasticsearch and OpenSearch. Replace the `elasticsearch` part of the relevant configuration key with `opensearch`, together with its appropriate value. -For example, `CAMUNDA_OPERATE_ELASTICSEARCH_URL` becomes `CAMUNDA_OPERATE_OPENSEARCH_URL`. +For example, `CAMUNDA_OPERATE_ELASTICSEARCH_URL` becomes `CAMUNDA_OPERATE_OPENSEARCH_URL`. In the case of Optimize, please make sure all variables have the proper `CAMUNDA_OPTIMIZE` prefix, i.e. `OPTIMIZE_ELASTICSEARCH_HTTP_PORT` becomes `CAMUNDA_OPTIMIZE_OPENSEARCH_HTTP_PORT`. -Refer to the [Operate](/self-managed/operate-deployment/operate-configuration.md#settings-for-opensearch) and [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#elasticsearch-or-opensearch) configuration documentation for additional component configuration parameters to update. +Refer to the [Operate](/self-managed/operate-deployment/operate-configuration.md#settings-for-opensearch), [Tasklist](/self-managed/tasklist-deployment/tasklist-configuration.md#elasticsearch-or-opensearch) and [Optimize]($optimize$/self-managed/optimize-deployment/configuration/system-configuration/#opensearch) configuration documentation for additional component configuration parameters to update. ::: ![Camunda 8 Self-Managed Architecture Diagram](../assets/camunda-platform-8-self-managed-architecture-diagram-combined-ingress.png) @@ -280,11 +280,11 @@ global: Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on startup or functionality, with the exception that Web Modeler has a limitation of five users. ::: -## Configuring Enterprise components and Connectors +## Configuring Web Modeler, Console, and Connectors -### Enterprise components secret +### Web Modeler and Console secrets -Enterprise components such as Console are published in Camunda's private Docker registry (registry.camunda.cloud) and are exclusive to enterprise customers. These components are not available in public repositories. +The Console and Web Modeler Components are published in Camunda's private Docker registry (registry.camunda.cloud) and are under a [proprietary license](/reference/licenses.md#web-modeler-and-console). These components are not available in public repositories. To enable Kubernetes to pull the images from this registry, first [create an image pull secret](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) using the credentials you received from Camunda: @@ -339,7 +339,7 @@ To set up Web Modeler, you need to provide the following required configuration - Configure the database connection - Web Modeler requires a PostgreSQL database as persistent data storage (other database systems are currently not supported). - _Option 1_: Set `postgresql.enabled: true`. This will install a new PostgreSQL instance as part of the Helm release (using the [PostgreSQL Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) by Bitnami as a dependency). 
- - _Option 2_: Set `postgresql.enabled: false` and configure a [connection to an external database](#optional-configure-external-database). + - _Option 2_: Set `postgresql.enabled: false` and configure a connection to an external database (see the second example below). We recommend specifying these values in a YAML file that you pass to the `helm install` command. A minimum configuration file would look as follows: @@ -376,11 +376,11 @@ For more details, check [Web Modeler Helm values](https://artifacthub.io/package ### Install Console -Console Self-Managed is an [Enterprise component](/reference/licenses.md#console), which means it is disabled by default in the Camunda 8 Helm chart since it requires an Enterprise license to access the Camunda container registry. +Console Self-Managed is disabled by default in the Camunda 8 Helm chart, as it requires a [proprietary license](/reference/licenses.md#web-modeler-and-console) to access the Camunda container registry. To install Console, two steps are required: -1. [Create a secret with Camunda registry credentials](#enterprise-components-secret). +1. [Create a secret with Camunda registry credentials](#web-modeler-console-secrets). 2. Enable Console, and reference the created Kubernetes secret object via Helm values. ```yaml @@ -418,5 +418,5 @@ For upgrading the Camunda Helm chart from one release to another, perform a [Hel ## General notes -- **Zeebe gateway** is deployed as a stateless service. We support [Kubernetes startup and liveness probes](/self-managed/zeebe-deployment/configuration/gateway-health-probes.md) for Zeebe. +- **Zeebe Gateway** is deployed as a stateless service. We support [Kubernetes startup and liveness probes](/self-managed/zeebe-deployment/configuration/gateway-health-probes.md) for Zeebe. - **Zeebe broker nodes** need to be deployed as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) to preserve the identity of cluster nodes. StatefulSets require persistent storage, which must be allocated in advance. Depending on your cloud provider, the persistent storage differs as it is provider-specific. diff --git a/versioned_docs/version-8.6/self-managed/setup/upgrade.md b/versioned_docs/version-8.6/self-managed/setup/upgrade.md index 633756166ab..01ff728a945 100644 --- a/versioned_docs/version-8.6/self-managed/setup/upgrade.md +++ b/versioned_docs/version-8.6/self-managed/setup/upgrade.md @@ -9,7 +9,9 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; :::note -When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the **next major version**. +When upgrading to a new version of the Camunda 8 Helm charts, we recommend updating to the **latest patch** release of the next **major** version of the chart. + +For example, if the current Helm chart version is 10.x.x, and the latest next major version is 11.0.1, the recommended upgrade is to 11.0.1 (not 11.0.0). ::: Upgrading between minor versions of the Camunda Helm chart may require [configuration changes](#update-your-configuration). To upgrade between patch versions or when no configuration changes are required, see the [`helm upgrade`](#identity-disabled) instructions. @@ -114,9 +116,7 @@ Ensure to use Helm CLI with version `3.14.3` or more. The upgrade could fail to #### Deprecation notes -The following keys in the values file have been changed in Camunda Helm chart v10.0.2. 
For compatibility, the keys are deprecated in the Camunda release cycle 8.5 and will be removed in the Camunda 8.6 release (October 2024). - -We highly recommend updating the keys in your values file rather than waiting until the 8.6 release. +The following keys were deprecated in 8.5, and their removal has been delayed until the release of Camunda 8.7 (January 2025). We highly recommend updating the keys in your values file rather than waiting until the 8.7 release. | Component | Old Key | New Key | | ------------- | ---------------------------------- | ----------------------------------- | @@ -136,8 +136,6 @@ We highly recommend updating the keys in your values file rather than waiting un | | `global.elasticsearch.host` | `global.elasticsearch.url.host` | | | `global.elasticsearch.port` | `global.elasticsearch.url.port` | -Also, the Web Modeler PostgreSQL key will be changed in the 8.6 release (the new key `webModelerPostgresql` will not work in any chart using Camunda 8.5). - | Component | Old Key | New Key | | ----------- | ------------ | ---------------------- | | Web Modeler | diff --git a/versioned_docs/version-8.6/self-managed/tasklist-deployment/data-retention.md b/versioned_docs/version-8.6/self-managed/tasklist-deployment/data-retention.md index ce9da9ca7c4..94c86bdd23a 100644 --- a/versioned_docs/version-8.6/self-managed/tasklist-deployment/data-retention.md +++ b/versioned_docs/version-8.6/self-managed/tasklist-deployment/data-retention.md @@ -56,6 +56,4 @@ Only indices containing dates in their suffix may be deleted. OpenSearch does not support the Index Lifecycle Management (ILM) Policy, and instead uses Index State Management (ISM). The same environment variables that are used to activate ILM on Elasticsearch can be used to activate ISM on OpenSearch. -```yaml -As of the 8.4 release, Tasklist is now compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. -``` +As of the 8.4 release, Tasklist is compatible with [Amazon OpenSearch](https://aws.amazon.com/de/opensearch-service/) 2.5.x. Note that using Amazon OpenSearch requires [setting up a new Camunda installation](/self-managed/setup/overview.md). A migration from previous versions or Elasticsearch environments is currently not supported. diff --git a/versioned_docs/version-8.6/self-managed/tasklist-deployment/importer-and-archiver.md b/versioned_docs/version-8.6/self-managed/tasklist-deployment/importer-and-archiver.md index c75e1fe9655..3950abebce1 100644 --- a/versioned_docs/version-8.6/self-managed/tasklist-deployment/importer-and-archiver.md +++ b/versioned_docs/version-8.6/self-managed/tasklist-deployment/importer-and-archiver.md @@ -32,7 +32,7 @@ Each single importer/archiver node must be configured using the following config | ------------------------------------------ | -------------------------------------------------------------------------------------- | --------------------------------------------------- | | camunda.tasklist.clusterNode.partitionIds | Array of Zeebe partition ids this importer (or archiver) node must be responsible for. | Empty array, meaning all partitions data is loaded. | | camunda.tasklist.clusterNode.nodeCount | Total amount of Importer (or archiver) nodes in the cluster. 
| 1 | -| camunda.tasklist.clusterNode.currentNodeId | Id of current Importer (or archiver) node, starting from 0. | 0 | +| camunda.tasklist.clusterNode.currentNodeId | ID of current Importer (or archiver) node, starting from 0. | 0 | It's enough to configure either `partitionIds` or a pair of `nodeCount` and `currentNodeId`. If you provide `nodeCount` and `currentNodeId`, each node will automatically guess the Zeebe partitions they're responsible for. @@ -86,7 +86,7 @@ You can further parallelize archiver and/or importer within one node using the f | camunda.tasklist.importer.threadsCount | Number of threads in which data will be imported. | 3 | :::note -Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) <= (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. +Parallelization of import and archiving within one node will also happen based on Zeebe partitions, meaning only configurations with (number of nodes) \* (threadsCount) ≤ (total number of Zeebe partitions) will make sense. Too many threads and nodes will still work, but some of them will be idle. ::: ## Archive period diff --git a/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-authentication.md b/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-authentication.md index 81be5e1415d..da8765da0a5 100644 --- a/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-authentication.md +++ b/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-authentication.md @@ -117,7 +117,7 @@ For more information, visit the [Identity documentation](/self-managed/concepts/ ## Use Identity JWT token to access Tasklist API -Tasklist provides a [GraphQL API](/apis-tools/tasklist-api/tasklist-api-overview.md) under the endpoint `/graphql`. Clients can access this API using a JWT access token in an authorization header `Authorization: Bearer `. +Tasklist provides a [REST API](/apis-tools/tasklist-api-rest/tasklist-api-rest-overview.md) under the endpoint `/v1`. Clients can access this API using a JWT access token in an authorization header `Authorization: Bearer `. :::note Be aware a JWT token is intended to be used for M2M communication and is therefore issued for the relevant application, not for the user. @@ -127,7 +127,7 @@ Be aware a JWT token is intended to be used for M2M communication and is therefo 1. [Add an application in Identity](/self-managed/identity/user-guide/additional-features/incorporate-applications.md). 2. [Add permissions to an application](/self-managed/identity/user-guide/additional-features/incorporate-applications.md) for Tasklist API. -3. Obtain a token to access the GraphQL API. +3. Obtain a token to access the REST API. You will need: - `client_id` and `client_secret` from Identity application you created. - URL of the authorization server will look like: `http://:/auth/realms/camunda-platform/protocol/openid-connect/token`, where host and port reference Keycloak URL (e.g. `localhost:18080`). @@ -157,7 +157,7 @@ Take the `access_token` value from the response object and store it as your toke 4. Send the token as an authorization header in each request. In this case, request all tasks. 
```shell -curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer " -d '{"query": "{tasks(query:{}){id name}}"}' http://localhost:8080/graphql +curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer " http://localhost:8080/v1/tasks/search ``` ### User task access restrictions diff --git a/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-configuration.md b/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-configuration.md index c2c7dd029af..ffb2b6b4f35 100644 --- a/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-configuration.md +++ b/versioned_docs/version-8.6/self-managed/tasklist-deployment/tasklist-configuration.md @@ -136,9 +136,9 @@ camunda.tasklist: selfSigned: true ``` -## Zeebe broker connection +## Zeebe Broker connection -Tasklist needs a connection to Zeebe broker to start the import. +Tasklist needs a connection to the Zeebe Broker to start the import. ### Settings to connect diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/broker.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/broker.md index 2c44bdd2466..121a90177ea 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/broker.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/broker.md @@ -2,7 +2,7 @@ id: broker-config title: "Broker configuration" sidebar_label: "Broker configuration" -description: "Let's analyze how to configure the Zeebe broker" +description: "Let's analyze how to configure the Zeebe Broker" --- A complete broker configuration template is available in the [Zeebe repo](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template). @@ -386,7 +386,7 @@ This section contains all cluster related configurations, to setup a zeebe clust | Field | Description | Example Value | | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | -| nodeId | Specifies the unique id of this broker node in a cluster. The id should be between 0 and number of nodes in the cluster (exclusive). This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_NODEID`. | 0 | +| nodeId | Specifies the unique ID of this broker node in a cluster. The ID should be between 0 and number of nodes in the cluster (exclusive). This setting can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_NODEID`. | 0 | | partitionsCount | Controls the number of partitions, which should exist in the cluster. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT`. | 1 | | replicationFactor | Controls the replication factor, which defines the count of replicas per partition. 
The replication factor cannot be greater than the number of nodes in the cluster. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR`. | 1 | | clusterSize | Specifies the zeebe cluster size. This value is used to determine which broker is responsible for which partition. This can also be overridden using the environment variable `ZEEBE_BROKER_CLUSTER_CLUSTERSIZE`. | 1 | @@ -591,11 +591,11 @@ request: ### zeebe.broker.flowControl.request.gradient -| Field | Description | Example Value | -| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_RTTTOLERANCE` | 2.0 | +| Field | Description | Example Value | +| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT_RTTTOLERANCE` | 2.0 | #### YAML snippet @@ -610,12 +610,12 @@ request: ### zeebe.broker.flowControl.request.gradient2 -| Field | Description | Example Value | -| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. 
This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_LONGWINDOW`. | 600 | +| Field | Description | Example Value | +| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_RTTTOLERANCE`. | 2.0 | +| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_FLOWCONTROL_REQUEST_GRADIENT2_LONGWINDOW`. | 600 | #### YAML snippet @@ -742,11 +742,11 @@ backpressure: ### zeebe.broker.backpressure.gradient -| Field | Description | Example Value | -| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. 
A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | +| Field | Description | Example Value | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable ZEEBE_BROKER_BACKPRESSURE_GRADIENT_RTTTOLERANCE | 2.0 | #### YAML snippet @@ -761,12 +761,12 @@ backpressure: ### zeebe.broker.backpressure.gradient2 -| Field | Description | Example Value | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | -| minLimit | The minimum limit. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | -| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | -| rttTolerance | Tolerance for changes from minimum latency. A value >= 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | -| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | +| Field | Description | Example Value | +| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| minLimit | The minimum limit. 
This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_MINLIMIT`. | 10 | +| initialLimit | The initial limit to be used when the broker starts. The limit will be reset to this value when the broker restarts. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_INITIALLIMIT`. | 20 | +| rttTolerance | Tolerance for changes from minimum latency. A value ≥ 1.0 indicating how much change from minimum latency is acceptable before reducing the limit. For example, a value of 2.0 means that a 2x increase in latency is acceptable. This setting can also be overridden using the environment variable `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_RTTTOLERANCE`. | 2.0 | +| longWindow | longWindow is the length of the window (the number of samples) to calculate the exponentially smoothed average latency. This setting can also be overridden using the environment `ZEEBE_BROKER_BACKPRESSURE_GRADIENT2_LONGWINDOW`. | 600 | #### YAML snippet diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/configuration.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/configuration.md index 76e47c4b4d0..2243be60370 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/configuration.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/configuration.md @@ -31,10 +31,10 @@ The default configuration is not suitable for a standalone gateway node. To run We provide templates that contain all possible configuration settings, along with explanations for each setting, though you may find it easier to search through our [broker](broker.md) and [gateway](gateway.md) configuration documentation to adjust the templates: -- [`config/application.yaml` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/blob/main/dist/src/main/config/application.yaml) - Default configuration containing only the most common configuration settings. Use this as the basis for a single broker deployment for test or development. -- [`config/broker.standalone.yaml.template` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) - Complete configuration template for a standalone broker with embedded gateway. Use this as the basis for a single broker deployment for test or development. -- [`config/broker.yaml.template` Broker Node (without embedded gateway)](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) - Complete configuration template for a broker node without embedded gateway. Use this as the basis for deploying multiple broker nodes as part of a cluster. -- [`config/gateway.yaml.template`](https://github.com/camunda/camunda/blob/main/dist/src/main/config/gateway.yaml.template) - Complete configuration template for a standalone gateway. +- [`config/application.yaml` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/blob/8.6.0/dist/src/main/config/application.yaml) - Default configuration containing only the most common configuration settings. Use this as the basis for a single broker deployment for test or development. 
+- [`config/broker.standalone.yaml.template` Standalone Broker (with embedded gateway)](https://github.com/camunda/camunda/blob/8.6.0/dist/src/main/config/broker.standalone.yaml.template) - Complete configuration template for a standalone broker with embedded gateway. Use this as the basis for a single broker deployment for test or development. +- [`config/broker.yaml.template` Broker Node (without embedded gateway)](https://github.com/camunda/camunda/blob/8.6.0/dist/src/main/config/broker.yaml.template) - Complete configuration template for a broker node without embedded gateway. Use this as the basis for deploying multiple broker nodes as part of a cluster. +- [`config/gateway.yaml.template`](https://github.com/camunda/camunda/blob/8.6.0/dist/src/main/config/gateway.yaml.template) - Complete configuration template for a standalone gateway. :::note These templates also include the corresponding environment variables to use for every setting. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/gateway.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/gateway.md index 2c43c323087..b9b07fa78bc 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/gateway.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/gateway.md @@ -2,7 +2,7 @@ id: gateway-config title: "Gateway configuration" sidebar_label: "Gateway configuration" -description: "Analyze how to configure the Zeebe gateway, including byte sizes, time units, paths, and sample YAML snippets." +description: "Analyze how to configure the Zeebe Gateway, including byte sizes, time units, paths, and sample YAML snippets." --- The Zeebe Gateway can be configured similarly to the broker via the `application.yaml` file or environment variables. A complete gateway configuration template is available in the [Zeebe repository](https://github.com/camunda/camunda/blob/main/dist/src/main/config/gateway.yaml.template). @@ -154,7 +154,7 @@ If you use the Helm charts, both properties are configured for you already. | contactPoint | WARNING: This setting is deprecated! Use initialContactPoints instead. Sets the broker the gateway should initial contact. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_CONTACTPOINT`. | 127.0.0.1:26502 | | requestTimeout | Sets the timeout of requests sent to the broker cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_REQUESTTIMEOUT`. | 15s | | clusterName | Sets name of the Zeebe cluster to connect to. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_CLUSTERNAME`. | zeebe-cluster | -| memberId | Sets the member id of the gateway in the cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERID`. | gateway | +| memberId | Sets the member ID of the gateway in the cluster. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_MEMBERID`. | gateway | | host | Sets the host the gateway node binds to for internal cluster communication. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_HOST`. | 0.0.0.0 | | port | Sets the port the gateway node binds to for internal cluster communication. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_PORT`. 
| 26502 | | advertisedHost | Controls the advertised host; if omitted defaults to the host. This is particularly useful if your gateway stands behind a proxy. This setting can also be overridden using the environment variable `ZEEBE_GATEWAY_CLUSTER_ADVERTISEDHOST`. | 0.0.0.0 | @@ -390,7 +390,8 @@ Each interceptor should be configured with the values described below:
    classNameEntry point of the interceptor, a class which must: + + Entry point of the interceptor, a class which must:
  • implement io.grpc.ServerInterceptor
  • have public visibility
  • have a public default constructor (i.e. no-arg constructor)
  • @@ -437,7 +438,8 @@ Each filter should be configured with the values described below:
    classNameEntry point of the filter, a class which must: + + Entry point of the filter, a class which must:
  • implement jakarta.servlet.Filter
  • have public visibility
  • have a public default constructor (i.e. no-arg constructor)
  • @@ -484,3 +486,9 @@ If you are using an embedded gateway, refer to the [broker configuration guide]( multiTenancy: enabled: true ``` + +### Experimental configuration + +See the experimental section of the [gateway.yaml.template](https://github.com/camunda/camunda/blob/stable/8.6/dist/src/main/config/gateway.yaml.template#L298). + +Be aware that all configuration properties which are part of the experimental section are subject to change and can be dropped at any time. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/priority-election.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/priority-election.md index 4e466a0640a..d7fc45c0413 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/priority-election.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/configuration/priority-election.md @@ -10,8 +10,8 @@ It aims to achieve a more uniform leader distribution by assigning each node a p ## Configuration -Enable priority election by setting `zeebe.broker.raft.enablePriorityElection = "true"` in your config or -by setting the equivalent environment variable `ZEEBE_BROKER_RAFT_ENABLEPRIORITYELECTION="true"`. +Enable priority election by setting `zeebe.broker.cluster.raft.enablePriorityElection=true` in your config or +by setting the equivalent environment variable `ZEEBE_BROKER_CLUSTER_RAFT_ENABLEPRIORITYELECTION=true`. If you are using the fixed partitioning scheme (experimental), you may need [additional configuration](fixed-partitioning.md#priority-election). @@ -19,7 +19,7 @@ If you are using the fixed partitioning scheme (experimental), you may need [add With priority election enabled, election latency and thus failover time increases. -The result of leader election is not deterministic and priority election can only increase the chance of having a +The result of a leader election is not deterministic, and priority election can only increase the chance of having a uniform leader distribution, not guarantee it. -Factors such as high load can prevent high priority nodes from becoming the leader. +Factors such as high load can prevent high-priority nodes from becoming the leader. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md index c654534a988..2181e26ef58 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/elasticsearch-exporter.md @@ -25,7 +25,7 @@ your own ETL jobs. When configured to do so, the exporter will automatically create an index per record value type (see the value type in the Zeebe protocol). Each of these indexes has a corresponding pre-defined mapping to facilitate data ingestion for your own ETL jobs. You can find -those as templates in [the resources folder of the exporter's source code](https://github.com/camunda/camunda/tree/main/zeebe/exporters/elasticsearch-exporter/src/main/resources). +those as templates in [the resources folder of the exporter's source code](https://github.com/camunda/camunda/tree/stable/8.6/zeebe/exporters/elasticsearch-exporter/src/main/resources). :::note The indexes are created as required, and will not be created twice if they already exist. However, once disabled, they will not be deleted (that is up to the administrator.) 
Similarly, data is never deleted by the exporter, and must be deleted by the administrator when it is safe to do so. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md index fc621c0a837..52a2ab7516c 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/exporters/install-zeebe-exporters.md @@ -40,7 +40,7 @@ env: ``` This example is downloading the exporters' JAR from a URL and adding the JAR to the `exporters` directory, -which will be scanned for JARs and added to the Zeebe broker classpath. Then, with `environment variables`, +which will be scanned for JARs and added to the Zeebe Broker classpath. Then, with `environment variables`, you can configure the exporter parameters. :::note diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/backpressure.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/backpressure.md index 377d12a7492..0f373ef2c20 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/backpressure.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/backpressure.md @@ -19,6 +19,13 @@ control write rate limits](/docs/self-managed/operational-guides/configure-flow- be used with static write rate limits or throttling. This prevents the partition from building an excessive backlog of records not exported. +:::note +When [multi-tenancy](./../../concepts/multi-tenancy.md) is enabled in Camunda 8, a large number of concurrent requests +may also lead to issues with Camunda Identity. In such cases, it is recommended to enable and configure the management of +Identity requests in the Zeebe Gateway. This allows Zeebe to employ a backpressure mechanism against these requests. +For more information, see the Zeebe Gateway [experimental configuration documentation](./../configuration/gateway.md#experimental-configuration). +::: + ### Terminology - **RTT** - Round-Trip Time, known as the time between when the request is accepted by the broker and when the response to the request is sent back to the gateway. @@ -34,7 +41,7 @@ The limit and inflight count are calculated per partition. Zeebe uses adaptive algorithms from [concurrency-limits](https://github.com/Netflix/concurrency-limits) to dynamically calculate the limit. Configure Zeebe with one of the backpressure algorithms in the following sections. -The default values can be found in the [Zeebe broker standalone configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) or in the [Zeebe broker configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) in the `# backpressure` section. +The default values can be found in the [Zeebe Broker standalone configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.standalone.yaml.template) or in the [Zeebe Broker configuration template](https://github.com/camunda/camunda/blob/main/dist/src/main/config/broker.yaml.template) in the `# backpressure` section. 
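For orientation, the following is a minimal, hypothetical sketch of how such a backpressure block can look in the broker configuration. The exact key names, defaults, and algorithm-specific settings should be taken from the linked configuration templates for your Zeebe version; the `fixed` algorithm shown here is only an illustrative choice.

```yaml
# Illustrative only — verify key names and defaults against the broker
# configuration template linked above for your Zeebe version.
zeebe:
  broker:
    backpressure:
      # Disable only if you fully understand the consequences.
      enabled: true
      # One of the algorithms described in the following sections,
      # for example the fixed limit algorithm.
      algorithm: fixed
      # Algorithm-specific settings (such as the fixed limit value) live in
      # the same "# backpressure" section of the template.
```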
#### Fixed limit diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/cluster-scaling.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/cluster-scaling.md index 379359c94dc..d7a6bb569b0 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/cluster-scaling.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/cluster-scaling.md @@ -418,7 +418,7 @@ The response is a JSON object. See detailed specs [here](https://github.com/camu } ``` -- `changeId`: Id of the changes initiated to scale the cluster. This can be used to monitor the progress of the scaling operation. The id typically increases so new requests get a higher id than the previous one. +- `changeId`: The ID of the changes initiated to scale the cluster. This can be used to monitor the progress of the scaling operation. The ID typically increases so new requests get a higher ID than the previous one. - `currentTopology`: A list of current brokers and the partition distribution. - `plannedChanges`: A sequence of operations that has to be executed to achieve scaling. - `expectedToplogy`: The expected list of brokers and the partition distribution once the scaling is completed. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/disk-space.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/disk-space.md index 12cdff52389..d3496d8ce9e 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/disk-space.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/disk-space.md @@ -4,7 +4,7 @@ title: "Disk space" description: "Understand how Zeebe uses the local disk for storage of its persistent data, and configuring Zeebe settings for the disk usage watermarks." --- -Zeebe uses the local disk for storage of its persistent data. Therefore, if the Zeebe broker runs out of disk space, the system is in an invalid state as the broker cannot update its state. +Zeebe uses the local disk for storage of its persistent data. Therefore, if the Zeebe Broker runs out of disk space, the system is in an invalid state as the broker cannot update its state. To prevent the system from reaching an unrecoverable state, Zeebe expects a minimum size of free disk space available. If this limit is violated, the broker rejects new requests to allow the operations team to free more disk space, and allows the broker to continue to update its state. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/health.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/health.md index b550b8e4325..9ad212e1cdf 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/health.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/health.md @@ -6,7 +6,7 @@ description: "This document analyzes health status checks and responses." 
## Broker -Zeebe broker exposes three HTTP endpoints to query its health status: +The Zeebe Broker exposes three HTTP endpoints to query its health status: - Startup check - Ready check @@ -62,7 +62,7 @@ When a broker becomes unhealthy, it's recommended to check the logs to see what ## Gateway -Zeebe gateway exposes three HTTP endpoints to query its health status: +The Zeebe Gateway exposes three HTTP endpoints to query its health status: - Health status - `http://{zeebe-gateway}:9600/actuator/health` - Startup probe - `http://{zeebe-gateway}:9600/actuator/health/startup` diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/management-api.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/management-api.md index 66a49949710..dce07ca6696 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/management-api.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/management-api.md @@ -1,13 +1,13 @@ --- id: management-api title: "Management API" -description: "Zeebe Gateway also exposes an HTTP endpoint for cluster management operations." +description: "The Zeebe Gateway also exposes an HTTP endpoint for cluster management operations." --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -Besides the [REST](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md) and [gRPC API](/apis-tools/zeebe-api/grpc.md) for process instance execution, Zeebe Gateway also exposes an HTTP endpoint for cluster management operations. This API is not expected to be used by a typical user, but by a privileged user such as a cluster administrator. It is exposed via a different port and configured using configuration `management.server.port` (or via environment variable `MANAGEMENT_SERVER_PORT`). By default, this is set to `9600`. +Besides the [REST](/apis-tools/zeebe-api-rest/zeebe-api-rest-overview.md) and [gRPC API](/apis-tools/zeebe-api/grpc.md) for process instance execution, the Zeebe Gateway also exposes an HTTP endpoint for cluster management operations. This API is not expected to be used by a typical user, but by a privileged user such as a cluster administrator. It is exposed via a different port and configured using configuration `management.server.port` (or via environment variable `MANAGEMENT_SERVER_PORT`). By default, this is set to `9600`. The API is a custom endpoint available via [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/actuator.html#actuator.endpoints). For additional configurations such as security, refer to the Spring Boot documentation. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/metrics.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/metrics.md index 52cabef81b7..609ff35f86d 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/metrics.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/metrics.md @@ -51,7 +51,7 @@ All Zeebe-related metrics have a `zeebe_`-prefix. Most metrics have the following common label: -- `partition`: Cluster-unique id of the partition +- `partition`: Cluster-unique ID of the partition :::note Both brokers and gateways expose their respective metrics. The brokers have an optional metrics exporter that can be enabled for maximum insight. 
@@ -91,7 +91,7 @@ The health of partitions in a broker can be monitored by the metric `zeebe_healt ## Grafana Zeebe comes with a pre-built dashboard, available in the repository: -[monitor/grafana/zeebe.json](https://github.com/camunda/camunda/blob/main/monitor/grafana/zeebe.json). +[monitor/grafana/zeebe.json](https://github.com/camunda/camunda/blob/stable/8.6/monitor/grafana/zeebe.json). [Import](https://grafana.com/docs/grafana/latest/reference/export_import/#importing-a-dashboard) it into your Grafana instance and select the correct Prometheus data source (important if you have more than one). You will then be greeted with the following dashboard, which displays a healthy cluster topology, general throughput metrics, handled requests, exported events per second, disk and memory usage, and more. diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/network-ports.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/network-ports.md index 45512e7e298..3b3946c2ace 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/network-ports.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/network-ports.md @@ -19,7 +19,7 @@ Additionally, it will need to communicate with other nodes (mostly brokers) in t To join the cluster, it will also need at least one initial contact point, typically a broker, configured via `zeebe.gateway.cluster.initialContactPoints: [127.0.0.1:26502]`. :::note -You can use all broker connections instead of one to make the startup process of the Zeebe gateway more resilient. +You can use all broker connections instead of one to make the startup process of the Zeebe Gateway more resilient. ::: The relevant [configuration](../configuration/configuration.md) settings are: diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/resource-planning.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/resource-planning.md index 7a488c310f9..0f12ee6028f 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/resource-planning.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/resource-planning.md @@ -148,7 +148,7 @@ Only the leader of a partition exports events. Only committed events (events tha When a partition fails over to a new leader, the new leader is able to construct the current partition state by projecting the event log from the point of the last snapshot. The position of exporters cannot be reconstructed from the event log, so it is set to the last snapshot. This means an exporter can see the same events twice in the event of a fail-over. -You should assign idempotent ids to events in your exporter if this is an issue for your system. The combination of record position and partition id is reliable as a unique id for an event. +You should assign idempotent ids to events in your exporter if this is an issue for your system. The combination of record position and partition ID is reliable as a unique ID for an event. 
### Effect of quorum loss diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/update-zeebe.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/update-zeebe.md index 5df604da86e..a8d66b7481b 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/update-zeebe.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/operations/update-zeebe.md @@ -19,7 +19,7 @@ Refer to the [update guide](/self-managed/operational-guides/update-guide/introd A **rolling update** ensures the Zeebe cluster stays available by updating brokers and gateways one by one instead of all at once. -There are three parties to a rolling update: the Zeebe brokers, Zeebe gateways, and the clients. +There are three parts to a rolling update: the Zeebe Broker, Zeebe Gateway, and clients. We recommend updating brokers first, then gateways, and finally clients. This ensures clients don't use new APIs that are not yet supported by the brokers or gateways. @@ -29,7 +29,7 @@ While updating brokers, leadership for partitions will rotate which may cause br The procedure to do a rolling update of Zeebe brokers is the following: -1. Pick the broker with the highest id that runs the old version. +1. Pick the broker with the highest ID that runs the old version. 2. Shut down the broker. 3. Update the broker software to the new version. 4. Start the broker and wait for it to become ready and healthy. @@ -49,7 +49,7 @@ The snapshot period is five minutes by default but is [configurable via `snapsho If your Zeebe deployment is managed by our [Helm charts](/self-managed/setup/install.md), the rolling update procedure is already automated. :::note -Zeebe brokers are managed by a [`StatefulSet`](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies). Zeebe gateways are managed by a []`Deployment`](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment). +Zeebe brokers are managed by a [`StatefulSet`](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies). Zeebe Gateways are managed by a [`Deployment`](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment). ::: #### Updating brokers diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md index 9671048eb13..58cc4817d6b 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/interceptors.md @@ -155,7 +155,7 @@ When compiling your class, you need to make sure all compile-time dependencies are provided. In the example above, that means we need the `grpc-api` and `slf4j-api` libraries available when compiling. -Since the interceptor will be running inside the Zeebe gateway, the language +Since the interceptor will be running inside the Zeebe Gateway, the language level of the compiled code must be the same as Zeebe's (i.e. currently JDK 21) or lower. This example thus assumes you're using version 21 of `javac`. 
```sh diff --git a/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md b/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md index b3175754810..f67d92f1467 100644 --- a/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md +++ b/versioned_docs/version-8.6/self-managed/zeebe-deployment/zeebe-gateway/zeebe-gateway-overview.md @@ -8,11 +8,11 @@ description: "Learn about this component and contact point of the Zeebe cluster import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster which allows Zeebe clients to communicate with Zeebe brokers inside a Zeebe cluster. For more information about the Zeebe broker, visit our [additional documentation](../../../components/zeebe/technical-concepts/architecture.md#brokers). +The Zeebe Gateway is a component of the Zeebe cluster; it can be considered the contact point for the Zeebe cluster which allows Zeebe clients to communicate with Zeebe brokers inside a Zeebe cluster. For more information about the Zeebe Broker, visit our [additional documentation](../../../components/zeebe/technical-concepts/architecture.md#brokers). -To summarize, the Zeebe broker is the main part of the Zeebe cluster, which does all the heavy work like processing, replicating, exporting, and everything based on partitions. The Zeebe Gateway acts as a load balancer and router between Zeebe’s processing partitions. +To summarize, the Zeebe Broker is the main part of the Zeebe cluster, which does all the heavy work like processing, replicating, exporting, and everything based on partitions. The Zeebe Gateway acts as a load balancer and router between Zeebe’s processing partitions. -![Zeebe gateway overview](assets/zeebe-gateway-overview.png) +![Zeebe Gateway overview](assets/zeebe-gateway-overview.png) To interact with the Zeebe cluster, the Zeebe client sends a command to the gateway either as a gRPC message (to port `26500` by default), or a plain HTTP request to its REST API (to port `8080` by default). Given the gateway supports gRPC as well as an OpenAPI spec, the user can use several clients in different languages to interact with the Zeebe cluster. For more information, read our [overview](../../../apis-tools/working-with-apis-tools.md). @@ -42,7 +42,7 @@ The Zeebe Gateway can be run in two different ways: embedded and standalone. -Running the gateway in embedded mode means it will run as part of the Zeebe broker. The broker will accept gRPC client messages via the embedded gateway and distribute the translated requests inside the cluster. This means the request accepted by the embedded gateway does not necessarily go to the same broker, where the embedded gateway is running. +Running the gateway in embedded mode means it will run as part of the Zeebe Broker. The broker will accept gRPC client messages via the embedded gateway and distribute the translated requests inside the cluster. This means the request accepted by the embedded gateway does not necessarily go to the same broker, where the embedded gateway is running. The embedded gateway is useful for development and testing purposes, and to reduce the burden of deploying and running multiple applications. 
For example, in [zeebe-process-test](https://github.com/camunda/zeebe-process-test) an embedded gateway is used to accept the client commands and write directly to the engine. diff --git a/versioned_sidebars/version-1.3-sidebars.json b/versioned_sidebars/version-1.3-sidebars.json deleted file mode 100644 index 80841502dbb..00000000000 --- a/versioned_sidebars/version-1.3-sidebars.json +++ /dev/null @@ -1,918 +0,0 @@ -{ - "Guides": [ - "guides/introduction-to-camunda", - { - "Getting started with Camunda Cloud": [ - "guides/getting-started/create-camunda-cloud-account", - "guides/getting-started/create-your-cluster", - "guides/getting-started/setup-client-connection-credentials", - "guides/getting-started/connect-to-your-cluster", - "guides/getting-started/model-your-first-process", - "guides/getting-started/deploy-your-process-and-start-process-instance", - "guides/getting-started/implement-service-task", - "guides/getting-started/implement-decision-gateway", - "guides/getting-started/monitor-your-process-in-operate" - ] - }, - "guides/getting-started-orchestrate-human-tasks", - "guides/getting-started-orchestrate-microservices", - "guides/setting-up-development-project", - "guides/automating-a-process-using-bpmn", - "guides/utilizing-forms", - "guides/improve-processes-with-optimize", - "guides/message-correlation", - { - "Update Guide": [ - "guides/update-guide/introduction", - "guides/update-guide/120-to-130", - "guides/update-guide/110-to-120", - "guides/update-guide/100-to-110", - "guides/update-guide/026-to-100" - ] - }, - "guides/migrating-from-Camunda-Platform" - ], - "Components": [ - "components/components-overview", - { - "Concepts": [ - "components/concepts/what-is-camunda-cloud", - "components/concepts/processes", - "components/concepts/job-workers", - "components/concepts/process-instance-creation", - "components/concepts/messages", - "components/concepts/incidents", - "components/concepts/variables", - "components/concepts/expressions" - ], - "Cloud Console": [ - "components/cloud-console/introduction", - { - "Manage your organization": [ - "components/cloud-console/manage-organization/organization-settings", - "components/cloud-console/manage-organization/manage-users", - "components/cloud-console/manage-organization/view-organization-activity", - "components/cloud-console/manage-organization/usage-history", - "components/cloud-console/manage-organization/update-billing-reservations", - "components/cloud-console/manage-organization/switch-organization" - ] - }, - { - "Manage clusters": [ - "components/cloud-console/manage-clusters/create-cluster", - "components/cloud-console/manage-clusters/rename-cluster", - "components/cloud-console/manage-clusters/delete-cluster", - "components/cloud-console/manage-clusters/manage-api-clients", - "components/cloud-console/manage-clusters/manage-alerts", - "components/cloud-console/manage-clusters/manage-ip-whitelists" - ] - }, - { - "Manage your plan": [ - "components/cloud-console/manage-plan/available-plans", - "components/cloud-console/manage-plan/upgrade-to-professional-plan" - ] - }, - { - "Troubleshooting": [ - "components/cloud-console/troubleshooting/common-pitfalls", - "components/cloud-console/troubleshooting/feedback-and-support" - ] - } - ], - "Modeler": [ - "components/modeler/about-modeler", - { - "Web Modeler": [ - "components/modeler/web-modeler/new-web-modeler", - "components/modeler/web-modeler/launch-web-modeler", - "components/modeler/web-modeler/model-your-first-diagram", - 
"components/modeler/web-modeler/import-diagram", - "components/modeler/web-modeler/save-and-deploy", - "components/modeler/web-modeler/start-instance", - "components/modeler/web-modeler/collaboration", - "components/modeler/web-modeler/milestones", - "components/modeler/web-modeler/token-simulation" - ] - }, - { - "Desktop Modeler": [ - "components/modeler/desktop-modeler/install-the-modeler", - "components/modeler/desktop-modeler/model-your-first-diagram", - "components/modeler/desktop-modeler/connect-to-camunda-cloud", - "components/modeler/desktop-modeler/start-instance", - { - "Element templates": [ - "components/modeler/desktop-modeler/element-templates/about-templates", - "components/modeler/desktop-modeler/element-templates/configuring-templates", - "components/modeler/desktop-modeler/element-templates/using-templates", - "components/modeler/desktop-modeler/element-templates/defining-templates", - "components/modeler/desktop-modeler/element-templates/additional-resources" - ] - }, - { - "Additional configuration": [ - "components/modeler/desktop-modeler/flags/flags", - "components/modeler/desktop-modeler/plugins/plugins", - "components/modeler/desktop-modeler/search-paths/search-paths", - "components/modeler/desktop-modeler/telemetry/telemetry" - ] - } - ] - }, - { - "BPMN": [ - "components/modeler/bpmn/modeler-bpmn", - "components/modeler/bpmn/bpmn-primer", - "components/modeler/bpmn/bpmn-coverage", - "components/modeler/bpmn/data-flow", - { - "Tasks": [ - "components/modeler/bpmn/tasks", - "components/modeler/bpmn/service-tasks/service-tasks", - "components/modeler/bpmn/user-tasks/user-tasks", - "components/modeler/bpmn/receive-tasks/receive-tasks", - "components/modeler/bpmn/business-rule-tasks/business-rule-tasks", - "components/modeler/bpmn/script-tasks/script-tasks", - "components/modeler/bpmn/send-tasks/send-tasks", - "components/modeler/bpmn/manual-tasks/manual-tasks" - ] - }, - { - "Gateways": [ - "components/modeler/bpmn/gateways", - "components/modeler/bpmn/exclusive-gateways/exclusive-gateways", - "components/modeler/bpmn/parallel-gateways/parallel-gateways", - "components/modeler/bpmn/event-based-gateways/event-based-gateways" - ] - }, - { - "Events": [ - "components/modeler/bpmn/events", - "components/modeler/bpmn/none-events/none-events", - "components/modeler/bpmn/message-events/message-events", - "components/modeler/bpmn/timer-events/timer-events", - "components/modeler/bpmn/error-events/error-events" - ] - }, - { - "Subprocesses": [ - "components/modeler/bpmn/subprocesses", - "components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses", - "components/modeler/bpmn/call-activities/call-activities", - "components/modeler/bpmn/event-subprocesses/event-subprocesses" - ] - }, - { - "Markers": [ - "components/modeler/bpmn/markers", - "components/modeler/bpmn/multi-instance/multi-instance" - ] - } - ] - }, - { - "DMN": ["components/modeler/dmn/desktop-modeler-dmn"] - }, - { - "Forms": ["components/modeler/forms/camunda-forms-reference"] - } - ], - "Zeebe": [ - "components/zeebe/zeebe-overview", - { - "Technical concepts": [ - "components/zeebe/technical-concepts/index", - "components/zeebe/technical-concepts/architecture", - "components/zeebe/technical-concepts/clustering", - "components/zeebe/technical-concepts/partitions", - "components/zeebe/technical-concepts/internal-processing", - "components/zeebe/technical-concepts/process-lifecycles", - "components/zeebe/technical-concepts/protocols", - "components/zeebe/technical-concepts/exporters" - ] - }, - { - "Open 
Source community": [ - "components/zeebe/open-source/community-contributions", - "components/zeebe/open-source/get-help-get-involved" - ] - } - ], - "Operate": [ - "components/operate/index", - { - "User guide": [ - "components/operate/userguide/basic-operate-navigation", - "components/operate/userguide/resolve-incidents-update-variables", - "components/operate/userguide/selections-operations", - "components/operate/userguide/delete-finished-instances", - "components/operate/userguide/operate-feedback-and-questions" - ] - } - ], - "Optimize": [ - { - "type": "link", - "label": "What is Optimize?", - "href": "/optimize/3.7.0/components/what-is-optimize/" - }, - - { - "User guide": [ - { - "type": "link", - "label": "Collections, dashboards, and reports", - "href": "/optimize/3.7.0/components/userguide/collections-dashboards-reports/" - }, - { - "type": "link", - "label": "Data sources", - "href": "/optimize/3.7.0/components/userguide/data-sources/" - }, - { - "type": "link", - "label": "Creating dashboards", - "href": "/optimize/3.7.0/components/userguide/creating-dashboards/" - }, - { - "type": "link", - "label": "Creating reports", - "href": "/optimize/3.7.0/components/userguide/creating-reports/" - }, - { - "type": "link", - "label": "Combined reports", - "href": "/optimize/3.7.0/components/userguide/combined-reports/" - }, - - { - "Process analysis": [ - { - "type": "link", - "label": "Overview", - "href": "/optimize/3.7.0/components/userguide/process-analysis/overview/" - }, - { - "type": "link", - "label": "Outlier analysis", - "href": "/optimize/3.7.0/components/userguide/process-analysis/outlier-analysis/" - }, - { - "type": "link", - "label": "Branch analysis", - "href": "/optimize/3.7.0/components/userguide/process-analysis/branch-analysis/" - }, - - { - "Report analysis": [ - { - "type": "link", - "label": "Report process analysis", - "href": "/optimize/3.7.0/components/userguide/process-analysis/report-analysis/overview/" - }, - { - "type": "link", - "label": "Edit mode", - "href": "/optimize/3.7.0/components/userguide/process-analysis/report-analysis/edit-mode/" - }, - { - "type": "link", - "label": "View mode", - "href": "/optimize/3.7.0/components/userguide/process-analysis/report-analysis/view-mode/" - } - ] - } - ] - }, - - { - "Decision analysis": [ - { - "type": "link", - "label": "Overview", - "href": "/optimize/3.7.0/components/userguide/decision-analysis/overview/" - }, - { - "type": "link", - "label": "Single report", - "href": "/optimize/3.7.0/components/userguide/decision-analysis/decision-report/" - }, - { - "type": "link", - "label": "Filters", - "href": "/optimize/3.7.0/components/userguide/decision-analysis/decision-filter/" - } - ] - }, - - { - "Additional features": [ - { - "type": "link", - "label": "Alerts", - "href": "/optimize/3.7.0/components/userguide/additional-features/alerts/" - }, - { - "type": "link", - "label": "Event-based processes", - "href": "/optimize/3.7.0/components/userguide/additional-features/event-based-processes/" - }, - { - "type": "link", - "label": "Export and import", - "href": "/optimize/3.7.0/components/userguide/additional-features/export-import/" - }, - { - "type": "link", - "label": "Filters", - "href": "/optimize/3.7.0/components/userguide/additional-features/filters/" - }, - { - "type": "link", - "label": "Footer", - "href": "/optimize/3.7.0/components/userguide/additional-features/footer/" - }, - { - "type": "link", - "label": "Variable labeling", - "href": 
"/optimize/3.7.0/components/userguide/additional-features/variable-labeling/" - } - ] - } - ] - } - ], - "Tasklist": [ - "components/tasklist/introduction", - { - "User guide": [ - { - "API mode": [ - "components/tasklist/userguide/api/overview", - "components/tasklist/userguide/api/tutorial" - ], - "User interface mode": [ - "components/tasklist/userguide/user-interface/overview" - ] - } - ] - } - ], - "Best Practices": [ - "components/best-practices/overview", - { - "Project Management": [ - "components/best-practices/management/following-the-customer-success-path", - "components/best-practices/management/doing-a-proper-poc" - ], - "Architecture": [ - "components/best-practices/architecture/deciding-about-your-stack", - "components/best-practices/architecture/sizing-your-environment", - "components/best-practices/architecture/understanding-human-tasks-management" - ], - "Development": [ - "components/best-practices/development/connecting-the-workflow-engine-with-your-world", - "components/best-practices/development/service-integration-patterns", - "components/best-practices/development/writing-good-workers", - "components/best-practices/development/dealing-with-problems-and-exceptions", - "components/best-practices/development/handling-data-in-processes", - "components/best-practices/development/routing-events-to-processes", - "components/best-practices/development/testing-process-definitions" - ], - "Modeling": [ - "components/best-practices/modeling/creating-readable-process-models", - "components/best-practices/modeling/naming-bpmn-elements", - "components/best-practices/modeling/naming-technically-relevant-ids", - "components/best-practices/modeling/modeling-beyond-the-happy-path", - "components/best-practices/modeling/modeling-with-situation-patterns", - "components/best-practices/modeling/building-flexibility-into-bpmn-models", - "components/best-practices/modeling/choosing-the-dmn-hit-policy" - ], - "Operations": [ - "components/best-practices/operations/versioning-process-definitions", - "components/best-practices/operations/reporting-about-processes" - ], - "Camunda 7 specific": [ - "components/best-practices/architecture/deciding-about-your-stack-c7", - "components/best-practices/architecture/sizing-your-environment-c7", - "components/best-practices/development/invoking-services-from-the-process-c7", - "components/best-practices/development/understanding-transaction-handling-c7", - "components/best-practices/operations/operating-camunda-c7", - "components/best-practices/operations/performance-tuning-camunda-c7", - "components/best-practices/operations/securing-camunda-c7", - "components/best-practices/architecture/extending-human-task-management-c7" - ] - } - ] - } - ], - "APIs & Tools": [ - "apis-tools/working-with-apis-tools", - { - "APIs": [ - "apis-tools/grpc", - { - "Tasklist API (GraphQL)": [ - { - "type": "autogenerated", - "dirName": "apis-tools/tasklist-api" - } - ] - }, - "apis-tools/cloud-console-api-reference" - ] - }, - { - "Clients": [ - { - "Java client": [ - "apis-tools/java-client/index", - "apis-tools/java-client/job-worker", - "apis-tools/java-client/logging", - "apis-tools/java-client/testing", - { - "Examples": [ - "apis-tools/java-client-examples/index", - "apis-tools/java-client-examples/process-deploy", - "apis-tools/java-client-examples/process-instance-create", - "apis-tools/java-client-examples/process-instance-create-nonblocking", - "apis-tools/java-client-examples/process-instance-create-with-result", - 
"apis-tools/java-client-examples/job-worker-open", - "apis-tools/java-client-examples/data-pojo", - "apis-tools/java-client-examples/cluster-topology-request" - ] - } - ] - }, - { - "Go client": [ - "apis-tools/go-client/index", - "apis-tools/go-client/get-started" - ] - }, - { - "CLI client": [ - "apis-tools/cli-client/index", - "apis-tools/cli-client/get-started" - ] - }, - { - "Community clients": [ - "apis-tools/community-clients/index", - "apis-tools/community-clients/spring", - "apis-tools/community-clients/javascript", - "apis-tools/community-clients/micronaut", - "apis-tools/community-clients/c-sharp", - "apis-tools/community-clients/python", - "apis-tools/community-clients/ruby", - "apis-tools/community-clients/rust" - ] - }, - "apis-tools/build-your-own-client" - ] - } - ], - "Reference": [ - "reference/overview", - { - "FEEL expressions": [ - "reference/feel/what-is-feel", - "reference/feel/language-guide/feel-data-types", - "reference/feel/language-guide/feel-unary-tests", - "reference/feel/language-guide/feel-expression", - { - "Built-in functions": [ - "reference/feel/builtin-functions/feel-built-in-functions-conversion", - "reference/feel/builtin-functions/feel-built-in-functions-boolean", - "reference/feel/builtin-functions/feel-built-in-functions-string", - "reference/feel/builtin-functions/feel-built-in-functions-numeric", - "reference/feel/builtin-functions/feel-built-in-functions-list", - "reference/feel/builtin-functions/feel-built-in-functions-context", - "reference/feel/builtin-functions/feel-built-in-functions-temporal" - ] - } - ] - }, - "reference/glossary", - "reference/announcements", - "reference/licenses", - "reference/notices", - "reference/release-policy", - "reference/supported-environments", - "reference/dependencies" - ], - "Self-Managed": [ - "self-managed/about-self-managed", - { - "Zeebe": [ - "self-managed/zeebe-deployment/index", - { - "Local installation": [ - "self-managed/zeebe-deployment/local/install", - "self-managed/zeebe-deployment/local/quickstart" - ] - }, - "self-managed/zeebe-deployment/docker/install", - { - "Kubernetes deployment": [ - "self-managed/zeebe-deployment/kubernetes/index", - "self-managed/zeebe-deployment/kubernetes/helm/installing-helm", - "self-managed/zeebe-deployment/kubernetes/helm/accessing-operate-tasklist" - ] - }, - { - "Configuration": [ - "self-managed/zeebe-deployment/configuration/configuration", - "self-managed/zeebe-deployment/configuration/logging", - "self-managed/zeebe-deployment/configuration/gateway-health-probes", - "self-managed/zeebe-deployment/configuration/environment-variables", - "self-managed/zeebe-deployment/configuration/fixed-partitioning", - "self-managed/zeebe-deployment/configuration/priority-election" - ] - }, - { - "Security": [ - "self-managed/zeebe-deployment/security/security", - "self-managed/zeebe-deployment/security/secure-client-communication", - "self-managed/zeebe-deployment/security/client-authorization", - "self-managed/zeebe-deployment/security/secure-cluster-communication" - ] - }, - { - "Operation": [ - "self-managed/zeebe-deployment/operations/index", - "self-managed/zeebe-deployment/operations/resource-planning", - "self-managed/zeebe-deployment/operations/network-ports", - "self-managed/zeebe-deployment/operations/setting-up-a-cluster", - "self-managed/zeebe-deployment/operations/metrics", - "self-managed/zeebe-deployment/operations/health", - "self-managed/zeebe-deployment/operations/backpressure", - "self-managed/zeebe-deployment/operations/disk-space", - 
"self-managed/zeebe-deployment/operations/update-zeebe", - "self-managed/zeebe-deployment/operations/rebalancing", - "self-managed/zeebe-deployment/operations/backups" - ] - } - ], - "Operate": [ - "self-managed/operate-deployment/install-and-start", - "self-managed/operate-deployment/configuration", - "self-managed/operate-deployment/data-retention", - "self-managed/operate-deployment/schema-and-migration", - "self-managed/operate-deployment/importer-and-archiver", - "self-managed/operate-deployment/authentication", - "self-managed/operate-deployment/usage-metrics" - ], - "Tasklist": [ - "self-managed/tasklist-deployment/install-and-start", - "self-managed/tasklist-deployment/configuration", - "self-managed/tasklist-deployment/authentication", - "self-managed/tasklist-deployment/usage-metrics" - ], - "Optimize": [ - { - "Setup": [ - { - "type": "link", - "label": "Installation", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/installation/" - }, - { - "type": "link", - "label": "Optimize license key", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/optimize-license/" - }, - { - "type": "link", - "label": "Security instructions", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/security-instructions/" - }, - { - "type": "link", - "label": "Configuration", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/configuration/" - }, - { - "type": "link", - "label": "User access management", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/user-management/" - }, - { - "type": "link", - "label": "Authorization management", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/authorization-management/" - }, - { - "type": "link", - "label": "Secure Elasticsearch", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/secure-elasticsearch/" - }, - { - "type": "link", - "label": "Shared Elasticsearch cluster", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/shared-elasticsearch-cluster/" - }, - { - "type": "link", - "label": "History cleanup", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/history-cleanup/" - }, - { - "type": "link", - "label": "Localization", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/localization/" - }, - { - "type": "link", - "label": "Multi-tenancy", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/multi-tenancy/" - }, - { - "type": "link", - "label": "Multiple process engines", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/multiple-engines/" - }, - { - "type": "link", - "label": "Object and list variable support", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/object-variables/" - }, - { - "type": "link", - "label": "Clustering", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/clustering/" - }, - { - "type": "link", - "label": "Webhooks", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/webhooks/" - }, - { - "type": "link", - "label": "Event-based processes", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/setup-event-based-processes/" - }, - { - "type": "link", - "label": "Telemetry", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/telemetry/" - }, - { - "type": "link", - "label": "Common problems", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/setup/common-problems/" - } - ] - }, - - { - "type": "link", - "label": "Self-Managed setup", - "href": 
"/optimize/3.7.0/self-managed/optimize-deployment/setup/" - }, - - { - "Plugins": [ - { - "type": "link", - "label": "Optimize plugin system", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/plugins/plugin-system/" - }, - { - "type": "link", - "label": "Business key import customization", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin/" - }, - { - "type": "link", - "label": "Decision inputs and outputs import customization", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/plugins/decision-import-plugin/" - }, - { - "type": "link", - "label": "Elasticsearch header", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/plugins/elasticsearch-header/" - }, - { - "type": "link", - "label": "Engine REST filter", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin/" - }, - { - "type": "link", - "label": "Single sign on", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/plugins/single-sign-on/" - }, - { - "type": "link", - "label": "Variable import customization", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/plugins/variable-import-plugin/" - } - ] - }, - - { - "REST API": [ - { - "type": "link", - "label": "Authorization", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/authorization/" - }, - - { - "Dashboard": [ - { - "type": "link", - "label": "Get dashboard IDs", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/dashboard/get-dashboard-ids/" - }, - { - "type": "link", - "label": "Delete dashboards", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/dashboard/delete-dashboard/" - }, - { - "type": "link", - "label": "Export dashboard definitions", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/dashboard/export-dashboard-definitions/" - } - ] - }, - - { - "Report": [ - { - "type": "link", - "label": "Get report IDs", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/report/get-report-ids/" - }, - { - "type": "link", - "label": "Delete reports", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/report/delete-report/" - }, - { - "type": "link", - "label": "Export report definitions", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/report/export-report-definitions/" - }, - { - "type": "link", - "label": "Export report result data", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/report/get-data-export/" - } - ] - }, - - { - "type": "link", - "label": "Event ingestion", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/event-ingestion/" - }, - { - "type": "link", - "label": "External variable ingestion", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/external-variable-ingestion/" - }, - { - "type": "link", - "label": "Health readiness", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/health-readiness/" - }, - { - "type": "link", - "label": "Import entities", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/rest-api/import-entities/" - } - ] - }, - - { - "type": "link", - "label": "Camunda engine data reimport", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/reimport/" - }, - - { - "Migration & Update": [ - { - "type": "link", - "label": "Instructions", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/instructions/" - }, - { - "type": "link", - "label": "Update notes (3.6 to 3.7.x)", - 
"href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7/" - }, - { - "type": "link", - "label": "Update notes (3.5 to 3.6)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6/" - }, - { - "type": "link", - "label": "Update notes (3.4 to 3.5)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5/" - }, - { - "type": "link", - "label": "Update notes (3.3 to 3.4)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4/" - }, - { - "type": "link", - "label": "Update notes (3.2 to 3.3)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3/" - }, - { - "type": "link", - "label": "Update notes (3.1 to 3.2)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2/" - }, - { - "type": "link", - "label": "Update notes (3.0 to 3.1)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1/" - }, - { - "type": "link", - "label": "Update notes (2.7 to 3.0)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0/" - }, - { - "type": "link", - "label": "Update notes (2.6 to 2.7)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7/" - }, - { - "type": "link", - "label": "Update notes (2.5 to 2.6)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6/" - }, - { - "type": "link", - "label": "Update notes (2.4 to 2.5)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5/" - }, - { - "type": "link", - "label": "Update notes (2.3 to 2.4)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4/" - }, - { - "type": "link", - "label": "Update notes (2.2 to 2.3)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3/" - }, - { - "type": "link", - "label": "Update notes (2.1 to 2.2)", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2/" - } - ] - }, - - { - "Optimize Explained": [ - { - "type": "link", - "label": "Engine data deletion", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/optimize-explained/engine-data-deletion/" - }, - { - "type": "link", - "label": "Data import", - "href": "/optimize/3.7.0/self-managed/optimize-deployment/optimize-explained/import-guide/" - } - ] - } - ], - - "IAM": [ - "self-managed/iam/what-is-iam", - { - "Getting started": [ - { - "Running IAM with Docker": [ - "self-managed/iam/getting-started/docker/setup-environment", - "self-managed/iam/getting-started/docker/start-iam", - "self-managed/iam/getting-started/docker/accessing-the-ui" - ] - } - ] - }, - { - "Deployment": [ - "self-managed/iam/deployment/configuration-variables", - "self-managed/iam/deployment/making-iam-production-ready" - ] - } - ] - }, - { - "Troubleshooting": ["self-managed/troubleshooting/log-levels"] - } - ] -} diff --git a/versioned_sidebars/version-8.2-sidebars.json b/versioned_sidebars/version-8.2-sidebars.json deleted file mode 100644 index e088749d122..00000000000 --- a/versioned_sidebars/version-8.2-sidebars.json +++ /dev/null @@ -1,1457 +0,0 @@ -{ - "Guides": [ - { - "Get started": [ - "guides/introduction-to-camunda", - "guides/create-account", - "guides/model-your-first-process", - "guides/orchestrate-human-tasks", - "guides/orchestrate-apis", - "guides/orchestrate-microservices" - ] - }, - { - "Design": [ - 
"guides/automating-a-process-using-bpmn", - "guides/create-decision-tables-using-dmn", - "guides/utilizing-forms" - ], - "Automate": [ - "guides/create-cluster", - "guides/setting-up-development-project", - "guides/setup-client-connection-credentials", - "guides/configuring-out-of-the-box-connectors", - "guides/message-correlation", - "guides/use-connectors-in-hybrid-mode", - "guides/host-custom-connectors" - ], - "Improve": [ - "guides/improve-processes-with-optimize", - { - "DevOps lifecycle": [ - "guides/devops-lifecycle/integrate-web-modeler-in-ci-cd" - ] - } - ] - }, - { - "Update guide": [ - "guides/update-guide/introduction", - "guides/update-guide/810-to-820", - "guides/update-guide/800-to-810", - "guides/update-guide/130-to-800", - "guides/update-guide/120-to-130", - "guides/update-guide/110-to-120", - "guides/update-guide/100-to-110", - "guides/update-guide/026-to-100", - { - "Connectors": [ - "guides/update-guide/connectors/introduction", - "guides/update-guide/connectors/0100-to-0110", - "guides/update-guide/connectors/090-to-0100", - "guides/update-guide/connectors/080-to-090", - "guides/update-guide/connectors/070-to-080", - "guides/update-guide/connectors/060-to-070", - "guides/update-guide/connectors/050-to-060", - "guides/update-guide/connectors/040-to-050", - "guides/update-guide/connectors/030-to-040", - "guides/update-guide/connectors/020-to-030", - "guides/update-guide/connectors/010-to-020" - ] - }, - { - "Elasticsearch": ["guides/update-guide/elasticsearch/7-to-8"], - "Keycloak": ["guides/update-guide/keycloak/keycloak-update"] - } - ] - }, - { - "Migrate from Camunda 7": [ - "guides/migrating-from-camunda-7/index", - "guides/migrating-from-camunda-7/conceptual-differences", - "guides/migrating-from-camunda-7/migration-readiness", - "guides/migrating-from-camunda-7/adjusting-bpmn-models", - "guides/migrating-from-camunda-7/adjusting-dmn-models", - "guides/migrating-from-camunda-7/adjusting-source-code" - ] - } - ], - "Components": [ - "components/components-overview", - { - "Concepts": [ - "components/concepts/what-is-camunda-8", - "components/concepts/clusters", - "components/concepts/processes", - "components/concepts/job-workers", - "components/concepts/process-instance-creation", - "components/concepts/messages", - "components/concepts/signals", - "components/concepts/incidents", - "components/concepts/variables", - "components/concepts/expressions", - "components/concepts/workflow-patterns", - "components/concepts/process-instance-modification", - "components/concepts/data-retention", - "components/concepts/encryption-at-rest", - "components/concepts/backups" - ], - "Console": [ - "components/console/introduction-to-console", - { - "Manage your organization": [ - "components/console/manage-organization/organization-settings", - "components/console/manage-organization/manage-users", - "components/console/manage-organization/view-organization-activity", - "components/console/manage-organization/enable-alpha-features", - "components/console/manage-organization/usage-history", - "components/console/manage-organization/usage-alerts", - "components/console/manage-organization/advanced-search", - "components/console/manage-organization/switch-organization", - "components/console/manage-organization/external-sso", - "components/console/manage-organization/delete-account" - ] - }, - { - "Manage clusters": [ - "components/console/manage-clusters/create-cluster", - "components/console/manage-clusters/rename-cluster", - "components/console/manage-clusters/delete-cluster", - 
"components/console/manage-clusters/manage-api-clients", - "components/console/manage-clusters/manage-alerts", - "components/console/manage-clusters/manage-ip-allowlists", - "components/console/manage-clusters/manage-secrets" - ] - }, - { - "Manage your plan": [ - "components/console/manage-plan/available-plans", - "components/console/manage-plan/upgrade-to-starter-plan", - "components/console/manage-plan/update-billing-reservations", - "components/console/manage-plan/update-creditcard", - "components/console/manage-plan/retrieve-invoices-or-update-billing-info", - "components/console/manage-plan/cancel-starter-subscription" - ] - }, - { - "Troubleshooting": [ - "components/console/console-troubleshooting/common-pitfalls" - ] - } - ], - "Modeler": [ - "components/modeler/about-modeler", - { - "Web Modeler": [ - "components/modeler/web-modeler/launch-web-modeler", - "components/modeler/web-modeler/model-your-first-diagram", - "components/modeler/web-modeler/import-diagram", - "components/modeler/web-modeler/fix-problems-in-your-diagram", - "components/modeler/web-modeler/run-or-publish-your-process", - { - "Collaboration": [ - "components/modeler/web-modeler/collaboration", - "components/modeler/web-modeler/collaborate-with-modes", - "components/modeler/web-modeler/design-your-process", - "components/modeler/web-modeler/implement-your-process", - "components/modeler/web-modeler/play-your-process" - ] - }, - "components/modeler/web-modeler/milestones", - "components/modeler/web-modeler/token-simulation", - { - "Advanced modeling": [ - "components/modeler/web-modeler/advanced-modeling/call-activity-linking", - "components/modeler/web-modeler/advanced-modeling/business-rule-task-linking" - ] - } - ] - }, - { - "Desktop Modeler": [ - "components/modeler/desktop-modeler/index", - "components/modeler/desktop-modeler/install-the-modeler", - "components/modeler/desktop-modeler/model-your-first-diagram", - "components/modeler/desktop-modeler/connect-to-camunda-8", - "components/modeler/desktop-modeler/start-instance", - { - "Element templates": [ - "components/modeler/desktop-modeler/element-templates/about-templates", - "components/modeler/desktop-modeler/element-templates/configuring-templates", - "components/modeler/desktop-modeler/element-templates/using-templates", - "components/modeler/desktop-modeler/element-templates/defining-templates", - "components/modeler/desktop-modeler/element-templates/c7-defining-templates", - "components/modeler/desktop-modeler/element-templates/additional-resources" - ] - }, - { - "Additional configuration": [ - "components/modeler/desktop-modeler/flags/flags", - "components/modeler/desktop-modeler/plugins/plugins", - "components/modeler/desktop-modeler/custom-lint-rules/custom-lint-rules", - "components/modeler/desktop-modeler/search-paths/search-paths", - "components/modeler/desktop-modeler/telemetry/telemetry" - ] - }, - "components/modeler/desktop-modeler/troubleshooting" - ] - }, - { - "BPMN": [ - "components/modeler/bpmn/modeler-bpmn", - "components/modeler/bpmn/bpmn-primer", - "components/modeler/bpmn/bpmn-coverage", - "components/modeler/bpmn/data-flow", - { - "Tasks": [ - "components/modeler/bpmn/tasks", - "components/modeler/bpmn/service-tasks/service-tasks", - "components/modeler/bpmn/user-tasks/user-tasks", - "components/modeler/bpmn/receive-tasks/receive-tasks", - "components/modeler/bpmn/business-rule-tasks/business-rule-tasks", - "components/modeler/bpmn/script-tasks/script-tasks", - "components/modeler/bpmn/send-tasks/send-tasks", - 
"components/modeler/bpmn/manual-tasks/manual-tasks", - "components/modeler/bpmn/undefined-tasks/undefined-tasks" - ] - }, - { - "Gateways": [ - "components/modeler/bpmn/gateways", - "components/modeler/bpmn/exclusive-gateways/exclusive-gateways", - "components/modeler/bpmn/parallel-gateways/parallel-gateways", - "components/modeler/bpmn/event-based-gateways/event-based-gateways", - "components/modeler/bpmn/inclusive-gateways/inclusive-gateways" - ] - }, - { - "Events": [ - "components/modeler/bpmn/events", - "components/modeler/bpmn/none-events/none-events", - "components/modeler/bpmn/message-events/message-events", - "components/modeler/bpmn/signal-events/signal-events", - "components/modeler/bpmn/timer-events/timer-events", - "components/modeler/bpmn/error-events/error-events", - "components/modeler/bpmn/escalation-events/escalation-events", - "components/modeler/bpmn/terminate-events/terminate-events", - "components/modeler/bpmn/link-events/link-events" - ] - }, - { - "Subprocesses": [ - "components/modeler/bpmn/subprocesses", - "components/modeler/bpmn/embedded-subprocesses/embedded-subprocesses", - "components/modeler/bpmn/call-activities/call-activities", - "components/modeler/bpmn/event-subprocesses/event-subprocesses" - ] - }, - { - "Markers": [ - "components/modeler/bpmn/markers", - "components/modeler/bpmn/multi-instance/multi-instance" - ] - } - ] - }, - { - "DMN": [ - "components/modeler/dmn/desktop-modeler-dmn", - "components/modeler/dmn/decision-requirements-graph", - { - "Decision table": [ - "components/modeler/dmn/decision-table", - "components/modeler/dmn/decision-table-input", - "components/modeler/dmn/decision-table-output", - "components/modeler/dmn/decision-table-rule", - "components/modeler/dmn/decision-table-hit-policy" - ] - }, - "components/modeler/dmn/decision-literal-expression", - "components/modeler/dmn/dmn-data-types" - ] - }, - { - "FEEL expressions": [ - "components/modeler/feel/what-is-feel", - "components/modeler/feel/language-guide/feel-data-types", - "components/modeler/feel/language-guide/feel-unary-tests", - { - "Expressions": [ - "components/modeler/feel/language-guide/feel-expressions-introduction", - "components/modeler/feel/language-guide/feel-boolean-expressions", - "components/modeler/feel/language-guide/feel-string-expressions", - "components/modeler/feel/language-guide/feel-numeric-expressions", - "components/modeler/feel/language-guide/feel-list-expressions", - "components/modeler/feel/language-guide/feel-context-expressions", - "components/modeler/feel/language-guide/feel-temporal-expressions", - "components/modeler/feel/language-guide/feel-variables", - "components/modeler/feel/language-guide/feel-control-flow", - "components/modeler/feel/language-guide/feel-functions" - ] - }, - { - "Built-in Functions": [ - "components/modeler/feel/builtin-functions/feel-built-in-functions-introduction", - "components/modeler/feel/builtin-functions/feel-built-in-functions-conversion", - "components/modeler/feel/builtin-functions/feel-built-in-functions-boolean", - "components/modeler/feel/builtin-functions/feel-built-in-functions-string", - "components/modeler/feel/builtin-functions/feel-built-in-functions-numeric", - "components/modeler/feel/builtin-functions/feel-built-in-functions-list", - "components/modeler/feel/builtin-functions/feel-built-in-functions-context", - "components/modeler/feel/builtin-functions/feel-built-in-functions-temporal", - "components/modeler/feel/builtin-functions/feel-built-in-functions-range" - ] - } - ] - }, - { - "Camunda 
Forms": [ - "components/modeler/forms/camunda-forms-reference", - { - "Form Element Library": [ - "components/modeler/forms/form-element-library/forms-element-library", - "components/modeler/forms/form-element-library/forms-element-library-text", - "components/modeler/forms/form-element-library/forms-element-library-textfield", - "components/modeler/forms/form-element-library/forms-element-library-textarea", - "components/modeler/forms/form-element-library/forms-element-library-number", - "components/modeler/forms/form-element-library/forms-element-library-datetime", - "components/modeler/forms/form-element-library/forms-element-library-checkbox", - "components/modeler/forms/form-element-library/forms-element-library-radio", - "components/modeler/forms/form-element-library/forms-element-library-select", - "components/modeler/forms/form-element-library/forms-element-library-checklist", - "components/modeler/forms/form-element-library/forms-element-library-taglist", - "components/modeler/forms/form-element-library/forms-element-library-image", - "components/modeler/forms/form-element-library/forms-element-library-button" - ] - }, - { - "Configuration": [ - "components/modeler/forms/configuration/forms-config-data-binding", - "components/modeler/forms/configuration/forms-config-options" - ] - } - ] - }, - "components/modeler/data-handling" - ], - "Connectors": [ - "components/connectors/introduction-to-connectors", - "components/connectors/connector-types", - { - "Use Connectors": [ - "components/connectors/use-connectors/index", - "components/connectors/use-connectors/inbound", - "components/connectors/use-connectors/outbound" - ] - }, - { - "Out-of-the-box Connectors": [ - "components/connectors/out-of-the-box-connectors/available-connectors-overview", - "components/connectors/out-of-the-box-connectors/asana", - "components/connectors/out-of-the-box-connectors/automation-anywhere", - { - "AWS": [ - "components/connectors/out-of-the-box-connectors/amazon-dynamodb", - "components/connectors/out-of-the-box-connectors/amazon-eventbridge", - "components/connectors/out-of-the-box-connectors/aws-lambda", - "components/connectors/out-of-the-box-connectors/amazon-sns", - "components/connectors/out-of-the-box-connectors/amazon-sqs" - ] - }, - "components/connectors/out-of-the-box-connectors/blueprism", - "components/connectors/out-of-the-box-connectors/easy-post", - "components/connectors/out-of-the-box-connectors/github", - "components/connectors/out-of-the-box-connectors/gitlab", - { - "Google": [ - "components/connectors/out-of-the-box-connectors/googledrive", - "components/connectors/out-of-the-box-connectors/google-maps-platform", - "components/connectors/out-of-the-box-connectors/google-sheets" - ] - }, - "components/connectors/out-of-the-box-connectors/hugging-face", - "components/connectors/out-of-the-box-connectors/kafka", - { - "Microsoft": [ - "components/connectors/out-of-the-box-connectors/azure-open-ai", - "components/connectors/out-of-the-box-connectors/microsoft-teams", - "components/connectors/out-of-the-box-connectors/microsoft-o365-mail" - ] - }, - "components/connectors/out-of-the-box-connectors/openai", - "components/connectors/out-of-the-box-connectors/operate", - "components/connectors/out-of-the-box-connectors/rabbitmq", - "components/connectors/out-of-the-box-connectors/salesforce", - "components/connectors/out-of-the-box-connectors/slack", - "components/connectors/out-of-the-box-connectors/sendgrid", - "components/connectors/out-of-the-box-connectors/twilio", - 
"components/connectors/out-of-the-box-connectors/uipath", - "components/connectors/out-of-the-box-connectors/whatsapp" - ] - }, - { - "Protocol Connectors": [ - "components/connectors/protocol/graphql", - "components/connectors/protocol/http-webhook", - "components/connectors/protocol/polling", - "components/connectors/protocol/rest" - ] - }, - "components/connectors/manage-connector-templates", - { - "Building custom Connectors": [ - "components/connectors/custom-built-connectors/connector-sdk", - "components/connectors/custom-built-connectors/connector-templates" - ] - } - ], - "Zeebe": [ - "components/zeebe/zeebe-overview", - { - "Technical concepts": [ - "components/zeebe/technical-concepts/technical-concepts-overview", - "components/zeebe/technical-concepts/architecture", - "components/zeebe/technical-concepts/clustering", - "components/zeebe/technical-concepts/partitions", - "components/zeebe/technical-concepts/internal-processing", - "components/zeebe/technical-concepts/process-lifecycles", - "components/zeebe/technical-concepts/protocols" - ] - } - ], - "Operate": [ - "components/operate/operate-introduction", - { - "User guide": [ - "components/operate/userguide/basic-operate-navigation", - "components/operate/userguide/resolve-incidents-update-variables", - "components/operate/userguide/selections-operations", - "components/operate/userguide/delete-finished-instances", - "components/operate/userguide/process-instance-modification" - ] - } - ], - "Tasklist": [ - "components/tasklist/introduction-to-tasklist", - { - "User guide": ["components/tasklist/userguide/using-tasklist"] - } - ], - "Optimize": [ - { - "type": "link", - "label": "What is Optimize?", - "href": "/optimize/3.10.0/components/what-is-optimize/" - }, - - { - "User guide": [ - { - "type": "link", - "label": "Collections, dashboards, and reports", - "href": "/optimize/3.10.0/components/userguide/collections-dashboards-reports/" - }, - { - "type": "link", - "label": "User permissions", - "href": "/optimize/3.10.0/components/userguide/user-permissions/" - }, - { - "type": "link", - "label": "Data sources", - "href": "/optimize/3.10.0/components/userguide/data-sources/" - }, - - { - "Dashboards": [ - { - "type": "link", - "label": "Creating dashboards", - "href": "/optimize/3.10.0/components/userguide/creating-dashboards/" - }, - { - "type": "link", - "label": "Edit mode", - "href": "/optimize/3.10.0/components/userguide/edit-mode/" - }, - { - "type": "link", - "label": "View mode", - "href": "/optimize/3.10.0/components/userguide/view-mode/" - } - ] - }, - - { - "Dashboards maintained by Camunda": [ - { - "type": "link", - "label": "Process dashboards", - "href": "/optimize/3.10.0/components/userguide/process-dashboards/" - }, - { - "type": "link", - "label": "Instant preview dashboards", - "href": "/optimize/3.10.0/components/userguide/instant-preview-dashboards/" - } - ] - }, - - { - "type": "link", - "label": "Creating reports", - "href": "/optimize/3.10.0/components/userguide/creating-reports/" - }, - { - "type": "link", - "label": "Combined process reports", - "href": "/optimize/3.10.0/components/userguide/combined-process-reports/" - }, - - { - "Process analysis": [ - { - "type": "link", - "label": "Overview", - "href": "/optimize/3.10.0/components/userguide/process-analysis/process-analysis-overview/" - }, - { - "type": "link", - "label": "Outlier analysis", - "href": "/optimize/3.10.0/components/userguide/process-analysis/outlier-analysis/" - }, - { - "type": "link", - "label": "Branch analysis", - "href": 
"/optimize/3.10.0/components/userguide/process-analysis/branch-analysis/" - }, - - { - "Report analysis": [ - { - "type": "link", - "label": "Report process analysis", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/overview/" - }, - - { - "Edit mode": [ - { - "type": "link", - "label": "Overview", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/edit-mode/" - }, - { - "type": "link", - "label": "Select process definitions", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/select-process-definitions/" - }, - { - "type": "link", - "label": "Define reports", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/define-reports/" - }, - { - "type": "link", - "label": "Measures", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/measures/" - }, - { - "type": "link", - "label": "Compare target values", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/compare-target-values/" - }, - { - "type": "link", - "label": "Process instance parts", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/process-instance-parts/" - }, - { - "type": "link", - "label": "Configure reports", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/configure-reports/" - } - ] - }, - - { - "type": "link", - "label": "View mode", - "href": "/optimize/3.10.0/components/userguide/process-analysis/report-analysis/view-mode/" - } - ] - }, - - { - "Filters": [ - { - "type": "link", - "label": "Overview", - "href": "/optimize/3.10.0/components/userguide/process-analysis/filters/" - }, - { - "type": "link", - "label": "Metadata filters", - "href": "/optimize/3.10.0/components/userguide/process-analysis/metadata-filters/" - }, - { - "type": "link", - "label": "Instance state filters", - "href": "/optimize/3.10.0/components/userguide/process-analysis/instance-state-filters/" - }, - { - "type": "link", - "label": "Flow node filters", - "href": "/optimize/3.10.0/components/userguide/process-analysis/flow-node-filters/" - }, - { - "type": "link", - "label": "Process instance filters", - "href": "/optimize/3.10.0/components/userguide/process-analysis/process-instance-filters/" - }, - { - "type": "link", - "label": "Variable filters", - "href": "/optimize/3.10.0/components/userguide/process-analysis/variable-filters/" - } - ] - } - ] - }, - - { - "Decision analysis": [ - { - "type": "link", - "label": "Overview", - "href": "/optimize/3.10.0/components/userguide/decision-analysis/decision-analysis-overview/" - }, - { - "type": "link", - "label": "Single report", - "href": "/optimize/3.10.0/components/userguide/decision-analysis/decision-report/" - }, - { - "type": "link", - "label": "Filters", - "href": "/optimize/3.10.0/components/userguide/decision-analysis/decision-filter/" - } - ] - }, - - { - "Additional features": [ - { - "type": "link", - "label": "Alerts", - "href": "/optimize/3.10.0/components/userguide/additional-features/alerts/" - }, - { - "type": "link", - "label": "Event-based processes", - "href": "/optimize/3.10.0/components/userguide/additional-features/event-based-processes/" - }, - { - "type": "link", - "label": "Export and import", - "href": "/optimize/3.10.0/components/userguide/additional-features/export-import/" - }, - { - "type": "link", - "label": "Footer", - "href": "/optimize/3.10.0/components/userguide/additional-features/footer/" - }, - { - "type": "link", - 
"label": "Variable labeling", - "href": "/optimize/3.10.0/components/userguide/additional-features/variable-labeling/" - }, - { - "type": "link", - "label": "Process variants comparison", - "href": "/optimize/3.10.0/components/userguide/additional-features/process-variants-comparison/" - }, - { - "type": "link", - "label": "Machine learning-ready data set", - "href": "/optimize/3.10.0/components/userguide/additional-features/ml-dataset/" - } - ] - } - ] - } - ], - "Best Practices": [ - "components/best-practices/best-practices-overview", - { - "Project management": [ - "components/best-practices/management/following-the-customer-success-path", - "components/best-practices/management/doing-a-proper-poc" - ], - "Architecture": [ - "components/best-practices/architecture/deciding-about-your-stack", - "components/best-practices/architecture/sizing-your-environment", - "components/best-practices/architecture/understanding-human-tasks-management" - ], - "Development": [ - "components/best-practices/development/connecting-the-workflow-engine-with-your-world", - "components/best-practices/development/service-integration-patterns", - "components/best-practices/development/writing-good-workers", - "components/best-practices/development/dealing-with-problems-and-exceptions", - "components/best-practices/development/handling-data-in-processes", - "components/best-practices/development/routing-events-to-processes", - "components/best-practices/development/testing-process-definitions" - ], - "Modeling": [ - "components/best-practices/modeling/creating-readable-process-models", - "components/best-practices/modeling/naming-bpmn-elements", - "components/best-practices/modeling/naming-technically-relevant-ids", - "components/best-practices/modeling/modeling-beyond-the-happy-path", - "components/best-practices/modeling/modeling-with-situation-patterns", - "components/best-practices/modeling/building-flexibility-into-bpmn-models", - "components/best-practices/modeling/choosing-the-dmn-hit-policy" - ], - "Operations": [ - "components/best-practices/operations/versioning-process-definitions", - "components/best-practices/operations/reporting-about-processes" - ], - "Camunda 7 specific": [ - "components/best-practices/architecture/deciding-about-your-stack-c7", - "components/best-practices/architecture/sizing-your-environment-c7", - "components/best-practices/development/invoking-services-from-the-process-c7", - "components/best-practices/development/understanding-transaction-handling-c7", - "components/best-practices/operations/operating-camunda-c7", - "components/best-practices/operations/performance-tuning-camunda-c7", - "components/best-practices/operations/securing-camunda-c7", - "components/best-practices/architecture/extending-human-task-management-c7" - ] - } - ] - } - ], - "APIs & Tools": [ - "apis-tools/working-with-apis-tools", - { - "APIs": [ - "apis-tools/administration-api-reference", - { - "Operate API (REST)": [ - { - "type": "autogenerated", - "dirName": "apis-tools/operate-api" - } - ] - }, - { - "Optimize API (REST)": [ - { - "type": "link", - "label": "Authorization", - "href": "/optimize/3.10.0/apis-tools/optimize-api/optimize-api-authorization/" - }, - - { - "Configuration": [ - { - "type": "link", - "label": "Enable sharing", - "href": "/optimize/3.10.0/apis-tools/optimize-api/configuration/enable-sharing/" - }, - { - "type": "link", - "label": "Disable sharing", - "href": "/optimize/3.10.0/apis-tools/optimize-api/configuration/disable-sharing/" - } - ] - }, - - { - "Dashboard": [ - { - 
"type": "link", - "label": "Get dashboard IDs", - "href": "/optimize/3.10.0/apis-tools/optimize-api/dashboard/get-dashboard-ids/" - }, - { - "type": "link", - "label": "Delete dashboards", - "href": "/optimize/3.10.0/apis-tools/optimize-api/dashboard/delete-dashboard/" - }, - { - "type": "link", - "label": "Export dashboard definitions", - "href": "/optimize/3.10.0/apis-tools/optimize-api/dashboard/export-dashboard-definitions/" - } - ] - }, - - { - "Report": [ - { - "type": "link", - "label": "Get report IDs", - "href": "/optimize/3.10.0/apis-tools/optimize-api/report/get-report-ids/" - }, - { - "type": "link", - "label": "Delete reports", - "href": "/optimize/3.10.0/apis-tools/optimize-api/report/delete-report/" - }, - { - "type": "link", - "label": "Export report definitions", - "href": "/optimize/3.10.0/apis-tools/optimize-api/report/export-report-definitions/" - }, - { - "type": "link", - "label": "Export report result data", - "href": "/optimize/3.10.0/apis-tools/optimize-api/report/get-data-export/" - } - ] - }, - - { - "type": "link", - "label": "Event ingestion", - "href": "/optimize/3.10.0/apis-tools/optimize-api/event-ingestion/" - }, - { - "type": "link", - "label": "External variable ingestion", - "href": "/optimize/3.10.0/apis-tools/optimize-api/external-variable-ingestion/" - }, - { - "type": "link", - "label": "Health readiness", - "href": "/optimize/3.10.0/apis-tools/optimize-api/health-readiness/" - }, - { - "type": "link", - "label": "Import entities", - "href": "/optimize/3.10.0/apis-tools/optimize-api/import-entities/" - }, - { - "type": "link", - "label": "Variable labeling", - "href": "/optimize/3.10.0/apis-tools/optimize-api/variable-labeling/" - } - ] - }, - { - "Tasklist API (GraphQL)": [ - { - "type": "autogenerated", - "dirName": "apis-tools/tasklist-api" - } - ] - }, - { - "Tasklist API (REST)": [ - { - "type": "autogenerated", - "dirName": "apis-tools/tasklist-api-rest" - } - ] - }, - "apis-tools/web-modeler-api/index", - "apis-tools/grpc" - ] - }, - { - "Clients": [ - { - "CLI client": [ - "apis-tools/cli-client/index", - "apis-tools/cli-client/cli-get-started" - ] - }, - { - "Go client": [ - "apis-tools/go-client/index", - "apis-tools/go-client/go-get-started" - ] - }, - { - "Java client": [ - "apis-tools/java-client/index", - "apis-tools/java-client/job-worker", - "apis-tools/java-client/logging", - "apis-tools/java-client/zeebe-process-test", - { - "Examples": [ - "apis-tools/java-client-examples/index", - "apis-tools/java-client-examples/process-deploy", - "apis-tools/java-client-examples/process-instance-create", - "apis-tools/java-client-examples/process-instance-create-nonblocking", - "apis-tools/java-client-examples/process-instance-create-with-result", - "apis-tools/java-client-examples/decision-evaluate", - "apis-tools/java-client-examples/job-worker-open", - "apis-tools/java-client-examples/data-pojo", - "apis-tools/java-client-examples/cluster-topology-request" - ] - } - ] - }, - { - "Community clients": [ - "apis-tools/community-clients/index", - { - "Zeebe clients": [ - "apis-tools/community-clients/c-sharp", - "apis-tools/community-clients/javascript", - "apis-tools/community-clients/micronaut", - "apis-tools/community-clients/python", - "apis-tools/community-clients/ruby", - "apis-tools/community-clients/rust", - "apis-tools/community-clients/spring", - "apis-tools/community-clients/quarkus" - ] - } - ] - }, - "apis-tools/build-your-own-client" - ] - } - ], - "Reference": [ - "reference/overview", - "reference/glossary", - 
"reference/announcements", - "reference/release-notes", - "reference/licenses", - "reference/notices", - "reference/status", - "reference/release-policy", - "reference/alpha-features", - "reference/supported-environments", - "reference/regions", - "reference/dependencies", - "reference/usage-metrics" - ], - "Self-Managed": [ - "self-managed/about-self-managed", - { - "Installation": [ - "self-managed/platform-deployment/overview", - { - "Helm/Kubernetes": [ - "self-managed/platform-deployment/helm-kubernetes/overview", - "self-managed/platform-deployment/helm-kubernetes/deploy", - "self-managed/platform-deployment/helm-kubernetes/upgrade", - { - "type": "category", - "label": "Platforms", - "link": { - "type": "doc", - "id": "self-managed/platform-deployment/helm-kubernetes/platforms/platforms" - }, - "items": [ - "self-managed/platform-deployment/helm-kubernetes/platforms/amazon-eks", - "self-managed/platform-deployment/helm-kubernetes/platforms/microsoft-aks", - "self-managed/platform-deployment/helm-kubernetes/platforms/google-gke", - "self-managed/platform-deployment/helm-kubernetes/platforms/redhat-openshift" - ] - }, - { - "type": "category", - "label": "Guides", - "link": { - "type": "doc", - "id": "self-managed/platform-deployment/helm-kubernetes/guides/guides" - }, - "items": [ - "self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster", - "self-managed/platform-deployment/helm-kubernetes/guides/accessing-components-without-ingress", - "self-managed/platform-deployment/helm-kubernetes/guides/ingress-setup", - "self-managed/platform-deployment/helm-kubernetes/guides/using-existing-keycloak", - "self-managed/platform-deployment/helm-kubernetes/guides/air-gapped-installation", - "self-managed/platform-deployment/helm-kubernetes/guides/running-custom-connectors" - ] - }, - "self-managed/platform-deployment/troubleshooting" - ] - }, - "self-managed/platform-deployment/docker", - "self-managed/platform-deployment/manual" - ] - }, - { - "Concepts": [ - { - "Access control": [ - "self-managed/concepts/access-control/applications", - "self-managed/concepts/access-control/apis", - "self-managed/concepts/access-control/groups", - "self-managed/concepts/access-control/permissions", - "self-managed/concepts/access-control/resource-authorizations", - "self-managed/concepts/access-control/roles", - "self-managed/concepts/access-control/users" - ], - "Authentication": ["self-managed/concepts/authentication/m2m-tokens"] - }, - "self-managed/concepts/exporters", - "self-managed/concepts/elasticsearch-privileges" - ] - }, - { - "Zeebe": [ - "self-managed/zeebe-deployment/zeebe-installation", - { - "Zeebe Gateway": [ - "self-managed/zeebe-deployment/zeebe-gateway/overview", - "self-managed/zeebe-deployment/zeebe-gateway/interceptors" - ] - }, - { - "Configuration": [ - "self-managed/zeebe-deployment/configuration/configuration", - "self-managed/zeebe-deployment/configuration/logging", - "self-managed/zeebe-deployment/configuration/gateway-health-probes", - "self-managed/zeebe-deployment/configuration/environment-variables", - "self-managed/zeebe-deployment/configuration/fixed-partitioning", - "self-managed/zeebe-deployment/configuration/priority-election", - "self-managed/zeebe-deployment/configuration/broker-config", - "self-managed/zeebe-deployment/configuration/gateway-config" - ] - }, - { - "Security": [ - "self-managed/zeebe-deployment/security/security", - "self-managed/zeebe-deployment/security/client-authorization", - 
"self-managed/zeebe-deployment/security/secure-client-communication", - "self-managed/zeebe-deployment/security/secure-cluster-communication" - ] - }, - { - "Operation": [ - "self-managed/zeebe-deployment/operations/zeebe-in-production", - "self-managed/zeebe-deployment/operations/resource-planning", - "self-managed/zeebe-deployment/operations/network-ports", - "self-managed/zeebe-deployment/operations/setting-up-a-cluster", - "self-managed/zeebe-deployment/operations/metrics", - "self-managed/zeebe-deployment/operations/health", - "self-managed/zeebe-deployment/operations/backpressure", - "self-managed/zeebe-deployment/operations/disk-space", - "self-managed/zeebe-deployment/operations/update-zeebe", - "self-managed/zeebe-deployment/operations/rebalancing", - "self-managed/zeebe-deployment/operations/management-api", - "self-managed/zeebe-deployment/operations/backups" - ] - }, - { - "type": "category", - "label": "Exporters", - "link": { - "type": "doc", - "id": "self-managed/zeebe-deployment/exporters/exporters" - }, - "items": [ - "self-managed/zeebe-deployment/exporters/install-zeebe-exporters", - "self-managed/zeebe-deployment/exporters/elasticsearch-exporter", - "self-managed/zeebe-deployment/exporters/opensearch-exporter" - ] - } - ], - "Operate": [ - "self-managed/operate-deployment/install-and-start", - "self-managed/operate-deployment/operate-configuration", - "self-managed/operate-deployment/data-retention", - "self-managed/operate-deployment/schema-and-migration", - "self-managed/operate-deployment/importer-and-archiver", - "self-managed/operate-deployment/operate-authentication", - "self-managed/operate-deployment/usage-metrics" - ], - "Tasklist": [ - "self-managed/tasklist-deployment/install-and-start", - "self-managed/tasklist-deployment/tasklist-configuration", - "self-managed/tasklist-deployment/data-retention", - "self-managed/tasklist-deployment/importer-and-archiver", - "self-managed/tasklist-deployment/tasklist-authentication", - "self-managed/tasklist-deployment/usage-metrics" - ], - "Connectors": [ - "self-managed/connectors-deployment/install-and-start", - "self-managed/connectors-deployment/connectors-configuration" - ], - "Optimize": [ - { - "type": "link", - "label": "Installation", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/install-and-start/" - }, - { - "type": "link", - "label": "Version policy", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/version-policy/" - }, - - { - "Configuration": [ - { - "type": "link", - "label": "Getting started", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/getting-started/" - }, - - { - "System configuration": [ - { - "type": "link", - "label": "Overview", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/system-configuration/" - }, - { - "type": "link", - "label": "Camunda 8 system configuration", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-8/" - }, - { - "type": "link", - "label": "Camunda 7 system configuration", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/system-configuration-platform-7/" - }, - { - "type": "link", - "label": "Event-based process system configuration", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/event-based-process-configuration/" - } - ] - }, - - { - "type": "link", - "label": "Logging", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/logging/" - }, - { - "type": "link", - 
"label": "Optimize license key", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/optimize-license/" - }, - { - "type": "link", - "label": "Security instructions", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/security-instructions/" - }, - { - "type": "link", - "label": "Shared Elasticsearch cluster", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster/" - }, - { - "type": "link", - "label": "History cleanup", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/history-cleanup/" - }, - { - "type": "link", - "label": "Localization", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/localization/" - }, - { - "type": "link", - "label": "Object and list variable support", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/object-variables/" - }, - { - "type": "link", - "label": "Clustering", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/clustering/" - }, - { - "type": "link", - "label": "Webhooks", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/webhooks/" - }, - { - "type": "link", - "label": "Authorization management", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/authorization-management/" - }, - { - "type": "link", - "label": "User access management", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/user-management/" - }, - { - "type": "link", - "label": "Multi-tenancy", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/multi-tenancy/" - }, - { - "type": "link", - "label": "Multiple process engines", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/multiple-engines/" - }, - { - "type": "link", - "label": "Event-based processes", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/setup-event-based-processes/" - }, - { - "type": "link", - "label": "Telemetry", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/telemetry/" - }, - { - "type": "link", - "label": "Common problems", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/configuration/common-problems/" - } - ] - }, - - { - "Plugins": [ - { - "type": "link", - "label": "Optimize plugin system", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/plugins/plugin-system/" - }, - { - "type": "link", - "label": "Business key import customization", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/plugins/businesskey-import-plugin/" - }, - { - "type": "link", - "label": "Decision inputs and outputs import customization", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/plugins/decision-import-plugin/" - }, - { - "type": "link", - "label": "Elasticsearch header", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/plugins/elasticsearch-header/" - }, - { - "type": "link", - "label": "Engine REST filter", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/plugins/engine-rest-filter-plugin/" - }, - { - "type": "link", - "label": "Single sign on", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/plugins/single-sign-on/" - }, - { - "type": "link", - "label": "Variable import customization", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/plugins/variable-import-plugin/" - } - ] - }, - - { - "type": "link", - "label": "Camunda engine data reimport", - "href": 
"/optimize/3.10.0/self-managed/optimize-deployment/reimport/" - }, - - { - "Migration & update": [ - { - "type": "link", - "label": "Instructions", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/instructions/" - }, - { - "type": "link", - "label": "Update notes (3.9.x to 3.10)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.9-to-3.10/" - }, - { - "type": "link", - "label": "Update notes (3.9-preview-x to 3.9.x)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.9-preview-1-to-3.9/" - }, - { - "type": "link", - "label": "Update notes (3.8.x to 3.9.x-preview-1)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.8-to-3.9-preview-1/" - }, - { - "type": "link", - "label": "Update notes (3.7.x to 3.8.x)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.7-to-3.8/" - }, - { - "type": "link", - "label": "Update notes (3.6 to 3.7.x)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.6-to-3.7/" - }, - { - "type": "link", - "label": "Update notes (3.5 to 3.6)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.5-to-3.6/" - }, - { - "type": "link", - "label": "Update notes (3.4 to 3.5)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.4-to-3.5/" - }, - { - "type": "link", - "label": "Update notes (3.3 to 3.4)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.3-to-3.4/" - }, - { - "type": "link", - "label": "Update notes (3.2 to 3.3)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.2-to-3.3/" - }, - { - "type": "link", - "label": "Update notes (3.1 to 3.2)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.1-to-3.2/" - }, - { - "type": "link", - "label": "Update notes (3.0 to 3.1)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/3.0-to-3.1/" - }, - { - "type": "link", - "label": "Update notes (2.7 to 3.0)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/2.7-to-3.0/" - }, - { - "type": "link", - "label": "Update notes (2.6 to 2.7)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/2.6-to-2.7/" - }, - { - "type": "link", - "label": "Update notes (2.5 to 2.6)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/2.5-to-2.6/" - }, - { - "type": "link", - "label": "Update notes (2.4 to 2.5)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/2.4-to-2.5/" - }, - { - "type": "link", - "label": "Update notes (2.3 to 2.4)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/2.3-to-2.4/" - }, - { - "type": "link", - "label": "Update notes (2.2 to 2.3)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/2.2-to-2.3/" - }, - { - "type": "link", - "label": "Update notes (2.1 to 2.2)", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/migration-update/2.1-to-2.2/" - } - ] - }, - - { - "Advanced features": [ - { - "type": "link", - "label": "Engine data deletion", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/advanced-features/engine-data-deletion/" - }, - { - "type": "link", - "label": "Data import", - "href": "/optimize/3.10.0/self-managed/optimize-deployment/advanced-features/import-guide/" - } - ] - } - ], - "Identity": [ - 
"self-managed/identity/what-is-identity", - "self-managed/identity/getting-started/install-identity", - { - "User guide": [ - { - "Configuration": [ - "self-managed/identity/user-guide/configuration/making-identity-production-ready", - "self-managed/identity/user-guide/configuration/configure-external-identity-provider", - "self-managed/identity/user-guide/configuration/configure-logging", - "self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak" - ] - }, - { - "Roles": [ - "self-managed/identity/user-guide/roles/add-assign-role", - "self-managed/identity/user-guide/roles/add-assign-permission" - ] - }, - { - "Groups": [ - "self-managed/identity/user-guide/groups/create-group", - "self-managed/identity/user-guide/groups/assign-users-roles-to-group" - ] - }, - { - "Authorizations": [ - "self-managed/identity/user-guide/authorizations/managing-resource-authorizations", - "self-managed/identity/user-guide/authorizations/managing-user-access", - "self-managed/identity/user-guide/authorizations/generating-m2m-tokens" - ] - }, - { - "Additional features": [ - "self-managed/identity/user-guide/additional-features/adding-an-api", - "self-managed/identity/user-guide/additional-features/incorporate-applications" - ] - } - ] - }, - { - "Deployment": [ - "self-managed/identity/deployment/configuration-variables", - "self-managed/identity/deployment/application-monitoring", - "self-managed/identity/deployment/starting-configuration-for-identity" - ], - "Troubleshooting": [ - "self-managed/identity/troubleshooting/troubleshoot-identity", - "self-managed/identity/troubleshooting/common-problems" - ] - } - ] - }, - { - "Modeler": [ - { - "Web Modeler": [ - "self-managed/modeler/web-modeler/installation", - { - "Configuration": [ - "self-managed/modeler/web-modeler/configuration/configuration", - "self-managed/modeler/web-modeler/configuration/database", - "self-managed/modeler/web-modeler/configuration/logging" - ], - "Troubleshooting": [ - "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection", - "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection", - "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login" - ] - } - ] - }, - { - "Desktop Modeler": [ - "self-managed/modeler/desktop-modeler/deploy-to-self-managed" - ] - } - ] - }, - { - "Backup and restore": [ - "self-managed/backup-restore/backup-and-restore", - "self-managed/backup-restore/optimize-backup", - "self-managed/backup-restore/operate-tasklist-backup", - "self-managed/backup-restore/zeebe-backup-and-restore", - "self-managed/backup-restore/modeler-backup-and-restore" - ] - }, - { - "Troubleshooting": ["self-managed/troubleshooting/log-levels"] - } - ] -} diff --git a/versioned_sidebars/version-8.3-sidebars.json b/versioned_sidebars/version-8.3-sidebars.json index 51c19e939cb..e7e04ad51b2 100644 --- a/versioned_sidebars/version-8.3-sidebars.json +++ b/versioned_sidebars/version-8.3-sidebars.json @@ -419,22 +419,7 @@ { "Building custom Connectors": [ "components/connectors/custom-built-connectors/connector-sdk", - "components/connectors/custom-built-connectors/connector-templates", - { - "Update guide": [ - "components/connectors/custom-built-connectors/update-guide/introduction", - "components/connectors/custom-built-connectors/update-guide/0100-to-0110", - "components/connectors/custom-built-connectors/update-guide/090-to-0100", - "components/connectors/custom-built-connectors/update-guide/080-to-090", - 
"components/connectors/custom-built-connectors/update-guide/070-to-080", - "components/connectors/custom-built-connectors/update-guide/060-to-070", - "components/connectors/custom-built-connectors/update-guide/050-to-060", - "components/connectors/custom-built-connectors/update-guide/040-to-050", - "components/connectors/custom-built-connectors/update-guide/030-to-040", - "components/connectors/custom-built-connectors/update-guide/020-to-030", - "components/connectors/custom-built-connectors/update-guide/010-to-020" - ] - } + "components/connectors/custom-built-connectors/connector-templates" ] } ], @@ -954,6 +939,7 @@ "reference/overview", "reference/announcements", "reference/release-notes", + "reference/contact", "reference/auto-updates", "reference/status", "reference/supported-environments", diff --git a/versioned_sidebars/version-8.4-sidebars.json b/versioned_sidebars/version-8.4-sidebars.json index 004f5cbf7a8..e2fefe6dc0d 100644 --- a/versioned_sidebars/version-8.4-sidebars.json +++ b/versioned_sidebars/version-8.4-sidebars.json @@ -430,22 +430,7 @@ { "Building custom Connectors": [ "components/connectors/custom-built-connectors/connector-sdk", - "components/connectors/custom-built-connectors/connector-templates", - { - "Update guide": [ - "components/connectors/custom-built-connectors/update-guide/introduction", - "components/connectors/custom-built-connectors/update-guide/0100-to-0110", - "components/connectors/custom-built-connectors/update-guide/090-to-0100", - "components/connectors/custom-built-connectors/update-guide/080-to-090", - "components/connectors/custom-built-connectors/update-guide/070-to-080", - "components/connectors/custom-built-connectors/update-guide/060-to-070", - "components/connectors/custom-built-connectors/update-guide/050-to-060", - "components/connectors/custom-built-connectors/update-guide/040-to-050", - "components/connectors/custom-built-connectors/update-guide/030-to-040", - "components/connectors/custom-built-connectors/update-guide/020-to-030", - "components/connectors/custom-built-connectors/update-guide/010-to-020" - ] - } + "components/connectors/custom-built-connectors/connector-templates" ] } ], @@ -801,7 +786,7 @@ "Specifications": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/operate-public-api" + "id": "apis-tools/operate-api/specifications/operate-public-api" }, { "type": "category", @@ -809,19 +794,19 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search-2", + "id": "apis-tools/operate-api/specifications/search-2", "label": "Search process definitions", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-key-2", + "id": "apis-tools/operate-api/specifications/by-key-2", "label": "Get process definition by key", "className": "api-method get" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/xml-by-key", + "id": "apis-tools/operate-api/specifications/xml-by-key", "label": "Get process definition as XML by key", "className": "api-method get" } @@ -833,13 +818,13 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search-7", + "id": "apis-tools/operate-api/specifications/search-7", "label": "Search decision definitions", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-key-6", + "id": "apis-tools/operate-api/specifications/by-key-6", "label": "Get decision definition by key", 
"className": "api-method get" } @@ -851,13 +836,13 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search-6", + "id": "apis-tools/operate-api/specifications/search-6", "label": "Search decision instances", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-id", + "id": "apis-tools/operate-api/specifications/by-id", "label": "Get decision instance by id", "className": "api-method get" } @@ -869,13 +854,13 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search-4", + "id": "apis-tools/operate-api/specifications/search-4", "label": "Search flownode-instances", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-key-4", + "id": "apis-tools/operate-api/specifications/by-key-4", "label": "Get flow node instance by key", "className": "api-method get" } @@ -887,13 +872,13 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search", + "id": "apis-tools/operate-api/specifications/search", "label": "Search variables for process instances", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-key", + "id": "apis-tools/operate-api/specifications/by-key", "label": "Get variable by key", "className": "api-method get" } @@ -905,31 +890,31 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search-1", + "id": "apis-tools/operate-api/specifications/search-1", "label": "Search process instances", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-key-1", + "id": "apis-tools/operate-api/specifications/by-key-1", "label": "Get process instance by key", "className": "api-method get" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/delete", + "id": "apis-tools/operate-api/specifications/delete", "label": "Delete process instance and all dependant data by key", "className": "api-method delete" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/get-statistics", + "id": "apis-tools/operate-api/specifications/get-statistics", "label": "Get flow node statistic by process instance id", "className": "api-method get" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/sequence-flows-by-key", + "id": "apis-tools/operate-api/specifications/sequence-flows-by-key", "label": "Get sequence flows of process instance by key", "className": "api-method get" } @@ -941,19 +926,19 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search-5", + "id": "apis-tools/operate-api/specifications/search-5", "label": "Search decision requirements", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-key-5", + "id": "apis-tools/operate-api/specifications/by-key-5", "label": "Get decision requirements by key", "className": "api-method get" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/xml-by-key-1", + "id": "apis-tools/operate-api/specifications/xml-by-key-1", "label": "Get decision requirements as XML by key", "className": "api-method get" } @@ -965,13 +950,13 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/search-3", + "id": "apis-tools/operate-api/specifications/search-3", "label": "Search 
incidents", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/operate-api/specifications/by-key-3", + "id": "apis-tools/operate-api/specifications/by-key-3", "label": "Get incident by key", "className": "api-method get" } @@ -1097,7 +1082,7 @@ "Specifications": [ { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/tasklist-rest-api" + "id": "apis-tools/tasklist-api-rest/specifications/tasklist-rest-api" }, { "type": "category", @@ -1105,7 +1090,7 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/get-form", + "id": "apis-tools/tasklist-api-rest/specifications/get-form", "label": "Get a form", "className": "api-method get" } @@ -1117,7 +1102,7 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/get-variable-by-id", + "id": "apis-tools/tasklist-api-rest/specifications/get-variable-by-id", "label": "Get a variable", "className": "api-method get" } @@ -1129,43 +1114,43 @@ "items": [ { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/save-draft-task-variables", + "id": "apis-tools/tasklist-api-rest/specifications/save-draft-task-variables", "label": "Save draft variables", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/search-task-variables", + "id": "apis-tools/tasklist-api-rest/specifications/search-task-variables", "label": "Search task variables", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/search-tasks", + "id": "apis-tools/tasklist-api-rest/specifications/search-tasks", "label": "Search tasks", "className": "api-method post" }, { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/unassign-task", + "id": "apis-tools/tasklist-api-rest/specifications/unassign-task", "label": "Unassign a task", "className": "api-method patch" }, { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/complete-task", + "id": "apis-tools/tasklist-api-rest/specifications/complete-task", "label": "Complete a task", "className": "api-method patch" }, { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/assign-task", + "id": "apis-tools/tasklist-api-rest/specifications/assign-task", "label": "Assign a task", "className": "api-method patch" }, { "type": "doc", - "id": "version-8.4/apis-tools/tasklist-api-rest/specifications/get-task-by-id", + "id": "apis-tools/tasklist-api-rest/specifications/get-task-by-id", "label": "Get a task", "className": "api-method get" } @@ -1262,6 +1247,7 @@ "reference/overview", "reference/announcements", "reference/release-notes", + "reference/contact", "reference/auto-updates", "reference/status", "reference/supported-environments", diff --git a/versioned_sidebars/version-8.5-sidebars.json b/versioned_sidebars/version-8.5-sidebars.json index 26d8e28c856..380d8c17295 100644 --- a/versioned_sidebars/version-8.5-sidebars.json +++ b/versioned_sidebars/version-8.5-sidebars.json @@ -22,7 +22,6 @@ "guides/setting-up-development-project", "guides/setup-client-connection-credentials", "guides/configuring-out-of-the-box-connectors", - "guides/message-correlation", "guides/use-connectors-in-hybrid-mode", "guides/host-custom-connectors" ], @@ -444,22 +443,7 @@ { "Building custom Connectors": [ "components/connectors/custom-built-connectors/connector-sdk", - 
"components/connectors/custom-built-connectors/connector-templates", - { - "Update guide": [ - "components/connectors/custom-built-connectors/update-guide/introduction", - "components/connectors/custom-built-connectors/update-guide/0100-to-0110", - "components/connectors/custom-built-connectors/update-guide/090-to-0100", - "components/connectors/custom-built-connectors/update-guide/080-to-090", - "components/connectors/custom-built-connectors/update-guide/070-to-080", - "components/connectors/custom-built-connectors/update-guide/060-to-070", - "components/connectors/custom-built-connectors/update-guide/050-to-060", - "components/connectors/custom-built-connectors/update-guide/040-to-050", - "components/connectors/custom-built-connectors/update-guide/030-to-040", - "components/connectors/custom-built-connectors/update-guide/020-to-030", - "components/connectors/custom-built-connectors/update-guide/010-to-020" - ] - } + "components/connectors/custom-built-connectors/connector-templates" ] } ], @@ -1270,7 +1254,7 @@ ] }, { - "Clients": [ + "Clients & SDKs": [ { "CLI client": [ "apis-tools/cli-client/index", @@ -1321,16 +1305,16 @@ } ] }, - "apis-tools/build-your-own-client" - ] - }, - { - "SDKs": [ - "apis-tools/node-js-sdk", + "apis-tools/build-your-own-client", { - "Spring Zeebe": [ - "apis-tools/spring-zeebe-sdk/getting-started", - "apis-tools/spring-zeebe-sdk/configuration" + "SDKs": [ + "apis-tools/node-js-sdk", + { + "Spring Zeebe": [ + "apis-tools/spring-zeebe-sdk/getting-started", + "apis-tools/spring-zeebe-sdk/configuration" + ] + } ] } ] @@ -1356,6 +1340,7 @@ }, "items": ["reference/release-notes/850"] }, + "reference/contact", "reference/auto-updates", "reference/status", "reference/supported-environments", @@ -1988,7 +1973,8 @@ "Troubleshooting": [ "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection", "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection", - "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login" + "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-login", + "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration" ] } ] diff --git a/versioned_sidebars/version-8.6-sidebars.json b/versioned_sidebars/version-8.6-sidebars.json index 45af8548ab5..66f10535b88 100644 --- a/versioned_sidebars/version-8.6-sidebars.json +++ b/versioned_sidebars/version-8.6-sidebars.json @@ -6,7 +6,7 @@ "guides/getting-started-java-spring", "guides/model-your-first-process", { - "By use case": [ + "Orchestration use cases": [ "guides/orchestrate-human-tasks", "guides/orchestrate-apis", "guides/orchestrate-microservices" @@ -25,7 +25,6 @@ "guides/setting-up-development-project", "guides/setup-client-connection-credentials", "guides/configuring-out-of-the-box-connectors", - "guides/message-correlation", "guides/use-connectors-in-hybrid-mode", "guides/host-custom-connectors" ], @@ -101,10 +100,10 @@ "components/console/manage-clusters/create-cluster", "components/console/manage-clusters/manage-cluster", "components/console/manage-clusters/manage-api-clients", + "components/console/manage-clusters/manage-secrets", "components/console/manage-clusters/manage-alerts", "components/console/manage-clusters/manage-ip-allowlists", "components/console/manage-clusters/create-backups", - "components/console/manage-clusters/manage-secrets", "components/console/manage-clusters/settings" ] }, @@ -475,21 +474,7 @@ "Building custom Connectors": [ 
"components/connectors/custom-built-connectors/connector-sdk", "components/connectors/custom-built-connectors/connector-templates", - { - "Update guide": [ - "components/connectors/custom-built-connectors/update-guide/introduction", - "components/connectors/custom-built-connectors/update-guide/0100-to-0110", - "components/connectors/custom-built-connectors/update-guide/090-to-0100", - "components/connectors/custom-built-connectors/update-guide/080-to-090", - "components/connectors/custom-built-connectors/update-guide/070-to-080", - "components/connectors/custom-built-connectors/update-guide/060-to-070", - "components/connectors/custom-built-connectors/update-guide/050-to-060", - "components/connectors/custom-built-connectors/update-guide/040-to-050", - "components/connectors/custom-built-connectors/update-guide/030-to-040", - "components/connectors/custom-built-connectors/update-guide/020-to-030", - "components/connectors/custom-built-connectors/update-guide/010-to-020" - ] - } + "components/connectors/custom-built-connectors/connector-template-generator" ] } ], @@ -1607,8 +1592,7 @@ ] } ] - }, - "apis-tools/tasklist-api-rest/migrate-to-zeebe-user-tasks" + } ] }, { @@ -1699,7 +1683,7 @@ ] }, { - "Clients": [ + "Clients & SDKs": [ { "Java client": [ "apis-tools/java-client/index", @@ -1747,16 +1731,16 @@ } ] }, - "apis-tools/build-your-own-client" - ] - }, - { - "SDKs": [ - "apis-tools/node-js-sdk", + "apis-tools/build-your-own-client", { - "Spring Zeebe": [ - "apis-tools/spring-zeebe-sdk/getting-started", - "apis-tools/spring-zeebe-sdk/configuration" + "SDKs": [ + "apis-tools/node-js-sdk", + { + "Spring Zeebe": [ + "apis-tools/spring-zeebe-sdk/getting-started", + "apis-tools/spring-zeebe-sdk/configuration" + ] + } ] } ] @@ -1780,8 +1764,14 @@ ] } ] + }, + { + "Migration manuals": [ + "apis-tools/migration-manuals/migrate-to-zeebe-user-tasks" + ] } ], + "Reference": [ "reference/overview", "reference/announcements", @@ -1794,6 +1784,7 @@ }, "items": ["reference/release-notes/860", "reference/release-notes/850"] }, + "reference/contact", "reference/supported-environments", "reference/dependencies", "reference/camunda-help-center", @@ -2485,7 +2476,8 @@ "Troubleshooting": [ "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection", "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection", - "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-missing-data" + "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-missing-data", + "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-proxy-configuration" ] } ] diff --git a/versions.json b/versions.json index befa1524951..85dc2d816a2 100644 --- a/versions.json +++ b/versions.json @@ -1 +1 @@ -["8.6", "8.5", "8.4", "8.3", "8.2", "1.3"] +["8.6", "8.5", "8.4", "8.3"]